Category Archives: General

Using ClamAV Daily Scan and Slack Notification

Taking a little inspiration from HowtoForge, I was able to create a simple daily ClamAV scanner that sends a Slack notification if any viruses are found.

#!/bin/bash
# set -x
# Clam Scan Details
CLAMAV="/usr/bin/clamdscan"
LOGFILE="/tmp/clamav-daily-scan.log"
DIRECTORIES="/home /etc /opt"

# Host Details
HOST=$(hostname)
IP=$(hostname -I)

# Slack Webhook
SLACK_WEBHOOK="https://hooks.slack.com/services/XXXXXXXXX/XXXXXXXXX/xxxxxxxxxxxxxxxxxxxxxxxx"
SLACK_CHANNEL="#random"
SLACK_BOTNAME="clamav"
SLACK_ICON=":skull:"

[ ! -f "$CLAMAV" ] && echo "Missing $CLAMAV. Please check the path or install it first" && exit 1

function scan() {
  [ -f "$LOGFILE" ] && rm "$LOGFILE"
  $CLAMAV $DIRECTORIES --fdpass --log="$LOGFILE" --infected --multiscan
}

function notify() {
  # Get the infected-file count from the scan summary
  MALWARE=$(tail "$LOGFILE" | grep Infected | cut -d" " -f3)
  if [ "${MALWARE:-0}" -ne 0 ]; then
    VIRUSES_FOUND=$(grep FOUND "$LOGFILE" | cut -d" " -f2 | sort -u)
    MESSAGE="Found ${MALWARE} infected files on daily virus scan."
    SLACK_PAYLOAD="payload={\"channel\":\"${SLACK_CHANNEL}\",\"icon_emoji\":\"${SLACK_ICON}\",\"username\":\"${SLACK_BOTNAME}\",\"attachments\":[{\"fallback\":\"${MESSAGE}\",\"color\":\"#333\",\"pretext\":\"${MESSAGE}\",\"fields\":[{\"title\":\"Host\",\"value\":\"${HOST}\",\"short\":true},{\"title\":\"Log Location\",\"value\":\"${LOGFILE}\",\"short\":true},{\"title\":\"Host IP(s)\",\"value\":\"${IP}\",\"short\":false},{\"title\":\"Viruses found\",\"value\":\"${VIRUSES_FOUND}\",\"short\":false}]}]}"
    curl -X POST --data-urlencode "${SLACK_PAYLOAD}" "${SLACK_WEBHOOK}"
  fi
}

case "$1" in
  scan|s)
    scan
    ;;
  notify|n)
    notify
    ;;
  *)
    scan
    notify
esac
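
Schedule it via cron to make it an actual daily scan; a minimal example, assuming the script above is saved as /path/to/clamav-daily.sh (with no argument it scans and then notifies):

$ chmod +x /path/to/clamav-daily.sh
$ crontab -e
# Daily ClamAV scan with Slack notification if anything is found
15 02 * * * /path/to/clamav-daily.sh > /dev/null 2>&1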

When a virus is found, it produces a Slack notification showing the host, its IP(s), the log location, and the viruses found.

Using HAProxy Docker with acmetool installed on Docker Host

Install acmetool and configure it as a redirector on port 80 of the host machine.

acmetool quickstart
# choose redirector option
# enable redirector service
# enable renew cron job
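
With the redirector in place, request certificates for the domains HAProxy will serve; acmetool's want subcommand requests them and keeps them renewed (the hostnames here match the haproxy.cfg below):

acmetool want domainone.com.au domaintwo.com.au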

Modify the acmetool defaults to force generation of the HAProxy certificate files:

$ vi /etc/default/acme-reload
# Space separated list of services to restart after certificates are changed.
# By default, this is a list of common webservers like apache2, nginx, haproxy,
# etc. You can append to this list or replace it entirely.
SERVICES="$SERVICES"
HAPROXY_ALWAYS_GENERATE=yes

Run the HAProxy Docker container and mount /var/lib/acme into it. I have also mounted /data/haproxy, where I keep my maintenance.http files, and a custom haproxy.cfg file.

docker run --name haproxy --net mynetwork \
  -v /var/lib/acme:/var/lib/acme \
  -v /data/haproxy:/data/haproxy \
  -v /data/haproxy/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro \
  -p 443:443 \
  --restart=always -d haproxy
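
Before relying on it, you can have HAProxy validate the config file (-c checks the configuration without starting the proxy); a sketch using the same mounts:

docker run --rm \
  -v /var/lib/acme:/var/lib/acme \
  -v /data/haproxy/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro \
  haproxy haproxy -c -f /usr/local/etc/haproxy/haproxy.cfg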

Update the haproxy.cfg file with the path to the acmetool SSL certs, and use an appropriate SSL config generator: https://mozilla.github.io/server-side-tls/ssl-config-generator/

global
    # get default parameters to the modern configuration 
    # using https://mozilla.github.io/server-side-tls/ssl-config-generator/

frontend https-in
  mode http
  bind *:443 ssl crt /var/lib/acme/haproxy/

  # HSTS (15768000 seconds = 6 months)
  http-response set-header Strict-Transport-Security max-age=15768000

  reqadd X-Forwarded-Proto:\ https
  acl is_domain_one hdr_end(host) -i domainone.com.au
  acl is_domain_two hdr_end(host) -i domaintwo.com.au
  use_backend domain_one if is_domain_one
  use_backend domain_two if is_domain_two

backend domain_one
  redirect scheme https if !{ ssl_fc }
  option forwardfor
  option http-server-close
  option httpchk
  server domainone container-one:8085 maxconn 50
  errorfile 500 /data/haproxy/errorfiles/serverdown.http
  errorfile 503 /data/haproxy/errorfiles/maintenance.http

backend domain_two
  redirect scheme https if !{ ssl_fc }
  option forwardfor
  option http-server-close
  option httpchk
  balance source
  mode http
  server domaintwo container-two:8085 maxconn 50
  errorfile 500 /data/haproxy/errorfiles/serverdown.http
  errorfile 503 /data/haproxy/errorfiles/maintenance.http

Lastly, add a custom hook that causes the HAProxy Docker container to reload its config (still under testing).

$ vi /var/lib/acme/hooks/reload-haproxy-docker
#!/bin/sh

# This file reloads the haproxy Docker container when the preferred certificate
# for a hostname changes. By default it assumes your container name is haproxy.
#
# Configuration options:
#   /etc/{default,conf.d}/acme-reload
#     Sourced if they exist. Specify variables here.
#     Please note that most of the time, you don't need to specify anything.
#
#   $CONTAINERS
#     Space-separated list of container names to signal.
#     Append with CONTAINERS="$CONTAINERS haproxy"

###############################################################################
set -e
EVENT_NAME="$1"
[ "$EVENT_NAME" = "live-updated" ] || exit 42

CONTAINERS="haproxy"
[ -e "/etc/default/acme-reload" ] && . /etc/default/acme-reload
[ -e "/etc/conf.d/acme-reload" ] && . /etc/conf.d/acme-reload
[ -z "$ACME_STATE_DIR" ] && ACME_STATE_DIR="/var/lib/acme"

for x in $CONTAINERS; do
  docker kill --signal=HUP "$x"
done
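
To exercise the hook without waiting for a real renewal, run it by hand; acmetool passes the event name as the first argument, so this simulates a certificate change:

$ chmod +x /var/lib/acme/hooks/reload-haproxy-docker
$ /var/lib/acme/hooks/reload-haproxy-docker live-updated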

AWS volume snapshots across multiple regions

So I needed a script to back up volumes each day across multiple regions. I’m sure there are lots of scripts out there, but why not add another?

By default this script sets a UTC expiry date tag on each snapshot and removes the snapshot once that expiry has passed:
1st day of the month = 90-day expiry (monthly)
Sunday = 21-day expiry (weekly)
Any other day = 1-day expiry (daily)

Pre-requisites

Install Python 2.7 and the Python boto library:

$ sudo apt-get install python python-pip
$ sudo pip install boto

IAM Policy

Set up a user in AWS IAM with the following policy, and keep a copy of the credentials; you’ll need them for the script.

{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": [
                "ec2:CreateSnapshot",
                "ec2:CreateTags",
                "ec2:DeleteSnapshot",
                "ec2:DescribeAvailabilityZones",
                "ec2:DescribeRegions",
                "ec2:DescribeSnapshots",
                "ec2:DescribeVolumeAttribute",
                "ec2:DescribeVolumeStatus",
                "ec2:DescribeVolumes"
            ],
            "Resource": [
                "*"
            ]
        }
    ]
}

Snapshot script

Don’t forget to replace the credentials in the script with your own; you may also want to specify different regions.

#!/usr/bin/env python
from datetime import datetime, timedelta
import boto.ec2, sys

# snapshot.py used to backup volumes across AWS regions
# author: Matt Weston

# Using backup-account in IAM
aws_key = 'AWS_ACCESS_KEY'
aws_secret = 'AWS_SECRET_KEY'
regions = ['us-east-1','us-west-2','ap-southeast-2']

# snapshot date information
current_time = datetime.utcnow()
day_of_month = current_time.day
day_of_week = current_time.weekday()
week_of_year = current_time.isocalendar()[1]
month_of_year = current_time.month
snapshot_date = current_time.strftime('%Y-%m-%dT%H:%M:%S.000Z')

# determine type and expiry based on current day, week or month
snapshot_type = 'daily'
snapshot_expires = current_time + timedelta(days=1)
if day_of_week == 6:
  snapshot_type = 'weekly'
  snapshot_expires = current_time + timedelta(days=21)
if day_of_month == 1:
  snapshot_type = 'monthly'
  snapshot_expires = current_time + timedelta(days=90)
snapshot_expiry = snapshot_expires.strftime('%Y-%m-%dT%H:%M:%S.000Z')

# Get all Regions
for region in regions:
  print "connecting to", region
  try:
    connection = boto.ec2.connect_to_region(region, aws_access_key_id=aws_key, aws_secret_access_key=aws_secret)
    volumes = connection.get_all_volumes()
    print 'creating snapshots for all attached volumes'
    for volume in volumes:
      attached = volume.attachment_state()
      if attached:
        # create snapshots
        attach_data = volume.attach_data
        snapshot_name = 'snapshot: '+attach_data.instance_id+":"+attach_data.device
        snapshot = volume.create_snapshot(snapshot_name)
        snapshot.add_tag("snapshot-by", 'snapshot.py')
        snapshot.add_tag("snapshot-type", snapshot_type)
        snapshot.add_tag("snapshot-expiry", snapshot_expiry)
        snapshot.add_tag("snapshot-instance-id", attach_data.instance_id)
        snapshot.add_tag("snapshot-device", attach_data.device)
        print 'created', snapshot

    print 'deleting expired snapshots for all attached volumes'
    volumes = connection.get_all_volumes()
    for volume in volumes:
      attached = volume.attachment_state()
      if attached:
        # cleanup snapshots that have passed their expiry tag
        existing = volume.snapshots()
        for snapshot in existing:
          if snapshot.status == 'completed' and 'snapshot-expiry' in snapshot.tags:
            # use a local name so we don't clobber the expiry used for tagging
            tag_expiry = snapshot.tags['snapshot-expiry']
            expiry_time = datetime.strptime(tag_expiry, '%Y-%m-%dT%H:%M:%S.000Z')
            if expiry_time < current_time:
              print 'deleting expired snapshot', snapshot.id, snapshot.status, snapshot.description
              snapshot.delete()

  except Exception, e:
    print "Unexpected error:", e

Schedule the script using cron

Easy enough to run it as often as needed via cron.

$ chmod +x /path/to/script/snapshot.py
$ crontab -e
# Snapshot attached volumes each day and cleanup expired
30 01 * * * /path/to/script/snapshot.py > /path/to/script/snapshot.log 2>&1
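
If you also have the AWS CLI configured with the same credentials, a quick way to eyeball what the script created (filtering on the snapshot-by tag it sets):

$ aws ec2 describe-snapshots --region ap-southeast-2 \
    --filters Name=tag:snapshot-by,Values=snapshot.py \
    --query 'Snapshots[].[SnapshotId,StartTime,Description]' --output table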

Mercurial Contribution Charts

One of the things I’d really like to see is for Bitbucket to have GitHub-style contribution graphs, and after waiting a while to see if feature request #4307 was ever going to get a green light, I decided to see how hard it would be to build it myself.

Yes, I am sure there are 100 people who have already done this… but I didn’t find any that I particularly liked, I wanted the distraction this weekend, and it was surprisingly easy to do.

After building a simple hg log parser in Python, I was able to convert the data to JSON and wrap it with Highcharts to produce a couple of really nice looking interactive contribution graphs.
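
If you want to poke at the raw data yourself, a template along these lines gets hg to emit one easily parsed line per changeset ({diffstat} reports files changed plus lines added/removed; the exact fields my parser uses may differ):

$ hg log --template '{date|shortdate}|{author|person}|{diffstat}\n'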

Charts: commits and diffs.

You can grab the code and take a look yourself:
GitHub Repo

If you want to contribute, or have some ideas or feedback, hit me up on Google+.

Enabling SSL and Best Practice Ciphers

Get a free SSL certificate from StartSSL, which is surprisingly easy, even if their website is a little awkward to navigate.

After creating a private key and obtaining the certificate and intermediate certificate (sub.class1.server.ca.pem), you can set up your apache/nginx server.

Use the modern compatibility ciphers listed on the Mozilla wiki; the sections for both apache and nginx have details on how to configure your individual server. Or just use their handy online SSL config generator.

Tip for nginx: you will want to create a chained certificate and use that as your ssl_certificate file.

$ cat yourdomain.crt intermediate.pem > yourdomain.crt.chained
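
A minimal nginx sketch of where the pieces go (paths and names are placeholders; take the actual protocol and cipher settings from the Mozilla generator):

server {
    listen 443 ssl;
    server_name yourdomain.com;

    # the chained certificate created above
    ssl_certificate     /etc/nginx/ssl/yourdomain.crt.chained;
    ssl_certificate_key /etc/nginx/ssl/yourdomain.key;
    ssl_prefer_server_ciphers on;

    # HSTS (15768000 seconds = 6 months)
    add_header Strict-Transport-Security max-age=15768000;
}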

Tip for apache: enable the ssl and headers modules.

$ sudo a2enmod ssl headers

Tip for wordpress: to force administration pages to use SSL, add this near the top of wp-config.php:

define('FORCE_SSL_ADMIN', true);

Now you can visit my site via https://mattyboy.net

Sendmail Masquerade Using Generic Map

Changes to configure sendmail to masquerade the from address, allowing me to replace a local system email address like user@server.localhost.localdomain with another email address such as noreply@domain.com or username@domain.com.

Create /etc/mail/generics mapping local users to their outgoing addresses:

root noreply@domain.com
username username@domain.com
example example@domain.com
...
Then add the genericstable and masquerade features to /etc/mail/sendmail.mc:

FEATURE(genericstable, hash /etc/mail/generics)dnl
GENERICS_DOMAIN(servername.localhost.localdomain)dnl
GENERICS_DOMAIN(domain.com)dnl
FEATURE(masquerade_envelope)dnl
FEATURE(allmasquerade)dnl
MAILER(smtp)dnl
MAILER(procmail)dnl
...

Run the following commands:

$ makemap hash /etc/mail/generics < /etc/mail/generics
$ m4 /etc/mail/sendmail.mc > /etc/mail/sendmail.cf
$ /etc/init.d/sendmail restart
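
To confirm the map built correctly, you can dump it back out (makemap's -u flag lists the contents of the database):

$ makemap -u hash /etc/mail/generics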

Test using the following command:

echo "Checking if masquerade worked" | mailx -s "Masquerade Email Test" user@domain.com

Disclaimer: I would not rely on this information in a production environment.

Heartbleed nginx check

A few quick commands to check that nginx has been patched successfully for Heartbleed.

# check nginx compile config
$ /opt/nginx/sbin/nginx -V
nginx version: nginx/1.4.0
built by gcc 4.7.2 (Debian 4.7.2-5) 
TLS SNI support enabled
configure arguments: --prefix=/opt/nginx --with-http_ssl_module --with-pcre=/opt/nginx/pcre-8.32 --with-zlib=/opt/nginx/zlib-1.2.8

# check which ssl library
$ ldd /opt/nginx/sbin/nginx | grep ssl
	libssl.so.1.0.0 => /usr/lib/x86_64-linux-gnu/libssl.so.1.0.0 (0x00007f489cd5a000)

$ strings /usr/lib/x86_64-linux-gnu/libssl.so.1.0.0 | grep "^OpenSSL "
OpenSSL 1.0.1e 11 Feb 2013

# check full ssl version (including when it was built)
$ openssl version -a
OpenSSL 1.0.1e 11 Feb 2013
built on: Tue Apr  8 08:49:19 UTC 2014
platform: debian-amd64
options:  bn(64,64) rc4(16x,int) des(idx,cisc,16,int) blowfish(idx) 
compiler: gcc -fPIC -DOPENSSL_PIC -DZLIB -DOPENSSL_THREADS -D_REENTRANT -DDSO_DLFCN -DHAVE_DLFCN_H -m64 -DL_ENDIAN -DTERMIO -g -O2 -fstack-protector --param=ssp-buffer-size=4 -Wformat -Werror=format-security -D_FORTIFY_SOURCE=2 -Wl,-z,relro -Wa,--noexecstack -Wall -DMD32_REG_T=int -DOPENSSL_IA32_SSE2 -DOPENSSL_BN_ASM_MONT -DOPENSSL_BN_ASM_MONT5 -DOPENSSL_BN_ASM_GF2m -DSHA1_ASM -DSHA256_ASM -DSHA512_ASM -DMD5_ASM -DAES_ASM -DVPAES_ASM -DBSAES_ASM -DWHIRLPOOL_ASM -DGHASH_ASM
OPENSSLDIR: "/usr/lib/ssl"

# check changelog for note regarding CVE-2014-0160 patch
$ aptitude changelog openssl
openssl (1.0.1e-2+deb7u5) wheezy-security; urgency=high

  * Non-maintainer upload by the Security Team.
  * Add CVE-2014-0160.patch patch.
    CVE-2014-0160: Fix TLS/DTLS heartbeat information disclosure.
    A missing bounds check in the handling of the TLS heartbeat extension
    can be used to reveal up to 64k of memory to a connected client or
    server.
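
One extra check worth doing: make sure nginx was actually restarted after the upgrade and isn't still holding the old, deleted libssl in memory (no output means you're clean):

$ lsof -n | grep libssl | grep DEL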

Download script

Just a little script to download files from my remote server and, if need be, unrar them.

#!/bin/bash
LOCAL="/opt/files"
UNRAR="$LOCAL/extract"
LOG="$LOCAL/download.log"
REMOTE="server.com.au:path/to/files"
INPUT=$2

function init() {
  # create required directories
  [ ! -d "$LOCAL" ] && mkdir $LOCAL && echo "created $LOCAL"
  [ ! -d "$UNRAR" ] && mkdir $UNRAR && echo "created $UNRAR"
}

function download() {
  # append wildcard to input
  [ -n "$INPUT" ] && INPUT="${INPUT}*" && echo "Downloading $INPUT"
  [ -z "$INPUT" ] && INPUT="*" && echo "Downloading all files"
  # rsync files from the remote server to the local folder (runs in the background)
  rsync -a --quiet --compress --log-file="$LOG" "$REMOTE/$INPUT" "$LOCAL/" >> "$LOG" 2>&1 &
}

function unrar() {
  echo "Extracting *.rar files to $UNRAR"
  # unrar: convert filenames to lower case, exclude paths, and do not overwrite
  find "$LOCAL" -name "*.rar" -exec unrar x -cl -ep -o- {} "$UNRAR" \; >> "$LOG" 2>&1 &
}

init
case "$1" in
  download|d)
    download
    ;;
  unrar|u)
    unrar
    ;;
  both|b)
    download
    wait # let the background rsync finish before extracting
    unrar
    ;;
  *)
    echo "Usage: $0 {download|unrar|both}"
esac
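
Typical usage, assuming the script is saved as /path/to/download.sh:

$ /path/to/download.sh d somefile   # download files matching somefile*
$ /path/to/download.sh b            # download everything, then extract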