Category Archives: General

Using ClamAV Daily Scan and Slack Notification

Taking a little inspiration from HowtoForge I was able to create a simple daily ClamAV scanner that sends a slack notification if any viruses are found.

# set -x
# Clam Scan Details — directories clamscan will sweep on each daily run.
DIRECTORIES="/home /etc /opt"

# Host Details — IP address(es) reported in the Slack notification.
IP=$(hostname -I)

# Slack Webhook
# NOTE(review): the definitions of CLAMAV, LOGFILE, SLACK_WEBHOOK,
# SLACK_CHANNEL and SLACK_BOTNAME appear to have been lost from this
# section; they are referenced below and must be set here, e.g.:
#   CLAMAV=/usr/bin/clamscan
#   LOGFILE=/var/log/clamav/daily-scan.log

# Bail out early if the scanner binary is absent ("$CLAMAV" is quoted so a
# value containing spaces — or an unset value — cannot break the test).
[ ! -f "$CLAMAV" ] && echo "Missing $CLAMAV. Please check path or install first" && exit 1

# scan: delete any previous scan log, then run clamscan over $DIRECTORIES,
# logging only infected files. The closing brace was missing in the
# published snippet. $DIRECTORIES is intentionally left unquoted so it
# word-splits into one argument per directory.
function scan() {
  [ -f "$LOGFILE" ] && rm "$LOGFILE"
  $CLAMAV $DIRECTORIES --fdpass --log="$LOGFILE" --infected --multiscan
}

# notify: parse the scan log and post a Slack message when infections were
# found. The closing "fi" and "}" were missing in the published snippet.
function notify() {
  # The clamscan summary at the end of the log contains "Infected files: N";
  # field 3 of that line is the count. Default to 0 if the line is absent so
  # the numeric comparison below cannot error out.
  MALWARE=$(tail "$LOGFILE" | grep Infected | cut -d" " -f3)
  if [ "${MALWARE:-0}" -ne "0" ]; then
    # Unique, sorted list of virus names from the "... FOUND" lines.
    VIRUSES_FOUND=$(grep FOUND "$LOGFILE" | cut -d" " -f2 | sort -u)
    MESSAGE="Found ${MALWARE} infected files on daily virus scan."
    SLACK_PAYLOAD="payload={\"channel\":\"${SLACK_CHANNEL}\",\"icon_emoji\":\":skull:\",\"username\":\"${SLACK_BOTNAME}\",\"attachments\":[{\"fallback\":\"${MESSAGE}\",\"color\":\"#333\",\"pretext\":\"${MESSAGE}\",\"fields\":[{\"title\":\"Host\",\"value\":\"${HOST}\",\"short\":true},{\"title\":\"Log Location\",\"value\":\"${LOGFILE}\",\"short\":true},{\"title\":\"Host IP(s)\",\"value\":\"${IP}\",\"short\":false},{\"title\":\"Viruses found\",\"value\":\"${VIRUSES_FOUND}\",\"short\":false}]}]}"
    curl -X POST --data-urlencode "${SLACK_PAYLOAD}" "${SLACK_WEBHOOK}"
  fi
}

case "$1" in

When a virus is found it produces a slack notification that looks like this:

Using HAProxy Docker with acmetool installed on Docker Host

Install acmetool and configure it as a redirector on port 80 of the host machine.

acmetool quickstart
# choose redirector option
# enable redirector service
# enable renew cron job

Modify the acme-tool defaults to force generation of HAProxy files

$ vi /etc/default/acme-reload
# Space separated list of services to restart after certificates are changed.
# By default, this is a list of common webservers like apache2, nginx, haproxy,
# etc. You can append to this list or replace it entirely.

Run HAProxy docker container and link it to /var/lib/acme. I have also linked /data/haproxy where I keep my maintenance.http files and a custom haproxy.cfg file.

docker run --name haproxy --net mynetwork \
  -v /var/lib/acme:/var/lib/acme \
  -v /data/haproxy:/data/haproxy \
  -v /data/haproxy/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro \
  -p 443:443 \
  --restart=always -d haproxy

Update the haproxy.cfg file with the path to the acmetool SSL certs and use an appropriate SSL config generator:

    # get default parameters to the modern configuration 
    # using

# https-in: single TLS entry point; routes each request to a backend
# chosen by the Host header.
frontend https-in
  mode http
  # Terminate TLS with every certificate acmetool writes into this directory.
  bind *:443 ssl crt /var/lib/acme/haproxy/

  # HSTS (15768000 seconds = 6 months)
  http-response set-header Strict-Transport-Security max-age=15768000

  # Tell the backends the original request arrived over HTTPS.
  reqadd X-Forwarded-Proto:\ https
  # NOTE(review): the hostname arguments of the two acl lines below are
  # missing (lost in extraction); each should end with a domain, e.g.
  #   acl is_domain_one hdr_end(host) -i example.com
  acl is_domain_one hdr_end(host) -i
  acl is_domain_two hdr_end(host) -i
  use_backend domain_one if is_domain_one
  use_backend domain_two if is_domain_two

# domain_one: single application container behind the TLS frontend.
backend domain_one
  # Bounce any plain-HTTP request back to HTTPS.
  redirect scheme https if !{ ssl_fc }
  # Pass the client IP to the app in X-Forwarded-For.
  option forwardfor
  option http-server-close
  # NOTE(review): httpchk only takes effect if the server line below also
  # carries the "check" keyword — confirm against the live config.
  option httpchk
  server domainone container-one:8085 maxconn 50
  # Custom error pages served from the mounted /data/haproxy volume.
  errorfile 500 /data/haproxy/errorfiles/serverdown.http
  errorfile 503 /data/haproxy/errorfiles/maintenance.http

# domain_two: as domain_one, but with source-IP-sticky balancing.
backend domain_two
  # Bounce any plain-HTTP request back to HTTPS.
  redirect scheme https if !{ ssl_fc }
  # Pass the client IP to the app in X-Forwarded-For.
  option forwardfor
  option http-server-close
  # NOTE(review): httpchk only takes effect if the server line below also
  # carries the "check" keyword — confirm against the live config.
  option httpchk
  # Hash on client source address so a client sticks to one server.
  balance source
  mode http
  server domainotwo container-two:8085 maxconn 50
  # Custom error pages served from the mounted /data/haproxy volume.
  errorfile 500 /data/haproxy/errorfiles/serverdown.http
  errorfile 503 /data/haproxy/errorfiles/maintenance.http

Lastly, add a custom hook to make the haproxy docker container reload its config (still under testing).

$ vi /var/lib/acme/hooks/reload-haproxy-docker

# This file reloads haproxy docker when the preferred certificate for a hostname
# changes. By default it assumes your docker container name is haproxy. 
# Configuration options:
#   /etc/{default,conf.d}/acme-reload
#     Sourced if they exist. Specify variables here.
#     Please note that most of the time, you don't need to specify anything.
#     Space-separated list of daemons to reload.
#     Append with CONTAINERS="$CONTAINERS haproxy"

set -e
# acmetool fires hooks for several events; only act when certificates were
# actually updated. Exit 42 tells acmetool the hook declined the event.
[ "$EVENT_NAME" = "live-updated" ] || exit 42

# Pull in optional site configuration (e.g. CONTAINERS="haproxy").
[ -e "/etc/default/acme-reload" ] && . /etc/default/acme-reload
[ -e "/etc/conf.d/acme-reload" ] && . /etc/conf.d/acme-reload
[ -z "$ACME_STATE_DIR" ] && ACME_STATE_DIR="/var/lib/acme"

# Send SIGHUP to each named container so it reloads its certificates.
# $CONTAINERS is intentionally unquoted: it is a space-separated list.
# (The closing "done" was missing in the published snippet.)
for x in $CONTAINERS; do
  docker kill --signal=HUP "$x"
done

AWS volume snapshots across multiple regions

So I needed a script to backup volumes each day from multiple regions. I’m sure there are lots of scripts out there but why not add another.

By default this script sets a UTC expiry date tag on snapshots. After the expiry is reached it removes old snapshots.
1st day of month = default expiry 90 days
Sunday = default expiry 21 days
Others = default expiry 1 day


Install python 2.7 and python boto library

$ sudo apt-get install python python-pip
$ sudo pip install boto

IAM Policy

Set up a user in AWS IAM with the following policy, and keep a copy of the credentials — you'll need them for the script.

    "Version": "2012-10-17",
    "Statement": [
            "Effect": "Allow",
            "Action": [
            "Resource": [

Snapshot script

Don’t forget to replace the credentials in the script with your own, you may also want to specify different regions.

#!/usr/bin/env python
from datetime import datetime, timedelta
import boto.ec2, sys

# used to backup volumes across AWS regions
# author: Matt Weston

# Using backup-account in IAM
aws_key = 'AWS_ACCESS_KEY'
aws_secret = 'AWS_SECRET_KEY'
regions = ['us-east-1','us-west-2','ap-southeast-2']

# snaphot date information
current_time = datetime.utcnow()
day_of_month =
day_of_week = current_time.weekday()
week_of_year = current_time.isocalendar()[1]
month_of_year = current_time.month
snapshot_date = current_time.strftime('%Y-%m-%dT%H:%M:%S.000Z')

# determine type and expiry based on current day, week or month
snapshot_type = 'daily'
snapshot_expires = current_time + timedelta(days=1)
if day_of_week == 6:
  snapshot_type = 'weekly'
  snapshot_expires = current_time + timedelta(days=21)
if day_of_month == 1:
  snapshot_type = 'monthly'
  snapshot_expires = current_time + timedelta(days=90)
snapshot_expiry = snapshot_expires.strftime('%Y-%m-%dT%H:%M:%S.000Z')

# Get all Regions
for region in regions:
  print "connecting to", region
    connection = boto.ec2.connect_to_region(region, aws_access_key_id=aws_key, aws_secret_access_key=aws_secret)
    volumes = connection.get_all_volumes()
    print 'creating snapshots for all attached volumes'
    for volume in volumes:
      attached = volume.attachment_state()
      if attached:
        # create snapshots
        attach_data = volume.attach_data
        snapshot_name = 'snapshot: '+attach_data.instance_id+":"+attach_data.device
        snapshot = volume.create_snapshot(snapshot_name)
        snapshot.add_tag("snapshot-by", '')
        snapshot.add_tag("snapshot-type", snapshot_type)
        snapshot.add_tag("snapshot-expiry", snapshot_expiry)
        snapshot.add_tag("snapshot-instance-id", attach_data.instance_id)
        snapshot.add_tag("snapshot-device", attach_data.device)
        print 'created', snapshot 
    print 'deleting expired snapshots for all attached volumes'
    volumes = connection.get_all_volumes()
    for volume in volumes:
      attached = volume.attachment_state()
      if attached:
        # cleanup snapshots
        existing =  volume.snapshots()
        for snapshot in existing:
          if snapshot.status == 'completed' and 'snapshot-expiry' in snapshot.tags:
            snapshot_expiry = snapshot.tags['snapshot-expiry']
            expiry_time = datetime.strptime(snapshot_expiry, '%Y-%m-%dT%H:%M:%S.000Z')
            if expiry_time < current_time:
              print 'expired snapshot',, snapshot.status, snapshot.description

  except Exception, e:
    print "Unexpected error:", sys.exc_info()[0]

Schedule the script using cron

Easy enough to run it as often as needed via cron.

$ chmod +x /path/to/script/snapshot.py
$ crontab -e
# Snapshot attached volumes each day and cleanup expired
30 01 * * * /path/to/script/snapshot.py > /path/to/script/snapshot.log 2>&1

Mercurial Contribution Charts

One of the things I’d really like to see is Bitbucket to have Github style contribution graphs and after waiting a while to see if that feature request #4307 was ever going to get a green light I decided to see how hard it would be to build it myself.

Yes I am sure there are 100 people who have already done this… but I didn’t find any that I particularly liked, I wanted the distraction this weekend and it was surprisingly easy to do.

After building a simple hg log parser in python I was able to convert the data to json and wrap it with highcharts to produce a couple of really nice looking interactive contribution graphs.



You can grab the code and take a look yourself:
GitHub Repo

If you want to contribute, have some ideas or feedback hit me up on Google+

Enabling SSL and Best Practice Ciphers

Get a free SSL certificate from StartSSL which is surprisingly easy, even if their website is a little awkward to navigate.

After creating a private key and obtaining the certificate and the intermediate certificate, you can set up your apache/nginx server.

Use the modern compatibility ciphers listed on the Mozilla wiki and the sections for both apache and nginx for details on how to configure your individual server or use their handy online tool ssl config generator

Tip for nginx: you will want to create a chained certificate, and use that as your ssl_certificate file.

$ cat yourdomain.crt intermediate.pem > yourdomain.crt.chained

Tip for apache enable ssl and headers modules

$ sudo a2enmod ssl headers

Tip for wordpress to force administration pages to use SSL add near the top of the wp-config.php

define('FORCE_SSL_ADMIN', true);

Now you can visit my site via HTTPS.

Sendmail Masquerade Using Generic Map

Changes to configure sendmail to masquerade the from address, allowing me to replace a local system email address like user@server.localhost.localdomain with another address such as user@example.com.

Create the following files:

FEATURE(genericstable, hash /etc/mail/generics)dnl

Run the following commands:

$ makemap hash /etc/mail/generics < /etc/mail/generics
$ m4 /etc/mail/sendmail.mc > /etc/mail/sendmail.cf
$ /etc/init.d/sendmail restart

Test using the following command:

echo "Checking if masquerade worked" | mailx -s "Masquerade Email Test" user@example.com

Disclaimer: I would not rely on this information in a production environment.

Heartbleed nginx check

A few quick commands to check nginx has been patched successfully for heartbleed.

# check nginx compile config
$ /opt/nginx/sbin/nginx -V
nginx version: nginx/1.4.0
built by gcc 4.7.2 (Debian 4.7.2-5) 
TLS SNI support enabled
configure arguments: --prefix=/opt/nginx --with-http_ssl_module --with-pcre=/opt/nginx/pcre-8.32 --with-zlib=/opt/nginx/zlib-1.2.8

# check which ssl library
$ ldd /opt/nginx/sbin/nginx | grep ssl
	libssl.so.1.0.0 => /usr/lib/x86_64-linux-gnu/libssl.so.1.0.0 (0x00007f489cd5a000)

$ strings /usr/lib/x86_64-linux-gnu/libssl.so.1.0.0 | grep "^OpenSSL "
OpenSSL 1.0.1e 11 Feb 2013

# check full ssl version (including when it was built)
$ openssl version -a
OpenSSL 1.0.1e 11 Feb 2013
built on: Tue Apr  8 08:49:19 UTC 2014
platform: debian-amd64
options:  bn(64,64) rc4(16x,int) des(idx,cisc,16,int) blowfish(idx) 
OPENSSLDIR: "/usr/lib/ssl"

# check changelog for note regarding CVE-2014-0160 patch
$ aptitude changelog openssl
openssl (1.0.1e-2+deb7u5) wheezy-security; urgency=high

  * Non-maintainer upload by the Security Team.
  * Add CVE-2014-0160.patch patch.
    CVE-2014-0160: Fix TLS/DTLS heartbeat information disclosure.
    A missing bounds check in the handling of the TLS heartbeat extension
    can be used to reveal up to 64k of memory to a connected client or

Download script

Just a little script to download files from my remote server and if need be unrar the files.


# init: make sure the download ($LOCAL) and extraction ($UNRAR) directories
# exist. The closing brace was missing in the published snippet.
function init() {
  # create required directories
  [ ! -d "$LOCAL" ] && mkdir "$LOCAL" && echo "created $LOCAL"
  [ ! -d "$UNRAR" ] && mkdir "$UNRAR" && echo "created $UNRAR"
}

# download: rsync files matching $INPUT (or everything when $INPUT is empty)
# from $REMOTE to $LOCAL in the background, appending output to $LOG.
# The closing brace was missing in the published snippet.
function download() {
  # append wildcard to input
  [ -n "$INPUT" ] && INPUT="${INPUT}*" && echo "Downloading $INPUT"
  [ -z "$INPUT" ] && INPUT="*" && echo "Downloading all files"
  # rsync files from remote server to local folder; $REMOTE/$INPUT is left
  # unquoted on purpose so the wildcard is expanded, not passed literally.
  rsync -a --quiet --compress --log-file="$LOG" $REMOTE/$INPUT "$LOCAL"/ >> "$LOG" 2>&1 &
}

# unrar: extract every .rar under $LOCAL into $UNRAR in the background.
# The closing brace was missing in the published snippet. Note: "find -exec"
# runs the external unrar binary, so the shell function sharing its name
# does not recurse.
function unrar() {
  echo "Extracting *.rar files to $UNRAR"
  # unrar: convert filenames to lower case (-cl), exclude paths (-ep) and
  # do not overwrite existing files (-o-)
  find "$LOCAL" -name "*.rar" -exec unrar x -cl -ep -o- {} "$UNRAR" \; >> "$LOG" 2>&1 &
}

case "$1" in
    echo "Usage: $0 {download|unrar|both}"