Latest Stories

Backup Strategy; tags=FOSS, kFreeBSD, Backup

I've been working on my backup strategy for the notebook recently. The idea is to have a full backup every month and incremental backups in between, as fine-grained as possible. As it's a mobile device, there's no point in time where it is guaranteed to be up, connected and within reach of the backup server.

As I'm running Debian GNU/kFreeBSD on it, using ZFS and specifically zfs send comes quite naturally. I'm now generating a new file system snapshot every day (if the notebook happens to be online during that day) using cron.

@daily zfs snapshot base/root@`date -I`
@daily zfs snapshot base/home@`date -I`
@reboot zfs snapshot base/root@`date -I`
@reboot zfs snapshot base/home@`date -I`

When connected to the home network I synchronize all incremental snapshots that are not yet on the backup server. This uses zfs send together with gpg to encrypt the data, which is then uploaded to SFTP storage. For the first snapshot of every month a full backup is created. As there doesn't seem to be a way to merge zfs send streams without importing everything into a zfs pool, I additionally create incremental streams against the first snapshot of the previous month, so I can delete older full backups and daily snapshots and still keep coarse-grained backups for a longer period of time.
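
With the naming scheme the script below uses, the directory on the backup server ends up looking roughly like this for the root file system (the dates are only an example; home is handled the same way):

root@2014-06-01.full.zfs.gpg
root@2014-06-01.from.2014-05-01.zfs.gpg
root@2014-06-02.from.2014-06-01.zfs.gpg
root@2014-06-03.from.2014-06-02.zfs.gpg
...
root@2014-07-01.full.zfs.gpg
root@2014-07-01.from.2014-06-01.zfs.gpg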

#!/usr/bin/python
# -*- coding: utf-8 -*-

####################
# Config
SFTP_HOST = 'botero.siccegge.de'
SFTP_DIR  = '/srv/backup/mitoraj'
SFTP_USER = 'root'
ZPOOL     = 'base'
GPGUSER   = '9FED5C6CE206B70A585770CA965522B9D49AE731'
#
####################

import subprocess
import os.path
import sys
import paramiko


term = {
    'green':  "\033[0;32m",
    'red':    "\033[0;31m",
    'yellow': "\033[0;33m",
    'purple': "\033[0;35m",
    'none':   "\033[0m",
    }

sftp = None

def print_colored(data, color):
    sys.stdout.write(term[color])
    sys.stdout.write(data)
    sys.stdout.write(term['none'])
    sys.stdout.write('\n')
    sys.stdout.flush()

def postprocess_datasets(datasets):
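    # Turn a flat list of "device@date" snapshot names into a dict mapping
    # each device to the sorted list of its snapshot dates.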
    devices = set([entry.split('@')[0] for entry in datasets])

    result = dict()
    for device in devices:
        result[device] = sorted([ entry.split('@')[1] for entry in datasets
                                    if entry.startswith(device) ])

    return result

def sftp_connect():
    global sftp

    host_keys = paramiko.util.load_host_keys(os.path.expanduser('~/.ssh/known_hosts'))
    hostkeytype = host_keys[SFTP_HOST].keys()[0]
    hostkey = host_keys[SFTP_HOST][hostkeytype]

    agent = paramiko.Agent()
    transport = paramiko.Transport((SFTP_HOST, 22))
    transport.connect(hostkey=hostkey)

    for key in agent.get_keys():
        try:
            transport.auth_publickey(SFTP_USER, key)
            break
        except paramiko.SSHException:
            continue

    sftp = paramiko.SFTPClient.from_transport(transport)
    sftp.chdir(SFTP_DIR)

def sftp_send(dataset, reference=None):
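    # Pipe `zfs send` (optionally incremental against `reference`) through
    # gpg and stream the encrypted result into a file on the sftp server.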
    zfscommand = ['sudo', 'zfs', 'send']
    if reference is not None:
        zfscommand = zfscommand + ['-i', reference]
    zfscommand = zfscommand + ['%s/%s' % (ZPOOL, dataset)]

    zfs = subprocess.Popen(zfscommand, stdout=subprocess.PIPE)

    gpgcommand = [ 'gpg', '--batch', '--compress-algo', 'ZLIB',
                   '--sign', '--encrypt', '--recipient', GPGUSER ]
    gpg = subprocess.Popen(gpgcommand, stdout=subprocess.PIPE,
                                       stdin=zfs.stdout,
                                       stderr=subprocess.PIPE)

    gpg.poll()
    if gpg.returncode not in [None, 0]:
        print_colored("Error:\n\n" + gpg.stderr, 'red')
        return

    if reference is None:
        filename = '%s.full.zfs.gpg' % dataset
    else:
        filename = '%s.from.%s.zfs.gpg' % (dataset, reference)

    with sftp.open(filename, 'w') as remotefile:
        sys.stdout.write(term['purple'])
        while True:
            junk = gpg.stdout.read(1024*1024)
            if len(junk) == 0:
                break

            sys.stdout.write('#')
            sys.stdout.flush()
            remotefile.write(junk)
        print_colored(" DONE", 'green')

def synchronize(local_datasets, remote_datasets):
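    # Upload everything that is missing on the remote side: the first snapshot
    # of each month as a full stream (plus an incremental against the first
    # snapshot of the previous month), all other snapshots as incrementals
    # against their predecessor.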
    for device in local_datasets.keys():
        current = ""
        for dataset in local_datasets[device]:
            last = current
            current = dataset

            if device in remote_datasets:
                if dataset in remote_datasets[device]:
                    print_colored("%s@%s -- found on remote server" % (device, dataset), 'yellow')
                    # still track the first snapshot of each month so later
                    # incrementals have a valid reference
                    if last == '' or last[:7] != dataset[:7]:
                        lastmonth = dataset
                    continue

            if last == '':
                print_colored("Initial syncronization for device %s" % device, 'green')
                sftp_send("%s@%s" % (device, dataset))
                lastmonth = dataset
                continue

            if last[:7] == dataset[:7]:
                print_colored("%s@%s -- incremental backup (reference: %s)" %
                              (device, dataset, last), 'green')
                sftp_send("%s@%s" % (device, dataset), last)
            else:
                print_colored("%s@%s -- full backup" % (device, dataset), 'green')
                sftp_send("%s@%s" % (device, dataset))
                print_colored("%s@%s -- doing incremental backup" % (device, dataset), 'green')
                sftp_send("%s@%s" % (device, dataset), lastmonth)
                lastmonth = dataset

def get_remote_datasets():
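    # Remote backups are named like "root@2014-06-27.full.zfs.gpg" or
    # "root@2014-06-27.from.2014-06-26.zfs.gpg"; recover the "device@date" part.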
    datasets = sftp.listdir()
    datasets = filter(lambda x: '@' in x, datasets)

    datasets = [ entry.split('.')[0] for entry in datasets ]

    return postprocess_datasets(datasets)

def get_local_datasets():
    datasets = subprocess.check_output(['sudo', 'zfs', 'list', '-t', 'snapshot', '-H', '-o', 'name'])
    datasets = datasets.strip().split('\n')

    datasets = [ entry[len(ZPOOL) + 1:] for entry in datasets ]   # strip the "pool/" prefix

    return postprocess_datasets(datasets)

def main():
    sftp_connect()
    synchronize(get_local_datasets(), get_remote_datasets())

if __name__ == '__main__':
    main()

Rumors have it, btrfs has gained similar functionality to zfs send so maybe I'll be able to extend that code and use it on my linux nodes some future day (after migrating to btrfs there for a start).


-- Christoph Egger <christoph@christoph-egger.org> Fri, 27 Jun 2014 22:54:24 +0200

pass xdotool dmenu; tags=FOSS, GnuPG

I've written a small dmenu-based script which lets one select a password from one's pass password manager and have it typed in via xdotool. This should completely bypass the clipboard (which is distrusted by people for a reason). As I've been asked about the script a few times in the past, here it is. Feel free to copy it; any suggestions are welcome.

#!/bin/bash

shopt -s nullglob globstar

list_passwords() {
	basedir=~/.password-store/
	passwords=( ~/.password-store/**/*.gpg )
	for password in "${passwords[@]}"
	do
		filename="${password#$basedir}"
		filename="${filename%.gpg}"
		echo "$filename"
	done
}

xdotool_command() {
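	# Print an xdotool "type" command followed by the password from pass;
	# `xdotool -` below reads and executes it from stdin.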
	echo -n "type "
	pass "$1"
}

selected_password="$(list_passwords 2>/dev/null| dmenu)"

echo "$selected_password"
if [ -n "$selected_password" ]
then
	xdotool_command "$selected_password" | xdotool -
fi

-- Christoph Egger <christoph@christoph-egger.org> Fri, 27 Jun 2014 22:20:03 +0200

[HOWTO] unsubscribe from a google group; tags=Hier, Web, Kurios, Rant, Fail, HowTo

Writing this because there seems to be no correct documentation on the relevant google websites and it turns out to be non-trivial. Our goal here is to unsubscribe from an ordinary google group.

Mails from the google group contain the following footer:

-- 
You received this message because you are subscribed to the Google
Groups "FOO" group.
To unsubscribe from this group and stop receiving emails from it, send
an email to FOO+unsubscribe@googlegroups.com.
Visit this group at http://groups.google.com/group/FOO
For more options, visit https://groups.google.com/groups/opt_out.

Seems easy enough, so let's send a mail to this FOO+unsubscribe address. Back comes an e-mail:

From: FOO <FOO+unsubconfirm@googlegroups.com>
Subject: Unsubscribe request for FOO [{EJzZjpgFhDHd9seTdRA0}]
To: Christoph Egger <christoph@example.com>
Date: Tue, 18 Feb 2014 18:55:24 +0000 (38 minutes, 53 seconds ago)

 [Leave This Group]

Visit Go 

[Start] your own group, [visit] the help center, or [report]
abuse.

So click on the [Leave This Group] link and be done? Unfortunately not. Looking at the link you notice it points to http://groups.google.com/group/FOO/subscribe -- no token, and "subscribe"? I actually want to unsubscribe! And indeed, clicking it brings up an interface that offers to "Enter the email address to subscribe:" plus a captcha. And whatever it does, it -- of course -- doesn't unsubscribe you. (My guess is it would actually work if you had a real google account associated with that email address and were logged in to that account, but there's no way of verifying this, as the first condition already fails in this case.)

Now if you disable HTML completely for the email, totally different content emerges:

Hello christoph@example.com,

We have received your request to unsubscribe from FOO. In order for us to complete the request, please reply to this email or visit the following confirmation URL:

http://groups.google.com/group/FOO/subscribe

If you have questions related to this or any other Google Group, visit the Help Center at http://groups.google.com/support/.

Thanks,

Google Groups

Still the non-functional link; however, it also mentions a different solution: "please reply to this email", which was not present in the HTML mail at all. And that works.


-- Christoph Egger <christoph@christoph-egger.org> Tue, 18 Feb 2014 20:37:01 +0100

RuCTFe nsaless; tags=Uni, HowTo, Security

Greetings from the FAU Security Team (FAUST), the Uni Erlangen CTF group. We participated in the RuCTFe competition and made it to 4th place. What follows is my write-up of the nsaless service, the main crypto challenge of the competition. nsaless is a nodejs webservice providing a short message service: people can post messages, and their followers receive each message encrypted to their individual RSA keys.

About the gameserver protocol

The gameserver created groups of 8 users on the service: 7 were just following the first user (and were authorized by the first user to do so) while the first user sent a tweet containing the flag. The service used 512-bit RSA with 7 as the public exponent. While 512-bit RSA is certainly weak, it's strong enough to make it infeasible to break directly.

Attacking RSA

There are some known attacks against RSA with small exponents if no proper padding is done. The most straightforward version just takes the e-th root of the cipher-text and, if the clear message was small enough, outputs that root as the plain-text. As the flag was long enough to make this attack impossible, we need a somewhat improved attack.
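
Just to illustrate that textbook case with toy numbers (all values here are made up; gmpy is used for the integer root, just like in the exploit further down):

import gmpy

e = 3
message = 1234567
# without padding, and with message**e smaller than the modulus, the modular
# reduction never happens -- the cipher-text is literally message**e ...
ciphertext = message ** e
# ... so a plain integer e-th root recovers the message
print(gmpy.mpz(ciphertext).root(e)[0])   # prints 1234567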

Håstad's Broadcast Attack

Reminder:

  • In RSA, given a plain-text A, the sender computes Aᵉ mod N to build the cipher-text B.
  • Given simultaneous congruences we can efficiently compute an x ∈ ℤ that satisfies all of them using the Chinese remainder theorem.

For nsaless we actually get several such B for different N (each belonging to a different user receiving the tweet because they follow the poster). This effectively means we know Aᵉ mod Nᵢ for several different moduli Nᵢ. Using the Chinese remainder theorem we can now compute an x ∈ ℤ with x ≡ Aᵉ (mod Π Nᵢ). If we use at least e different B for this, we are guaranteed that x actually equals Aᵉ in ℤ: A needs to be smaller than each Nᵢ used (otherwise we lose information during encryption), therefore Aᵉ is smaller than the product of any e of the Nᵢ.

Taking the e-th root of x over the integers then yields the plain-text A – the flag.
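
To make this concrete, here is a minimal, self-contained sketch of the attack with toy parameters -- e = 3 and three made-up moduli instead of the service's e = 7 and 512-bit keys; the helpers mirror the ones in the exploit further down:

import gmpy

def extended_gcd(a, b):
    # returns (x, y, g) with a*x + b*y == g
    x, y = 0, 1
    lastx, lasty = 1, 0
    while b:
        a, (q, b) = b, divmod(a, b)
        x, lastx = lastx - q*x, x
        y, lasty = lasty - q*y, y
    return (lastx, lasty, a)

def chinese_remainder_theorem(items):
    # items: list of (residue, modulus) pairs, moduli pairwise co-prime
    N = 1
    for a, n in items:
        N *= n
    result = 0
    for a, n in items:
        m = N // n
        r, s, d = extended_gcd(n, m)
        result += a*s*m
    return result % N

e = 3
flag = 42                              # toy "flag", smaller than every modulus
moduli = [5*11, 17*23, 29*41]          # pairwise co-prime toy moduli
ciphertexts = [pow(flag, e, n) for n in moduli]

# with e cipher-texts, x equals flag**e over the integers, not just mod N
x = chinese_remainder_theorem(list(zip(ciphertexts, moduli)))
print(gmpy.mpz(x).root(e)[0])          # prints 42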

Fix

Fixing your service is easy enough: just increase e to a suitable number > 8, so that an attacker would need more cipher-texts than the 7 the gameserver hands out per flag. At the end of the contest 5 teams had fixed this vulnerability by using either 17 or 65537.

Exploit

The basic exploit is shown below. Unfortunately it needs to retrieve all tweets for all users to compute the flags, which just takes too long to be feasible (at least towards the end of the competition, when tons of users already existed), so you would need some caching to make it actually work. It would have been a great idea to have users expire from the service after an hour or two!

#!/usr/bin/python

import httplib
import urllib
import re
import json
import pprint
import gmpy
import sys

userparse_re = re.compile('<a [^>]*>([^<]*)</a></div>\s*<div>([^<]*)</div>')
tweetparse_re = re.compile("<div id='last_tweet'>([0-9]+)</div>")
followingparse_re = re.compile('<div><a href="/[0-9]+">([0-9]+)</a></div>')

def my_parse_number(number):
    string = "%x" % number
    if len(string) != 64:
        return ""
    erg = []
    while string != '':
        erg = erg + [chr(int(string[:2], 16))]
        string = string[2:]
    return ''.join(erg)

def extended_gcd(a, b):
    x,y = 0, 1
    lastx, lasty = 1, 0

    while b:
        a, (q, b) = b, divmod(a,b)
        x, lastx = lastx-q*x, x
        y, lasty = lasty-q*y, y

    return (lastx, lasty, a)

def chinese_remainder_theorem(items):
    N = 1
    for a, n in items:
        N *= n

    result = 0
    for a, n in items:
        m = N/n
        r, s, d = extended_gcd(n, m)
        if d != 1:
            raise ValueError("Input not pairwise co-prime")
        result += a*s*m

    return result % N, N

def get_tweet(uid):
    try:
        conn = httplib.HTTPConnection("%s:48879" % sys.argv[1], timeout=60)
        conn.request("GET", "/%s" % uid)
        r1 = conn.getresponse()
        data = r1.read()
        tweet = re.findall(tweetparse_re, data)
        if len(tweet) != 1:
            return None
        followers = re.findall(followingparse_re, data)
        return tweet[0], followers
    except:
        return None

def get_users():
    conn = httplib.HTTPConnection("%s:48879" % sys.argv[1], timeout=60)
    conn.request("GET", "/users")
    r1 = conn.getresponse()
    data1 = r1.read(1024 * 1024)
    data = dict()
    for i in re.findall(userparse_re, data1)[:100]:
        userinfo = get_tweet(i[0])
        if userinfo != None:
            data[i[0]] = (json.loads(i[1].replace('&quot;', '"'))['n'], userinfo)

    return data

users = get_users()
allusers = users.keys()
masters = [ user for user in allusers if len(users[user][1][1]) > 0 ]

for test in masters:
    try:
        followers = users[test][1][1]
        data = []

        for fol in followers:
            n = int(users[fol][0])
            tweet = int(users[fol][1][0])
            data = data + [(tweet, n)]

        x, n = chinese_remainder_theorem(data)

        realnum = gmpy.mpz(x).root(7)[0].digits()
        print my_parse_number(int(realnum))
    except:
        pass

-- Christoph Egger <christoph@christoph-egger.org> Fri, 20 Dec 2013 13:59:29 +0100

Generating .wot files now; tags=Web, Security, GnuPG

As you might have noticed, the original source of web-of-trust graph information went offline and probably won't come back. As a result, pathfinders like the one by Henk P. Penning are also stuck in February 2012.

As I've always found this kind of statistics interesting, I've hacked the pks2wot python script that is part of the wotsap package to use plain HKP instead of the pks client and am running it against my own sks keyserver. This seems to work well enough to produce a weekly dump of the current web of trust, which can be found at http://wot.christoph-egger.org/download/. I'd be happy to hear if this is useful to anyone besides myself.


-- Christoph Egger <christoph@christoph-egger.org> Tue, 04 Dec 2012 00:12:56 +0100

