#! /bin/sh
#
# Copyright (c) 2014 The DragonFly Project.  All rights reserved.
#
# This code is derived from software contributed to The DragonFly Project
# by Antonio Huete <tuxillo@quantumachine.net>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in
#    the documentation and/or other materials provided with the
#    distribution.
# 3. Neither the name of The DragonFly Project nor the names of its
#    contributors may be used to endorse or promote products derived
#    from this software without specific, prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
# COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# set -x
#
# hammer-backup
#
# This script operates on HAMMER PFSes and dumps their contents to data
# files for backup purposes.
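#
# Illustrative invocations (the paths below are hypothetical examples):
#
#   hammer-backup -f -d /backups /pfs/home        # full backup
#   hammer-backup -i auto -d /backups /pfs/home   # incremental, based on the latest full
#   hammer-backup -l -d /backups                  # list existing backups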
#

initialization()
{
    VERSION="0.2"
    SCRIPTNAME=${0##*/}

    dryrun=0	  # Dry-run
    backup_type=0	  # Type of backup
    incr_full_file="" # Full backup file for the incremental
    input_file=""	  # Full backup filename
    output_file=""	  # Output data file
    metadata_file=""  # Output metadata file
    pfs_path=""	  # PFS path to be backed up
    backup_dir=""	  # Target directory for backups
    compress=0	  # Compress output file?
    comp_rate=6	  # Compression rate
    verbose=0	  # Verbosity on/off
    list_opt=0	  # List backups
    checksum_opt=0	  # Perform a checksum of all backups
    find_last=0	  # Find last full backup
    timestamp=$(date +'%Y%m%d%H%M%S')
}

info()
{
    [ ${verbose} -eq 1 ] && echo "INFO: $1"
}

#
# err exitval message
#     Display an error and exit
#
err()
{
    exitval=$1
    shift

    echo 1>&2 "$0: ERROR: $*"
    exit $exitval
}

usage()
{
    exitval=${1:-1}
    echo "Usage: ${SCRIPTNAME} [-hlvfkn] [-i <full-backup-file|auto>]" \
	"[-c <compress-rate>] -d <backup-dir> [pfs path]"
    exit $exitval
}

check_pfs()
{
    info "Validating PFS ${pfs_path}"

    # A PFS path must be specified
    if [ -z "${pfs_path}" ]; then
	usage
    fi

    # Make sure we are working on a HAMMER PFS
    hammer pfs-status ${pfs_path} > /dev/null 2>&1
    if [ $? -ne 0 ]; then
	err 2 "${pfs_path} is not a HAMMER PFS"
    fi
}

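#
# get_endtid logfile
#     Extract the ending TID of a mirror-read run from the log file that
#     captured the verbose "hammer mirror-read" output: find the record
#     containing the "tids" keyword and print its end TID field.
#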
get_endtid()
{
    local logfile=$1

    awk -F "[=: ]" -vRS="\r" '{
	if ($4 == "tids") {
		print $6;
		exit
	}
    }' ${logfile}
}

get_uuid()
{
    # Get the shared UUID for the PFS
    hammer pfs-status ${pfs_path} | awk -F'[ =]+' '
	$2 == "shared-uuid" {
		print $3;
		exit;
	}'
}

file2date()
{
    local filename=""
    local filedate=""

    # Extract the date
    filename=$(basename $1)
    filedate=$(echo ${filename} | cut -d "_" -f1)

    date -j -f '%Y%m%d%H%M%S' ${filedate} +"%B %d, %Y %H:%M:%S %Z"
}

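#
# update_mdata endtid
#     Append a record for the backup just taken to the metadata (.bkp)
#     file.  Each record is a comma-separated line of the form
#
#         filename,,,type,shared-uuid,end-tid,md5
#
#     where type is 1 for a full backup and 2 for an incremental one.
#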
update_mdata()
{
    local filename=$(basename ${output_file})
    local uuid=$(get_uuid)
    local endtid=$1
    local md5sum=$(md5 -q ${output_file} 2> /dev/null)

    if [ -z "${endtid}" ]; then
	rm -f ${output_file}
	err 1 "Could not determine the ending TID, deleting ${output_file}"
    fi
    # XXX - Sanity checks missing?!!
    if [ ${dryrun} -eq 0 ]; then
	printf "%s,,,%d,%s,%s,%s\n" ${filename} ${backup_type} ${uuid} \
	    ${endtid} ${md5sum} >> ${metadata_file}
    fi
}

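#
# do_backup tmplog [begtid]
#     Run "hammer mirror-read" on the PFS and store the stream in
#     ${output_file}, piping it through xz when compression was
#     requested.  If begtid is given, the mirror-read starts at that
#     TID, which makes the dump incremental.
#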
do_backup()
{
    local tmplog=$1
    local compress_opts=""
    local begtid=$2

    # Calculate the compression options
    if [ ${compress} -eq 1 ]; then
	compress_opts=" | xz -c -${comp_rate}"
	output_file="${output_file}.xz"
    fi

    # Generate the datafile according to the options specified
    cmd="hammer -y -v mirror-read ${pfs_path} ${begtid} 2> ${tmplog} \
	${compress_opts} > ${output_file}"

    info "Launching: ${cmd}."
    if [ ${dryrun} -eq 0 ]; then
	# Sync to disk before mirror-read
	hammer synctid ${pfs_path} > /dev/null 2>&1
	eval ${cmd}
	if [ $? -eq 0 ]; then
	    info "Backup completed."
	else
	    rm -f ${output_file}
	    rm -f ${tmplog}
	    err 1 "Failed to create the backup data file!"
	fi
    else
	info "Dry-run execution."
    fi
}

full_backup()
{
    local tmplog=$(mktemp)
    local filename=""
    local endtid=""

    # Full backup (no param specified)
    info "Initiating full backup."
    do_backup ${tmplog}

    # Generate the metadata file itself
    metadata_file="${output_file}.bkp"
    endtid=$(get_endtid ${tmplog})

    update_mdata ${endtid}

    # Cleanup
    rm ${tmplog}
}

check_metadata()
{
    local line=""
    local f1=""
    local f2=""

    if [ ! -r ${metadata_file} ]; then
	err 1 "Could not find ${metadata_file}"
    fi

    f1=$(basename ${metadata_file})
    f2=$(head -1 ${metadata_file} | cut -d "," -f1)

    if [ "${f1}" != "${f2}.bkp" ]; then
	err 2 "Bad metadata file ${metadata_file}"
    fi
}

detect_latest_backup()
{
    local latest=""
    local pattern=""

    # XXX
    # Find the latest metadata backup file if needed. Right now the timestamp
    # in the filename lets them be sorted by ls, but this could actually
    # change.
    if [ ${find_last} -eq 1 ]; then
	pattern=$(echo ${pfs_path} | tr "/" "_").xz.bkp
	latest=$(ls -1 ${backup_dir}/*${pattern} 2> /dev/null | tail -1)
	if [ -z "${latest}" ]; then
	    err 1 "Failed to detect the latest full backup file."
	fi
	incr_full_file=${latest}
    fi
}

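#
# incr_backup
#     Take an incremental backup.  The begin TID and the shared UUID are
#     read from the last record of the full backup's metadata file (given
#     with -i, or auto-detected), the UUID is checked against the live
#     PFS, and the mirror-read is started at that TID.  The new record is
#     appended to the same metadata file.
#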
incr_backup()
{
    local tmplog=$(mktemp)
    local begtid=""
    local endtid=""
    local line=""
    local srcuuid=""
    local tgtuuid=""
    local btype=0

    detect_latest_backup

    # Make sure the file exists and can be read
    if [ ! -r ${incr_full_file} ]; then
	err 1 "Specified file ${incr_full_file} does not exist or is not readable."
    fi
    metadata_file=${incr_full_file}

    # Verify we were passed a real metadata file
    check_metadata

    # The first backup in the metadata file must be a full one
    line=$(head -1 ${incr_full_file})
    btype=$(echo ${line} | cut -d ',' -f4)
    if [ ${btype} -ne 1 ]; then
	err 1 "No full backup in ${incr_full_file}. Cannot do incremental ones."
    fi

    # Read the metadata info of the last backup performed
    line=$(tail -1 ${incr_full_file})
    srcuuid=$(echo ${line} | cut -d ',' -f 5)
    begtid=$(echo ${line} | cut -d ',' -f 6)

    # Verify the shared UUIDs are the same
    tgtuuid=$(get_uuid)
    if [ "${srcuuid}" != "${tgtuuid}" ]; then
	err 255 "Shared UUIDs do not match! ${srcuuid} -> ${tgtuuid}"
    fi

    # Do an incremental backup
    info "Initiating incremental backup."
    do_backup ${tmplog} 0x${begtid}

    # Store the metadata in the full backup file
    endtid=$(get_endtid ${tmplog})

    #
    # Handle the case where the hammer mirror-read command did not retrieve
    # any data because the PFS was not modified at all. In that case we keep
    # the TID of the previous backup.
    #
    if [ -z "${endtid}" ]; then
	endtid=${begtid}
    fi
    update_mdata ${endtid}

    # Cleanup
    rm ${tmplog}
}

list_backups()
{
    local nofiles=1

    for bkp in ${backup_dir}/*.bkp
    do
	# Skip files that don't exist (unexpanded glob)
	if [ ! -f ${bkp} ]; then
	    continue
	fi
	# Show the full backup and the incremental backups related to it
	awk -F "," '{
		if ($4 == 1) {
			printf("full: ");
		}
		if ($4 == 2) {
			printf("\tincr: ");
		}
		printf("%s endtid: 0x%s md5: %s\n", $1, $6, $7);
	}' ${bkp}
	nofiles=0
    done

    if [ ${nofiles} -eq 1 ]; then
	err 255 "No backup files found in ${backup_dir}"
    fi

    exit 0
}

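#
# checksum_backups
#     Walk every .bkp metadata file in the backup directory, recompute
#     the MD5 of each referenced data file and compare it with the
#     checksum stored in the metadata record.
#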
checksum_backups()
{
    local nofiles=1
    local storedck=""
    local fileck=""
    local fname=""
    local tmp=""

    for bkp in ${backup_dir}/*.bkp
    do
	# Skip files that don't exist (unexpanded glob)
	if [ ! -f ${bkp} ]; then
	    continue
	fi
	# Perform a checksum test
	while read line
	do
	    tmp=$(echo ${line} | cut -d "," -f1)
	    fname=${backup_dir}/${tmp}
	    storedck=$(echo ${line} | cut -d "," -f7)
	    fileck=$(md5 -q ${fname} 2> /dev/null)
	    echo -n "${fname} : "
	    if [ ! -f ${fname} ]; then
		echo "MISSING"
		continue
	    elif [ "${storedck}" = "${fileck}" ]; then
		echo "OK"
	    else
		echo "FAILED"
	    fi
	done < ${bkp}
	nofiles=0
    done

    if [ ${nofiles} -eq 1 ]; then
	err 255 "No backup files found in ${backup_dir}"
    fi

    exit 0
}
# -------------------------------------------------------------

# Set up some vars
initialization

# Only root can run this script
if [ $(id -u) -ne 0 ]; then
    err 255 "Only root can run this script."
fi

# Check for the hammer(8) program
if [ ! -x /sbin/hammer ]; then
    err 1 'Could not find the hammer(8) program.'
fi

# Handle options
while getopts d:i:c:fvhnlk op
do
    case $op in
	d)
	    backup_dir=$OPTARG
	    ;;
	f)
	    if [ ${backup_type} -eq 2 ]; then
		err 1 "-f and -i are mutually exclusive."
	    fi
	    backup_type=1
	    ;;
	i)
	    if [ ${backup_type} -eq 1 ]; then
		err 1 "-f and -i are mutually exclusive."
	    fi
	    backup_type=2
	    if [ "${OPTARG}" = "auto" ]; then
		find_last=1
	    else
		incr_full_file=$OPTARG
	    fi
	    ;;
	c)
	    compress=1

	    case "$OPTARG" in
		[1-9])
		    comp_rate=$OPTARG
		    ;;
		*)
		    err 1 "Bad compression level specified."
		    ;;
	    esac
	    ;;
	k)
	    checksum_opt=1
	    ;;
	n)
	    dryrun=1
	    ;;
	l)
	    list_opt=1
	    ;;
	v)
	    verbose=1
	    ;;
	h)
	    usage 0
	    ;;
	*)
	    usage
	    ;;
    esac
done

shift $(($OPTIND - 1))

info "hammer-backup version ${VERSION}"

# The remaining argument, if any, is the PFS path to back up
pfs_path="$1"

# Backup directory must be specified and must exist
if [ -z "${backup_dir}" ]; then
    usage
elif [ ! -d "${backup_dir}" ]; then
    err 1 "Backup directory does not exist!"
fi
info "Backup dir is ${backup_dir}"

# Output file name is the timestamp (YYYYmmddHHMMSS) followed by the
# PFS path with '/' replaced by '_'
tmp=$(echo ${pfs_path} | tr '/' '_')
output_file="${backup_dir}/${timestamp}${tmp}"
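# For example (hypothetical values), a compressed backup of /pfs/home
# taken on 2014-01-01 12:00:00 would produce:
#   data:     ${backup_dir}/20140101120000_pfs_home.xz
#   metadata: ${backup_dir}/20140101120000_pfs_home.xz.bkp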

# List backups if needed
if [ ${list_opt} -eq 1 ]; then
    info "Listing backups."
    list_backups
fi

# Checksum test
if [ ${checksum_opt} -eq 1 ]; then
    info "Checksum test for all backup files."
    checksum_backups
fi

# Only work on a HAMMER PFS
check_pfs

# Actually launch the backup itself
if [ ${backup_type} -eq 1 ]; then
    info "Full backup."
    full_backup
elif [ ${backup_type} -eq 2 ]; then
    info "Incremental backup."
    incr_full_file=${backup_dir}/${incr_full_file}
    incr_backup
else
    err 255 "No backup type specified. Please use either -f or -i."
fi