#!/bin/sh
#
# Copyright (c) 2014 The DragonFly Project.  All rights reserved.
#
# This code is derived from software contributed to The DragonFly Project
# by Antonio Huete <tuxillo@quantumachine.net>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in
#    the documentation and/or other materials provided with the
#    distribution.
# 3. Neither the name of The DragonFly Project nor the names of its
#    contributors may be used to endorse or promote products derived
#    from this software without specific, prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
# COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# set -x
#
# hammer-backup
#
# This script operates on HAMMER PFSes and dumps their contents for
# backup purposes.
#
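# Illustrative invocations (the backup directory and PFS paths below are
# examples only):
#
#   hammer-backup -f -d /mybackups /home        full backup of /home
#   hammer-backup -i auto -d /mybackups /home   incremental backup against the
#                                               latest full backup found
#   hammer-backup -l -d /mybackups              list existing backups
#   hammer-backup -k -d /mybackups              verify backup checksums
#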

initialization()
{
    VERSION="0.3"
    SCRIPTNAME=${0##*/}

    dryrun=0          # Dry-run
    backup_type=0     # Type of backup
    incr_full_file="" # Full backup file for the incremental
    input_file=""     # Full backup filename
    output_file=""    # Output data file
    metadata_file=""  # Output metadata file
    pfs_path=""       # PFS path to be backed up
    backup_dir=""     # Target directory for backups
    compress=0        # Compress output file?
    comp_rate=6       # Compression rate
    verbose=0         # Verbosity on/off
    list_opt=0        # List backups
    checksum_opt=0    # Perform a checksum of all backups
    find_last=0       # Find last full backup
    timestamp=$(date +'%Y%m%d%H%M%S')
    memlimit="10%"
}

info()
{
    [ ${verbose} -eq 1 ] && echo "INFO: $1"
}

#
# err exitval message
#     Display an error and exit
#
err()
{
    exitval=$1
    shift

    echo 1>&2 "$0: ERROR: $*"
    exit $exitval
}

usage()
{
    exitval=${1:-1}
    echo "Usage: ${SCRIPTNAME} [-hlvfkn] [-i <full-backup-file|auto>]" \
	"[-c <compress-rate>] -d <backup-dir> [pfs path]"
    exit $exitval
}

check_pfs()
{
    info "Validating PFS ${pfs_path}"

    # A PFS path must be specified
    if [ -z "${pfs_path}" ]; then
	usage
    fi

    # Make sure we are working on a HAMMER PFS
    hammer pfs-status ${pfs_path} > /dev/null 2>&1
    if [ $? -ne 0 ]; then
	err 2 "${pfs_path} is not a HAMMER PFS"
    fi
}

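#
# get_endtid logfile
#     Parse the verbose output of hammer mirror-read (captured in logfile)
#     and print the ending transaction id of the generated stream.
#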
get_endtid()
{
    local logfile=$1

    awk -F "[=: ]" -vRS="\r" '{
	if ($4 == "tids") {
		print $6;
		exit
	}
    }' ${logfile}
}

get_uuid()
{
    # Get the shared UUID for the PFS
    hammer pfs-status ${pfs_path} | awk -F'[ =]+' '
	$2 == "shared-uuid" {
		print $3;
		exit;
	}'
}

namesuffix()
{
    # Build the output file name suffix from the PFS path
    if [ "${pfs_path}" = "/" ]; then
	tmp="_root"
    else
	tmp=$(echo ${pfs_path} | tr '/' '_')
    fi
    echo ${tmp}
}

file2date()
{
    local filename=""
    local filedate=""

    # Extract the date
    filename=$(basename $1)
    filedate=$(echo ${filename} | cut -d "_" -f1)

    date -j -f '%Y%m%d%H%M%S' ${filedate} +"%B %d, %Y %H:%M:%S %Z"
}

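#
# update_mdata endtid
#     Append a record for the backup that was just produced to the metadata
#     file. Each record is a comma-separated line of the form:
#     filename,,,backup-type,shared-uuid,end-tid,md5
#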
update_mdata()
{
    local filename=$(basename ${output_file})
    local uuid=$(get_uuid)
    local endtid=$1
    local md5sum=$(md5 -q ${output_file} 2> /dev/null)

    if [ -z "${endtid}" ]; then
	rm ${output_file}
	err 1 "Could not determine the end TID. Deleting ${output_file}"
    fi
    # XXX - Sanity checks missing?!!
    if [ ${dryrun} -eq 0 ]; then
	printf "%s,,,%d,%s,%s,%s\n" ${filename} ${backup_type} ${uuid} \
	    ${endtid} ${md5sum} >> ${metadata_file}
    fi
}

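#
# do_backup tmplog [begtid]
#     Run hammer mirror-read on the PFS, writing its verbose output to
#     tmplog. With no begin TID a full stream is dumped; otherwise an
#     incremental stream starting at begtid is generated.
#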
do_backup()
{
    local tmplog=$1
    local compress_opts=""
    local begtid=$2

    # Calculate the compression options
    if [ ${compress} -eq 1 ]; then
	compress_opts=" | xz -M ${memlimit} -c -${comp_rate}"
	output_file="${output_file}.xz"
    fi

    # Generate the datafile according to the options specified
    cmd="hammer -y -v mirror-read ${pfs_path} ${begtid} 2> ${tmplog} \
	${compress_opts} > ${output_file}"

    info "Launching: ${cmd}"
    if [ ${dryrun} -eq 0 ]; then
	# Sync to disk before mirror-read
	hammer synctid ${pfs_path} > /dev/null 2>&1
	eval ${cmd}
	if [ $? -eq 0 ]; then
	    # On completion, make sure only root can access backup files.
	    chmod 600 ${output_file}
	    info "Backup completed."
	else
	    rm -f ${output_file}
	    rm -f ${tmplog}
	    err 1 "Failed to create backup data file!"
	fi
    else
	info "Dry-run execution."
    fi
}

full_backup()
{
    local tmplog=$(mktemp)
    local filename=""
    local endtid=""

    # Full backup (no param specified)
    info "Initiating full backup."
    do_backup ${tmplog}

    # Generate the metadata file itself
    metadata_file="${output_file}.bkp"
    endtid=$(get_endtid ${tmplog})

    update_mdata ${endtid}

    # Cleanup
    rm ${tmplog}
}

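#
# check_metadata
#     Verify that the file passed with -i really is a metadata (.bkp) file:
#     its first record must reference the data file it was named after.
#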
check_metadata()
{
    local line=""
    local f1=""
    local f2=""

    if [ ! -r ${metadata_file} ]; then
	err 1 "Could not find ${metadata_file}"
    fi

    f1=$(basename ${metadata_file})
    f2=$(head -1 ${metadata_file} | cut -d "," -f1)

    if [ "${f1}" != "${f2}.bkp" ]; then
	err 2 "Bad metadata file ${metadata_file}"
    fi
}

detect_latest_backup()
{
    local latest=""
    local pattern=""

    # XXX
    # Find the latest metadata backup file if needed. Right now the timestamp
    # in the filename lets ls sort them chronologically, but this could
    # change.
    if [ ${find_last} -eq 1 ]; then
	pattern=$(namesuffix).*bkp
	latest=$(ls -1 ${backup_dir}/*${pattern} 2> /dev/null | tail -1)
	if [ -z "${latest}" ]; then
	    err 1 "Failed to detect the latest full backup file."
	fi
	incr_full_file=${latest}
    fi
}

incr_backup()
{
    local tmplog=$(mktemp)
    local begtid=""
    local endtid=""
    local line=""
    local srcuuid=""
    local tgtuuid=""
    local btype=0

    detect_latest_backup

    # Make sure the file exists and can be read
    if [ ! -r ${incr_full_file} ]; then
	err 1 "Specified file ${incr_full_file} does not exist or is not readable."
    fi
    metadata_file=${incr_full_file}

    # Verify we were passed a real metadata file
    check_metadata

    # The first backup of the metadata file must be a full one
    line=$(head -1 ${incr_full_file})
    btype=$(echo ${line} | cut -d ',' -f4)
    if [ ${btype} -ne 1 ]; then
	err 1 "No full backup in ${incr_full_file}. Cannot do an incremental one."
    fi

    # Read metadata info for the last backup performed
    line=$(tail -1 ${incr_full_file})
    srcuuid=$(echo $line | cut -d ',' -f 5)
    begtid=$(echo $line | cut -d ',' -f 6)

    # Verify the shared UUIDs are the same
    tgtuuid=$(get_uuid)
    if [ "${srcuuid}" != "${tgtuuid}" ]; then
	err 255 "Shared UUIDs do not match! ${srcuuid} -> ${tgtuuid}"
    fi

    # Do an incremental backup
    info "Initiating incremental backup."
    do_backup ${tmplog} 0x${begtid}

    # Store the metadata in the full backup file
    endtid=$(get_endtid ${tmplog})

    #
    # Handle the case where the hammer mirror-read command did not retrieve
    # any data because the PFS was not modified at all. In that case we keep
    # the TID of the previous backup.
    #
    if [ -z "${endtid}" ]; then
	endtid=${begtid}
    fi
    update_mdata ${endtid}

    # Cleanup
    rm ${tmplog}
}

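#
# list_backups
#     Print every record found in the .bkp metadata files under the backup
#     directory, labelling full and incremental backups.
#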
list_backups()
{
    local nofiles=1

    for bkp in ${backup_dir}/*.bkp
    do
	# Skip files that don't exist
	if [ ! -f ${bkp} ]; then
	    continue
	fi
	# Print the full backup record first, then its incremental backups
	awk -F "," '{
		if ($4 == 1) {
			printf("full: ");
		}
		if ($4 == 2) {
			printf("\tincr: ");
		}
		printf("%s endtid: 0x%s md5: %s\n", $1, $6, $7);
	}' ${bkp}
	nofiles=0
    done

    if [ ${nofiles} -eq 1 ]; then
	err 255 "No backup files found in ${backup_dir}"
    fi

    exit 0
}

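#
# checksum_backups
#     Recompute the MD5 checksum of every data file referenced by the
#     metadata files and compare it against the stored value.
#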
checksum_backups()
{
    local nofiles=1
    local storedck=""
    local fileck=""
    local tmp=""
    local retf=0
    local retm=0

    for bkp in ${backup_dir}/*.bkp
    do
	# Skip files that don't exist
	if [ ! -f ${bkp} ]; then
	    continue
	fi
	# Perform a checksum test
	while read line
	do
	    tmp=$(echo $line | cut -d "," -f1)
	    fname=${backup_dir}/${tmp}
	    storedck=$(echo $line | cut -d "," -f7)
	    fileck=$(md5 -q ${fname} 2> /dev/null)
	    echo -n "${fname} : "
	    if [ ! -f ${fname} ]; then
		echo "MISSING"
		retm=1
		continue
	    elif [ "${storedck}" = "${fileck}" ]; then
		echo "OK"
	    else
		retf=2
		echo "FAILED"
	    fi
	done < ${bkp}
	nofiles=0
    done

    if [ ${nofiles} -eq 1 ]; then
	err 255 "No backup files found in ${backup_dir}"
    fi

    exit $(( ${retm} + ${retf} ))
}
# -------------------------------------------------------------

# Setup some vars
initialization

# This script can only be run by root
if [ $(id -u) -ne 0 ]; then
    err 255 "Only root can run this script."
fi

# Check for the hammer(8) program
if [ ! -x /sbin/hammer ]; then
    err 1 'Could not find the hammer(8) program.'
fi

# Handle options
while getopts d:i:c:fvhnlk op
do
    case $op in
	d)
	    backup_dir=$OPTARG
	    ;;
	f)
	    if [ ${backup_type} -eq 2 ]; then
		err 1 "-f and -i are mutually exclusive."
	    fi
	    backup_type=1
	    ;;
	i)
	    if [ ${backup_type} -eq 1 ]; then
		err 1 "-f and -i are mutually exclusive."
	    fi
	    backup_type=2
	    if [ "${OPTARG}" = "auto" ]; then
		find_last=1
	    else
		incr_full_file=$OPTARG
	    fi
	    ;;
	c)
	    compress=1

	    case "$OPTARG" in
		[1-9])
		    comp_rate=$OPTARG
		    ;;
		*)
		    err 1 "Bad compression level specified."
		    ;;
	    esac
	    ;;
	k)
	    checksum_opt=1
	    ;;
	n)
	    dryrun=1
	    ;;
	l)
	    list_opt=1
	    ;;
	v)
	    verbose=1
	    ;;
	h)
	    usage 0
	    ;;
	*)
	    usage
	    ;;
    esac
done

shift $(($OPTIND - 1))

info "hammer-backup version ${VERSION}"

pfs_path="$1"

# Backup directory must exist
if [ -z "${backup_dir}" ]; then
    usage
elif [ ! -d "${backup_dir}" ]; then
    err 1 "Backup directory does not exist!"
fi
info "Backup dir is ${backup_dir}"

# List backups if needed
if [ ${list_opt} -eq 1 ]; then
    info "Listing backups."
    list_backups
fi

# Checksum test
if [ ${checksum_opt} -eq 1 ]; then
    info "Checksum test for all backup files."
    checksum_backups
fi

# Calculate output filename and make
# sure absolute paths are passed
firstchr=$(echo ${pfs_path} | cut -b1)
if [ "${firstchr}" != "/" ]; then
    err 1 "You must specify an absolute path"
fi
output_file="${backup_dir}/${timestamp}"$(namesuffix)

# Only work on a HAMMER fs
check_pfs

# Actually launch the backup itself
if [ ${backup_type} -eq 1 ]; then
    info "Full backup."
    full_backup
elif [ ${backup_type} -eq 2 ]; then
    info "Incremental backup."
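    # Metadata file names passed with -i are taken relative to the backup
    # directory; with '-i auto' the latest one is located automatically.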
    incr_full_file=${backup_dir}/${incr_full_file}
    incr_backup
else
    err 255 "No backup type specified, use -f or -i."
fi