}
run () {
+ stdinp=nil
+ while :; do
+ case $1 in
+ -stdin) stdinp=t; shift ;;
+ --) shift; break ;;
+ *) break ;;
+ esac
+ done
tag=$1 cmd=$2; shift 2
## Run CMD, logging its output in a pleasing manner.
nil)
log "BEGIN $tag"
rc=$(
+ case $stdinp in nil) exec </dev/null ;; esac
{ { { ( set +e
"$cmd" "$@" 3>&- 4>&- 5>&- 9>&-
echo $? >&5; ) |
copy "|" >&4; } 2>&1 |
copy "*" >&4; } 4>&1 |
- cat >&9; } 5>&1 </dev/null
+ cat >&9; } 5>&1
)
case $rc in
0) log "END $tag" ;;
return $rc
}
+run_diff () {
+	out=$1 old=$2 new=$3
+	## Write a unified diff from OLD to NEW, to OUT.
+
+	## Run diff outside `set -e': diff(1) exits nonzero (1) simply when the
+	## files differ, and we want that status, not an abort.
+	set +e; diff -u "$old" "$new" >"$out"; rc=$?; set -e
+	## If the files differ, echo the diff to stdout too, so it ends up in
+	## the log; either way hand diff's status back to the caller.
+	case $rc in 1) cat "$out" ;; esac
+	return $rc
+}
+
localp () {
h=$1
## Answer whether H is a local host.
done
}
+remove_old_logfiles () {
+	base=$1
+	## Remove old logfiles with names of the form BASE.DATE#N, so that there
+	## are at most $MAXLOG of them.
+	##
+	## Reads the globals $dryrun and $MAXLOG; deletes nothing in a dry run.
+
+	## Count up the logfiles.
+	nlog=0
+	for i in "$base".*; do
+	  ## An unmatched glob leaves the pattern itself behind, so make sure
+	  ## we're looking at a real file before counting it.
+	  if [ ! -f "$i" ]; then continue; fi
+	  nlog=$(( nlog + 1 ))
+	done
+
+	## If there are too many, go through and delete some early ones.
+	## Deletion is in glob (lexical) order: presumably the BASE.DATE#N
+	## names sort oldest-first -- TODO confirm the DATE format guarantees
+	## this.
+	if [ $dryrun = nil ] && [ $nlog -gt $MAXLOG ]; then
+	  n=$(( nlog - MAXLOG ))
+	  for i in "$base".*; do
+	    if [ ! -f "$i" ]; then continue; fi
+	    rm -f "$i"
+	    n=$(( n - 1 ))
+	    if [ $n -eq 0 ]; then break; fi
+	  done
+	fi
+}
+
###--------------------------------------------------------------------------
### Database operations.
bkprc=0
+hash_file () {
+	file=$1
+	## Write a hex digest of FILE, using the $HASH algorithm, to stdout.
+
+	case $HASH in
+	  md5 | sha1 | sha224 | sha256 | sha384 | sha512)
+	    ## A coreutils-style `HASHsum' tool exists for these algorithms.
+	    ## Word-split its output with `set --': the digest is the first
+	    ## field.
+	    set -- $(${HASH}sum <"$file")
+	    echo "$1"
+	    ;;
+	  *)
+	    ## Fall back to openssl(1); its `NAME(stdin)= DIGEST' output puts
+	    ## the digest in the second field.
+	    set -- $(openssl dgst -$HASH <"$file")
+	    echo "$2"
+	    ;;
+	esac
+}
+
remote_fshash () {
_hostrun $userat$host "
umask 077
set -e
attempt=0
+ fshash_diff=nil
## Run a hook beforehand.
set +e; runhook setup $host $fs $date; rc=$?; set -e
## Maybe we need to retry the backup.
while :; do
+ ## Rig checksum variables to mismatch unless they're set later.
+ hrfs=REMOTE hlfs=LOCAL
+
## Create and mount the remote snapshot.
case $dryrun in
t)
esac
$verbose " create snapshot"
+ ## If we had a fshash-mismatch, then clear out the potentially stale
+ ## entries, both locally and remotely.
+ case $fshash_diff in
+ nil) ;;
+ *)
+ $verbose " prune cache"
+ run -stdin "local prune fshash" \
+ fshash -u -c$STOREDIR/fshash.cache -H$HASH new/ <$fshash_diff
+ run -stdin "@$host: prune fshash" \
+ _hostrun $userat$host <$fshash_diff \
+ "fshash -u -c$fshashdir/$fs.bkp -H$HASH ${snapmnt#*:}"
+ ;;
+ esac
+
## Build the list of hardlink sources.
linkdests=""
for i in $host $like; do
run "@$host: fshash $fs" remote_fshash
rc_fshash=$?
set -e
- case $dryrun in nil) $verbose " done" ;; esac
+	case $dryrun in
+	  nil)
+	    ## Record a checksum of the map the remote side just wrote; it is
+	    ## compared against the local map's checksum below as a
+	    ## consistency check.
+	    hrfs=$(hash_file "$fshashdir/$fs.bkp")
+	    ## Log the REMOTE checksum we just computed.  (Previously this
+	    ## printed $hlfs, which at this point still holds the `LOCAL'
+	    ## sentinel.)
+	    $log "remote fshash $HASH checksum: $hrfs"
+	    $verbose " done"
+	    ;;
+	  t)
+	    ## Dry run: no map was written; use a sentinel which matches the
+	    ## local side's so the later cross-check passes.
+	    hrfs=UNSET
+	    ;;
+	esac
## Remove the snapshot.
maybe unsnap_$snap $snapargs $fs $fsarg
nil) $verbose -n " local fshash..." ;;
esac
run "local fshash $host:$fs" local_fshash || return $?
- case $dryrun in nil) $verbose " done" ;; esac
+	case $dryrun in
+	  nil)
+	    ## Record a checksum of the locally rebuilt map; it is compared
+	    ## against the remote map's checksum below as a consistency check.
+	    hlfs=$(hash_file "$localmap")
+	    $log "local fshash $HASH checksum: $hlfs"
+	    $verbose " done"
+	    ;;
+	  t)
+	    ## Dry run: no map was written; use a sentinel which matches the
+	    ## remote side's so the later cross-check passes.
+	    hlfs=UNSET
+	    ;;
+	esac
## Compare the two maps.
set +e
- run "compare fshash maps for $host:$fs" diff -u new.fshash $localmap
+ fshash_diff=$STOREDIR/tmp/fshash-diff.$host.$fs.$date
+ run "compare fshash maps for $host:$fs" \
+ run_diff $fshash_diff new.fshash $localmap
rc_diff=$?
set -e
case $rc_diff in
esac
done
+ ## Double-check the checksums.
+ if [ $hrfs != $hlfs ]; then
+ cat >&2 <<EOF
+$0: INTERNAL ERROR: fshash $HASH checksum mismatch -- aborting
+ remote fshash checksum = $hrfs
+ local fshash checksum = $hlfs
+EOF
+ exit 127
+ fi
+
## Glorious success.
maybe rm -f $localmap
+ case $fshash_diff in nil) ;; *) maybe rm -f $fshash_diff ;; esac
$verbose " fshash match"
## Commit this backup.
bkprc=1
fi
- ## Count up the logfiles.
- nlog=0
- for i in "$logdir/$host/$fs".*; do
- if [ ! -f "$i" ]; then continue; fi
- nlog=$(( nlog + 1 ))
- done
-
- ## If there are too many, go through and delete some early ones.
- if [ $dryrun = nil ] && [ $nlog -gt $MAXLOG ]; then
- n=$(( nlog - MAXLOG ))
- for i in "$logdir/$host/$fs".*; do
- if [ ! -f "$i" ]; then continue; fi
- rm -f "$i"
- n=$(( n - 1 ))
- if [ $n -eq 0 ]; then break; fi
- done
- fi
+ ## Clear away any old logfiles.
+ remove_old_logfiles "$logdir/$host/$fs"
}
backup () {
$verbose "host $host"
}
-snaptype () { snap=$1; shift; snapargs="$*"; retry=0; }
+snaptype () { snap=$1; shift; snapargs="$*"; retry=1; }
rsyncargs () { rsyncargs="$*"; }
like () { like="$*"; }
retry () { retry="$*"; }