1#!/bin/sh
2# Not yet working
3#
4# Run a simple backup  then copy it
5#   to another device.
6##
7TestName="prune-copy-test"
8JobName=CopyJobSave
9. scripts/functions
10
11
12scripts/cleanup
13scripts/copy-migration-confs
14scripts/prepare-disk-changer
15
16# Directory to backup.
17# This directory will be created by setup_data().
18BackupDirectory="${tmp}/data"
19
20# Use a tgz to setup data to be backuped.
21# Data will be placed at "${tmp}/data/".
22setup_data data/small.tgz
23
24# the default fileset FS_TESTJOB backups all file and directories defined in "${tmp}/file-list".
25echo "${BackupDirectory}" >${tmp}/file-list
26
27
28sed 's/migrate/copy/g' ${cwd}/bin/bareos-dir.conf > ${cwd}/tmp/1
29sed 's/Migrate/Copy/g' ${cwd}/tmp/1 > ${cwd}/bin/bareos-dir.conf
30
31dircfg=$conf/bareos-dir.conf
32$bperl -e "add_attribute('$dircfg', 'AutoPrune', 'No', 'Client')"
33$bperl -e "add_attribute('$dircfg', 'Job Retention', '1s', 'Client')"
34
35change_jobname NightlySave $JobName
36start_test
37
38#
39# Note, we first backup into Pool Default,
40#          then Copy into Pool Full.
41#              Pool Default uses Storage=File
42#              Pool Full    uses Storage=DiskChanger
43
44# Write out bconsole commands
45cat <<END_OF_DATA >${cwd}/tmp/bconcmds
46@$out /dev/null
47messages
48@$out ${cwd}/tmp/log1.out
49label storage=File volume=FileVolume001 Pool=Default
50label storage=DiskChanger volume=ChangerVolume001 slot=1 Pool=Full drive=0
51label storage=DiskChanger volume=ChangerVolume002 slot=2 Pool=Full drive=0
52@# run two jobs (both will be copied)
53run job=$JobName level=Full yes
54@sleep 5
55run job=$JobName level=Full yes
56wait
57list jobs
58list volumes
59@#setdebug level=100 dir
60@# should copy two jobs
61@#setdebug level=51 storage=DiskChanger
62@sleep 2
63run job=copy-job yes
64wait
65messages
66@#purge volume=FileVolume001
67list jobs
68list volumes
69wait
70messages
71@#
72@# Now do another backup, but level Incremental
73@#
74@sleep 2
75run job=$JobName level=Incremental yes
76wait
77messages
78@#
79@# This final job that runs should be Incremental and
80@# not upgraded to full.
81@sleep 2
82@exec "touch $BackupDirectory/fr.po"
83run job=$JobName level=Incremental yes
84wait
85messages
86@$out ${cwd}/tmp/log10.out
87@sleep 1
88setdebug level=1 director
89list jobs
90sql
91SELECT JobId, Name, JobTDate, StartTime, Type, Level, JobFiles, JobStatus
92FROM Job ORDER BY StartTime;
93
94prune jobs jobtype=backup yes
95list jobs
96@################################################################
97@# now do a restore
98@#
99@$out ${cwd}/tmp/log2.out
100list volumes
101restore where=${cwd}/tmp/bareos-restores select
102unmark *
103mark *
104done
105yes
106list volumes
107wait
108messages
109quit
110END_OF_DATA
111
112run_bareos
113check_for_zombie_jobs storage=File
114stop_bareos
115
116check_two_logs
117check_restore_diff
118
119# Now we will verify that the pruning code is working as expected.  Each time,
120# we run 'list jobs', 'prune', 'list jobs'. check_prune_list ensures that jobids
121# in argument are present in the first 'list jobs', the 'prune' command deletes
122# them (same number), and checks that the last 'list jobs' doesn't contain them
123# anymore. See scripts/functions.pm for details.
124
125# Copy jobs are pruned like normal jobs after the Job Retention period
126# the admin 'c'opy job should also be pruned
127# F F c c C C I0 I -> F I
128$bperl -e "check_prune_list('$tmp/log10.out',1,5,7,8)"
129estat=$(($estat + $?))
130
131end_test
132