1package main
2
3import (
4	"bufio"
5	"bytes"
6	"context"
7	"crypto/rand"
8	"encoding/json"
9	"fmt"
10	"io"
11	"io/ioutil"
12	mrand "math/rand"
13	"os"
14	"path/filepath"
15	"regexp"
16	"runtime"
17	"strings"
18	"syscall"
19	"testing"
20	"time"
21
22	"github.com/restic/restic/internal/errors"
23	"github.com/restic/restic/internal/filter"
24	"github.com/restic/restic/internal/fs"
25	"github.com/restic/restic/internal/repository"
26	"github.com/restic/restic/internal/restic"
27	rtest "github.com/restic/restic/internal/test"
28	"github.com/restic/restic/internal/ui/termstatus"
29	"golang.org/x/sync/errgroup"
30)
31
32func parseIDsFromReader(t testing.TB, rd io.Reader) restic.IDs {
33	IDs := restic.IDs{}
34	sc := bufio.NewScanner(rd)
35
36	for sc.Scan() {
37		id, err := restic.ParseID(sc.Text())
38		if err != nil {
39			t.Logf("parse id %v: %v", sc.Text(), err)
40			continue
41		}
42
43		IDs = append(IDs, id)
44	}
45
46	return IDs
47}
48
// testRunInit initializes a fresh repository at opts.Repo. It first switches
// the crypto/KDF and lock handling into fast low-security test modes so that
// repository initialization is cheap in tests.
func testRunInit(t testing.TB, opts GlobalOptions) {
	repository.TestUseLowSecurityKDFParameters(t)
	restic.TestDisableCheckPolynomial(t)
	restic.TestSetLockTimeout(t, 0)

	rtest.OK(t, runInit(InitOptions{}, opts, nil))
	t.Logf("repository initialized at %v", opts.Repo)
}
57
// testRunBackupAssumeFailure runs the backup command and returns its error
// instead of failing the test, so callers can assert on expected failures.
// If dir is non-empty, the working directory is changed there for the
// duration of the backup.
func testRunBackupAssumeFailure(t testing.TB, dir string, target []string, opts BackupOptions, gopts GlobalOptions) error {
	ctx, cancel := context.WithCancel(gopts.ctx)
	defer cancel()

	// The status terminal runs in its own goroutine until ctx is cancelled.
	var wg errgroup.Group
	term := termstatus.New(gopts.stdout, gopts.stderr, gopts.Quiet)
	wg.Go(func() error { term.Run(ctx); return nil })

	gopts.stdout = ioutil.Discard
	t.Logf("backing up %v in %v", target, dir)
	if dir != "" {
		cleanup := rtest.Chdir(t, dir)
		defer cleanup()
	}

	backupErr := runBackup(opts, gopts, term, target)

	// Stop the terminal goroutine and wait for it to exit before returning,
	// so that subsequent output redirection in tests is not racy.
	cancel()

	err := wg.Wait()
	if err != nil {
		t.Fatal(err)
	}

	return backupErr
}
84
85func testRunBackup(t testing.TB, dir string, target []string, opts BackupOptions, gopts GlobalOptions) {
86	err := testRunBackupAssumeFailure(t, dir, target, opts, gopts)
87	rtest.Assert(t, err == nil, "Error while backing up")
88}
89
90func testRunList(t testing.TB, tpe string, opts GlobalOptions) restic.IDs {
91	buf := bytes.NewBuffer(nil)
92	globalOptions.stdout = buf
93	defer func() {
94		globalOptions.stdout = os.Stdout
95	}()
96
97	rtest.OK(t, runList(cmdList, opts, []string{tpe}))
98	return parseIDsFromReader(t, buf)
99}
100
// testRunRestore restores the given snapshot into dir without any excludes.
func testRunRestore(t testing.TB, opts GlobalOptions, dir string, snapshotID restic.ID) {
	testRunRestoreExcludes(t, opts, dir, snapshotID, nil)
}
104
105func testRunRestoreLatest(t testing.TB, gopts GlobalOptions, dir string, paths []string, hosts []string) {
106	opts := RestoreOptions{
107		Target: dir,
108		Hosts:  hosts,
109		Paths:  paths,
110	}
111
112	rtest.OK(t, runRestore(opts, gopts, []string{"latest"}))
113}
114
115func testRunRestoreExcludes(t testing.TB, gopts GlobalOptions, dir string, snapshotID restic.ID, excludes []string) {
116	opts := RestoreOptions{
117		Target:  dir,
118		Exclude: excludes,
119	}
120
121	rtest.OK(t, runRestore(opts, gopts, []string{snapshotID.String()}))
122}
123
124func testRunRestoreIncludes(t testing.TB, gopts GlobalOptions, dir string, snapshotID restic.ID, includes []string) {
125	opts := RestoreOptions{
126		Target:  dir,
127		Include: includes,
128	}
129
130	rtest.OK(t, runRestore(opts, gopts, []string{snapshotID.String()}))
131}
132
133func testRunCheck(t testing.TB, gopts GlobalOptions) {
134	opts := CheckOptions{
135		ReadData:    true,
136		CheckUnused: true,
137	}
138	rtest.OK(t, runCheck(opts, gopts, nil))
139}
140
141func testRunCheckOutput(gopts GlobalOptions) (string, error) {
142	buf := bytes.NewBuffer(nil)
143
144	globalOptions.stdout = buf
145	defer func() {
146		globalOptions.stdout = os.Stdout
147	}()
148
149	opts := CheckOptions{
150		ReadData: true,
151	}
152
153	err := runCheck(opts, gopts, nil)
154	return buf.String(), err
155}
156
157func testRunDiffOutput(gopts GlobalOptions, firstSnapshotID string, secondSnapshotID string) (string, error) {
158	buf := bytes.NewBuffer(nil)
159
160	globalOptions.stdout = buf
161	defer func() {
162		globalOptions.stdout = os.Stdout
163	}()
164
165	opts := DiffOptions{
166		ShowMetadata: false,
167	}
168	err := runDiff(opts, gopts, []string{firstSnapshotID, secondSnapshotID})
169	return buf.String(), err
170}
171
172func testRunRebuildIndex(t testing.TB, gopts GlobalOptions) {
173	globalOptions.stdout = ioutil.Discard
174	defer func() {
175		globalOptions.stdout = os.Stdout
176	}()
177
178	rtest.OK(t, runRebuildIndex(RebuildIndexOptions{}, gopts))
179}
180
181func testRunLs(t testing.TB, gopts GlobalOptions, snapshotID string) []string {
182	buf := bytes.NewBuffer(nil)
183	globalOptions.stdout = buf
184	quiet := globalOptions.Quiet
185	globalOptions.Quiet = true
186	defer func() {
187		globalOptions.stdout = os.Stdout
188		globalOptions.Quiet = quiet
189	}()
190
191	opts := LsOptions{}
192
193	rtest.OK(t, runLs(opts, gopts, []string{snapshotID}))
194
195	return strings.Split(buf.String(), "\n")
196}
197
198func testRunFind(t testing.TB, wantJSON bool, gopts GlobalOptions, pattern string) []byte {
199	buf := bytes.NewBuffer(nil)
200	globalOptions.stdout = buf
201	globalOptions.JSON = wantJSON
202	defer func() {
203		globalOptions.stdout = os.Stdout
204		globalOptions.JSON = false
205	}()
206
207	opts := FindOptions{}
208
209	rtest.OK(t, runFind(opts, gopts, []string{pattern}))
210
211	return buf.Bytes()
212}
213
214func testRunSnapshots(t testing.TB, gopts GlobalOptions) (newest *Snapshot, snapmap map[restic.ID]Snapshot) {
215	buf := bytes.NewBuffer(nil)
216	globalOptions.stdout = buf
217	globalOptions.JSON = true
218	defer func() {
219		globalOptions.stdout = os.Stdout
220		globalOptions.JSON = gopts.JSON
221	}()
222
223	opts := SnapshotOptions{}
224
225	rtest.OK(t, runSnapshots(opts, globalOptions, []string{}))
226
227	snapshots := []Snapshot{}
228	rtest.OK(t, json.Unmarshal(buf.Bytes(), &snapshots))
229
230	snapmap = make(map[restic.ID]Snapshot, len(snapshots))
231	for _, sn := range snapshots {
232		snapmap[*sn.ID] = sn
233		if newest == nil || sn.Time.After(newest.Time) {
234			newest = &sn
235		}
236	}
237	return
238}
239
// testRunForget runs "restic forget" with default options on the given
// snapshot arguments and fails the test on error.
func testRunForget(t testing.TB, gopts GlobalOptions, args ...string) {
	opts := ForgetOptions{}
	rtest.OK(t, runForget(opts, gopts, args))
}
244
245func testRunForgetJSON(t testing.TB, gopts GlobalOptions, args ...string) {
246	buf := bytes.NewBuffer(nil)
247	oldJSON := gopts.JSON
248	gopts.stdout = buf
249	gopts.JSON = true
250	defer func() {
251		gopts.stdout = os.Stdout
252		gopts.JSON = oldJSON
253	}()
254
255	opts := ForgetOptions{
256		DryRun: true,
257		Last:   1,
258	}
259
260	rtest.OK(t, runForget(opts, gopts, args))
261
262	var forgets []*ForgetGroup
263	rtest.OK(t, json.Unmarshal(buf.Bytes(), &forgets))
264
265	rtest.Assert(t, len(forgets) == 1,
266		"Expected 1 snapshot group, got %v", len(forgets))
267	rtest.Assert(t, len(forgets[0].Keep) == 1,
268		"Expected 1 snapshot to be kept, got %v", len(forgets[0].Keep))
269	rtest.Assert(t, len(forgets[0].Remove) == 2,
270		"Expected 2 snapshots to be removed, got %v", len(forgets[0].Remove))
271}
272
// testRunPrune runs "restic prune" with the given options and fails the test
// on error.
func testRunPrune(t testing.TB, gopts GlobalOptions, opts PruneOptions) {
	rtest.OK(t, runPrune(opts, gopts))
}
276
277func testSetupBackupData(t testing.TB, env *testEnvironment) string {
278	datafile := filepath.Join("testdata", "backup-data.tar.gz")
279	testRunInit(t, env.gopts)
280	rtest.SetupTarTestFixture(t, env.testdata, datafile)
281	return datafile
282}
283
// TestBackup exercises the basic backup workflow without filesystem
// snapshots.
func TestBackup(t *testing.T) {
	testBackup(t, false)
}
287
// TestBackupWithFilesystemSnapshots runs the backup workflow with VSS
// snapshots enabled; it only does anything on Windows with sufficient
// privileges (otherwise it silently passes).
func TestBackupWithFilesystemSnapshots(t *testing.T) {
	if runtime.GOOS == "windows" && fs.HasSufficientPrivilegesForVSS() == nil {
		testBackup(t, true)
	}
}
293
// testBackup performs three consecutive backups of the same data (implicit
// and explicit incremental), asserts the repository barely grows, then
// restores every snapshot and compares it with the original data.
func testBackup(t *testing.T, useFsSnapshot bool) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	testSetupBackupData(t, env)
	opts := BackupOptions{UseFsSnapshot: useFsSnapshot}

	// first backup
	testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
	snapshotIDs := testRunList(t, "snapshots", env.gopts)
	rtest.Assert(t, len(snapshotIDs) == 1,
		"expected one snapshot, got %v", snapshotIDs)

	testRunCheck(t, env.gopts)
	stat1 := dirStats(env.repo)

	// second backup, implicit incremental
	testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
	snapshotIDs = testRunList(t, "snapshots", env.gopts)
	rtest.Assert(t, len(snapshotIDs) == 2,
		"expected two snapshots, got %v", snapshotIDs)

	// unchanged data should deduplicate almost completely
	stat2 := dirStats(env.repo)
	if stat2.size > stat1.size+stat1.size/10 {
		t.Error("repository size has grown by more than 10 percent")
	}
	t.Logf("repository grown by %d bytes", stat2.size-stat1.size)

	testRunCheck(t, env.gopts)
	// third backup, explicit incremental
	opts.Parent = snapshotIDs[0].String()
	testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
	snapshotIDs = testRunList(t, "snapshots", env.gopts)
	rtest.Assert(t, len(snapshotIDs) == 3,
		"expected three snapshots, got %v", snapshotIDs)

	// still compared against the size after the first backup
	stat3 := dirStats(env.repo)
	if stat3.size > stat1.size+stat1.size/10 {
		t.Error("repository size has grown by more than 10 percent")
	}
	t.Logf("repository grown by %d bytes", stat3.size-stat2.size)

	// restore all backups and compare
	for i, snapshotID := range snapshotIDs {
		restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i))
		t.Logf("restoring snapshot %v to %v", snapshotID.Str(), restoredir)
		testRunRestore(t, env.gopts, restoredir, snapshotID)
		diff := directoriesContentsDiff(env.testdata, filepath.Join(restoredir, "testdata"))
		rtest.Assert(t, diff == "", "directories are not equal: %v", diff)
	}

	testRunCheck(t, env.gopts)
}
347
// TestBackupNonExistingFile checks that backing up a target list containing a
// non-existing path still succeeds (the missing entry is skipped, not fatal).
func TestBackupNonExistingFile(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	testSetupBackupData(t, env)
	// silence the expected warnings about the missing path
	globalOptions.stderr = ioutil.Discard
	defer func() {
		globalOptions.stderr = os.Stderr
	}()

	p := filepath.Join(env.testdata, "0", "0", "9")
	dirs := []string{
		filepath.Join(p, "0"),
		filepath.Join(p, "1"),
		filepath.Join(p, "nonexisting"),
		filepath.Join(p, "5"),
	}

	opts := BackupOptions{}

	testRunBackup(t, "", dirs, opts, env.gopts)
}
370
// removePacksExcept deletes pack files from the repository backend: with
// removeTreePacks=false it removes all data packs, with removeTreePacks=true
// all tree packs. Packs listed in keep are always retained. Used to simulate
// repository damage for the self-healing tests.
func removePacksExcept(gopts GlobalOptions, t *testing.T, keep restic.IDSet, removeTreePacks bool) {
	r, err := OpenRepository(gopts)
	rtest.OK(t, err)

	// Get all tree packs
	rtest.OK(t, r.LoadIndex(gopts.ctx))
	treePacks := restic.NewIDSet()
	for _, idx := range r.Index().(*repository.MasterIndex).All() {
		for _, id := range idx.TreePacks() {
			treePacks.Insert(id)
		}
	}

	// remove all packs containing data blobs
	rtest.OK(t, r.List(gopts.ctx, restic.PackFile, func(id restic.ID, size int64) error {
		// keep packs of the other kind, and those explicitly listed in keep
		if treePacks.Has(id) != removeTreePacks || keep.Has(id) {
			return nil
		}
		return r.Backend().Remove(gopts.ctx, restic.Handle{Type: restic.PackFile, Name: id.String()})
	}))
}
392
// TestBackupSelfHealing verifies that after all data packs are deleted and
// the index rebuilt, a subsequent backup reports an error but re-uploads the
// missing data so that a final check succeeds again ("self-healing").
func TestBackupSelfHealing(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	testRunInit(t, env.gopts)

	p := filepath.Join(env.testdata, "test/test")
	rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
	rtest.OK(t, appendRandomData(p, 5))

	opts := BackupOptions{}

	testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
	testRunCheck(t, env.gopts)

	// remove all data packs
	removePacksExcept(env.gopts, t, restic.NewIDSet(), false)

	testRunRebuildIndex(t, env.gopts)
	// now the repo is also missing the data blob in the index; check should report this
	rtest.Assert(t, runCheck(CheckOptions{}, env.gopts, nil) != nil,
		"check should have reported an error")

	// second backup should report an error but "heal" this situation
	err := testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
	rtest.Assert(t, err != nil,
		"backup should have reported an error")
	testRunCheck(t, env.gopts)
}
422
// TestBackupTreeLoadError verifies that a backup heals a repository whose
// tree packs were deleted: first only the subdirectory's tree pack, then all
// tree packs. In both cases check must fail, the next backup must report an
// error, and a final check must succeed again.
func TestBackupTreeLoadError(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	testRunInit(t, env.gopts)
	p := filepath.Join(env.testdata, "test/test")
	rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
	rtest.OK(t, appendRandomData(p, 5))

	opts := BackupOptions{}
	// Backup a subdirectory first, such that we can remove the tree pack for the subdirectory
	testRunBackup(t, env.testdata, []string{"test"}, opts, env.gopts)

	r, err := OpenRepository(env.gopts)
	rtest.OK(t, err)
	rtest.OK(t, r.LoadIndex(env.gopts.ctx))
	// collect tree packs of subdirectory
	subTreePacks := restic.NewIDSet()
	for _, idx := range r.Index().(*repository.MasterIndex).All() {
		for _, id := range idx.TreePacks() {
			subTreePacks.Insert(id)
		}
	}

	testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
	testRunCheck(t, env.gopts)

	// delete the subdirectory pack first
	for id := range subTreePacks {
		rtest.OK(t, r.Backend().Remove(env.gopts.ctx, restic.Handle{Type: restic.PackFile, Name: id.String()}))
	}
	testRunRebuildIndex(t, env.gopts)
	// now the repo is missing the tree blob in the index; check should report this
	rtest.Assert(t, runCheck(CheckOptions{}, env.gopts, nil) != nil, "check should have reported an error")
	// second backup should report an error but "heal" this situation
	err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
	rtest.Assert(t, err != nil, "backup should have reported an error for the subdirectory")
	testRunCheck(t, env.gopts)

	// remove all tree packs
	removePacksExcept(env.gopts, t, restic.NewIDSet(), true)
	testRunRebuildIndex(t, env.gopts)
	// now the repo is also missing the data blob in the index; check should report this
	rtest.Assert(t, runCheck(CheckOptions{}, env.gopts, nil) != nil, "check should have reported an error")
	// second backup should report an error but "heal" this situation
	err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
	rtest.Assert(t, err != nil, "backup should have reported an error")
	testRunCheck(t, env.gopts)
}
472
// includes reports whether needle occurs in haystack.
func includes(haystack []string, needle string) bool {
	for i := range haystack {
		if haystack[i] == needle {
			return true
		}
	}
	return false
}
482
483func loadSnapshotMap(t testing.TB, gopts GlobalOptions) map[string]struct{} {
484	snapshotIDs := testRunList(t, "snapshots", gopts)
485
486	m := make(map[string]struct{})
487	for _, id := range snapshotIDs {
488		m[id.String()] = struct{}{}
489	}
490
491	return m
492}
493
// lastSnapshot returns old extended by the first ID from current that is not
// yet in old, together with that ID. If current contains no unseen ID, old is
// returned unchanged with an empty string. Note that map iteration order is
// random, so when several IDs are new, which one is picked is arbitrary.
// (The second parameter was renamed from "new", which shadowed the builtin.)
func lastSnapshot(old, current map[string]struct{}) (map[string]struct{}, string) {
	for k := range current {
		if _, ok := old[k]; !ok {
			old[k] = struct{}{}
			return old, k
		}
	}

	return old, ""
}
504
// backupExcludeFilenames are the files created by TestBackupExclude; some of
// them match the exclude patterns exercised by that test.
var backupExcludeFilenames = []string{
	"testfile1",
	"foo.tar.gz",
	"private/secret/passwords.txt",
	"work/source/test.c",
}
511
// TestBackupExclude runs three backups with increasingly strict exclude
// patterns and verifies via "ls" that excluded files are absent from the
// respective snapshots.
func TestBackupExclude(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	testRunInit(t, env.gopts)

	datadir := filepath.Join(env.base, "testdata")

	// create each fixture file containing its own name as content
	for _, filename := range backupExcludeFilenames {
		fp := filepath.Join(datadir, filename)
		rtest.OK(t, os.MkdirAll(filepath.Dir(fp), 0755))

		f, err := os.Create(fp)
		rtest.OK(t, err)

		fmt.Fprint(f, filename)
		rtest.OK(t, f.Close())
	}

	snapshots := make(map[string]struct{})

	opts := BackupOptions{}

	// no excludes: the tarball must be included
	testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
	snapshots, snapshotID := lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
	files := testRunLs(t, env.gopts, snapshotID)
	rtest.Assert(t, includes(files, "/testdata/foo.tar.gz"),
		"expected file %q in first snapshot, but it's not included", "foo.tar.gz")

	// exclude *.tar.gz
	opts.Excludes = []string{"*.tar.gz"}
	testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
	snapshots, snapshotID = lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
	files = testRunLs(t, env.gopts, snapshotID)
	rtest.Assert(t, !includes(files, "/testdata/foo.tar.gz"),
		"expected file %q not in first snapshot, but it's included", "foo.tar.gz")

	// additionally exclude the private/secret directory
	opts.Excludes = []string{"*.tar.gz", "private/secret"}
	testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
	_, snapshotID = lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
	files = testRunLs(t, env.gopts, snapshotID)
	rtest.Assert(t, !includes(files, "/testdata/foo.tar.gz"),
		"expected file %q not in first snapshot, but it's included", "foo.tar.gz")
	rtest.Assert(t, !includes(files, "/testdata/private/secret/passwords.txt"),
		"expected file %q not in first snapshot, but it's included", "passwords.txt")
}
557
// TestBackupErrors verifies that backing up an unreadable file returns
// ErrInvalidSourceData while still creating a snapshot of the readable data.
// Skipped on Windows, where the chmod-based setup does not apply.
func TestBackupErrors(t *testing.T) {
	if runtime.GOOS == "windows" {
		return
	}
	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	testSetupBackupData(t, env)

	// Assume failure
	inaccessibleFile := filepath.Join(env.testdata, "0", "0", "9", "0")
	rtest.OK(t, os.Chmod(inaccessibleFile, 0000))
	defer func() {
		// restore permissions so the environment cleanup can delete the file
		rtest.OK(t, os.Chmod(inaccessibleFile, 0644))
	}()
	opts := BackupOptions{}
	gopts := env.gopts
	gopts.stderr = ioutil.Discard
	err := testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, gopts)
	rtest.Assert(t, err != nil, "Assumed failure, but no error occurred.")
	rtest.Assert(t, err == ErrInvalidSourceData, "Wrong error returned")
	// the partial backup must still have produced a snapshot
	snapshotIDs := testRunList(t, "snapshots", env.gopts)
	rtest.Assert(t, len(snapshotIDs) == 1,
		"expected one snapshot, got %v", snapshotIDs)
}
583
// Write sizes (in bytes) for TestIncrementalBackup.
// NOTE(review): "1042" looks like a typo for 1024, but the exact values only
// determine the amount of random test data, so they are left unchanged.
const (
	incrementalFirstWrite  = 10 * 1042 * 1024
	incrementalSecondWrite = 1 * 1042 * 1024
	incrementalThirdWrite  = 1 * 1042 * 1024
)
589
590func appendRandomData(filename string, bytes uint) error {
591	f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE, 0666)
592	if err != nil {
593		fmt.Fprint(os.Stderr, err)
594		return err
595	}
596
597	_, err = f.Seek(0, 2)
598	if err != nil {
599		fmt.Fprint(os.Stderr, err)
600		return err
601	}
602
603	_, err = io.Copy(f, io.LimitReader(rand.Reader, int64(bytes)))
604	if err != nil {
605		fmt.Fprint(os.Stderr, err)
606		return err
607	}
608
609	return f.Close()
610}
611
// TestIncrementalBackup appends data to a single file between backups and
// asserts that the repository grows by roughly the appended amount only,
// i.e. unchanged data is deduplicated.
func TestIncrementalBackup(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	testRunInit(t, env.gopts)

	datadir := filepath.Join(env.base, "testdata")
	testfile := filepath.Join(datadir, "testfile")

	rtest.OK(t, appendRandomData(testfile, incrementalFirstWrite))

	opts := BackupOptions{}

	testRunBackup(t, "", []string{datadir}, opts, env.gopts)
	testRunCheck(t, env.gopts)
	stat1 := dirStats(env.repo)

	rtest.OK(t, appendRandomData(testfile, incrementalSecondWrite))

	testRunBackup(t, "", []string{datadir}, opts, env.gopts)
	testRunCheck(t, env.gopts)
	stat2 := dirStats(env.repo)
	// growth must stay well below a full re-upload of the first write
	if stat2.size-stat1.size > incrementalFirstWrite {
		t.Errorf("repository size has grown by more than %d bytes", incrementalFirstWrite)
	}
	t.Logf("repository grown by %d bytes", stat2.size-stat1.size)

	rtest.OK(t, appendRandomData(testfile, incrementalThirdWrite))

	testRunBackup(t, "", []string{datadir}, opts, env.gopts)
	testRunCheck(t, env.gopts)
	stat3 := dirStats(env.repo)
	if stat3.size-stat2.size > incrementalFirstWrite {
		t.Errorf("repository size has grown by more than %d bytes", incrementalFirstWrite)
	}
	t.Logf("repository grown by %d bytes", stat3.size-stat2.size)
}
649
// TestBackupTags verifies that tags given on the command line end up in the
// snapshot and that a tagged backup still selects the untagged one as parent.
func TestBackupTags(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	testSetupBackupData(t, env)
	opts := BackupOptions{}

	testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
	testRunCheck(t, env.gopts)
	newest, _ := testRunSnapshots(t, env.gopts)

	if newest == nil {
		t.Fatal("expected a backup, got nil")
	}

	rtest.Assert(t, len(newest.Tags) == 0,
		"expected no tags, got %v", newest.Tags)
	parent := newest

	opts.Tags = restic.TagLists{[]string{"NL"}}
	testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
	testRunCheck(t, env.gopts)
	newest, _ = testRunSnapshots(t, env.gopts)

	if newest == nil {
		t.Fatal("expected a backup, got nil")
	}

	rtest.Assert(t, len(newest.Tags) == 1 && newest.Tags[0] == "NL",
		"expected one NL tag, got %v", newest.Tags)
	// Tagged backup should have untagged backup as parent.
	rtest.Assert(t, parent.ID.Equal(*newest.Parent),
		"expected parent to be %v, got %v", parent.ID, newest.Parent)
}
684
// testRunCopy copies all snapshots from the repository described by srcGopts
// into the repository described by dstGopts.
func testRunCopy(t testing.TB, srcGopts GlobalOptions, dstGopts GlobalOptions) {
	copyOpts := CopyOptions{
		secondaryRepoOptions: secondaryRepoOptions{
			Repo:     dstGopts.Repo,
			password: dstGopts.password,
		},
	}

	rtest.OK(t, runCopy(copyOpts, srcGopts, nil))
}
695
// TestCopy copies three snapshots into a second repository and verifies the
// copy: same snapshot count, similar repository size, clean check, and each
// copied snapshot restores to content identical to one of the originals.
func TestCopy(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()
	env2, cleanup2 := withTestEnvironment(t)
	defer cleanup2()

	testSetupBackupData(t, env)
	opts := BackupOptions{}
	testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, opts, env.gopts)
	testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts)
	testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts)
	testRunCheck(t, env.gopts)

	testRunInit(t, env2.gopts)
	testRunCopy(t, env.gopts, env2.gopts)

	snapshotIDs := testRunList(t, "snapshots", env.gopts)
	copiedSnapshotIDs := testRunList(t, "snapshots", env2.gopts)

	// Check that the copies size seems reasonable
	rtest.Assert(t, len(snapshotIDs) == len(copiedSnapshotIDs), "expected %v snapshots, found %v",
		len(snapshotIDs), len(copiedSnapshotIDs))
	stat := dirStats(env.repo)
	stat2 := dirStats(env2.repo)
	sizeDiff := int64(stat.size) - int64(stat2.size)
	if sizeDiff < 0 {
		sizeDiff = -sizeDiff
	}
	rtest.Assert(t, sizeDiff < int64(stat.size)/50, "expected less than 2%% size difference: %v vs. %v",
		stat.size, stat2.size)

	// Check integrity of the copy
	testRunCheck(t, env2.gopts)

	// Check that the copied snapshots have the same tree contents as the old ones (= identical tree hash)
	origRestores := make(map[string]struct{})
	for i, snapshotID := range snapshotIDs {
		restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i))
		origRestores[restoredir] = struct{}{}
		testRunRestore(t, env.gopts, restoredir, snapshotID)
	}
	// match every copied restore against exactly one original restore
	for i, snapshotID := range copiedSnapshotIDs {
		restoredir := filepath.Join(env2.base, fmt.Sprintf("restore%d", i))
		testRunRestore(t, env2.gopts, restoredir, snapshotID)
		foundMatch := false
		for cmpdir := range origRestores {
			diff := directoriesContentsDiff(restoredir, cmpdir)
			if diff == "" {
				delete(origRestores, cmpdir)
				foundMatch = true
			}
		}

		rtest.Assert(t, foundMatch, "found no counterpart for snapshot %v", snapshotID)
	}

	rtest.Assert(t, len(origRestores) == 0, "found not copied snapshots")
}
754
// TestCopyIncremental verifies that repeated copies are incremental: already
// copied snapshots are skipped, only new ones are transferred, and copying in
// the reverse direction brings both repositories to the same snapshot count.
func TestCopyIncremental(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()
	env2, cleanup2 := withTestEnvironment(t)
	defer cleanup2()

	testSetupBackupData(t, env)
	opts := BackupOptions{}
	testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, opts, env.gopts)
	testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts)
	testRunCheck(t, env.gopts)

	testRunInit(t, env2.gopts)
	testRunCopy(t, env.gopts, env2.gopts)

	snapshotIDs := testRunList(t, "snapshots", env.gopts)
	copiedSnapshotIDs := testRunList(t, "snapshots", env2.gopts)

	// Check that the copies size seems reasonable
	testRunCheck(t, env2.gopts)
	rtest.Assert(t, len(snapshotIDs) == len(copiedSnapshotIDs), "expected %v snapshots, found %v",
		len(snapshotIDs), len(copiedSnapshotIDs))

	// check that no snapshots are copied, as there are no new ones
	testRunCopy(t, env.gopts, env2.gopts)
	testRunCheck(t, env2.gopts)
	copiedSnapshotIDs = testRunList(t, "snapshots", env2.gopts)
	rtest.Assert(t, len(snapshotIDs) == len(copiedSnapshotIDs), "still expected %v snapshots, found %v",
		len(snapshotIDs), len(copiedSnapshotIDs))

	// check that only new snapshots are copied
	testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts)
	testRunCopy(t, env.gopts, env2.gopts)
	testRunCheck(t, env2.gopts)
	snapshotIDs = testRunList(t, "snapshots", env.gopts)
	copiedSnapshotIDs = testRunList(t, "snapshots", env2.gopts)
	rtest.Assert(t, len(snapshotIDs) == len(copiedSnapshotIDs), "still expected %v snapshots, found %v",
		len(snapshotIDs), len(copiedSnapshotIDs))

	// also test the reverse direction
	testRunCopy(t, env2.gopts, env.gopts)
	testRunCheck(t, env.gopts)
	snapshotIDs = testRunList(t, "snapshots", env.gopts)
	rtest.Assert(t, len(snapshotIDs) == len(copiedSnapshotIDs), "still expected %v snapshots, found %v",
		len(copiedSnapshotIDs), len(snapshotIDs))
}
801
// TestCopyUnstableJSON copies a fixture repository whose tree JSON is not
// byte-stable (it contains a symlink with non-UTF-8 bytes in its target) and
// verifies the copy still checks out cleanly with exactly one snapshot.
func TestCopyUnstableJSON(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()
	env2, cleanup2 := withTestEnvironment(t)
	defer cleanup2()

	// contains a symlink created using `ln -s '../i/'$'\355\246\361''d/samba' broken-symlink`
	datafile := filepath.Join("testdata", "copy-unstable-json.tar.gz")
	rtest.SetupTarTestFixture(t, env.base, datafile)

	testRunInit(t, env2.gopts)
	testRunCopy(t, env.gopts, env2.gopts)
	testRunCheck(t, env2.gopts)

	copiedSnapshotIDs := testRunList(t, "snapshots", env2.gopts)
	rtest.Assert(t, 1 == len(copiedSnapshotIDs), "still expected %v snapshot, found %v",
		1, len(copiedSnapshotIDs))
}
820
// TestInitCopyChunkerParams verifies that "init" with a secondary repository
// fails unless --copy-chunker-params is given, and that with the flag the new
// repository shares the source's chunker polynomial.
func TestInitCopyChunkerParams(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()
	env2, cleanup2 := withTestEnvironment(t)
	defer cleanup2()

	testRunInit(t, env2.gopts)

	initOpts := InitOptions{
		secondaryRepoOptions: secondaryRepoOptions{
			Repo:     env2.gopts.Repo,
			password: env2.gopts.password,
		},
	}
	rtest.Assert(t, runInit(initOpts, env.gopts, nil) != nil, "expected invalid init options to fail")

	initOpts.CopyChunkerParameters = true
	rtest.OK(t, runInit(initOpts, env.gopts, nil))

	repo, err := OpenRepository(env.gopts)
	rtest.OK(t, err)

	otherRepo, err := OpenRepository(env2.gopts)
	rtest.OK(t, err)

	rtest.Assert(t, repo.Config().ChunkerPolynomial == otherRepo.Config().ChunkerPolynomial,
		"expected equal chunker polynomials, got %v expected %v", repo.Config().ChunkerPolynomial,
		otherRepo.Config().ChunkerPolynomial)
}
850
// testRunTag runs "restic tag" with the given options on all snapshots and
// fails the test on error.
func testRunTag(t testing.TB, opts TagOptions, gopts GlobalOptions) {
	rtest.OK(t, runTag(opts, gopts, []string{}))
}
854
// TestTag exercises set/add/remove tag operations on a snapshot and verifies
// after each step that the tag list is correct and that the Original field
// keeps pointing at the first (pre-tagging) snapshot ID.
func TestTag(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	testSetupBackupData(t, env)
	testRunBackup(t, "", []string{env.testdata}, BackupOptions{}, env.gopts)
	testRunCheck(t, env.gopts)
	newest, _ := testRunSnapshots(t, env.gopts)
	if newest == nil {
		t.Fatal("expected a new backup, got nil")
	}

	rtest.Assert(t, len(newest.Tags) == 0,
		"expected no tags, got %v", newest.Tags)
	rtest.Assert(t, newest.Original == nil,
		"expected original ID to be nil, got %v", newest.Original)
	originalID := *newest.ID

	// set a tag
	testRunTag(t, TagOptions{SetTags: restic.TagLists{[]string{"NL"}}}, env.gopts)
	testRunCheck(t, env.gopts)
	newest, _ = testRunSnapshots(t, env.gopts)
	if newest == nil {
		t.Fatal("expected a backup, got nil")
	}
	rtest.Assert(t, len(newest.Tags) == 1 && newest.Tags[0] == "NL",
		"set failed, expected one NL tag, got %v", newest.Tags)
	rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
	rtest.Assert(t, *newest.Original == originalID,
		"expected original ID to be set to the first snapshot id")

	// add a tag
	testRunTag(t, TagOptions{AddTags: restic.TagLists{[]string{"CH"}}}, env.gopts)
	testRunCheck(t, env.gopts)
	newest, _ = testRunSnapshots(t, env.gopts)
	if newest == nil {
		t.Fatal("expected a backup, got nil")
	}
	rtest.Assert(t, len(newest.Tags) == 2 && newest.Tags[0] == "NL" && newest.Tags[1] == "CH",
		"add failed, expected CH,NL tags, got %v", newest.Tags)
	rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
	rtest.Assert(t, *newest.Original == originalID,
		"expected original ID to be set to the first snapshot id")

	// remove a tag
	testRunTag(t, TagOptions{RemoveTags: restic.TagLists{[]string{"NL"}}}, env.gopts)
	testRunCheck(t, env.gopts)
	newest, _ = testRunSnapshots(t, env.gopts)
	if newest == nil {
		t.Fatal("expected a backup, got nil")
	}
	rtest.Assert(t, len(newest.Tags) == 1 && newest.Tags[0] == "CH",
		"remove failed, expected one CH tag, got %v", newest.Tags)
	rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
	rtest.Assert(t, *newest.Original == originalID,
		"expected original ID to be set to the first snapshot id")

	// add then remove multiple tags, ending with none
	testRunTag(t, TagOptions{AddTags: restic.TagLists{[]string{"US", "RU"}}}, env.gopts)
	testRunTag(t, TagOptions{RemoveTags: restic.TagLists{[]string{"CH", "US", "RU"}}}, env.gopts)
	testRunCheck(t, env.gopts)
	newest, _ = testRunSnapshots(t, env.gopts)
	if newest == nil {
		t.Fatal("expected a backup, got nil")
	}
	rtest.Assert(t, len(newest.Tags) == 0,
		"expected no tags, got %v", newest.Tags)
	rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
	rtest.Assert(t, *newest.Original == originalID,
		"expected original ID to be set to the first snapshot id")

	// Check special case of removing all tags.
	testRunTag(t, TagOptions{SetTags: restic.TagLists{[]string{""}}}, env.gopts)
	testRunCheck(t, env.gopts)
	newest, _ = testRunSnapshots(t, env.gopts)
	if newest == nil {
		t.Fatal("expected a backup, got nil")
	}
	rtest.Assert(t, len(newest.Tags) == 0,
		"expected no tags, got %v", newest.Tags)
	rtest.Assert(t, newest.Original != nil, "expected original snapshot id, got nil")
	rtest.Assert(t, *newest.Original == originalID,
		"expected original ID to be set to the first snapshot id")
}
935
936func testRunKeyListOtherIDs(t testing.TB, gopts GlobalOptions) []string {
937	buf := bytes.NewBuffer(nil)
938
939	globalOptions.stdout = buf
940	defer func() {
941		globalOptions.stdout = os.Stdout
942	}()
943
944	rtest.OK(t, runKey(gopts, []string{"list"}))
945
946	scanner := bufio.NewScanner(buf)
947	exp := regexp.MustCompile(`^ ([a-f0-9]+) `)
948
949	IDs := []string{}
950	for scanner.Scan() {
951		if id := exp.FindStringSubmatch(scanner.Text()); id != nil {
952			IDs = append(IDs, id[1])
953		}
954	}
955
956	return IDs
957}
958
// testRunKeyAddNewKey adds a new key to the repository. The package-global
// testKeyNewPassword is presumably consumed by runKey as the password for the
// new key; it is cleared again when the function returns so later commands
// are unaffected.
func testRunKeyAddNewKey(t testing.TB, newPassword string, gopts GlobalOptions) {
	testKeyNewPassword = newPassword
	defer func() {
		testKeyNewPassword = ""
	}()

	rtest.OK(t, runKey(gopts, []string{"add"}))
}
967
// testRunKeyAddNewKeyUserHost adds a new key with explicit --user and --host
// flags and verifies that both values are stored on the resulting key.
func testRunKeyAddNewKeyUserHost(t testing.TB, gopts GlobalOptions) {
	testKeyNewPassword = "john's geheimnis"
	defer func() {
		// reset the package-global password and flag-backed values so other
		// tests start from a clean slate
		testKeyNewPassword = ""
		keyUsername = ""
		keyHostname = ""
	}()

	// parse the flags on the key command so keyUsername/keyHostname are set
	rtest.OK(t, cmdKey.Flags().Parse([]string{"--user=john", "--host=example.com"}))

	t.Log("adding key for john@example.com")
	rtest.OK(t, runKey(gopts, []string{"add"}))

	// search for the just-added key using its password and check the
	// stored username/hostname
	repo, err := OpenRepository(gopts)
	rtest.OK(t, err)
	key, err := repository.SearchKey(gopts.ctx, repo, testKeyNewPassword, 1, "")
	rtest.OK(t, err)

	rtest.Equals(t, "john", key.Username)
	rtest.Equals(t, "example.com", key.Hostname)
}
989
// testRunKeyPasswd changes the password of the current key. The
// package-global testKeyNewPassword is presumably read by runKey as the new
// password; it is cleared again on return.
func testRunKeyPasswd(t testing.TB, newPassword string, gopts GlobalOptions) {
	testKeyNewPassword = newPassword
	defer func() {
		testKeyNewPassword = ""
	}()

	rtest.OK(t, runKey(gopts, []string{"passwd"}))
}
998
999func testRunKeyRemove(t testing.TB, gopts GlobalOptions, IDs []string) {
1000	t.Logf("remove %d keys: %q\n", len(IDs), IDs)
1001	for _, id := range IDs {
1002		rtest.OK(t, runKey(gopts, []string{"remove", id}))
1003	}
1004}
1005
// TestKeyAddRemove exercises the key management commands: change the
// password, repeatedly add a new key and remove all others, verify the last
// key still grants access, and finally add a key with explicit user/host.
func TestKeyAddRemove(t *testing.T) {
	passwordList := []string{
		"OnnyiasyatvodsEvVodyawit",
		"raicneirvOjEfEigonOmLasOd",
	}

	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	testRunInit(t, env.gopts)

	testRunKeyPasswd(t, "geheim2", env.gopts)
	env.gopts.password = "geheim2"
	t.Logf("changed password to %q", env.gopts.password)

	for _, newPassword := range passwordList {
		// add a key with the new password, switch to it, then drop all
		// other keys so exactly one key remains
		testRunKeyAddNewKey(t, newPassword, env.gopts)
		t.Logf("added new password %q", newPassword)
		env.gopts.password = newPassword
		testRunKeyRemove(t, env.gopts, testRunKeyListOtherIDs(t, env.gopts))
	}

	// the last added key must still grant access to the repository
	env.gopts.password = passwordList[len(passwordList)-1]
	t.Logf("testing access with last password %q\n", env.gopts.password)
	rtest.OK(t, runKey(env.gopts, []string{"list"}))
	testRunCheck(t, env.gopts)

	testRunKeyAddNewKeyUserHost(t, env.gopts)
}
1035
1036func testFileSize(filename string, size int64) error {
1037	fi, err := os.Stat(filename)
1038	if err != nil {
1039		return err
1040	}
1041
1042	if fi.Size() != size {
1043		return errors.Fatalf("wrong file size for %v: expected %v, got %v", filename, size, fi.Size())
1044	}
1045
1046	return nil
1047}
1048
// TestRestoreFilter backs up a small set of files and checks that restoring
// with a single exclude pattern skips exactly the files matching it.
func TestRestoreFilter(t *testing.T) {
	testfiles := []struct {
		name string
		size uint
	}{
		{"testfile1.c", 100},
		{"testfile2.exe", 101},
		{"subdir1/subdir2/testfile3.docx", 102},
		{"subdir1/subdir2/testfile4.c", 102},
	}

	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	testRunInit(t, env.gopts)

	for _, testFile := range testfiles {
		p := filepath.Join(env.testdata, testFile.name)
		rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
		rtest.OK(t, appendRandomData(p, testFile.size))
	}

	opts := BackupOptions{}

	testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
	testRunCheck(t, env.gopts)

	snapshotID := testRunList(t, "snapshots", env.gopts)[0]

	// no restore filter should restore all files
	testRunRestore(t, env.gopts, filepath.Join(env.base, "restore0"), snapshotID)
	for _, testFile := range testfiles {
		rtest.OK(t, testFileSize(filepath.Join(env.base, "restore0", "testdata", testFile.name), int64(testFile.size)))
	}

	// restore with one exclude pattern at a time: excluded files must be
	// missing, all other files must be present with their full size
	for i, pat := range []string{"*.c", "*.exe", "*", "*file3*"} {
		base := filepath.Join(env.base, fmt.Sprintf("restore%d", i+1))
		testRunRestoreExcludes(t, env.gopts, base, snapshotID, []string{pat})
		for _, testFile := range testfiles {
			err := testFileSize(filepath.Join(base, "testdata", testFile.name), int64(testFile.size))
			// filter.Match decides whether the file was expected to be
			// excluded from the restore
			if ok, _ := filter.Match(pat, filepath.Base(testFile.name)); !ok {
				rtest.OK(t, err)
			} else {
				rtest.Assert(t, os.IsNotExist(errors.Cause(err)),
					"expected %v to not exist in restore step %v, but it exists, err %v", testFile.name, i+1, err)
			}
		}
	}
}
1098
// TestRestore backs up a tree of random files and verifies that a plain
// restore of the latest snapshot reproduces the tree exactly.
func TestRestore(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	testRunInit(t, env.gopts)

	// create 10 files with random sizes of up to 4 MiB each
	for i := 0; i < 10; i++ {
		p := filepath.Join(env.testdata, fmt.Sprintf("foo/bar/testfile%v", i))
		rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
		rtest.OK(t, appendRandomData(p, uint(mrand.Intn(2<<21))))
	}

	opts := BackupOptions{}

	testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
	testRunCheck(t, env.gopts)

	// Restore latest without any filters
	restoredir := filepath.Join(env.base, "restore")
	testRunRestoreLatest(t, env.gopts, restoredir, nil, nil)

	// the restored tree must be byte-identical to the original
	diff := directoriesContentsDiff(env.testdata, filepath.Join(restoredir, filepath.Base(env.testdata)))
	rtest.Assert(t, diff == "", "directories are not equal %v", diff)
}
1123
1124func TestRestoreLatest(t *testing.T) {
1125	env, cleanup := withTestEnvironment(t)
1126	defer cleanup()
1127
1128	testRunInit(t, env.gopts)
1129
1130	p := filepath.Join(env.testdata, "testfile.c")
1131	rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
1132	rtest.OK(t, appendRandomData(p, 100))
1133
1134	opts := BackupOptions{}
1135
1136	// chdir manually here so we can get the current directory. This is not the
1137	// same as the temp dir returned by ioutil.TempDir() on darwin.
1138	back := rtest.Chdir(t, filepath.Dir(env.testdata))
1139	defer back()
1140
1141	curdir, err := os.Getwd()
1142	if err != nil {
1143		t.Fatal(err)
1144	}
1145
1146	testRunBackup(t, "", []string{filepath.Base(env.testdata)}, opts, env.gopts)
1147	testRunCheck(t, env.gopts)
1148
1149	rtest.OK(t, os.Remove(p))
1150	rtest.OK(t, appendRandomData(p, 101))
1151	testRunBackup(t, "", []string{filepath.Base(env.testdata)}, opts, env.gopts)
1152	testRunCheck(t, env.gopts)
1153
1154	// Restore latest without any filters
1155	testRunRestoreLatest(t, env.gopts, filepath.Join(env.base, "restore0"), nil, nil)
1156	rtest.OK(t, testFileSize(filepath.Join(env.base, "restore0", "testdata", "testfile.c"), int64(101)))
1157
1158	// Setup test files in different directories backed up in different snapshots
1159	p1 := filepath.Join(curdir, filepath.FromSlash("p1/testfile.c"))
1160
1161	rtest.OK(t, os.MkdirAll(filepath.Dir(p1), 0755))
1162	rtest.OK(t, appendRandomData(p1, 102))
1163	testRunBackup(t, "", []string{"p1"}, opts, env.gopts)
1164	testRunCheck(t, env.gopts)
1165
1166	p2 := filepath.Join(curdir, filepath.FromSlash("p2/testfile.c"))
1167
1168	rtest.OK(t, os.MkdirAll(filepath.Dir(p2), 0755))
1169	rtest.OK(t, appendRandomData(p2, 103))
1170	testRunBackup(t, "", []string{"p2"}, opts, env.gopts)
1171	testRunCheck(t, env.gopts)
1172
1173	p1rAbs := filepath.Join(env.base, "restore1", "p1/testfile.c")
1174	p2rAbs := filepath.Join(env.base, "restore2", "p2/testfile.c")
1175
1176	testRunRestoreLatest(t, env.gopts, filepath.Join(env.base, "restore1"), []string{filepath.Dir(p1)}, nil)
1177	rtest.OK(t, testFileSize(p1rAbs, int64(102)))
1178	if _, err := os.Stat(p2rAbs); os.IsNotExist(errors.Cause(err)) {
1179		rtest.Assert(t, os.IsNotExist(errors.Cause(err)),
1180			"expected %v to not exist in restore, but it exists, err %v", p2rAbs, err)
1181	}
1182
1183	testRunRestoreLatest(t, env.gopts, filepath.Join(env.base, "restore2"), []string{filepath.Dir(p2)}, nil)
1184	rtest.OK(t, testFileSize(p2rAbs, int64(103)))
1185	if _, err := os.Stat(p1rAbs); os.IsNotExist(errors.Cause(err)) {
1186		rtest.Assert(t, os.IsNotExist(errors.Cause(err)),
1187			"expected %v to not exist in restore, but it exists, err %v", p1rAbs, err)
1188	}
1189}
1190
1191func TestRestoreWithPermissionFailure(t *testing.T) {
1192	env, cleanup := withTestEnvironment(t)
1193	defer cleanup()
1194
1195	datafile := filepath.Join("testdata", "repo-restore-permissions-test.tar.gz")
1196	rtest.SetupTarTestFixture(t, env.base, datafile)
1197
1198	snapshots := testRunList(t, "snapshots", env.gopts)
1199	rtest.Assert(t, len(snapshots) > 0,
1200		"no snapshots found in repo (%v)", datafile)
1201
1202	globalOptions.stderr = ioutil.Discard
1203	defer func() {
1204		globalOptions.stderr = os.Stderr
1205	}()
1206
1207	testRunRestore(t, env.gopts, filepath.Join(env.base, "restore"), snapshots[0])
1208
1209	// make sure that all files have been restored, regardless of any
1210	// permission errors
1211	files := testRunLs(t, env.gopts, snapshots[0].String())
1212	for _, filename := range files {
1213		fi, err := os.Lstat(filepath.Join(env.base, "restore", filename))
1214		rtest.OK(t, err)
1215
1216		rtest.Assert(t, !isFile(fi) || fi.Size() > 0,
1217			"file %v restored, but filesize is 0", filename)
1218	}
1219}
1220
1221func setZeroModTime(filename string) error {
1222	var utimes = []syscall.Timespec{
1223		syscall.NsecToTimespec(0),
1224		syscall.NsecToTimespec(0),
1225	}
1226
1227	return syscall.UtimesNano(filename, utimes)
1228}
1229
1230func TestRestoreNoMetadataOnIgnoredIntermediateDirs(t *testing.T) {
1231	env, cleanup := withTestEnvironment(t)
1232	defer cleanup()
1233
1234	testRunInit(t, env.gopts)
1235
1236	p := filepath.Join(env.testdata, "subdir1", "subdir2", "subdir3", "file.ext")
1237	rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
1238	rtest.OK(t, appendRandomData(p, 200))
1239	rtest.OK(t, setZeroModTime(filepath.Join(env.testdata, "subdir1", "subdir2")))
1240
1241	opts := BackupOptions{}
1242
1243	testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
1244	testRunCheck(t, env.gopts)
1245
1246	snapshotID := testRunList(t, "snapshots", env.gopts)[0]
1247
1248	// restore with filter "*.ext", this should restore "file.ext", but
1249	// since the directories are ignored and only created because of
1250	// "file.ext", no meta data should be restored for them.
1251	testRunRestoreIncludes(t, env.gopts, filepath.Join(env.base, "restore0"), snapshotID, []string{"*.ext"})
1252
1253	f1 := filepath.Join(env.base, "restore0", "testdata", "subdir1", "subdir2")
1254	_, err := os.Stat(f1)
1255	rtest.OK(t, err)
1256
1257	// restore with filter "*", this should restore meta data on everything.
1258	testRunRestoreIncludes(t, env.gopts, filepath.Join(env.base, "restore1"), snapshotID, []string{"*"})
1259
1260	f2 := filepath.Join(env.base, "restore1", "testdata", "subdir1", "subdir2")
1261	fi, err := os.Stat(f2)
1262	rtest.OK(t, err)
1263
1264	rtest.Assert(t, fi.ModTime() == time.Unix(0, 0),
1265		"meta data of intermediate directory hasn't been restore")
1266}
1267
// TestFind checks the plain-text output of `restic find` for no match, a
// single match, and a glob matching several files.
func TestFind(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	datafile := testSetupBackupData(t, env)
	opts := BackupOptions{}

	testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
	testRunCheck(t, env.gopts)

	results := testRunFind(t, false, env.gopts, "unexistingfile")
	rtest.Assert(t, len(results) == 0, "unexisting file found in repo (%v)", datafile)

	// the output ends with a newline, so Split yields one more element than
	// there are result lines — hence the counts below are one higher
	results = testRunFind(t, false, env.gopts, "testfile")
	lines := strings.Split(string(results), "\n")
	rtest.Assert(t, len(lines) == 2, "expected one file found in repo (%v)", datafile)

	results = testRunFind(t, false, env.gopts, "testfile*")
	lines = strings.Split(string(results), "\n")
	rtest.Assert(t, len(lines) == 4, "expected three files found in repo (%v)", datafile)
}
1289
// testMatch mirrors the JSON object for a single matched file as printed by
// `restic find --json`.
type testMatch struct {
	Path        string    `json:"path,omitempty"`
	Permissions string    `json:"permissions,omitempty"`
	Size        uint64    `json:"size,omitempty"`
	Date        time.Time `json:"date,omitempty"`
	UID         uint32    `json:"uid,omitempty"`
	GID         uint32    `json:"gid,omitempty"`
}

// testMatches mirrors the per-snapshot result object printed by
// `restic find --json`, grouping all matches found in one snapshot.
type testMatches struct {
	Hits       int         `json:"hits,omitempty"`
	SnapshotID string      `json:"snapshot,omitempty"`
	Matches    []testMatch `json:"matches,omitempty"`
}
1304
// TestFindJSON checks the JSON output of `restic find` for no match, a
// single match, and a glob matching several files.
func TestFindJSON(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	datafile := testSetupBackupData(t, env)
	opts := BackupOptions{}

	testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
	testRunCheck(t, env.gopts)

	// no match: the result must decode to an empty list
	results := testRunFind(t, true, env.gopts, "unexistingfile")
	matches := []testMatches{}
	rtest.OK(t, json.Unmarshal(results, &matches))
	rtest.Assert(t, len(matches) == 0, "expected no match in repo (%v)", datafile)

	// exact name: one snapshot containing exactly one match
	results = testRunFind(t, true, env.gopts, "testfile")
	rtest.OK(t, json.Unmarshal(results, &matches))
	rtest.Assert(t, len(matches) == 1, "expected a single snapshot in repo (%v)", datafile)
	rtest.Assert(t, len(matches[0].Matches) == 1, "expected a single file to match (%v)", datafile)
	rtest.Assert(t, matches[0].Hits == 1, "expected hits to show 1 match (%v)", datafile)

	// glob: one snapshot containing three matches
	results = testRunFind(t, true, env.gopts, "testfile*")
	rtest.OK(t, json.Unmarshal(results, &matches))
	rtest.Assert(t, len(matches) == 1, "expected a single snapshot in repo (%v)", datafile)
	rtest.Assert(t, len(matches[0].Matches) == 3, "expected 3 files to match (%v)", datafile)
	rtest.Assert(t, matches[0].Hits == 3, "expected hits to show 3 matches (%v)", datafile)
}
1332
// TestRebuildIndex uses a fixture repository whose index references packs
// twice, checks that `check` reports the problem and suggests
// rebuild-index, then verifies that rebuild-index repairs the repository.
func TestRebuildIndex(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	datafile := filepath.Join("..", "..", "internal", "checker", "testdata", "duplicate-packs-in-index-test-repo.tar.gz")
	rtest.SetupTarTestFixture(t, env.base, datafile)

	// the checker must detect the duplicate index entries ...
	out, err := testRunCheckOutput(env.gopts)
	if !strings.Contains(out, "contained in several indexes") {
		t.Fatalf("did not find checker hint for packs in several indexes")
	}

	// ... but treat them as a hint, not an error
	if err != nil {
		t.Fatalf("expected no error from checker for test repository, got %v", err)
	}

	if !strings.Contains(out, "restic rebuild-index") {
		t.Fatalf("did not find hint for rebuild-index command")
	}

	testRunRebuildIndex(t, env.gopts)

	// after rebuilding, the checker must be completely silent
	out, err = testRunCheckOutput(env.gopts)
	if len(out) != 0 {
		t.Fatalf("expected no output from the checker, got: %v", out)
	}

	if err != nil {
		t.Fatalf("expected no error from checker after rebuild-index, got: %v", err)
	}
}
1364
1365func TestRebuildIndexAlwaysFull(t *testing.T) {
1366	repository.IndexFull = func(*repository.Index) bool { return true }
1367	TestRebuildIndex(t)
1368}
1369
// appendOnlyBackend wraps a backend and rejects every remove operation,
// simulating append-only storage.
type appendOnlyBackend struct {
	restic.Backend
}

// Remove always fails; called via repo.Backend().Remove()
func (b *appendOnlyBackend) Remove(ctx context.Context, h restic.Handle) error {
	return errors.Errorf("Failed to remove %v", h)
}
1378
1379func TestRebuildIndexFailsOnAppendOnly(t *testing.T) {
1380	env, cleanup := withTestEnvironment(t)
1381	defer cleanup()
1382
1383	datafile := filepath.Join("..", "..", "internal", "checker", "testdata", "duplicate-packs-in-index-test-repo.tar.gz")
1384	rtest.SetupTarTestFixture(t, env.base, datafile)
1385
1386	globalOptions.stdout = ioutil.Discard
1387	defer func() {
1388		globalOptions.stdout = os.Stdout
1389	}()
1390
1391	env.gopts.backendTestHook = func(r restic.Backend) (restic.Backend, error) {
1392		return &appendOnlyBackend{r}, nil
1393	}
1394	err := runRebuildIndex(RebuildIndexOptions{}, env.gopts)
1395	if err == nil {
1396		t.Error("expected rebuildIndex to fail")
1397	}
1398	t.Log(err)
1399}
1400
// TestCheckRestoreNoLock makes all repository files read-only and verifies
// that check and restore still work with --no-lock (no lock file can be
// written to read-only storage).
func TestCheckRestoreNoLock(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	datafile := filepath.Join("testdata", "small-repo.tar.gz")
	rtest.SetupTarTestFixture(t, env.base, datafile)

	// strip all write permission bits from every file in the repository
	err := filepath.Walk(env.repo, func(p string, fi os.FileInfo, e error) error {
		if e != nil {
			return e
		}
		return os.Chmod(p, fi.Mode() & ^(os.FileMode(0222)))
	})
	rtest.OK(t, err)

	env.gopts.NoLock = true

	testRunCheck(t, env.gopts)

	snapshotIDs := testRunList(t, "snapshots", env.gopts)
	if len(snapshotIDs) == 0 {
		t.Fatalf("found no snapshots")
	}

	testRunRestore(t, env.gopts, filepath.Join(env.base, "restore"), snapshotIDs[0])
}
1427
1428func TestPrune(t *testing.T) {
1429	t.Run("0", func(t *testing.T) {
1430		opts := PruneOptions{MaxUnused: "0%"}
1431		checkOpts := CheckOptions{ReadData: true, CheckUnused: true}
1432		testPrune(t, opts, checkOpts)
1433	})
1434
1435	t.Run("50", func(t *testing.T) {
1436		opts := PruneOptions{MaxUnused: "50%"}
1437		checkOpts := CheckOptions{ReadData: true}
1438		testPrune(t, opts, checkOpts)
1439	})
1440
1441	t.Run("unlimited", func(t *testing.T) {
1442		opts := PruneOptions{MaxUnused: "unlimited"}
1443		checkOpts := CheckOptions{ReadData: true}
1444		testPrune(t, opts, checkOpts)
1445	})
1446
1447	t.Run("CachableOnly", func(t *testing.T) {
1448		opts := PruneOptions{MaxUnused: "5%", RepackCachableOnly: true}
1449		checkOpts := CheckOptions{ReadData: true}
1450		testPrune(t, opts, checkOpts)
1451	})
1452}
1453
// testPrune creates three snapshots, forgets the first one and then runs
// prune followed by check using the given options.
func testPrune(t *testing.T, pruneOpts PruneOptions, checkOpts CheckOptions) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	testSetupBackupData(t, env)
	opts := BackupOptions{}

	testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, opts, env.gopts)
	firstSnapshot := testRunList(t, "snapshots", env.gopts)
	rtest.Assert(t, len(firstSnapshot) == 1,
		"expected one snapshot, got %v", firstSnapshot)

	// the next two snapshots cover subsets of the first, so forgetting the
	// first snapshot leaves unused data for prune to clean up
	testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts)
	testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts)

	snapshotIDs := testRunList(t, "snapshots", env.gopts)
	rtest.Assert(t, len(snapshotIDs) == 3,
		"expected 3 snapshot, got %v", snapshotIDs)

	testRunForgetJSON(t, env.gopts)
	testRunForget(t, env.gopts, firstSnapshot[0].String())
	testRunPrune(t, env.gopts, pruneOpts)
	rtest.OK(t, runCheck(checkOpts, env.gopts, nil))
}
1478
// pruneDefaultOptions are the prune options used by tests that do not
// exercise a specific --max-unused setting.
var pruneDefaultOptions = PruneOptions{MaxUnused: "5%"}
1480
1481func listPacks(gopts GlobalOptions, t *testing.T) restic.IDSet {
1482	r, err := OpenRepository(gopts)
1483	rtest.OK(t, err)
1484
1485	packs := restic.NewIDSet()
1486
1487	rtest.OK(t, r.List(gopts.ctx, restic.PackFile, func(id restic.ID, size int64) error {
1488		packs.Insert(id)
1489		return nil
1490	}))
1491	return packs
1492}
1493
// TestPruneWithDamagedRepository deletes pack files that a snapshot still
// references and verifies that prune refuses to run on the damaged
// repository instead of making things worse.
func TestPruneWithDamagedRepository(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	datafile := filepath.Join("testdata", "backup-data.tar.gz")
	testRunInit(t, env.gopts)

	rtest.SetupTarTestFixture(t, env.testdata, datafile)
	opts := BackupOptions{}

	// create and delete snapshot to create unused blobs
	testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts)
	firstSnapshot := testRunList(t, "snapshots", env.gopts)
	rtest.Assert(t, len(firstSnapshot) == 1,
		"expected one snapshot, got %v", firstSnapshot)
	testRunForget(t, env.gopts, firstSnapshot[0].String())

	oldPacks := listPacks(env.gopts, t)

	// create new snapshot, but lose all data
	testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts)
	snapshotIDs := testRunList(t, "snapshots", env.gopts)

	// delete all pack files created by the second backup
	removePacksExcept(env.gopts, t, oldPacks, false)

	rtest.Assert(t, len(snapshotIDs) == 1,
		"expected one snapshot, got %v", snapshotIDs)

	// prune should fail
	rtest.Assert(t, runPrune(pruneDefaultOptions, env.gopts) == errorPacksMissing,
		"prune should have reported index not complete error")
}
1526
// TestEdgeCaseRepos runs check and prune on a set of crafted fixture
// repositories and verifies whether each command is expected to succeed or
// to report an error.
func TestEdgeCaseRepos(t *testing.T) {
	opts := CheckOptions{}

	// repo where index is completely missing
	// => check and prune should fail
	t.Run("no-index", func(t *testing.T) {
		testEdgeCaseRepo(t, "repo-index-missing.tar.gz", opts, pruneDefaultOptions, false, false)
	})

	// repo where an existing and used blob is missing from the index
	// => check and prune should fail
	t.Run("index-missing-blob", func(t *testing.T) {
		testEdgeCaseRepo(t, "repo-index-missing-blob.tar.gz", opts, pruneDefaultOptions, false, false)
	})

	// repo where a blob is missing
	// => check and prune should fail
	t.Run("missing-data", func(t *testing.T) {
		testEdgeCaseRepo(t, "repo-data-missing.tar.gz", opts, pruneDefaultOptions, false, false)
	})

	// repo where blobs which are not needed are missing or in invalid pack files
	// => check should fail and prune should repair this
	t.Run("missing-unused-data", func(t *testing.T) {
		testEdgeCaseRepo(t, "repo-unused-data-missing.tar.gz", opts, pruneDefaultOptions, false, true)
	})

	// repo where data exists that is not referenced
	// => check and prune should fully work
	t.Run("unreferenced-data", func(t *testing.T) {
		testEdgeCaseRepo(t, "repo-unreferenced-data.tar.gz", opts, pruneDefaultOptions, true, true)
	})

	// repo where an obsolete index still exists
	// => check and prune should fully work
	t.Run("obsolete-index", func(t *testing.T) {
		testEdgeCaseRepo(t, "repo-obsolete-index.tar.gz", opts, pruneDefaultOptions, true, true)
	})

	// repo which contains mixed (data/tree) packs
	// => check and prune should fully work
	t.Run("mixed-packs", func(t *testing.T) {
		testEdgeCaseRepo(t, "repo-mixed.tar.gz", opts, pruneDefaultOptions, true, true)
	})

	// repo which contains duplicate blobs
	// => checking for unused data should report an error and prune resolves the
	// situation
	opts = CheckOptions{
		ReadData:    true,
		CheckUnused: true,
	}
	t.Run("duplicates", func(t *testing.T) {
		testEdgeCaseRepo(t, "repo-duplicates.tar.gz", opts, pruneDefaultOptions, false, true)
	})
}
1584
1585func testEdgeCaseRepo(t *testing.T, tarfile string, optionsCheck CheckOptions, optionsPrune PruneOptions, checkOK, pruneOK bool) {
1586	env, cleanup := withTestEnvironment(t)
1587	defer cleanup()
1588
1589	datafile := filepath.Join("testdata", tarfile)
1590	rtest.SetupTarTestFixture(t, env.base, datafile)
1591
1592	if checkOK {
1593		testRunCheck(t, env.gopts)
1594	} else {
1595		rtest.Assert(t, runCheck(optionsCheck, env.gopts, nil) != nil,
1596			"check should have reported an error")
1597	}
1598
1599	if pruneOK {
1600		testRunPrune(t, env.gopts, optionsPrune)
1601		testRunCheck(t, env.gopts)
1602	} else {
1603		rtest.Assert(t, runPrune(optionsPrune, env.gopts) != nil,
1604			"prune should have reported an error")
1605	}
1606}
1607
// a listOnceBackend only allows listing once per filetype
// listing filetypes more than once may cause problems with eventually consistent
// backends (like e.g. AWS S3) as the second listing may be inconsistent to what
// is expected by the first listing + some operations.
type listOnceBackend struct {
	restic.Backend
	listedFileType map[restic.FileType]bool
}

// newListOnceBackend wraps be and starts tracking which file types have
// been listed.
func newListOnceBackend(be restic.Backend) *listOnceBackend {
	return &listOnceBackend{
		Backend:        be,
		listedFileType: make(map[restic.FileType]bool),
	}
}

// List fails when a file type is listed a second time. Lock files are
// exempt — presumably because lock handling legitimately needs to list them
// repeatedly.
func (be *listOnceBackend) List(ctx context.Context, t restic.FileType, fn func(restic.FileInfo) error) error {
	if t != restic.LockFile && be.listedFileType[t] {
		return errors.Errorf("tried listing type %v the second time", t)
	}
	be.listedFileType[t] = true
	return be.Backend.List(ctx, t, fn)
}
1631
// TestListOnce wraps the backend in a listOnceBackend and runs a full
// backup/forget/prune/check/rebuild-index cycle to ensure no command lists
// a file type (other than lock files) more than once.
func TestListOnce(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	env.gopts.backendTestHook = func(r restic.Backend) (restic.Backend, error) {
		return newListOnceBackend(r), nil
	}

	pruneOpts := PruneOptions{MaxUnused: "0"}
	checkOpts := CheckOptions{ReadData: true, CheckUnused: true}

	testSetupBackupData(t, env)
	opts := BackupOptions{}

	testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, opts, env.gopts)
	firstSnapshot := testRunList(t, "snapshots", env.gopts)
	rtest.Assert(t, len(firstSnapshot) == 1,
		"expected one snapshot, got %v", firstSnapshot)

	testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts)
	testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts)

	snapshotIDs := testRunList(t, "snapshots", env.gopts)
	rtest.Assert(t, len(snapshotIDs) == 3,
		"expected 3 snapshot, got %v", snapshotIDs)

	// forget, prune and check must each get by with a single listing
	testRunForgetJSON(t, env.gopts)
	testRunForget(t, env.gopts, firstSnapshot[0].String())
	testRunPrune(t, env.gopts, pruneOpts)
	rtest.OK(t, runCheck(checkOpts, env.gopts, nil))

	// rebuild-index in both modes must also respect the listing limit
	rtest.OK(t, runRebuildIndex(RebuildIndexOptions{}, env.gopts))
	rtest.OK(t, runRebuildIndex(RebuildIndexOptions{ReadAllPacks: true}, env.gopts))
}
1666
// TestHardLink backs up a directory containing hard-linked files and
// verifies that a restore reproduces the same link structure.
func TestHardLink(t *testing.T) {
	// this test assumes a test set with a single directory containing hard linked files
	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	// the fixture is optional; skip the test when it is not present
	datafile := filepath.Join("testdata", "test.hl.tar.gz")
	fd, err := os.Open(datafile)
	if os.IsNotExist(errors.Cause(err)) {
		t.Skipf("unable to find data file %q, skipping", datafile)
		return
	}
	rtest.OK(t, err)
	rtest.OK(t, fd.Close())

	testRunInit(t, env.gopts)

	rtest.SetupTarTestFixture(t, env.testdata, datafile)

	// record the hardlink groups of the original tree for later comparison
	linkTests := createFileSetPerHardlink(env.testdata)

	opts := BackupOptions{}

	// first backup
	testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
	snapshotIDs := testRunList(t, "snapshots", env.gopts)
	rtest.Assert(t, len(snapshotIDs) == 1,
		"expected one snapshot, got %v", snapshotIDs)

	testRunCheck(t, env.gopts)

	// restore all backups and compare
	for i, snapshotID := range snapshotIDs {
		restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i))
		t.Logf("restoring snapshot %v to %v", snapshotID.Str(), restoredir)
		testRunRestore(t, env.gopts, restoredir, snapshotID)
		diff := directoriesContentsDiff(env.testdata, filepath.Join(restoredir, "testdata"))
		rtest.Assert(t, diff == "", "directories are not equal %v", diff)

		// the restored tree must contain the same hardlink groups
		linkResults := createFileSetPerHardlink(filepath.Join(restoredir, "testdata"))
		rtest.Assert(t, linksEqual(linkTests, linkResults),
			"links are not equal")
	}

	testRunCheck(t, env.gopts)
}
1712
1713func linksEqual(source, dest map[uint64][]string) bool {
1714	for _, vs := range source {
1715		found := false
1716		for kd, vd := range dest {
1717			if linkEqual(vs, vd) {
1718				delete(dest, kd)
1719				found = true
1720				break
1721			}
1722		}
1723		if !found {
1724			return false
1725		}
1726	}
1727
1728	return len(dest) == 0
1729}
1730
// linkEqual reports whether source and dest contain the same entries,
// ignoring order but respecting multiplicity. Two nil slices are equal; a
// nil slice is never equal to a non-nil one.
func linkEqual(source, dest []string) bool {
	if source == nil && dest == nil {
		return true
	}

	if source == nil || dest == nil {
		return false
	}

	if len(source) != len(dest) {
		return false
	}

	// Compare as multisets. The previous implementation only checked that
	// each source entry occurs somewhere in dest, which wrongly treated
	// e.g. ["a", "a"] and ["a", "b"] as equal; counting occurrences fixes
	// that and is O(n) instead of O(n²).
	counts := make(map[string]int, len(source))
	for _, s := range source {
		counts[s]++
	}
	for _, d := range dest {
		counts[d]--
		if counts[d] < 0 {
			return false
		}
	}

	return true
}
1760
// TestQuietBackup runs one backup with quiet mode off and one with quiet
// mode on and verifies that both create a snapshot.
func TestQuietBackup(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	testSetupBackupData(t, env)
	opts := BackupOptions{}

	env.gopts.Quiet = false
	testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
	snapshotIDs := testRunList(t, "snapshots", env.gopts)
	rtest.Assert(t, len(snapshotIDs) == 1,
		"expected one snapshot, got %v", snapshotIDs)

	testRunCheck(t, env.gopts)

	env.gopts.Quiet = true
	testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
	snapshotIDs = testRunList(t, "snapshots", env.gopts)
	rtest.Assert(t, len(snapshotIDs) == 2,
		"expected two snapshots, got %v", snapshotIDs)

	testRunCheck(t, env.gopts)
}
1784
1785func copyFile(dst string, src string) error {
1786	srcFile, err := os.Open(src)
1787	if err != nil {
1788		return err
1789	}
1790
1791	dstFile, err := os.Create(dst)
1792	if err != nil {
1793		// ignore subsequent errors
1794		_ = srcFile.Close()
1795		return err
1796	}
1797
1798	_, err = io.Copy(dstFile, srcFile)
1799	if err != nil {
1800		// ignore subsequent errors
1801		_ = srcFile.Close()
1802		_ = dstFile.Close()
1803		return err
1804	}
1805
1806	err = srcFile.Close()
1807	if err != nil {
1808		// ignore subsequent errors
1809		_ = dstFile.Close()
1810		return err
1811	}
1812
1813	err = dstFile.Close()
1814	if err != nil {
1815		return err
1816	}
1817
1818	return nil
1819}
1820
// diffOutputRegexPatterns are regular expressions that must all match the
// output of `restic diff` between the two snapshots created in TestDiff
// (added/removed/modified files, directories and the summary counters).
var diffOutputRegexPatterns = []string{
	"-.+modfile",
	"M.+modfile1",
	"\\+.+modfile2",
	"\\+.+modfile3",
	"\\+.+modfile4",
	"-.+submoddir",
	"-.+submoddir.subsubmoddir",
	"\\+.+submoddir2",
	"\\+.+submoddir2.subsubmoddir",
	"Files: +2 new, +1 removed, +1 changed",
	"Dirs: +3 new, +2 removed",
	"Data Blobs: +2 new, +1 removed",
	"Added: +7[0-9]{2}\\.[0-9]{3} KiB",
	"Removed: +2[0-9]{2}\\.[0-9]{3} KiB",
}
1837
// TestDiff creates two snapshots with files added, removed, modified and
// renamed in between, and checks the diff output against
// diffOutputRegexPatterns. It also verifies that diffing against an invalid
// snapshot ID fails.
func TestDiff(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	testRunInit(t, env.gopts)

	// a directory that stays unchanged between the snapshots
	datadir := filepath.Join(env.base, "testdata")
	testdir := filepath.Join(datadir, "testdir")
	subtestdir := filepath.Join(testdir, "subtestdir")
	testfile := filepath.Join(testdir, "testfile")

	rtest.OK(t, os.Mkdir(testdir, 0755))
	rtest.OK(t, os.Mkdir(subtestdir, 0755))
	rtest.OK(t, appendRandomData(testfile, 256*1024))

	// a directory that is modified between the snapshots
	moddir := filepath.Join(datadir, "moddir")
	submoddir := filepath.Join(moddir, "submoddir")
	subsubmoddir := filepath.Join(submoddir, "subsubmoddir")
	modfile := filepath.Join(moddir, "modfile")
	rtest.OK(t, os.Mkdir(moddir, 0755))
	rtest.OK(t, os.Mkdir(submoddir, 0755))
	rtest.OK(t, os.Mkdir(subsubmoddir, 0755))
	rtest.OK(t, copyFile(modfile, testfile))
	rtest.OK(t, appendRandomData(modfile+"1", 256*1024))

	snapshots := make(map[string]struct{})
	opts := BackupOptions{}
	testRunBackup(t, "", []string{datadir}, opts, env.gopts)
	snapshots, firstSnapshotID := lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))

	// rename, grow, add and create entries so the diff contains removed,
	// modified and new files and directories
	rtest.OK(t, os.Rename(modfile, modfile+"3"))
	rtest.OK(t, os.Rename(submoddir, submoddir+"2"))
	rtest.OK(t, appendRandomData(modfile+"1", 256*1024))
	rtest.OK(t, appendRandomData(modfile+"2", 256*1024))
	rtest.OK(t, os.Mkdir(modfile+"4", 0755))

	testRunBackup(t, "", []string{datadir}, opts, env.gopts)
	_, secondSnapshotID := lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))

	// an empty snapshot ID must be rejected
	_, err := testRunDiffOutput(env.gopts, "", secondSnapshotID)
	rtest.Assert(t, err != nil, "expected error on invalid snapshot id")

	out, err := testRunDiffOutput(env.gopts, firstSnapshotID, secondSnapshotID)
	if err != nil {
		t.Fatalf("expected no error from diff for test repository, got %v", err)
	}

	for _, pattern := range diffOutputRegexPatterns {
		r, err := regexp.Compile(pattern)
		rtest.Assert(t, err == nil, "failed to compile regexp %v", pattern)
		rtest.Assert(t, r.MatchString(out), "expected pattern %v in output, got\n%v", pattern, out)
	}
}
1891
1892type writeToOnly struct {
1893	rd io.Reader
1894}
1895
1896func (r *writeToOnly) Read(p []byte) (n int, err error) {
1897	return 0, fmt.Errorf("should have called WriteTo instead")
1898}
1899
1900func (r *writeToOnly) WriteTo(w io.Writer) (int64, error) {
1901	return io.Copy(w, r.rd)
1902}
1903
1904type onlyLoadWithWriteToBackend struct {
1905	restic.Backend
1906}
1907
1908func (be *onlyLoadWithWriteToBackend) Load(ctx context.Context, h restic.Handle,
1909	length int, offset int64, fn func(rd io.Reader) error) error {
1910
1911	return be.Backend.Load(ctx, h, length, offset, func(rd io.Reader) error {
1912		return fn(&writeToOnly{rd: rd})
1913	})
1914}
1915
1916func TestBackendLoadWriteTo(t *testing.T) {
1917	env, cleanup := withTestEnvironment(t)
1918	defer cleanup()
1919
1920	// setup backend which only works if it's WriteTo method is correctly propagated upwards
1921	env.gopts.backendInnerTestHook = func(r restic.Backend) (restic.Backend, error) {
1922		return &onlyLoadWithWriteToBackend{Backend: r}, nil
1923	}
1924
1925	testSetupBackupData(t, env)
1926
1927	// add some data, but make sure that it isn't cached during upload
1928	opts := BackupOptions{}
1929	env.gopts.NoCache = true
1930	testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, opts, env.gopts)
1931
1932	// loading snapshots must still work
1933	env.gopts.NoCache = false
1934	firstSnapshot := testRunList(t, "snapshots", env.gopts)
1935	rtest.Assert(t, len(firstSnapshot) == 1,
1936		"expected one snapshot, got %v", firstSnapshot)
1937
1938	// test readData using the hashing.Reader
1939	testRunCheck(t, env.gopts)
1940}
1941