1// Package fstests provides generic integration tests for the Fs and
2// Object interfaces.
3//
4// These tests are concerned with the basic functionality of a
// backend.  The tests in fs/sync and fs/operations test more
// corner cases that these tests don't.
7package fstests
8
9import (
10	"bytes"
11	"context"
12	"fmt"
13	"io"
14	"io/ioutil"
15	"math/bits"
16	"os"
17	"path"
18	"path/filepath"
19	"reflect"
20	"sort"
21	"strconv"
22	"strings"
23	"testing"
24	"time"
25
26	"github.com/pkg/errors"
27	"github.com/rclone/rclone/fs"
28	"github.com/rclone/rclone/fs/config"
29	"github.com/rclone/rclone/fs/fserrors"
30	"github.com/rclone/rclone/fs/fspath"
31	"github.com/rclone/rclone/fs/hash"
32	"github.com/rclone/rclone/fs/object"
33	"github.com/rclone/rclone/fs/operations"
34	"github.com/rclone/rclone/fs/walk"
35	"github.com/rclone/rclone/fstest"
36	"github.com/rclone/rclone/fstest/testserver"
37	"github.com/rclone/rclone/lib/encoder"
38	"github.com/rclone/rclone/lib/random"
39	"github.com/rclone/rclone/lib/readers"
40	"github.com/stretchr/testify/assert"
41	"github.com/stretchr/testify/require"
42)
43
// InternalTester is an optional interface for an Fs which allows it to
// execute backend-specific internal tests as part of the generic test suite.
//
// This interface should be implemented in 'backend'_internal_test.go and
// not in 'backend'.go so the test code is only compiled for tests.
type InternalTester interface {
	// InternalTest runs the backend's internal tests using the given *testing.T.
	InternalTest(*testing.T)
}
50
// ChunkedUploadConfig contains the values used by TestFsPutChunked
// to determine the limits of chunked uploading
type ChunkedUploadConfig struct {
	// MinChunkSize is the minimum allowed chunk size
	MinChunkSize fs.SizeSuffix
	// MaxChunkSize is the maximum allowed chunk size, 0 means no limit
	MaxChunkSize fs.SizeSuffix
	// CeilChunkSize rounds the given chunk size up to the next valid
	// value, e.g. the next power of 2 (see NextPowerOfTwo /
	// NextMultipleOf).  nil disables rounding.
	CeilChunkSize func(fs.SizeSuffix) fs.SizeSuffix
	// NeedMultipleChunks is set if more than one chunk is required on upload
	NeedMultipleChunks bool
}
65
// SetUploadChunkSizer is a test only interface to change the upload chunk size at runtime
type SetUploadChunkSizer interface {
	// SetUploadChunkSize changes the configured UploadChunkSize,
	// returning a SizeSuffix (presumably the previous value — confirm
	// with the backend implementations) and an error.
	// Will only be called while no transfer is in progress.
	SetUploadChunkSize(fs.SizeSuffix) (fs.SizeSuffix, error)
}
72
// SetUploadCutoffer is a test only interface to change the upload cutoff size at runtime
type SetUploadCutoffer interface {
	// SetUploadCutoff changes the configured UploadCutoff,
	// returning a SizeSuffix (presumably the previous value — confirm
	// with the backend implementations) and an error.
	// Will only be called while no transfer is in progress.
	SetUploadCutoff(fs.SizeSuffix) (fs.SizeSuffix, error)
}
79
// NextPowerOfTwo returns the current or next bigger power of two.
// All values less or equal 0 will return 0
//
// For i > 0 counting the leading zeros of uint64(i)-1 locates the
// highest set bit of i-1; shifting 1 past that bit yields the next
// power of two (or i itself when i is already one, since then i-1
// has one fewer significant bit).  For i <= 0, uint64(i)-1 has its
// top bit set, so the shift count becomes 64 and the result is 0.
func NextPowerOfTwo(i fs.SizeSuffix) fs.SizeSuffix {
	return 1 << uint(64-bits.LeadingZeros64(uint64(i)-1))
}
85
86// NextMultipleOf returns a function that can be used as a CeilChunkSize function.
87// This function will return the next multiple of m that is equal or bigger than i.
88// All values less or equal 0 will return 0.
89func NextMultipleOf(m fs.SizeSuffix) func(fs.SizeSuffix) fs.SizeSuffix {
90	if m <= 0 {
91		panic(fmt.Sprintf("invalid multiplier %s", m))
92	}
93	return func(i fs.SizeSuffix) fs.SizeSuffix {
94		if i <= 0 {
95			return 0
96		}
97
98		return (((i - 1) / m) + 1) * m
99	}
100}
101
102// dirsToNames returns a sorted list of names
103func dirsToNames(dirs []fs.Directory) []string {
104	names := []string{}
105	for _, dir := range dirs {
106		names = append(names, fstest.Normalize(dir.Remote()))
107	}
108	sort.Strings(names)
109	return names
110}
111
112// objsToNames returns a sorted list of object names
113func objsToNames(objs []fs.Object) []string {
114	names := []string{}
115	for _, obj := range objs {
116		names = append(names, fstest.Normalize(obj.Remote()))
117	}
118	sort.Strings(names)
119	return names
120}
121
122// findObject finds the object on the remote
123func findObject(ctx context.Context, t *testing.T, f fs.Fs, Name string) fs.Object {
124	var obj fs.Object
125	var err error
126	sleepTime := 1 * time.Second
127	for i := 1; i <= *fstest.ListRetries; i++ {
128		obj, err = f.NewObject(ctx, Name)
129		if err == nil {
130			break
131		}
132		t.Logf("Sleeping for %v for findObject eventual consistency: %d/%d (%v)", sleepTime, i, *fstest.ListRetries, err)
133		time.Sleep(sleepTime)
134		sleepTime = (sleepTime * 3) / 2
135	}
136	require.NoError(t, err)
137	return obj
138}
139
140// retry f() until no retriable error
141func retry(t *testing.T, what string, f func() error) {
142	const maxTries = 10
143	var err error
144	for tries := 1; tries <= maxTries; tries++ {
145		err = f()
146		// exit if no error, or error is not retriable
147		if err == nil || !fserrors.IsRetryError(err) {
148			break
149		}
150		t.Logf("%s error: %v - low level retry %d/%d", what, err, tries, maxTries)
151		time.Sleep(2 * time.Second)
152	}
153	require.NoError(t, err, what)
154}
155
// objectInfoWithMimeType is an fs.ObjectInfo that can override the mime type.
//
// It embeds another fs.ObjectInfo so every method other than MimeType
// is delegated to the wrapped value.
type objectInfoWithMimeType struct {
	fs.ObjectInfo
	// mimeType is the value returned by the MimeType method
	mimeType string
}
161
162// Return a wrapped fs.ObjectInfo which returns the mime type given
163func overrideMimeType(o fs.ObjectInfo, mimeType string) fs.ObjectInfo {
164	return &objectInfoWithMimeType{
165		ObjectInfo: o,
166		mimeType:   mimeType,
167	}
168}
169
// MimeType returns the overridden mime type, ignoring the one from the
// wrapped ObjectInfo.  The context is unused.
func (o *objectInfoWithMimeType) MimeType(ctx context.Context) string {
	return o.mimeType
}

// compile-time check that the interface is satisfied
var _ fs.MimeTyper = (*objectInfoWithMimeType)(nil)
177
// putTestContentsMimeType puts file with given contents to the remote and checks it but unlike TestPutLarge doesn't remove
//
// It uploads the object with the mimeType passed in if set.  The
// file's Size and Hashes are filled in from the data actually sent.
// If check is true the returned object (and a freshly looked-up copy
// of it) is verified against file.
//
// It returns the contents passed in and the uploaded object.
func putTestContentsMimeType(ctx context.Context, t *testing.T, f fs.Fs, file *fstest.Item, contents string, check bool, mimeType string) (string, fs.Object) {
	var (
		err        error
		obj        fs.Object
		uploadHash *hash.MultiHasher
	)
	// The upload is retried on retriable errors.  Each attempt builds
	// a fresh buffer and hasher so a retry hashes only its own data.
	retry(t, "Put", func() error {
		buf := bytes.NewBufferString(contents)
		uploadHash = hash.NewMultiHasher()
		// tee the upload through the hasher so we know what hashes
		// the remote should have stored
		in := io.TeeReader(buf, uploadHash)

		file.Size = int64(buf.Len())
		obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
		if mimeType != "" {
			// wrap the object info so it advertises the given mime type
			obji = overrideMimeType(obji, mimeType)
		}
		obj, err = f.Put(ctx, in, obji)
		return err
	})
	file.Hashes = uploadHash.Sums()
	if check {
		file.Check(t, obj, f.Precision())
		// Re-read the object and check again
		obj = findObject(ctx, t, f, file.Path)
		file.Check(t, obj, f.Precision())
	}
	return contents, obj
}
209
// PutTestContents puts file with given contents to the remote and checks it but unlike TestPutLarge doesn't remove
//
// No mime type is set on the upload.  It returns the contents passed
// in and the uploaded object.
func PutTestContents(ctx context.Context, t *testing.T, f fs.Fs, file *fstest.Item, contents string, check bool) (string, fs.Object) {
	return putTestContentsMimeType(ctx, t, f, file, contents, check, "")
}
214
// testPut puts a file with 100 bytes of random contents to the remote,
// checking the result.  It returns the contents and the uploaded object.
func testPut(ctx context.Context, t *testing.T, f fs.Fs, file *fstest.Item) (string, fs.Object) {
	return PutTestContents(ctx, t, f, file, random.String(100), true)
}
219
// testPutMimeType puts a file with 100 bytes of random contents and the
// given mime type to the remote, checking the result.  It returns the
// contents and the uploaded object.
func testPutMimeType(ctx context.Context, t *testing.T, f fs.Fs, file *fstest.Item, mimeType string) (string, fs.Object) {
	return putTestContentsMimeType(ctx, t, f, file, random.String(100), true, mimeType)
}
224
// TestPutLarge puts file to the remote, checks it and removes it on success.
//
// The contents are generated by a pattern reader of file.Size bytes and
// hashed on the way up so the stored hashes can be verified.  The test
// is skipped if file.Size is 0 and the remote can't upload empty files.
func TestPutLarge(ctx context.Context, t *testing.T, f fs.Fs, file *fstest.Item) {
	var (
		err        error
		obj        fs.Object
		uploadHash *hash.MultiHasher
	)
	// The upload is retried on retriable errors.  Each attempt gets a
	// fresh reader and hasher so a retry hashes only its own data.
	retry(t, "PutLarge", func() error {
		r := readers.NewPatternReader(file.Size)
		uploadHash = hash.NewMultiHasher()
		in := io.TeeReader(r, uploadHash)

		obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
		obj, err = f.Put(ctx, in, obji)
		if file.Size == 0 && err == fs.ErrorCantUploadEmptyFiles {
			t.Skip("Can't upload zero length files")
		}
		return err
	})
	file.Hashes = uploadHash.Sums()
	file.Check(t, obj, f.Precision())

	// Re-read the object and check again
	obj = findObject(ctx, t, f, file.Path)
	file.Check(t, obj, f.Precision())

	// Download the object and check it is OK: the size and the hashes
	// of the downloaded data must match what was uploaded
	downloadHash := hash.NewMultiHasher()
	download, err := obj.Open(ctx)
	require.NoError(t, err)
	n, err := io.Copy(downloadHash, download)
	require.NoError(t, err)
	assert.Equal(t, file.Size, n)
	require.NoError(t, download.Close())
	assert.Equal(t, file.Hashes, downloadHash.Sums())

	// Remove the object
	require.NoError(t, obj.Remove(ctx))
}
264
265// read the contents of an object as a string
266func readObject(ctx context.Context, t *testing.T, obj fs.Object, limit int64, options ...fs.OpenOption) string {
267	what := fmt.Sprintf("readObject(%q) limit=%d, options=%+v", obj, limit, options)
268	in, err := obj.Open(ctx, options...)
269	require.NoError(t, err, what)
270	var r io.Reader = in
271	if limit >= 0 {
272		r = &io.LimitedReader{R: r, N: limit}
273	}
274	contents, err := ioutil.ReadAll(r)
275	require.NoError(t, err, what)
276	err = in.Close()
277	require.NoError(t, err, what)
278	return string(contents)
279}
280
// ExtraConfigItem describes a config item for the tests
//
// Name is the config section (the remote name), Key the config key and
// Value the value to set; they are applied with config.FileSet before
// the tests start.
type ExtraConfigItem struct{ Name, Key, Value string }
283
// Opt is options for Run
type Opt struct {
	// RemoteName is the name of the remote to test.  It may be
	// overridden by the fstest RemoteName flag; if the result is
	// empty a temporary local remote is used instead.
	RemoteName string
	// NilObject should be a typed nil of the backend's Object type
	// (assumed — confirm against the tests that use it; it is not
	// referenced in this part of the file)
	NilObject fs.Object
	// ExtraConfig items are applied with config.FileSet before the
	// tests start
	ExtraConfig                  []ExtraConfigItem
	SkipBadWindowsCharacters     bool     // skips unusable characters for windows if set
	SkipFsMatch                  bool     // if set skip exact matching of Fs value
	TiersToTest                  []string // List of tiers which can be tested in setTier test
	// ChunkedUpload configures TestFsPutChunked - see ChunkedUploadConfig
	ChunkedUpload                ChunkedUploadConfig
	UnimplementableFsMethods     []string // List of Fs methods which can't be implemented in this wrapping Fs
	UnimplementableObjectMethods []string // List of Object methods which can't be implemented in this wrapping Fs
	SkipFsCheckWrap              bool     // if set skip FsCheckWrap
	SkipObjectCheckWrap          bool     // if set skip ObjectCheckWrap
	SkipInvalidUTF8              bool     // if set skip invalid UTF-8 checks
}
299
// stringsContains reports whether x is an element of ss.
func stringsContains(x string, ss []string) bool {
	for _, candidate := range ss {
		if candidate == x {
			return true
		}
	}
	return false
}
309
// toUpperASCII returns a copy of s with the ASCII letters a-z mapped to
// their upper case equivalents.  All other runes are left untouched.
func toUpperASCII(s string) string {
	return strings.Map(func(r rune) rune {
		if r >= 'a' && r <= 'z' {
			return r - ('a' - 'A')
		}
		return r
	}, s)
}
320
// removeConfigID removes any {xyz} parts of the name put in for
// config disambiguation, returning s unchanged if no such part exists.
func removeConfigID(s string) string {
	if bra := strings.IndexRune(s, '{'); bra >= 0 {
		if ket := strings.IndexRune(s, '}'); ket > bra {
			return s[:bra] + s[ket+1:]
		}
	}
	return s
}
331
332// Run runs the basic integration tests for a remote using the options passed in.
333//
334// They are structured in a hierarchical way so that dependencies for the tests can be created.
335//
336// For example some tests require the directory to be created - these
337// are inside the "FsMkdir" test.  Some tests require some tests files
338// - these are inside the "FsPutFiles" test.
339func Run(t *testing.T, opt *Opt) {
340	var (
341		f             fs.Fs
342		remoteName    = opt.RemoteName
343		subRemoteName string
344		subRemoteLeaf string
345		file1         = fstest.Item{
346			ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
347			Path:    "file name.txt",
348		}
349		file1Contents string
350		file1MimeType = "text/csv"
351		file2         = fstest.Item{
352			ModTime: fstest.Time("2001-02-03T04:05:10.123123123Z"),
353			Path:    `hello? sausage/êé/Hello, 世界/ " ' @ < > & ? + ≠/z.txt`,
354		}
355		isLocalRemote        bool
356		purged               bool // whether the dir has been purged or not
357		ctx                  = context.Background()
358		ci                   = fs.GetConfig(ctx)
359		unwrappableFsMethods = []string{"Command"} // these Fs methods don't need to be wrapped ever
360	)
361
362	if strings.HasSuffix(os.Getenv("RCLONE_CONFIG"), "/notfound") && *fstest.RemoteName == "" {
363		t.Skip("quicktest only")
364	}
365
366	// Skip the test if the remote isn't configured
367	skipIfNotOk := func(t *testing.T) {
368		if f == nil {
369			t.Skipf("WARN: %q not configured", remoteName)
370		}
371	}
372
373	// Skip if remote is not ListR capable, otherwise set the useListR
374	// flag, returning a function to restore its value
375	skipIfNotListR := func(t *testing.T) func() {
376		skipIfNotOk(t)
377		if f.Features().ListR == nil {
378			t.Skip("FS has no ListR interface")
379		}
380		previous := ci.UseListR
381		ci.UseListR = true
382		return func() {
383			ci.UseListR = previous
384		}
385	}
386
387	// Skip if remote is not SetTier and GetTier capable
388	skipIfNotSetTier := func(t *testing.T) {
389		skipIfNotOk(t)
390		if f.Features().SetTier == false ||
391			f.Features().GetTier == false {
392			t.Skip("FS has no SetTier & GetTier interfaces")
393		}
394	}
395
396	// Return true if f (or any of the things it wraps) is bucket
397	// based but not at the root.
398	isBucketBasedButNotRoot := func(f fs.Fs) bool {
399		f = fs.UnWrapFs(f)
400		return f.Features().BucketBased && strings.Contains(strings.Trim(f.Root(), "/"), "/")
401	}
402
403	// Initialise the remote
404	fstest.Initialise()
405
406	// Set extra config if supplied
407	for _, item := range opt.ExtraConfig {
408		config.FileSet(item.Name, item.Key, item.Value)
409	}
410	if *fstest.RemoteName != "" {
411		remoteName = *fstest.RemoteName
412	}
413	oldFstestRemoteName := fstest.RemoteName
414	fstest.RemoteName = &remoteName
415	defer func() {
416		fstest.RemoteName = oldFstestRemoteName
417	}()
418	t.Logf("Using remote %q", remoteName)
419	var err error
420	if remoteName == "" {
421		remoteName, err = fstest.LocalRemote()
422		require.NoError(t, err)
423		isLocalRemote = true
424	}
425
426	// Start any test servers if required
427	finish, err := testserver.Start(remoteName)
428	require.NoError(t, err)
429	defer finish()
430
431	// Make the Fs we are testing with, initialising the local variables
432	// subRemoteName - name of the remote after the TestRemote:
433	// subRemoteLeaf - a subdirectory to use under that
434	// remote - the result of  fs.NewFs(TestRemote:subRemoteName)
435	subRemoteName, subRemoteLeaf, err = fstest.RandomRemoteName(remoteName)
436	require.NoError(t, err)
437	f, err = fs.NewFs(context.Background(), subRemoteName)
438	if err == fs.ErrorNotFoundInConfigFile {
439		t.Logf("Didn't find %q in config file - skipping tests", remoteName)
440		return
441	}
442	require.NoError(t, err, fmt.Sprintf("unexpected error: %v", err))
443
444	// Skip the rest if it failed
445	skipIfNotOk(t)
446
447	// Check to see if Fs that wrap other Fs implement all the optional methods
448	t.Run("FsCheckWrap", func(t *testing.T) {
449		skipIfNotOk(t)
450		if opt.SkipFsCheckWrap {
451			t.Skip("Skipping FsCheckWrap on this Fs")
452		}
453		ft := new(fs.Features).Fill(ctx, f)
454		if ft.UnWrap == nil {
455			t.Skip("Not a wrapping Fs")
456		}
457		v := reflect.ValueOf(ft).Elem()
458		vType := v.Type()
459		for i := 0; i < v.NumField(); i++ {
460			vName := vType.Field(i).Name
461			if stringsContains(vName, opt.UnimplementableFsMethods) {
462				continue
463			}
464			if stringsContains(vName, unwrappableFsMethods) {
465				continue
466			}
467			field := v.Field(i)
468			// skip the bools
469			if field.Type().Kind() == reflect.Bool {
470				continue
471			}
472			if field.IsNil() {
473				t.Errorf("Missing Fs wrapper for %s", vName)
474			}
475		}
476	})
477
478	// Check to see if Fs advertises commands and they work and have docs
479	t.Run("FsCommand", func(t *testing.T) {
480		skipIfNotOk(t)
481		doCommand := f.Features().Command
482		if doCommand == nil {
483			t.Skip("No commands in this remote")
484		}
485		// Check the correct error is generated
486		_, err := doCommand(context.Background(), "NOTFOUND", nil, nil)
487		assert.Equal(t, fs.ErrorCommandNotFound, err, "Incorrect error generated on command not found")
488		// Check there are some commands in the fsInfo
489		fsInfo, _, _, _, err := fs.ConfigFs(remoteName)
490		require.NoError(t, err)
491		assert.True(t, len(fsInfo.CommandHelp) > 0, "Command is declared, must return some help in CommandHelp")
492	})
493
494	// TestFsRmdirNotFound tests deleting a non existent directory
495	t.Run("FsRmdirNotFound", func(t *testing.T) {
496		skipIfNotOk(t)
497		if isBucketBasedButNotRoot(f) {
498			t.Skip("Skipping test as non root bucket based remote")
499		}
500		err := f.Rmdir(ctx, "")
501		assert.Error(t, err, "Expecting error on Rmdir non existent")
502	})
503
504	// Make the directory
505	err = f.Mkdir(ctx, "")
506	require.NoError(t, err)
507	fstest.CheckListing(t, f, []fstest.Item{})
508
509	// TestFsString tests the String method
510	t.Run("FsString", func(t *testing.T) {
511		skipIfNotOk(t)
512		str := f.String()
513		require.NotEqual(t, "", str)
514	})
515
516	// TestFsName tests the Name method
517	t.Run("FsName", func(t *testing.T) {
518		skipIfNotOk(t)
519		got := removeConfigID(f.Name())
520		want := remoteName[:strings.LastIndex(remoteName, ":")+1]
521		if isLocalRemote {
522			want = "local:"
523		}
524		require.Equal(t, want, got+":")
525	})
526
527	// TestFsRoot tests the Root method
528	t.Run("FsRoot", func(t *testing.T) {
529		skipIfNotOk(t)
530		name := removeConfigID(f.Name()) + ":"
531		root := f.Root()
532		if isLocalRemote {
533			// only check last path element on local
534			require.Equal(t, filepath.Base(subRemoteName), filepath.Base(root))
535		} else {
536			require.Equal(t, subRemoteName, name+root)
537		}
538	})
539
540	// TestFsRmdirEmpty tests deleting an empty directory
541	t.Run("FsRmdirEmpty", func(t *testing.T) {
542		skipIfNotOk(t)
543		err := f.Rmdir(ctx, "")
544		require.NoError(t, err)
545	})
546
547	// TestFsMkdir tests making a directory
548	//
549	// Tests that require the directory to be made are within this
550	t.Run("FsMkdir", func(t *testing.T) {
551		skipIfNotOk(t)
552
553		err := f.Mkdir(ctx, "")
554		require.NoError(t, err)
555		fstest.CheckListing(t, f, []fstest.Item{})
556
557		err = f.Mkdir(ctx, "")
558		require.NoError(t, err)
559
560		// TestFsMkdirRmdirSubdir tests making and removing a sub directory
561		t.Run("FsMkdirRmdirSubdir", func(t *testing.T) {
562			skipIfNotOk(t)
563			dir := "dir/subdir"
564			err := operations.Mkdir(ctx, f, dir)
565			require.NoError(t, err)
566			fstest.CheckListingWithPrecision(t, f, []fstest.Item{}, []string{"dir", "dir/subdir"}, fs.GetModifyWindow(ctx, f))
567
568			err = operations.Rmdir(ctx, f, dir)
569			require.NoError(t, err)
570			fstest.CheckListingWithPrecision(t, f, []fstest.Item{}, []string{"dir"}, fs.GetModifyWindow(ctx, f))
571
572			err = operations.Rmdir(ctx, f, "dir")
573			require.NoError(t, err)
574			fstest.CheckListingWithPrecision(t, f, []fstest.Item{}, []string{}, fs.GetModifyWindow(ctx, f))
575		})
576
577		// TestFsListEmpty tests listing an empty directory
578		t.Run("FsListEmpty", func(t *testing.T) {
579			skipIfNotOk(t)
580			fstest.CheckListing(t, f, []fstest.Item{})
581		})
582
583		// TestFsListDirEmpty tests listing the directories from an empty directory
584		TestFsListDirEmpty := func(t *testing.T) {
585			skipIfNotOk(t)
586			objs, dirs, err := walk.GetAll(ctx, f, "", true, 1)
587			if !f.Features().CanHaveEmptyDirectories {
588				if err != fs.ErrorDirNotFound {
589					require.NoError(t, err)
590				}
591			} else {
592				require.NoError(t, err)
593			}
594			assert.Equal(t, []string{}, objsToNames(objs))
595			assert.Equal(t, []string{}, dirsToNames(dirs))
596		}
597		t.Run("FsListDirEmpty", TestFsListDirEmpty)
598
599		// TestFsListRDirEmpty tests listing the directories from an empty directory using ListR
600		t.Run("FsListRDirEmpty", func(t *testing.T) {
601			defer skipIfNotListR(t)()
602			TestFsListDirEmpty(t)
603		})
604
605		// TestFsListDirNotFound tests listing the directories from an empty directory
606		TestFsListDirNotFound := func(t *testing.T) {
607			skipIfNotOk(t)
608			objs, dirs, err := walk.GetAll(ctx, f, "does not exist", true, 1)
609			if !f.Features().CanHaveEmptyDirectories {
610				if err != fs.ErrorDirNotFound {
611					assert.NoError(t, err)
612					assert.Equal(t, 0, len(objs)+len(dirs))
613				}
614			} else {
615				assert.Equal(t, fs.ErrorDirNotFound, err)
616			}
617		}
618		t.Run("FsListDirNotFound", TestFsListDirNotFound)
619
620		// TestFsListRDirNotFound tests listing the directories from an empty directory using ListR
621		t.Run("FsListRDirNotFound", func(t *testing.T) {
622			defer skipIfNotListR(t)()
623			TestFsListDirNotFound(t)
624		})
625
626		// FsEncoding tests that file name encodings are
627		// working by uploading a series of unusual files
628		// Must be run in an empty directory
629		t.Run("FsEncoding", func(t *testing.T) {
630			skipIfNotOk(t)
631			if testing.Short() {
632				t.Skip("not running with -short")
633			}
634
635			// check no files or dirs as pre-requisite
636			fstest.CheckListingWithPrecision(t, f, []fstest.Item{}, []string{}, fs.GetModifyWindow(ctx, f))
637
638			for _, test := range []struct {
639				name string
640				path string
641			}{
642				// See lib/encoder/encoder.go for list of things that go here
643				{"control chars", "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F\x7F"},
644				{"dot", "."},
645				{"dot dot", ".."},
646				{"punctuation", "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~"},
647				{"leading space", " leading space"},
648				{"leading tilde", "~leading tilde"},
649				{"leading CR", "\rleading CR"},
650				{"leading LF", "\nleading LF"},
651				{"leading HT", "\tleading HT"},
652				{"leading VT", "\vleading VT"},
653				{"leading dot", ".leading dot"},
654				{"trailing space", "trailing space "},
655				{"trailing CR", "trailing CR\r"},
656				{"trailing LF", "trailing LF\n"},
657				{"trailing HT", "trailing HT\t"},
658				{"trailing VT", "trailing VT\v"},
659				{"trailing dot", "trailing dot."},
660				{"invalid UTF-8", "invalid utf-8\xfe"},
661			} {
662				t.Run(test.name, func(t *testing.T) {
663					if opt.SkipInvalidUTF8 && test.name == "invalid UTF-8" {
664						t.Skip("Skipping " + test.name)
665					}
666					// turn raw strings into Standard encoding
667					fileName := encoder.Standard.Encode(test.path)
668					dirName := fileName
669					t.Logf("testing %q", fileName)
670					assert.NoError(t, f.Mkdir(ctx, dirName))
671					file := fstest.Item{
672						ModTime: time.Now(),
673						Path:    dirName + "/" + fileName, // test creating a file and dir with that name
674					}
675					_, o := testPut(context.Background(), t, f, &file)
676					fstest.CheckListingWithPrecision(t, f, []fstest.Item{file}, []string{dirName}, fs.GetModifyWindow(ctx, f))
677					assert.NoError(t, o.Remove(ctx))
678					assert.NoError(t, f.Rmdir(ctx, dirName))
679					fstest.CheckListingWithPrecision(t, f, []fstest.Item{}, []string{}, fs.GetModifyWindow(ctx, f))
680				})
681			}
682		})
683
684		// TestFsNewObjectNotFound tests not finding an object
685		t.Run("FsNewObjectNotFound", func(t *testing.T) {
686			skipIfNotOk(t)
687			// Object in an existing directory
688			o, err := f.NewObject(ctx, "potato")
689			assert.Nil(t, o)
690			assert.Equal(t, fs.ErrorObjectNotFound, err)
691			// Now try an object in a non existing directory
692			o, err = f.NewObject(ctx, "directory/not/found/potato")
693			assert.Nil(t, o)
694			assert.Equal(t, fs.ErrorObjectNotFound, err)
695		})
696
697		// TestFsPutError tests uploading a file where there is an error
698		//
699		// It makes sure that aborting a file half way through does not create
700		// a file on the remote.
701		//
702		// go test -v -run 'TestIntegration/Test(Setup|Init|FsMkdir|FsPutError)$'
703		t.Run("FsPutError", func(t *testing.T) {
704			skipIfNotOk(t)
705
706			var N int64 = 5 * 1024
707			if *fstest.SizeLimit > 0 && N > *fstest.SizeLimit {
708				N = *fstest.SizeLimit
709				t.Logf("Reduce file size due to limit %d", N)
710			}
711
712			// Read N bytes then produce an error
713			contents := random.String(int(N))
714			buf := bytes.NewBufferString(contents)
715			er := &readers.ErrorReader{Err: errors.New("potato")}
716			in := io.MultiReader(buf, er)
717
718			obji := object.NewStaticObjectInfo(file2.Path, file2.ModTime, 2*N, true, nil, nil)
719			_, err := f.Put(ctx, in, obji)
720			// assert.Nil(t, obj) - FIXME some remotes return the object even on nil
721			assert.NotNil(t, err)
722
723			obj, err := f.NewObject(ctx, file2.Path)
724			assert.Nil(t, obj)
725			assert.Equal(t, fs.ErrorObjectNotFound, err)
726		})
727
728		t.Run("FsPutZeroLength", func(t *testing.T) {
729			skipIfNotOk(t)
730
731			TestPutLarge(ctx, t, f, &fstest.Item{
732				ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
733				Path:    fmt.Sprintf("zero-length-file"),
734				Size:    int64(0),
735			})
736		})
737
738		t.Run("FsOpenWriterAt", func(t *testing.T) {
739			skipIfNotOk(t)
740			openWriterAt := f.Features().OpenWriterAt
741			if openWriterAt == nil {
742				t.Skip("FS has no OpenWriterAt interface")
743			}
744			path := "writer-at-subdir/writer-at-file"
745			out, err := openWriterAt(ctx, path, -1)
746			require.NoError(t, err)
747
748			var n int
749			n, err = out.WriteAt([]byte("def"), 3)
750			assert.NoError(t, err)
751			assert.Equal(t, 3, n)
752			n, err = out.WriteAt([]byte("ghi"), 6)
753			assert.NoError(t, err)
754			assert.Equal(t, 3, n)
755			n, err = out.WriteAt([]byte("abc"), 0)
756			assert.NoError(t, err)
757			assert.Equal(t, 3, n)
758
759			assert.NoError(t, out.Close())
760
761			obj := findObject(ctx, t, f, path)
762			assert.Equal(t, "abcdefghi", readObject(ctx, t, obj, -1), "contents of file differ")
763
764			assert.NoError(t, obj.Remove(ctx))
765			assert.NoError(t, f.Rmdir(ctx, "writer-at-subdir"))
766		})
767
768		// TestFsChangeNotify tests that changes are properly
769		// propagated
770		//
771		// go test -v -remote TestDrive: -run '^Test(Setup|Init|FsChangeNotify)$' -verbose
772		t.Run("FsChangeNotify", func(t *testing.T) {
773			skipIfNotOk(t)
774
775			// Check have ChangeNotify
776			doChangeNotify := f.Features().ChangeNotify
777			if doChangeNotify == nil {
778				t.Skip("FS has no ChangeNotify interface")
779			}
780
781			err := operations.Mkdir(ctx, f, "dir")
782			require.NoError(t, err)
783
784			pollInterval := make(chan time.Duration)
785			dirChanges := map[string]struct{}{}
786			objChanges := map[string]struct{}{}
787			doChangeNotify(ctx, func(x string, e fs.EntryType) {
788				fs.Debugf(nil, "doChangeNotify(%q, %+v)", x, e)
789				if strings.HasPrefix(x, file1.Path[:5]) || strings.HasPrefix(x, file2.Path[:5]) {
790					fs.Debugf(nil, "Ignoring notify for file1 or file2: %q, %v", x, e)
791					return
792				}
793				if e == fs.EntryDirectory {
794					dirChanges[x] = struct{}{}
795				} else if e == fs.EntryObject {
796					objChanges[x] = struct{}{}
797				}
798			}, pollInterval)
799			defer func() { close(pollInterval) }()
800			pollInterval <- time.Second
801
802			var dirs []string
803			for _, idx := range []int{1, 3, 2} {
804				dir := fmt.Sprintf("dir/subdir%d", idx)
805				err = operations.Mkdir(ctx, f, dir)
806				require.NoError(t, err)
807				dirs = append(dirs, dir)
808			}
809
810			var objs []fs.Object
811			for _, idx := range []int{2, 4, 3} {
812				file := fstest.Item{
813					ModTime: time.Now(),
814					Path:    fmt.Sprintf("dir/file%d", idx),
815				}
816				_, o := testPut(ctx, t, f, &file)
817				objs = append(objs, o)
818			}
819
820			// Looks for each item in wants in changes -
821			// if they are all found it returns true
822			contains := func(changes map[string]struct{}, wants []string) bool {
823				for _, want := range wants {
824					_, ok := changes[want]
825					if !ok {
826						return false
827					}
828				}
829				return true
830			}
831
832			// Wait a little while for the changes to come in
833			wantDirChanges := []string{"dir/subdir1", "dir/subdir3", "dir/subdir2"}
834			wantObjChanges := []string{"dir/file2", "dir/file4", "dir/file3"}
835			ok := false
836			for tries := 1; tries < 10; tries++ {
837				ok = contains(dirChanges, wantDirChanges) && contains(objChanges, wantObjChanges)
838				if ok {
839					break
840				}
841				t.Logf("Try %d/10 waiting for dirChanges and objChanges", tries)
842				time.Sleep(3 * time.Second)
843			}
844			if !ok {
845				t.Errorf("%+v does not contain %+v or \n%+v does not contain %+v", dirChanges, wantDirChanges, objChanges, wantObjChanges)
846			}
847
848			// tidy up afterwards
849			for _, o := range objs {
850				assert.NoError(t, o.Remove(ctx))
851			}
852			dirs = append(dirs, "dir")
853			for _, dir := range dirs {
854				assert.NoError(t, f.Rmdir(ctx, dir))
855			}
856		})
857
858		// TestFsPut files writes file1, file2 and tests an update
859		//
860		// Tests that require file1, file2 are within this
861		t.Run("FsPutFiles", func(t *testing.T) {
862			skipIfNotOk(t)
863			file1Contents, _ = testPut(ctx, t, f, &file1)
864			/* file2Contents = */ testPut(ctx, t, f, &file2)
865			file1Contents, _ = testPutMimeType(ctx, t, f, &file1, file1MimeType)
866			// Note that the next test will check there are no duplicated file names
867
868			// TestFsListDirFile2 tests the files are correctly uploaded by doing
869			// Depth 1 directory listings
870			TestFsListDirFile2 := func(t *testing.T) {
871				skipIfNotOk(t)
872				list := func(dir string, expectedDirNames, expectedObjNames []string) {
873					var objNames, dirNames []string
874					for i := 1; i <= *fstest.ListRetries; i++ {
875						objs, dirs, err := walk.GetAll(ctx, f, dir, true, 1)
876						if errors.Cause(err) == fs.ErrorDirNotFound {
877							objs, dirs, err = walk.GetAll(ctx, f, dir, true, 1)
878						}
879						require.NoError(t, err)
880						objNames = objsToNames(objs)
881						dirNames = dirsToNames(dirs)
882						if len(objNames) >= len(expectedObjNames) && len(dirNames) >= len(expectedDirNames) {
883							break
884						}
885						t.Logf("Sleeping for 1 second for TestFsListDirFile2 eventual consistency: %d/%d", i, *fstest.ListRetries)
886						time.Sleep(1 * time.Second)
887					}
888					assert.Equal(t, expectedDirNames, dirNames)
889					assert.Equal(t, expectedObjNames, objNames)
890				}
891				dir := file2.Path
892				deepest := true
893				for dir != "" {
894					expectedObjNames := []string{}
895					expectedDirNames := []string{}
896					child := dir
897					dir = path.Dir(dir)
898					if dir == "." {
899						dir = ""
900						expectedObjNames = append(expectedObjNames, file1.Path)
901					}
902					if deepest {
903						expectedObjNames = append(expectedObjNames, file2.Path)
904						deepest = false
905					} else {
906						expectedDirNames = append(expectedDirNames, child)
907					}
908					list(dir, expectedDirNames, expectedObjNames)
909				}
910			}
911			t.Run("FsListDirFile2", TestFsListDirFile2)
912
913			// TestFsListRDirFile2 tests the files are correctly uploaded by doing
914			// Depth 1 directory listings using ListR
915			t.Run("FsListRDirFile2", func(t *testing.T) {
916				defer skipIfNotListR(t)()
917				TestFsListDirFile2(t)
918			})
919
920			// Test the files are all there with walk.ListR recursive listings
921			t.Run("FsListR", func(t *testing.T) {
922				skipIfNotOk(t)
923				objs, dirs, err := walk.GetAll(ctx, f, "", true, -1)
924				require.NoError(t, err)
925				assert.Equal(t, []string{
926					"hello? sausage",
927					"hello? sausage/êé",
928					"hello? sausage/êé/Hello, 世界",
929					"hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠",
930				}, dirsToNames(dirs))
931				assert.Equal(t, []string{
932					"file name.txt",
933					"hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠/z.txt",
934				}, objsToNames(objs))
935			})
936
937			// Test the files are all there with
938			// walk.ListR recursive listings on a sub dir
939			t.Run("FsListRSubdir", func(t *testing.T) {
940				skipIfNotOk(t)
941				objs, dirs, err := walk.GetAll(ctx, f, path.Dir(path.Dir(path.Dir(path.Dir(file2.Path)))), true, -1)
942				require.NoError(t, err)
943				assert.Equal(t, []string{
944					"hello? sausage/êé",
945					"hello? sausage/êé/Hello, 世界",
946					"hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠",
947				}, dirsToNames(dirs))
948				assert.Equal(t, []string{
949					"hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠/z.txt",
950				}, objsToNames(objs))
951			})
952
			// TestFsListDirRoot tests that DirList works in the root
			TestFsListDirRoot := func(t *testing.T) {
				skipIfNotOk(t)
				// Open a fresh Fs at the root of the remote (not the test subdir).
				rootRemote, err := fs.NewFs(context.Background(), remoteName)
				require.NoError(t, err)
				// A depth-1 listing of the root should contain the test subdir leaf.
				_, dirs, err := walk.GetAll(ctx, rootRemote, "", true, 1)
				require.NoError(t, err)
				assert.Contains(t, dirsToNames(dirs), subRemoteLeaf, "Remote leaf not found")
			}
			t.Run("FsListDirRoot", TestFsListDirRoot)

			// TestFsListRDirRoot tests that DirList works in the root using ListR
			t.Run("FsListRDirRoot", func(t *testing.T) {
				defer skipIfNotListR(t)()
				TestFsListDirRoot(t)
			})

			// TestFsListSubdir tests List works for a subdirectory
			TestFsListSubdir := func(t *testing.T) {
				skipIfNotOk(t)
				fileName := file2.Path
				var err error
				var objs []fs.Object
				var dirs []fs.Directory
				// NOTE(review): this loop issues the identical listing twice -
				// presumably a crude eventual-consistency retry; confirm intent.
				for i := 0; i < 2; i++ {
					// path.Split leaves a trailing slash on dir; strip it.
					dir, _ := path.Split(fileName)
					dir = dir[:len(dir)-1]
					objs, dirs, err = walk.GetAll(ctx, f, dir, true, -1)
				}
				require.NoError(t, err)
				// The deepest directory holds exactly file2 and no subdirectories.
				require.Len(t, objs, 1)
				assert.Equal(t, fileName, objs[0].Remote())
				require.Len(t, dirs, 0)
			}
			t.Run("FsListSubdir", TestFsListSubdir)

			// TestFsListRSubdir tests List works for a subdirectory using ListR
			// NOTE(review): "FsListRSubdir" is also used as a subtest name earlier;
			// Go reports this one as FsListRSubdir#01 - confirm this is intended.
			t.Run("FsListRSubdir", func(t *testing.T) {
				defer skipIfNotListR(t)()
				TestFsListSubdir(t)
			})

			// TestFsListLevel2 tests List works for 2 levels
			TestFsListLevel2 := func(t *testing.T) {
				skipIfNotOk(t)
				objs, dirs, err := walk.GetAll(ctx, f, "", true, 2)
				// Backends which can't do depth-limited listings are exempt.
				if err == fs.ErrorLevelNotSupported {
					return
				}
				require.NoError(t, err)
				assert.Equal(t, []string{file1.Path}, objsToNames(objs))
				assert.Equal(t, []string{"hello? sausage", "hello? sausage/êé"}, dirsToNames(dirs))
			}
			t.Run("FsListLevel2", TestFsListLevel2)

			// TestFsListRLevel2 tests List works for 2 levels using ListR
			t.Run("FsListRLevel2", func(t *testing.T) {
				defer skipIfNotListR(t)()
				TestFsListLevel2(t)
			})
1013
1014			// TestFsListFile1 tests file present
1015			t.Run("FsListFile1", func(t *testing.T) {
1016				skipIfNotOk(t)
1017				fstest.CheckListing(t, f, []fstest.Item{file1, file2})
1018			})
1019
1020			// TestFsNewObject tests NewObject
1021			t.Run("FsNewObject", func(t *testing.T) {
1022				skipIfNotOk(t)
1023				obj := findObject(ctx, t, f, file1.Path)
1024				file1.Check(t, obj, f.Precision())
1025			})
1026
			// FsNewObjectCaseInsensitive tests NewObject on a case insensitive file system
			t.Run("FsNewObjectCaseInsensitive", func(t *testing.T) {
				skipIfNotOk(t)
				if !f.Features().CaseInsensitive {
					t.Skip("Not Case Insensitive")
				}
				// Look file1 up with its name upper-cased - a case insensitive
				// backend must still resolve the original object.
				obj := findObject(ctx, t, f, toUpperASCII(file1.Path))
				file1.Check(t, obj, f.Precision())
				t.Run("Dir", func(t *testing.T) {
					// Same check for a file whose directory path is also upper-cased.
					obj := findObject(ctx, t, f, toUpperASCII(file2.Path))
					file2.Check(t, obj, f.Precision())
				})
			})

			// TestFsListFile1and2 tests two files present
			t.Run("FsListFile1and2", func(t *testing.T) {
				skipIfNotOk(t)
				fstest.CheckListing(t, f, []fstest.Item{file1, file2})
			})
1046
1047			// TestFsNewObjectDir tests NewObject on a directory which should produce fs.ErrorIsDir if possible or fs.ErrorObjectNotFound if not
1048			t.Run("FsNewObjectDir", func(t *testing.T) {
1049				skipIfNotOk(t)
1050				dir := path.Dir(file2.Path)
1051				obj, err := f.NewObject(ctx, dir)
1052				assert.Nil(t, obj)
1053				assert.True(t, err == fs.ErrorIsDir || err == fs.ErrorObjectNotFound, fmt.Sprintf("Wrong error: expecting fs.ErrorIsDir or fs.ErrorObjectNotFound but got: %#v", err))
1054			})
1055
			// TestFsPurge tests Purge
			t.Run("FsPurge", func(t *testing.T) {
				skipIfNotOk(t)

				// Check have Purge
				doPurge := f.Features().Purge
				if doPurge == nil {
					t.Skip("FS has no Purge interface")
				}

				// put up a file to purge
				fileToPurge := fstest.Item{
					ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
					Path:    "dirToPurge/fileToPurge.txt",
				}
				// testPut asserts success internally; the returned contents
				// and object are not needed here.
				_, _ = testPut(ctx, t, f, &fileToPurge)

				// Confirm the new file, its directory and the standard test
				// directories are all present before purging.
				fstest.CheckListingWithPrecision(t, f, []fstest.Item{file1, file2, fileToPurge}, []string{
					"dirToPurge",
					"hello? sausage",
					"hello? sausage/êé",
					"hello? sausage/êé/Hello, 世界",
					"hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠",
				}, fs.GetModifyWindow(ctx, f))

				// Now purge it
				err = operations.Purge(ctx, f, "dirToPurge")
				require.NoError(t, err)

				// The purged directory and its contents must be gone;
				// everything else must be untouched.
				fstest.CheckListingWithPrecision(t, f, []fstest.Item{file1, file2}, []string{
					"hello? sausage",
					"hello? sausage/êé",
					"hello? sausage/êé/Hello, 世界",
					"hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠",
				}, fs.GetModifyWindow(ctx, f))
			})
1092
			// TestFsCopy tests Copy
			t.Run("FsCopy", func(t *testing.T) {
				skipIfNotOk(t)

				// Check have Copy
				doCopy := f.Features().Copy
				if doCopy == nil {
					t.Skip("FS has no Copier interface")
				}

				// Test with file2 so have + and ' ' in file name
				var file2Copy = file2
				file2Copy.Path += "-copy"

				// do the copy
				src := findObject(ctx, t, f, file2.Path)
				dst, err := doCopy(ctx, src, file2Copy.Path)
				// A backend may implement Copy but still refuse it for
				// particular objects.
				if err == fs.ErrorCantCopy {
					t.Skip("FS can't copy")
				}
				require.NoError(t, err, fmt.Sprintf("Error: %#v", err))

				// check file exists in new listing
				fstest.CheckListing(t, f, []fstest.Item{file1, file2, file2Copy})

				// Check dst lightly - list above has checked ModTime/Hashes
				assert.Equal(t, file2Copy.Path, dst.Remote())

				// Delete copy
				err = dst.Remove(ctx)
				require.NoError(t, err)

			})
1126
			// TestFsMove tests Move
			t.Run("FsMove", func(t *testing.T) {
				skipIfNotOk(t)

				// Check have Move
				doMove := f.Features().Move
				if doMove == nil {
					t.Skip("FS has no Mover interface")
				}

				// state of files now:
				// 1: file name.txt
				// 2: hello sausage?/../z.txt

				var file1Move = file1
				var file2Move = file2

				// check happy path, i.e. no naming conflicts when rename and move are two
				// separate operations
				file2Move.Path = "other.txt"
				src := findObject(ctx, t, f, file2.Path)
				dst, err := doMove(ctx, src, file2Move.Path)
				if err == fs.ErrorCantMove {
					t.Skip("FS can't move")
				}
				require.NoError(t, err)
				// check file exists in new listing
				fstest.CheckListing(t, f, []fstest.Item{file1, file2Move})
				// Check dst lightly - list above has checked ModTime/Hashes
				assert.Equal(t, file2Move.Path, dst.Remote())
				// 1: file name.txt
				// 2: other.txt

				// Check conflict on "rename, then move"
				file1Move.Path = "moveTest/other.txt"
				src = findObject(ctx, t, f, file1.Path)
				_, err = doMove(ctx, src, file1Move.Path)
				require.NoError(t, err)
				fstest.CheckListing(t, f, []fstest.Item{file1Move, file2Move})
				// 1: moveTest/other.txt
				// 2: other.txt

				// Check conflict on "move, then rename"
				src = findObject(ctx, t, f, file1Move.Path)
				_, err = doMove(ctx, src, file1.Path)
				require.NoError(t, err)
				fstest.CheckListing(t, f, []fstest.Item{file1, file2Move})
				// 1: file name.txt
				// 2: other.txt

				// Move file2 back to its original location to restore state.
				src = findObject(ctx, t, f, file2Move.Path)
				_, err = doMove(ctx, src, file2.Path)
				require.NoError(t, err)
				fstest.CheckListing(t, f, []fstest.Item{file1, file2})
				// 1: file name.txt
				// 2: hello sausage?/../z.txt

				// Tidy up moveTest directory
				require.NoError(t, f.Rmdir(ctx, "moveTest"))
			})
1187
			// Move src to this remote using server-side move operations.
			//
			// Will only be called if src.Fs().Name() == f.Name()
			//
			// If it isn't possible then return fs.ErrorCantDirMove
			//
			// If destination exists then return fs.ErrorDirExists

			// TestFsDirMove tests DirMove
			//
			// go test -v -run 'TestIntegration/Test(Setup|Init|FsMkdir|FsPutFile1|FsPutFile2|FsUpdateFile1|FsDirMove)$'
			t.Run("FsDirMove", func(t *testing.T) {
				skipIfNotOk(t)

				// Check have DirMove
				doDirMove := f.Features().DirMove
				if doDirMove == nil {
					t.Skip("FS has no DirMover interface")
				}

				// Check it can't move onto itself
				err := doDirMove(ctx, f, "", "")
				require.Equal(t, fs.ErrorDirExists, err)

				// new remote
				newRemote, _, removeNewRemote, err := fstest.RandomRemote()
				require.NoError(t, err)
				defer removeNewRemote()

				const newName = "new_name/sub_new_name"
				// try the move
				err = newRemote.Features().DirMove(ctx, f, "", newName)
				require.NoError(t, err)

				// check remotes
				// remote should not exist here
				_, err = f.List(ctx, "")
				assert.Equal(t, fs.ErrorDirNotFound, errors.Cause(err))
				//fstest.CheckListingWithPrecision(t, remote, []fstest.Item{}, []string{}, remote.Precision())
				// Both test files should now live under newName in the new remote.
				file1Copy := file1
				file1Copy.Path = path.Join(newName, file1.Path)
				file2Copy := file2
				file2Copy.Path = path.Join(newName, file2.Path)
				fstest.CheckListingWithPrecision(t, newRemote, []fstest.Item{file2Copy, file1Copy}, []string{
					"new_name",
					"new_name/sub_new_name",
					"new_name/sub_new_name/hello? sausage",
					"new_name/sub_new_name/hello? sausage/êé",
					"new_name/sub_new_name/hello? sausage/êé/Hello, 世界",
					"new_name/sub_new_name/hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠",
				}, newRemote.Precision())

				// move it back
				err = doDirMove(ctx, newRemote, newName, "")
				require.NoError(t, err)

				// check remotes
				fstest.CheckListingWithPrecision(t, f, []fstest.Item{file2, file1}, []string{
					"hello? sausage",
					"hello? sausage/êé",
					"hello? sausage/êé/Hello, 世界",
					"hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠",
				}, f.Precision())
				// The now-empty "new_name" parent is left behind in the new remote.
				fstest.CheckListingWithPrecision(t, newRemote, []fstest.Item{}, []string{
					"new_name",
				}, newRemote.Precision())
			})
1255
1256			// TestFsRmdirFull tests removing a non empty directory
1257			t.Run("FsRmdirFull", func(t *testing.T) {
1258				skipIfNotOk(t)
1259				if isBucketBasedButNotRoot(f) {
1260					t.Skip("Skipping test as non root bucket based remote")
1261				}
1262				err := f.Rmdir(ctx, "")
1263				require.Error(t, err, "Expecting error on RMdir on non empty remote")
1264			})
1265
1266			// TestFsPrecision tests the Precision of the Fs
1267			t.Run("FsPrecision", func(t *testing.T) {
1268				skipIfNotOk(t)
1269				precision := f.Precision()
1270				if precision == fs.ModTimeNotSupported {
1271					return
1272				}
1273				if precision > time.Second || precision < 0 {
1274					t.Fatalf("Precision out of range %v", precision)
1275				}
1276				// FIXME check expected precision
1277			})
1278
1279			// TestObjectString tests the Object String method
1280			t.Run("ObjectString", func(t *testing.T) {
1281				skipIfNotOk(t)
1282				obj := findObject(ctx, t, f, file1.Path)
1283				assert.Equal(t, file1.Path, obj.String())
1284				if opt.NilObject != nil {
1285					assert.Equal(t, "<nil>", opt.NilObject.String())
1286				}
1287			})
1288
			// TestObjectFs tests the object can be found
			t.Run("ObjectFs", func(t *testing.T) {
				skipIfNotOk(t)
				obj := findObject(ctx, t, f, file1.Path)
				// If this is set we don't do the direct comparison of
				// the Fs from the object as it may be different
				if opt.SkipFsMatch {
					return
				}
				testRemote := f
				if obj.Fs() != testRemote {
					// Check to see if this wraps something else
					if doUnWrap := testRemote.Features().UnWrap; doUnWrap != nil {
						testRemote = doUnWrap()
					}
				}
				// The object's Fs (possibly after one unwrap) must be the test Fs.
				assert.Equal(t, obj.Fs(), testRemote)
			})

			// TestObjectRemote tests the Remote is correct
			t.Run("ObjectRemote", func(t *testing.T) {
				skipIfNotOk(t)
				obj := findObject(ctx, t, f, file1.Path)
				assert.Equal(t, file1.Path, obj.Remote())
			})
1314
1315			// TestObjectHashes checks all the hashes the object supports
1316			t.Run("ObjectHashes", func(t *testing.T) {
1317				skipIfNotOk(t)
1318				obj := findObject(ctx, t, f, file1.Path)
1319				file1.CheckHashes(t, obj)
1320			})
1321
			// TestObjectModTime tests the ModTime of the object is correct
			// Declared as a named func so ObjectSetModTime can re-run it after
			// changing the modification time.
			TestObjectModTime := func(t *testing.T) {
				skipIfNotOk(t)
				obj := findObject(ctx, t, f, file1.Path)
				file1.CheckModTime(t, obj, obj.ModTime(ctx), f.Precision())
			}
			t.Run("ObjectModTime", TestObjectModTime)
1329
			// TestObjectMimeType tests the MimeType of the object is correct
			t.Run("ObjectMimeType", func(t *testing.T) {
				skipIfNotOk(t)
				features := f.Features()
				obj := findObject(ctx, t, f, file1.Path)
				do, ok := obj.(fs.MimeTyper)
				if !ok {
					// The feature flag must agree with the interface support.
					require.False(t, features.ReadMimeType, "Features.ReadMimeType is set but Object.MimeType method not found")
					t.Skip("MimeType method not supported")
				}
				mimeType := do.MimeType(ctx)
				if !features.ReadMimeType {
					require.Equal(t, "", mimeType, "Features.ReadMimeType is not set but Object.MimeType returned a non-empty MimeType")
				} else if features.WriteMimeType {
					assert.Equal(t, file1MimeType, mimeType, "can read and write mime types but failed")
				} else {
					// Read-only mime type support: accept the detected
					// text/plain type with or without a charset parameter.
					if strings.ContainsRune(mimeType, ';') {
						assert.Equal(t, "text/plain; charset=utf-8", mimeType)
					} else {
						assert.Equal(t, "text/plain", mimeType)
					}
				}
			})
1353
			// TestObjectSetModTime tests that SetModTime works
			t.Run("ObjectSetModTime", func(t *testing.T) {
				skipIfNotOk(t)
				newModTime := fstest.Time("2011-12-13T14:15:16.999999999Z")
				obj := findObject(ctx, t, f, file1.Path)
				err := obj.SetModTime(ctx, newModTime)
				// Backends which can't set modification times are exempt.
				if err == fs.ErrorCantSetModTime || err == fs.ErrorCantSetModTimeWithoutDelete {
					t.Log(err)
					return
				}
				require.NoError(t, err)
				// Record the new time so subsequent checks compare against it.
				file1.ModTime = newModTime
				file1.CheckModTime(t, obj, obj.ModTime(ctx), f.Precision())
				// And make a new object and read it from there too
				TestObjectModTime(t)
			})
1370
1371			// TestObjectSize tests that Size works
1372			t.Run("ObjectSize", func(t *testing.T) {
1373				skipIfNotOk(t)
1374				obj := findObject(ctx, t, f, file1.Path)
1375				assert.Equal(t, file1.Size, obj.Size())
1376			})
1377
1378			// TestObjectOpen tests that Open works
1379			t.Run("ObjectOpen", func(t *testing.T) {
1380				skipIfNotOk(t)
1381				obj := findObject(ctx, t, f, file1.Path)
1382				assert.Equal(t, file1Contents, readObject(ctx, t, obj, -1), "contents of file1 differ")
1383			})
1384
1385			// TestObjectOpenSeek tests that Open works with SeekOption
1386			t.Run("ObjectOpenSeek", func(t *testing.T) {
1387				skipIfNotOk(t)
1388				obj := findObject(ctx, t, f, file1.Path)
1389				assert.Equal(t, file1Contents[50:], readObject(ctx, t, obj, -1, &fs.SeekOption{Offset: 50}), "contents of file1 differ after seek")
1390			})
1391
			// TestObjectOpenRange tests that Open works with RangeOption
			//
			// go test -v -run 'TestIntegration/Test(Setup|Init|FsMkdir|FsPutFile1|FsPutFile2|FsUpdateFile1|ObjectOpenRange)$'
			t.Run("ObjectOpenRange", func(t *testing.T) {
				skipIfNotOk(t)
				obj := findObject(ctx, t, f, file1.Path)
				for _, test := range []struct {
					ro                 fs.RangeOption
					// wantStart/wantEnd are Go slice bounds into file1Contents
					// (end exclusive), unlike the HTTP-style inclusive RangeOption.
					wantStart, wantEnd int
				}{
					{fs.RangeOption{Start: 5, End: 15}, 5, 16},
					{fs.RangeOption{Start: 80, End: -1}, 80, 100},
					{fs.RangeOption{Start: 81, End: 100000}, 81, 100},
					{fs.RangeOption{Start: -1, End: 20}, 80, 100}, // if start is omitted this means get the final bytes
					// {fs.RangeOption{Start: -1, End: -1}, 0, 100}, - this seems to work but the RFC doesn't define it
				} {
					got := readObject(ctx, t, obj, -1, &test.ro)
					foundAt := strings.Index(file1Contents, got)
					help := fmt.Sprintf("%#v failed want [%d:%d] got [%d:%d]", test.ro, test.wantStart, test.wantEnd, foundAt, foundAt+len(got))
					assert.Equal(t, file1Contents[test.wantStart:test.wantEnd], got, help)
				}
			})
1414
1415			// TestObjectPartialRead tests that reading only part of the object does the correct thing
1416			t.Run("ObjectPartialRead", func(t *testing.T) {
1417				skipIfNotOk(t)
1418				obj := findObject(ctx, t, f, file1.Path)
1419				assert.Equal(t, file1Contents[:50], readObject(ctx, t, obj, 50), "contents of file1 differ after limited read")
1420			})
1421
			// TestObjectUpdate tests that Update works
			t.Run("ObjectUpdate", func(t *testing.T) {
				skipIfNotOk(t)
				contents := random.String(200)
				buf := bytes.NewBufferString(contents)
				// Hash the upload as it streams so the expected hashes are known.
				hash := hash.NewMultiHasher()
				in := io.TeeReader(buf, hash)

				file1.Size = int64(buf.Len())
				obj := findObject(ctx, t, f, file1.Path)
				obji := object.NewStaticObjectInfo(file1.Path, file1.ModTime, int64(len(contents)), true, nil, obj.Fs())
				err := obj.Update(ctx, in, obji)
				require.NoError(t, err)
				file1.Hashes = hash.Sums()

				// check the object has been updated
				file1.Check(t, obj, f.Precision())

				// Re-read the object and check again
				obj = findObject(ctx, t, f, file1.Path)
				file1.Check(t, obj, f.Precision())

				// check contents correct
				assert.Equal(t, contents, readObject(ctx, t, obj, -1), "contents of updated file1 differ")
				// Later tests read file1Contents - keep it in sync.
				file1Contents = contents
			})
1448
1449			// TestObjectStorable tests that Storable works
1450			t.Run("ObjectStorable", func(t *testing.T) {
1451				skipIfNotOk(t)
1452				obj := findObject(ctx, t, f, file1.Path)
1453				require.NotNil(t, !obj.Storable(), "Expecting object to be storable")
1454			})
1455
			// TestFsIsFile tests that an error is returned along with a valid fs
			// which points to the parent directory.
			t.Run("FsIsFile", func(t *testing.T) {
				skipIfNotOk(t)
				remoteName := subRemoteName + "/" + file2.Path
				file2Copy := file2
				file2Copy.Path = "z.txt"
				// Pointing NewFs at a file should return the parent-directory
				// Fs together with fs.ErrorIsFile.
				fileRemote, err := fs.NewFs(context.Background(), remoteName)
				require.NotNil(t, fileRemote)
				assert.Equal(t, fs.ErrorIsFile, err)

				if strings.HasPrefix(remoteName, "TestChunker") && strings.Contains(remoteName, "Nometa") {
					// TODO fix chunker and remove this bypass
					t.Logf("Skip listing check -- chunker can't yet handle this tricky case")
					return
				}
				// The parent-directory Fs should list just the leaf file.
				fstest.CheckListing(t, fileRemote, []fstest.Item{file2Copy})
			})

			// TestFsIsFileNotFound tests that an error is not returned if no object is found
			t.Run("FsIsFileNotFound", func(t *testing.T) {
				skipIfNotOk(t)
				remoteName := subRemoteName + "/not found.txt"
				fileRemote, err := fs.NewFs(context.Background(), remoteName)
				require.NoError(t, err)
				fstest.CheckListing(t, fileRemote, []fstest.Item{})
			})
1483
			// Test that things work from the root
			t.Run("FromRoot", func(t *testing.T) {
				if features := f.Features(); features.BucketBased && !features.BucketBasedRootOK {
					t.Skip("Can't list from root on this remote")
				}

				// Split subRemoteName into the config name and the path within it.
				parsed, err := fspath.Parse(subRemoteName)
				require.NoError(t, err)
				configName, configLeaf := parsed.ConfigString, parsed.Path
				if configName == "" {
					configName, configLeaf = path.Split(subRemoteName)
				} else {
					configName += ":"
				}
				t.Logf("Opening root remote %q path %q from %q", configName, configLeaf, subRemoteName)
				rootRemote, err := fs.NewFs(context.Background(), configName)
				require.NoError(t, err)

				// The test files as seen from the root remote.
				file1Root := file1
				file1Root.Path = path.Join(configLeaf, file1Root.Path)
				file2Root := file2
				file2Root.Path = path.Join(configLeaf, file2Root.Path)
				// Collect every parent directory of file2, deepest first,
				// as seen from the root.
				var dirs []string
				dir := file2.Path
				for {
					dir = path.Dir(dir)
					if dir == "" || dir == "." || dir == "/" {
						break
					}
					dirs = append(dirs, path.Join(configLeaf, dir))
				}

				// Check that we can see file1 and file2 from the root
				t.Run("List", func(t *testing.T) {
					fstest.CheckListingWithRoot(t, rootRemote, configLeaf, []fstest.Item{file1Root, file2Root}, dirs, rootRemote.Precision())
				})

				// Check that listing the entries is OK
				t.Run("ListEntries", func(t *testing.T) {
					entries, err := rootRemote.List(context.Background(), configLeaf)
					require.NoError(t, err)
					// Only file1 and the shallowest directory live directly in configLeaf.
					fstest.CompareItems(t, entries, []fstest.Item{file1Root}, dirs[len(dirs)-1:], rootRemote.Precision(), "ListEntries")
				})

				// List the root with ListR
				t.Run("ListR", func(t *testing.T) {
					doListR := rootRemote.Features().ListR
					if doListR == nil {
						t.Skip("FS has no ListR interface")
					}
					file1Found, file2Found := false, false
					// Don't scan a huge root forever - give up after 10 seconds.
					stopTime := time.Now().Add(10 * time.Second)
					errTooMany := errors.New("too many files")
					errFound := errors.New("found")
					err := doListR(context.Background(), "", func(entries fs.DirEntries) error {
						for _, entry := range entries {
							remote := entry.Remote()
							if remote == file1Root.Path {
								file1Found = true
							}
							if remote == file2Root.Path {
								file2Found = true
							}
							if file1Found && file2Found {
								// Sentinel error to stop the listing early.
								return errFound
							}
						}
						if time.Now().After(stopTime) {
							return errTooMany
						}
						return nil
					})
					if err != errFound && err != errTooMany {
						assert.NoError(t, err)
					}
					if err != errTooMany {
						assert.True(t, file1Found, "file1Root not found")
						assert.True(t, file2Found, "file2Root not found")
					} else {
						t.Logf("Too many files to list - giving up")
					}
				})

				// Create a new file
				t.Run("Put", func(t *testing.T) {
					file3Root := fstest.Item{
						ModTime: time.Now(),
						Path:    path.Join(configLeaf, "created from root.txt"),
					}
					_, file3Obj := testPut(ctx, t, rootRemote, &file3Root)
					fstest.CheckListingWithRoot(t, rootRemote, configLeaf, []fstest.Item{file1Root, file2Root, file3Root}, nil, rootRemote.Precision())

					// And then remove it
					t.Run("Remove", func(t *testing.T) {
						require.NoError(t, file3Obj.Remove(context.Background()))
						fstest.CheckListingWithRoot(t, rootRemote, configLeaf, []fstest.Item{file1Root, file2Root}, nil, rootRemote.Precision())
					})
				})
			})
1583
			// TestPublicLink tests creation of sharable, public links
			// go test -v -run 'TestIntegration/Test(Setup|Init|FsMkdir|FsPutFile1|FsPutFile2|FsUpdateFile1|PublicLink)$'
			t.Run("PublicLink", func(t *testing.T) {
				skipIfNotOk(t)

				doPublicLink := f.Features().PublicLink
				if doPublicLink == nil {
					t.Skip("FS has no PublicLinker interface")
				}

				expiry := fs.Duration(60 * time.Second)

				// if object not found
				link, err := doPublicLink(ctx, file1.Path+"_does_not_exist", expiry, false)
				require.Error(t, err, "Expected to get error when file doesn't exist")
				require.Equal(t, "", link, "Expected link to be empty on error")

				// sharing file for the first time
				link1, err := doPublicLink(ctx, file1.Path, expiry, false)
				require.NoError(t, err)
				require.NotEqual(t, "", link1, "Link should not be empty")

				link2, err := doPublicLink(ctx, file2.Path, expiry, false)
				require.NoError(t, err)
				require.NotEqual(t, "", link2, "Link should not be empty")

				require.NotEqual(t, link1, link2, "Links to different files should differ")

				// sharing file for the 2nd time
				link1, err = doPublicLink(ctx, file1.Path, expiry, false)
				require.NoError(t, err)
				require.NotEqual(t, "", link1, "Link should not be empty")

				// sharing directory for the first time
				// NB this local "path" shadows the path package for the rest of the block
				path := path.Dir(file2.Path)
				link3, err := doPublicLink(ctx, path, expiry, false)
				// Not all backends can share directories.
				if err != nil && (errors.Cause(err) == fs.ErrorCantShareDirectories || errors.Cause(err) == fs.ErrorObjectNotFound) {
					t.Log("skipping directory tests as not supported on this backend")
				} else {
					require.NoError(t, err)
					require.NotEqual(t, "", link3, "Link should not be empty")

					// sharing directory for the second time
					link3, err = doPublicLink(ctx, path, expiry, false)
					require.NoError(t, err)
					require.NotEqual(t, "", link3, "Link should not be empty")

					// sharing the "root" directory in a subremote
					subRemote, _, removeSubRemote, err := fstest.RandomRemote()
					require.NoError(t, err)
					defer removeSubRemote()
					// ensure sub remote isn't empty
					buf := bytes.NewBufferString("somecontent")
					obji := object.NewStaticObjectInfo("somefile", time.Now(), int64(buf.Len()), true, nil, nil)
					_, err = subRemote.Put(ctx, buf, obji)
					require.NoError(t, err)

					link4, err := subRemote.Features().PublicLink(ctx, "", expiry, false)
					require.NoError(t, err, "Sharing root in a sub-remote should work")
					require.NotEqual(t, "", link4, "Link should not be empty")
				}
			})
1646
1647			// TestSetTier tests SetTier and GetTier functionality
1648			t.Run("SetTier", func(t *testing.T) {
1649				skipIfNotSetTier(t)
1650				obj := findObject(ctx, t, f, file1.Path)
1651				setter, ok := obj.(fs.SetTierer)
1652				assert.NotNil(t, ok)
1653				getter, ok := obj.(fs.GetTierer)
1654				assert.NotNil(t, ok)
1655				// If interfaces are supported TiersToTest should contain
1656				// at least one entry
1657				supportedTiers := opt.TiersToTest
1658				assert.NotEmpty(t, supportedTiers)
1659				// test set tier changes on supported storage classes or tiers
1660				for _, tier := range supportedTiers {
1661					err := setter.SetTier(tier)
1662					assert.Nil(t, err)
1663					got := getter.GetTier()
1664					assert.Equal(t, tier, got)
1665				}
1666			})
1667
			// Check to see if Fs that wrap other Objects implement all the optional methods
			t.Run("ObjectCheckWrap", func(t *testing.T) {
				skipIfNotOk(t)
				if opt.SkipObjectCheckWrap {
					t.Skip("Skipping FsCheckWrap on this Fs")
				}
				ft := new(fs.Features).Fill(ctx, f)
				if ft.UnWrap == nil {
					t.Skip("Not a wrapping Fs")
				}
				obj := findObject(ctx, t, f, file1.Path)
				_, unsupported := fs.ObjectOptionalInterfaces(obj)
				for _, name := range unsupported {
					// Only flag interfaces the test options haven't declared
					// as unimplementable for this backend.
					if !stringsContains(name, opt.UnimplementableObjectMethods) {
						t.Errorf("Missing Object wrapper for %s", name)
					}
				}
			})
1686
			// TestObjectRemove tests Remove
			// After this only file2 remains for the final cleanup tests.
			t.Run("ObjectRemove", func(t *testing.T) {
				skipIfNotOk(t)
				// remove file1
				obj := findObject(ctx, t, f, file1.Path)
				err := obj.Remove(ctx)
				require.NoError(t, err)
				// check listing without modtime as TestPublicLink may change the modtime
				fstest.CheckListingWithPrecision(t, f, []fstest.Item{file2}, nil, fs.ModTimeNotSupported)
			})
1697
1698			// TestAbout tests the About optional interface
1699			t.Run("ObjectAbout", func(t *testing.T) {
1700				skipIfNotOk(t)
1701
1702				// Check have About
1703				doAbout := f.Features().About
1704				if doAbout == nil {
1705					t.Skip("FS does not support About")
1706				}
1707
1708				// Can't really check the output much!
1709				usage, err := doAbout(context.Background())
1710				require.NoError(t, err)
1711				require.NotNil(t, usage)
1712				assert.NotEqual(t, int64(0), usage.Total)
1713			})
1714
1715			// Just file2 remains for Purge to clean up
1716
			// TestFsPutStream tests uploading files when size isn't known in advance.
			// This may trigger large buffer allocation in some backends, keep it
			// close to the end of suite. (See fs/operations/xtra_operations_test.go)
			t.Run("FsPutStream", func(t *testing.T) {
				skipIfNotOk(t)
				if f.Features().PutStream == nil {
					t.Skip("FS has no PutStream interface")
				}

				// Test both an empty and a non-empty streamed upload.
				for _, contentSize := range []int{0, 100} {
					t.Run(strconv.Itoa(contentSize), func(t *testing.T) {
						file := fstest.Item{
							ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
							Path:    "piped data.txt",
							Size:    -1, // use unknown size during upload
						}

						var (
							err        error
							obj        fs.Object
							uploadHash *hash.MultiHasher
						)
						retry(t, "PutStream", func() error {
							// Regenerate the contents and hasher on every
							// retry attempt so each upload is self-consistent.
							contents := random.String(contentSize)
							buf := bytes.NewBufferString(contents)
							uploadHash = hash.NewMultiHasher()
							in := io.TeeReader(buf, uploadHash)

							file.Size = -1
							obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
							obj, err = f.Features().PutStream(ctx, in, obji)
							return err
						})
						file.Hashes = uploadHash.Sums()
						file.Size = int64(contentSize) // use correct size when checking
						file.Check(t, obj, f.Precision())
						// Re-read the object and check again
						obj = findObject(ctx, t, f, file.Path)
						file.Check(t, obj, f.Precision())
						require.NoError(t, obj.Remove(ctx))
					})
				}
			})
1760
1761			// TestInternal calls InternalTest() on the Fs
1762			t.Run("Internal", func(t *testing.T) {
1763				skipIfNotOk(t)
1764				if it, ok := f.(InternalTester); ok {
1765					it.InternalTest(t)
1766				} else {
1767					t.Skipf("%T does not implement InternalTester", f)
1768				}
1769			})
1770
1771		})
1772
		// TestFsPutChunked may trigger large buffer allocation with
		// some backends (see fs/operations/xtra_operations_test.go),
		// keep it closer to the end of suite.
		t.Run("FsPutChunked", func(t *testing.T) {
			skipIfNotOk(t)
			if testing.Short() {
				t.Skip("not running with -short")
			}

			// Only backends which can change their chunk size at runtime
			// (the test-only SetUploadChunkSizer interface) can run this test.
			setUploadChunkSizer, _ := f.(SetUploadChunkSizer)
			if setUploadChunkSizer == nil {
				t.Skipf("%T does not implement SetUploadChunkSizer", f)
			}

			// The upload cutoff setter is optional - nil is checked before
			// each use below.
			setUploadCutoffer, _ := f.(SetUploadCutoffer)

			// Smallest chunk size to test: at least 100 bytes, then rounded
			// up to a backend-valid value if a rounding function is set.
			minChunkSize := opt.ChunkedUpload.MinChunkSize
			if minChunkSize < 100 {
				minChunkSize = 100
			}
			if opt.ChunkedUpload.CeilChunkSize != nil {
				minChunkSize = opt.ChunkedUpload.CeilChunkSize(minChunkSize)
			}

			// Largest chunk size to test: 2 MiB by default, at least twice
			// the minimum, capped by the backend's declared maximum, and
			// rounded up to a valid value.
			maxChunkSize := 2 * fs.Mebi
			if maxChunkSize < 2*minChunkSize {
				maxChunkSize = 2 * minChunkSize
			}
			if opt.ChunkedUpload.MaxChunkSize > 0 && maxChunkSize > opt.ChunkedUpload.MaxChunkSize {
				maxChunkSize = opt.ChunkedUpload.MaxChunkSize
			}
			if opt.ChunkedUpload.CeilChunkSize != nil {
				maxChunkSize = opt.ChunkedUpload.CeilChunkSize(maxChunkSize)
			}

			// next applies f to minChunkSize, falling back to minChunkSize
			// itself if the result would exceed maxChunkSize.
			next := func(f func(fs.SizeSuffix) fs.SizeSuffix) fs.SizeSuffix {
				s := f(minChunkSize)
				if s > maxChunkSize {
					s = minChunkSize
				}
				return s
			}

			// Candidate chunk sizes spread across [minChunkSize, maxChunkSize],
			// including a power of two and two non-aligned multiples to
			// exercise different alignment behaviour.
			chunkSizes := fs.SizeSuffixList{
				minChunkSize,
				minChunkSize + (maxChunkSize-minChunkSize)/3,
				next(NextPowerOfTwo),
				next(NextMultipleOf(100000)),
				next(NextMultipleOf(100001)),
				maxChunkSize,
			}
			chunkSizes.Sort()

			// Set the minimum chunk size, upload cutoff and reset it at the end
			oldChunkSize, err := setUploadChunkSizer.SetUploadChunkSize(minChunkSize)
			require.NoError(t, err)
			var oldUploadCutoff fs.SizeSuffix
			if setUploadCutoffer != nil {
				oldUploadCutoff, err = setUploadCutoffer.SetUploadCutoff(minChunkSize)
				require.NoError(t, err)
			}
			defer func() {
				// Restore the backend's original settings whatever happens
				_, err := setUploadChunkSizer.SetUploadChunkSize(oldChunkSize)
				assert.NoError(t, err)
				if setUploadCutoffer != nil {
					_, err := setUploadCutoffer.SetUploadCutoff(oldUploadCutoff)
					assert.NoError(t, err)
				}
			}()

			// lastCs skips non-increasing candidates so each chunk size is
			// only tested once (chunkSizes was sorted above).
			var lastCs fs.SizeSuffix
			for _, cs := range chunkSizes {
				if cs <= lastCs {
					continue
				}
				// Round the candidate up to a size the backend accepts
				if opt.ChunkedUpload.CeilChunkSize != nil {
					cs = opt.ChunkedUpload.CeilChunkSize(cs)
				}
				lastCs = cs

				t.Run(cs.String(), func(t *testing.T) {
					// Apply the chunk size (and cutoff, if supported) under test
					_, err := setUploadChunkSizer.SetUploadChunkSize(cs)
					require.NoError(t, err)
					if setUploadCutoffer != nil {
						_, err = setUploadCutoffer.SetUploadCutoff(cs)
						require.NoError(t, err)
					}

					// Pick file sizes around the chunk boundary: just under,
					// exactly at, and past a multiple of the chunk size.
					var testChunks []fs.SizeSuffix
					if opt.ChunkedUpload.NeedMultipleChunks {
						// If NeedMultipleChunks is set then test with > cs
						testChunks = []fs.SizeSuffix{cs + 1, 2 * cs, 2*cs + 1}
					} else {
						testChunks = []fs.SizeSuffix{cs - 1, cs, 2*cs + 1}
					}

					for _, fileSize := range testChunks {
						t.Run(fmt.Sprintf("%d", fileSize), func(t *testing.T) {
							// Upload and verify a file of the given size
							TestPutLarge(ctx, t, f, &fstest.Item{
								ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
								Path:    fmt.Sprintf("chunked-%s-%s.bin", cs.String(), fileSize.String()),
								Size:    int64(fileSize),
							})
						})
					}
				})
			}
		})
1881
1882		// TestFsUploadUnknownSize ensures Fs.Put() and Object.Update() don't panic when
1883		// src.Size() == -1
1884		//
1885		// This may trigger large buffer allocation in some backends, keep it
1886		// closer to the suite end. (See fs/operations/xtra_operations_test.go)
1887		t.Run("FsUploadUnknownSize", func(t *testing.T) {
1888			skipIfNotOk(t)
1889
1890			t.Run("FsPutUnknownSize", func(t *testing.T) {
1891				defer func() {
1892					assert.Nil(t, recover(), "Fs.Put() should not panic when src.Size() == -1")
1893				}()
1894
1895				contents := random.String(100)
1896				in := bytes.NewBufferString(contents)
1897
1898				obji := object.NewStaticObjectInfo("unknown-size-put.txt", fstest.Time("2002-02-03T04:05:06.499999999Z"), -1, true, nil, nil)
1899				obj, err := f.Put(ctx, in, obji)
1900				if err == nil {
1901					require.NoError(t, obj.Remove(ctx), "successfully uploaded unknown-sized file but failed to remove")
1902				}
1903				// if err != nil: it's okay as long as no panic
1904			})
1905
1906			t.Run("FsUpdateUnknownSize", func(t *testing.T) {
1907				unknownSizeUpdateFile := fstest.Item{
1908					ModTime: fstest.Time("2002-02-03T04:05:06.499999999Z"),
1909					Path:    "unknown-size-update.txt",
1910				}
1911
1912				testPut(ctx, t, f, &unknownSizeUpdateFile)
1913
1914				defer func() {
1915					assert.Nil(t, recover(), "Object.Update() should not panic when src.Size() == -1")
1916				}()
1917
1918				newContents := random.String(200)
1919				in := bytes.NewBufferString(newContents)
1920
1921				obj := findObject(ctx, t, f, unknownSizeUpdateFile.Path)
1922				obji := object.NewStaticObjectInfo(unknownSizeUpdateFile.Path, unknownSizeUpdateFile.ModTime, -1, true, nil, obj.Fs())
1923				err := obj.Update(ctx, in, obji)
1924				if err == nil {
1925					require.NoError(t, obj.Remove(ctx), "successfully updated object with unknown-sized source but failed to remove")
1926				}
1927				// if err != nil: it's okay as long as no panic
1928			})
1929
1930		})
1931
1932		// TestFsRootCollapse tests if the root of an fs "collapses" to the
1933		// absolute root. It creates a new fs of the same backend type with its
1934		// root set to a *non-existent* folder, and attempts to read the info of
1935		// an object in that folder, whose name is taken from a directory that
1936		// exists in the absolute root.
1937		// This test is added after
1938		// https://github.com/rclone/rclone/issues/3164.
1939		t.Run("FsRootCollapse", func(t *testing.T) {
1940			deepRemoteName := subRemoteName + "/deeper/nonexisting/directory"
1941			deepRemote, err := fs.NewFs(context.Background(), deepRemoteName)
1942			require.NoError(t, err)
1943
1944			colonIndex := strings.IndexRune(deepRemoteName, ':')
1945			firstSlashIndex := strings.IndexRune(deepRemoteName, '/')
1946			firstDir := deepRemoteName[colonIndex+1 : firstSlashIndex]
1947			_, err = deepRemote.NewObject(ctx, firstDir)
1948			require.Equal(t, fs.ErrorObjectNotFound, err)
1949			// If err is not fs.ErrorObjectNotFound, it means the backend is
1950			// somehow confused about root and absolute root.
1951		})
1952
		// Purge the folder
		// ErrorDirNotFound is tolerated here since earlier subtests may
		// already have removed the directory; any other error is fatal.
		err = operations.Purge(ctx, f, "")
		if errors.Cause(err) != fs.ErrorDirNotFound {
			require.NoError(t, err)
		}
		purged = true // skip the fallback purge after this test block
		// The remote should now list as completely empty
		fstest.CheckListing(t, f, []fstest.Item{})

		// Check purging again if not bucket based
		if !isBucketBasedButNotRoot(f) {
			// Purging a now non-existent directory should fail...
			err = operations.Purge(ctx, f, "")
			assert.Error(t, err, "Expecting error after on second purge")
			// ...ideally with fs.ErrorDirNotFound, but only warn if not
			if errors.Cause(err) != fs.ErrorDirNotFound {
				t.Log("Warning: this should produce fs.ErrorDirNotFound")
			}
		}
1969
1970	})
1971
	// Check directory is purged
	if !purged {
		// Best-effort fallback cleanup; the error is deliberately ignored
		// as there is nothing useful to do if it fails.
		_ = operations.Purge(ctx, f, "")
	}
1976
1977	t.Run("FsShutdown", func(t *testing.T) {
1978		do := f.Features().Shutdown
1979		if do == nil {
1980			t.Skip("Shutdown method not supported")
1981		}
1982		require.NoError(t, do(ctx))
1983		require.NoError(t, do(ctx), "must be able to call Shutdown twice")
1984	})
1985
1986	// Remove the local directory so we don't clutter up /tmp
1987	if strings.HasPrefix(remoteName, "/") {
1988		t.Log("remoteName", remoteName)
1989		// Remove temp directory
1990		err := os.Remove(remoteName)
1991		require.NoError(t, err)
1992	}
1993}
1994