1// Package amazonclouddrive provides an interface to the Amazon Cloud
2// Drive object storage system.
3package amazonclouddrive
4
5/*
6FIXME make searching for directory in id and file in id more efficient
7- use the name: search parameter - remember the escaping rules
8- use Folder GetNode and GetFile
9
10FIXME make the default for no files and no dirs be (FILE & FOLDER) so
11we ignore assets completely!
12*/
13
14import (
15	"context"
16	"encoding/json"
17	"fmt"
18	"io"
19	"net/http"
20	"path"
21	"strings"
22	"time"
23
24	acd "github.com/ncw/go-acd"
25	"github.com/pkg/errors"
26	"github.com/rclone/rclone/fs"
27	"github.com/rclone/rclone/fs/config"
28	"github.com/rclone/rclone/fs/config/configmap"
29	"github.com/rclone/rclone/fs/config/configstruct"
30	"github.com/rclone/rclone/fs/fserrors"
31	"github.com/rclone/rclone/fs/fshttp"
32	"github.com/rclone/rclone/fs/hash"
33	"github.com/rclone/rclone/lib/dircache"
34	"github.com/rclone/rclone/lib/encoder"
35	"github.com/rclone/rclone/lib/oauthutil"
36	"github.com/rclone/rclone/lib/pacer"
37	"golang.org/x/oauth2"
38)
39
const (
	folderKind               = "FOLDER"    // node kind for a directory
	fileKind                 = "FILE"      // node kind for a regular file
	statusAvailable          = "AVAILABLE" // nodes in any other status are ignored when listing
	timeFormat               = time.RFC3339 // 2014-03-07T22:31:12.173Z
	minSleep                 = 20 * time.Millisecond // minimum sleep between API calls for the pacer
	warnFileSize             = 50000 << 20            // Display warning for files larger than this size
	defaultTempLinkThreshold = fs.SizeSuffix(9 << 30) // Download files bigger than this via the tempLink
)
49
// Globals
var (
	// Description of how to auth for this app
	//
	// NOTE(review): ClientID and ClientSecret are left blank here -
	// presumably they are filled in from the user's config by
	// oauthutil (see SharedOptions in init) - confirm.
	acdConfig = &oauth2.Config{
		Scopes: []string{"clouddrive:read_all", "clouddrive:write"},
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://www.amazon.com/ap/oa",
			TokenURL: "https://api.amazon.com/auth/o2/token",
		},
		ClientID:     "",
		ClientSecret: "",
		RedirectURL:  oauthutil.RedirectURL,
	}
)
64
// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "amazon cloud drive",
		Prefix:      "acd",
		Description: "Amazon Drive",
		NewFs:       NewFs,
		// Config runs the OAuth flow to obtain a token for this remote.
		Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
			return oauthutil.ConfigOut("", &oauthutil.Options{
				OAuth2Config: acdConfig,
			})
		},
		// Backend options: the shared OAuth options plus the ACD
		// specific ones below.
		Options: append(oauthutil.SharedOptions, []fs.Option{{
			Name:     "checkpoint",
			Help:     "Checkpoint for internal polling (debug).",
			Hide:     fs.OptionHideBoth,
			Advanced: true,
		}, {
			Name: "upload_wait_per_gb",
			Help: `Additional time per GiB to wait after a failed complete upload to see if it appears.

Sometimes Amazon Drive gives an error when a file has been fully
uploaded but the file appears anyway after a little while.  This
happens sometimes for files over 1 GiB in size and nearly every time for
files bigger than 10 GiB. This parameter controls the time rclone waits
for the file to appear.

The default value for this parameter is 3 minutes per GiB, so by
default it will wait 3 minutes for every GiB uploaded to see if the
file appears.

You can disable this feature by setting it to 0. This may cause
conflict errors as rclone retries the failed upload but the file will
most likely appear correctly eventually.

These values were determined empirically by observing lots of uploads
of big files for a range of file sizes.

Upload with the "-v" flag to see more info about what rclone is doing
in this situation.`,
			Default:  fs.Duration(180 * time.Second),
			Advanced: true,
		}, {
			Name: "templink_threshold",
			Help: `Files >= this size will be downloaded via their tempLink.

Files this size or more will be downloaded via their "tempLink". This
is to work around a problem with Amazon Drive which blocks downloads
of files bigger than about 10 GiB. The default for this is 9 GiB which
shouldn't need to be changed.

To download files above this threshold, rclone requests a "tempLink"
which downloads the file through a temporary URL directly from the
underlying S3 storage.`,
			Default:  defaultTempLinkThreshold,
			Advanced: true,
		}, {
			Name:     config.ConfigEncoding,
			Help:     config.ConfigEncodingHelp,
			Advanced: true,
			// Encode invalid UTF-8 bytes as json doesn't handle them properly.
			Default: (encoder.Base |
				encoder.EncodeInvalidUtf8),
		}}...),
	})
}
131
// Options defines the configuration for this backend
type Options struct {
	Checkpoint        string               `config:"checkpoint"`         // checkpoint for internal polling (debug only)
	UploadWaitPerGB   fs.Duration          `config:"upload_wait_per_gb"` // how long to wait per GiB after a failed complete upload
	TempLinkThreshold fs.SizeSuffix        `config:"templink_threshold"` // files >= this size are downloaded via their tempLink
	Enc               encoder.MultiEncoder `config:"encoding"`           // name encoding for this backend
}
139
// Fs represents a remote acd server
//
// It implements fs.Fs plus the optional Mover, DirMover, Purger and
// DirCacheFlusher interfaces (see the methods below).
type Fs struct {
	name         string             // name of this remote
	features     *fs.Features       // optional features
	opt          Options            // options for this Fs
	ci           *fs.ConfigInfo     // global config
	c            *acd.Client        // the connection to the acd server
	noAuthClient *http.Client       // unauthenticated http client
	root         string             // the path we are working on
	dirCache     *dircache.DirCache // Map of directory path to directory id
	pacer        *fs.Pacer          // pacer for API calls
	trueRootID   string             // ID of true root directory
	tokenRenewer *oauthutil.Renew   // renew the token on expiry
}
154
// Object describes an acd object
//
// Will definitely have info but maybe not meta
type Object struct {
	fs     *Fs       // what this object is part of
	remote string    // The remote path
	info   *acd.Node // Info from the acd object if known (nil until readMetaData succeeds)
}
163
164// ------------------------------------------------------------
165
166// Name of the remote (as passed into NewFs)
167func (f *Fs) Name() string {
168	return f.name
169}
170
171// Root of the remote (as passed into NewFs)
172func (f *Fs) Root() string {
173	return f.root
174}
175
176// String converts this Fs to a string
177func (f *Fs) String() string {
178	return fmt.Sprintf("amazon drive root '%s'", f.root)
179}
180
181// Features returns the optional features of this Fs
182func (f *Fs) Features() *fs.Features {
183	return f.features
184}
185
// parsePath parses an acd 'url', stripping any leading and trailing
// slashes from the path.
func parsePath(path string) (root string) {
	return strings.Trim(path, "/")
}
191
// retryErrorCodes is a slice of error codes that we will retry
//
// Consulted by shouldRetry via fserrors.ShouldRetryHTTP.
var retryErrorCodes = []int{
	400, // Bad request (seen in "Next token is expired")
	401, // Unauthorized (seen in "Token has expired")
	408, // Request Timeout
	429, // Rate exceeded.
	500, // Get occasional 500 Internal Server Error
	502, // Bad Gateway when doing big listings
	503, // Service Unavailable
	504, // Gateway Time-out
}
203
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried.  It returns the err as a convenience so
// callers can write `return f.shouldRetry(ctx, resp, err)`.
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
	// Never retry if the context has been cancelled
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	if resp != nil {
		// 401 means the token has expired - invalidate it so the
		// renewer fetches a fresh one, then retry the call
		if resp.StatusCode == 401 {
			f.tokenRenewer.Invalidate()
			fs.Debugf(f, "401 error received - invalidating token")
			return true, err
		}
		// Work around receiving this error sporadically on authentication
		//
		// HTTP code 403: "403 Forbidden", response body: {"message":"Authorization header requires 'Credential' parameter. Authorization header requires 'Signature' parameter. Authorization header requires 'SignedHeaders' parameter. Authorization header requires existence of either a 'X-Amz-Date' or a 'Date' header. Authorization=Bearer"}
		if resp.StatusCode == 403 && strings.Contains(err.Error(), "Authorization header requires") {
			fs.Debugf(f, "403 \"Authorization header requires...\" error received - retry")
			return true, err
		}
	}
	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
226
227// If query parameters contain X-Amz-Algorithm remove Authorization header
228//
229// This happens when ACD redirects to S3 for the download.  The oauth
230// transport puts an Authorization header in which we need to remove
231// otherwise we get this message from AWS
232//
233// Only one auth mechanism allowed; only the X-Amz-Algorithm query
234// parameter, Signature query string parameter or the Authorization
235// header should be specified
236func filterRequest(req *http.Request) {
237	if req.URL.Query().Get("X-Amz-Algorithm") != "" {
238		fs.Debugf(nil, "Removing Authorization: header after redirect to S3")
239		req.Header.Del("Authorization")
240	}
241}
242
// NewFs constructs an Fs from the path, container:path
//
// If root points at a file (rather than a directory) it returns an Fs
// rooted at the parent directory along with fs.ErrorIsFile.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	root = parsePath(root)
	baseClient := fshttp.NewClient(ctx)
	// Install filterRequest so the Authorization header is stripped
	// when ACD redirects big downloads to S3
	if do, ok := baseClient.Transport.(interface {
		SetRequestFilter(f func(req *http.Request))
	}); ok {
		do.SetRequestFilter(filterRequest)
	} else {
		fs.Debugf(name+":", "Couldn't add request filter - large file downloads will fail")
	}
	oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, acdConfig, baseClient)
	if err != nil {
		return nil, errors.Wrap(err, "failed to configure Amazon Drive")
	}

	c := acd.NewClient(oAuthClient)
	ci := fs.GetConfig(ctx)
	f := &Fs{
		name:         name,
		root:         root,
		opt:          *opt,
		ci:           ci,
		c:            c,
		pacer:        fs.NewPacer(ctx, pacer.NewAmazonCloudDrive(pacer.MinSleep(minSleep))),
		noAuthClient: fshttp.NewClient(ctx),
	}
	f.features = (&fs.Features{
		CaseInsensitive:         true,
		ReadMimeType:            true,
		CanHaveEmptyDirectories: true,
	}).Fill(ctx, f)

	// Renew the token in the background - getRootInfo is used as a
	// cheap authenticated call to refresh the token
	f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
		_, err := f.getRootInfo(ctx)
		return err
	})

	// Update endpoints
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		_, resp, err = f.c.Account.GetEndpoints()
		return f.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "failed to get endpoints")
	}

	// Get rootID
	rootInfo, err := f.getRootInfo(ctx)
	if err != nil || rootInfo.Id == nil {
		return nil, errors.Wrap(err, "failed to get root")
	}
	f.trueRootID = *rootInfo.Id

	f.dirCache = dircache.New(root, f.trueRootID, f)

	// Find the current root
	err = f.dirCache.FindRoot(ctx, false)
	if err != nil {
		// Assume it is a file - build a temporary Fs rooted at the
		// parent directory and look for the leaf there
		newRoot, remote := dircache.SplitPath(root)
		tempF := *f
		tempF.dirCache = dircache.New(newRoot, f.trueRootID, &tempF)
		tempF.root = newRoot
		// Make new Fs which is the parent
		err = tempF.dirCache.FindRoot(ctx, false)
		if err != nil {
			// No root so return old f
			return f, nil
		}
		_, err := tempF.newObjectWithInfo(ctx, remote, nil)
		if err != nil {
			if err == fs.ErrorObjectNotFound {
				// File doesn't exist so return old f
				return f, nil
			}
			return nil, err
		}
		// XXX: update the old f here instead of returning tempF, since
		// `features` were already filled with functions having *f as a receiver.
		// See https://github.com/rclone/rclone/issues/2182
		f.dirCache = tempF.dirCache
		f.root = tempF.root
		// return an error with an fs which points to the parent
		return f, fs.ErrorIsFile
	}
	return f, nil
}
339
340// getRootInfo gets the root folder info
341func (f *Fs) getRootInfo(ctx context.Context) (rootInfo *acd.Folder, err error) {
342	var resp *http.Response
343	err = f.pacer.Call(func() (bool, error) {
344		rootInfo, resp, err = f.c.Nodes.GetRoot()
345		return f.shouldRetry(ctx, resp, err)
346	})
347	return rootInfo, err
348}
349
350// Return an Object from a path
351//
352// If it can't be found it returns the error fs.ErrorObjectNotFound.
353func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *acd.Node) (fs.Object, error) {
354	o := &Object{
355		fs:     f,
356		remote: remote,
357	}
358	if info != nil {
359		// Set info but not meta
360		o.info = info
361	} else {
362		err := o.readMetaData(ctx) // reads info and meta, returning an error
363		if err != nil {
364			return nil, err
365		}
366	}
367	return o, nil
368}
369
370// NewObject finds the Object at remote.  If it can't be found
371// it returns the error fs.ErrorObjectNotFound.
372func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
373	return f.newObjectWithInfo(ctx, remote, nil)
374}
375
376// FindLeaf finds a directory of name leaf in the folder with ID pathID
377func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
378	//fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
379	folder := acd.FolderFromId(pathID, f.c.Nodes)
380	var resp *http.Response
381	var subFolder *acd.Folder
382	err = f.pacer.Call(func() (bool, error) {
383		subFolder, resp, err = folder.GetFolder(f.opt.Enc.FromStandardName(leaf))
384		return f.shouldRetry(ctx, resp, err)
385	})
386	if err != nil {
387		if err == acd.ErrorNodeNotFound {
388			//fs.Debugf(f, "...Not found")
389			return "", false, nil
390		}
391		//fs.Debugf(f, "...Error %v", err)
392		return "", false, err
393	}
394	if subFolder.Status != nil && *subFolder.Status != statusAvailable {
395		fs.Debugf(f, "Ignoring folder %q in state %q", leaf, *subFolder.Status)
396		time.Sleep(1 * time.Second) // FIXME wait for problem to go away!
397		return "", false, nil
398	}
399	//fs.Debugf(f, "...Found(%q, %v)", *subFolder.Id, leaf)
400	return *subFolder.Id, true, nil
401}
402
403// CreateDir makes a directory with pathID as parent and name leaf
404func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
405	//fmt.Printf("CreateDir(%q, %q)\n", pathID, leaf)
406	folder := acd.FolderFromId(pathID, f.c.Nodes)
407	var resp *http.Response
408	var info *acd.Folder
409	err = f.pacer.Call(func() (bool, error) {
410		info, resp, err = folder.CreateFolder(f.opt.Enc.FromStandardName(leaf))
411		return f.shouldRetry(ctx, resp, err)
412	})
413	if err != nil {
414		//fmt.Printf("...Error %v\n", err)
415		return "", err
416	}
417	//fmt.Printf("...Id %q\n", *info.Id)
418	return *info.Id, nil
419}
420
// listAllFn is the user function called by listAll to process each
// File item found
//
// Should return true to finish processing early
type listAllFn func(*acd.Node) bool
428
// Lists the directory required calling the user function on each item found
//
// If directoriesOnly is set only folders are sent to fn; if filesOnly
// is set only files are sent; otherwise both are sent (assets are
// filtered out by the callers' switch on Kind).
//
// If the user fn ever returns true then it early exits with found = true
//
// NOTE(review): the title parameter is currently unused.
func (f *Fs) listAll(ctx context.Context, dirID string, title string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
	query := "parents:" + dirID
	if directoriesOnly {
		query += " AND kind:" + folderKind
	} else if filesOnly {
		query += " AND kind:" + fileKind
	} else {
		// FIXME none of these work
		//query += " AND kind:(" + fileKind + " OR " + folderKind + ")"
		//query += " AND (kind:" + fileKind + " OR kind:" + folderKind + ")"
	}
	opts := acd.NodeListOptions{
		Filters: query,
	}
	var nodes []*acd.Node
	var out []*acd.Node
	//var resp *http.Response
	// Page through the listing - GetNodes returns nil when there are
	// no more pages
	for {
		var resp *http.Response
		err = f.pacer.CallNoRetry(func() (bool, error) {
			nodes, resp, err = f.c.Nodes.GetNodes(&opts)
			return f.shouldRetry(ctx, resp, err)
		})
		if err != nil {
			return false, err
		}
		if nodes == nil {
			break
		}
		for _, node := range nodes {
			// Skip nodes with missing fields
			if node.Name != nil && node.Id != nil && node.Kind != nil && node.Status != nil {
				// Ignore nodes if not AVAILABLE
				if *node.Status != statusAvailable {
					continue
				}
				// Ignore bogus nodes Amazon Drive sometimes reports
				hasValidParent := false
				for _, parent := range node.Parents {
					if parent == dirID {
						hasValidParent = true
						break
					}
				}
				if !hasValidParent {
					continue
				}
				*node.Name = f.opt.Enc.ToStandardName(*node.Name)
				// Store the nodes up in case we have to retry the listing
				out = append(out, node)
			}
		}
	}
	// Send the nodes now - done after the listing completes so fn
	// never sees a partial result from a retried page
	for _, node := range out {
		if fn(node) {
			found = true
			break
		}
	}
	return
}
493
// List the objects and directories in dir into entries.  The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
	if err != nil {
		return nil, err
	}
	maxTries := f.ci.LowLevelRetries
	var iErr error
	// Retry the whole listing if it fails with a retryable error as
	// ACD listings can expire part way through
	for tries := 1; tries <= maxTries; tries++ {
		entries = nil
		_, err = f.listAll(ctx, directoryID, "", false, false, func(node *acd.Node) bool {
			remote := path.Join(dir, *node.Name)
			switch *node.Kind {
			case folderKind:
				// cache the directory ID for later lookups
				f.dirCache.Put(remote, *node.Id)
				when, _ := time.Parse(timeFormat, *node.ModifiedDate) // FIXME
				d := fs.NewDir(remote, when).SetID(*node.Id)
				entries = append(entries, d)
			case fileKind:
				o, err := f.newObjectWithInfo(ctx, remote, node)
				if err != nil {
					iErr = err
					return true
				}
				entries = append(entries, o)
			default:
				// ignore ASSET, etc.
			}
			return false
		})
		if iErr != nil {
			return nil, iErr
		}
		if fserrors.IsRetryError(err) {
			fs.Debugf(f, "Directory listing error for %q: %v - low level retry %d/%d", dir, err, tries, maxTries)
			continue
		}
		if err != nil {
			return nil, err
		}
		break
	}
	return entries, nil
}
547
// checkUpload checks to see if an error occurred after the file was
// completely uploaded.
//
// If it was then it waits for a while to see if the file really
// exists and is the right size and returns an updated info.
//
// If the file wasn't found or was the wrong size then it returns the
// original error.
//
// This is a workaround for Amazon sometimes returning
//
//  * 408 REQUEST_TIMEOUT
//  * 504 GATEWAY_TIMEOUT
//  * 500 Internal server error
//
// At the end of large uploads.  The speculation is that the timeout
// is waiting for the sha1 hashing to complete and the file may well
// be properly uploaded.
//
// NOTE(review): the uploadTime parameter is currently unused.
func (f *Fs) checkUpload(ctx context.Context, resp *http.Response, in io.Reader, src fs.ObjectInfo, inInfo *acd.File, inErr error, uploadTime time.Duration) (fixedError bool, info *acd.File, err error) {
	// Return if no error - all is well
	if inErr == nil {
		return false, inInfo, inErr
	}
	// If not one of the errors we can fix return
	// if resp == nil || resp.StatusCode != 408 && resp.StatusCode != 500 && resp.StatusCode != 504 {
	// 	return false, inInfo, inErr
	// }

	// The HTTP status
	httpStatus := "HTTP status UNKNOWN"
	if resp != nil {
		httpStatus = resp.Status
	}

	// check to see if we read to the end - if not the upload was
	// aborted mid-stream so the file can't have been fully uploaded
	buf := make([]byte, 1)
	n, err := in.Read(buf)
	if !(n == 0 && err == io.EOF) {
		fs.Debugf(src, "Upload error detected but didn't finish upload: %v (%q)", inErr, httpStatus)
		return false, inInfo, inErr
	}

	// Don't wait for uploads - assume they will appear later
	if f.opt.UploadWaitPerGB <= 0 {
		fs.Debugf(src, "Upload error detected but waiting disabled: %v (%q)", inErr, httpStatus)
		return false, inInfo, inErr
	}

	// Time we should wait for the upload, scaled by the file size
	uploadWaitPerByte := float64(f.opt.UploadWaitPerGB) / 1024 / 1024 / 1024
	timeToWait := time.Duration(uploadWaitPerByte * float64(src.Size()))

	const sleepTime = 5 * time.Second                        // sleep between tries
	retries := int((timeToWait + sleepTime - 1) / sleepTime) // number of retries, rounded up

	fs.Debugf(src, "Error detected after finished upload - waiting to see if object was uploaded correctly: %v (%q)", inErr, httpStatus)
	remote := src.Remote()
	// Poll for the object until it appears with the right size or we
	// run out of retries
	for i := 1; i <= retries; i++ {
		o, err := f.NewObject(ctx, remote)
		if err == fs.ErrorObjectNotFound {
			fs.Debugf(src, "Object not found - waiting (%d/%d)", i, retries)
		} else if err != nil {
			fs.Debugf(src, "Object returned error - waiting (%d/%d): %v", i, retries, err)
		} else {
			if src.Size() == o.Size() {
				fs.Debugf(src, "Object found with correct size %d after waiting (%d/%d) - %v - returning with no error", src.Size(), i, retries, sleepTime*time.Duration(i-1))
				info = &acd.File{
					Node: o.(*Object).info,
				}
				return true, info, nil
			}
			fs.Debugf(src, "Object found but wrong size %d vs %d - waiting (%d/%d)", src.Size(), o.Size(), i, retries)
		}
		time.Sleep(sleepTime)
	}
	fs.Debugf(src, "Giving up waiting for object - returning original error: %v (%q)", inErr, httpStatus)
	return false, inInfo, inErr
}
626
// Put the object into the container
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	remote := src.Remote()
	size := src.Size()
	// Temporary Object under construction
	o := &Object{
		fs:     f,
		remote: remote,
	}
	// Check if object already exists - if so update it in place
	err := o.readMetaData(ctx)
	switch err {
	case nil:
		return o, o.Update(ctx, in, src, options...)
	case fs.ErrorObjectNotFound:
		// Not found so create it
	default:
		return nil, err
	}
	// If not create it
	leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
	if err != nil {
		return nil, err
	}
	if size > warnFileSize {
		fs.Logf(f, "Warning: file %q may fail because it is too big. Use --max-size=%dM to skip large files.", remote, warnFileSize>>20)
	}
	folder := acd.FolderFromId(directoryID, o.fs.c.Nodes)
	var info *acd.File
	var resp *http.Response
	// CallNoRetry because checkUpload decides whether the upload
	// actually succeeded despite the error before we retry
	err = f.pacer.CallNoRetry(func() (bool, error) {
		start := time.Now()
		// Keep the token fresh for the duration of the upload
		f.tokenRenewer.Start()
		info, resp, err = folder.Put(in, f.opt.Enc.FromStandardName(leaf))
		f.tokenRenewer.Stop()
		var ok bool
		ok, info, err = f.checkUpload(ctx, resp, in, src, info, err, time.Since(start))
		if ok {
			return false, nil
		}
		return f.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, err
	}
	o.info = info.Node
	return o, nil
}
679
680// Mkdir creates the container if it doesn't exist
681func (f *Fs) Mkdir(ctx context.Context, dir string) error {
682	_, err := f.dirCache.FindDir(ctx, dir, true)
683	return err
684}
685
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	//  go test -v -run '^Test(Setup|Init|FsMkdir|FsPutFile1|FsPutFile2|FsUpdateFile1|FsMove)$'
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}

	// create the destination directory if necessary
	srcLeaf, srcDirectoryID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
	if err != nil {
		return nil, err
	}
	dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(ctx, remote, true)
	if err != nil {
		return nil, err
	}
	err = f.moveNode(ctx, srcObj.remote, dstLeaf, dstDirectoryID, srcObj.info, srcLeaf, srcDirectoryID, false)
	if err != nil {
		return nil, err
	}
	// Wait for directory caching so we can no longer see the old
	// object and see the new object
	time.Sleep(200 * time.Millisecond) // enough time 90% of the time
	var (
		dstObj         fs.Object
		srcErr, dstErr error
	)
	// Poll until the move is visible - ACD's listings are eventually
	// consistent so the old object may linger and the new one may lag
	for i := 1; i <= f.ci.LowLevelRetries; i++ {
		_, srcErr = srcObj.fs.NewObject(ctx, srcObj.remote) // try reading the object
		if srcErr != nil && srcErr != fs.ErrorObjectNotFound {
			// exit if error on source
			return nil, srcErr
		}
		dstObj, dstErr = f.NewObject(ctx, remote)
		if dstErr != nil && dstErr != fs.ErrorObjectNotFound {
			// exit if error on dst
			return nil, dstErr
		}
		if srcErr == fs.ErrorObjectNotFound && dstErr == nil {
			// finished if src not found and dst found
			break
		}
		fs.Debugf(src, "Wait for directory listing to update after move %d/%d", i, f.ci.LowLevelRetries)
		time.Sleep(1 * time.Second)
	}
	return dstObj, dstErr
}
743
744// DirCacheFlush resets the directory cache - used in testing as an
745// optional interface
746func (f *Fs) DirCacheFlush() {
747	f.dirCache.ResetRoot()
748}
749
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
	srcFs, ok := src.(*Fs)
	if !ok {
		fs.Debugf(src, "DirMove error: not same remote type")
		return fs.ErrorCantDirMove
	}
	srcPath := path.Join(srcFs.root, srcRemote)
	dstPath := path.Join(f.root, dstRemote)

	// Refuse to move to or from the root
	if srcPath == "" || dstPath == "" {
		fs.Debugf(src, "DirMove error: Can't move root")
		return errors.New("can't move root directory")
	}

	// Find ID of dst parent, creating subdirs if necessary
	dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(ctx, dstRemote, true)
	if err != nil {
		return err
	}

	// Check destination does not exist
	_, err = f.dirCache.FindDir(ctx, dstRemote, false)
	if err == fs.ErrorDirNotFound {
		// OK
	} else if err != nil {
		return err
	} else {
		return fs.ErrorDirExists
	}

	// Find ID of src parent
	_, srcDirectoryID, err := srcFs.dirCache.FindPath(ctx, srcRemote, false)
	if err != nil {
		return err
	}
	srcLeaf, _ := dircache.SplitPath(srcPath)

	// Find ID of src
	srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
	if err != nil {
		return err
	}

	// FIXME make a proper node.UpdateMetadata command
	// Fetch the full node metadata for the source - moveNode needs a
	// populated node, not just the ID
	srcInfo := acd.NodeFromId(srcID, f.c.Nodes)
	var jsonStr string
	err = srcFs.pacer.Call(func() (bool, error) {
		jsonStr, err = srcInfo.GetMetadata()
		return srcFs.shouldRetry(ctx, nil, err)
	})
	if err != nil {
		fs.Debugf(src, "DirMove error: error reading src metadata: %v", err)
		return err
	}
	err = json.Unmarshal([]byte(jsonStr), &srcInfo)
	if err != nil {
		fs.Debugf(src, "DirMove error: error reading unpacking src metadata: %v", err)
		return err
	}

	err = f.moveNode(ctx, srcPath, dstLeaf, dstDirectoryID, srcInfo, srcLeaf, srcDirectoryID, true)
	if err != nil {
		return err
	}

	srcFs.dirCache.FlushDir(srcRemote)
	return nil
}
827
828// purgeCheck remotes the root directory, if check is set then it
829// refuses to do so if it has anything in
830func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
831	root := path.Join(f.root, dir)
832	if root == "" {
833		return errors.New("can't purge root directory")
834	}
835	dc := f.dirCache
836	rootID, err := dc.FindDir(ctx, dir, false)
837	if err != nil {
838		return err
839	}
840
841	if check {
842		// check directory is empty
843		empty := true
844		_, err = f.listAll(ctx, rootID, "", false, false, func(node *acd.Node) bool {
845			switch *node.Kind {
846			case folderKind:
847				empty = false
848				return true
849			case fileKind:
850				empty = false
851				return true
852			default:
853				fs.Debugf("Found ASSET %s", *node.Id)
854			}
855			return false
856		})
857		if err != nil {
858			return err
859		}
860		if !empty {
861			return errors.New("directory not empty")
862		}
863	}
864
865	node := acd.NodeFromId(rootID, f.c.Nodes)
866	var resp *http.Response
867	err = f.pacer.Call(func() (bool, error) {
868		resp, err = node.Trash()
869		return f.shouldRetry(ctx, resp, err)
870	})
871	if err != nil {
872		return err
873	}
874
875	f.dirCache.FlushDir(dir)
876	if err != nil {
877		return err
878	}
879	return nil
880}
881
882// Rmdir deletes the root folder
883//
884// Returns an error if it isn't empty
885func (f *Fs) Rmdir(ctx context.Context, dir string) error {
886	return f.purgeCheck(ctx, dir, true)
887}
888
889// Precision return the precision of this Fs
890func (f *Fs) Precision() time.Duration {
891	return fs.ModTimeNotSupported
892}
893
894// Hashes returns the supported hash sets.
895func (f *Fs) Hashes() hash.Set {
896	return hash.Set(hash.MD5)
897}
898
899// Copy src to this remote using server-side copy operations.
900//
901// This is stored with the remote path given
902//
903// It returns the destination Object and a possible error
904//
905// Will only be called if src.Fs().Name() == f.Name()
906//
907// If it isn't possible then return fs.ErrorCantCopy
908//func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
909// srcObj, ok := src.(*Object)
910// if !ok {
911// 	fs.Debugf(src, "Can't copy - not same remote type")
912// 	return nil, fs.ErrorCantCopy
913// }
914// srcFs := srcObj.fs
915// _, err := f.c.ObjectCopy(srcFs.container, srcFs.root+srcObj.remote, f.container, f.root+remote, nil)
916// if err != nil {
917// 	return nil, err
918// }
919// return f.NewObject(ctx, remote), nil
920//}
921
922// Purge deletes all the files and the container
923//
924// Optional interface: Only implement this if you have a way of
925// deleting all the files quicker than just running Remove() on the
926// result of List()
927func (f *Fs) Purge(ctx context.Context, dir string) error {
928	return f.purgeCheck(ctx, dir, false)
929}
930
931// ------------------------------------------------------------
932
933// Fs returns the parent Fs
934func (o *Object) Fs() fs.Info {
935	return o.fs
936}
937
938// Return a string version
939func (o *Object) String() string {
940	if o == nil {
941		return "<nil>"
942	}
943	return o.remote
944}
945
946// Remote returns the remote path
947func (o *Object) Remote() string {
948	return o.remote
949}
950
951// Hash returns the Md5sum of an object returning a lowercase hex string
952func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
953	if t != hash.MD5 {
954		return "", hash.ErrUnsupported
955	}
956	if o.info.ContentProperties != nil && o.info.ContentProperties.Md5 != nil {
957		return *o.info.ContentProperties.Md5, nil
958	}
959	return "", nil
960}
961
962// Size returns the size of an object in bytes
963func (o *Object) Size() int64 {
964	if o.info.ContentProperties != nil && o.info.ContentProperties.Size != nil {
965		return int64(*o.info.ContentProperties.Size)
966	}
967	return 0 // Object is likely PENDING
968}
969
970// readMetaData gets the metadata if it hasn't already been fetched
971//
972// it also sets the info
973//
974// If it can't be found it returns the error fs.ErrorObjectNotFound.
975func (o *Object) readMetaData(ctx context.Context) (err error) {
976	if o.info != nil {
977		return nil
978	}
979	leaf, directoryID, err := o.fs.dirCache.FindPath(ctx, o.remote, false)
980	if err != nil {
981		if err == fs.ErrorDirNotFound {
982			return fs.ErrorObjectNotFound
983		}
984		return err
985	}
986	folder := acd.FolderFromId(directoryID, o.fs.c.Nodes)
987	var resp *http.Response
988	var info *acd.File
989	err = o.fs.pacer.Call(func() (bool, error) {
990		info, resp, err = folder.GetFile(o.fs.opt.Enc.FromStandardName(leaf))
991		return o.fs.shouldRetry(ctx, resp, err)
992	})
993	if err != nil {
994		if err == acd.ErrorNodeNotFound {
995			return fs.ErrorObjectNotFound
996		}
997		return err
998	}
999	o.info = info.Node
1000	return nil
1001}
1002
1003// ModTime returns the modification time of the object
1004//
1005//
1006// It attempts to read the objects mtime and if that isn't present the
1007// LastModified returned in the http headers
1008func (o *Object) ModTime(ctx context.Context) time.Time {
1009	err := o.readMetaData(ctx)
1010	if err != nil {
1011		fs.Debugf(o, "Failed to read metadata: %v", err)
1012		return time.Now()
1013	}
1014	modTime, err := time.Parse(timeFormat, *o.info.ModifiedDate)
1015	if err != nil {
1016		fs.Debugf(o, "Failed to read mtime from object: %v", err)
1017		return time.Now()
1018	}
1019	return modTime
1020}
1021
// SetModTime sets the modification time of the local fs object
//
// Always returns fs.ErrorCantSetModTime, consistent with Precision
// reporting fs.ModTimeNotSupported.
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	// FIXME not implemented
	return fs.ErrorCantSetModTime
}
1027
// Storable returns a boolean showing whether this object storable.
// All objects on this remote are treated as storable.
func (o *Object) Storable() bool {
	return true
}
1032
1033// Open an object for read
1034func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
1035	bigObject := o.Size() >= int64(o.fs.opt.TempLinkThreshold)
1036	if bigObject {
1037		fs.Debugf(o, "Downloading large object via tempLink")
1038	}
1039	file := acd.File{Node: o.info}
1040	var resp *http.Response
1041	headers := fs.OpenOptionHeaders(options)
1042	err = o.fs.pacer.Call(func() (bool, error) {
1043		if !bigObject {
1044			in, resp, err = file.OpenHeaders(headers)
1045		} else {
1046			in, resp, err = file.OpenTempURLHeaders(o.fs.noAuthClient, headers)
1047		}
1048		return o.fs.shouldRetry(ctx, resp, err)
1049	})
1050	return in, err
1051}
1052
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	file := acd.File{Node: o.info}
	var info *acd.File
	var resp *http.Response
	var err error
	// NOTE(review): CallNoRetry is used, presumably because checkUpload
	// below decides whether a retry is appropriate — confirm against
	// checkUpload's implementation.
	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
		start := time.Now()
		// Keep the OAuth token renewed for the duration of the upload.
		o.fs.tokenRenewer.Start()
		info, resp, err = file.Overwrite(in)
		o.fs.tokenRenewer.Stop()
		var ok bool
		// checkUpload can override both info and err — e.g. declaring
		// the upload OK despite an HTTP error, presumably by re-checking
		// the remote; ok==true means accept the result without retry.
		ok, info, err = o.fs.checkUpload(ctx, resp, in, src, info, err, time.Since(start))
		if ok {
			return false, nil
		}
		return o.fs.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return err
	}
	// Cache the freshly-returned node metadata.
	o.info = info.Node
	return nil
}
1079
1080// Remove a node
1081func (f *Fs) removeNode(ctx context.Context, info *acd.Node) error {
1082	var resp *http.Response
1083	var err error
1084	err = f.pacer.Call(func() (bool, error) {
1085		resp, err = info.Trash()
1086		return f.shouldRetry(ctx, resp, err)
1087	})
1088	return err
1089}
1090
// Remove an object by trashing its underlying node (see removeNode).
func (o *Object) Remove(ctx context.Context) error {
	return o.fs.removeNode(ctx, o.info)
}
1095
1096// Restore a node
1097func (f *Fs) restoreNode(ctx context.Context, info *acd.Node) (newInfo *acd.Node, err error) {
1098	var resp *http.Response
1099	err = f.pacer.Call(func() (bool, error) {
1100		newInfo, resp, err = info.Restore()
1101		return f.shouldRetry(ctx, resp, err)
1102	})
1103	return newInfo, err
1104}
1105
1106// Changes name of given node
1107func (f *Fs) renameNode(ctx context.Context, info *acd.Node, newName string) (newInfo *acd.Node, err error) {
1108	var resp *http.Response
1109	err = f.pacer.Call(func() (bool, error) {
1110		newInfo, resp, err = info.Rename(f.opt.Enc.FromStandardName(newName))
1111		return f.shouldRetry(ctx, resp, err)
1112	})
1113	return newInfo, err
1114}
1115
1116// Replaces one parent with another, effectively moving the file. Leaves other
1117// parents untouched. ReplaceParent cannot be used when the file is trashed.
1118func (f *Fs) replaceParent(ctx context.Context, info *acd.Node, oldParentID string, newParentID string) error {
1119	return f.pacer.Call(func() (bool, error) {
1120		resp, err := info.ReplaceParent(oldParentID, newParentID)
1121		return f.shouldRetry(ctx, resp, err)
1122	})
1123}
1124
1125// Adds one additional parent to object.
1126func (f *Fs) addParent(ctx context.Context, info *acd.Node, newParentID string) error {
1127	return f.pacer.Call(func() (bool, error) {
1128		resp, err := info.AddParent(newParentID)
1129		return f.shouldRetry(ctx, resp, err)
1130	})
1131}
1132
1133// Remove given parent from object, leaving the other possible
1134// parents untouched. Object can end up having no parents.
1135func (f *Fs) removeParent(ctx context.Context, info *acd.Node, parentID string) error {
1136	return f.pacer.Call(func() (bool, error) {
1137		resp, err := info.RemoveParent(parentID)
1138		return f.shouldRetry(ctx, resp, err)
1139	})
1140}
1141
// moveNode moves the node given from the srcLeaf,srcDirectoryID to
// the dstLeaf,dstDirectoryID
//
// The fast path is a rename (if the leaf changed) followed by a parent
// replace (if the directory changed). If the rename fails — presumably
// because the destination name already exists — it falls back to the
// OnConflict sequence: trash, rename, re-parent, restore.
//
// useDirErrorMsgs selects fs.ErrorCantDirMove over fs.ErrorCantMove
// for the multi-parent refusal below.
func (f *Fs) moveNode(ctx context.Context, name, dstLeaf, dstDirectoryID string, srcInfo *acd.Node, srcLeaf, srcDirectoryID string, useDirErrorMsgs bool) (err error) {
	// fs.Debugf(name, "moveNode dst(%q,%s) <- src(%q,%s)", dstLeaf, dstDirectoryID, srcLeaf, srcDirectoryID)
	cantMove := fs.ErrorCantMove
	if useDirErrorMsgs {
		cantMove = fs.ErrorCantDirMove
	}

	// A rename would change the name in every parent, so refuse to
	// rename a node that is attached to more than one parent.
	if len(srcInfo.Parents) > 1 && srcLeaf != dstLeaf {
		fs.Debugf(name, "Move error: object is attached to multiple parents and should be renamed. This would change the name of the node in all parents.")
		return cantMove
	}

	if srcLeaf != dstLeaf {
		// fs.Debugf(name, "renaming")
		_, err = f.renameNode(ctx, srcInfo, dstLeaf)
		if err != nil {
			fs.Debugf(name, "Move: quick path rename failed: %v", err)
			goto OnConflict
		}
	}
	if srcDirectoryID != dstDirectoryID {
		// fs.Debugf(name, "trying parent replace: %s -> %s", oldParentID, newParentID)
		err = f.replaceParent(ctx, srcInfo, srcDirectoryID, dstDirectoryID)
		if err != nil {
			fs.Debugf(name, "Move: quick path parent replace failed: %v", err)
			return err
		}
	}

	return nil

OnConflict:
	// Conflict recovery: operate on the node while it is trashed, where
	// the rename/re-parent calls do not hit name conflicts, then restore
	// it. NOTE: if any step below fails the node is left in an
	// intermediate (possibly trashed) state — see the debug message.
	fs.Debugf(name, "Could not directly rename file, presumably because there was a file with the same name already. Instead, the file will now be trashed where such operations do not cause errors. It will be restored to the correct parent after. If any of the subsequent calls fails, the rename/move will be in an invalid state.")

	// fs.Debugf(name, "Trashing file")
	err = f.removeNode(ctx, srcInfo)
	if err != nil {
		fs.Debugf(name, "Move: remove node failed: %v", err)
		return err
	}
	// fs.Debugf(name, "Renaming file")
	_, err = f.renameNode(ctx, srcInfo, dstLeaf)
	if err != nil {
		fs.Debugf(name, "Move: rename node failed: %v", err)
		return err
	}
	// note: replacing parent is forbidden by API, modifying them individually is
	// okay though
	// fs.Debugf(name, "Adding target parent")
	err = f.addParent(ctx, srcInfo, dstDirectoryID)
	if err != nil {
		fs.Debugf(name, "Move: addParent failed: %v", err)
		return err
	}
	// fs.Debugf(name, "removing original parent")
	err = f.removeParent(ctx, srcInfo, srcDirectoryID)
	if err != nil {
		fs.Debugf(name, "Move: removeParent failed: %v", err)
		return err
	}
	// fs.Debugf(name, "Restoring")
	_, err = f.restoreNode(ctx, srcInfo)
	if err != nil {
		fs.Debugf(name, "Move: restoreNode node failed: %v", err)
		return err
	}
	return nil
}
1212
1213// MimeType of an Object if known, "" otherwise
1214func (o *Object) MimeType(ctx context.Context) string {
1215	if o.info.ContentProperties != nil && o.info.ContentProperties.ContentType != nil {
1216		return *o.info.ContentProperties.ContentType
1217	}
1218	return ""
1219}
1220
1221// ChangeNotify calls the passed function with a path that has had changes.
1222// If the implementation uses polling, it should adhere to the given interval.
1223//
1224// Automatically restarts itself in case of unexpected behaviour of the remote.
1225//
1226// Close the returned channel to stop being notified.
1227func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
1228	checkpoint := f.opt.Checkpoint
1229
1230	go func() {
1231		var ticker *time.Ticker
1232		var tickerC <-chan time.Time
1233		for {
1234			select {
1235			case pollInterval, ok := <-pollIntervalChan:
1236				if !ok {
1237					if ticker != nil {
1238						ticker.Stop()
1239					}
1240					return
1241				}
1242				if pollInterval == 0 {
1243					if ticker != nil {
1244						ticker.Stop()
1245						ticker, tickerC = nil, nil
1246					}
1247				} else {
1248					ticker = time.NewTicker(pollInterval)
1249					tickerC = ticker.C
1250				}
1251			case <-tickerC:
1252				checkpoint = f.changeNotifyRunner(notifyFunc, checkpoint)
1253				if err := config.SetValueAndSave(f.name, "checkpoint", checkpoint); err != nil {
1254					fs.Debugf(f, "Unable to save checkpoint: %v", err)
1255				}
1256			}
1257		}
1258	}()
1259}
1260
// changeNotifyRunner polls the remote Changes API once, starting from
// the given checkpoint, and calls notifyFunc for each changed path that
// can be resolved through the directory cache. It returns the new
// checkpoint to resume from (or the old one unchanged on failure).
func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), checkpoint string) string {
	var err error
	var resp *http.Response
	var reachedEnd bool
	var csCount int   // number of ChangeSets received
	var nodeCount int // total nodes across all ChangeSets

	fs.Debugf(f, "Checking for changes on remote (Checkpoint %q)", checkpoint)
	err = f.pacer.CallNoRetry(func() (bool, error) {
		resp, err = f.c.Changes.GetChangesFunc(&acd.ChangesOptions{
			Checkpoint:    checkpoint,
			IncludePurged: true,
		}, func(changeSet *acd.ChangeSet, err error) error {
			if err != nil {
				return err
			}

			type entryType struct {
				path      string
				entryType fs.EntryType
			}
			var pathsToClear []entryType
			csCount++
			nodeCount += len(changeSet.Nodes)
			if changeSet.End {
				reachedEnd = true
			}
			if changeSet.Checkpoint != "" {
				checkpoint = changeSet.Checkpoint
			}
			for _, node := range changeSet.Nodes {
				// Note: this local "path" shadows the imported path package.
				// Fast path: the changed node itself is in the dircache.
				if path, ok := f.dirCache.GetInv(*node.Id); ok {
					if node.IsFile() {
						pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryObject})
					} else {
						pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryDirectory})
					}
					continue
				}

				if node.IsFile() {
					// translate the parent dir of this object
					if len(node.Parents) > 0 {
						if path, ok := f.dirCache.GetInv(node.Parents[0]); ok {
							// and append the drive file name to compute the full file name
							name := f.opt.Enc.ToStandardName(*node.Name)
							if len(path) > 0 {
								path = path + "/" + name
							} else {
								path = name
							}
							// this will now clear the actual file too
							pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryObject})
						}
					} else { // a true root object that is changed
						pathsToClear = append(pathsToClear, entryType{path: *node.Name, entryType: fs.EntryObject})
					}
				}
			}

			// Deduplicate before notifying so each path is signalled once
			// per ChangeSet.
			visitedPaths := make(map[string]bool)
			for _, entry := range pathsToClear {
				if _, ok := visitedPaths[entry.path]; ok {
					continue
				}
				visitedPaths[entry.path] = true
				notifyFunc(entry.path, entry.entryType)
			}

			return nil
		})
		return false, err
	})
	fs.Debugf(f, "Got %d ChangeSets with %d Nodes", csCount, nodeCount)

	// io.ErrUnexpectedEOF is tolerated — presumably the changes stream
	// can terminate that way; confirm against go-acd's GetChangesFunc.
	if err != nil && err != io.ErrUnexpectedEOF {
		fs.Debugf(f, "Failed to get Changes: %v", err)
		return checkpoint
	}

	if reachedEnd {
		reachedEnd = false
		fs.Debugf(f, "All changes were processed. Waiting for more.")
	} else if checkpoint == "" {
		fs.Debugf(f, "Did not get any checkpoint, something went wrong! %+v", resp)
	}
	return checkpoint
}
1349
1350// ID returns the ID of the Object if known, or "" if not
1351func (o *Object) ID() string {
1352	if o.info.Id == nil {
1353		return ""
1354	}
1355	return *o.info.Id
1356}
1357
1358// Check the interfaces are satisfied
1359var (
1360	_ fs.Fs     = (*Fs)(nil)
1361	_ fs.Purger = (*Fs)(nil)
1362	//	_ fs.Copier   = (*Fs)(nil)
1363	_ fs.Mover           = (*Fs)(nil)
1364	_ fs.DirMover        = (*Fs)(nil)
1365	_ fs.DirCacheFlusher = (*Fs)(nil)
1366	_ fs.ChangeNotifier  = (*Fs)(nil)
1367	_ fs.Object          = (*Object)(nil)
1368	_ fs.MimeTyper       = &Object{}
1369	_ fs.IDer            = &Object{}
1370)
1371