package command

import (
	"context"
	"fmt"
	"io"
	"mime"
	"net/http"
	"os"
	"path/filepath"
	"strings"

	"github.com/hashicorp/go-multierror"
	"github.com/urfave/cli/v2"

	errorpkg "github.com/peak/s5cmd/error"
	"github.com/peak/s5cmd/log"
	"github.com/peak/s5cmd/log/stat"
	"github.com/peak/s5cmd/parallel"
	"github.com/peak/s5cmd/storage"
	"github.com/peak/s5cmd/storage/url"
)

const (
	defaultCopyConcurrency = 5
	defaultPartSize        = 50 // MiB
	megabytes              = 1024 * 1024
)

var copyHelpTemplate = `Name:
	{{.HelpName}} - {{.Usage}}

Usage:
	{{.HelpName}} [options] source destination

Options:
	{{range .VisibleFlags}}{{.}}
	{{end}}
Examples:
	01. Download an S3 object to working directory
		 > s5cmd {{.HelpName}} s3://bucket/prefix/object.gz .

	02. Download an S3 object and rename
		 > s5cmd {{.HelpName}} s3://bucket/prefix/object.gz myobject.gz

	03. Download all S3 objects to a directory
		 > s5cmd {{.HelpName}} s3://bucket/* target-directory/

	04. Download an S3 object from a public bucket
		 > s5cmd --no-sign-request {{.HelpName}} s3://bucket/prefix/object.gz .

	05. Upload a file to S3 bucket
		 > s5cmd {{.HelpName}} myfile.gz s3://bucket/

	06. Upload matching files to S3 bucket
		 > s5cmd {{.HelpName}} dir/*.gz s3://bucket/

	07. Upload all files in a directory to S3 bucket recursively
		 > s5cmd {{.HelpName}} dir/ s3://bucket/

	08. Copy S3 object to another bucket
		 > s5cmd {{.HelpName}} s3://bucket/object s3://target-bucket/prefix/object

	09. Copy matching S3 objects to another bucket
		 > s5cmd {{.HelpName}} s3://bucket/*.gz s3://target-bucket/prefix/

	10. Copy files in a directory to S3 prefix if not found on target
		 > s5cmd {{.HelpName}} -n -s -u dir/ s3://bucket/target-prefix/

	11. Copy files in an S3 prefix to another S3 prefix if not found on target
		 > s5cmd {{.HelpName}} -n -s -u s3://bucket/source-prefix/* s3://bucket/target-prefix/

	12. Perform KMS Server Side Encryption of the object(s) at the destination
		 > s5cmd {{.HelpName}} --sse aws:kms s3://bucket/object s3://target-bucket/prefix/object

	13. Perform KMS-SSE of the object(s) at the destination using customer managed Customer Master Key (CMK) key id
		 > s5cmd {{.HelpName}} --sse aws:kms --sse-kms-key-id <your-kms-key-id> s3://bucket/object s3://target-bucket/prefix/object

	14. Force transfer of GLACIER objects with a prefix whether they are restored or not
		 > s5cmd {{.HelpName}} --force-glacier-transfer s3://bucket/prefix/* target-directory/

	15. Upload a file to S3 bucket with public read s3 acl
		 > s5cmd {{.HelpName}} --acl "public-read" myfile.gz s3://bucket/

	16. Upload a file to S3 bucket with expires header
		 > s5cmd {{.HelpName}} --expires "2024-10-01T20:30:00Z" myfile.gz s3://bucket/

	17. Upload a file to S3 bucket with cache-control header
		 > s5cmd {{.HelpName}} --cache-control "public, max-age=345600" myfile.gz s3://bucket/

	18. Copy all files to S3 bucket but exclude the ones with txt and gz extensions
		 > s5cmd {{.HelpName}} --exclude "*.txt" --exclude "*.gz" dir/ s3://bucket

	19. Copy all files from S3 bucket to another S3 bucket but exclude the ones that start with log
		 > s5cmd {{.HelpName}} --exclude "log*" s3://bucket/* s3://destbucket
`

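// NewCopyCommandFlags builds the list of CLI flags accepted by the copy
// command.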
func NewCopyCommandFlags() []cli.Flag {
	return []cli.Flag{
		&cli.BoolFlag{
			Name:    "no-clobber",
			Aliases: []string{"n"},
			Usage:   "do not overwrite destination if already exists",
		},
		&cli.BoolFlag{
			Name:    "if-size-differ",
			Aliases: []string{"s"},
			Usage:   "only overwrite destination if size differs",
		},
		&cli.BoolFlag{
			Name:    "if-source-newer",
			Aliases: []string{"u"},
			Usage:   "only overwrite destination if source modtime is newer",
		},
		&cli.BoolFlag{
			Name:    "flatten",
			Aliases: []string{"f"},
			Usage:   "flatten directory structure of source, starting from the first wildcard",
		},
		&cli.BoolFlag{
			Name:  "no-follow-symlinks",
			Usage: "do not follow symbolic links",
		},
		&cli.StringFlag{
			Name:  "storage-class",
			Usage: "set storage class for target ('STANDARD','REDUCED_REDUNDANCY','GLACIER','STANDARD_IA','ONEZONE_IA','INTELLIGENT_TIERING','DEEP_ARCHIVE')",
		},
		&cli.IntFlag{
			Name:    "concurrency",
			Aliases: []string{"c"},
			Value:   defaultCopyConcurrency,
			Usage:   "number of concurrent parts transferred between host and remote server",
		},
		&cli.IntFlag{
			Name:    "part-size",
			Aliases: []string{"p"},
			Value:   defaultPartSize,
			Usage:   "size of each part transferred between host and remote server, in MiB",
		},
		&cli.StringFlag{
			Name:  "sse",
			Usage: "perform server side encryption of the data at its destination, e.g. aws:kms",
		},
		&cli.StringFlag{
			Name:  "sse-kms-key-id",
			Usage: "customer master key (CMK) id for SSE-KMS encryption; leave it out if server-side generated key is desired",
		},
		&cli.StringFlag{
			Name:  "acl",
			Usage: "set acl for target: defines granted accesses and their types on different accounts/groups, e.g. cp --acl 'public-read'",
		},
		&cli.StringFlag{
			Name:  "cache-control",
			Usage: "set cache control for target: defines cache control header for object, e.g. cp --cache-control 'public, max-age=345600'",
		},
		&cli.StringFlag{
			Name:  "expires",
			Usage: "set expires for target (uses RFC3339 format): defines expires header for object, e.g. cp --expires '2024-10-01T20:30:00Z'",
		},
		&cli.BoolFlag{
			Name:  "force-glacier-transfer",
			Usage: "force transfer of GLACIER objects whether they are restored or not",
		},
		&cli.StringFlag{
			Name:  "source-region",
			Usage: "set the region of source bucket; the region of the source bucket will be automatically discovered if --source-region is not specified",
		},
		&cli.StringFlag{
			Name:  "destination-region",
			Usage: "set the region of destination bucket; the region of the destination bucket will be automatically discovered if --destination-region is not specified",
		},
		&cli.StringSliceFlag{
			Name:  "exclude",
			Usage: "exclude objects with given pattern",
		},
		&cli.BoolFlag{
			Name:  "raw",
			Usage: "disable wildcard operations; useful with filenames that contain glob characters",
		},
	}
}

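// NewCopyCommand returns the CLI command definition for "cp".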
func NewCopyCommand() *cli.Command {
	return &cli.Command{
		Name:               "cp",
		HelpName:           "cp",
		Usage:              "copy objects",
		Flags:              NewCopyCommandFlags(),
		CustomHelpTemplate: copyHelpTemplate,
		Before: func(c *cli.Context) error {
			err := validateCopyCommand(c)
			if err != nil {
				printError(givenCommand(c), c.Command.Name, err)
			}
			return err
		},
		Action: func(c *cli.Context) (err error) {
			defer stat.Collect(c.Command.FullName(), &err)()

			// don't delete source
			return NewCopy(c, false).Run(c.Context)
		},
	}
}

// Copy holds copy operation flags and states.
type Copy struct {
	src         string
	dst         string
	op          string
	fullCommand string

	deleteSource bool

	// flags
	noClobber            bool
	ifSizeDiffer         bool
	ifSourceNewer        bool
	flatten              bool
	followSymlinks       bool
	storageClass         storage.StorageClass
	encryptionMethod     string
	encryptionKeyID      string
	acl                  string
	forceGlacierTransfer bool
	exclude              []string
	raw                  bool
	cacheControl         string
	expires              string

	// region settings
	srcRegion string
	dstRegion string

	// s3 options
	concurrency int
	partSize    int64
	storageOpts storage.Options
}

// NewCopy creates Copy from cli.Context.
func NewCopy(c *cli.Context, deleteSource bool) Copy {
	return Copy{
		src:          c.Args().Get(0),
		dst:          c.Args().Get(1),
		op:           c.Command.Name,
		fullCommand:  givenCommand(c),
		deleteSource: deleteSource,
		// flags
		noClobber:            c.Bool("no-clobber"),
		ifSizeDiffer:         c.Bool("if-size-differ"),
		ifSourceNewer:        c.Bool("if-source-newer"),
		flatten:              c.Bool("flatten"),
		followSymlinks:       !c.Bool("no-follow-symlinks"),
		storageClass:         storage.StorageClass(c.String("storage-class")),
		concurrency:          c.Int("concurrency"),
		partSize:             c.Int64("part-size") * megabytes,
		encryptionMethod:     c.String("sse"),
		encryptionKeyID:      c.String("sse-kms-key-id"),
		acl:                  c.String("acl"),
		forceGlacierTransfer: c.Bool("force-glacier-transfer"),
		exclude:              c.StringSlice("exclude"),
		raw:                  c.Bool("raw"),
		cacheControl:         c.String("cache-control"),
		expires:              c.String("expires"),
		// region settings
		srcRegion: c.String("source-region"),
		dstRegion: c.String("destination-region"),

		storageOpts: NewStorageOpts(c),
	}
}

const fdlimitWarning = `
WARNING: s5cmd is hitting the max open file limit allowed by your OS. Either
increase the open file limit or try to decrease the number of workers with
the '-numworkers' parameter.
`

// Run starts copying given source objects to destination.
func (c Copy) Run(ctx context.Context) error {
	srcurl, err := url.New(c.src, url.WithRaw(c.raw))
	if err != nil {
		printError(c.fullCommand, c.op, err)
		return err
	}

	dsturl, err := url.New(c.dst, url.WithRaw(c.raw))
	if err != nil {
		printError(c.fullCommand, c.op, err)
		return err
	}

	// override source region if set
	if c.srcRegion != "" {
		c.storageOpts.SetRegion(c.srcRegion)
	}

	client, err := storage.NewClient(ctx, srcurl, c.storageOpts)
	if err != nil {
		printError(c.fullCommand, c.op, err)
		return err
	}

	objch, err := expandSource(ctx, client, c.followSymlinks, srcurl)
	if err != nil {
		printError(c.fullCommand, c.op, err)
		return err
	}

	waiter := parallel.NewWaiter()

	var (
		merror    error
		errDoneCh = make(chan bool)
	)

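	// Drain errors from the waiter in the background. Abort right away on
	// "too many open files", since subsequent operations would keep failing.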
	go func() {
		defer close(errDoneCh)
		for err := range waiter.Err() {
			if strings.Contains(err.Error(), "too many open files") {
				fmt.Println(strings.TrimSpace(fdlimitWarning))
				fmt.Printf("ERROR %v\n", err)

				os.Exit(1)
			}
			printError(c.fullCommand, c.op, err)
			merror = multierror.Append(merror, err)
		}
	}()

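	// a non-wildcard local source that points at a directory is still a
	// batch operation.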
	isBatch := srcurl.IsWildcard()
	if !isBatch && !srcurl.IsRemote() {
		obj, _ := client.Stat(ctx, srcurl)
		isBatch = obj != nil && obj.Type.IsDir()
	}

	excludePatterns, err := createExcludesFromWildcard(c.exclude)
	if err != nil {
		printError(c.fullCommand, c.op, err)
		return err
	}

	for object := range objch {
		if object.Type.IsDir() || errorpkg.IsCancelation(object.Err) {
			continue
		}

		if err := object.Err; err != nil {
			printError(c.fullCommand, c.op, err)
			continue
		}

		if object.StorageClass.IsGlacier() && !c.forceGlacierTransfer {
			err := fmt.Errorf("object '%v' is on Glacier storage", object)
			printError(c.fullCommand, c.op, err)
			continue
		}

		if isURLExcluded(excludePatterns, object.URL.Path, srcurl.Prefix) {
			continue
		}

		srcurl := object.URL
		var task parallel.Task

		switch {
		case srcurl.Type == dsturl.Type: // local->local or remote->remote
			task = c.prepareCopyTask(ctx, srcurl, dsturl, isBatch)
		case srcurl.IsRemote(): // remote->local
			task = c.prepareDownloadTask(ctx, srcurl, dsturl, isBatch)
		case dsturl.IsRemote(): // local->remote
			task = c.prepareUploadTask(ctx, srcurl, dsturl, isBatch)
		default:
			panic("unexpected src-dst pair")
		}

		parallel.Run(task, waiter)
	}

	waiter.Wait()
	<-errDoneCh

	return merror
}

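// prepareCopyTask returns a task that copies a single object to a
// same-type destination.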
func (c Copy) prepareCopyTask(
	ctx context.Context,
	srcurl *url.URL,
	dsturl *url.URL,
	isBatch bool,
) func() error {
	return func() error {
		dsturl = prepareRemoteDestination(srcurl, dsturl, c.flatten, isBatch)
		err := c.doCopy(ctx, srcurl, dsturl)
		if err != nil {
			return &errorpkg.Error{
				Op:  c.op,
				Src: srcurl,
				Dst: dsturl,
				Err: err,
			}
		}
		return nil
	}
}

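// prepareDownloadTask returns a task that downloads a single remote object
// to a local destination.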
func (c Copy) prepareDownloadTask(
	ctx context.Context,
	srcurl *url.URL,
	dsturl *url.URL,
	isBatch bool,
) func() error {
	return func() error {
		dsturl, err := prepareLocalDestination(ctx, srcurl, dsturl, c.flatten, isBatch, c.storageOpts)
		if err != nil {
			return err
		}
		err = c.doDownload(ctx, srcurl, dsturl)
		if err != nil {
			return &errorpkg.Error{
				Op:  c.op,
				Src: srcurl,
				Dst: dsturl,
				Err: err,
			}
		}
		return nil
	}
}

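// prepareUploadTask returns a task that uploads a single local file to a
// remote destination.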
func (c Copy) prepareUploadTask(
	ctx context.Context,
	srcurl *url.URL,
	dsturl *url.URL,
	isBatch bool,
) func() error {
	return func() error {
		dsturl = prepareRemoteDestination(srcurl, dsturl, c.flatten, isBatch)
		err := c.doUpload(ctx, srcurl, dsturl)
		if err != nil {
			return &errorpkg.Error{
				Op:  c.op,
				Src: srcurl,
				Dst: dsturl,
				Err: err,
			}
		}
		return nil
	}
}

// doDownload is used to fetch a remote object and save it as a local object.
func (c Copy) doDownload(ctx context.Context, srcurl *url.URL, dsturl *url.URL) error {
	srcClient, err := storage.NewRemoteClient(ctx, srcurl, c.storageOpts)
	if err != nil {
		return err
	}

	dstClient := storage.NewLocalClient(c.storageOpts)

	err = c.shouldOverride(ctx, srcurl, dsturl)
	if err != nil {
		// FIXME(ig): rename
		if errorpkg.IsWarning(err) {
			printDebug(c.op, srcurl, dsturl, err)
			return nil
		}
		return err
	}

	file, err := dstClient.Create(dsturl.Absolute())
	if err != nil {
		return err
	}
	defer file.Close()

	size, err := srcClient.Get(ctx, srcurl, file, c.concurrency, c.partSize)
	if err != nil {
		_ = dstClient.Delete(ctx, dsturl)
		return err
	}

	if c.deleteSource {
		_ = srcClient.Delete(ctx, srcurl)
	}

	msg := log.InfoMessage{
		Operation:   c.op,
		Source:      srcurl,
		Destination: dsturl,
		Object: &storage.Object{
			Size: size,
		},
	}
	log.Info(msg)

	return nil
}

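// doUpload is used to upload a local object to a remote destination.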
func (c Copy) doUpload(ctx context.Context, srcurl *url.URL, dsturl *url.URL) error {
	srcClient := storage.NewLocalClient(c.storageOpts)

	file, err := srcClient.Open(srcurl.Absolute())
	if err != nil {
		return err
	}
	defer file.Close()

	err = c.shouldOverride(ctx, srcurl, dsturl)
	if err != nil {
		if errorpkg.IsWarning(err) {
			printDebug(c.op, srcurl, dsturl, err)
			return nil
		}
		return err
	}

	// override destination region if set
	if c.dstRegion != "" {
		c.storageOpts.SetRegion(c.dstRegion)
	}
	dstClient, err := storage.NewRemoteClient(ctx, dsturl, c.storageOpts)
	if err != nil {
		return err
	}

	metadata := storage.NewMetadata().
		SetContentType(guessContentType(file)).
		SetStorageClass(string(c.storageClass)).
		SetSSE(c.encryptionMethod).
		SetSSEKeyID(c.encryptionKeyID).
		SetACL(c.acl).
		SetCacheControl(c.cacheControl).
		SetExpires(c.expires)

	err = dstClient.Put(ctx, file, dsturl, metadata, c.concurrency, c.partSize)
	if err != nil {
		return err
	}

	// the size is only needed for the log message; guard against a failed
	// Stat so a missing source does not cause a nil dereference.
	var size int64
	if obj, _ := srcClient.Stat(ctx, srcurl); obj != nil {
		size = obj.Size
	}

	if c.deleteSource {
		// close the file before deleting
		file.Close()
		if err := srcClient.Delete(ctx, srcurl); err != nil {
			return err
		}
	}

	msg := log.InfoMessage{
		Operation:   c.op,
		Source:      srcurl,
		Destination: dsturl,
		Object: &storage.Object{
			Size:         size,
			StorageClass: c.storageClass,
		},
	}
	log.Info(msg)

	return nil
}

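// doCopy is used to copy an object to a same-type destination
// (e.g. remote->remote).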
func (c Copy) doCopy(ctx context.Context, srcurl, dsturl *url.URL) error {
	// override destination region if set
	if c.dstRegion != "" {
		c.storageOpts.SetRegion(c.dstRegion)
	}
	dstClient, err := storage.NewClient(ctx, dsturl, c.storageOpts)
	if err != nil {
		return err
	}

	metadata := storage.NewMetadata().
		SetStorageClass(string(c.storageClass)).
		SetSSE(c.encryptionMethod).
		SetSSEKeyID(c.encryptionKeyID).
		SetACL(c.acl).
		SetCacheControl(c.cacheControl).
		SetExpires(c.expires)

	err = c.shouldOverride(ctx, srcurl, dsturl)
	if err != nil {
		if errorpkg.IsWarning(err) {
			printDebug(c.op, srcurl, dsturl, err)
			return nil
		}
		return err
	}

	err = dstClient.Copy(ctx, srcurl, dsturl, metadata)
	if err != nil {
		return err
	}

	if c.deleteSource {
		srcClient, err := storage.NewClient(ctx, srcurl, c.storageOpts)
		if err != nil {
			return err
		}
		if err := srcClient.Delete(ctx, srcurl); err != nil {
			return err
		}
	}

	msg := log.InfoMessage{
		Operation:   c.op,
		Source:      srcurl,
		Destination: dsturl,
		Object: &storage.Object{
			URL:          dsturl,
			StorageClass: c.storageClass,
		},
	}
	log.Info(msg)

	return nil
}

// shouldOverride checks whether the destination should be overridden, given
// the source-destination pair and the copy flags. For example,
// "cp -n -s <src> <dst>" does not override an existing <dst> unless their
// sizes differ. A nil return value means the destination may be overridden.
func (c Copy) shouldOverride(ctx context.Context, srcurl *url.URL, dsturl *url.URL) error {
	// if not asked to override, ignore.
	if !c.noClobber && !c.ifSizeDiffer && !c.ifSourceNewer {
		return nil
	}

	srcClient, err := storage.NewClient(ctx, srcurl, c.storageOpts)
	if err != nil {
		return err
	}

	srcObj, err := getObject(ctx, srcurl, srcClient)
	if err != nil {
		return err
	}

	dstClient, err := storage.NewClient(ctx, dsturl, c.storageOpts)
	if err != nil {
		return err
	}

	dstObj, err := getObject(ctx, dsturl, dstClient)
	if err != nil {
		return err
	}

	// if the destination does not exist, no conditions apply.
	if dstObj == nil {
		return nil
	}

	var stickyErr error
	if c.noClobber {
		stickyErr = errorpkg.ErrObjectExists
	}

	if c.ifSizeDiffer {
		if srcObj.Size == dstObj.Size {
			stickyErr = errorpkg.ErrObjectSizesMatch
		} else {
			stickyErr = nil
		}
	}

	if c.ifSourceNewer {
		srcMod, dstMod := srcObj.ModTime, dstObj.ModTime

		if !srcMod.After(*dstMod) {
			stickyErr = errorpkg.ErrObjectIsNewer
		} else {
			stickyErr = nil
		}
	}

	return stickyErr
}

// prepareRemoteDestination will return a new destination URL for
// remote->remote and local->remote copy operations.
func prepareRemoteDestination(
	srcurl *url.URL,
	dsturl *url.URL,
	flatten bool,
	isBatch bool,
) *url.URL {
	objname := srcurl.Base()
	if isBatch && !flatten {
		objname = srcurl.Relative()
	}

	if dsturl.IsPrefix() || dsturl.IsBucket() {
		dsturl = dsturl.Join(objname)
	}
	return dsturl
}

// prepareLocalDestination will return a new destination URL for
// remote->local copy operations.
func prepareLocalDestination(
	ctx context.Context,
	srcurl *url.URL,
	dsturl *url.URL,
	flatten bool,
	isBatch bool,
	storageOpts storage.Options,
) (*url.URL, error) {
	objname := srcurl.Base()
	if isBatch && !flatten {
		objname = srcurl.Relative()
	}

	client := storage.NewLocalClient(storageOpts)

	if isBatch {
		err := client.MkdirAll(dsturl.Absolute())
		if err != nil {
			return nil, err
		}
	}

	obj, err := client.Stat(ctx, dsturl)
	if err != nil && err != storage.ErrGivenObjectNotFound {
		return nil, err
	}

	if isBatch && !flatten {
		dsturl = dsturl.Join(objname)
		err := client.MkdirAll(dsturl.Dir())
		if err != nil {
			return nil, err
		}
	}

	if err == storage.ErrGivenObjectNotFound {
		err := client.MkdirAll(dsturl.Dir())
		if err != nil {
			return nil, err
		}
		if strings.HasSuffix(dsturl.Absolute(), "/") {
			dsturl = dsturl.Join(objname)
		}
	} else {
		if obj.Type.IsDir() {
			dsturl = obj.URL.Join(objname)
		}
	}

	return dsturl, nil
}

759
760// getObject checks if the object from given url exists. If no object is
761// found, error and returning object would be nil.
762func getObject(ctx context.Context, url *url.URL, client storage.Storage) (*storage.Object, error) {
763	obj, err := client.Stat(ctx, url)
764	if err == storage.ErrGivenObjectNotFound {
765		return nil, nil
766	}
767
768	return obj, err
769}
770
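// validateCopyCommand verifies the positional arguments and the
// source/destination combination before the command runs.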
func validateCopyCommand(c *cli.Context) error {
	if c.Args().Len() != 2 {
		return fmt.Errorf("expected source and destination arguments")
	}

	ctx := c.Context
	src := c.Args().Get(0)
	dst := c.Args().Get(1)

	srcurl, err := url.New(src, url.WithRaw(c.Bool("raw")))
	if err != nil {
		return err
	}

	dsturl, err := url.New(dst, url.WithRaw(c.Bool("raw")))
	if err != nil {
		return err
	}

	// wildcard destination doesn't mean anything
	if dsturl.IsWildcard() {
		return fmt.Errorf("target %q cannot contain glob characters", dst)
	}

	// we don't operate on S3 prefixes for copy and delete operations.
	if srcurl.IsBucket() || srcurl.IsPrefix() {
		return fmt.Errorf("source argument must contain a wildcard character")
	}

	// 'cp dir/* s3://bucket/prefix': expect a trailing slash to avoid any
	// surprises.
	if srcurl.IsWildcard() && dsturl.IsRemote() && !dsturl.IsPrefix() && !dsturl.IsBucket() {
		return fmt.Errorf("target %q must be a bucket or a prefix", dsturl)
	}

	switch {
	case srcurl.Type == dsturl.Type:
		return validateCopy(srcurl, dsturl)
	case dsturl.IsRemote():
		return validateUpload(ctx, srcurl, dsturl, NewStorageOpts(c))
	default:
		return nil
	}
}

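// validateCopy rejects local->local copies; all other source-destination
// combinations are allowed.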
func validateCopy(srcurl, dsturl *url.URL) error {
	if srcurl.IsRemote() || dsturl.IsRemote() {
		return nil
	}

	// we don't support local->local copies
	return fmt.Errorf("local->local copy operations are not permitted")
}

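// validateUpload checks that a local source can be uploaded to the given
// remote destination.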
func validateUpload(ctx context.Context, srcurl, dsturl *url.URL, storageOpts storage.Options) error {
	srcclient := storage.NewLocalClient(storageOpts)

	if srcurl.IsWildcard() {
		return nil
	}

	obj, err := srcclient.Stat(ctx, srcurl)
	if err != nil {
		return err
	}

	// 'cp dir/ s3://bucket/prefix-without-slash': expect a trailing slash to
	// avoid any surprises.
	if obj.Type.IsDir() && !dsturl.IsBucket() && !dsturl.IsPrefix() {
		return fmt.Errorf("target %q must be a bucket or a prefix", dsturl)
	}

	return nil
}

// guessContentType guesses the content type of the file, first from its
// extension and, failing that, by sniffing the first 512 bytes of content.
func guessContentType(file *os.File) string {
	contentType := mime.TypeByExtension(filepath.Ext(file.Name()))
	if contentType == "" {
		defer file.Seek(0, io.SeekStart)

		const bufsize = 512
		buf, err := io.ReadAll(io.LimitReader(file, bufsize))
		if err != nil {
			return ""
		}

		return http.DetectContentType(buf)
	}
	return contentType
}

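// givenCommand reconstructs the invoked command line (command name followed
// by its arguments) for error reporting and logging.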
func givenCommand(c *cli.Context) string {
	cmd := c.Command.FullName()
	if c.Args().Len() > 0 {
		cmd = fmt.Sprintf("%v %v", cmd, strings.Join(c.Args().Slice(), " "))
	}

	return cmd
}