// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"bufio"
	"context"
	"errors"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"

	"github.com/fatih/color"
	jsoniter "github.com/json-iterator/go"
	"github.com/minio/cli"
	json "github.com/minio/colorjson"
	"github.com/minio/mc/pkg/probe"
	"github.com/minio/pkg/console"
)

// cp command flags.
var (
	cpFlags = []cli.Flag{
		cli.StringFlag{
			Name:  "rewind",
			Usage: "roll back object(s) to current version at specified time",
		},
		cli.StringFlag{
			Name:  "version-id, vid",
			Usage: "select an object version to copy",
		},
		cli.BoolFlag{
			Name:  "recursive, r",
			Usage: "copy recursively",
		},
		cli.StringFlag{
			Name:  "older-than",
			Usage: "copy objects older than L days, M hours and N minutes",
		},
		cli.StringFlag{
			Name:  "newer-than",
			Usage: "copy objects newer than L days, M hours and N minutes",
		},
		cli.StringFlag{
			Name:  "storage-class, sc",
			Usage: "set storage class for new object(s) on target",
		},
		cli.StringFlag{
			Name:  "encrypt",
			Usage: "encrypt/decrypt objects (using server-side encryption with server managed keys)",
		},
		cli.StringFlag{
			Name:  "attr",
			Usage: "add custom metadata for the object",
		},
		cli.BoolFlag{
			Name:  "continue, c",
			Usage: "create or resume copy session",
		},
		cli.BoolFlag{
			Name:  "preserve, a",
			Usage: "preserve filesystem attributes (mode, ownership, timestamps)",
		},
		cli.BoolFlag{
			Name:  "disable-multipart",
			Usage: "disable multipart upload feature",
		},
		cli.BoolFlag{
			Name:  "md5",
			Usage: "force all upload(s) to calculate md5sum checksum",
		},
		cli.StringFlag{
			Name:  "tags",
			Usage: "apply one or more tags to the uploaded objects",
		},
		cli.StringFlag{
			Name:  rmFlag,
			Usage: "retention mode to be applied on the object (governance, compliance)",
		},
		cli.StringFlag{
			Name:  rdFlag,
			Usage: "retention duration for the object in d days or y years",
		},
		cli.StringFlag{
			Name:  lhFlag,
			Usage: "apply legal hold to the copied object (on, off)",
		},
	}
)

var rmFlag = "retention-mode"
var rdFlag = "retention-duration"
var lhFlag = "legal-hold"

// ErrInvalidMetadata reflects invalid metadata format
var ErrInvalidMetadata = errors.New("specified metadata should be of form key1=value1;key2=value2;... and so on")

// Copy command.
var cpCmd = cli.Command{
	Name:         "cp",
	Usage:        "copy objects",
	Action:       mainCopy,
	OnUsageError: onUsageError,
	Before:       setGlobalsFromContext,
	Flags:        append(append(cpFlags, ioFlags...), globalFlags...),
	CustomHelpTemplate: `NAME:
  {{.HelpName}} - {{.Usage}}

USAGE:
  {{.HelpName}} [FLAGS] SOURCE [SOURCE...] TARGET

FLAGS:
  {{range .VisibleFlags}}{{.}}
  {{end}}
ENVIRONMENT VARIABLES:
  MC_ENCRYPT:      list of comma delimited prefixes
  MC_ENCRYPT_KEY:  list of comma delimited prefix=secret values

EXAMPLES:
  01. Copy a list of objects from local file system to Amazon S3 cloud storage.
      {{.Prompt}} {{.HelpName}} Music/*.ogg s3/jukebox/

  02. Copy a folder recursively from MinIO cloud storage to Amazon S3 cloud storage.
      {{.Prompt}} {{.HelpName}} --recursive play/mybucket/burningman2011/ s3/mybucket/

  03. Copy multiple local folders recursively to MinIO cloud storage.
      {{.Prompt}} {{.HelpName}} --recursive backup/2014/ backup/2015/ play/archive/

  04. Copy a bucket recursively from aliased Amazon S3 cloud storage to local filesystem on Windows.
      {{.Prompt}} {{.HelpName}} --recursive s3\documents\2014\ C:\Backups\2014

  05. Copy files older than 7 days and 10 hours from MinIO cloud storage to Amazon S3 cloud storage.
      {{.Prompt}} {{.HelpName}} --older-than 7d10h play/mybucket/burningman2011/ s3/mybucket/

  06. Copy files newer than 7 days and 10 hours from MinIO cloud storage to a local path.
      {{.Prompt}} {{.HelpName}} --newer-than 7d10h play/mybucket/burningman2011/ ~/latest/

  07. Copy an object with name containing unicode characters to Amazon S3 cloud storage.
      {{.Prompt}} {{.HelpName}} 本語 s3/andoria/

  08. Copy a local folder with space separated characters to Amazon S3 cloud storage.
      {{.Prompt}} {{.HelpName}} --recursive 'workdir/documents/May 2014/' s3/miniocloud

  09. Copy a folder with encrypted objects recursively from Amazon S3 to MinIO cloud storage.
      {{.Prompt}} {{.HelpName}} --recursive --encrypt-key "s3/documents/=32byteslongsecretkeymustbegiven1,myminio/documents/=32byteslongsecretkeymustbegiven2" s3/documents/ myminio/documents/

  10. Copy a folder with encrypted objects recursively from Amazon S3 to MinIO cloud storage. In case the encryption key contains non-printable character like tab, pass the
      base64 encoded string as key.
      {{.Prompt}} {{.HelpName}} --recursive --encrypt-key "s3/documents/=MzJieXRlc2xvbmdzZWNyZWFiY2RlZmcJZ2l2ZW5uMjE=,myminio/documents/=MzJieXRlc2xvbmdzZWNyZWFiY2RlZmcJZ2l2ZW5uMjE=" s3/documents/ myminio/documents/

  11. Copy a list of objects from local file system to MinIO cloud storage with specified metadata, separated by ";"
      {{.Prompt}} {{.HelpName}} --attr "key1=value1;key2=value2" Music/*.mp4 play/mybucket/

  12. Copy a folder recursively from MinIO cloud storage to Amazon S3 cloud storage with Cache-Control and custom metadata, separated by ";".
      {{.Prompt}} {{.HelpName}} --attr "Cache-Control=max-age=90000,min-fresh=9000;key1=value1;key2=value2" --recursive play/mybucket/burningman2011/ s3/mybucket/

  13. Copy a text file to an object storage and assign REDUCED_REDUNDANCY storage-class to the uploaded object.
      {{.Prompt}} {{.HelpName}} --storage-class REDUCED_REDUNDANCY myobject.txt play/mybucket

  14. Copy a text file to an object storage and create or resume copy session.
      {{.Prompt}} {{.HelpName}} --recursive --continue dir/ play/mybucket

  15. Copy a text file to an object storage and preserve the file system attribute as metadata.
      {{.Prompt}} {{.HelpName}} -a myobject.txt play/mybucket

  16. Copy a text file to an object storage with object lock mode set to 'GOVERNANCE' with retention duration 1 day.
      {{.Prompt}} {{.HelpName}} --retention-mode governance --retention-duration 1d locked.txt play/locked-bucket/

  17. Copy a text file to an object storage with legal-hold enabled.
      {{.Prompt}} {{.HelpName}} --legal-hold on locked.txt play/locked-bucket/

  18. Copy a text file to an object storage and disable multipart upload feature.
      {{.Prompt}} {{.HelpName}} --disable-multipart myobject.txt play/mybucket

  19. Roll back 10 days in the past to copy the content of 'mybucket'.
      {{.Prompt}} {{.HelpName}} --rewind 10d -r play/mybucket/ /tmp/dest/

  20. Set tags to the uploaded objects.
      {{.Prompt}} {{.HelpName}} -r --tags "category=prod&type=backup" ./data/ play/another-bucket/

`,
}

// copyMessage container for file copy messages
type copyMessage struct {
	Status     string `json:"status"`
	Source     string `json:"source"`
	Target     string `json:"target"`
	Size       int64  `json:"size"`
	TotalCount int64  `json:"totalCount"`
	TotalSize  int64  `json:"totalSize"`
}

// String colorized copy message
func (c copyMessage) String() string {
	return console.Colorize("Copy", fmt.Sprintf("`%s` -> `%s`", c.Source, c.Target))
}

// JSON jsonified copy message
func (c copyMessage) JSON() string {
	c.Status = "success"
	copyMessageBytes, e := json.MarshalIndent(c, "", " ")
	fatalIf(probe.NewError(e), "Unable to marshal into JSON.")

	return string(copyMessageBytes)
}

// Progress - an interface which describes current amount
// of data written.
type Progress interface {
	Get() int64
	SetTotal(int64)
}

// ProgressReader can be used to update the progress of
// an on-going transfer.
type ProgressReader interface {
	io.Reader
	Progress
}

// doCopy - Copy a single file from source to destination
func doCopy(ctx context.Context, cpURLs URLs, pg ProgressReader, encKeyDB map[string][]prefixSSEPair, isMvCmd bool, preserve bool) URLs {
	if cpURLs.Error != nil {
		cpURLs.Error = cpURLs.Error.Trace()
		return cpURLs
	}

	sourceAlias := cpURLs.SourceAlias
	sourceURL := cpURLs.SourceContent.URL
	targetAlias := cpURLs.TargetAlias
	targetURL := cpURLs.TargetContent.URL
	length := cpURLs.SourceContent.Size
	sourcePath := filepath.ToSlash(filepath.Join(sourceAlias, sourceURL.Path))
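
	// With an interactive progress bar, show the current source as the bar
	// caption; otherwise (quiet/JSON modes) print a per-object copy message.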
	if progressReader, ok := pg.(*progressBar); ok {
		progressReader.SetCaption(cpURLs.SourceContent.URL.String() + ": ")
	} else {
		targetPath := filepath.ToSlash(filepath.Join(targetAlias, targetURL.Path))
		printMsg(copyMessage{
			Source:     sourcePath,
			Target:     targetPath,
			Size:       length,
			TotalCount: cpURLs.TotalCount,
			TotalSize:  cpURLs.TotalSize,
		})
	}

	urls := uploadSourceToTargetURL(ctx, cpURLs, pg, encKeyDB, preserve)
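
	// For `mv`, schedule the source object for removal once the copy has succeeded.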
	if isMvCmd && urls.Error == nil {
		rmManager.add(ctx, sourceAlias, sourceURL.String())
	}

	return urls
}

// doCopyFake - Perform a fake copy to update the progress bar appropriately.
func doCopyFake(ctx context.Context, cpURLs URLs, pg Progress) URLs {
	if progressReader, ok := pg.(*progressBar); ok {
		progressReader.ProgressBar.Add64(cpURLs.SourceContent.Size)
	}

	return cpURLs
}

// doPrepareCopyURLs scans the source URL and prepares a list of objects for copying.
func doPrepareCopyURLs(ctx context.Context, session *sessionV8, cancelCopy context.CancelFunc) (totalBytes, totalObjects int64) {
	// Separate source and target. 'cp' can take only one target,
	// but any number of sources.
	sourceURLs := session.Header.CommandArgs[:len(session.Header.CommandArgs)-1]
	targetURL := session.Header.CommandArgs[len(session.Header.CommandArgs)-1] // Last one is target

	// Access recursive flag inside the session header.
	isRecursive := session.Header.CommandBoolFlags["recursive"]
	rewind := session.Header.CommandStringFlags["rewind"]
	versionID := session.Header.CommandStringFlags["version-id"]
	olderThan := session.Header.CommandStringFlags["older-than"]
	newerThan := session.Header.CommandStringFlags["newer-than"]
	encryptKeys := session.Header.CommandStringFlags["encrypt-key"]
	encrypt := session.Header.CommandStringFlags["encrypt"]
	encKeyDB, err := parseAndValidateEncryptionKeys(encryptKeys, encrypt)
	fatalIf(err, "Unable to parse encryption keys.")

	// Create a session data file to store the processed URLs.
	dataFP := session.NewDataWriter()

	var scanBar scanBarFunc
	if !globalQuiet && !globalJSON { // set up progress bar
		scanBar = scanBarFactory()
	}

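	// prepareCopyURLs emits one entry per object to be copied; persist each
	// entry to the session data file so an interrupted copy can be resumed,
	// while accumulating the totals returned to the caller.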
	URLsCh := prepareCopyURLs(ctx, sourceURLs, targetURL, isRecursive, encKeyDB, olderThan, newerThan, parseRewindFlag(rewind), versionID)
	done := false
	for !done {
		select {
		case cpURLs, ok := <-URLsCh:
			if !ok { // Done with URL preparation
				done = true
				break
			}
			if cpURLs.Error != nil {
				// Print in new line and adjust to top so that we don't print over the ongoing scan bar
				if !globalQuiet && !globalJSON {
					console.Eraseline()
				}
				if strings.Contains(cpURLs.Error.ToGoError().Error(), " is a folder.") {
					errorIf(cpURLs.Error.Trace(), "Folder cannot be copied. Please use `...` suffix.")
				} else {
					errorIf(cpURLs.Error.Trace(), "Unable to prepare URL for copying.")
				}
				break
			}

			var jsoniter = jsoniter.ConfigCompatibleWithStandardLibrary
			jsonData, e := jsoniter.Marshal(cpURLs)
			if e != nil {
				session.Delete()
				fatalIf(probe.NewError(e), "Unable to prepare URL for copying. Error in JSON marshaling.")
			}
			dataFP.Write(jsonData)
			dataFP.Write([]byte{'\n'})
			if !globalQuiet && !globalJSON {
				scanBar(cpURLs.SourceContent.URL.String())
			}

			totalBytes += cpURLs.SourceContent.Size
			totalObjects++
		case <-globalContext.Done():
			cancelCopy()
			// Print in new line and adjust to top so that we don't print over the ongoing scan bar
			if !globalQuiet && !globalJSON {
				console.Eraseline()
			}
			session.Delete() // If we are interrupted during the URL scanning, we drop the session.
			os.Exit(0)
		}
	}

	session.Header.TotalBytes = totalBytes
	session.Header.TotalObjects = totalObjects
	session.Save()
	return
}

func doCopySession(ctx context.Context, cancelCopy context.CancelFunc, cli *cli.Context, session *sessionV8, encKeyDB map[string][]prefixSSEPair, isMvCmd bool) error {
	var isCopied func(string) bool
	var totalObjects, totalBytes int64

	var cpURLsCh = make(chan URLs, 10000)

	// Store a progress bar or an accounter
	var pg ProgressReader

	// Enable progress bar reader only during default mode.
	if !globalQuiet && !globalJSON { // set up progress bar
		pg = newProgressBar(totalBytes)
	} else {
		pg = newAccounter(totalBytes)
	}

	sourceURLs := cli.Args()[:len(cli.Args())-1]
	targetURL := cli.Args()[len(cli.Args())-1] // Last one is target

	tgtClnt, err := newClient(targetURL)
	fatalIf(err, "Unable to initialize `"+targetURL+"`.")

	// Check if the target bucket has object locking enabled
	var withLock bool
	if _, _, _, _, err = tgtClnt.GetObjectLockConfig(ctx); err == nil {
		withLock = true
	}

	if session != nil {
		// isCopied returns true if an object has already been copied.
		// This is useful when we resume from a session.
		isCopied = isLastFactory(session.Header.LastCopied)

		if !session.HasData() {
			totalBytes, totalObjects = doPrepareCopyURLs(ctx, session, cancelCopy)
		} else {
			totalBytes, totalObjects = session.Header.TotalBytes, session.Header.TotalObjects
		}

		pg.SetTotal(totalBytes)

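		// Replay the previously prepared copy list from the session data
		// file into cpURLsCh, one JSON-encoded entry per line.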
		go func() {
			var jsoniter = jsoniter.ConfigCompatibleWithStandardLibrary
			// Prepare URL scanner from session data file.
			urlScanner := bufio.NewScanner(session.NewDataReader())
			for {
				if !urlScanner.Scan() || urlScanner.Err() != nil {
					close(cpURLsCh)
					break
				}

				var cpURLs URLs
				if e := jsoniter.Unmarshal([]byte(urlScanner.Text()), &cpURLs); e != nil {
					errorIf(probe.NewError(e), "Unable to unmarshal %s", urlScanner.Text())
					continue
				}

				cpURLsCh <- cpURLs
			}
		}()
	} else {
		// No session to resume, read the flags directly from the command line.
		isRecursive := cli.Bool("recursive")
		olderThan := cli.String("older-than")
		newerThan := cli.String("newer-than")
		rewind := cli.String("rewind")
		versionID := cli.String("version-id")

		go func() {
			totalBytes := int64(0)
			for cpURLs := range prepareCopyURLs(ctx, sourceURLs, targetURL, isRecursive,
				encKeyDB, olderThan, newerThan, parseRewindFlag(rewind), versionID) {
				if cpURLs.Error != nil {
					// Print in new line and adjust to top so that we
					// don't print over the ongoing scan bar
					if !globalQuiet && !globalJSON {
						console.Eraseline()
					}
					if strings.Contains(cpURLs.Error.ToGoError().Error(),
						" is a folder.") {
						errorIf(cpURLs.Error.Trace(),
							"Folder cannot be copied. Please use `...` suffix.")
					} else {
						errorIf(cpURLs.Error.Trace(),
							"Unable to start copying.")
					}
					break
				} else {
					totalBytes += cpURLs.SourceContent.Size
					pg.SetTotal(totalBytes)
					totalObjects++
				}
				cpURLsCh <- cpURLs
			}
			close(cpURLsCh)
		}()
	}

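	// statusCh receives the result of every queued copy task; quitCh tells
	// the dispatcher goroutine below to stop and drain on interrupt.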
	var quitCh = make(chan struct{})
	var statusCh = make(chan URLs)

	parallel := newParallelManager(statusCh)

	go func() {
		gracefulStop := func() {
			parallel.stopAndWait()
			close(statusCh)
		}

		for {
			select {
			case <-quitCh:
				gracefulStop()
				return
			case cpURLs, ok := <-cpURLsCh:
				if !ok {
					gracefulStop()
					return
				}

				// Save total count.
				cpURLs.TotalCount = totalObjects

				// Save totalSize.
				cpURLs.TotalSize = totalBytes

				// Initialize target metadata.
				cpURLs.TargetContent.Metadata = make(map[string]string)

				// Initialize target user metadata.
				cpURLs.TargetContent.UserMetadata = make(map[string]string)

				// Check and handle storage class if passed in command line args
				if storageClass := cli.String("storage-class"); storageClass != "" {
					cpURLs.TargetContent.StorageClass = storageClass
				}

				if rm := cli.String(rmFlag); rm != "" {
					cpURLs.TargetContent.RetentionMode = rm
					cpURLs.TargetContent.RetentionEnabled = true
				}
				if rd := cli.String(rdFlag); rd != "" {
					cpURLs.TargetContent.RetentionDuration = rd
				}
				if lh := cli.String(lhFlag); lh != "" {
					cpURLs.TargetContent.LegalHold = strings.ToUpper(lh)
					cpURLs.TargetContent.LegalHoldEnabled = true
				}

				if tags := cli.String("tags"); tags != "" {
					cpURLs.TargetContent.Metadata["X-Amz-Tagging"] = tags
				}

				preserve := cli.Bool("preserve")
				if cli.String("attr") != "" {
					userMetaMap, _ := getMetaDataEntry(cli.String("attr"))
					for metadataKey, metaDataVal := range userMetaMap {
						cpURLs.TargetContent.UserMetadata[metadataKey] = metaDataVal
					}
				}

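				// Force MD5 checksums when explicitly requested, or when the
				// target bucket has object locking enabled (locked uploads
				// require them).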
				cpURLs.MD5 = cli.Bool("md5") || withLock
				cpURLs.DisableMultipart = cli.Bool("disable-multipart")

				// Verify if previously copied, notify progress bar.
				if isCopied != nil && isCopied(cpURLs.SourceContent.URL.String()) {
					parallel.queueTask(func() URLs {
						return doCopyFake(ctx, cpURLs, pg)
					}, 0)
				} else {
					parallel.queueTask(func() URLs {
						return doCopy(ctx, cpURLs, pg, encKeyDB, isMvCmd, preserve)
					}, cpURLs.SourceContent.Size)
				}
			}
		}
	}()

	var retErr error
	errSeen := false
	cpAllFilesErr := true

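// Wait for copy results. On interrupt, save the session (if any) and exit;
// on per-object failure, record a non-zero exit status and, when a session
// is active, stop so the copy can be resumed later.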
loop:
	for {
		select {
		case <-globalContext.Done():
			close(quitCh)
			cancelCopy()
			// Receive interrupt notification.
			if !globalQuiet && !globalJSON {
				console.Eraseline()
			}
			if session != nil {
				session.CloseAndDie()
			}
			break loop
		case cpURLs, ok := <-statusCh:
			// Status channel is closed, we should return.
			if !ok {
				break loop
			}
			if cpURLs.Error == nil {
				if session != nil {
					session.Header.LastCopied = cpURLs.SourceContent.URL.String()
					session.Save()
				}
				cpAllFilesErr = false
			} else {

				// Set exit status for any copy error
				retErr = exitStatus(globalErrorExitStatus)

				// Print in new line and adjust to top so that we
				// don't print over the ongoing progress bar.
				if !globalQuiet && !globalJSON {
					console.Eraseline()
				}
				errorIf(cpURLs.Error.Trace(cpURLs.SourceContent.URL.String()),
					fmt.Sprintf("Failed to copy `%s`.", cpURLs.SourceContent.URL.String()))
				if isErrIgnored(cpURLs.Error) {
					cpAllFilesErr = false
					continue loop
				}

				errSeen = true
				if progressReader, pgok := pg.(*progressBar); pgok {
					if progressReader.ProgressBar.Get() > 0 {
						writeContSize := (int)(cpURLs.SourceContent.Size)
						totalPGSize := (int)(progressReader.ProgressBar.Total)
						written := (int)(progressReader.ProgressBar.Get())
						if totalPGSize > writeContSize && written > writeContSize {
							progressReader.ProgressBar.Set((written - writeContSize))
							progressReader.ProgressBar.Update()
						}
					}
				}

				if session != nil {
					// For critical errors we should exit. Session
					// can be resumed after the user figures out
					// the problem.
					session.copyCloseAndDie(session.Header.CommandBoolFlags["session"])
				}
			}
		}
	}

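	// Final rendering: erase the progress bar when every copy failed,
	// otherwise finish it; in quiet/JSON mode print the accumulated
	// transfer statistics from the accounter instead.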
	if progressReader, ok := pg.(*progressBar); ok {
		if (errSeen && totalObjects == 1) || (cpAllFilesErr && totalObjects > 1) {
			console.Eraseline()
		} else if progressReader.ProgressBar.Get() > 0 {
			progressReader.ProgressBar.Finish()
		}
	} else {
		if accntReader, ok := pg.(*accounter); ok {
			printMsg(accntReader.Stat())
		}
	}

	return retErr
}

// mainCopy is the entry point for cp command.
func mainCopy(cliCtx *cli.Context) error {
	ctx, cancelCopy := context.WithCancel(globalContext)
	defer cancelCopy()

	// Parse encryption keys per command.
	encKeyDB, err := getEncKeys(cliCtx)
	fatalIf(err, "Unable to parse encryption keys.")

	// Parse metadata.
	userMetaMap := make(map[string]string)
	if cliCtx.String("attr") != "" {
		userMetaMap, err = getMetaDataEntry(cliCtx.String("attr"))
		fatalIf(err, "Unable to parse attribute %v", cliCtx.String("attr"))
	}

	// check 'copy' cli arguments.
	checkCopySyntax(ctx, cliCtx, encKeyDB, false)

	// Additional command specific theme customization.
	console.SetColor("Copy", color.New(color.FgGreen, color.Bold))

	recursive := cliCtx.Bool("recursive")
	rewind := cliCtx.String("rewind")
	versionID := cliCtx.String("version-id")
	olderThan := cliCtx.String("older-than")
	newerThan := cliCtx.String("newer-than")
	storageClass := cliCtx.String("storage-class")
	retentionMode := cliCtx.String(rmFlag)
	retentionDuration := cliCtx.String(rdFlag)
	legalHold := strings.ToUpper(cliCtx.String(lhFlag))
	tags := cliCtx.String("tags")
	sseKeys := os.Getenv("MC_ENCRYPT_KEY")
	if key := cliCtx.String("encrypt-key"); key != "" {
		sseKeys = key
	}

	if sseKeys != "" {
		sseKeys, err = getDecodedKey(sseKeys)
		fatalIf(err, "Unable to parse encryption keys.")
	}
	sse := cliCtx.String("encrypt")

	var session *sessionV8

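	// With --continue, derive a deterministic session ID from the command
	// line and either resume an existing session or seed a new one with the
	// current flags.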
	if cliCtx.Bool("continue") {
		sessionID := getHash("cp", os.Args[1:])
		if isSessionExists(sessionID) {
			session, err = loadSessionV8(sessionID)
			fatalIf(err.Trace(sessionID), "Unable to load session.")
		} else {
			session = newSessionV8(sessionID)
			session.Header.CommandType = "cp"
			session.Header.CommandBoolFlags["recursive"] = recursive
			session.Header.CommandStringFlags["rewind"] = rewind
			session.Header.CommandStringFlags["version-id"] = versionID
			session.Header.CommandStringFlags["older-than"] = olderThan
			session.Header.CommandStringFlags["newer-than"] = newerThan
			session.Header.CommandStringFlags["storage-class"] = storageClass
			session.Header.CommandStringFlags["tags"] = tags
			session.Header.CommandStringFlags[rmFlag] = retentionMode
			session.Header.CommandStringFlags[rdFlag] = retentionDuration
			session.Header.CommandStringFlags[lhFlag] = legalHold
			session.Header.CommandStringFlags["encrypt-key"] = sseKeys
			session.Header.CommandStringFlags["encrypt"] = sse
			session.Header.CommandBoolFlags["session"] = cliCtx.Bool("continue")

			if cliCtx.Bool("preserve") {
				session.Header.CommandBoolFlags["preserve"] = cliCtx.Bool("preserve")
			}
			session.Header.UserMetaData = userMetaMap
			session.Header.CommandBoolFlags["md5"] = cliCtx.Bool("md5")
			session.Header.CommandBoolFlags["disable-multipart"] = cliCtx.Bool("disable-multipart")

			var e error
			if session.Header.RootPath, e = os.Getwd(); e != nil {
				session.Delete()
				fatalIf(probe.NewError(e), "Unable to get current working folder.")
			}

			// extract URLs.
			session.Header.CommandArgs = cliCtx.Args()
		}
	}

	e := doCopySession(ctx, cancelCopy, cliCtx, session, encKeyDB, false)
	if session != nil {
		session.Delete()
	}

	return e
}