1package azblob_test
2
3import (
4	"bytes"
5	"context"
6	"encoding/base64"
7	"encoding/binary"
8	"fmt"
9	"io"
10	"log"
11	"net"
12	"net/http"
13	"net/url"
14	"os"
15	"strings"
16	"time"
17
18	"math/rand"
19
20	"github.com/Azure/azure-pipeline-go/pipeline"
21	"github.com/Azure/azure-storage-blob-go/azblob"
22)
23
24// https://godoc.org/github.com/fluhus/godoc-tricks
25
26func accountInfo() (string, string) {
27	return os.Getenv("ACCOUNT_NAME"), os.Getenv("ACCOUNT_KEY")
28}
29
// This example shows how to get started using the Azure Storage Blob SDK for Go.
// It creates a container, uploads a blob, downloads and verifies it, lists the
// container's blobs one segment at a time, and finally deletes the blob and container.
func Example() {
	// From the Azure portal, get your Storage account's name and account key.
	accountName, accountKey := accountInfo()

	// Use your Storage account's name and key to create a credential object; this is used to access your account.
	credential, err := azblob.NewSharedKeyCredential(accountName, accountKey)
	if err != nil {
		log.Fatal(err)
	}

	// Create a request pipeline that is used to process HTTP(S) requests and responses. It requires
	// your account credentials. In more advanced scenarios, you can configure telemetry, retry policies,
	// logging, and other options. Also, you can configure multiple request pipelines for different scenarios.
	p := azblob.NewPipeline(credential, azblob.PipelineOptions{})

	// From the Azure portal, get your Storage account blob service URL endpoint.
	// The URL typically looks like this: https://<accountname>.blob.core.windows.net
	u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net", accountName))

	// Create an ServiceURL object that wraps the service URL and a request pipeline.
	serviceURL := azblob.NewServiceURL(*u, p)

	// Now, you can use the serviceURL to perform various container and blob operations.

	// All HTTP operations allow you to specify a Go context.Context object to control cancellation/timeout.
	ctx := context.Background() // This example uses a never-expiring context.

	// This example shows several common operations just to get you started.

	// Create a URL that references a to-be-created container in your Azure Storage account.
	// This returns a ContainerURL object that wraps the container's URL and a request pipeline (inherited from serviceURL)
	containerURL := serviceURL.NewContainerURL("mycontainer") // Container names require lowercase

	// Create the container on the service (with no metadata and no public access)
	_, err = containerURL.Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
	if err != nil {
		log.Fatal(err)
	}

	// Create a URL that references a to-be-created blob in your Azure Storage account's container.
	// This returns a BlockBlobURL object that wraps the blob's URL and a request pipeline (inherited from containerURL)
	blobURL := containerURL.NewBlockBlobURL("HelloWorld.txt") // Blob names can be mixed case

	// Create the blob with string (plain text) content.
	data := "Hello World!"
	_, err = blobURL.Upload(ctx, strings.NewReader(data), azblob.BlobHTTPHeaders{ContentType: "text/plain"}, azblob.Metadata{}, azblob.BlobAccessConditions{})
	if err != nil {
		log.Fatal(err)
	}

	// Download the blob's contents and verify that it worked correctly
	get, err := blobURL.Download(ctx, 0, 0, azblob.BlobAccessConditions{}, false)
	if err != nil {
		log.Fatal(err)
	}

	downloadedData := &bytes.Buffer{}
	reader := get.Body(azblob.RetryReaderOptions{})
	downloadedData.ReadFrom(reader) // NOTE: ReadFrom's error is ignored here for brevity; the content check below would catch a short read
	reader.Close()                  // The client must close the response body when finished with it
	if data != downloadedData.String() {
		log.Fatal("downloaded data doesn't match uploaded data")
	}

	// List the blob(s) in our container; since a container may hold millions of blobs, this is done 1 segment at a time.
	for marker := (azblob.Marker{}); marker.NotDone(); { // The parens around Marker{} are required to avoid compiler error.
		// Get a result segment starting with the blob indicated by the current Marker.
		listBlob, err := containerURL.ListBlobsFlatSegment(ctx, marker, azblob.ListBlobsSegmentOptions{})
		if err != nil {
			log.Fatal(err)
		}
		// IMPORTANT: ListBlobs returns the start of the next segment; you MUST use this to get
		// the next segment (after processing the current result segment).
		marker = listBlob.NextMarker

		// Process the blobs returned in this result segment (if the segment is empty, the loop body won't execute)
		for _, blobInfo := range listBlob.Segment.BlobItems {
			fmt.Print("Blob name: " + blobInfo.Name + "\n")
		}
	}

	// Delete the blob we created earlier.
	_, err = blobURL.Delete(ctx, azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{})
	if err != nil {
		log.Fatal(err)
	}

	// Delete the container we created earlier.
	_, err = containerURL.Delete(ctx, azblob.ContainerAccessConditions{})
	if err != nil {
		log.Fatal(err)
	}
}
124
// This example shows how you can configure a pipeline for making HTTP requests to the Azure Storage Blob Service.
// It demonstrates custom retry options, request logging, pipeline-level logging, a custom HTTP sender,
// and how to swap in a differently-configured pipeline via WithPipeline.
func ExampleNewPipeline() {
	// This example shows how to wire in your own logging mechanism (this example uses
	// Go's standard logger to write log information to standard error)
	logger := log.New(os.Stderr, "", log.Ldate|log.Lmicroseconds)

	// Create/configure a request pipeline options object.
	// All PipelineOptions' fields are optional; reasonable defaults are set for anything you do not specify
	po := azblob.PipelineOptions{
		// Set RetryOptions to control how HTTP request are retried when retryable failures occur
		Retry: azblob.RetryOptions{
			Policy:        azblob.RetryPolicyExponential, // Use exponential backoff as opposed to linear
			MaxTries:      3,                             // Try at most 3 times to perform the operation (set to 1 to disable retries)
			TryTimeout:    time.Second * 3,               // Maximum time allowed for any single try
			RetryDelay:    time.Second * 1,               // Backoff amount for each retry (exponential or linear)
			MaxRetryDelay: time.Second * 3,               // Max delay between retries
		},

		// Set RequestLogOptions to control how each HTTP request & its response is logged
		RequestLog: azblob.RequestLogOptions{
			LogWarningIfTryOverThreshold: time.Millisecond * 200, // A successful response taking more than this time to arrive is logged as a warning
		},

		// Set LogOptions to control what & where all pipeline log events go
		Log: pipeline.LogOptions{
			Log: func(s pipeline.LogLevel, m string) { // This func is called to log each event
				// This method is not called for filtered-out severities.
				logger.Output(2, m) // This example uses Go's standard logger
			},
			ShouldLog: func(level pipeline.LogLevel) bool {
				return level <= pipeline.LogWarning // Log all events from warning to more severe
			},
		},

		// Set HTTPSender to override the default HTTP Sender that sends the request over the network
		HTTPSender: pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
			return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
				// Implement the HTTP client that will override the default sender.
				// For example, below HTTP client uses a transport that is different from http.DefaultTransport
				// NOTE(review): this client is constructed on every request; consider hoisting it outside
				// the returned func so the transport (and its connection pool) is reused across requests.
				client := http.Client{
					Transport: &http.Transport{
						Proxy: nil,
						DialContext: (&net.Dialer{
							Timeout:   30 * time.Second,
							KeepAlive: 30 * time.Second,
							DualStack: true,
						}).DialContext,
						MaxIdleConns:          100,
						IdleConnTimeout:       180 * time.Second,
						TLSHandshakeTimeout:   10 * time.Second,
						ExpectContinueTimeout: 1 * time.Second,
					},
				}

				// Send the request over the network
				// NOTE: if err is non-nil, resp is nil and the wrapped response carries a nil *http.Response.
				resp, err := client.Do(request.WithContext(ctx))

				return &httpResponse{response: resp}, err
			}
		}),
	}

	// Create a request pipeline object configured with credentials and with pipeline options. Once created,
	// a pipeline object is goroutine-safe and can be safely used with many XxxURL objects simultaneously.
	p := azblob.NewPipeline(azblob.NewAnonymousCredential(), po) // A pipeline always requires some credential object

	// Once you've created a pipeline object, associate it with an XxxURL object so that you can perform HTTP requests with it.
	u, _ := url.Parse("https://myaccount.blob.core.windows.net")
	serviceURL := azblob.NewServiceURL(*u, p)
	// Use the serviceURL as desired...

	// NOTE: When you use an XxxURL object to create another XxxURL object, the new XxxURL object inherits the
	// same pipeline object as its parent. For example, the containerURL and blobURL objects (created below)
	// all share the same pipeline. Any HTTP operations you perform with these objects share the behavior (retry, logging, etc.)
	containerURL := serviceURL.NewContainerURL("mycontainer")
	blobURL := containerURL.NewBlockBlobURL("ReadMe.txt")

	// If you'd like to perform some operations with different behavior, create a new pipeline object and
	// associate it with a new XxxURL object by passing the new pipeline to the XxxURL object's WithPipeline method.

	// In this example, I reconfigure the retry policies, create a new pipeline, and then create a new
	// ContainerURL object that has the same URL as its parent.
	po.Retry = azblob.RetryOptions{
		Policy:        azblob.RetryPolicyFixed, // Use fixed time backoff
		MaxTries:      4,                       // Try at most 4 times to perform the operation (set to 1 to disable retries)
		TryTimeout:    time.Minute * 1,         // Maximum time allowed for any single try
		RetryDelay:    time.Second * 5,         // Backoff amount for each retry (exponential or linear)
		MaxRetryDelay: time.Second * 10,        // Max delay between retries
	}
	newContainerURL := containerURL.WithPipeline(azblob.NewPipeline(azblob.NewAnonymousCredential(), po))

	// Now, any XxxBlobURL object created using newContainerURL inherits the pipeline with the new retry policy.
	newBlobURL := newContainerURL.NewBlockBlobURL("ReadMe.txt")
	_, _ = blobURL, newBlobURL // Avoid compiler's "declared and not used" error
}
220
221func ExampleStorageError() {
222	// This example shows how to handle errors returned from various XxxURL methods. All these methods return an
223	// object implementing the pipeline.Response interface and an object implementing Go's error interface.
224	// The error result is nil if the request was successful; your code can safely use the Response interface object.
225	// If error is non-nil, the error could be due to:
226
227	// 1. An invalid argument passed to the method. You should not write code to handle these errors;
228	//    instead, fix these errors as they appear during development/testing.
229
230	// 2. A network request didn't reach an Azure Storage Service. This usually happens due to a bad URL or
231	//    faulty networking infrastructure (like a router issue). In this case, an object implementing the
232	//    net.Error interface will be returned. The net.Error interface offers Timeout and Temporary methods
233	//    which return true if the network error is determined to be a timeout or temporary condition. If
234	//    your pipeline uses the retry policy factory, then this policy looks for Timeout/Temporary and
235	//    automatically retries based on the retry options you've configured. Because of the retry policy,
236	//    your code will usually not call the Timeout/Temporary methods explicitly other than possibly logging
237	//    the network failure.
238
239	// 3. A network request did reach the Azure Storage Service but the service failed to perform the
240	//    requested operation. In this case, an object implementing the StorageError interface is returned.
241	//    The StorageError interface also implements the net.Error interface and, if you use the retry policy,
242	//    you would most likely ignore the Timeout/Temporary methods. However, the StorageError interface exposes
243	//    richer information such as a service error code, an error description, details data, and the
244	//    service-returned http.Response. And, from the http.Response, you can get the initiating http.Request.
245
246	u, _ := url.Parse("http://myaccount.blob.core.windows.net/mycontainer")
247	containerURL := azblob.NewContainerURL(*u, azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}))
248	create, err := containerURL.Create(context.Background(), azblob.Metadata{}, azblob.PublicAccessNone)
249
250	if err != nil { // An error occurred
251		if stgErr, ok := err.(azblob.StorageError); ok { // This error is a Service-specific error
252			// StorageError also implements net.Error so you could call its Timeout/Temporary methods if you want.
253			switch stgErr.ServiceCode() { // Compare serviceCode to various ServiceCodeXxx constants
254			case azblob.ServiceCodeContainerAlreadyExists:
255				// You can also look at the http.Response object that failed.
256				if failedResponse := stgErr.Response(); failedResponse != nil {
257					// From the response object, you can get the initiating http.Request object
258					failedRequest := failedResponse.Request
259					_ = failedRequest // Avoid compiler's "declared and not used" error
260				}
261
262			case azblob.ServiceCodeContainerBeingDeleted:
263				// Handle this error ...
264			default:
265				// Handle other errors ...
266			}
267		}
268		log.Fatal(err) // Error is not due to Azure Storage service; networking infrastructure failure
269	}
270
271	// If err is nil, then the method was successful; use the response to access the result
272	_ = create // Avoid compiler's "declared and not used" error
273}
274
275// This example shows how to break a URL into its parts so you can
276// examine and/or change some of its values and then construct a new URL.
277func ExampleBlobURLParts() {
278	// Let's start with a URL that identifies a snapshot of a blob in a container.
279	// The URL also contains a Shared Access Signature (SAS):
280	u, _ := url.Parse("https://myaccount.blob.core.windows.net/mycontainter/ReadMe.txt?" +
281		"snapshot=2011-03-09T01:42:34Z&" +
282		"sv=2015-02-21&sr=b&st=2111-01-09T01:42:34.936Z&se=2222-03-09T01:42:34.936Z&sp=rw&sip=168.1.5.60-168.1.5.70&" +
283		"spr=https,http&si=myIdentifier&ss=bf&srt=s&sig=92836758923659283652983562==")
284
285	// You can parse this URL into its constituent parts:
286	parts := azblob.NewBlobURLParts(*u)
287
288	// Now, we access the parts (this example prints them).
289	fmt.Println(parts.Host, parts.ContainerName, parts.BlobName, parts.Snapshot)
290	sas := parts.SAS
291	fmt.Println(sas.Version(), sas.Resource(), sas.StartTime(), sas.ExpiryTime(), sas.Permissions(),
292		sas.IPRange(), sas.Protocol(), sas.Identifier(), sas.Services(), sas.Signature())
293
294	// You can then change some of the fields and construct a new URL:
295	parts.SAS = azblob.SASQueryParameters{} // Remove the SAS query parameters
296	parts.Snapshot = ""                     // Remove the snapshot timestamp
297	parts.ContainerName = "othercontainer"  // Change the container name
298	// In this example, we'll keep the blob name as is.
299
300	// Construct a new URL from the parts:
301	newURL := parts.URL()
302	fmt.Print(newURL.String())
303	// NOTE: You can pass the new URL to NewBlockBlobURL (or similar methods) to manipulate the blob.
304}
305
306// This example shows how to create and use an Azure Storage account Shared Access Signature (SAS).
307func ExampleAccountSASSignatureValues() {
308	// From the Azure portal, get your Storage account's name and account key.
309	accountName, accountKey := accountInfo()
310
311	// Use your Storage account's name and key to create a credential object; this is required to sign a SAS.
312	credential, err := azblob.NewSharedKeyCredential(accountName, accountKey)
313	if err != nil {
314		log.Fatal(err)
315	}
316
317	// Set the desired SAS signature values and sign them with the shared key credentials to get the SAS query parameters.
318	sasQueryParams, err := azblob.AccountSASSignatureValues{
319		Protocol:      azblob.SASProtocolHTTPS,              // Users MUST use HTTPS (not HTTP)
320		ExpiryTime:    time.Now().UTC().Add(48 * time.Hour), // 48-hours before expiration
321		Permissions:   azblob.AccountSASPermissions{Read: true, List: true}.String(),
322		Services:      azblob.AccountSASServices{Blob: true}.String(),
323		ResourceTypes: azblob.AccountSASResourceTypes{Container: true, Object: true}.String(),
324	}.NewSASQueryParameters(credential)
325	if err != nil {
326		log.Fatal(err)
327	}
328
329	qp := sasQueryParams.Encode()
330	urlToSendToSomeone := fmt.Sprintf("https://%s.blob.core.windows.net?%s", accountName, qp)
331	// At this point, you can send the urlToSendToSomeone to someone via email or any other mechanism you choose.
332
333	// ************************************************************************************************
334
335	// When someone receives the URL, they access the SAS-protected resource with code like this:
336	u, _ := url.Parse(urlToSendToSomeone)
337
338	// Create an ServiceURL object that wraps the service URL (and its SAS) and a pipeline.
339	// When using a SAS URLs, anonymous credentials are required.
340	serviceURL := azblob.NewServiceURL(*u, azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}))
341	// Now, you can use this serviceURL just like any other to make requests of the resource.
342
343	// You can parse a URL into its constituent parts:
344	blobURLParts := azblob.NewBlobURLParts(serviceURL.URL())
345	fmt.Printf("SAS expiry time=%v", blobURLParts.SAS.ExpiryTime())
346
347	_ = serviceURL // Avoid compiler's "declared and not used" error
348}
349
350// This example shows how to create and use a Blob Service Shared Access Signature (SAS).
351func ExampleBlobSASSignatureValues() {
352	// From the Azure portal, get your Storage account's name and account key.
353	accountName, accountKey := accountInfo()
354
355	// Use your Storage account's name and key to create a credential object; this is required to sign a SAS.
356	credential, err := azblob.NewSharedKeyCredential(accountName, accountKey)
357	if err != nil {
358		log.Fatal(err)
359	}
360
361	// This is the name of the container and blob that we're creating a SAS to.
362	containerName := "mycontainer" // Container names require lowercase
363	blobName := "HelloWorld.txt"   // Blob names can be mixed case
364
365	// Set the desired SAS signature values and sign them with the shared key credentials to get the SAS query parameters.
366	sasQueryParams, err := azblob.BlobSASSignatureValues{
367		Protocol:      azblob.SASProtocolHTTPS,              // Users MUST use HTTPS (not HTTP)
368		ExpiryTime:    time.Now().UTC().Add(48 * time.Hour), // 48-hours before expiration
369		ContainerName: containerName,
370		BlobName:      blobName,
371
372		// To produce a container SAS (as opposed to a blob SAS), assign to Permissions using
373		// ContainerSASPermissions and make sure the BlobName field is "" (the default).
374		Permissions: azblob.BlobSASPermissions{Add: true, Read: true, Write: true}.String(),
375	}.NewSASQueryParameters(credential)
376	if err != nil {
377		log.Fatal(err)
378	}
379
380	// Create the URL of the resource you wish to access and append the SAS query parameters.
381	// Since this is a blob SAS, the URL is to the Azure storage blob.
382	qp := sasQueryParams.Encode()
383	urlToSendToSomeone := fmt.Sprintf("https://%s.blob.core.windows.net/%s/%s?%s",
384		accountName, containerName, blobName, qp)
385	// At this point, you can send the urlToSendToSomeone to someone via email or any other mechanism you choose.
386
387	// ************************************************************************************************
388
389	// When someone receives the URL, they access the SAS-protected resource with code like this:
390	u, _ := url.Parse(urlToSendToSomeone)
391
392	// Create an BlobURL object that wraps the blob URL (and its SAS) and a pipeline.
393	// When using a SAS URLs, anonymous credentials are required.
394	blobURL := azblob.NewBlobURL(*u, azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}))
395	// Now, you can use this blobURL just like any other to make requests of the resource.
396
397	// If you have a SAS query parameter string, you can parse it into its parts:
398	blobURLParts := azblob.NewBlobURLParts(blobURL.URL())
399	fmt.Printf("SAS expiry time=%v", blobURLParts.SAS.ExpiryTime())
400
401	_ = blobURL // Avoid compiler's "declared and not used" error
402}
403
404// This example shows how to manipulate a container's permissions.
405func ExampleContainerURL_SetContainerAccessPolicy() {
406	// From the Azure portal, get your Storage account's name and account key.
407	accountName, accountKey := accountInfo()
408
409	// Use your Storage account's name and key to create a credential object; this is used to access your account.
410	credential, err := azblob.NewSharedKeyCredential(accountName, accountKey)
411	if err != nil {
412		log.Fatal(err)
413	}
414
415	// Create an ContainerURL object that wraps the container's URL and a default pipeline.
416	u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer", accountName))
417	containerURL := azblob.NewContainerURL(*u, azblob.NewPipeline(credential, azblob.PipelineOptions{}))
418
419	// All operations allow you to specify a timeout via a Go context.Context object.
420	ctx := context.Background() // This example uses a never-expiring context
421
422	// Create the container (with no metadata and no public access)
423	_, err = containerURL.Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
424	if err != nil {
425		log.Fatal(err)
426	}
427
428	// Create a URL that references a to-be-created blob in your Azure Storage account's container.
429	// This returns a BlockBlobURL object that wraps the blob's URL and a request pipeline (inherited from containerURL)
430	blobURL := containerURL.NewBlockBlobURL("HelloWorld.txt") // Blob names can be mixed case
431
432	// Create the blob and put some text in it
433	_, err = blobURL.Upload(ctx, strings.NewReader("Hello World!"), azblob.BlobHTTPHeaders{ContentType: "text/plain"},
434		azblob.Metadata{}, azblob.BlobAccessConditions{})
435	if err != nil {
436		log.Fatal(err)
437	}
438
439	// Attempt to read the blob via a simple HTTP GET operation
440	rawBlobURL := blobURL.URL()
441	get, err := http.Get(rawBlobURL.String())
442	if err != nil {
443		log.Fatal(err)
444	}
445	if get.StatusCode == http.StatusNotFound {
446		// We expected this error because the service returns an HTTP 404 status code when a blob
447		// exists but the requester does not have permission to access it.
448		// This is how we change the container's permission to allow public/anonymous aceess:
449		_, err := containerURL.SetAccessPolicy(ctx, azblob.PublicAccessBlob, []azblob.SignedIdentifier{}, azblob.ContainerAccessConditions{})
450		if err != nil {
451			log.Fatal(err)
452		}
453
454		// Now, this works:
455		get, err = http.Get(rawBlobURL.String())
456		if err != nil {
457			log.Fatal(err)
458		}
459		defer get.Body.Close()
460		var text bytes.Buffer
461		text.ReadFrom(get.Body)
462		fmt.Print(text.String())
463	}
464}
465
// This example shows how to perform operations on blob conditionally.
// NOTE: the statements below form a deliberate sequence — each conditional call
// reuses the ETag/LastModified values of the PREVIOUS successful upload, and some
// calls are intentionally expected to fail to demonstrate the conditions.
func ExampleBlobAccessConditions() {
	// From the Azure portal, get your Storage account's name and account key.
	accountName, accountKey := accountInfo()

	// Create a BlockBlobURL object that wraps a blob's URL and a default pipeline.
	u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer/Data.txt", accountName))
	credential, err := azblob.NewSharedKeyCredential(accountName, accountKey)
	if err != nil {
		log.Fatal(err)
	}
	blobURL := azblob.NewBlockBlobURL(*u, azblob.NewPipeline(credential, azblob.PipelineOptions{}))

	ctx := context.Background() // This example uses a never-expiring context

	// This helper function displays the results of an operation; it is called frequently below.
	// A StorageError (an expected conditional failure) is printed; any other error is fatal.
	showResult := func(response pipeline.Response, err error) {
		if err != nil {
			if stgErr, ok := err.(azblob.StorageError); !ok {
				log.Fatal(err) // Network failure
			} else {
				fmt.Print("Failure: " + stgErr.Response().Status + "\n")
			}
		} else {
			if get, ok := response.(*azblob.DownloadResponse); ok {
				get.Body(azblob.RetryReaderOptions{}).Close() // The client must close the response body when finished with it
			}
			fmt.Print("Success: " + response.Response().Status + "\n")
		}
	}

	// Create the blob (unconditionally; succeeds)
	upload, err := blobURL.Upload(ctx, strings.NewReader("Text-1"), azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{})
	showResult(upload, err)

	// Download blob content if the blob has been modified since we uploaded it (fails):
	showResult(blobURL.Download(ctx, 0, 0,
		azblob.BlobAccessConditions{ModifiedAccessConditions: azblob.ModifiedAccessConditions{IfModifiedSince: upload.LastModified()}}, false))

	// Download blob content if the blob hasn't been modified in the last 24 hours (fails):
	showResult(blobURL.Download(ctx, 0, 0,
		azblob.BlobAccessConditions{ModifiedAccessConditions: azblob.ModifiedAccessConditions{IfUnmodifiedSince: time.Now().UTC().Add(time.Hour * -24)}}, false))

	// Upload new content if the blob hasn't changed since the version identified by ETag (succeeds):
	// NOTE: this reassigns `upload`, so the following conditions use the NEW ETag.
	upload, err = blobURL.Upload(ctx, strings.NewReader("Text-2"), azblob.BlobHTTPHeaders{}, azblob.Metadata{},
		azblob.BlobAccessConditions{ModifiedAccessConditions: azblob.ModifiedAccessConditions{IfMatch: upload.ETag()}})
	showResult(upload, err)

	// Download content if it has changed since the version identified by ETag (fails):
	showResult(blobURL.Download(ctx, 0, 0,
		azblob.BlobAccessConditions{ModifiedAccessConditions: azblob.ModifiedAccessConditions{IfNoneMatch: upload.ETag()}}, false))

	// Upload content if the blob doesn't already exist (fails):
	showResult(blobURL.Upload(ctx, strings.NewReader("Text-3"), azblob.BlobHTTPHeaders{}, azblob.Metadata{},
		azblob.BlobAccessConditions{ModifiedAccessConditions: azblob.ModifiedAccessConditions{IfNoneMatch: azblob.ETagAny}}))
}
522
523// This examples shows how to create a container with metadata and then how to read & update the metadata.
524func ExampleMetadata_containers() {
525	// From the Azure portal, get your Storage account blob service URL endpoint.
526	accountName, accountKey := accountInfo()
527
528	// Create a ContainerURL object that wraps a soon-to-be-created container's URL and a default pipeline.
529	u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer", accountName))
530	credential, err := azblob.NewSharedKeyCredential(accountName, accountKey)
531	if err != nil {
532		log.Fatal(err)
533	}
534	containerURL := azblob.NewContainerURL(*u, azblob.NewPipeline(credential, azblob.PipelineOptions{}))
535
536	ctx := context.Background() // This example uses a never-expiring context
537
538	// Create a container with some metadata (string key/value pairs)
539	// NOTE: Metadata key names are always converted to lowercase before being sent to the Storage Service.
540	// Therefore, you should always use lowercase letters; especially when querying a map for a metadata key.
541	creatingApp, _ := os.Executable()
542	_, err = containerURL.Create(ctx, azblob.Metadata{"author": "Jeffrey", "app": creatingApp}, azblob.PublicAccessNone)
543	if err != nil {
544		log.Fatal(err)
545	}
546
547	// Query the container's metadata
548	get, err := containerURL.GetProperties(ctx, azblob.LeaseAccessConditions{})
549	if err != nil {
550		log.Fatal(err)
551	}
552
553	// Show the container's metadata
554	metadata := get.NewMetadata()
555	for k, v := range metadata {
556		fmt.Print(k + "=" + v + "\n")
557	}
558
559	// Update the metadata and write it back to the container
560	metadata["author"] = "Aidan" // NOTE: The keyname is in all lowercase letters
561	_, err = containerURL.SetMetadata(ctx, metadata, azblob.ContainerAccessConditions{})
562	if err != nil {
563		log.Fatal(err)
564	}
565
566	// NOTE: The SetMetadata & SetProperties methods update the container's ETag & LastModified properties
567}
568
569// This examples shows how to create a blob with metadata and then how to read & update
570// the blob's read-only properties and metadata.
571func ExampleMetadata_blobs() {
572	// From the Azure portal, get your Storage account blob service URL endpoint.
573	accountName, accountKey := accountInfo()
574
575	// Create a ContainerURL object that wraps a soon-to-be-created blob's URL and a default pipeline.
576	u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer/ReadMe.txt", accountName))
577	credential, err := azblob.NewSharedKeyCredential(accountName, accountKey)
578	if err != nil {
579		log.Fatal(err)
580	}
581	blobURL := azblob.NewBlockBlobURL(*u, azblob.NewPipeline(credential, azblob.PipelineOptions{}))
582
583	ctx := context.Background() // This example uses a never-expiring context
584
585	// Create a blob with metadata (string key/value pairs)
586	// NOTE: Metadata key names are always converted to lowercase before being sent to the Storage Service.
587	// Therefore, you should always use lowercase letters; especially when querying a map for a metadata key.
588	creatingApp, _ := os.Executable()
589	_, err = blobURL.Upload(ctx, strings.NewReader("Some text"), azblob.BlobHTTPHeaders{},
590		azblob.Metadata{"author": "Jeffrey", "app": creatingApp}, azblob.BlobAccessConditions{})
591	if err != nil {
592		log.Fatal(err)
593	}
594
595	// Query the blob's properties and metadata
596	get, err := blobURL.GetProperties(ctx, azblob.BlobAccessConditions{})
597	if err != nil {
598		log.Fatal(err)
599	}
600
601	// Show some of the blob's read-only properties
602	fmt.Println(get.BlobType(), get.ETag(), get.LastModified())
603
604	// Show the blob's metadata
605	metadata := get.NewMetadata()
606	for k, v := range metadata {
607		fmt.Print(k + "=" + v + "\n")
608	}
609
610	// Update the blob's metadata and write it back to the blob
611	metadata["editor"] = "Grant" // Add a new key/value; NOTE: The keyname is in all lowercase letters
612	_, err = blobURL.SetMetadata(ctx, metadata, azblob.BlobAccessConditions{})
613	if err != nil {
614		log.Fatal(err)
615	}
616
617	// NOTE: The SetMetadata method updates the blob's ETag & LastModified properties
618}
619
// This examples shows how to create a blob with HTTP Headers and then how to read & update
// the blob's HTTP headers.
func ExampleBlobHTTPHeaders() {
	// From the Azure portal, get your Storage account blob service URL endpoint.
	accountName, accountKey := accountInfo()

	// Create a ContainerURL object that wraps a soon-to-be-created blob's URL and a default pipeline.
	u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer/ReadMe.txt", accountName))
	credential, err := azblob.NewSharedKeyCredential(accountName, accountKey)
	if err != nil {
		log.Fatal(err)
	}
	blobURL := azblob.NewBlockBlobURL(*u, azblob.NewPipeline(credential, azblob.PipelineOptions{}))

	ctx := context.Background() // This example uses a never-expiring context

	// Create a blob with HTTP headers
	_, err = blobURL.Upload(ctx, strings.NewReader("Some text"),
		azblob.BlobHTTPHeaders{
			ContentType:        "text/html; charset=utf-8",
			ContentDisposition: "attachment",
		}, azblob.Metadata{}, azblob.BlobAccessConditions{})
	if err != nil {
		log.Fatal(err)
	}

	// GetProperties returns the blob's properties, HTTP headers, and metadata
	get, err := blobURL.GetProperties(ctx, azblob.BlobAccessConditions{})
	if err != nil {
		log.Fatal(err)
	}

	// Show some of the blob's read-only properties
	fmt.Println(get.BlobType(), get.ETag(), get.LastModified())

	// Shows some of the blob's HTTP Headers
	httpHeaders := get.NewHTTPHeaders()
	fmt.Println(httpHeaders.ContentType, httpHeaders.ContentDisposition)

	// Update the blob's HTTP Headers and write them back to the blob
	httpHeaders.ContentType = "text/plain"
	_, err = blobURL.SetHTTPHeaders(ctx, httpHeaders, azblob.BlobAccessConditions{})
	if err != nil {
		log.Fatal(err)
	}

	// NOTE: The SetHTTPHeaders method updates the blob's ETag & LastModified properties
}
668
669// ExampleBlockBlobURL shows how to upload a lot of data (in blocks) to a blob.
670// A block blob can have a maximum of 50,000 blocks; each block can have a maximum of 100MB.
671// Therefore, the maximum size of a block blob is slightly more than 4.75 TB (100 MB X 50,000 blocks).
672func ExampleBlockBlobURL() {
673	// From the Azure portal, get your Storage account blob service URL endpoint.
674	accountName, accountKey := accountInfo()
675
676	// Create a ContainerURL object that wraps a soon-to-be-created blob's URL and a default pipeline.
677	u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer/MyBlockBlob.txt", accountName))
678	credential, err := azblob.NewSharedKeyCredential(accountName, accountKey)
679	if err != nil {
680		log.Fatal(err)
681	}
682	blobURL := azblob.NewBlockBlobURL(*u, azblob.NewPipeline(credential, azblob.PipelineOptions{}))
683
684	ctx := context.Background() // This example uses a never-expiring context
685
686	// These helper functions convert a binary block ID to a base-64 string and vice versa
687	// NOTE: The blockID must be <= 64 bytes and ALL blockIDs for the block must be the same length
688	blockIDBinaryToBase64 := func(blockID []byte) string { return base64.StdEncoding.EncodeToString(blockID) }
689	blockIDBase64ToBinary := func(blockID string) []byte { binary, _ := base64.StdEncoding.DecodeString(blockID); return binary }
690
691	// These helper functions convert an int block ID to a base-64 string and vice versa
692	blockIDIntToBase64 := func(blockID int) string {
693		binaryBlockID := (&[4]byte{})[:] // All block IDs are 4 bytes long
694		binary.LittleEndian.PutUint32(binaryBlockID, uint32(blockID))
695		return blockIDBinaryToBase64(binaryBlockID)
696	}
697	blockIDBase64ToInt := func(blockID string) int {
698		blockIDBase64ToBinary(blockID)
699		return int(binary.LittleEndian.Uint32(blockIDBase64ToBinary(blockID)))
700	}
701
702	// Upload 4 blocks to the blob (these blocks are tiny; they can be up to 100MB each)
703	words := []string{"Azure ", "Storage ", "Block ", "Blob."}
704	base64BlockIDs := make([]string, len(words)) // The collection of block IDs (base 64 strings)
705
706	// Upload each block sequentially (one after the other); for better performance, you want to upload multiple blocks in parallel)
707	for index, word := range words {
708		// This example uses the index as the block ID; convert the index/ID into a base-64 encoded string as required by the service.
709		// NOTE: Over the lifetime of a blob, all block IDs (before base 64 encoding) must be the same length (this example uses 4 byte block IDs).
710		base64BlockIDs[index] = blockIDIntToBase64(index) // Some people use UUIDs for block IDs
711
712		// Upload a block to this blob specifying the Block ID and its content (up to 100MB); this block is uncommitted.
713		_, err := blobURL.StageBlock(ctx, base64BlockIDs[index], strings.NewReader(word), azblob.LeaseAccessConditions{}, nil)
714		if err != nil {
715			log.Fatal(err)
716		}
717	}
718
719	// After all the blocks are uploaded, atomically commit them to the blob.
720	_, err = blobURL.CommitBlockList(ctx, base64BlockIDs, azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{})
721	if err != nil {
722		log.Fatal(err)
723	}
724
725	// For the blob, show each block (ID and size) that is a committed part of it.
726	getBlock, err := blobURL.GetBlockList(ctx, azblob.BlockListAll, azblob.LeaseAccessConditions{})
727	if err != nil {
728		log.Fatal(err)
729	}
730	for _, block := range getBlock.CommittedBlocks {
731		fmt.Printf("Block ID=%d, Size=%d\n", blockIDBase64ToInt(block.Name), block.Size)
732	}
733
734	// Download the blob in its entirety; download operations do not take blocks into account.
735	// NOTE: For really large blobs, downloading them like allocates a lot of memory.
736	get, err := blobURL.Download(ctx, 0, 0, azblob.BlobAccessConditions{}, false)
737	if err != nil {
738		log.Fatal(err)
739	}
740	blobData := &bytes.Buffer{}
741	reader := get.Body(azblob.RetryReaderOptions{})
742	blobData.ReadFrom(reader)
743	reader.Close() // The client must close the response body when finished with it
744	fmt.Println(blobData)
745}
746
747// ExampleAppendBlobURL shows how to append data (in blocks) to an append blob.
748// An append blob can have a maximum of 50,000 blocks; each block can have a maximum of 100MB.
749// Therefore, the maximum size of an append blob is slightly more than 4.75 TB (100 MB X 50,000 blocks).
750func ExampleAppendBlobURL() {
751	// From the Azure portal, get your Storage account blob service URL endpoint.
752	accountName, accountKey := accountInfo()
753
754	// Create a ContainerURL object that wraps a soon-to-be-created blob's URL and a default pipeline.
755	u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer/MyAppendBlob.txt", accountName))
756	credential, err := azblob.NewSharedKeyCredential(accountName, accountKey)
757	if err != nil {
758		log.Fatal(err)
759	}
760	appendBlobURL := azblob.NewAppendBlobURL(*u, azblob.NewPipeline(credential, azblob.PipelineOptions{}))
761
762	ctx := context.Background() // This example uses a never-expiring context
763	_, err = appendBlobURL.Create(ctx, azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{})
764	if err != nil {
765		log.Fatal(err)
766	}
767
768	for i := 0; i < 5; i++ { // Append 5 blocks to the append blob
769		_, err := appendBlobURL.AppendBlock(ctx, strings.NewReader(fmt.Sprintf("Appending block #%d\n", i)), azblob.AppendBlobAccessConditions{}, nil)
770		if err != nil {
771			log.Fatal(err)
772		}
773	}
774
775	// Download the entire append blob's contents and show it.
776	get, err := appendBlobURL.Download(ctx, 0, 0, azblob.BlobAccessConditions{}, false)
777	if err != nil {
778		log.Fatal(err)
779	}
780	b := bytes.Buffer{}
781	reader := get.Body(azblob.RetryReaderOptions{})
782	b.ReadFrom(reader)
783	reader.Close() // The client must close the response body when finished with it
784	fmt.Println(b.String())
785}
786
787// ExamplePageBlobURL shows how to manipulate a page blob with PageBlobURL.
788// A page blob is a collection of 512-byte pages optimized for random read and write operations.
789// The maximum size for a page blob is 8 TB.
790func ExamplePageBlobURL() {
791	// From the Azure portal, get your Storage account blob service URL endpoint.
792	accountName, accountKey := accountInfo()
793
794	// Create a ContainerURL object that wraps a soon-to-be-created blob's URL and a default pipeline.
795	u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer/MyPageBlob.txt", accountName))
796	credential, err := azblob.NewSharedKeyCredential(accountName, accountKey)
797	if err != nil {
798		log.Fatal(err)
799	}
800	blobURL := azblob.NewPageBlobURL(*u, azblob.NewPipeline(credential, azblob.PipelineOptions{}))
801
802	ctx := context.Background() // This example uses a never-expiring context
803	_, err = blobURL.Create(ctx, azblob.PageBlobPageBytes*4, 0, azblob.BlobHTTPHeaders{},
804		azblob.Metadata{}, azblob.BlobAccessConditions{})
805	if err != nil {
806		log.Fatal(err)
807	}
808
809	page := [azblob.PageBlobPageBytes]byte{}
810	copy(page[:], "Page 0")
811	_, err = blobURL.UploadPages(ctx, 0*azblob.PageBlobPageBytes, bytes.NewReader(page[:]), azblob.PageBlobAccessConditions{}, nil)
812	if err != nil {
813		log.Fatal(err)
814	}
815
816	copy(page[:], "Page 1")
817	_, err = blobURL.UploadPages(ctx, 2*azblob.PageBlobPageBytes, bytes.NewReader(page[:]), azblob.PageBlobAccessConditions{}, nil)
818	if err != nil {
819		log.Fatal(err)
820	}
821
822	getPages, err := blobURL.GetPageRanges(ctx, 0*azblob.PageBlobPageBytes, 10*azblob.PageBlobPageBytes, azblob.BlobAccessConditions{})
823	if err != nil {
824		log.Fatal(err)
825	}
826	for _, pr := range getPages.PageRange {
827		fmt.Printf("Start=%d, End=%d\n", pr.Start, pr.End)
828	}
829
830	_, err = blobURL.ClearPages(ctx, 0*azblob.PageBlobPageBytes, 1*azblob.PageBlobPageBytes, azblob.PageBlobAccessConditions{})
831	if err != nil {
832		log.Fatal(err)
833	}
834
835	getPages, err = blobURL.GetPageRanges(ctx, 0*azblob.PageBlobPageBytes, 10*azblob.PageBlobPageBytes, azblob.BlobAccessConditions{})
836	if err != nil {
837		log.Fatal(err)
838	}
839	for _, pr := range getPages.PageRange {
840		fmt.Printf("Start=%d, End=%d\n", pr.Start, pr.End)
841	}
842
843	get, err := blobURL.Download(ctx, 0, 0, azblob.BlobAccessConditions{}, false)
844	if err != nil {
845		log.Fatal(err)
846	}
847	blobData := &bytes.Buffer{}
848	reader := get.Body(azblob.RetryReaderOptions{})
849	blobData.ReadFrom(reader)
850	reader.Close() // The client must close the response body when finished with it
851	fmt.Printf("%#v", blobData.Bytes())
852}
853
854// This example show how to create a blob, take a snapshot of it, update the base blob,
855// read from the blob snapshot, list blobs with their snapshots, and hot to delete blob snapshots.
856func Example_blobSnapshots() {
857	// From the Azure portal, get your Storage account blob service URL endpoint.
858	accountName, accountKey := accountInfo()
859
860	// Create a ContainerURL object to a container where we'll create a blob and its snapshot.
861	u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer", accountName))
862	credential, err := azblob.NewSharedKeyCredential(accountName, accountKey)
863	if err != nil {
864		log.Fatal(err)
865	}
866	containerURL := azblob.NewContainerURL(*u, azblob.NewPipeline(credential, azblob.PipelineOptions{}))
867
868	// Create a BlockBlobURL object to a blob in the container.
869	baseBlobURL := containerURL.NewBlockBlobURL("Original.txt")
870
871	ctx := context.Background() // This example uses a never-expiring context
872
873	// Create the original blob:
874	_, err = baseBlobURL.Upload(ctx, strings.NewReader("Some text"), azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{})
875	if err != nil {
876		log.Fatal(err)
877	}
878
879	// Create a snapshot of the original blob & save its timestamp:
880	createSnapshot, err := baseBlobURL.CreateSnapshot(ctx, azblob.Metadata{}, azblob.BlobAccessConditions{})
881	snapshot := createSnapshot.Snapshot()
882
883	// Modify the original blob & show it:
884	_, err = baseBlobURL.Upload(ctx, strings.NewReader("New text"), azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{})
885	if err != nil {
886		log.Fatal(err)
887	}
888
889	get, err := baseBlobURL.Download(ctx, 0, 0, azblob.BlobAccessConditions{}, false)
890	b := bytes.Buffer{}
891	reader := get.Body(azblob.RetryReaderOptions{})
892	b.ReadFrom(reader)
893	reader.Close() // The client must close the response body when finished with it
894	fmt.Println(b.String())
895
896	// Show snapshot blob via original blob URI & snapshot time:
897	snapshotBlobURL := baseBlobURL.WithSnapshot(snapshot)
898	get, err = snapshotBlobURL.Download(ctx, 0, 0, azblob.BlobAccessConditions{}, false)
899	b.Reset()
900	reader = get.Body(azblob.RetryReaderOptions{})
901	b.ReadFrom(reader)
902	reader.Close() // The client must close the response body when finished with it
903	fmt.Println(b.String())
904
905	// FYI: You can get the base blob URL from one of its snapshot by passing "" to WithSnapshot:
906	baseBlobURL = snapshotBlobURL.WithSnapshot("")
907
908	// Show all blobs in the container with their snapshots:
909	// List the blob(s) in our container; since a container may hold millions of blobs, this is done 1 segment at a time.
910	for marker := (azblob.Marker{}); marker.NotDone(); { // The parens around Marker{} are required to avoid compiler error.
911		// Get a result segment starting with the blob indicated by the current Marker.
912		listBlobs, err := containerURL.ListBlobsFlatSegment(ctx, marker, azblob.ListBlobsSegmentOptions{
913			Details: azblob.BlobListingDetails{Snapshots: true}})
914		if err != nil {
915			log.Fatal(err)
916		}
917		// IMPORTANT: ListBlobs returns the start of the next segment; you MUST use this to get
918		// the next segment (after processing the current result segment).
919		marker = listBlobs.NextMarker
920
921		// Process the blobs returned in this result segment (if the segment is empty, the loop body won't execute)
922		for _, blobInfo := range listBlobs.Segment.BlobItems {
923			snaptime := "N/A"
924			if blobInfo.Snapshot != "" {
925				snaptime = blobInfo.Snapshot
926			}
927			fmt.Printf("Blob name: %s, Snapshot: %s\n", blobInfo.Name, snaptime)
928		}
929	}
930
931	// Promote read-only snapshot to writable base blob:
932	_, err = baseBlobURL.StartCopyFromURL(ctx, snapshotBlobURL.URL(), azblob.Metadata{}, azblob.ModifiedAccessConditions{}, azblob.BlobAccessConditions{})
933	if err != nil {
934		log.Fatal(err)
935	}
936
937	// When calling Delete on a base blob:
938	// DeleteSnapshotsOptionOnly deletes all the base blob's snapshots but not the base blob itself
939	// DeleteSnapshotsOptionInclude deletes the base blob & all its snapshots.
940	// DeleteSnapshotOptionNone produces an error if the base blob has any snapshots.
941	_, err = baseBlobURL.Delete(ctx, azblob.DeleteSnapshotsOptionInclude, azblob.BlobAccessConditions{})
942	if err != nil {
943		log.Fatal(err)
944	}
945}
946
947func Example_progressUploadDownload() {
948	// Create a request pipeline using your Storage account's name and account key.
949	accountName, accountKey := accountInfo()
950	credential, err := azblob.NewSharedKeyCredential(accountName, accountKey)
951	if err != nil {
952		log.Fatal(err)
953	}
954	p := azblob.NewPipeline(credential, azblob.PipelineOptions{})
955
956	// From the Azure portal, get your Storage account blob service URL endpoint.
957	cURL, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer", accountName))
958
959	// Create an ServiceURL object that wraps the service URL and a request pipeline to making requests.
960	containerURL := azblob.NewContainerURL(*cURL, p)
961
962	ctx := context.Background() // This example uses a never-expiring context
963	// Here's how to create a blob with HTTP headers and metadata (I'm using the same metadata that was put on the container):
964	blobURL := containerURL.NewBlockBlobURL("Data.bin")
965
966	// requestBody is the stream of data to write
967	requestBody := strings.NewReader("Some text to write")
968
969	// Wrap the request body in a RequestBodyProgress and pass a callback function for progress reporting.
970	_, err = blobURL.Upload(ctx,
971		pipeline.NewRequestBodyProgress(requestBody, func(bytesTransferred int64) {
972			fmt.Printf("Wrote %d of %d bytes.", bytesTransferred, requestBody.Size())
973		}),
974		azblob.BlobHTTPHeaders{
975			ContentType:        "text/html; charset=utf-8",
976			ContentDisposition: "attachment",
977		}, azblob.Metadata{}, azblob.BlobAccessConditions{})
978	if err != nil {
979		log.Fatal(err)
980	}
981
982	// Here's how to read the blob's data with progress reporting:
983	get, err := blobURL.Download(ctx, 0, 0, azblob.BlobAccessConditions{}, false)
984	if err != nil {
985		log.Fatal(err)
986	}
987
988	// Wrap the response body in a ResponseBodyProgress and pass a callback function for progress reporting.
989	responseBody := pipeline.NewResponseBodyProgress(get.Body(azblob.RetryReaderOptions{}),
990		func(bytesTransferred int64) {
991			fmt.Printf("Read %d of %d bytes.", bytesTransferred, get.ContentLength())
992		})
993
994	downloadedData := &bytes.Buffer{}
995	downloadedData.ReadFrom(responseBody)
996	responseBody.Close() // The client must close the response body when finished with it
997	// The downloaded blob data is in downloadData's buffer
998}
999
1000// This example shows how to copy a source document on the Internet to a blob.
1001func ExampleBlobURL_startCopy() {
1002	// From the Azure portal, get your Storage account blob service URL endpoint.
1003	accountName, accountKey := accountInfo()
1004
1005	// Create a ContainerURL object to a container where we'll create a blob and its snapshot.
1006	// Create a BlockBlobURL object to a blob in the container.
1007	u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer/CopiedBlob.bin", accountName))
1008	credential, err := azblob.NewSharedKeyCredential(accountName, accountKey)
1009	if err != nil {
1010		log.Fatal(err)
1011	}
1012	blobURL := azblob.NewBlobURL(*u, azblob.NewPipeline(credential, azblob.PipelineOptions{}))
1013
1014	ctx := context.Background() // This example uses a never-expiring context
1015
1016	src, _ := url.Parse("https://cdn2.auth0.com/docs/media/addons/azure_blob.svg")
1017	startCopy, err := blobURL.StartCopyFromURL(ctx, *src, nil, azblob.ModifiedAccessConditions{}, azblob.BlobAccessConditions{})
1018	if err != nil {
1019		log.Fatal(err)
1020	}
1021
1022	copyID := startCopy.CopyID()
1023	copyStatus := startCopy.CopyStatus()
1024	for copyStatus == azblob.CopyStatusPending {
1025		time.Sleep(time.Second * 2)
1026		getMetadata, err := blobURL.GetProperties(ctx, azblob.BlobAccessConditions{})
1027		if err != nil {
1028			log.Fatal(err)
1029		}
1030		copyStatus = getMetadata.CopyStatus()
1031	}
1032	fmt.Printf("Copy from %s to %s: ID=%s, Status=%s\n", src.String(), blobURL, copyID, copyStatus)
1033}
1034
1035// This example shows how to copy a large stream in blocks (chunks) to a block blob.
1036func ExampleUploadFileToBlockBlobAndDownloadItBack() {
1037	file, err := os.Open("BigFile.bin") // Open the file we want to upload
1038	if err != nil {
1039		log.Fatal(err)
1040	}
1041	defer file.Close()
1042	fileSize, err := file.Stat() // Get the size of the file (stream)
1043	if err != nil {
1044		log.Fatal(err)
1045	}
1046
1047	// From the Azure portal, get your Storage account blob service URL endpoint.
1048	accountName, accountKey := accountInfo()
1049
1050	// Create a BlockBlobURL object to a blob in the container (we assume the container already exists).
1051	u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer/BigBlockBlob.bin", accountName))
1052	credential, err := azblob.NewSharedKeyCredential(accountName, accountKey)
1053	if err != nil {
1054		log.Fatal(err)
1055	}
1056	blockBlobURL := azblob.NewBlockBlobURL(*u, azblob.NewPipeline(credential, azblob.PipelineOptions{}))
1057
1058	ctx := context.Background() // This example uses a never-expiring context
1059
1060	// Pass the Context, stream, stream size, block blob URL, and options to StreamToBlockBlob
1061	response, err := azblob.UploadFileToBlockBlob(ctx, file, blockBlobURL,
1062		azblob.UploadToBlockBlobOptions{
1063			// If Progress is non-nil, this function is called periodically as bytes are uploaded.
1064			Progress: func(bytesTransferred int64) {
1065				fmt.Printf("Uploaded %d of %d bytes.\n", bytesTransferred, fileSize.Size())
1066			},
1067		})
1068	if err != nil {
1069		log.Fatal(err)
1070	}
1071	_ = response // Avoid compiler's "declared and not used" error
1072
1073	// Set up file to download the blob to
1074	destFileName := "BigFile-downloaded.bin"
1075	destFile, err := os.Create(destFileName)
1076	defer destFile.Close()
1077
1078	// Perform download
1079	err = azblob.DownloadBlobToFile(context.Background(), blockBlobURL.BlobURL, 0, azblob.CountToEnd, destFile,
1080		azblob.DownloadFromBlobOptions{
1081			// If Progress is non-nil, this function is called periodically as bytes are uploaded.
1082			Progress: func(bytesTransferred int64) {
1083				fmt.Printf("Downloaded %d of %d bytes.\n", bytesTransferred, fileSize.Size())
1084			}})
1085
1086	if err != nil {
1087		log.Fatal(err)
1088	}
1089}
1090
1091// This example shows how to download a large stream with intelligent retries. Specifically, if
1092// the connection fails while reading, continuing to read from this stream initiates a new
1093// GetBlob call passing a range that starts from the last byte successfully read before the failure.
1094func ExampleBlobUrl_Download() {
1095	// From the Azure portal, get your Storage account blob service URL endpoint.
1096	accountName, accountKey := accountInfo()
1097
1098	// Create a BlobURL object to a blob in the container (we assume the container & blob already exist).
1099	u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer/BigBlob.bin", accountName))
1100	credential, err := azblob.NewSharedKeyCredential(accountName, accountKey)
1101	if err != nil {
1102		log.Fatal(err)
1103	}
1104	blobURL := azblob.NewBlobURL(*u, azblob.NewPipeline(credential, azblob.PipelineOptions{}))
1105
1106	contentLength := int64(0) // Used for progress reporting to report the total number of bytes being downloaded.
1107
1108	// Download returns an intelligent retryable stream around a blob; it returns an io.ReadCloser.
1109	dr, err := blobURL.Download(context.TODO(), 0, -1, azblob.BlobAccessConditions{}, false)
1110	if err != nil {
1111		log.Fatal(err)
1112	}
1113	rs := dr.Body(azblob.RetryReaderOptions{})
1114
1115	// NewResponseBodyProgress wraps the GetRetryStream with progress reporting; it returns an io.ReadCloser.
1116	stream := pipeline.NewResponseBodyProgress(rs,
1117		func(bytesTransferred int64) {
1118			fmt.Printf("Downloaded %d of %d bytes.\n", bytesTransferred, contentLength)
1119		})
1120	defer stream.Close() // The client must close the response body when finished with it
1121
1122	file, err := os.Create("BigFile.bin") // Create the file to hold the downloaded blob contents.
1123	if err != nil {
1124		log.Fatal(err)
1125	}
1126	defer file.Close()
1127
1128	written, err := io.Copy(file, stream) // Write to the file by reading from the blob (with intelligent retries).
1129	if err != nil {
1130		log.Fatal(err)
1131	}
1132	_ = written // Avoid compiler's "declared and not used" error
1133}
1134
1135func ExampleUploadStreamToBlockBlob() {
1136	// From the Azure portal, get your Storage account blob service URL endpoint.
1137	accountName, accountKey := accountInfo()
1138
1139	// Create a BlockBlobURL object to a blob in the container (we assume the container already exists).
1140	u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer/BigBlockBlob.bin", accountName))
1141	credential, err := azblob.NewSharedKeyCredential(accountName, accountKey)
1142	if err != nil {
1143		log.Fatal(err)
1144	}
1145	blockBlobURL := azblob.NewBlockBlobURL(*u, azblob.NewPipeline(credential, azblob.PipelineOptions{}))
1146
1147	ctx := context.Background() // This example uses a never-expiring context
1148
1149	// Create some data to test the upload stream
1150	blobSize := 8 * 1024 * 1024
1151	data := make([]byte, blobSize)
1152	rand.Read(data)
1153
1154	// Perform UploadStreamToBlockBlob
1155	bufferSize := 2 * 1024 * 1024 // Configure the size of the rotating buffers that are used when uploading
1156	maxBuffers := 3               // Configure the number of rotating buffers that are used when uploading
1157	_, err = azblob.UploadStreamToBlockBlob(ctx, bytes.NewReader(data), blockBlobURL,
1158		azblob.UploadStreamToBlockBlobOptions{BufferSize: bufferSize, MaxBuffers: maxBuffers})
1159
1160	// Verify that upload was successful
1161	if err != nil {
1162		log.Fatal(err)
1163	}
1164}
1165
1166// This example shows how to perform various lease operations on a container.
1167// The same lease operations can be performed on individual blobs as well.
1168// A lease on a container prevents it from being deleted by others, while a lease on a blob
1169// protects it from both modifications and deletions.
1170func ExampleLeaseContainer() {
1171	// From the Azure portal, get your Storage account's name and account key.
1172	accountName, accountKey := accountInfo()
1173
1174	// Use your Storage account's name and key to create a credential object; this is used to access your account.
1175	credential, err := azblob.NewSharedKeyCredential(accountName, accountKey)
1176	if err != nil {
1177		log.Fatal(err)
1178	}
1179
1180	// Create an ContainerURL object that wraps the container's URL and a default pipeline.
1181	u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer", accountName))
1182	containerURL := azblob.NewContainerURL(*u, azblob.NewPipeline(credential, azblob.PipelineOptions{}))
1183
1184	// All operations allow you to specify a timeout via a Go context.Context object.
1185	ctx := context.Background() // This example uses a never-expiring context
1186
1187	// Now acquire a lease on the container.
1188	// You can choose to pass an empty string for proposed ID so that the service automatically assigns one for you.
1189	acquireLeaseResponse, err := containerURL.AcquireLease(ctx, "", 60, azblob.ModifiedAccessConditions{})
1190	if err != nil {
1191		log.Fatal(err)
1192	}
1193	fmt.Println("The container is leased for delete operations with lease ID", acquireLeaseResponse.LeaseID())
1194
1195	// The container cannot be deleted without providing the lease ID.
1196	_, err = containerURL.Delete(ctx, azblob.ContainerAccessConditions{})
1197	if err == nil {
1198		log.Fatal("delete should have failed")
1199	}
1200	fmt.Println("The container cannot be deleted while there is an active lease")
1201
1202	// We can release the lease now and the container can be deleted.
1203	_, err = containerURL.ReleaseLease(ctx, acquireLeaseResponse.LeaseID(), azblob.ModifiedAccessConditions{})
1204	if err != nil {
1205		log.Fatal(err)
1206	}
1207	fmt.Println("The lease on the container is now released")
1208
1209	// Acquire a lease again to perform other operations.
1210	acquireLeaseResponse, err = containerURL.AcquireLease(ctx, "", 60, azblob.ModifiedAccessConditions{})
1211	if err != nil {
1212		log.Fatal(err)
1213	}
1214	fmt.Println("The container is leased again with lease ID", acquireLeaseResponse.LeaseID())
1215
1216	// We can change the ID of an existing lease.
1217	// A lease ID can be any valid GUID string format.
1218	newLeaseID := newUUID()
1219	newLeaseID[0] = 1
1220	changeLeaseResponse, err := containerURL.ChangeLease(ctx, acquireLeaseResponse.LeaseID(), newLeaseID.String(), azblob.ModifiedAccessConditions{})
1221	if err != nil {
1222		log.Fatal(err)
1223	}
1224	fmt.Println("The lease ID was changed to", changeLeaseResponse.LeaseID())
1225
1226	// The lease can be renewed.
1227	renewLeaseResponse, err := containerURL.RenewLease(ctx, changeLeaseResponse.LeaseID(), azblob.ModifiedAccessConditions{})
1228	if err != nil {
1229		log.Fatal(err)
1230	}
1231	fmt.Println("The lease was renewed with the same ID", renewLeaseResponse.LeaseID())
1232
1233	// Finally, the lease can be broken and we could prevent others from acquiring a lease for a period of time
1234	_, err = containerURL.BreakLease(ctx, 60, azblob.ModifiedAccessConditions{})
1235	if err != nil {
1236		log.Fatal(err)
1237	}
1238	fmt.Println("The lease was borken, and nobody can acquire a lease for 60 seconds")
1239}
1240
1241// This example shows how to list blobs with hierarchy, by using a delimiter.
1242func ExampleListBlobsHierarchy() {
1243	// From the Azure portal, get your Storage account's name and account key.
1244	accountName, accountKey := accountInfo()
1245
1246	// Use your Storage account's name and key to create a credential object; this is used to access your account.
1247	credential, err := azblob.NewSharedKeyCredential(accountName, accountKey)
1248	if err != nil {
1249		log.Fatal(err)
1250	}
1251
1252	// Create an ContainerURL object that wraps the container's URL and a default pipeline.
1253	u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer", accountName))
1254	containerURL := azblob.NewContainerURL(*u, azblob.NewPipeline(credential, azblob.PipelineOptions{}))
1255
1256	// All operations allow you to specify a timeout via a Go context.Context object.
1257	ctx := context.Background() // This example uses a never-expiring context
1258
1259	// Create 4 blobs: 3 of which have a virtual directory
1260	blobNames := []string{"a/1", "a/2", "b/1", "boaty_mcboatface"}
1261	for _, blobName := range blobNames {
1262		blobURL := containerURL.NewBlockBlobURL(blobName)
1263		_, err := blobURL.Upload(ctx, strings.NewReader("test"), azblob.BlobHTTPHeaders{}, nil, azblob.BlobAccessConditions{})
1264
1265		if err != nil {
1266			log.Fatal("an error occurred while creating blobs for the example setup")
1267		}
1268	}
1269
1270	// Perform a listing operation on blobs with hierarchy
1271	resp, err := containerURL.ListBlobsHierarchySegment(ctx, azblob.Marker{}, "/", azblob.ListBlobsSegmentOptions{})
1272	if err != nil {
1273		log.Fatal("an error occurred while listing blobs")
1274	}
1275
1276	// When a delimiter is used, the listing operation returns BlobPrefix elements that acts as
1277	// a placeholder for all blobs whose names begin with the same substring up to the appearance of the delimiter character.
1278	// In our example, this means that a/ and b/ will be both returned
1279	fmt.Println("======First listing=====")
1280	for _, blobPrefix := range resp.Segment.BlobPrefixes {
1281		fmt.Println("The blob prefix with name", blobPrefix.Name, "was returned in the listing operation")
1282	}
1283
1284	// The blobs that do not contain the delimiter are still returned
1285	for _, blob := range resp.Segment.BlobItems {
1286		fmt.Println("The blob with name", blob.Name, "was returned in the listing operation")
1287	}
1288
1289	// For the prefixes that are returned, we can perform another listing operation on them, to see their contents
1290	resp, err = containerURL.ListBlobsHierarchySegment(ctx, azblob.Marker{}, "/", azblob.ListBlobsSegmentOptions{
1291		Prefix: "a/",
1292	})
1293	if err != nil {
1294		log.Fatal("an error occurred while listing blobs")
1295	}
1296
1297	// This time, there is no blob prefix returned, since nothing under a/ has another / in its name.
1298	// In other words, in the virtual directory of a/, there aren't any sub-level virtual directory.
1299	fmt.Println("======Second listing=====")
1300	fmt.Println("No prefiex should be returned now, and the actual count is", len(resp.Segment.BlobPrefixes))
1301
1302	// The blobs a/1 and a/2 should be returned
1303	for _, blob := range resp.Segment.BlobItems {
1304		fmt.Println("The blob with name", blob.Name, "was returned in the listing operation")
1305	}
1306
1307	// Delete the blobs created by this example
1308	for _, blobName := range blobNames {
1309		blobURL := containerURL.NewBlockBlobURL(blobName)
1310		_, err := blobURL.Delete(ctx, azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{})
1311
1312		if err != nil {
1313			log.Fatal("an error occurred while deleting the blobs created by the example")
1314		}
1315	}
1316}
1317