1// Copyright 2014 Google LLC
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7//      http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15package storage
16
17import (
18	"bytes"
19	"compress/gzip"
20	"context"
21	"crypto/md5"
22	"crypto/sha256"
23	"encoding/base64"
24	"encoding/json"
25	"flag"
26	"fmt"
27	"hash/crc32"
28	"io"
29	"io/ioutil"
30	"log"
31	"math"
32	"math/rand"
33	"mime/multipart"
34	"net/http"
35	"net/http/httputil"
36	"os"
37	"path/filepath"
38	"runtime"
39	"sort"
40	"strconv"
41	"strings"
42	"testing"
43	"time"
44
45	"cloud.google.com/go/httpreplay"
46	"cloud.google.com/go/iam"
47	"cloud.google.com/go/internal/testutil"
48	"cloud.google.com/go/internal/uid"
49	"github.com/google/go-cmp/cmp"
50	"github.com/google/go-cmp/cmp/cmpopts"
51	"golang.org/x/oauth2/google"
52	"google.golang.org/api/googleapi"
53	"google.golang.org/api/iterator"
54	itesting "google.golang.org/api/iterator/testing"
55	"google.golang.org/api/option"
56	iampb "google.golang.org/genproto/googleapis/iam/v1"
57)
58
59const (
60	testPrefix     = "go-integration-test"
61	replayFilename = "storage.replay"
62	// TODO(jba): move to testutil, factor out from firestore/integration_test.go.
63	envFirestoreProjID     = "GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID"
64	envFirestorePrivateKey = "GCLOUD_TESTS_GOLANG_FIRESTORE_KEY"
65	grpcTestPrefix         = "golang-grpc-test-"
66)
67
68var (
69	record = flag.Bool("record", false, "record RPCs")
70
71	uidSpace       *uid.Space
72	uidSpaceGRPC   *uid.Space
73	bucketName     string
74	grpcBucketName string
75	// Use our own random number generator to isolate the sequence of random numbers from
76	// other packages. This makes it possible to use HTTP replay and draw the same sequence
77	// of numbers as during recording.
78	rng           *rand.Rand
79	newTestClient func(ctx context.Context, opts ...option.ClientOption) (*Client, error)
80	replaying     bool
81	testTime      time.Time
82)
83
84func TestMain(m *testing.M) {
85	cleanup := initIntegrationTest()
86	exit := m.Run()
87	if err := cleanup(); err != nil {
88		// Don't fail the test if cleanup fails.
89		log.Printf("Post-test cleanup failed: %v", err)
90	}
91	os.Exit(exit)
92}
93
94// If integration tests will be run, create a unique bucket for them.
95// Also, set newTestClient to handle record/replay.
96// Return a cleanup function.
97func initIntegrationTest() func() error {
98	flag.Parse() // needed for testing.Short()
99	switch {
100	case testing.Short() && *record:
101		log.Fatal("cannot combine -short and -record")
102		return nil
103
104	case testing.Short() && httpreplay.Supported() && testutil.CanReplay(replayFilename) && testutil.ProjID() != "":
105		// go test -short with a replay file will replay the integration tests, if
106		// the appropriate environment variables have been set.
107		replaying = true
108		httpreplay.DebugHeaders()
109		replayer, err := httpreplay.NewReplayer(replayFilename)
110		if err != nil {
111			log.Fatal(err)
112		}
113		var t time.Time
114		if err := json.Unmarshal(replayer.Initial(), &t); err != nil {
115			log.Fatal(err)
116		}
117		initUIDsAndRand(t)
118		newTestClient = func(ctx context.Context, _ ...option.ClientOption) (*Client, error) {
119			hc, err := replayer.Client(ctx) // no creds needed
120			if err != nil {
121				return nil, err
122			}
123			return NewClient(ctx, option.WithHTTPClient(hc))
124		}
125		log.Printf("replaying from %s", replayFilename)
126		return func() error { return replayer.Close() }
127
128	case testing.Short():
129		// go test -short without a replay file skips the integration tests.
130		if testutil.CanReplay(replayFilename) && testutil.ProjID() != "" {
131			log.Print("replay not supported for Go versions before 1.8")
132		}
133		newTestClient = nil
134		return func() error { return nil }
135
136	default: // Run integration tests against a real backend.
137		now := time.Now().UTC()
138		initUIDsAndRand(now)
139		var cleanup func() error
140		if *record && httpreplay.Supported() {
141			// Remember the time for replay.
142			nowBytes, err := json.Marshal(now)
143			if err != nil {
144				log.Fatal(err)
145			}
146			recorder, err := httpreplay.NewRecorder(replayFilename, nowBytes)
147			if err != nil {
148				log.Fatalf("could not record: %v", err)
149			}
150			newTestClient = func(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
151				hc, err := recorder.Client(ctx, opts...)
152				if err != nil {
153					return nil, err
154				}
155				return NewClient(ctx, option.WithHTTPClient(hc))
156			}
157			cleanup = func() error {
158				err1 := cleanupBuckets()
159				err2 := recorder.Close()
160				if err1 != nil {
161					return err1
162				}
163				return err2
164			}
165			log.Printf("recording to %s", replayFilename)
166		} else {
167			if *record {
168				log.Print("record not supported for Go versions before 1.8")
169			}
170			newTestClient = NewClient
171			cleanup = cleanupBuckets
172		}
173		ctx := context.Background()
174		client := config(ctx)
175		if client == nil {
176			return func() error { return nil }
177		}
178		defer client.Close()
179		if err := client.Bucket(bucketName).Create(ctx, testutil.ProjID(), nil); err != nil {
180			log.Fatalf("creating bucket %q: %v", bucketName, err)
181		}
182		if err := client.Bucket(grpcBucketName).Create(ctx, testutil.ProjID(), nil); err != nil {
183			log.Fatalf("creating bucket %q: %v", grpcBucketName, err)
184		}
185		return cleanup
186	}
187}
188
189func initUIDsAndRand(t time.Time) {
190	uidSpace = uid.NewSpace(testPrefix, &uid.Options{Time: t})
191	bucketName = uidSpace.New()
192	uidSpaceGRPC = uid.NewSpace(grpcTestPrefix, &uid.Options{Time: t})
193	grpcBucketName = uidSpaceGRPC.New()
194	// Use our own random source, to avoid other parts of the program taking
195	// random numbers from the global source and putting record and replay
196	// out of sync.
197	rng = testutil.NewRand(t)
198	testTime = t
199}
200
201// testConfig returns the Client used to access GCS. testConfig skips
202// the current test if credentials are not available or when being run
203// in Short mode.
204func testConfig(ctx context.Context, t *testing.T) *Client {
205	if testing.Short() && !replaying {
206		t.Skip("Integration tests skipped in short mode")
207	}
208	client := config(ctx)
209	if client == nil {
210		t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
211	}
212	return client
213}
214
215// testConfigGPRC returns a gRPC-based client to access GCS. testConfigGRPC
216// skips the curent test when being run in Short mode.
217func testConfigGRPC(ctx context.Context, t *testing.T) (gc *Client) {
218	if testing.Short() {
219		t.Skip("Integration tests skipped in short mode")
220	}
221
222	gc, err := newHybridClient(ctx, nil)
223	if err != nil {
224		t.Fatalf("newHybridClient: %v", err)
225	}
226
227	return
228}
229
230// config is like testConfig, but it doesn't need a *testing.T.
231func config(ctx context.Context) *Client {
232	ts := testutil.TokenSource(ctx, ScopeFullControl)
233	if ts == nil {
234		return nil
235	}
236	client, err := newTestClient(ctx, option.WithTokenSource(ts))
237	if err != nil {
238		log.Fatalf("NewClient: %v", err)
239	}
240	return client
241}
242
243func TestIntegration_BucketMethods(t *testing.T) {
244	ctx := context.Background()
245	client := testConfig(ctx, t)
246	defer client.Close()
247	h := testHelper{t}
248
249	projectID := testutil.ProjID()
250	newBucketName := uidSpace.New()
251	b := client.Bucket(newBucketName)
252	// Test Create and Delete.
253	h.mustCreate(b, projectID, nil)
254	attrs := h.mustBucketAttrs(b)
255	if got, want := attrs.MetaGeneration, int64(1); got != want {
256		t.Errorf("got metagen %d, want %d", got, want)
257	}
258	if got, want := attrs.StorageClass, "STANDARD"; got != want {
259		t.Errorf("got storage class %q, want %q", got, want)
260	}
261	if attrs.VersioningEnabled {
262		t.Error("got versioning enabled, wanted it disabled")
263	}
264	if attrs.LocationType == "" {
265		t.Error("got an empty LocationType")
266	}
267	h.mustDeleteBucket(b)
268
269	// Test Create and Delete with attributes.
270	labels := map[string]string{
271		"l1":    "v1",
272		"empty": "",
273	}
274	attrs = &BucketAttrs{
275		StorageClass:      "NEARLINE",
276		VersioningEnabled: true,
277		Labels:            labels,
278		Lifecycle: Lifecycle{
279			Rules: []LifecycleRule{{
280				Action: LifecycleAction{
281					Type:         SetStorageClassAction,
282					StorageClass: "NEARLINE",
283				},
284				Condition: LifecycleCondition{
285					AgeInDays:             10,
286					Liveness:              Archived,
287					CreatedBefore:         time.Date(2017, 1, 1, 0, 0, 0, 0, time.UTC),
288					MatchesStorageClasses: []string{"STANDARD"},
289					NumNewerVersions:      3,
290				},
291			}, {
292				Action: LifecycleAction{
293					Type:         SetStorageClassAction,
294					StorageClass: "ARCHIVE",
295				},
296				Condition: LifecycleCondition{
297					CustomTimeBefore:      time.Date(2020, 1, 2, 3, 0, 0, 0, time.UTC),
298					DaysSinceCustomTime:   20,
299					Liveness:              Live,
300					MatchesStorageClasses: []string{"STANDARD"},
301				},
302			}, {
303				Action: LifecycleAction{
304					Type: DeleteAction,
305				},
306				Condition: LifecycleCondition{
307					DaysSinceNoncurrentTime: 30,
308					Liveness:                Live,
309					NoncurrentTimeBefore:    time.Date(2017, 1, 1, 0, 0, 0, 0, time.UTC),
310					MatchesStorageClasses:   []string{"NEARLINE"},
311					NumNewerVersions:        10,
312				},
313			}},
314		},
315	}
316	h.mustCreate(b, projectID, attrs)
317	attrs = h.mustBucketAttrs(b)
318	if got, want := attrs.MetaGeneration, int64(1); got != want {
319		t.Errorf("got metagen %d, want %d", got, want)
320	}
321	if got, want := attrs.StorageClass, "NEARLINE"; got != want {
322		t.Errorf("got storage class %q, want %q", got, want)
323	}
324	if !attrs.VersioningEnabled {
325		t.Error("got versioning disabled, wanted it enabled")
326	}
327	if got, want := attrs.Labels, labels; !testutil.Equal(got, want) {
328		t.Errorf("labels: got %v, want %v", got, want)
329	}
330	if attrs.LocationType == "" {
331		t.Error("got an empty LocationType")
332	}
333	h.mustDeleteBucket(b)
334}
335
336func TestIntegration_BucketUpdate(t *testing.T) {
337	ctx := context.Background()
338	client := testConfig(ctx, t)
339	defer client.Close()
340	h := testHelper{t}
341
342	b := client.Bucket(uidSpace.New())
343	h.mustCreate(b, testutil.ProjID(), nil)
344	defer h.mustDeleteBucket(b)
345
346	attrs := h.mustBucketAttrs(b)
347	if attrs.VersioningEnabled {
348		t.Fatal("bucket should not have versioning by default")
349	}
350	if len(attrs.Labels) > 0 {
351		t.Fatal("bucket should not have labels initially")
352	}
353
354	// Using empty BucketAttrsToUpdate should be a no-nop.
355	attrs = h.mustUpdateBucket(b, BucketAttrsToUpdate{})
356	if attrs.VersioningEnabled {
357		t.Fatal("should not have versioning")
358	}
359	if len(attrs.Labels) > 0 {
360		t.Fatal("should not have labels")
361	}
362
363	// Turn on versioning, add some labels.
364	ua := BucketAttrsToUpdate{VersioningEnabled: true}
365	ua.SetLabel("l1", "v1")
366	ua.SetLabel("empty", "")
367	attrs = h.mustUpdateBucket(b, ua)
368	if !attrs.VersioningEnabled {
369		t.Fatal("should have versioning now")
370	}
371	wantLabels := map[string]string{
372		"l1":    "v1",
373		"empty": "",
374	}
375	if !testutil.Equal(attrs.Labels, wantLabels) {
376		t.Fatalf("got %v, want %v", attrs.Labels, wantLabels)
377	}
378
379	// Turn off versioning again; add and remove some more labels.
380	ua = BucketAttrsToUpdate{VersioningEnabled: false}
381	ua.SetLabel("l1", "v2")   // update
382	ua.SetLabel("new", "new") // create
383	ua.DeleteLabel("empty")   // delete
384	ua.DeleteLabel("absent")  // delete non-existent
385	attrs = h.mustUpdateBucket(b, ua)
386	if attrs.VersioningEnabled {
387		t.Fatal("should have versioning off")
388	}
389	wantLabels = map[string]string{
390		"l1":  "v2",
391		"new": "new",
392	}
393	if !testutil.Equal(attrs.Labels, wantLabels) {
394		t.Fatalf("got %v, want %v", attrs.Labels, wantLabels)
395	}
396
397	// Configure a lifecycle
398	wantLifecycle := Lifecycle{
399		Rules: []LifecycleRule{
400			{
401				Action:    LifecycleAction{Type: "Delete"},
402				Condition: LifecycleCondition{AgeInDays: 30},
403			},
404		},
405	}
406	ua = BucketAttrsToUpdate{Lifecycle: &wantLifecycle}
407	attrs = h.mustUpdateBucket(b, ua)
408	if !testutil.Equal(attrs.Lifecycle, wantLifecycle) {
409		t.Fatalf("got %v, want %v", attrs.Lifecycle, wantLifecycle)
410	}
411	// Check that StorageClass has "STANDARD" value for unset field by default
412	// before passing new value.
413	wantStorageClass := "STANDARD"
414	if !testutil.Equal(attrs.StorageClass, wantStorageClass) {
415		t.Fatalf("got %v, want %v", attrs.StorageClass, wantStorageClass)
416	}
417	wantStorageClass = "NEARLINE"
418	ua = BucketAttrsToUpdate{StorageClass: wantStorageClass}
419	attrs = h.mustUpdateBucket(b, ua)
420	if !testutil.Equal(attrs.StorageClass, wantStorageClass) {
421		t.Fatalf("got %v, want %v", attrs.StorageClass, wantStorageClass)
422	}
423}
424
425func TestIntegration_BucketPolicyOnly(t *testing.T) {
426	ctx := context.Background()
427	client := testConfig(ctx, t)
428	defer client.Close()
429	h := testHelper{t}
430	bkt := client.Bucket(bucketName)
431
432	// Insert an object with custom ACL.
433	o := bkt.Object("bucketPolicyOnly")
434	defer func() {
435		if err := o.Delete(ctx); err != nil {
436			log.Printf("failed to delete test object: %v", err)
437		}
438	}()
439	wc := o.NewWriter(ctx)
440	wc.ContentType = "text/plain"
441	h.mustWrite(wc, []byte("test"))
442	a := o.ACL()
443	aclEntity := ACLEntity("user-test@example.com")
444	err := a.Set(ctx, aclEntity, RoleReader)
445	if err != nil {
446		t.Fatalf("set ACL failed: %v", err)
447	}
448
449	// Enable BucketPolicyOnly.
450	ua := BucketAttrsToUpdate{BucketPolicyOnly: &BucketPolicyOnly{Enabled: true}}
451	attrs := h.mustUpdateBucket(bkt, ua)
452	if got, want := attrs.BucketPolicyOnly.Enabled, true; got != want {
453		t.Fatalf("got %v, want %v", got, want)
454	}
455	if got := attrs.BucketPolicyOnly.LockedTime; got.IsZero() {
456		t.Fatal("got a zero time value, want a populated value")
457	}
458
459	// Confirm BucketAccessControl returns error.
460	err = retry(ctx, func() error {
461		_, err = bkt.ACL().List(ctx)
462		return nil
463	}, func() error {
464		if err == nil {
465			return fmt.Errorf("ACL.List: expected bucket ACL list to fail")
466		}
467		return nil
468	})
469	if err != nil {
470		t.Fatal(err)
471	}
472
473	// Confirm ObjectAccessControl returns error.
474	err = retry(ctx, func() error {
475		_, err = o.ACL().List(ctx)
476		return nil
477	}, func() error {
478		if err == nil {
479			return fmt.Errorf("ACL.List: expected object ACL list to fail")
480		}
481		return nil
482	})
483	if err != nil {
484		t.Fatal(err)
485	}
486
487	// Disable BucketPolicyOnly.
488	ua = BucketAttrsToUpdate{BucketPolicyOnly: &BucketPolicyOnly{Enabled: false}}
489	attrs = h.mustUpdateBucket(bkt, ua)
490	if got, want := attrs.BucketPolicyOnly.Enabled, false; got != want {
491		t.Fatalf("got %v, want %v", got, want)
492	}
493
494	// Check that the object ACLs are the same.
495	var acls []ACLRule
496	err = retry(ctx, func() error {
497		acls, err = o.ACL().List(ctx)
498		if err != nil {
499			return fmt.Errorf("ACL.List: object ACL list failed: %v", err)
500		}
501		return nil
502	}, func() error {
503		if !containsACL(acls, aclEntity, RoleReader) {
504			return fmt.Errorf("containsACL: expected ACLs %v to include custom ACL entity %v", acls, aclEntity)
505		}
506		return nil
507	})
508	if err != nil {
509		t.Fatal(err)
510	}
511}
512
513func TestIntegration_UniformBucketLevelAccess(t *testing.T) {
514	ctx := context.Background()
515	client := testConfig(ctx, t)
516	defer client.Close()
517	h := testHelper{t}
518	bkt := client.Bucket(uidSpace.New())
519	h.mustCreate(bkt, testutil.ProjID(), nil)
520	defer h.mustDeleteBucket(bkt)
521
522	// Insert an object with custom ACL.
523	o := bkt.Object("uniformBucketLevelAccess")
524	defer func() {
525		if err := o.Delete(ctx); err != nil {
526			log.Printf("failed to delete test object: %v", err)
527		}
528	}()
529	wc := o.NewWriter(ctx)
530	wc.ContentType = "text/plain"
531	h.mustWrite(wc, []byte("test"))
532	a := o.ACL()
533	aclEntity := ACLEntity("user-test@example.com")
534	err := a.Set(ctx, aclEntity, RoleReader)
535	if err != nil {
536		t.Fatalf("set ACL failed: %v", err)
537	}
538
539	// Enable UniformBucketLevelAccess.
540	ua := BucketAttrsToUpdate{UniformBucketLevelAccess: &UniformBucketLevelAccess{Enabled: true}}
541	attrs := h.mustUpdateBucket(bkt, ua)
542	if got, want := attrs.UniformBucketLevelAccess.Enabled, true; got != want {
543		t.Fatalf("got %v, want %v", got, want)
544	}
545	if got := attrs.UniformBucketLevelAccess.LockedTime; got.IsZero() {
546		t.Fatal("got a zero time value, want a populated value")
547	}
548
549	// Confirm BucketAccessControl returns error.
550	err = retry(ctx, func() error {
551		_, err = bkt.ACL().List(ctx)
552		return nil
553	}, func() error {
554		if err == nil {
555			return fmt.Errorf("ACL.List: expected bucket ACL list to fail")
556		}
557		return nil
558	})
559	if err != nil {
560		t.Fatal(err)
561	}
562
563	// Confirm ObjectAccessControl returns error.
564	err = retry(ctx, func() error {
565		_, err = o.ACL().List(ctx)
566		return nil
567	}, func() error {
568		if err == nil {
569			return fmt.Errorf("ACL.List: expected object ACL list to fail")
570		}
571		return nil
572	})
573	if err != nil {
574		t.Fatal(err)
575	}
576
577	// Disable UniformBucketLevelAccess.
578	ua = BucketAttrsToUpdate{UniformBucketLevelAccess: &UniformBucketLevelAccess{Enabled: false}}
579	attrs = h.mustUpdateBucket(bkt, ua)
580	if got, want := attrs.UniformBucketLevelAccess.Enabled, false; got != want {
581		t.Fatalf("got %v, want %v", got, want)
582	}
583
584	// Check that the object ACLs are the same.
585	var acls []ACLRule
586	err = retry(ctx, func() error {
587		acls, err = o.ACL().List(ctx)
588		if err != nil {
589			return fmt.Errorf("ACL.List: object ACL list failed: %v", err)
590		}
591		return nil
592	}, func() error {
593		if !containsACL(acls, aclEntity, RoleReader) {
594			return fmt.Errorf("containsACL: expected ACLs %v to include custom ACL entity %v", acls, aclEntity)
595		}
596		return nil
597	})
598	if err != nil {
599		t.Fatal(err)
600	}
601}
602
603func TestIntegration_PublicAccessPrevention(t *testing.T) {
604	ctx := context.Background()
605	client := testConfig(ctx, t)
606	defer client.Close()
607	h := testHelper{t}
608
609	// Create a bucket with PublicAccessPrevention enforced.
610	bkt := client.Bucket(uidSpace.New())
611	h.mustCreate(bkt, testutil.ProjID(), &BucketAttrs{PublicAccessPrevention: PublicAccessPreventionEnforced})
612	defer h.mustDeleteBucket(bkt)
613
614	// Making bucket public should fail.
615	policy, err := bkt.IAM().V3().Policy(ctx)
616	if err != nil {
617		t.Fatalf("fetching bucket IAM policy: %v", err)
618	}
619	policy.Bindings = append(policy.Bindings, &iampb.Binding{
620		Role:    "roles/storage.objectViewer",
621		Members: []string{iam.AllUsers},
622	})
623	if err := bkt.IAM().V3().SetPolicy(ctx, policy); err == nil {
624		t.Error("SetPolicy: expected adding AllUsers policy to bucket should fail")
625	}
626
627	// Making object public via ACL should fail.
628	o := bkt.Object("publicAccessPrevention")
629	defer func() {
630		if err := o.Delete(ctx); err != nil {
631			log.Printf("failed to delete test object: %v", err)
632		}
633	}()
634	wc := o.NewWriter(ctx)
635	wc.ContentType = "text/plain"
636	h.mustWrite(wc, []byte("test"))
637	a := o.ACL()
638	if err := a.Set(ctx, AllUsers, RoleReader); err == nil {
639		t.Error("ACL.Set: expected adding AllUsers ACL to object should fail")
640	}
641
642	// Update PAP setting to unspecified should work and not affect UBLA setting.
643	attrs, err := bkt.Update(ctx, BucketAttrsToUpdate{PublicAccessPrevention: PublicAccessPreventionUnspecified})
644	if err != nil {
645		t.Fatalf("updating PublicAccessPrevention failed: %v", err)
646	}
647	if attrs.PublicAccessPrevention != PublicAccessPreventionUnspecified {
648		t.Errorf("updating PublicAccessPrevention: got %s, want %s", attrs.PublicAccessPrevention, PublicAccessPreventionUnspecified)
649	}
650	if attrs.UniformBucketLevelAccess.Enabled || attrs.BucketPolicyOnly.Enabled {
651		t.Error("updating PublicAccessPrevention changed UBLA setting")
652	}
653
654	// Now, making object public or making bucket public should succeed. Run with
655	// retry because ACL settings may take time to propagate.
656	if err := retry(ctx,
657		func() error {
658			a = o.ACL()
659			return a.Set(ctx, AllUsers, RoleReader)
660		},
661		nil); err != nil {
662		t.Errorf("ACL.Set: making object public failed: %v", err)
663	}
664	policy, err = bkt.IAM().V3().Policy(ctx)
665	if err != nil {
666		t.Fatalf("fetching bucket IAM policy: %v", err)
667	}
668	policy.Bindings = append(policy.Bindings, &iampb.Binding{
669		Role:    "roles/storage.objectViewer",
670		Members: []string{iam.AllUsers},
671	})
672	if err := bkt.IAM().V3().SetPolicy(ctx, policy); err != nil {
673		t.Errorf("SetPolicy: making bucket public failed: %v", err)
674	}
675
676	// Updating UBLA should not affect PAP setting.
677	attrs, err = bkt.Update(ctx, BucketAttrsToUpdate{UniformBucketLevelAccess: &UniformBucketLevelAccess{Enabled: true}})
678	if err != nil {
679		t.Fatalf("updating UBLA failed: %v", err)
680	}
681	if !attrs.UniformBucketLevelAccess.Enabled {
682		t.Error("updating UBLA: got UBLA not enabled, want enabled")
683	}
684	if attrs.PublicAccessPrevention != PublicAccessPreventionUnspecified {
685		t.Errorf("updating UBLA: got %s, want %s", attrs.PublicAccessPrevention, PublicAccessPreventionUnspecified)
686	}
687}
688
689func TestIntegration_ConditionalDelete(t *testing.T) {
690	ctx := context.Background()
691	client := testConfig(ctx, t)
692	defer client.Close()
693	h := testHelper{t}
694
695	o := client.Bucket(bucketName).Object("conddel")
696
697	wc := o.NewWriter(ctx)
698	wc.ContentType = "text/plain"
699	h.mustWrite(wc, []byte("foo"))
700
701	gen := wc.Attrs().Generation
702	metaGen := wc.Attrs().Metageneration
703
704	if err := o.Generation(gen - 1).Delete(ctx); err == nil {
705		t.Fatalf("Unexpected successful delete with Generation")
706	}
707	if err := o.If(Conditions{MetagenerationMatch: metaGen + 1}).Delete(ctx); err == nil {
708		t.Fatalf("Unexpected successful delete with IfMetaGenerationMatch")
709	}
710	if err := o.If(Conditions{MetagenerationNotMatch: metaGen}).Delete(ctx); err == nil {
711		t.Fatalf("Unexpected successful delete with IfMetaGenerationNotMatch")
712	}
713	if err := o.Generation(gen).Delete(ctx); err != nil {
714		t.Fatalf("final delete failed: %v", err)
715	}
716}
717
718func TestIntegration_ObjectsRangeReader(t *testing.T) {
719	ctx := context.Background()
720	client := testConfig(ctx, t)
721	defer client.Close()
722	bkt := client.Bucket(bucketName)
723
724	objName := uidSpace.New()
725	obj := bkt.Object(objName)
726	contents := []byte("Hello, world this is a range request")
727
728	if err := retry(ctx, func() error {
729		w := obj.NewWriter(ctx)
730		if _, err := w.Write(contents); err != nil {
731			return fmt.Errorf("Failed to write contents: %v", err)
732		}
733		if err := w.Close(); err != nil {
734			return fmt.Errorf("Failed to close writer: %v", err)
735		}
736		return nil
737	}, nil); err != nil {
738		t.Fatal(err)
739	}
740
741	last5s := []struct {
742		name   string
743		start  int64
744		length int64
745	}{
746		{name: "negative offset", start: -5, length: -1},
747		{name: "offset with specified length", start: int64(len(contents)) - 5, length: 5},
748		{name: "offset and read till end", start: int64(len(contents)) - 5, length: -1},
749	}
750
751	for _, last5 := range last5s {
752		t.Run(last5.name, func(t *testing.T) {
753			r, err := obj.NewRangeReader(ctx, last5.start, last5.length)
754			if err != nil {
755				t.Fatalf("Failed to make range read: %v", err)
756			}
757			defer r.Close()
758
759			if got, want := r.Attrs.StartOffset, int64(len(contents))-5; got != want {
760				t.Fatalf("StartOffset mismatch, got %d want %d", got, want)
761			}
762
763			nr, _ := io.Copy(ioutil.Discard, r)
764			if got, want := nr, int64(5); got != want {
765				t.Fatalf("Body length mismatch, got %d want %d", got, want)
766			}
767		})
768	}
769}
770
771func TestIntegration_ObjectReadGRPC(t *testing.T) {
772	ctx := context.Background()
773
774	// Create an HTTP client to upload test data and a gRPC client to test with.
775	hc := testConfig(ctx, t)
776	defer hc.Close()
777	gc := testConfigGRPC(ctx, t)
778	defer gc.Close()
779
780	content := []byte("Hello, world this is a grpc request")
781
782	// Upload test data.
783	name := uidSpace.New()
784	ho := hc.Bucket(grpcBucketName).Object(name)
785	if err := writeObject(ctx, ho, "text/plain", content); err != nil {
786		t.Fatal(err)
787	}
788	defer func() {
789		if err := ho.Delete(ctx); err != nil {
790			log.Printf("failed to delete test object: %v", err)
791		}
792	}()
793
794	obj := gc.Bucket(grpcBucketName).Object(name)
795
796	r, err := obj.NewReader(ctx)
797	if err != nil {
798		t.Fatal(err)
799	}
800	defer r.Close()
801
802	if size := r.Size(); size != int64(len(content)) {
803		t.Errorf("got size = %v, want %v", size, len(content))
804	}
805	if rem := r.Remain(); rem != int64(len(content)) {
806		t.Errorf("got %v bytes remaining, want %v", rem, len(content))
807	}
808
809	b := new(bytes.Buffer)
810	b.Grow(len(content))
811
812	n, err := io.Copy(b, r)
813	if err != nil {
814		t.Fatal(err)
815	}
816	if n == 0 {
817		t.Fatal("Expected to have read more than 0 bytes")
818	}
819
820	got := b.String()
821	want := string(content)
822	if diff := cmp.Diff(got, want); diff != "" {
823		t.Errorf("got(-),want(+):\n%s", diff)
824	}
825
826	if rem := r.Remain(); rem != 0 {
827		t.Errorf("got %v bytes remaining, want 0", rem)
828	}
829}
830
831func TestIntegration_ObjectReadChunksGRPC(t *testing.T) {
832	ctx := context.Background()
833
834	// Create an HTTP client to upload test data and a gRPC client to test with.
835	hc := testConfig(ctx, t)
836	defer hc.Close()
837	gc := testConfigGRPC(ctx, t)
838	defer gc.Close()
839
840	// Use a larger blob to test chunking logic. This is a little over 5MB.
841	content := bytes.Repeat([]byte("a"), 5<<20)
842
843	// Upload test data.
844	name := uidSpace.New()
845	ho := hc.Bucket(grpcBucketName).Object(name)
846	if err := writeObject(ctx, ho, "text/plain", content); err != nil {
847		t.Fatal(err)
848	}
849	defer func() {
850		if err := ho.Delete(ctx); err != nil {
851			log.Printf("failed to delete test object: %v", err)
852		}
853	}()
854
855	obj := gc.Bucket(grpcBucketName).Object(name)
856
857	r, err := obj.NewReader(ctx)
858	if err != nil {
859		t.Fatal(err)
860	}
861	defer r.Close()
862
863	if size := r.Size(); size != int64(len(content)) {
864		t.Errorf("got size = %v, want %v", size, len(content))
865	}
866	if rem := r.Remain(); rem != int64(len(content)) {
867		t.Errorf("got %v bytes remaining, want %v", rem, len(content))
868	}
869
870	bufSize := len(content)
871	buf := make([]byte, bufSize)
872
873	// Read in smaller chunks, offset to provoke reading across a Recv boundary.
874	chunk := 4<<10 + 1234
875	offset := 0
876	for {
877		end := math.Min(float64(offset+chunk), float64(bufSize))
878		n, err := r.Read(buf[offset:int(end)])
879		if err == io.EOF {
880			break
881		}
882		if err != nil {
883			t.Fatal(err)
884		}
885		offset += n
886	}
887
888	if rem := r.Remain(); rem != 0 {
889		t.Errorf("got %v bytes remaining, want 0", rem)
890	}
891
892	// TODO: Verify content with the checksums.
893}
894
895func TestIntegration_ObjectReadRelativeToEndGRPC(t *testing.T) {
896	ctx := context.Background()
897
898	// Create an HTTP client to upload test data and a gRPC client to test with.
899	hc := testConfig(ctx, t)
900	defer hc.Close()
901	gc := testConfigGRPC(ctx, t)
902	defer gc.Close()
903
904	content := []byte("Hello, world this is a grpc request")
905
906	// Upload test data.
907	name := uidSpace.New()
908	ho := hc.Bucket(grpcBucketName).Object(name)
909	if err := writeObject(ctx, ho, "text/plain", content); err != nil {
910		t.Fatal(err)
911	}
912	defer func() {
913		if err := ho.Delete(ctx); err != nil {
914			log.Printf("failed to delete test object: %v", err)
915		}
916	}()
917
918	obj := gc.Bucket(grpcBucketName).Object(name)
919
920	offset := 7
921	// Using a negative offset to start reading relative to the end of the
922	// object, and length to indicate reading to the end.
923	r, err := obj.NewRangeReader(ctx, int64(offset*-1), -1)
924	if err != nil {
925		t.Fatal(err)
926	}
927	defer r.Close()
928
929	if size := r.Size(); size != int64(len(content)) {
930		t.Errorf("got size = %v, want %v", size, len(content))
931	}
932	if rem := r.Remain(); rem != int64(offset) {
933		t.Errorf("got %v bytes remaining, want %v", rem, offset)
934	}
935
936	b := new(bytes.Buffer)
937	b.Grow(offset)
938
939	n, err := io.Copy(b, r)
940	if err != nil {
941		t.Fatal(err)
942	}
943	if n == 0 {
944		t.Fatal("Expected to have read more than 0 bytes")
945	}
946
947	got := b.String()
948	want := string(content[len(content)-offset:])
949	if diff := cmp.Diff(got, want); diff != "" {
950		t.Errorf("got(-),want(+):\n%s", diff)
951	}
952
953	if rem := r.Remain(); rem != 0 {
954		t.Errorf("got %v bytes remaining, want 0", rem)
955	}
956}
957
958func TestIntegration_ObjectReadPartialContentGRPC(t *testing.T) {
959	ctx := context.Background()
960
961	// Create an HTTP client to upload test data and a gRPC client to test with.
962	hc := testConfig(ctx, t)
963	defer hc.Close()
964	gc := testConfigGRPC(ctx, t)
965	defer gc.Close()
966
967	content := []byte("Hello, world this is a grpc request")
968
969	// Upload test data.
970	name := uidSpace.New()
971	ho := hc.Bucket(grpcBucketName).Object(name)
972	if err := writeObject(ctx, ho, "text/plain", content); err != nil {
973		t.Fatal(err)
974	}
975	defer func() {
976		if err := ho.Delete(ctx); err != nil {
977			log.Printf("failed to delete test object: %v", err)
978		}
979	}()
980
981	obj := gc.Bucket(grpcBucketName).Object(name)
982
983	offset := 5
984	length := 5
985	// Using a negative offset to start reading relative to the end of the
986	// object, and length to indicate reading to the end.
987	r, err := obj.NewRangeReader(ctx, int64(offset), int64(length))
988	if err != nil {
989		t.Fatal(err)
990	}
991	defer r.Close()
992
993	b := new(bytes.Buffer)
994	b.Grow(offset)
995
996	n, err := io.Copy(b, r)
997	if err != nil {
998		t.Fatal(err)
999	}
1000	if n == 0 {
1001		t.Fatal("Expected to have read more than 0 bytes")
1002	}
1003
1004	got := b.String()
1005	want := string(content[offset : offset+length])
1006	if diff := cmp.Diff(got, want); diff != "" {
1007		t.Errorf("got(-),want(+):\n%s", diff)
1008	}
1009}
1010
1011func TestIntegration_ConditionalDownloadGRPC(t *testing.T) {
1012	ctx := context.Background()
1013
1014	// Create an HTTP client to upload test data and a gRPC client to test with.
1015	hc := testConfig(ctx, t)
1016	defer hc.Close()
1017	gc := testConfigGRPC(ctx, t)
1018	defer gc.Close()
1019	h := testHelper{t}
1020
1021	o := hc.Bucket(grpcBucketName).Object("condread")
1022	defer o.Delete(ctx)
1023
1024	wc := o.NewWriter(ctx)
1025	wc.ContentType = "text/plain"
1026	h.mustWrite(wc, []byte("foo"))
1027
1028	gen := wc.Attrs().Generation
1029	metaGen := wc.Attrs().Metageneration
1030
1031	obj := gc.Bucket(grpcBucketName).Object(o.ObjectName())
1032
1033	if _, err := obj.Generation(gen + 1).NewReader(ctx); err == nil {
1034		t.Fatalf("Unexpected successful download with nonexistent Generation")
1035	}
1036	if _, err := obj.If(Conditions{MetagenerationMatch: metaGen + 1}).NewReader(ctx); err == nil {
1037		t.Fatalf("Unexpected successful download with failed preconditions IfMetaGenerationMatch")
1038	}
1039	if _, err := obj.If(Conditions{GenerationMatch: gen + 1}).NewReader(ctx); err == nil {
1040		t.Fatalf("Unexpected successful download with failed preconditions IfGenerationMatch")
1041	}
1042	if _, err := obj.If(Conditions{GenerationMatch: gen}).NewReader(ctx); err != nil {
1043		t.Fatalf("Download failed: %v", err)
1044	}
1045}
1046
1047func TestIntegration_ConditionalDownload(t *testing.T) {
1048	ctx := context.Background()
1049	client := testConfig(ctx, t)
1050	defer client.Close()
1051	h := testHelper{t}
1052
1053	o := client.Bucket(bucketName).Object("condread")
1054	defer o.Delete(ctx)
1055
1056	wc := o.NewWriter(ctx)
1057	wc.ContentType = "text/plain"
1058	h.mustWrite(wc, []byte("foo"))
1059
1060	gen := wc.Attrs().Generation
1061	metaGen := wc.Attrs().Metageneration
1062
1063	if _, err := o.Generation(gen + 1).NewReader(ctx); err == nil {
1064		t.Fatalf("Unexpected successful download with nonexistent Generation")
1065	}
1066	if _, err := o.If(Conditions{MetagenerationMatch: metaGen + 1}).NewReader(ctx); err == nil {
1067		t.Fatalf("Unexpected successful download with failed preconditions IfMetaGenerationMatch")
1068	}
1069	if _, err := o.If(Conditions{GenerationMatch: gen + 1}).NewReader(ctx); err == nil {
1070		t.Fatalf("Unexpected successful download with failed preconditions IfGenerationMatch")
1071	}
1072	if _, err := o.If(Conditions{GenerationMatch: gen}).NewReader(ctx); err != nil {
1073		t.Fatalf("Download failed: %v", err)
1074	}
1075}
1076
1077func TestIntegration_Objects(t *testing.T) {
1078	// TODO(jba): Use subtests (Go 1.7).
1079	ctx := context.Background()
1080	client := testConfig(ctx, t)
1081	defer client.Close()
1082	// Reset testTime, 'cause object last modification time should be within 5 min
1083	// from test (test iteration if -count passed) start time.
1084	testTime = time.Now().UTC()
1085	newBucketName := uidSpace.New()
1086	h := testHelper{t}
1087	bkt := client.Bucket(newBucketName)
1088
1089	h.mustCreate(bkt, testutil.ProjID(), nil)
1090	defer func() {
1091		if err := killBucket(ctx, client, newBucketName); err != nil {
1092			log.Printf("deleting %q: %v", newBucketName, err)
1093		}
1094	}()
1095	const defaultType = "text/plain"
1096
1097	// Populate object names and make a map for their contents.
1098	objects := []string{
1099		"obj1",
1100		"obj2",
1101		"obj/with/slashes",
1102	}
1103	contents := make(map[string][]byte)
1104
1105	// Test Writer.
1106	for _, obj := range objects {
1107		c := randomContents()
1108		if err := writeObject(ctx, bkt.Object(obj), defaultType, c); err != nil {
1109			t.Errorf("Write for %v failed with %v", obj, err)
1110		}
1111		contents[obj] = c
1112	}
1113
1114	testObjectIterator(t, bkt, objects)
1115	testObjectsIterateSelectedAttrs(t, bkt, objects)
1116	testObjectsIterateAllSelectedAttrs(t, bkt, objects)
1117	testObjectIteratorWithOffset(t, bkt, objects)
1118	testObjectsIterateWithProjection(t, bkt)
1119	t.Run("testObjectsIterateSelectedAttrsDelimiter", func(t *testing.T) {
1120		query := &Query{Prefix: "", Delimiter: "/"}
1121		if err := query.SetAttrSelection([]string{"Name"}); err != nil {
1122			t.Fatalf("selecting query attrs: %v", err)
1123		}
1124
1125		var gotNames []string
1126		var gotPrefixes []string
1127		it := bkt.Objects(context.Background(), query)
1128		for {
1129			attrs, err := it.Next()
1130			if err == iterator.Done {
1131				break
1132			}
1133			if err != nil {
1134				t.Fatalf("iterator.Next: %v", err)
1135			}
1136			if attrs.Name != "" {
1137				gotNames = append(gotNames, attrs.Name)
1138			} else if attrs.Prefix != "" {
1139				gotPrefixes = append(gotPrefixes, attrs.Prefix)
1140			}
1141
1142			if attrs.Bucket != "" {
1143				t.Errorf("Bucket field not selected, want empty, got = %v", attrs.Bucket)
1144			}
1145		}
1146
1147		sortedNames := []string{"obj1", "obj2"}
1148		if !cmp.Equal(sortedNames, gotNames) {
1149			t.Errorf("names = %v, want %v", gotNames, sortedNames)
1150		}
1151		sortedPrefixes := []string{"obj/"}
1152		if !cmp.Equal(sortedPrefixes, gotPrefixes) {
1153			t.Errorf("prefixes = %v, want %v", gotPrefixes, sortedPrefixes)
1154		}
1155	})
1156
1157	// Test Reader.
1158	for _, obj := range objects {
1159		rc, err := bkt.Object(obj).NewReader(ctx)
1160		if err != nil {
1161			t.Errorf("Can't create a reader for %v, errored with %v", obj, err)
1162			continue
1163		}
1164		if !rc.checkCRC {
1165			t.Errorf("%v: not checking CRC", obj)
1166		}
1167		slurp, err := ioutil.ReadAll(rc)
1168		if err != nil {
1169			t.Errorf("Can't ReadAll object %v, errored with %v", obj, err)
1170		}
1171		if got, want := slurp, contents[obj]; !bytes.Equal(got, want) {
1172			t.Errorf("Contents (%q) = %q; want %q", obj, got, want)
1173		}
1174		if got, want := rc.Size(), len(contents[obj]); got != int64(want) {
1175			t.Errorf("Size (%q) = %d; want %d", obj, got, want)
1176		}
1177		if got, want := rc.ContentType(), "text/plain"; got != want {
1178			t.Errorf("ContentType (%q) = %q; want %q", obj, got, want)
1179		}
1180		if got, want := rc.CacheControl(), "public, max-age=60"; got != want {
1181			t.Errorf("CacheControl (%q) = %q; want %q", obj, got, want)
1182		}
1183		// We just wrote these objects, so they should have a recent last-modified time.
1184		lm, err := rc.LastModified()
1185		// Accept a time within +/- of the test time, to account for natural
1186		// variation and the fact that testTime is set at the start of the test run.
1187		expectedVariance := 5 * time.Minute
1188		if err != nil {
1189			t.Errorf("LastModified (%q): got error %v", obj, err)
1190		} else if lm.Before(testTime.Add(-expectedVariance)) || lm.After(testTime.Add(expectedVariance)) {
1191			t.Errorf("LastModified (%q): got %s, which not the %v from now (%v)", obj, lm, expectedVariance, testTime)
1192		}
1193		rc.Close()
1194
1195		// Check early close.
1196		buf := make([]byte, 1)
1197		rc, err = bkt.Object(obj).NewReader(ctx)
1198		if err != nil {
1199			t.Fatalf("%v: %v", obj, err)
1200		}
1201		_, err = rc.Read(buf)
1202		if err != nil {
1203			t.Fatalf("%v: %v", obj, err)
1204		}
1205		if got, want := buf, contents[obj][:1]; !bytes.Equal(got, want) {
1206			t.Errorf("Contents[0] (%q) = %q; want %q", obj, got, want)
1207		}
1208		if err := rc.Close(); err != nil {
1209			t.Errorf("%v Close: %v", obj, err)
1210		}
1211	}
1212
1213	obj := objects[0]
1214	objlen := int64(len(contents[obj]))
1215	// Test Range Reader.
1216	for i, r := range []struct {
1217		offset, length, want int64
1218	}{
1219		{0, objlen, objlen},
1220		{0, objlen / 2, objlen / 2},
1221		{objlen / 2, objlen, objlen / 2},
1222		{0, 0, 0},
1223		{objlen / 2, 0, 0},
1224		{objlen / 2, -1, objlen / 2},
1225		{0, objlen * 2, objlen},
1226		{-2, -1, 2},
1227		{-objlen, -1, objlen},
1228		{-(objlen / 2), -1, objlen / 2},
1229	} {
1230		rc, err := bkt.Object(obj).NewRangeReader(ctx, r.offset, r.length)
1231		if err != nil {
1232			t.Errorf("%+v: Can't create a range reader for %v, errored with %v", i, obj, err)
1233			continue
1234		}
1235		if rc.Size() != objlen {
1236			t.Errorf("%+v: Reader has a content-size of %d, want %d", i, rc.Size(), objlen)
1237		}
1238		if rc.Remain() != r.want {
1239			t.Errorf("%+v: Reader's available bytes reported as %d, want %d", i, rc.Remain(), r.want)
1240		}
1241		slurp, err := ioutil.ReadAll(rc)
1242		if err != nil {
1243			t.Errorf("%+v: can't ReadAll object %v, errored with %v", r, obj, err)
1244			continue
1245		}
1246		if len(slurp) != int(r.want) {
1247			t.Errorf("%+v: RangeReader (%d, %d): Read %d bytes, wanted %d bytes", i, r.offset, r.length, len(slurp), r.want)
1248			continue
1249		}
1250
1251		switch {
1252		case r.offset < 0: // The case of reading the last N bytes.
1253			start := objlen + r.offset
1254			if got, want := slurp, contents[obj][start:]; !bytes.Equal(got, want) {
1255				t.Errorf("RangeReader (%d, %d) = %q; want %q", r.offset, r.length, got, want)
1256			}
1257
1258		default:
1259			if got, want := slurp, contents[obj][r.offset:r.offset+r.want]; !bytes.Equal(got, want) {
1260				t.Errorf("RangeReader (%d, %d) = %q; want %q", r.offset, r.length, got, want)
1261			}
1262		}
1263		rc.Close()
1264	}
1265
1266	objName := objects[0]
1267
1268	// Test NewReader googleapi.Error.
1269	// Since a 429 or 5xx is hard to cause, we trigger a 416.
1270	realLen := len(contents[objName])
1271	_, err := bkt.Object(objName).NewRangeReader(ctx, int64(realLen*2), 10)
1272	if err, ok := err.(*googleapi.Error); !ok {
1273		t.Error("NewRangeReader did not return a googleapi.Error")
1274	} else {
1275		if err.Code != 416 {
1276			t.Errorf("Code = %d; want %d", err.Code, 416)
1277		}
1278		if len(err.Header) == 0 {
1279			t.Error("Missing googleapi.Error.Header")
1280		}
1281		if len(err.Body) == 0 {
1282			t.Error("Missing googleapi.Error.Body")
1283		}
1284	}
1285
1286	// Test StatObject.
1287	o := h.mustObjectAttrs(bkt.Object(objName))
1288	if got, want := o.Name, objName; got != want {
1289		t.Errorf("Name (%v) = %q; want %q", objName, got, want)
1290	}
1291	if got, want := o.ContentType, defaultType; got != want {
1292		t.Errorf("ContentType (%v) = %q; want %q", objName, got, want)
1293	}
1294	created := o.Created
1295	// Check that the object is newer than its containing bucket.
1296	bAttrs := h.mustBucketAttrs(bkt)
1297	if o.Created.Before(bAttrs.Created) {
1298		t.Errorf("Object %v is older than its containing bucket, %v", o, bAttrs)
1299	}
1300
1301	// Test object copy.
1302	copyName := "copy-" + objName
1303	copyObj, err := bkt.Object(copyName).CopierFrom(bkt.Object(objName)).Run(ctx)
1304	if err != nil {
1305		t.Errorf("Copier.Run failed with %v", err)
1306	} else if !namesEqual(copyObj, newBucketName, copyName) {
1307		t.Errorf("Copy object bucket, name: got %q.%q, want %q.%q",
1308			copyObj.Bucket, copyObj.Name, newBucketName, copyName)
1309	}
1310
1311	// Copying with attributes.
1312	const contentEncoding = "identity"
1313	copier := bkt.Object(copyName).CopierFrom(bkt.Object(objName))
1314	copier.ContentEncoding = contentEncoding
1315	copyObj, err = copier.Run(ctx)
1316	if err != nil {
1317		t.Errorf("Copier.Run failed with %v", err)
1318	} else {
1319		if !namesEqual(copyObj, newBucketName, copyName) {
1320			t.Errorf("Copy object bucket, name: got %q.%q, want %q.%q",
1321				copyObj.Bucket, copyObj.Name, newBucketName, copyName)
1322		}
1323		if copyObj.ContentEncoding != contentEncoding {
1324			t.Errorf("Copy ContentEncoding: got %q, want %q", copyObj.ContentEncoding, contentEncoding)
1325		}
1326	}
1327
1328	// Test UpdateAttrs.
1329	metadata := map[string]string{"key": "value"}
1330	updated := h.mustUpdateObject(bkt.Object(objName), ObjectAttrsToUpdate{
1331		ContentType:     "text/html",
1332		ContentLanguage: "en",
1333		Metadata:        metadata,
1334		ACL:             []ACLRule{{Entity: "domain-google.com", Role: RoleReader}},
1335	})
1336	if got, want := updated.ContentType, "text/html"; got != want {
1337		t.Errorf("updated.ContentType == %q; want %q", got, want)
1338	}
1339	if got, want := updated.ContentLanguage, "en"; got != want {
1340		t.Errorf("updated.ContentLanguage == %q; want %q", updated.ContentLanguage, want)
1341	}
1342	if got, want := updated.Metadata, metadata; !testutil.Equal(got, want) {
1343		t.Errorf("updated.Metadata == %+v; want %+v", updated.Metadata, want)
1344	}
1345	if got, want := updated.Created, created; got != want {
1346		t.Errorf("updated.Created == %q; want %q", got, want)
1347	}
1348	if !updated.Created.Before(updated.Updated) {
1349		t.Errorf("updated.Updated should be newer than update.Created")
1350	}
1351
1352	// Delete ContentType and ContentLanguage.
1353	updated = h.mustUpdateObject(bkt.Object(objName), ObjectAttrsToUpdate{
1354		ContentType:     "",
1355		ContentLanguage: "",
1356		Metadata:        map[string]string{},
1357	})
1358	if got, want := updated.ContentType, ""; got != want {
1359		t.Errorf("updated.ContentType == %q; want %q", got, want)
1360	}
1361	if got, want := updated.ContentLanguage, ""; got != want {
1362		t.Errorf("updated.ContentLanguage == %q; want %q", updated.ContentLanguage, want)
1363	}
1364	if updated.Metadata != nil {
1365		t.Errorf("updated.Metadata == %+v; want nil", updated.Metadata)
1366	}
1367	if got, want := updated.Created, created; got != want {
1368		t.Errorf("updated.Created == %q; want %q", got, want)
1369	}
1370	if !updated.Created.Before(updated.Updated) {
1371		t.Errorf("updated.Updated should be newer than update.Created")
1372	}
1373
1374	// Test checksums.
1375	checksumCases := []struct {
1376		name     string
1377		contents [][]byte
1378		size     int64
1379		md5      string
1380		crc32c   uint32
1381	}{
1382		{
1383			name:     "checksum-object",
1384			contents: [][]byte{[]byte("hello"), []byte("world")},
1385			size:     10,
1386			md5:      "fc5e038d38a57032085441e7fe7010b0",
1387			crc32c:   1456190592,
1388		},
1389		{
1390			name:     "zero-object",
1391			contents: [][]byte{},
1392			size:     0,
1393			md5:      "d41d8cd98f00b204e9800998ecf8427e",
1394			crc32c:   0,
1395		},
1396	}
1397	for _, c := range checksumCases {
1398		wc := bkt.Object(c.name).NewWriter(ctx)
1399		for _, data := range c.contents {
1400			if _, err := wc.Write(data); err != nil {
1401				t.Errorf("Write(%q) failed with %q", data, err)
1402			}
1403		}
1404		if err = wc.Close(); err != nil {
1405			t.Errorf("%q: close failed with %q", c.name, err)
1406		}
1407		obj := wc.Attrs()
1408		if got, want := obj.Size, c.size; got != want {
1409			t.Errorf("Object (%q) Size = %v; want %v", c.name, got, want)
1410		}
1411		if got, want := fmt.Sprintf("%x", obj.MD5), c.md5; got != want {
1412			t.Errorf("Object (%q) MD5 = %q; want %q", c.name, got, want)
1413		}
1414		if got, want := obj.CRC32C, c.crc32c; got != want {
1415			t.Errorf("Object (%q) CRC32C = %v; want %v", c.name, got, want)
1416		}
1417	}
1418
1419	// Test public ACL.
1420	publicObj := objects[0]
1421	if err = bkt.Object(publicObj).ACL().Set(ctx, AllUsers, RoleReader); err != nil {
1422		t.Errorf("PutACLEntry failed with %v", err)
1423	}
1424	publicClient, err := newTestClient(ctx, option.WithoutAuthentication())
1425	if err != nil {
1426		t.Fatal(err)
1427	}
1428
1429	slurp := h.mustRead(publicClient.Bucket(newBucketName).Object(publicObj))
1430	if !bytes.Equal(slurp, contents[publicObj]) {
1431		t.Errorf("Public object's content: got %q, want %q", slurp, contents[publicObj])
1432	}
1433
1434	// Test writer error handling.
1435	wc := publicClient.Bucket(newBucketName).Object(publicObj).NewWriter(ctx)
1436	if _, err := wc.Write([]byte("hello")); err != nil {
1437		t.Errorf("Write unexpectedly failed with %v", err)
1438	}
1439	if err = wc.Close(); err == nil {
1440		t.Error("Close expected an error, found none")
1441	}
1442
1443	// Test deleting the copy object.
1444	h.mustDeleteObject(bkt.Object(copyName))
1445	// Deleting it a second time should return ErrObjectNotExist.
1446	if err := bkt.Object(copyName).Delete(ctx); err != ErrObjectNotExist {
1447		t.Errorf("second deletion of %v = %v; want ErrObjectNotExist", copyName, err)
1448	}
1449	_, err = bkt.Object(copyName).Attrs(ctx)
1450	if err != ErrObjectNotExist {
1451		t.Errorf("Copy is expected to be deleted, stat errored with %v", err)
1452	}
1453
1454	// Test object composition.
1455	var compSrcs []*ObjectHandle
1456	var wantContents []byte
1457	for _, obj := range objects {
1458		compSrcs = append(compSrcs, bkt.Object(obj))
1459		wantContents = append(wantContents, contents[obj]...)
1460	}
1461	checkCompose := func(obj *ObjectHandle, wantContentType string) {
1462		rc := h.mustNewReader(obj)
1463		slurp, err = ioutil.ReadAll(rc)
1464		if err != nil {
1465			t.Fatalf("ioutil.ReadAll: %v", err)
1466		}
1467		defer rc.Close()
1468		if !bytes.Equal(slurp, wantContents) {
1469			t.Errorf("Composed object contents\ngot:  %q\nwant: %q", slurp, wantContents)
1470		}
1471		if got := rc.ContentType(); got != wantContentType {
1472			t.Errorf("Composed object content-type = %q, want %q", got, wantContentType)
1473		}
1474	}
1475
1476	// Compose should work even if the user sets no destination attributes.
1477	compDst := bkt.Object("composed1")
1478	c := compDst.ComposerFrom(compSrcs...)
1479	if _, err := c.Run(ctx); err != nil {
1480		t.Fatalf("ComposeFrom error: %v", err)
1481	}
1482	checkCompose(compDst, "application/octet-stream")
1483
1484	// It should also work if we do.
1485	compDst = bkt.Object("composed2")
1486	c = compDst.ComposerFrom(compSrcs...)
1487	c.ContentType = "text/json"
1488	if _, err := c.Run(ctx); err != nil {
1489		t.Fatalf("ComposeFrom error: %v", err)
1490	}
1491	checkCompose(compDst, "text/json")
1492}
1493
1494func TestIntegration_Encoding(t *testing.T) {
1495	ctx := context.Background()
1496	client := testConfig(ctx, t)
1497	defer client.Close()
1498	bkt := client.Bucket(bucketName)
1499
1500	// Test content encoding
1501	const zeroCount = 20 << 1 // TODO: should be 20 << 20
1502	obj := bkt.Object("gzip-test")
1503	w := obj.NewWriter(ctx)
1504	w.ContentEncoding = "gzip"
1505	gw := gzip.NewWriter(w)
1506	if _, err := io.Copy(gw, io.LimitReader(zeros{}, zeroCount)); err != nil {
1507		t.Fatalf("io.Copy, upload: %v", err)
1508	}
1509	if err := gw.Close(); err != nil {
1510		t.Errorf("gzip.Close(): %v", err)
1511	}
1512	if err := w.Close(); err != nil {
1513		t.Errorf("w.Close(): %v", err)
1514	}
1515	r, err := obj.NewReader(ctx)
1516	if err != nil {
1517		t.Fatalf("NewReader(gzip-test): %v", err)
1518	}
1519	n, err := io.Copy(ioutil.Discard, r)
1520	if err != nil {
1521		t.Errorf("io.Copy, download: %v", err)
1522	}
1523	if n != zeroCount {
1524		t.Errorf("downloaded bad data: got %d bytes, want %d", n, zeroCount)
1525	}
1526
1527	// Test NotFound.
1528	_, err = bkt.Object("obj-not-exists").NewReader(ctx)
1529	if err != ErrObjectNotExist {
1530		t.Errorf("Object should not exist, err found to be %v", err)
1531	}
1532}
1533
1534func testObjectIterator(t *testing.T, bkt *BucketHandle, objects []string) {
1535	ctx := context.Background()
1536	h := testHelper{t}
1537	// Collect the list of items we expect: ObjectAttrs in lexical order by name.
1538	names := make([]string, len(objects))
1539	copy(names, objects)
1540	sort.Strings(names)
1541	var attrs []*ObjectAttrs
1542	for _, name := range names {
1543		attrs = append(attrs, h.mustObjectAttrs(bkt.Object(name)))
1544	}
1545	msg, ok := itesting.TestIterator(attrs,
1546		func() interface{} { return bkt.Objects(ctx, &Query{Prefix: "obj"}) },
1547		func(it interface{}) (interface{}, error) { return it.(*ObjectIterator).Next() })
1548	if !ok {
1549		t.Errorf("ObjectIterator.Next: %s", msg)
1550	}
1551	// TODO(jba): test query.Delimiter != ""
1552}
1553
1554func testObjectIteratorWithOffset(t *testing.T, bkt *BucketHandle, objects []string) {
1555	ctx := context.Background()
1556	h := testHelper{t}
1557	// Collect the list of items we expect: ObjectAttrs in lexical order by name.
1558	names := make([]string, len(objects))
1559	copy(names, objects)
1560	sort.Strings(names)
1561	var attrs []*ObjectAttrs
1562	for _, name := range names {
1563		attrs = append(attrs, h.mustObjectAttrs(bkt.Object(name)))
1564	}
1565	m := make(map[string][]*ObjectAttrs)
1566	for i, name := range names {
1567		// StartOffset takes the value of object names, the result must be for:
1568		// ― obj/with/slashes: obj/with/slashes, obj1, obj2
1569		// ― obj1: obj1, obj2
1570		// ― obj2: obj2.
1571		m[name] = attrs[i:]
1572		msg, ok := itesting.TestIterator(m[name],
1573			func() interface{} { return bkt.Objects(ctx, &Query{StartOffset: name}) },
1574			func(it interface{}) (interface{}, error) { return it.(*ObjectIterator).Next() })
1575		if !ok {
1576			t.Errorf("ObjectIterator.Next: %s", msg)
1577		}
1578		// EndOffset takes the value of object names, the result must be for:
1579		// ― obj/with/slashes: ""
1580		// ― obj1: obj/with/slashes
1581		// ― obj2: obj/with/slashes, obj1.
1582		m[name] = attrs[:i]
1583		msg, ok = itesting.TestIterator(m[name],
1584			func() interface{} { return bkt.Objects(ctx, &Query{EndOffset: name}) },
1585			func(it interface{}) (interface{}, error) { return it.(*ObjectIterator).Next() })
1586		if !ok {
1587			t.Errorf("ObjectIterator.Next: %s", msg)
1588		}
1589	}
1590}
1591
1592func testObjectsIterateSelectedAttrs(t *testing.T, bkt *BucketHandle, objects []string) {
1593	// Create a query that will only select the "Name" attr of objects, and
1594	// invoke object listing.
1595	query := &Query{Prefix: ""}
1596	query.SetAttrSelection([]string{"Name"})
1597
1598	var gotNames []string
1599	it := bkt.Objects(context.Background(), query)
1600	for {
1601		attrs, err := it.Next()
1602		if err == iterator.Done {
1603			break
1604		}
1605		if err != nil {
1606			t.Fatalf("iterator.Next: %v", err)
1607		}
1608		gotNames = append(gotNames, attrs.Name)
1609
1610		if len(attrs.Bucket) > 0 {
1611			t.Errorf("Bucket field not selected, want empty, got = %v", attrs.Bucket)
1612		}
1613	}
1614
1615	sortedNames := make([]string, len(objects))
1616	copy(sortedNames, objects)
1617	sort.Strings(sortedNames)
1618	sort.Strings(gotNames)
1619
1620	if !cmp.Equal(sortedNames, gotNames) {
1621		t.Errorf("names = %v, want %v", gotNames, sortedNames)
1622	}
1623}
1624
1625func testObjectsIterateAllSelectedAttrs(t *testing.T, bkt *BucketHandle, objects []string) {
1626	// Tests that all selected attributes work - query succeeds (without actually
1627	// verifying the returned results).
1628	query := &Query{
1629		Prefix:      "",
1630		StartOffset: "obj/with/slashes",
1631		EndOffset:   "obj2",
1632	}
1633	var selectedAttrs []string
1634	for k := range attrToFieldMap {
1635		selectedAttrs = append(selectedAttrs, k)
1636	}
1637	query.SetAttrSelection(selectedAttrs)
1638
1639	count := 0
1640	it := bkt.Objects(context.Background(), query)
1641	for {
1642		_, err := it.Next()
1643		if err == iterator.Done {
1644			break
1645		}
1646		if err != nil {
1647			t.Fatalf("iterator.Next: %v", err)
1648		}
1649		count++
1650	}
1651
1652	if count != len(objects)-1 {
1653		t.Errorf("count = %v, want %v", count, len(objects)-1)
1654	}
1655}
1656
1657func testObjectsIterateWithProjection(t *testing.T, bkt *BucketHandle) {
1658	projections := map[Projection]bool{
1659		ProjectionDefault: true,
1660		ProjectionFull:    true,
1661		ProjectionNoACL:   false,
1662	}
1663
1664	for projection, expectACL := range projections {
1665		query := &Query{Projection: projection}
1666		it := bkt.Objects(context.Background(), query)
1667		attrs, err := it.Next()
1668		if err == iterator.Done {
1669			t.Fatalf("iterator: no objects")
1670		}
1671		if err != nil {
1672			t.Fatalf("iterator.Next: %v", err)
1673		}
1674
1675		if expectACL {
1676			if attrs.Owner == "" {
1677				t.Errorf("projection %q: Owner is empty, want nonempty Owner", projection)
1678			}
1679			if len(attrs.ACL) == 0 {
1680				t.Errorf("projection %q: ACL is empty, want at least one ACL rule", projection)
1681			}
1682		} else {
1683			if attrs.Owner != "" {
1684				t.Errorf("projection %q: got Owner = %q, want empty Owner", projection, attrs.Owner)
1685			}
1686			if len(attrs.ACL) != 0 {
1687				t.Errorf("projection %q: got %d ACL rules, want empty ACL", projection, len(attrs.ACL))
1688			}
1689		}
1690	}
1691}
1692
1693func TestIntegration_SignedURL(t *testing.T) {
1694	if testing.Short() { // do not test during replay
1695		t.Skip("Integration tests skipped in short mode")
1696	}
1697	// To test SignedURL, we need a real user email and private key. Extract them
1698	// from the JSON key file.
1699	jwtConf, err := testutil.JWTConfig()
1700	if err != nil {
1701		t.Fatal(err)
1702	}
1703	if jwtConf == nil {
1704		t.Skip("JSON key file is not present")
1705	}
1706
1707	ctx := context.Background()
1708	client := testConfig(ctx, t)
1709	defer client.Close()
1710
1711	bkt := client.Bucket(bucketName)
1712	obj := "signedURL"
1713	contents := []byte("This is a test of SignedURL.\n")
1714	md5 := "Jyxvgwm9n2MsrGTMPbMeYA==" // base64-encoded MD5 of contents
1715	if err := writeObject(ctx, bkt.Object(obj), "text/plain", contents); err != nil {
1716		t.Fatalf("writing: %v", err)
1717	}
1718	for _, test := range []struct {
1719		desc    string
1720		opts    SignedURLOptions
1721		headers map[string][]string
1722		fail    bool
1723	}{
1724		{
1725			desc: "basic v2",
1726		},
1727		{
1728			desc: "basic v4",
1729			opts: SignedURLOptions{Scheme: SigningSchemeV4},
1730		},
1731		{
1732			desc:    "MD5 sent and matches",
1733			opts:    SignedURLOptions{MD5: md5},
1734			headers: map[string][]string{"Content-MD5": {md5}},
1735		},
1736		{
1737			desc: "MD5 not sent",
1738			opts: SignedURLOptions{MD5: md5},
1739			fail: true,
1740		},
1741		{
1742			desc:    "Content-Type sent and matches",
1743			opts:    SignedURLOptions{ContentType: "text/plain"},
1744			headers: map[string][]string{"Content-Type": {"text/plain"}},
1745		},
1746		{
1747			desc:    "Content-Type sent but does not match",
1748			opts:    SignedURLOptions{ContentType: "text/plain"},
1749			headers: map[string][]string{"Content-Type": {"application/json"}},
1750			fail:    true,
1751		},
1752		{
1753			desc: "Canonical headers sent and match",
1754			opts: SignedURLOptions{Headers: []string{
1755				" X-Goog-Foo: Bar baz ",
1756				"X-Goog-Novalue", // ignored: no value
1757				"X-Google-Foo",   // ignored: wrong prefix
1758			}},
1759			headers: map[string][]string{"X-Goog-foo": {"Bar baz  "}},
1760		},
1761		{
1762			desc:    "Canonical headers sent but don't match",
1763			opts:    SignedURLOptions{Headers: []string{" X-Goog-Foo: Bar baz"}},
1764			headers: map[string][]string{"X-Goog-Foo": {"bar baz"}},
1765			fail:    true,
1766		},
1767	} {
1768		opts := test.opts
1769		opts.GoogleAccessID = jwtConf.Email
1770		opts.PrivateKey = jwtConf.PrivateKey
1771		opts.Method = "GET"
1772		opts.Expires = time.Now().Add(time.Hour)
1773		u, err := SignedURL(bucketName, obj, &opts)
1774		if err != nil {
1775			t.Errorf("%s: SignedURL: %v", test.desc, err)
1776			continue
1777		}
1778		got, err := getURL(u, test.headers)
1779		if err != nil && !test.fail {
1780			t.Errorf("%s: getURL %q: %v", test.desc, u, err)
1781		} else if err == nil && !bytes.Equal(got, contents) {
1782			t.Errorf("%s: got %q, want %q", test.desc, got, contents)
1783		}
1784	}
1785}
1786
1787func TestIntegration_SignedURL_WithEncryptionKeys(t *testing.T) {
1788	if testing.Short() { // do not test during replay
1789		t.Skip("Integration tests skipped in short mode")
1790	}
1791	// To test SignedURL, we need a real user email and private key. Extract
1792	// them from the JSON key file.
1793	jwtConf, err := testutil.JWTConfig()
1794	if err != nil {
1795		t.Fatal(err)
1796	}
1797	if jwtConf == nil {
1798		t.Skip("JSON key file is not present")
1799	}
1800
1801	ctx := context.Background()
1802	client := testConfig(ctx, t)
1803	defer client.Close()
1804
1805	// TODO(deklerk): document how these were generated and their significance
1806	encryptionKey := "AAryxNglNkXQY0Wa+h9+7BLSFMhCzPo22MtXUWjOBbI="
1807	encryptionKeySha256 := "QlCdVONb17U1aCTAjrFvMbnxW/Oul8VAvnG1875WJ3k="
1808	headers := map[string][]string{
1809		"x-goog-encryption-algorithm":  {"AES256"},
1810		"x-goog-encryption-key":        {encryptionKey},
1811		"x-goog-encryption-key-sha256": {encryptionKeySha256},
1812	}
1813	contents := []byte(`{"message":"encryption with csek works"}`)
1814	tests := []struct {
1815		desc string
1816		opts *SignedURLOptions
1817	}{
1818		{
1819			desc: "v4 URL with customer supplied encryption keys for PUT",
1820			opts: &SignedURLOptions{
1821				Method: "PUT",
1822				Headers: []string{
1823					"x-goog-encryption-algorithm:AES256",
1824					"x-goog-encryption-key:AAryxNglNkXQY0Wa+h9+7BLSFMhCzPo22MtXUWjOBbI=",
1825					"x-goog-encryption-key-sha256:QlCdVONb17U1aCTAjrFvMbnxW/Oul8VAvnG1875WJ3k=",
1826				},
1827				Scheme: SigningSchemeV4,
1828			},
1829		},
1830		{
1831			desc: "v4 URL with customer supplied encryption keys for GET",
1832			opts: &SignedURLOptions{
1833				Method: "GET",
1834				Headers: []string{
1835					"x-goog-encryption-algorithm:AES256",
1836					fmt.Sprintf("x-goog-encryption-key:%s", encryptionKey),
1837					fmt.Sprintf("x-goog-encryption-key-sha256:%s", encryptionKeySha256),
1838				},
1839				Scheme: SigningSchemeV4,
1840			},
1841		},
1842	}
1843	defer func() {
1844		// Delete encrypted object.
1845		bkt := client.Bucket(bucketName)
1846		err := bkt.Object("csek.json").Delete(ctx)
1847		if err != nil {
1848			log.Printf("failed to deleted encrypted file: %v", err)
1849		}
1850	}()
1851
1852	for _, test := range tests {
1853		opts := test.opts
1854		opts.GoogleAccessID = jwtConf.Email
1855		opts.PrivateKey = jwtConf.PrivateKey
1856		opts.Expires = time.Now().Add(time.Hour)
1857
1858		u, err := SignedURL(bucketName, "csek.json", test.opts)
1859		if err != nil {
1860			t.Fatalf("%s: %v", test.desc, err)
1861		}
1862
1863		if test.opts.Method == "PUT" {
1864			if _, err := putURL(u, headers, bytes.NewReader(contents)); err != nil {
1865				t.Fatalf("%s: %v", test.desc, err)
1866			}
1867		}
1868
1869		if test.opts.Method == "GET" {
1870			got, err := getURL(u, headers)
1871			if err != nil {
1872				t.Fatalf("%s: %v", test.desc, err)
1873			}
1874			if !bytes.Equal(got, contents) {
1875				t.Fatalf("%s: got %q, want %q", test.desc, got, contents)
1876			}
1877		}
1878	}
1879}
1880
1881func TestIntegration_SignedURL_EmptyStringObjectName(t *testing.T) {
1882	if testing.Short() { // do not test during replay
1883		t.Skip("Integration tests skipped in short mode")
1884	}
1885
1886	// To test SignedURL, we need a real user email and private key. Extract them
1887	// from the JSON key file.
1888	jwtConf, err := testutil.JWTConfig()
1889	if err != nil {
1890		t.Fatal(err)
1891	}
1892	if jwtConf == nil {
1893		t.Skip("JSON key file is not present")
1894	}
1895
1896	ctx := context.Background()
1897	client := testConfig(ctx, t)
1898	defer client.Close()
1899
1900	opts := &SignedURLOptions{
1901		Scheme:         SigningSchemeV4,
1902		Method:         "GET",
1903		GoogleAccessID: jwtConf.Email,
1904		PrivateKey:     jwtConf.PrivateKey,
1905		Expires:        time.Now().Add(time.Hour),
1906	}
1907
1908	u, err := SignedURL(bucketName, "", opts)
1909	if err != nil {
1910		t.Fatal(err)
1911	}
1912
1913	// Should be some ListBucketResult response.
1914	_, err = getURL(u, nil)
1915	if err != nil {
1916		t.Fatal(err)
1917	}
1918}
1919
1920func TestIntegration_ACL(t *testing.T) {
1921	ctx := context.Background()
1922	client := testConfig(ctx, t)
1923	defer client.Close()
1924
1925	bkt := client.Bucket(bucketName)
1926
1927	entity := ACLEntity("domain-google.com")
1928	rule := ACLRule{Entity: entity, Role: RoleReader, Domain: "google.com"}
1929	if err := bkt.DefaultObjectACL().Set(ctx, entity, RoleReader); err != nil {
1930		t.Errorf("Can't put default ACL rule for the bucket, errored with %v", err)
1931	}
1932
1933	acl, err := bkt.DefaultObjectACL().List(ctx)
1934	if err != nil {
1935		t.Errorf("DefaultObjectACL.List for bucket %q: %v", bucketName, err)
1936	} else if !hasRule(acl, rule) {
1937		t.Errorf("default ACL missing %#v", rule)
1938	}
1939	aclObjects := []string{"acl1", "acl2"}
1940	name := aclObjects[0]
1941	o := bkt.Object(name)
1942	err = retry(ctx, func() error {
1943		for _, obj := range aclObjects {
1944			c := randomContents()
1945			if err := writeObject(ctx, bkt.Object(obj), "", c); err != nil {
1946				t.Errorf("Write for %v failed with %v", obj, err)
1947			}
1948		}
1949		acl, err = o.ACL().List(ctx)
1950		if err != nil {
1951			return fmt.Errorf("ACL.List: can't retrieve ACL of %v", name)
1952		}
1953		return nil
1954	}, func() error {
1955		if !hasRule(acl, rule) {
1956			return fmt.Errorf("hasRule: object ACL missing %+v", rule)
1957		}
1958		return nil
1959	})
1960	if err != nil {
1961		t.Error(err)
1962	}
1963	if err := o.ACL().Delete(ctx, entity); err != nil {
1964		t.Errorf("object ACL: could not delete entity %s", entity)
1965	}
1966	// Delete the default ACL rule. We can't move this code earlier in the
1967	// test, because the test depends on the fact that the object ACL inherits
1968	// it.
1969	if err := bkt.DefaultObjectACL().Delete(ctx, entity); err != nil {
1970		t.Errorf("default ACL: could not delete entity %s", entity)
1971	}
1972
1973	entity2 := ACLEntity("user-jbd@google.com")
1974	rule2 := ACLRule{Entity: entity2, Role: RoleReader, Email: "jbd@google.com"}
1975	if err := bkt.ACL().Set(ctx, entity2, RoleReader); err != nil {
1976		t.Errorf("Error while putting bucket ACL rule: %v", err)
1977	}
1978	var bACL []ACLRule
1979	err = retry(ctx, func() error {
1980		bACL, err = bkt.ACL().List(ctx)
1981		if err != nil {
1982			return fmt.Errorf("ACL.List: error while getting the ACL of the bucket: %v", err)
1983		}
1984		return nil
1985	}, func() error {
1986		if !hasRule(bACL, rule2) {
1987			return fmt.Errorf("hasRule: bucket ACL missing %+v", rule2)
1988		}
1989		return nil
1990	})
1991	if err != nil {
1992		t.Error(err)
1993	}
1994	if err := bkt.ACL().Delete(ctx, entity2); err != nil {
1995		t.Errorf("Error while deleting bucket ACL rule: %v", err)
1996	}
1997
1998}
1999
2000func TestIntegration_ValidObjectNames(t *testing.T) {
2001	ctx := context.Background()
2002	client := testConfig(ctx, t)
2003	defer client.Close()
2004
2005	bkt := client.Bucket(bucketName)
2006
2007	validNames := []string{
2008		"gopher",
2009		"Гоферови",
2010		"a",
2011		strings.Repeat("a", 1024),
2012	}
2013	for _, name := range validNames {
2014		if err := writeObject(ctx, bkt.Object(name), "", []byte("data")); err != nil {
2015			t.Errorf("Object %q write failed: %v. Want success", name, err)
2016			continue
2017		}
2018		defer bkt.Object(name).Delete(ctx)
2019	}
2020
2021	invalidNames := []string{
2022		"",                        // Too short.
2023		strings.Repeat("a", 1025), // Too long.
2024		"new\nlines",
2025		"bad\xffunicode",
2026	}
2027	for _, name := range invalidNames {
2028		// Invalid object names will either cause failure during Write or Close.
2029		if err := writeObject(ctx, bkt.Object(name), "", []byte("data")); err != nil {
2030			continue
2031		}
2032		defer bkt.Object(name).Delete(ctx)
2033		t.Errorf("%q should have failed. Didn't", name)
2034	}
2035}
2036
2037func TestIntegration_WriterContentType(t *testing.T) {
2038	ctx := context.Background()
2039	client := testConfig(ctx, t)
2040	defer client.Close()
2041
2042	obj := client.Bucket(bucketName).Object("content")
2043	testCases := []struct {
2044		content           string
2045		setType, wantType string
2046	}{
2047		{
2048			content:  "It was the best of times, it was the worst of times.",
2049			wantType: "text/plain; charset=utf-8",
2050		},
2051		{
2052			content:  "<html><head><title>My first page</title></head></html>",
2053			wantType: "text/html; charset=utf-8",
2054		},
2055		{
2056			content:  "<html><head><title>My first page</title></head></html>",
2057			setType:  "text/html",
2058			wantType: "text/html",
2059		},
2060		{
2061			content:  "<html><head><title>My first page</title></head></html>",
2062			setType:  "image/jpeg",
2063			wantType: "image/jpeg",
2064		},
2065	}
2066	for i, tt := range testCases {
2067		if err := writeObject(ctx, obj, tt.setType, []byte(tt.content)); err != nil {
2068			t.Errorf("writing #%d: %v", i, err)
2069		}
2070		attrs, err := obj.Attrs(ctx)
2071		if err != nil {
2072			t.Errorf("obj.Attrs: %v", err)
2073			continue
2074		}
2075		if got := attrs.ContentType; got != tt.wantType {
2076			t.Errorf("Content-Type = %q; want %q\nContent: %q\nSet Content-Type: %q", got, tt.wantType, tt.content, tt.setType)
2077		}
2078	}
2079}
2080
2081func TestIntegration_ZeroSizedObject(t *testing.T) {
2082	t.Parallel()
2083	ctx := context.Background()
2084	client := testConfig(ctx, t)
2085	defer client.Close()
2086	h := testHelper{t}
2087
2088	obj := client.Bucket(bucketName).Object("zero")
2089
2090	// Check writing it works as expected.
2091	w := obj.NewWriter(ctx)
2092	if err := w.Close(); err != nil {
2093		t.Fatalf("Writer.Close: %v", err)
2094	}
2095	defer obj.Delete(ctx)
2096
2097	// Check we can read it too.
2098	body := h.mustRead(obj)
2099	if len(body) != 0 {
2100		t.Errorf("Body is %v, want empty []byte{}", body)
2101	}
2102}
2103
2104func TestIntegration_Encryption(t *testing.T) {
2105	// This function tests customer-supplied encryption keys for all operations
2106	// involving objects. Bucket and ACL operations aren't tested because they
2107	// aren't affected by customer encryption. Neither is deletion.
2108	ctx := context.Background()
2109	client := testConfig(ctx, t)
2110	defer client.Close()
2111	h := testHelper{t}
2112
2113	obj := client.Bucket(bucketName).Object("customer-encryption")
2114	key := []byte("my-secret-AES-256-encryption-key")
2115	keyHash := sha256.Sum256(key)
2116	keyHashB64 := base64.StdEncoding.EncodeToString(keyHash[:])
2117	key2 := []byte("My-Secret-AES-256-Encryption-Key")
2118	contents := "top secret."
2119
2120	checkMetadataCall := func(msg string, f func(o *ObjectHandle) (*ObjectAttrs, error)) {
2121		// Performing a metadata operation without the key should succeed.
2122		attrs, err := f(obj)
2123		if err != nil {
2124			t.Fatalf("%s: %v", msg, err)
2125		}
2126		// The key hash should match...
2127		if got, want := attrs.CustomerKeySHA256, keyHashB64; got != want {
2128			t.Errorf("%s: key hash: got %q, want %q", msg, got, want)
2129		}
2130		// ...but CRC and MD5 should not be present.
2131		if attrs.CRC32C != 0 {
2132			t.Errorf("%s: CRC: got %v, want 0", msg, attrs.CRC32C)
2133		}
2134		if len(attrs.MD5) > 0 {
2135			t.Errorf("%s: MD5: got %v, want len == 0", msg, attrs.MD5)
2136		}
2137
2138		// Performing a metadata operation with the key should succeed.
2139		attrs, err = f(obj.Key(key))
2140		if err != nil {
2141			t.Fatalf("%s: %v", msg, err)
2142		}
2143		// Check the key and content hashes.
2144		if got, want := attrs.CustomerKeySHA256, keyHashB64; got != want {
2145			t.Errorf("%s: key hash: got %q, want %q", msg, got, want)
2146		}
2147		if attrs.CRC32C == 0 {
2148			t.Errorf("%s: CRC: got 0, want non-zero", msg)
2149		}
2150		if len(attrs.MD5) == 0 {
2151			t.Errorf("%s: MD5: got len == 0, want len > 0", msg)
2152		}
2153	}
2154
2155	checkRead := func(msg string, o *ObjectHandle, k []byte, wantContents string) {
2156		// Reading the object without the key should fail.
2157		if _, err := readObject(ctx, o); err == nil {
2158			t.Errorf("%s: reading without key: want error, got nil", msg)
2159		}
2160		// Reading the object with the key should succeed.
2161		got := h.mustRead(o.Key(k))
2162		gotContents := string(got)
2163		// And the contents should match what we wrote.
2164		if gotContents != wantContents {
2165			t.Errorf("%s: contents: got %q, want %q", msg, gotContents, wantContents)
2166		}
2167	}
2168
2169	checkReadUnencrypted := func(msg string, obj *ObjectHandle, wantContents string) {
2170		got := h.mustRead(obj)
2171		gotContents := string(got)
2172		if gotContents != wantContents {
2173			t.Errorf("%s: got %q, want %q", msg, gotContents, wantContents)
2174		}
2175	}
2176
2177	// Write to obj using our own encryption key, which is a valid 32-byte
2178	// AES-256 key.
2179	h.mustWrite(obj.Key(key).NewWriter(ctx), []byte(contents))
2180
2181	checkMetadataCall("Attrs", func(o *ObjectHandle) (*ObjectAttrs, error) {
2182		return o.Attrs(ctx)
2183	})
2184
2185	checkMetadataCall("Update", func(o *ObjectHandle) (*ObjectAttrs, error) {
2186		return o.Update(ctx, ObjectAttrsToUpdate{ContentLanguage: "en"})
2187	})
2188
2189	checkRead("first object", obj, key, contents)
2190
2191	obj2 := client.Bucket(bucketName).Object("customer-encryption-2")
2192	// Copying an object without the key should fail.
2193	if _, err := obj2.CopierFrom(obj).Run(ctx); err == nil {
2194		t.Fatal("want error, got nil")
2195	}
2196	// Copying an object with the key should succeed.
2197	if _, err := obj2.CopierFrom(obj.Key(key)).Run(ctx); err != nil {
2198		t.Fatal(err)
2199	}
2200	// The destination object is not encrypted; we can read it without a key.
2201	checkReadUnencrypted("copy dest", obj2, contents)
2202
2203	// Providing a key on the destination but not the source should fail,
2204	// since the source is encrypted.
2205	if _, err := obj2.Key(key2).CopierFrom(obj).Run(ctx); err == nil {
2206		t.Fatal("want error, got nil")
2207	}
2208
2209	// But copying with keys for both source and destination should succeed.
2210	if _, err := obj2.Key(key2).CopierFrom(obj.Key(key)).Run(ctx); err != nil {
2211		t.Fatal(err)
2212	}
2213	// And the destination should be encrypted, meaning we can only read it
2214	// with a key.
2215	checkRead("copy destination", obj2, key2, contents)
2216
2217	// Change obj2's key to prepare for compose, where all objects must have
2218	// the same key. Also illustrates key rotation: copy an object to itself
2219	// with a different key.
2220	if _, err := obj2.Key(key).CopierFrom(obj2.Key(key2)).Run(ctx); err != nil {
2221		t.Fatal(err)
2222	}
2223	obj3 := client.Bucket(bucketName).Object("customer-encryption-3")
2224	// Composing without keys should fail.
2225	if _, err := obj3.ComposerFrom(obj, obj2).Run(ctx); err == nil {
2226		t.Fatal("want error, got nil")
2227	}
2228	// Keys on the source objects result in an error.
2229	if _, err := obj3.ComposerFrom(obj.Key(key), obj2).Run(ctx); err == nil {
2230		t.Fatal("want error, got nil")
2231	}
2232	// A key on the destination object both decrypts the source objects
2233	// and encrypts the destination.
2234	if _, err := obj3.Key(key).ComposerFrom(obj, obj2).Run(ctx); err != nil {
2235		t.Fatalf("got %v, want nil", err)
2236	}
2237	// Check that the destination in encrypted.
2238	checkRead("compose destination", obj3, key, contents+contents)
2239
2240	// You can't compose one or more unencrypted source objects into an
2241	// encrypted destination object.
2242	_, err := obj2.CopierFrom(obj2.Key(key)).Run(ctx) // unencrypt obj2
2243	if err != nil {
2244		t.Fatal(err)
2245	}
2246	if _, err := obj3.Key(key).ComposerFrom(obj2).Run(ctx); err == nil {
2247		t.Fatal("got nil, want error")
2248	}
2249}
2250
2251func TestIntegration_NonexistentBucket(t *testing.T) {
2252	t.Parallel()
2253	ctx := context.Background()
2254	client := testConfig(ctx, t)
2255	defer client.Close()
2256
2257	bkt := client.Bucket(uidSpace.New())
2258	if _, err := bkt.Attrs(ctx); err != ErrBucketNotExist {
2259		t.Errorf("Attrs: got %v, want ErrBucketNotExist", err)
2260	}
2261	it := bkt.Objects(ctx, nil)
2262	if _, err := it.Next(); err != ErrBucketNotExist {
2263		t.Errorf("Objects: got %v, want ErrBucketNotExist", err)
2264	}
2265}
2266
2267func TestIntegration_PerObjectStorageClass(t *testing.T) {
2268	const (
2269		defaultStorageClass = "STANDARD"
2270		newStorageClass     = "NEARLINE"
2271	)
2272	ctx := context.Background()
2273	client := testConfig(ctx, t)
2274	defer client.Close()
2275	h := testHelper{t}
2276
2277	bkt := client.Bucket(bucketName)
2278
2279	// The bucket should have the default storage class.
2280	battrs := h.mustBucketAttrs(bkt)
2281	if battrs.StorageClass != defaultStorageClass {
2282		t.Fatalf("bucket storage class: got %q, want %q",
2283			battrs.StorageClass, defaultStorageClass)
2284	}
2285	// Write an object; it should start with the bucket's storage class.
2286	obj := bkt.Object("posc")
2287	h.mustWrite(obj.NewWriter(ctx), []byte("foo"))
2288	oattrs, err := obj.Attrs(ctx)
2289	if err != nil {
2290		t.Fatal(err)
2291	}
2292	if oattrs.StorageClass != defaultStorageClass {
2293		t.Fatalf("object storage class: got %q, want %q",
2294			oattrs.StorageClass, defaultStorageClass)
2295	}
2296	// Now use Copy to change the storage class.
2297	copier := obj.CopierFrom(obj)
2298	copier.StorageClass = newStorageClass
2299	oattrs2, err := copier.Run(ctx)
2300	if err != nil {
2301		log.Fatal(err)
2302	}
2303	if oattrs2.StorageClass != newStorageClass {
2304		t.Fatalf("new object storage class: got %q, want %q",
2305			oattrs2.StorageClass, newStorageClass)
2306	}
2307
2308	// We can also write a new object using a non-default storage class.
2309	obj2 := bkt.Object("posc2")
2310	w := obj2.NewWriter(ctx)
2311	w.StorageClass = newStorageClass
2312	h.mustWrite(w, []byte("xxx"))
2313	if w.Attrs().StorageClass != newStorageClass {
2314		t.Fatalf("new object storage class: got %q, want %q",
2315			w.Attrs().StorageClass, newStorageClass)
2316	}
2317}
2318
2319func TestIntegration_BucketInCopyAttrs(t *testing.T) {
2320	// Confirm that if bucket is included in the object attributes of a rewrite
2321	// call, but object name and content-type aren't, then we get an error. See
2322	// the comment in Copier.Run.
2323	ctx := context.Background()
2324	client := testConfig(ctx, t)
2325	defer client.Close()
2326	h := testHelper{t}
2327
2328	bkt := client.Bucket(bucketName)
2329	obj := bkt.Object("bucketInCopyAttrs")
2330	h.mustWrite(obj.NewWriter(ctx), []byte("foo"))
2331	copier := obj.CopierFrom(obj)
2332	rawObject := copier.ObjectAttrs.toRawObject(bucketName)
2333	_, err := copier.callRewrite(ctx, rawObject)
2334	if err == nil {
2335		t.Errorf("got nil, want error")
2336	}
2337}
2338
2339func TestIntegration_NoUnicodeNormalization(t *testing.T) {
2340	t.Parallel()
2341	ctx := context.Background()
2342	client := testConfig(ctx, t)
2343	defer client.Close()
2344	bkt := client.Bucket(bucketName)
2345	h := testHelper{t}
2346
2347	for _, tst := range []struct {
2348		nameQuoted, content string
2349	}{
2350		{`"Caf\u00e9"`, "Normalization Form C"},
2351		{`"Cafe\u0301"`, "Normalization Form D"},
2352	} {
2353		name, err := strconv.Unquote(tst.nameQuoted)
2354		w := bkt.Object(name).NewWriter(ctx)
2355		h.mustWrite(w, []byte(tst.content))
2356		if err != nil {
2357			t.Fatalf("invalid name: %s: %v", tst.nameQuoted, err)
2358		}
2359		if got := string(h.mustRead(bkt.Object(name))); got != tst.content {
2360			t.Errorf("content of %s is %q, want %q", tst.nameQuoted, got, tst.content)
2361		}
2362	}
2363}
2364
2365func TestIntegration_HashesOnUpload(t *testing.T) {
2366	// Check that the user can provide hashes on upload, and that these are checked.
2367	ctx := context.Background()
2368	client := testConfig(ctx, t)
2369	defer client.Close()
2370	obj := client.Bucket(bucketName).Object("hashesOnUpload-1")
2371	data := []byte("I can't wait to be verified")
2372
2373	write := func(w *Writer) error {
2374		if _, err := w.Write(data); err != nil {
2375			_ = w.Close()
2376			return err
2377		}
2378		return w.Close()
2379	}
2380
2381	crc32c := crc32.Checksum(data, crc32cTable)
2382	// The correct CRC should succeed.
2383	w := obj.NewWriter(ctx)
2384	w.CRC32C = crc32c
2385	w.SendCRC32C = true
2386	if err := write(w); err != nil {
2387		t.Fatal(err)
2388	}
2389
2390	// If we change the CRC, validation should fail.
2391	w = obj.NewWriter(ctx)
2392	w.CRC32C = crc32c + 1
2393	w.SendCRC32C = true
2394	if err := write(w); err == nil {
2395		t.Fatal("write with bad CRC32c: want error, got nil")
2396	}
2397
2398	// If we have the wrong CRC but forget to send it, we succeed.
2399	w = obj.NewWriter(ctx)
2400	w.CRC32C = crc32c + 1
2401	if err := write(w); err != nil {
2402		t.Fatal(err)
2403	}
2404
2405	// MD5
2406	md5 := md5.Sum(data)
2407	// The correct MD5 should succeed.
2408	w = obj.NewWriter(ctx)
2409	w.MD5 = md5[:]
2410	if err := write(w); err != nil {
2411		t.Fatal(err)
2412	}
2413
2414	// If we change the MD5, validation should fail.
2415	w = obj.NewWriter(ctx)
2416	w.MD5 = append([]byte(nil), md5[:]...)
2417	w.MD5[0]++
2418	if err := write(w); err == nil {
2419		t.Fatal("write with bad MD5: want error, got nil")
2420	}
2421}
2422
2423func TestIntegration_BucketIAM(t *testing.T) {
2424	ctx := context.Background()
2425	client := testConfig(ctx, t)
2426	defer client.Close()
2427	h := testHelper{t}
2428	bkt := client.Bucket(uidSpace.New())
2429	h.mustCreate(bkt, testutil.ProjID(), nil)
2430	defer h.mustDeleteBucket(bkt)
2431	// This bucket is unique to this test run. So we don't have
2432	// to worry about other runs interfering with our IAM policy
2433	// changes.
2434
2435	member := "projectViewer:" + testutil.ProjID()
2436	role := iam.RoleName("roles/storage.objectViewer")
2437	// Get the bucket's IAM policy.
2438	policy, err := bkt.IAM().Policy(ctx)
2439	if err != nil {
2440		t.Fatalf("Getting policy: %v", err)
2441	}
2442	// The member should not have the role.
2443	if policy.HasRole(member, role) {
2444		t.Errorf("member %q has role %q", member, role)
2445	}
2446	// Change the policy.
2447	policy.Add(member, role)
2448	if err := bkt.IAM().SetPolicy(ctx, policy); err != nil {
2449		t.Fatalf("SetPolicy: %v", err)
2450	}
2451	// Confirm that the binding was added.
2452	policy, err = bkt.IAM().Policy(ctx)
2453	if err != nil {
2454		t.Fatalf("Getting policy: %v", err)
2455	}
2456	if !policy.HasRole(member, role) {
2457		t.Errorf("member %q does not have role %q", member, role)
2458	}
2459
2460	// Check TestPermissions.
2461	// This client should have all these permissions (and more).
2462	perms := []string{"storage.buckets.get", "storage.buckets.delete"}
2463	got, err := bkt.IAM().TestPermissions(ctx, perms)
2464	if err != nil {
2465		t.Fatalf("TestPermissions: %v", err)
2466	}
2467	sort.Strings(perms)
2468	sort.Strings(got)
2469	if !testutil.Equal(got, perms) {
2470		t.Errorf("got %v, want %v", got, perms)
2471	}
2472}
2473
2474func TestIntegration_RequesterPays(t *testing.T) {
2475	// This test needs a second project and user (token source) to test
2476	// all possibilities. Since we need these things for Firestore already,
2477	// we use them here.
2478	//
2479	// There are up to three entities involved in a requester-pays call:
2480	//
2481	// 1. The user making the request. Here, we use
2482	//    a. The account used to create the token source used for all our
2483	//       integration tests (see testutil.TokenSource).
2484	//    b. The account used for the Firestore tests.
2485	// 2. The project that owns the requester-pays bucket. Here, that
2486	//    is the test project ID (see testutil.ProjID).
2487	// 3. The project provided as the userProject parameter of the request;
2488	//    the project to be billed. This test uses:
2489	//    a. The project that owns the requester-pays bucket (same as (2))
2490	//    b. Another project (the Firestore project).
2491	//
2492	// The following must hold for this test to work:
2493	// - (1a) must have resourcemanager.projects.createBillingAssignment permission
2494	//       (Owner role) on (2) (the project, not the bucket).
2495	// - (1b) must NOT have that permission on (2).
2496	// - (1b) must have serviceusage.services.use permission (Editor role) on (3b).
2497	// - (1b) must NOT have that permission on (3a).
2498	// - (1a) must NOT have that permission on (3b).
2499
2500	const wantErrorCode = 400
2501
2502	ctx := context.Background()
2503	client := testConfig(ctx, t)
2504	defer client.Close()
2505	h := testHelper{t}
2506
2507	bucketName2 := uidSpace.New()
2508	b1 := client.Bucket(bucketName2)
2509	projID := testutil.ProjID()
2510	// Use Firestore project as a project that does not contain the bucket.
2511	otherProjID := os.Getenv(envFirestoreProjID)
2512	if otherProjID == "" {
2513		t.Fatalf("need a second project (env var %s)", envFirestoreProjID)
2514	}
2515	ts := testutil.TokenSourceEnv(ctx, envFirestorePrivateKey, ScopeFullControl)
2516	if ts == nil {
2517		t.Fatalf("need a second account (env var %s)", envFirestorePrivateKey)
2518	}
2519	otherClient, err := newTestClient(ctx, option.WithTokenSource(ts))
2520	if err != nil {
2521		t.Fatal(err)
2522	}
2523	defer otherClient.Close()
2524	b2 := otherClient.Bucket(bucketName2)
2525	user, err := keyFileEmail(os.Getenv("GCLOUD_TESTS_GOLANG_KEY"))
2526	if err != nil {
2527		t.Fatal(err)
2528	}
2529	otherUser, err := keyFileEmail(os.Getenv(envFirestorePrivateKey))
2530	if err != nil {
2531		t.Fatal(err)
2532	}
2533
2534	// Create a requester-pays bucket. The bucket is contained in the project projID.
2535	h.mustCreate(b1, projID, &BucketAttrs{RequesterPays: true})
2536	if err := b1.ACL().Set(ctx, ACLEntity("user-"+otherUser), RoleOwner); err != nil {
2537		t.Fatal(err)
2538	}
2539
2540	// Extract the error code from err if it's a googleapi.Error.
2541	errCode := func(err error) int {
2542		if err == nil {
2543			return 0
2544		}
2545		if err, ok := err.(*googleapi.Error); ok {
2546			return err.Code
2547		}
2548		return -1
2549	}
2550
2551	// Call f under various conditions.
2552	// Here b1 and b2 refer to the same bucket, but b1 is bound to client,
2553	// while b2 is bound to otherClient. The clients differ in their credentials,
2554	// i.e. the identity of the user making the RPC: b1's user is an Owner on the
2555	// bucket's containing project, b2's is not.
2556	call := func(msg string, f func(*BucketHandle) error) {
2557		// user: an Owner on the containing project
2558		// userProject: absent
2559		// result: success, by the rule permitting access by owners of the containing bucket.
2560		if err := f(b1); err != nil {
2561			t.Errorf("%s: %v, want nil\n"+
2562				"confirm that %s is an Owner on %s",
2563				msg, err, user, projID)
2564		}
2565		// user: an Owner on the containing project
2566		// userProject: containing project
2567		// result: success, by the same rule as above; userProject is unnecessary but allowed.
2568		if err := f(b1.UserProject(projID)); err != nil {
2569			t.Errorf("%s: got %v, want nil", msg, err)
2570		}
2571		// user: not an Owner on the containing project
2572		// userProject: absent
2573		// result: failure, by the standard requester-pays rule
2574		err := f(b2)
2575		if got, want := errCode(err), wantErrorCode; got != want {
2576			t.Errorf("%s: got error %v with code %d, want code %d\n"+
2577				"confirm that %s is NOT an Owner on %s",
2578				msg, err, got, want, otherUser, projID)
2579		}
2580		// user: not an Owner on the containing project
2581		// userProject: not the containing one, but user has Editor role on it
2582		// result: success, by the standard requester-pays rule
2583		if err := f(b2.UserProject(otherProjID)); err != nil {
2584			t.Errorf("%s: got %v, want nil\n"+
2585				"confirm that %s is an Editor on %s and that that project has billing enabled",
2586				msg, err, otherUser, otherProjID)
2587		}
2588		// user: not an Owner on the containing project
2589		// userProject: the containing one, on which the user does NOT have Editor permission.
2590		// result: failure
2591		err = f(b2.UserProject("veener-jba"))
2592		if got, want := errCode(err), 403; got != want {
2593			t.Errorf("%s: got error %v, want code %d\n"+
2594				"confirm that %s is NOT an Editor on %s",
2595				msg, err, want, otherUser, "veener-jba")
2596		}
2597	}
2598
2599	// Getting its attributes requires a user project.
2600	var attrs *BucketAttrs
2601	call("Bucket attrs", func(b *BucketHandle) error {
2602		a, err := b.Attrs(ctx)
2603		if a != nil {
2604			attrs = a
2605		}
2606		return err
2607	})
2608	if attrs != nil {
2609		if got, want := attrs.RequesterPays, true; got != want {
2610			t.Fatalf("attr.RequesterPays = %t, want %t", got, want)
2611		}
2612	}
2613	// Object operations.
2614	obj1 := "acl-go-test" + uidSpace.New()
2615	call("write object", func(b *BucketHandle) error {
2616		return writeObject(ctx, b.Object(obj1), "text/plain", []byte("hello"))
2617	})
2618	call("read object", func(b *BucketHandle) error {
2619		_, err := readObject(ctx, b.Object(obj1))
2620		return err
2621	})
2622	call("object attrs", func(b *BucketHandle) error {
2623		_, err := b.Object(obj1).Attrs(ctx)
2624		return err
2625	})
2626	call("update object", func(b *BucketHandle) error {
2627		_, err := b.Object(obj1).Update(ctx, ObjectAttrsToUpdate{ContentLanguage: "en"})
2628		return err
2629	})
2630
2631	// ACL operations.
2632	// Create another object for these to avoid object rate limits.
2633	obj2 := "acl-go-test" + uidSpace.New()
2634	call("write object", func(b *BucketHandle) error {
2635		return writeObject(ctx, b.Object(obj2), "text/plain", []byte("hello"))
2636	})
2637	entity := ACLEntity("domain-google.com")
2638	call("bucket acl set", func(b *BucketHandle) error {
2639		return b.ACL().Set(ctx, entity, RoleReader)
2640	})
2641	call("bucket acl list", func(b *BucketHandle) error {
2642		_, err := b.ACL().List(ctx)
2643		return err
2644	})
2645	call("bucket acl delete", func(b *BucketHandle) error {
2646		err := b.ACL().Delete(ctx, entity)
2647		if errCode(err) == 404 {
2648			// Since we call the function multiple times, it will
2649			// fail with NotFound for all but the first.
2650			return nil
2651		}
2652		return err
2653	})
2654	call("default object acl set", func(b *BucketHandle) error {
2655		return b.DefaultObjectACL().Set(ctx, entity, RoleReader)
2656	})
2657	call("default object acl list", func(b *BucketHandle) error {
2658		_, err := b.DefaultObjectACL().List(ctx)
2659		return err
2660	})
2661	call("default object acl delete", func(b *BucketHandle) error {
2662		err := b.DefaultObjectACL().Delete(ctx, entity)
2663		if errCode(err) == 404 {
2664			return nil
2665		}
2666		return err
2667	})
2668	call("object acl set", func(b *BucketHandle) error {
2669		return b.Object(obj2).ACL().Set(ctx, entity, RoleReader)
2670	})
2671	call("object acl list", func(b *BucketHandle) error {
2672		_, err := b.Object(obj2).ACL().List(ctx)
2673		return err
2674	})
2675	call("object acl delete", func(b *BucketHandle) error {
2676		err := b.Object(obj2).ACL().Delete(ctx, entity)
2677		if errCode(err) == 404 {
2678			return nil
2679		}
2680		return err
2681	})
2682
2683	// Copy and compose.
2684	call("copy", func(b *BucketHandle) error {
2685		_, err := b.Object("copy").CopierFrom(b.Object(obj1)).Run(ctx)
2686		return err
2687	})
2688	call("compose", func(b *BucketHandle) error {
2689		_, err := b.Object("compose").ComposerFrom(b.Object(obj1), b.Object("copy")).Run(ctx)
2690		return err
2691	})
2692	call("delete object", func(b *BucketHandle) error {
2693		// Make sure the object exists, so we don't get confused by ErrObjectNotExist.
2694		// The storage service may perform validation in any order (perhaps in parallel),
2695		// so if we delete an object that doesn't exist and for which we lack permission,
2696		// we could see either of those two errors. (See Google-internal bug 78341001.)
2697		h.mustWrite(b1.Object(obj1).NewWriter(ctx), []byte("hello")) // note: b1, not b.
2698		return b.Object(obj1).Delete(ctx)
2699	})
2700	b1.Object(obj1).Delete(ctx) // Clean up created objects.
2701	b1.Object(obj2).Delete(ctx)
2702	for _, obj := range []string{"copy", "compose"} {
2703		if err := b1.UserProject(projID).Object(obj).Delete(ctx); err != nil {
2704			t.Fatalf("could not delete %q: %v", obj, err)
2705		}
2706	}
2707
2708	h.mustDeleteBucket(b1)
2709}
2710
2711func TestIntegration_Notifications(t *testing.T) {
2712	ctx := context.Background()
2713	client := testConfig(ctx, t)
2714	defer client.Close()
2715	bkt := client.Bucket(bucketName)
2716
2717	checkNotifications := func(msg string, want map[string]*Notification) {
2718		got, err := bkt.Notifications(ctx)
2719		if err != nil {
2720			t.Fatal(err)
2721		}
2722		if diff := testutil.Diff(got, want); diff != "" {
2723			t.Errorf("%s: got=-, want=+:\n%s", msg, diff)
2724		}
2725	}
2726	checkNotifications("initial", map[string]*Notification{})
2727
2728	nArg := &Notification{
2729		TopicProjectID: testutil.ProjID(),
2730		TopicID:        "go-storage-notification-test",
2731		PayloadFormat:  NoPayload,
2732	}
2733	n, err := bkt.AddNotification(ctx, nArg)
2734	if err != nil {
2735		t.Fatal(err)
2736	}
2737	nArg.ID = n.ID
2738	if !testutil.Equal(n, nArg) {
2739		t.Errorf("got %+v, want %+v", n, nArg)
2740	}
2741	checkNotifications("after add", map[string]*Notification{n.ID: n})
2742
2743	if err := bkt.DeleteNotification(ctx, n.ID); err != nil {
2744		t.Fatal(err)
2745	}
2746	checkNotifications("after delete", map[string]*Notification{})
2747}
2748
2749func TestIntegration_PublicBucket(t *testing.T) {
2750	// Confirm that an unauthenticated client can access a public bucket.
2751	// See https://cloud.google.com/storage/docs/public-datasets/landsat
2752	if testing.Short() && !replaying {
2753		t.Skip("Integration tests skipped in short mode")
2754	}
2755
2756	const landsatBucket = "gcp-public-data-landsat"
2757	const landsatPrefix = "LC08/01/001/002/LC08_L1GT_001002_20160817_20170322_01_T2/"
2758	const landsatObject = landsatPrefix + "LC08_L1GT_001002_20160817_20170322_01_T2_ANG.txt"
2759
2760	// Create an unauthenticated client.
2761	ctx := context.Background()
2762	client, err := newTestClient(ctx, option.WithoutAuthentication())
2763	if err != nil {
2764		t.Fatal(err)
2765	}
2766	defer client.Close()
2767	h := testHelper{t}
2768	bkt := client.Bucket(landsatBucket)
2769	obj := bkt.Object(landsatObject)
2770
2771	// Read a public object.
2772	bytes := h.mustRead(obj)
2773	if got, want := len(bytes), 117255; got != want {
2774		t.Errorf("len(bytes) = %d, want %d", got, want)
2775	}
2776
2777	// List objects in a public bucket.
2778	iter := bkt.Objects(ctx, &Query{Prefix: landsatPrefix})
2779	gotCount := 0
2780	for {
2781		_, err := iter.Next()
2782		if err == iterator.Done {
2783			break
2784		}
2785		if err != nil {
2786			t.Fatal(err)
2787		}
2788		gotCount++
2789	}
2790	if wantCount := 14; gotCount != wantCount {
2791		t.Errorf("object count: got %d, want %d", gotCount, wantCount)
2792	}
2793
2794	errCode := func(err error) int {
2795		err2, ok := err.(*googleapi.Error)
2796		if !ok {
2797			return -1
2798		}
2799		return err2.Code
2800	}
2801
2802	// Reading from or writing to a non-public bucket fails.
2803	c := testConfig(ctx, t)
2804	defer c.Close()
2805	nonPublicObj := client.Bucket(bucketName).Object("noauth")
2806	// Oddly, reading returns 403 but writing returns 401.
2807	_, err = readObject(ctx, nonPublicObj)
2808	if got, want := errCode(err), 403; got != want {
2809		t.Errorf("got code %d; want %d\nerror: %v", got, want, err)
2810	}
2811	err = writeObject(ctx, nonPublicObj, "text/plain", []byte("b"))
2812	if got, want := errCode(err), 401; got != want {
2813		t.Errorf("got code %d; want %d\nerror: %v", got, want, err)
2814	}
2815}
2816
2817func TestIntegration_ReadCRC(t *testing.T) {
2818	// Test that the checksum is handled correctly when reading files.
2819	// For gzipped files, see https://github.com/GoogleCloudPlatform/google-cloud-dotnet/issues/1641.
2820	if testing.Short() && !replaying {
2821		t.Skip("Integration tests skipped in short mode")
2822	}
2823
2824	const (
2825		// This is an uncompressed file.
2826		// See https://cloud.google.com/storage/docs/public-datasets/landsat
2827		uncompressedBucket = "gcp-public-data-landsat"
2828		uncompressedObject = "LC08/01/001/002/LC08_L1GT_001002_20160817_20170322_01_T2/LC08_L1GT_001002_20160817_20170322_01_T2_ANG.txt"
2829
2830		gzippedObject = "gzipped-text.txt"
2831	)
2832	ctx := context.Background()
2833	client, err := newTestClient(ctx)
2834	if err != nil {
2835		t.Fatal(err)
2836	}
2837	h := testHelper{t}
2838	defer client.Close()
2839
2840	// Create gzipped object.
2841	var buf bytes.Buffer
2842	zw := gzip.NewWriter(&buf)
2843	zw.Name = gzippedObject
2844	if _, err := zw.Write([]byte("gzipped object data")); err != nil {
2845		t.Fatalf("creating gzip: %v", err)
2846	}
2847	if err := zw.Close(); err != nil {
2848		t.Fatalf("closing gzip writer: %v", err)
2849	}
2850	w := client.Bucket(bucketName).Object(gzippedObject).NewWriter(ctx)
2851	w.ContentEncoding = "gzip"
2852	w.ContentType = "text/plain"
2853	h.mustWrite(w, buf.Bytes())
2854
2855	for _, test := range []struct {
2856		desc           string
2857		obj            *ObjectHandle
2858		offset, length int64
2859		readCompressed bool // don't decompress a gzipped file
2860
2861		wantErr   bool
2862		wantCheck bool // Should Reader try to check the CRC?
2863	}{
2864		{
2865			desc:           "uncompressed, entire file",
2866			obj:            client.Bucket(uncompressedBucket).Object(uncompressedObject),
2867			offset:         0,
2868			length:         -1,
2869			readCompressed: false,
2870			wantCheck:      true,
2871		},
2872		{
2873			desc:           "uncompressed, entire file, don't decompress",
2874			obj:            client.Bucket(uncompressedBucket).Object(uncompressedObject),
2875			offset:         0,
2876			length:         -1,
2877			readCompressed: true,
2878			wantCheck:      true,
2879		},
2880		{
2881			desc:           "uncompressed, suffix",
2882			obj:            client.Bucket(uncompressedBucket).Object(uncompressedObject),
2883			offset:         1,
2884			length:         -1,
2885			readCompressed: false,
2886			wantCheck:      false,
2887		},
2888		{
2889			desc:           "uncompressed, prefix",
2890			obj:            client.Bucket(uncompressedBucket).Object(uncompressedObject),
2891			offset:         0,
2892			length:         18,
2893			readCompressed: false,
2894			wantCheck:      false,
2895		},
2896		{
2897			// When a gzipped file is unzipped on read, we can't verify the checksum
2898			// because it was computed against the zipped contents. We can detect
2899			// this case using http.Response.Uncompressed.
2900			desc:           "compressed, entire file, unzipped",
2901			obj:            client.Bucket(bucketName).Object(gzippedObject),
2902			offset:         0,
2903			length:         -1,
2904			readCompressed: false,
2905			wantCheck:      false,
2906		},
2907		{
2908			// When we read a gzipped file uncompressed, it's like reading a regular file:
2909			// the served content and the CRC match.
2910			desc:           "compressed, entire file, read compressed",
2911			obj:            client.Bucket(bucketName).Object(gzippedObject),
2912			offset:         0,
2913			length:         -1,
2914			readCompressed: true,
2915			wantCheck:      true,
2916		},
2917		{
2918			desc:           "compressed, partial, server unzips",
2919			obj:            client.Bucket(bucketName).Object(gzippedObject),
2920			offset:         1,
2921			length:         8,
2922			readCompressed: false,
2923			wantErr:        true, // GCS can't serve part of a gzipped object
2924			wantCheck:      false,
2925		},
2926		{
2927			desc:           "compressed, partial, read compressed",
2928			obj:            client.Bucket(bucketName).Object(gzippedObject),
2929			offset:         1,
2930			length:         8,
2931			readCompressed: true,
2932			wantCheck:      false,
2933		},
2934	} {
2935		obj := test.obj.ReadCompressed(test.readCompressed)
2936		r, err := obj.NewRangeReader(ctx, test.offset, test.length)
2937		if err != nil {
2938			if test.wantErr {
2939				continue
2940			}
2941			t.Fatalf("%s: %v", test.desc, err)
2942		}
2943		if got, want := r.checkCRC, test.wantCheck; got != want {
2944			t.Errorf("%s, checkCRC: got %t, want %t", test.desc, got, want)
2945		}
2946		_, err = ioutil.ReadAll(r)
2947		_ = r.Close()
2948		if err != nil {
2949			t.Fatalf("%s: %v", test.desc, err)
2950		}
2951	}
2952}
2953
2954func TestIntegration_CancelWrite(t *testing.T) {
2955	// Verify that canceling the writer's context immediately stops uploading an object.
2956	ctx := context.Background()
2957	client := testConfig(ctx, t)
2958	defer client.Close()
2959	bkt := client.Bucket(bucketName)
2960
2961	cctx, cancel := context.WithCancel(ctx)
2962	defer cancel()
2963	obj := bkt.Object("cancel-write")
2964	w := obj.NewWriter(cctx)
2965	w.ChunkSize = googleapi.MinUploadChunkSize
2966	buf := make([]byte, w.ChunkSize)
2967	// Write the first chunk. This is read in its entirety before sending the request
2968	// (see google.golang.org/api/gensupport.PrepareUpload), so we expect it to return
2969	// without error.
2970	_, err := w.Write(buf)
2971	if err != nil {
2972		t.Fatal(err)
2973	}
2974	// Now cancel the context.
2975	cancel()
2976	// The next Write should return context.Canceled.
2977	_, err = w.Write(buf)
2978	// TODO: Once we drop support for Go versions < 1.13, use errors.Is() to
2979	// check for context cancellation instead.
2980	if err != context.Canceled && !strings.Contains(err.Error(), "context canceled") {
2981		t.Fatalf("got %v, wanted context.Canceled", err)
2982	}
2983	// The Close should too.
2984	err = w.Close()
2985	if err != context.Canceled && !strings.Contains(err.Error(), "context canceled") {
2986		t.Fatalf("got %v, wanted context.Canceled", err)
2987	}
2988}
2989
2990func TestIntegration_UpdateCORS(t *testing.T) {
2991	ctx := context.Background()
2992	client := testConfig(ctx, t)
2993	defer client.Close()
2994	h := testHelper{t}
2995
2996	initialSettings := []CORS{
2997		{
2998			MaxAge:          time.Hour,
2999			Methods:         []string{"POST"},
3000			Origins:         []string{"some-origin.com"},
3001			ResponseHeaders: []string{"foo-bar"},
3002		},
3003	}
3004
3005	for _, test := range []struct {
3006		input []CORS
3007		want  []CORS
3008	}{
3009		{
3010			input: []CORS{
3011				{
3012					MaxAge:          time.Hour,
3013					Methods:         []string{"GET"},
3014					Origins:         []string{"*"},
3015					ResponseHeaders: []string{"some-header"},
3016				},
3017			},
3018			want: []CORS{
3019				{
3020					MaxAge:          time.Hour,
3021					Methods:         []string{"GET"},
3022					Origins:         []string{"*"},
3023					ResponseHeaders: []string{"some-header"},
3024				},
3025			},
3026		},
3027		{
3028			input: []CORS{},
3029			want:  nil,
3030		},
3031		{
3032			input: nil,
3033			want: []CORS{
3034				{
3035					MaxAge:          time.Hour,
3036					Methods:         []string{"POST"},
3037					Origins:         []string{"some-origin.com"},
3038					ResponseHeaders: []string{"foo-bar"},
3039				},
3040			},
3041		},
3042	} {
3043		bkt := client.Bucket(uidSpace.New())
3044		h.mustCreate(bkt, testutil.ProjID(), &BucketAttrs{CORS: initialSettings})
3045		defer h.mustDeleteBucket(bkt)
3046		h.mustUpdateBucket(bkt, BucketAttrsToUpdate{CORS: test.input})
3047		attrs := h.mustBucketAttrs(bkt)
3048		if diff := testutil.Diff(attrs.CORS, test.want); diff != "" {
3049			t.Errorf("input: %v\ngot=-, want=+:\n%s", test.input, diff)
3050		}
3051	}
3052}
3053
3054func TestIntegration_UpdateDefaultEventBasedHold(t *testing.T) {
3055	ctx := context.Background()
3056	client := testConfig(ctx, t)
3057	defer client.Close()
3058	h := testHelper{t}
3059
3060	bkt := client.Bucket(uidSpace.New())
3061	h.mustCreate(bkt, testutil.ProjID(), &BucketAttrs{})
3062	defer h.mustDeleteBucket(bkt)
3063	attrs := h.mustBucketAttrs(bkt)
3064	if attrs.DefaultEventBasedHold != false {
3065		t.Errorf("got=%v, want=%v", attrs.DefaultEventBasedHold, false)
3066	}
3067
3068	h.mustUpdateBucket(bkt, BucketAttrsToUpdate{DefaultEventBasedHold: true})
3069	attrs = h.mustBucketAttrs(bkt)
3070	if attrs.DefaultEventBasedHold != true {
3071		t.Errorf("got=%v, want=%v", attrs.DefaultEventBasedHold, true)
3072	}
3073
3074	// Omitting it should leave the value unchanged.
3075	h.mustUpdateBucket(bkt, BucketAttrsToUpdate{RequesterPays: true})
3076	attrs = h.mustBucketAttrs(bkt)
3077	if attrs.DefaultEventBasedHold != true {
3078		t.Errorf("got=%v, want=%v", attrs.DefaultEventBasedHold, true)
3079	}
3080}
3081
3082func TestIntegration_UpdateEventBasedHold(t *testing.T) {
3083	ctx := context.Background()
3084	client := testConfig(ctx, t)
3085	defer client.Close()
3086	h := testHelper{t}
3087
3088	bkt := client.Bucket(uidSpace.New())
3089	h.mustCreate(bkt, testutil.ProjID(), &BucketAttrs{})
3090	obj := bkt.Object("some-obj")
3091	h.mustWrite(obj.NewWriter(ctx), randomContents())
3092
3093	defer func() {
3094		h.mustUpdateObject(obj, ObjectAttrsToUpdate{EventBasedHold: false})
3095		h.mustDeleteObject(obj)
3096		h.mustDeleteBucket(bkt)
3097	}()
3098
3099	attrs := h.mustObjectAttrs(obj)
3100	if attrs.EventBasedHold != false {
3101		t.Fatalf("got=%v, want=%v", attrs.EventBasedHold, false)
3102	}
3103
3104	h.mustUpdateObject(obj, ObjectAttrsToUpdate{EventBasedHold: true})
3105	attrs = h.mustObjectAttrs(obj)
3106	if attrs.EventBasedHold != true {
3107		t.Fatalf("got=%v, want=%v", attrs.EventBasedHold, true)
3108	}
3109
3110	// Omitting it should leave the value unchanged.
3111	h.mustUpdateObject(obj, ObjectAttrsToUpdate{ContentType: "foo"})
3112	attrs = h.mustObjectAttrs(obj)
3113	if attrs.EventBasedHold != true {
3114		t.Fatalf("got=%v, want=%v", attrs.EventBasedHold, true)
3115	}
3116}
3117
3118func TestIntegration_UpdateTemporaryHold(t *testing.T) {
3119	ctx := context.Background()
3120	client := testConfig(ctx, t)
3121	defer client.Close()
3122	h := testHelper{t}
3123
3124	bkt := client.Bucket(uidSpace.New())
3125	h.mustCreate(bkt, testutil.ProjID(), &BucketAttrs{})
3126	obj := bkt.Object("some-obj")
3127	h.mustWrite(obj.NewWriter(ctx), randomContents())
3128
3129	defer func() {
3130		h.mustUpdateObject(obj, ObjectAttrsToUpdate{TemporaryHold: false})
3131		h.mustDeleteObject(obj)
3132		h.mustDeleteBucket(bkt)
3133	}()
3134
3135	attrs := h.mustObjectAttrs(obj)
3136	if attrs.TemporaryHold != false {
3137		t.Fatalf("got=%v, want=%v", attrs.TemporaryHold, false)
3138	}
3139
3140	h.mustUpdateObject(obj, ObjectAttrsToUpdate{TemporaryHold: true})
3141	attrs = h.mustObjectAttrs(obj)
3142	if attrs.TemporaryHold != true {
3143		t.Fatalf("got=%v, want=%v", attrs.TemporaryHold, true)
3144	}
3145
3146	// Omitting it should leave the value unchanged.
3147	h.mustUpdateObject(obj, ObjectAttrsToUpdate{ContentType: "foo"})
3148	attrs = h.mustObjectAttrs(obj)
3149	if attrs.TemporaryHold != true {
3150		t.Fatalf("got=%v, want=%v", attrs.TemporaryHold, true)
3151	}
3152}
3153
3154func TestIntegration_UpdateRetentionExpirationTime(t *testing.T) {
3155	ctx := context.Background()
3156	client := testConfig(ctx, t)
3157	defer client.Close()
3158	h := testHelper{t}
3159
3160	bkt := client.Bucket(uidSpace.New())
3161	h.mustCreate(bkt, testutil.ProjID(), &BucketAttrs{RetentionPolicy: &RetentionPolicy{RetentionPeriod: time.Hour}})
3162	obj := bkt.Object("some-obj")
3163	h.mustWrite(obj.NewWriter(ctx), randomContents())
3164
3165	defer func() {
3166		h.mustUpdateBucket(bkt, BucketAttrsToUpdate{RetentionPolicy: &RetentionPolicy{RetentionPeriod: 0}})
3167
3168		// RetentionPeriod of less than a day is explicitly called out
3169		// as best effort and not guaranteed, so let's log problems deleting
3170		// objects instead of failing.
3171		if err := obj.Delete(context.Background()); err != nil {
3172			t.Logf("%s: object delete: %v", loc(), err)
3173		}
3174		if err := bkt.Delete(context.Background()); err != nil {
3175			t.Logf("%s: bucket delete: %v", loc(), err)
3176		}
3177	}()
3178
3179	attrs := h.mustObjectAttrs(obj)
3180	if attrs.RetentionExpirationTime == (time.Time{}) {
3181		t.Fatalf("got=%v, wanted a non-zero value", attrs.RetentionExpirationTime)
3182	}
3183}
3184
3185func TestIntegration_CustomTime(t *testing.T) {
3186	ctx := context.Background()
3187	client := testConfig(ctx, t)
3188	defer client.Close()
3189	h := testHelper{t}
3190
3191	// Create object with CustomTime.
3192	bkt := client.Bucket(bucketName)
3193	obj := bkt.Object("custom-time-obj")
3194	w := obj.NewWriter(ctx)
3195	ct := time.Date(2020, 8, 25, 12, 12, 12, 0, time.UTC)
3196	w.ObjectAttrs.CustomTime = ct
3197	h.mustWrite(w, randomContents())
3198
3199	// Validate that CustomTime has been set
3200	checkCustomTime := func(want time.Time) error {
3201		attrs, err := obj.Attrs(ctx)
3202		if err != nil {
3203			return fmt.Errorf("failed to get object attrs: %v", err)
3204		}
3205		if got := attrs.CustomTime; got != want {
3206			return fmt.Errorf("CustomTime not set correctly: got %+v, want %+v", got, ct)
3207		}
3208		return nil
3209	}
3210
3211	if err := checkCustomTime(ct); err != nil {
3212		t.Fatalf("checking CustomTime: %v", err)
3213	}
3214
3215	// Update CustomTime to the future should succeed.
3216	laterTime := ct.Add(10 * time.Hour)
3217	if _, err := obj.Update(ctx, ObjectAttrsToUpdate{CustomTime: laterTime}); err != nil {
3218		t.Fatalf("updating CustomTime: %v", err)
3219	}
3220
3221	// Update CustomTime to the past should give error.
3222	earlierTime := ct.Add(5 * time.Hour)
3223	if _, err := obj.Update(ctx, ObjectAttrsToUpdate{CustomTime: earlierTime}); err == nil {
3224		t.Fatalf("backdating CustomTime: expected error, got none")
3225	}
3226
3227	// Zero value for CustomTime should be ignored.
3228	if _, err := obj.Update(ctx, ObjectAttrsToUpdate{}); err != nil {
3229		t.Fatalf("empty update: %v", err)
3230	}
3231	if err := checkCustomTime(laterTime); err != nil {
3232		t.Fatalf("after sending zero value: %v", err)
3233	}
3234}
3235
3236func TestIntegration_UpdateRetentionPolicy(t *testing.T) {
3237	ctx := context.Background()
3238	client := testConfig(ctx, t)
3239	defer client.Close()
3240	h := testHelper{t}
3241
3242	initial := &RetentionPolicy{RetentionPeriod: time.Minute}
3243
3244	for _, test := range []struct {
3245		input *RetentionPolicy
3246		want  *RetentionPolicy
3247	}{
3248		{ // Update
3249			input: &RetentionPolicy{RetentionPeriod: time.Hour},
3250			want:  &RetentionPolicy{RetentionPeriod: time.Hour},
3251		},
3252		{ // Update even with timestamp (EffectiveTime should be ignored)
3253			input: &RetentionPolicy{RetentionPeriod: time.Hour, EffectiveTime: time.Now()},
3254			want:  &RetentionPolicy{RetentionPeriod: time.Hour},
3255		},
3256		{ // Remove
3257			input: &RetentionPolicy{},
3258			want:  nil,
3259		},
3260		{ // Remove even with timestamp (EffectiveTime should be ignored)
3261			input: &RetentionPolicy{EffectiveTime: time.Now()},
3262			want:  nil,
3263		},
3264		{ // Ignore
3265			input: nil,
3266			want:  initial,
3267		},
3268	} {
3269		bkt := client.Bucket(uidSpace.New())
3270		h.mustCreate(bkt, testutil.ProjID(), &BucketAttrs{RetentionPolicy: initial})
3271		defer h.mustDeleteBucket(bkt)
3272		h.mustUpdateBucket(bkt, BucketAttrsToUpdate{RetentionPolicy: test.input})
3273		attrs := h.mustBucketAttrs(bkt)
3274		if attrs.RetentionPolicy != nil && attrs.RetentionPolicy.EffectiveTime.Unix() == 0 {
3275			// Should be set by the server and parsed by the client
3276			t.Fatal("EffectiveTime should be set, but it was not")
3277		}
3278		if diff := testutil.Diff(attrs.RetentionPolicy, test.want, cmpopts.IgnoreTypes(time.Time{})); diff != "" {
3279			t.Errorf("input: %v\ngot=-, want=+:\n%s", test.input, diff)
3280		}
3281	}
3282}
3283
3284func TestIntegration_DeleteObjectInBucketWithRetentionPolicy(t *testing.T) {
3285	ctx := context.Background()
3286	client := testConfig(ctx, t)
3287	defer client.Close()
3288	h := testHelper{t}
3289
3290	bkt := client.Bucket(uidSpace.New())
3291	h.mustCreate(bkt, testutil.ProjID(), &BucketAttrs{RetentionPolicy: &RetentionPolicy{RetentionPeriod: 25 * time.Hour}})
3292
3293	oh := bkt.Object("some-object")
3294	if err := writeObject(ctx, oh, "text/plain", []byte("hello world")); err != nil {
3295		t.Fatal(err)
3296	}
3297
3298	if err := oh.Delete(ctx); err == nil {
3299		t.Fatal("expected to err deleting an object in a bucket with retention period, but got nil")
3300	}
3301
3302	// Remove the retention period
3303	h.mustUpdateBucket(bkt, BucketAttrsToUpdate{RetentionPolicy: &RetentionPolicy{}})
3304	// Deleting with retry, as bucket metadata changes
3305	// can take some time to propagate.
3306	err := retry(ctx, func() error {
3307		return oh.Delete(ctx)
3308	}, nil)
3309	if err != nil {
3310		h.t.Fatalf("%s: object delete: %v", loc(), err)
3311	}
3312	h.mustDeleteBucket(bkt)
3313}
3314
3315func TestIntegration_LockBucket(t *testing.T) {
3316	ctx := context.Background()
3317	client := testConfig(ctx, t)
3318	defer client.Close()
3319	h := testHelper{t}
3320
3321	bkt := client.Bucket(uidSpace.New())
3322	h.mustCreate(bkt, testutil.ProjID(), &BucketAttrs{RetentionPolicy: &RetentionPolicy{RetentionPeriod: time.Hour * 25}})
3323	attrs := h.mustBucketAttrs(bkt)
3324	if attrs.RetentionPolicy.IsLocked {
3325		t.Fatal("Expected bucket to begin unlocked, but it was not")
3326	}
3327	err := bkt.If(BucketConditions{MetagenerationMatch: attrs.MetaGeneration}).LockRetentionPolicy(ctx)
3328	if err != nil {
3329		t.Fatal("could not lock", err)
3330	}
3331
3332	attrs = h.mustBucketAttrs(bkt)
3333	if !attrs.RetentionPolicy.IsLocked {
3334		t.Fatal("Expected bucket to be locked, but it was not")
3335	}
3336
3337	_, err = bkt.Update(ctx, BucketAttrsToUpdate{RetentionPolicy: &RetentionPolicy{RetentionPeriod: time.Hour}})
3338	if err == nil {
3339		t.Fatal("Expected error updating locked bucket, got nil")
3340	}
3341}
3342
3343func TestIntegration_LockBucket_MetagenerationRequired(t *testing.T) {
3344	ctx := context.Background()
3345	client := testConfig(ctx, t)
3346	defer client.Close()
3347	h := testHelper{t}
3348
3349	bkt := client.Bucket(uidSpace.New())
3350	h.mustCreate(bkt, testutil.ProjID(), &BucketAttrs{
3351		RetentionPolicy: &RetentionPolicy{RetentionPeriod: time.Hour * 25},
3352	})
3353	err := bkt.LockRetentionPolicy(ctx)
3354	if err == nil {
3355		t.Fatal("expected error locking bucket without metageneration condition, got nil")
3356	}
3357}
3358
3359func TestIntegration_KMS(t *testing.T) {
3360	ctx := context.Background()
3361	client := testConfig(ctx, t)
3362	defer client.Close()
3363	h := testHelper{t}
3364
3365	keyRingName := os.Getenv("GCLOUD_TESTS_GOLANG_KEYRING")
3366	if keyRingName == "" {
3367		t.Fatal("GCLOUD_TESTS_GOLANG_KEYRING must be set. See CONTRIBUTING.md for details")
3368	}
3369	keyName1 := keyRingName + "/cryptoKeys/key1"
3370	keyName2 := keyRingName + "/cryptoKeys/key2"
3371	contents := []byte("my secret")
3372
3373	write := func(obj *ObjectHandle, setKey bool) {
3374		w := obj.NewWriter(ctx)
3375		if setKey {
3376			w.KMSKeyName = keyName1
3377		}
3378		h.mustWrite(w, contents)
3379	}
3380
3381	checkRead := func(obj *ObjectHandle) {
3382		got := h.mustRead(obj)
3383		if !bytes.Equal(got, contents) {
3384			t.Errorf("got %v, want %v", got, contents)
3385		}
3386		attrs := h.mustObjectAttrs(obj)
3387		if len(attrs.KMSKeyName) < len(keyName1) || attrs.KMSKeyName[:len(keyName1)] != keyName1 {
3388			t.Errorf("got %q, want %q", attrs.KMSKeyName, keyName1)
3389		}
3390	}
3391
3392	// Write an object with a key, then read it to verify its contents and the presence of the key name.
3393	bkt := client.Bucket(bucketName)
3394	obj := bkt.Object("kms")
3395	write(obj, true)
3396	checkRead(obj)
3397	h.mustDeleteObject(obj)
3398
3399	// Encrypt an object with a CSEK, then copy it using a CMEK.
3400	src := bkt.Object("csek").Key(testEncryptionKey)
3401	if err := writeObject(ctx, src, "text/plain", contents); err != nil {
3402		t.Fatal(err)
3403	}
3404	dest := bkt.Object("cmek")
3405	c := dest.CopierFrom(src)
3406	c.DestinationKMSKeyName = keyName1
3407	if _, err := c.Run(ctx); err != nil {
3408		t.Fatal(err)
3409	}
3410	checkRead(dest)
3411	src.Delete(ctx)
3412	dest.Delete(ctx)
3413
3414	// Create a bucket with a default key, then write and read an object.
3415	bkt = client.Bucket(uidSpace.New())
3416	h.mustCreate(bkt, testutil.ProjID(), &BucketAttrs{
3417		Location:   "US",
3418		Encryption: &BucketEncryption{DefaultKMSKeyName: keyName1},
3419	})
3420	defer h.mustDeleteBucket(bkt)
3421
3422	attrs := h.mustBucketAttrs(bkt)
3423	if got, want := attrs.Encryption.DefaultKMSKeyName, keyName1; got != want {
3424		t.Fatalf("got %q, want %q", got, want)
3425	}
3426	obj = bkt.Object("kms")
3427	write(obj, false)
3428	checkRead(obj)
3429	h.mustDeleteObject(obj)
3430
3431	// Update the bucket's default key to a different name.
3432	// (This key doesn't have to exist.)
3433	attrs = h.mustUpdateBucket(bkt, BucketAttrsToUpdate{Encryption: &BucketEncryption{DefaultKMSKeyName: keyName2}})
3434	if got, want := attrs.Encryption.DefaultKMSKeyName, keyName2; got != want {
3435		t.Fatalf("got %q, want %q", got, want)
3436	}
3437	attrs = h.mustBucketAttrs(bkt)
3438	if got, want := attrs.Encryption.DefaultKMSKeyName, keyName2; got != want {
3439		t.Fatalf("got %q, want %q", got, want)
3440	}
3441
3442	// Remove the default KMS key.
3443	attrs = h.mustUpdateBucket(bkt, BucketAttrsToUpdate{Encryption: &BucketEncryption{DefaultKMSKeyName: ""}})
3444	if attrs.Encryption != nil {
3445		t.Fatalf("got %#v, want nil", attrs.Encryption)
3446	}
3447}
3448
3449func TestIntegration_PredefinedACLs(t *testing.T) {
3450	check := func(msg string, rs []ACLRule, i int, wantEntity ACLEntity, wantRole ACLRole) {
3451		if i >= len(rs) {
3452			t.Errorf("%s: no rule at index %d", msg, i)
3453			return
3454		}
3455		got := rs[i]
3456		if got.Entity != wantEntity || got.Role != wantRole {
3457			t.Errorf("%s[%d]: got %+v, want Entity %s and Role %s",
3458				msg, i, got, wantEntity, wantRole)
3459		}
3460	}
3461	checkPrefix := func(msg string, rs []ACLRule, i int, wantPrefix string, wantRole ACLRole) {
3462		if i >= len(rs) {
3463			t.Errorf("%s: no rule at index %d", msg, i)
3464			return
3465		}
3466		got := rs[i]
3467		if !strings.HasPrefix(string(got.Entity), wantPrefix) || got.Role != wantRole {
3468			t.Errorf("%s[%d]: got %+v, want Entity %s... and Role %s",
3469				msg, i, got, wantPrefix, wantRole)
3470		}
3471	}
3472
3473	ctx := context.Background()
3474	client := testConfig(ctx, t)
3475	defer client.Close()
3476	h := testHelper{t}
3477
3478	bkt := client.Bucket(uidSpace.New())
3479	h.mustCreate(bkt, testutil.ProjID(), &BucketAttrs{
3480		PredefinedACL:              "authenticatedRead",
3481		PredefinedDefaultObjectACL: "publicRead",
3482	})
3483	defer h.mustDeleteBucket(bkt)
3484	attrs := h.mustBucketAttrs(bkt)
3485	checkPrefix("Bucket.ACL", attrs.ACL, 0, "project-owners", RoleOwner)
3486	check("Bucket.ACL", attrs.ACL, 1, AllAuthenticatedUsers, RoleReader)
3487	check("DefaultObjectACL", attrs.DefaultObjectACL, 0, AllUsers, RoleReader)
3488
3489	// Bucket update
3490	attrs = h.mustUpdateBucket(bkt, BucketAttrsToUpdate{
3491		PredefinedACL:              "private",
3492		PredefinedDefaultObjectACL: "authenticatedRead",
3493	})
3494	checkPrefix("Bucket.ACL update", attrs.ACL, 0, "project-owners", RoleOwner)
3495	check("DefaultObjectACL update", attrs.DefaultObjectACL, 0, AllAuthenticatedUsers, RoleReader)
3496
3497	// Object creation
3498	obj := bkt.Object("private")
3499	w := obj.NewWriter(ctx)
3500	w.PredefinedACL = "authenticatedRead"
3501	h.mustWrite(w, []byte("hello"))
3502	defer h.mustDeleteObject(obj)
3503	checkPrefix("Object.ACL", w.Attrs().ACL, 0, "user", RoleOwner)
3504	check("Object.ACL", w.Attrs().ACL, 1, AllAuthenticatedUsers, RoleReader)
3505
3506	// Object update
3507	oattrs := h.mustUpdateObject(obj, ObjectAttrsToUpdate{PredefinedACL: "private"})
3508	checkPrefix("Object.ACL update", oattrs.ACL, 0, "user", RoleOwner)
3509	if got := len(oattrs.ACL); got != 1 {
3510		t.Errorf("got %d ACLs, want 1", got)
3511	}
3512
3513	// Copy
3514	dst := bkt.Object("dst")
3515	copier := dst.CopierFrom(obj)
3516	copier.PredefinedACL = "publicRead"
3517	oattrs, err := copier.Run(ctx)
3518	if err != nil {
3519		t.Fatal(err)
3520	}
3521	defer h.mustDeleteObject(dst)
3522	// The copied object still retains the "private" ACL of the source object.
3523	checkPrefix("Copy dest", oattrs.ACL, 0, "user", RoleOwner)
3524	check("Copy dest", oattrs.ACL, 1, AllUsers, RoleReader)
3525
3526	// Compose
3527	comp := bkt.Object("comp")
3528	composer := comp.ComposerFrom(obj, dst)
3529	composer.PredefinedACL = "authenticatedRead"
3530	oattrs, err = composer.Run(ctx)
3531	if err != nil {
3532		t.Fatal(err)
3533	}
3534	defer h.mustDeleteObject(comp)
3535	// The composed object still retains the "private" ACL.
3536	checkPrefix("Copy dest", oattrs.ACL, 0, "user", RoleOwner)
3537	check("Copy dest", oattrs.ACL, 1, AllAuthenticatedUsers, RoleReader)
3538}
3539
3540func TestIntegration_ServiceAccount(t *testing.T) {
3541	ctx := context.Background()
3542	client := testConfig(ctx, t)
3543	defer client.Close()
3544
3545	s, err := client.ServiceAccount(ctx, testutil.ProjID())
3546	if err != nil {
3547		t.Fatal(err)
3548	}
3549	want := "@gs-project-accounts.iam.gserviceaccount.com"
3550	if !strings.Contains(s, want) {
3551		t.Fatalf("got %v, want to contain %v", s, want)
3552	}
3553}
3554
3555func TestIntegration_ReaderAttrs(t *testing.T) {
3556	ctx := context.Background()
3557	client := testConfig(ctx, t)
3558	defer client.Close()
3559	bkt := client.Bucket(bucketName)
3560
3561	const defaultType = "text/plain"
3562	obj := "some-object"
3563	c := randomContents()
3564	if err := writeObject(ctx, bkt.Object(obj), defaultType, c); err != nil {
3565		t.Errorf("Write for %v failed with %v", obj, err)
3566	}
3567	oh := bkt.Object(obj)
3568
3569	rc, err := oh.NewReader(ctx)
3570	if err != nil {
3571		t.Fatal(err)
3572	}
3573
3574	attrs, err := oh.Attrs(ctx)
3575	if err != nil {
3576		t.Fatal(err)
3577	}
3578
3579	got := rc.Attrs
3580	want := ReaderObjectAttrs{
3581		Size:            attrs.Size,
3582		ContentType:     attrs.ContentType,
3583		ContentEncoding: attrs.ContentEncoding,
3584		CacheControl:    attrs.CacheControl,
3585		LastModified:    got.LastModified, // ignored, tested separately
3586		Generation:      attrs.Generation,
3587		Metageneration:  attrs.Metageneration,
3588	}
3589	if got != want {
3590		t.Fatalf("got %v, wanted %v", got, want)
3591	}
3592
3593	if got.LastModified.IsZero() {
3594		t.Fatal("LastModified is 0, should be >0")
3595	}
3596}
3597
3598// Test that context cancellation correctly stops a download before completion.
3599func TestIntegration_ReaderCancel(t *testing.T) {
3600	ctx := context.Background()
3601	client := testConfig(ctx, t)
3602	defer client.Close()
3603
3604	bkt := client.Bucket(bucketName)
3605
3606	// Upload a 1MB object.
3607	obj := bkt.Object("reader-cancel-obj")
3608	w := obj.NewWriter(ctx)
3609	c := randomContents()
3610	for i := 0; i < 62500; i++ {
3611		if _, err := w.Write(c); err != nil {
3612			t.Fatalf("writer.Write: %v", err)
3613		}
3614
3615	}
3616	w.Close()
3617
3618	// Create a reader (which makes a GET request to GCS and opens the body to
3619	// read the object) and then cancel the context before reading.
3620	readerCtx, cancel := context.WithCancel(ctx)
3621	r, err := obj.NewReader(readerCtx)
3622	if err != nil {
3623		t.Fatalf("obj.NewReader: %v", err)
3624	}
3625	defer r.Close()
3626
3627	cancel()
3628
3629	// Read the object 1KB a time. We cannot guarantee that Reads will return a
3630	// context canceled error immediately, but they should always do so before we
3631	// reach EOF.
3632	var readErr error
3633	for i := 0; i < 1000; i++ {
3634		buf := make([]byte, 1000)
3635		_, readErr = r.Read(buf)
3636		if readErr != nil {
3637			if readErr == context.Canceled {
3638				return
3639			}
3640			break
3641		}
3642	}
3643	t.Fatalf("Reader.Read: got %v, want context.Canceled", readErr)
3644}
3645
3646// Ensures that a file stored with a:
3647// * Content-Encoding of "gzip"
3648// * Content-Type of "text/plain"
3649// will be properly served back.
3650// See:
3651//  * https://cloud.google.com/storage/docs/transcoding#transcoding_and_gzip
3652//  * https://github.com/googleapis/google-cloud-go/issues/1800
3653func TestIntegration_NewReaderWithContentEncodingGzip(t *testing.T) {
3654	ctx := context.Background()
3655	client := testConfig(ctx, t)
3656	defer client.Close()
3657
3658	h := testHelper{t}
3659
3660	projectID := testutil.ProjID()
3661	bkt := client.Bucket(uidSpace.New())
3662	h.mustCreate(bkt, projectID, nil)
3663	defer h.mustDeleteBucket(bkt)
3664	obj := bkt.Object("decompressive-transcoding")
3665	original := bytes.Repeat([]byte("a"), 4<<10)
3666
3667	// Wrap the file upload in a retry.
3668	// TODO: Investigate removing retry after resolving
3669	// https://github.com/googleapis/google-api-go-client/issues/392.
3670	err := retry(ctx, func() error {
3671		// Firstly upload the gzip compressed file.
3672		w := obj.NewWriter(ctx)
3673		// Compress and upload the content.
3674		gzw := gzip.NewWriter(w)
3675		if _, err := gzw.Write(original); err != nil {
3676			return fmt.Errorf("Failed to compress content: %v", err)
3677		}
3678		if err := gzw.Close(); err != nil {
3679			return fmt.Errorf("Failed to compress content: %v", err)
3680		}
3681		if err := w.Close(); err != nil {
3682			return fmt.Errorf("Failed to finish uploading the file: %v", err)
3683		}
3684		return nil
3685	},
3686		nil)
3687
3688	defer h.mustDeleteObject(obj)
3689
3690	// Now update the Content-Encoding and Content-Type to enable
3691	// decompressive transcoding.
3692	updatedAttrs, err := obj.Update(ctx, ObjectAttrsToUpdate{
3693		ContentEncoding: "gzip",
3694		ContentType:     "text/plain",
3695	})
3696	if err != nil {
3697		t.Fatalf("Attribute update failure: %v", err)
3698	}
3699	if g, w := updatedAttrs.ContentEncoding, "gzip"; g != w {
3700		t.Fatalf("ContentEncoding mismtach:\nGot:  %q\nWant: %q", g, w)
3701	}
3702	if g, w := updatedAttrs.ContentType, "text/plain"; g != w {
3703		t.Fatalf("ContentType mismtach:\nGot:  %q\nWant: %q", g, w)
3704	}
3705
3706	rWhole, err := obj.NewReader(ctx)
3707	if err != nil {
3708		t.Fatalf("Failed to create wholesome reader: %v", err)
3709	}
3710	blobWhole, err := ioutil.ReadAll(rWhole)
3711	rWhole.Close()
3712	if err != nil {
3713		t.Fatalf("Failed to read the whole body: %v", err)
3714	}
3715	if g, w := blobWhole, original; !bytes.Equal(g, w) {
3716		t.Fatalf("Body mismatch\nGot:\n%s\n\nWant:\n%s", g, w)
3717	}
3718
3719	// Now try a range read, which should return the whole body anyways since
3720	// for decompressive transcoding, range requests ARE IGNORED by Cloud Storage.
3721	r2kBTo3kB, err := obj.NewRangeReader(ctx, 2<<10, 3<<10)
3722	if err != nil {
3723		t.Fatalf("Failed to create range reader: %v", err)
3724	}
3725	blob2kBTo3kB, err := ioutil.ReadAll(r2kBTo3kB)
3726	r2kBTo3kB.Close()
3727	if err != nil {
3728		t.Fatalf("Failed to read with the 2kB to 3kB range request: %v", err)
3729	}
3730	// The ENTIRE body MUST be served back regardless of the requested range.
3731	if g, w := blob2kBTo3kB, original; !bytes.Equal(g, w) {
3732		t.Fatalf("Body mismatch\nGot:\n%s\n\nWant:\n%s", g, w)
3733	}
3734}
3735
3736func TestIntegration_HMACKey(t *testing.T) {
3737	ctx := context.Background()
3738	client := testConfig(ctx, t)
3739	defer client.Close()
3740
3741	projectID := testutil.ProjID()
3742
3743	// Use the service account email from the user's credentials. Requires that the
3744	// credentials are set via a JSON credentials file.
3745	// Note that a service account may only have up to 5 active HMAC keys at once; if
3746	// we see flakes because of this, we should consider switching to using a project
3747	// pool.
3748	credentials := testutil.CredentialsEnv(ctx, "GCLOUD_TESTS_GOLANG_KEY")
3749	if credentials == nil {
3750		t.Fatal("credentials could not be determined, is GCLOUD_TESTS_GOLANG_KEY set correctly?")
3751	}
3752	if credentials.JSON == nil {
3753		t.Fatal("could not read the JSON key file, is GCLOUD_TESTS_GOLANG_KEY set correctly?")
3754	}
3755	conf, err := google.JWTConfigFromJSON(credentials.JSON)
3756	if err != nil {
3757		t.Fatal(err)
3758	}
3759	serviceAccountEmail := conf.Email
3760
3761	hmacKey, err := client.CreateHMACKey(ctx, projectID, serviceAccountEmail)
3762	if err != nil {
3763		t.Fatalf("Failed to create HMACKey: %v", err)
3764	}
3765	if hmacKey == nil {
3766		t.Fatal("Unexpectedly got back a nil HMAC key")
3767	}
3768
3769	if hmacKey.State != Active {
3770		t.Fatalf("Unexpected state %q, expected %q", hmacKey.State, Active)
3771	}
3772
3773	hkh := client.HMACKeyHandle(projectID, hmacKey.AccessID)
3774	// 1. Ensure that we CANNOT delete an ACTIVE key.
3775	if err := hkh.Delete(ctx); err == nil {
3776		t.Fatal("Unexpectedly deleted key whose state is ACTIVE: No error from Delete.")
3777	}
3778
3779	invalidStates := []HMACState{"", Deleted, "active", "inactive", "foo_bar"}
3780	for _, invalidState := range invalidStates {
3781		t.Run("invalid-"+string(invalidState), func(t *testing.T) {
3782			_, err := hkh.Update(ctx, HMACKeyAttrsToUpdate{
3783				State: invalidState,
3784			})
3785			if err == nil {
3786				t.Fatal("Unexpectedly succeeded")
3787			}
3788			invalidStateMsg := fmt.Sprintf(`storage: invalid state %q for update, must be either "ACTIVE" or "INACTIVE"`, invalidState)
3789			if err.Error() != invalidStateMsg {
3790				t.Fatalf("Mismatched error: got:  %q\nwant: %q", err, invalidStateMsg)
3791			}
3792		})
3793	}
3794
3795	// 2.1. Setting the State to Inactive should succeed.
3796	hu, err := hkh.Update(ctx, HMACKeyAttrsToUpdate{
3797		State: Inactive,
3798	})
3799	if err != nil {
3800		t.Fatalf("Unexpected Update failure: %v", err)
3801	}
3802	if got, want := hu.State, Inactive; got != want {
3803		t.Fatalf("Unexpected updated state %q, expected %q", got, want)
3804	}
3805
3806	// 2.2. Setting the State back to Active should succeed.
3807	hu, err = hkh.Update(ctx, HMACKeyAttrsToUpdate{
3808		State: Active,
3809	})
3810	if err != nil {
3811		t.Fatalf("Unexpected Update failure: %v", err)
3812	}
3813	if got, want := hu.State, Active; got != want {
3814		t.Fatalf("Unexpected updated state %q, expected %q", got, want)
3815	}
3816
3817	// 3. Verify that keys are listed as expected.
3818	iter := client.ListHMACKeys(ctx, projectID)
3819	count := 0
3820	for ; ; count++ {
3821		_, err := iter.Next()
3822		if err == iterator.Done {
3823			break
3824		}
3825		if err != nil {
3826			t.Fatalf("Failed to ListHMACKeys: %v", err)
3827		}
3828	}
3829	if count == 0 {
3830		t.Fatal("Failed to list any HMACKeys")
3831	}
3832
3833	// 4. Finally set it to back to Inactive and
3834	// then retry the deletion which should now succeed.
3835	_, _ = hkh.Update(ctx, HMACKeyAttrsToUpdate{
3836		State: Inactive,
3837	})
3838	if err := hkh.Delete(ctx); err != nil {
3839		t.Fatalf("Unexpected deletion failure: %v", err)
3840	}
3841
3842	_, err = hkh.Get(ctx)
3843	if err != nil && !strings.Contains(err.Error(), "404") {
3844		// If the deleted key has already been garbage collected, a 404 is expected.
3845		// Other errors should cause a failure and are not expected.
3846		t.Fatalf("Unexpected error: %v", err)
3847	}
3848}
3849
3850func TestIntegration_PostPolicyV4(t *testing.T) {
3851	jwtConf, err := testutil.JWTConfig()
3852	if err != nil {
3853		t.Fatal(err)
3854	}
3855	if jwtConf == nil {
3856		t.Skip("JSON key file is not present")
3857	}
3858
3859	ctx := context.Background()
3860	client := testConfig(ctx, t)
3861	defer client.Close()
3862
3863	projectID := testutil.ProjID()
3864	newBucketName := uidSpace.New()
3865	b := client.Bucket(newBucketName)
3866	h := testHelper{t}
3867	h.mustCreate(b, projectID, nil)
3868	defer h.mustDeleteBucket(b)
3869
3870	statusCodeToRespond := 200
3871	opts := &PostPolicyV4Options{
3872		GoogleAccessID: jwtConf.Email,
3873		PrivateKey:     jwtConf.PrivateKey,
3874
3875		Expires: time.Now().Add(30 * time.Minute),
3876
3877		Fields: &PolicyV4Fields{
3878			StatusCodeOnSuccess: statusCodeToRespond,
3879			ContentType:         "text/plain",
3880			ACL:                 "public-read",
3881		},
3882
3883		// The conditions that the uploaded file will be expected to conform to.
3884		Conditions: []PostPolicyV4Condition{
3885			// Make the file a maximum of 10mB.
3886			ConditionContentLengthRange(0, 10<<20),
3887			ConditionStartsWith("$acl", "public"),
3888		},
3889	}
3890
3891	objectName := "my-object.txt"
3892	pv4, err := GenerateSignedPostPolicyV4(newBucketName, objectName, opts)
3893	if err != nil {
3894		t.Fatal(err)
3895	}
3896
3897	formBuf := new(bytes.Buffer)
3898	mw := multipart.NewWriter(formBuf)
3899	for fieldName, value := range pv4.Fields {
3900		if err := mw.WriteField(fieldName, value); err != nil {
3901			t.Fatalf("Failed to write form field: %q: %v", fieldName, err)
3902		}
3903	}
3904
3905	// Now let's perform the upload.
3906	fileBody := bytes.Repeat([]byte("a"), 25)
3907	mf, err := mw.CreateFormFile("file", "myfile.txt")
3908	if err != nil {
3909		t.Fatal(err)
3910	}
3911	if _, err := mf.Write(fileBody); err != nil {
3912		t.Fatal(err)
3913	}
3914	if err := mw.Close(); err != nil {
3915		t.Fatal(err)
3916	}
3917
3918	// Compose the HTTP request.
3919	req, err := http.NewRequest("POST", pv4.URL, formBuf)
3920	if err != nil {
3921		t.Fatalf("Failed to compose HTTP request: %v", err)
3922	}
3923	// Ensure the Content-Type is derived from the writer.
3924	req.Header.Set("Content-Type", mw.FormDataContentType())
3925	res, err := http.DefaultClient.Do(req)
3926	if err != nil {
3927		t.Fatal(err)
3928	}
3929	if g, w := res.StatusCode, statusCodeToRespond; g != w {
3930		blob, _ := httputil.DumpResponse(res, true)
3931		t.Fatalf("Status code in response mismatch: got %d want %d\nBody: %s", g, w, blob)
3932	}
3933	io.Copy(ioutil.Discard, res.Body)
3934
3935	// Verify that the file was properly uploaded, by
3936	// reading back its attributes and content!
3937	obj := b.Object(objectName)
3938	defer h.mustDeleteObject(obj)
3939
3940	attrs, err := obj.Attrs(ctx)
3941	if err != nil {
3942		t.Fatalf("Failed to retrieve attributes: %v", err)
3943	}
3944	if g, w := attrs.Size, int64(len(fileBody)); g != w {
3945		t.Errorf("ContentLength mismatch: got %d want %d", g, w)
3946	}
3947	if g, w := attrs.MD5, md5.Sum(fileBody); !bytes.Equal(g, w[:]) {
3948		t.Errorf("MD5Checksum mismatch\nGot:  %x\nWant: %x", g, w)
3949	}
3950
3951	// Compare the uploaded body with the expected.
3952	rd, err := obj.NewReader(ctx)
3953	if err != nil {
3954		t.Fatalf("Failed to create a reader: %v", err)
3955	}
3956	gotBody, err := ioutil.ReadAll(rd)
3957	if err != nil {
3958		t.Fatalf("Failed to read the body: %v", err)
3959	}
3960	if diff := testutil.Diff(string(gotBody), string(fileBody)); diff != "" {
3961		t.Fatalf("Body mismatch: got - want +\n%s", diff)
3962	}
3963}
3964
3965// Verify that custom scopes passed in by the user are applied correctly.
3966func TestIntegration_Scopes(t *testing.T) {
3967	// A default client should be able to write objects since it has scope of
3968	// FullControl
3969	ctx := context.Background()
3970	clientFullControl := testConfig(ctx, t)
3971	defer clientFullControl.Close()
3972
3973	bkt := clientFullControl.Bucket(bucketName)
3974	obj := "FakeObj1"
3975	contents := []byte("This object should be written successfully\n")
3976	if err := writeObject(ctx, bkt.Object(obj), "text/plain", contents); err != nil {
3977		t.Fatalf("writing: %v", err)
3978	}
3979
3980	// A client with ReadOnly scope should not be able to write successfully.
3981	clientReadOnly, err := NewClient(ctx, option.WithScopes(ScopeReadOnly))
3982	defer clientReadOnly.Close()
3983	if err != nil {
3984		t.Fatalf("error creating client: %v", err)
3985	}
3986
3987	bkt = clientReadOnly.Bucket(bucketName)
3988	obj = "FakeObj2"
3989	contents = []byte("This object should not be written.\n")
3990
3991	if err := writeObject(ctx, bkt.Object(obj), "text/plain", contents); err == nil {
3992		t.Fatal("client with ScopeReadOnly was able to write an object unexpectedly.")
3993	}
3994
3995}
3996
3997type testHelper struct {
3998	t *testing.T
3999}
4000
4001func (h testHelper) mustCreate(b *BucketHandle, projID string, attrs *BucketAttrs) {
4002	if err := b.Create(context.Background(), projID, attrs); err != nil {
4003		h.t.Fatalf("%s: bucket create: %v", loc(), err)
4004	}
4005}
4006
4007func (h testHelper) mustDeleteBucket(b *BucketHandle) {
4008	if err := b.Delete(context.Background()); err != nil {
4009		h.t.Fatalf("%s: bucket delete: %v", loc(), err)
4010	}
4011}
4012
4013func (h testHelper) mustBucketAttrs(b *BucketHandle) *BucketAttrs {
4014	attrs, err := b.Attrs(context.Background())
4015	if err != nil {
4016		h.t.Fatalf("%s: bucket attrs: %v", loc(), err)
4017	}
4018	return attrs
4019}
4020
4021func (h testHelper) mustUpdateBucket(b *BucketHandle, ua BucketAttrsToUpdate) *BucketAttrs {
4022	attrs, err := b.Update(context.Background(), ua)
4023	if err != nil {
4024		h.t.Fatalf("%s: update: %v", loc(), err)
4025	}
4026	return attrs
4027}
4028
4029func (h testHelper) mustObjectAttrs(o *ObjectHandle) *ObjectAttrs {
4030	attrs, err := o.Attrs(context.Background())
4031	if err != nil {
4032		h.t.Fatalf("%s: object attrs: %v", loc(), err)
4033	}
4034	return attrs
4035}
4036
4037func (h testHelper) mustDeleteObject(o *ObjectHandle) {
4038	if err := o.Delete(context.Background()); err != nil {
4039		h.t.Fatalf("%s: object delete: %v", loc(), err)
4040	}
4041}
4042
4043func (h testHelper) mustUpdateObject(o *ObjectHandle, ua ObjectAttrsToUpdate) *ObjectAttrs {
4044	attrs, err := o.Update(context.Background(), ua)
4045	if err != nil {
4046		h.t.Fatalf("%s: update: %v", loc(), err)
4047	}
4048	return attrs
4049}
4050
4051func (h testHelper) mustWrite(w *Writer, data []byte) {
4052	if _, err := w.Write(data); err != nil {
4053		w.Close()
4054		h.t.Fatalf("%s: write: %v", loc(), err)
4055	}
4056	if err := w.Close(); err != nil {
4057		h.t.Fatalf("%s: close write: %v", loc(), err)
4058	}
4059}
4060
4061func (h testHelper) mustRead(obj *ObjectHandle) []byte {
4062	data, err := readObject(context.Background(), obj)
4063	if err != nil {
4064		h.t.Fatalf("%s: read: %v", loc(), err)
4065	}
4066	return data
4067}
4068
4069func (h testHelper) mustNewReader(obj *ObjectHandle) *Reader {
4070	r, err := obj.NewReader(context.Background())
4071	if err != nil {
4072		h.t.Fatalf("%s: new reader: %v", loc(), err)
4073	}
4074	return r
4075}
4076
4077func writeObject(ctx context.Context, obj *ObjectHandle, contentType string, contents []byte) error {
4078	w := obj.NewWriter(ctx)
4079	w.ContentType = contentType
4080	w.CacheControl = "public, max-age=60"
4081	if contents != nil {
4082		if _, err := w.Write(contents); err != nil {
4083			_ = w.Close()
4084			return err
4085		}
4086	}
4087	return w.Close()
4088}
4089
4090// loc returns a string describing the file and line of its caller's call site. In
4091// other words, if a test function calls a helper, and the helper calls loc, then the
4092// string will refer to the line on which the test function called the helper.
4093// TODO(jba): use t.Helper once we drop go 1.6.
4094func loc() string {
4095	_, file, line, ok := runtime.Caller(2)
4096	if !ok {
4097		return "???"
4098	}
4099	return fmt.Sprintf("%s:%d", filepath.Base(file), line)
4100}
4101
4102func readObject(ctx context.Context, obj *ObjectHandle) ([]byte, error) {
4103	r, err := obj.NewReader(ctx)
4104	if err != nil {
4105		return nil, err
4106	}
4107	defer r.Close()
4108	return ioutil.ReadAll(r)
4109}
4110
4111// cleanupBuckets deletes the bucket used for testing, as well as old
4112// testing buckets that weren't cleaned previously.
4113func cleanupBuckets() error {
4114	if testing.Short() {
4115		return nil // Don't clean up in short mode.
4116	}
4117	ctx := context.Background()
4118	client := config(ctx)
4119	if client == nil {
4120		return nil // Don't cleanup if we're not configured correctly.
4121	}
4122	defer client.Close()
4123	if err := killBucket(ctx, client, bucketName); err != nil {
4124		return err
4125	}
4126	if err := killBucket(ctx, client, grpcBucketName); err != nil {
4127		return err
4128	}
4129
4130	// Delete buckets whose name begins with our test prefix, and which were
4131	// created a while ago. (Unfortunately GCS doesn't provide last-modified
4132	// time, which would be a better way to check for staleness.)
4133	if err := deleteExpiredBuckets(ctx, client, testPrefix); err != nil {
4134		return err
4135	}
4136	return deleteExpiredBuckets(ctx, client, grpcTestPrefix)
4137}
4138
4139func deleteExpiredBuckets(ctx context.Context, client *Client, prefix string) error {
4140	const expireAge = 24 * time.Hour
4141	projectID := testutil.ProjID()
4142	it := client.Buckets(ctx, projectID)
4143	it.Prefix = prefix
4144	for {
4145		bktAttrs, err := it.Next()
4146		if err == iterator.Done {
4147			break
4148		}
4149		if err != nil {
4150			return err
4151		}
4152		if time.Since(bktAttrs.Created) > expireAge {
4153			log.Printf("deleting bucket %q, which is more than %s old", bktAttrs.Name, expireAge)
4154			if err := killBucket(ctx, client, bktAttrs.Name); err != nil {
4155				return err
4156			}
4157		}
4158	}
4159	return nil
4160}
4161
4162// killBucket deletes a bucket and all its objects.
4163func killBucket(ctx context.Context, client *Client, bucketName string) error {
4164	bkt := client.Bucket(bucketName)
4165	// Bucket must be empty to delete.
4166	it := bkt.Objects(ctx, nil)
4167	for {
4168		objAttrs, err := it.Next()
4169		if err == iterator.Done {
4170			break
4171		}
4172		if err != nil {
4173			return err
4174		}
4175		// Objects with a hold must have the hold released.
4176		if objAttrs.EventBasedHold || objAttrs.TemporaryHold {
4177			obj := bkt.Object(objAttrs.Name)
4178			if _, err := obj.Update(ctx, ObjectAttrsToUpdate{EventBasedHold: false, TemporaryHold: false}); err != nil {
4179				return fmt.Errorf("removing hold from %q: %v", bucketName+"/"+objAttrs.Name, err)
4180			}
4181		}
4182		if err := bkt.Object(objAttrs.Name).Delete(ctx); err != nil {
4183			return fmt.Errorf("deleting %q: %v", bucketName+"/"+objAttrs.Name, err)
4184		}
4185	}
4186	// GCS is eventually consistent, so this delete may fail because the
4187	// replica still sees an object in the bucket. We log the error and expect
4188	// a later test run to delete the bucket.
4189	if err := bkt.Delete(ctx); err != nil {
4190		log.Printf("deleting %q: %v", bucketName, err)
4191	}
4192	return nil
4193}
4194
4195func randomContents() []byte {
4196	h := md5.New()
4197	io.WriteString(h, fmt.Sprintf("hello world%d", rng.Intn(100000)))
4198	return h.Sum(nil)
4199}
4200
4201type zeros struct{}
4202
4203func (zeros) Read(p []byte) (int, error) { return len(p), nil }
4204
4205// Make a GET request to a URL using an unauthenticated client, and return its contents.
4206func getURL(url string, headers map[string][]string) ([]byte, error) {
4207	req, err := http.NewRequest("GET", url, nil)
4208	if err != nil {
4209		return nil, err
4210	}
4211	req.Header = headers
4212	res, err := http.DefaultClient.Do(req)
4213	if err != nil {
4214		return nil, err
4215	}
4216	defer res.Body.Close()
4217	bytes, err := ioutil.ReadAll(res.Body)
4218	if err != nil {
4219		return nil, err
4220	}
4221	if res.StatusCode != 200 {
4222		return nil, fmt.Errorf("code=%d, body=%s", res.StatusCode, string(bytes))
4223	}
4224	return bytes, nil
4225}
4226
4227// Make a PUT request to a URL using an unauthenticated client, and return its contents.
4228func putURL(url string, headers map[string][]string, payload io.Reader) ([]byte, error) {
4229	req, err := http.NewRequest("PUT", url, payload)
4230	if err != nil {
4231		return nil, err
4232	}
4233	req.Header = headers
4234	res, err := http.DefaultClient.Do(req)
4235	if err != nil {
4236		return nil, err
4237	}
4238	defer res.Body.Close()
4239	bytes, err := ioutil.ReadAll(res.Body)
4240	if err != nil {
4241		return nil, err
4242	}
4243	if res.StatusCode != 200 {
4244		return nil, fmt.Errorf("code=%d, body=%s", res.StatusCode, string(bytes))
4245	}
4246	return bytes, nil
4247}
4248
4249func namesEqual(obj *ObjectAttrs, bucketName, objectName string) bool {
4250	return obj.Bucket == bucketName && obj.Name == objectName
4251}
4252
4253func keyFileEmail(filename string) (string, error) {
4254	bytes, err := ioutil.ReadFile(filename)
4255	if err != nil {
4256		return "", err
4257	}
4258	var v struct {
4259		ClientEmail string `json:"client_email"`
4260	}
4261	if err := json.Unmarshal(bytes, &v); err != nil {
4262		return "", err
4263	}
4264	return v.ClientEmail, nil
4265}
4266
4267func containsACL(acls []ACLRule, e ACLEntity, r ACLRole) bool {
4268	for _, a := range acls {
4269		if a.Entity == e && a.Role == r {
4270			return true
4271		}
4272	}
4273	return false
4274}
4275
4276func hasRule(acl []ACLRule, rule ACLRule) bool {
4277	for _, r := range acl {
4278		if cmp.Equal(r, rule) {
4279			return true
4280		}
4281	}
4282	return false
4283}
4284
4285// retry retries a function call as well as an (optional) correctness check for up
4286// to 11 seconds. Both call and check must run without error in order to succeed.
4287// If the timeout is hit, the most recent error from call or check will be returned.
4288// This function should be used to wrap calls that might cause integration test
4289// flakes due to delays in propagation (for example, metadata updates).
4290func retry(ctx context.Context, call func() error, check func() error) error {
4291	timeout := time.After(11 * time.Second)
4292	var err error
4293	for {
4294		select {
4295		case <-timeout:
4296			return err
4297		default:
4298		}
4299		err = call()
4300		if err == nil {
4301			if check == nil || check() == nil {
4302				return nil
4303			}
4304			err = check()
4305		}
4306		time.Sleep(200 * time.Millisecond)
4307	}
4308}
4309