1// Copyright (C) MongoDB, Inc. 2017-present.
2//
3// Licensed under the Apache License, Version 2.0 (the "License"); you may
4// not use this file except in compliance with the License. You may obtain
5// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
6
7package gridfs
8
9import (
10	"context"
11	"encoding/hex"
12	"encoding/json"
13	"io/ioutil"
14	"path"
15	"testing"
16
17	"bytes"
18
19	"fmt"
20
21	"time"
22
23	"go.mongodb.org/mongo-driver/bson"
24	"go.mongodb.org/mongo-driver/bson/primitive"
25	"go.mongodb.org/mongo-driver/internal/testutil"
26	"go.mongodb.org/mongo-driver/internal/testutil/helpers"
27	"go.mongodb.org/mongo-driver/mongo"
28	"go.mongodb.org/mongo-driver/mongo/options"
29	"go.mongodb.org/mongo-driver/x/bsonx"
30)
31
// testFile represents a single GridFS spec test JSON file: the initial data
// to seed the collections with and the list of test cases to run.
type testFile struct {
	Data  dataSection `json:"data"`
	Tests []test      `json:"tests"`
}

// dataSection holds the initial files and chunks documents to load before
// running the tests. They are kept as raw JSON so they can be decoded as
// extended JSON later.
type dataSection struct {
	Files  []json.RawMessage `json:"files"`
	Chunks []json.RawMessage `json:"chunks"`
}

// test is one spec test case: a description, optional arrange commands to run
// first, the operation to perform, and the expected outcome.
type test struct {
	Description string         `json:"description"`
	Arrange     arrangeSection `json:"arrange"`
	Act         actSection     `json:"act"`
	Assert      assertSection  `json:"assert"`
}

// arrangeSection lists commands (as raw JSON) applied to the collections
// before the act section runs.
type arrangeSection struct {
	Data []json.RawMessage `json:"data"`
}

// actSection names the GridFS operation to run and carries its raw arguments.
type actSection struct {
	Operation string          `json:"operation"`
	Arguments json.RawMessage `json:"arguments"`
}

// assertSection describes the expected outcome of a test case: an expected
// result value, an expected error name, or data commands that bring the
// expected collections into the state the actual collections should match.
type assertSection struct {
	Result json.RawMessage     `json:"result"`
	Error  string              `json:"error"`
	Data   []assertDataSection `json:"data"`
}

// assertDataSection is a single insert or delete command applied to an
// expected collection during the assert phase.
type assertDataSection struct {
	Insert    string            `json:"insert"`
	Documents []interface{}     `json:"documents"`
	Delete    string            `json:"delete"`
	Deletes   []json.RawMessage `json:"deletes"`
}
70
// gridFsTestDir is the directory containing the GridFS spec test JSON files.
const gridFsTestDir = "../../data/gridfs"

// downloadBufferSize is the size of the shared download buffer; spec test
// files are small enough to be read in a single call.
const downloadBufferSize = 100

var ctx = context.Background()
var emptyDoc = bsonx.Doc{}
var client *mongo.Client
var db *mongo.Database

// chunks/files are the collections the bucket under test operates on;
// expectedChunks/expectedFiles hold the documents the tests expect to see.
var chunks, files, expectedChunks, expectedFiles *mongo.Collection

// downloadBuffer is shared by all download tests in this file.
var downloadBuffer = make([]byte, downloadBufferSize)

// deadline is a generous read/write deadline applied to buckets and streams.
var deadline = time.Now().Add(time.Hour)
82
83// load initial data files into a files and chunks collection
84// returns the chunkSize embedded in the documents if there is one
85func loadInitialFiles(t *testing.T, data dataSection) int32 {
86	filesDocs := make([]interface{}, 0, len(data.Files))
87	chunksDocs := make([]interface{}, 0, len(data.Chunks))
88	var chunkSize int32
89
90	for _, v := range data.Files {
91		docBytes, err := v.MarshalJSON()
92		testhelpers.RequireNil(t, err, "error converting raw message to bytes: %s", err)
93		doc := bsonx.Doc{}
94		err = bson.UnmarshalExtJSON(docBytes, false, &doc)
95		testhelpers.RequireNil(t, err, "error creating file document: %s", err)
96
97		// convert length from int32 to int64
98		if length, err := doc.LookupErr("length"); err == nil {
99			doc = doc.Delete("length")
100			doc = doc.Append("length", bsonx.Int64(int64(length.Int32())))
101		}
102		if cs, err := doc.LookupErr("chunkSize"); err == nil {
103			chunkSize = cs.Int32()
104		}
105
106		filesDocs = append(filesDocs, doc)
107	}
108
109	for _, v := range data.Chunks {
110		docBytes, err := v.MarshalJSON()
111		testhelpers.RequireNil(t, err, "error converting raw message to bytes: %s", err)
112		doc := bsonx.Doc{}
113		err = bson.UnmarshalExtJSON(docBytes, false, &doc)
114		testhelpers.RequireNil(t, err, "error creating file document: %s", err)
115
116		// convert data $hex to binary value
117		if hexStr, err := doc.LookupErr("data", "$hex"); err == nil {
118			hexBytes := convertHexToBytes(t, hexStr.StringValue())
119			doc = doc.Delete("data")
120			doc = append(doc, bsonx.Elem{"data", bsonx.Binary(0x00, hexBytes)})
121		}
122
123		// convert n from int64 to int32
124		if n, err := doc.LookupErr("n"); err == nil {
125			doc = doc.Delete("n")
126			doc = append(doc, bsonx.Elem{"n", bsonx.Int32(n.Int32())})
127		}
128
129		chunksDocs = append(chunksDocs, doc)
130	}
131
132	if len(filesDocs) > 0 {
133		_, err := files.InsertMany(ctx, filesDocs)
134		testhelpers.RequireNil(t, err, "error inserting into files: %s", err)
135		_, err = expectedFiles.InsertMany(ctx, filesDocs)
136		testhelpers.RequireNil(t, err, "error inserting into expected files: %s", err)
137	}
138
139	if len(chunksDocs) > 0 {
140		_, err := chunks.InsertMany(ctx, chunksDocs)
141		testhelpers.RequireNil(t, err, "error inserting into chunks: %s", err)
142		_, err = expectedChunks.InsertMany(ctx, chunksDocs)
143		testhelpers.RequireNil(t, err, "error inserting into expected chunks: %s", err)
144	}
145
146	return chunkSize
147}
148
149func dropColl(t *testing.T, c *mongo.Collection) {
150	err := c.Drop(ctx)
151	testhelpers.RequireNil(t, err, "error dropping %s: %s", c.Name(), err)
152}
153
154func clearCollections(t *testing.T) {
155	dropColl(t, files)
156	dropColl(t, expectedFiles)
157	dropColl(t, chunks)
158	dropColl(t, expectedChunks)
159}
160
161func TestGridFSSpec(t *testing.T) {
162	var err error
163	cs := testutil.ConnString(t)
164	client, err = mongo.NewClient(options.Client().ApplyURI(cs.String()))
165	testhelpers.RequireNil(t, err, "error creating client: %s", err)
166
167	err = client.Connect(ctx)
168	testhelpers.RequireNil(t, err, "error connecting client: %s", err)
169
170	db = client.Database("gridFSTestDB")
171	chunks = db.Collection("fs.chunks")
172	files = db.Collection("fs.files")
173	expectedChunks = db.Collection("expected.chunks")
174	expectedFiles = db.Collection("expected.files")
175
176	for _, file := range testhelpers.FindJSONFilesInDir(t, gridFsTestDir) {
177		runGridFSTestFile(t, path.Join(gridFsTestDir, file), db)
178	}
179}
180
181func runGridFSTestFile(t *testing.T, filepath string, db *mongo.Database) {
182	content, err := ioutil.ReadFile(filepath)
183	testhelpers.RequireNil(t, err, "error reading file %s: %s", filepath, err)
184
185	var testfile testFile
186	err = json.Unmarshal(content, &testfile)
187	testhelpers.RequireNil(t, err, "error unmarshalling test file for %s: %s", filepath, err)
188
189	clearCollections(t)
190	chunkSize := loadInitialFiles(t, testfile.Data)
191	if chunkSize == 0 {
192		chunkSize = DefaultChunkSize
193	}
194
195	bucket, err := NewBucket(db, options.GridFSBucket().SetChunkSizeBytes(chunkSize))
196	testhelpers.RequireNil(t, err, "error creating bucket: %s", err)
197	err = bucket.SetWriteDeadline(deadline)
198	testhelpers.RequireNil(t, err, "error setting write deadline: %s", err)
199	err = bucket.SetReadDeadline(deadline)
200	testhelpers.RequireNil(t, err, "error setting read deadline: %s", err)
201
202	for _, test := range testfile.Tests {
203		t.Run(test.Description, func(t *testing.T) {
204			switch test.Act.Operation {
205			case "upload":
206				runUploadTest(t, test, bucket)
207				clearCollections(t)
208				runUploadFromStreamTest(t, test, bucket)
209			case "download":
210				runDownloadTest(t, test, bucket)
211				runDownloadToStreamTest(t, test, bucket)
212			case "download_by_name":
213				runDownloadByNameTest(t, test, bucket)
214				runDownloadByNameToStreamTest(t, test, bucket)
215			case "delete":
216				runDeleteTest(t, test, bucket)
217			}
218		})
219
220		if test.Arrange.Data != nil {
221			clearCollections(t)
222			loadInitialFiles(t, testfile.Data)
223		}
224	}
225}
226
227func getInt64(val bsonx.Val) int64 {
228	switch val.Type() {
229	case bson.TypeInt32:
230		return int64(val.Int32())
231	case bson.TypeInt64:
232		return val.Int64()
233	case bson.TypeDouble:
234		return int64(val.Double())
235	}
236
237	return 0
238}
239
240func compareValues(expected bsonx.Val, actual bsonx.Val) bool {
241	if expected.IsNumber() {
242		if !actual.IsNumber() {
243			return false
244		}
245
246		return getInt64(expected) == getInt64(actual)
247	}
248
249	switch expected.Type() {
250	case bson.TypeString:
251		return actual.StringValue() == expected.StringValue()
252	case bson.TypeBinary:
253		aSub, aBytes := actual.Binary()
254		eSub, eBytes := expected.Binary()
255
256		return aSub == eSub && bytes.Equal(aBytes, eBytes)
257	case bson.TypeObjectID:
258		eID := [12]byte(expected.ObjectID())
259		aID := [12]byte(actual.ObjectID())
260
261		return bytes.Equal(eID[:], aID[:])
262	case bson.TypeEmbeddedDocument:
263		return expected.Document().Equal(actual.Document())
264	default:
265		fmt.Printf("unknown type: %d\n", expected.Type())
266	}
267
268	return true // shouldn't get here
269}
270
// compareGfsDoc compares each field of expected against actual, recursing
// into embedded documents. Deprecated fields (md5, contentType, aliases) and
// fields whose values cannot be predicted (_id, uploadDate) are skipped after
// verifying the key exists in actual.
func compareGfsDoc(t *testing.T, expected bsonx.Doc, actual bsonx.Doc, filesID interface{}) {
	for _, elem := range expected {
		key := elem.Key

		// continue for deprecated fields
		if key == "md5" || key == "contentType" || key == "aliases" {
			continue
		}

		actualVal, err := actual.LookupErr(key)
		testhelpers.RequireNil(t, err, "key %s not found in actual for test %s", key, t.Name())

		// continue for fields with unknown values
		if key == "_id" || key == "uploadDate" {
			continue
		}

		if key == "files_id" {
			// NOTE(review): both slices are freshly allocated and zeroed,
			// and neither UnmarshalJSON call writes into them (UnmarshalJSON
			// reads FROM its argument), so the bytes.Equal below compares
			// two all-zero slices and can never fail. The expected value of
			// elem.Value is never consulted. This looks like a latent bug —
			// possibly MarshalJSON was intended — TODO confirm intent.
			expectedBytes := make([]byte, 12)
			actualBytes := make([]byte, 12)

			var oid primitive.ObjectID
			err = (&oid).UnmarshalJSON(expectedBytes)
			testhelpers.RequireNil(t, err, "error unmarshalling expected bytes: %s", err)
			// filesID is reassigned here, but it is a plain parameter, so the
			// caller never observes this change.
			filesID = oid
			actualID := actualVal.ObjectID()
			err = (&actualID).UnmarshalJSON(actualBytes)
			testhelpers.RequireNil(t, err, "error unmarshalling actual bytes: %s", err)

			if !bytes.Equal(expectedBytes, actualBytes) {
				t.Fatalf("files_id mismatch for test %s", t.Name())
			}

			continue
		}

		// recurse into embedded documents
		if eDoc, ok := elem.Value.DocumentOK(); ok {
			compareGfsDoc(t, eDoc, actualVal.Document(), filesID)
			continue
		}

		if !compareValues(elem.Value, actualVal) {
			t.Fatalf("values for key %s not equal for test %s", key, t.Name())
		}
	}
}
317
318// compare chunks and expectedChunks collections
319func compareChunks(t *testing.T, filesID interface{}) {
320	actualCursor, err := chunks.Find(ctx, emptyDoc)
321	testhelpers.RequireNil(t, err, "error running Find for chunks: %s", err)
322	expectedCursor, err := expectedChunks.Find(ctx, emptyDoc)
323	testhelpers.RequireNil(t, err, "error running Find for expected chunks: %s", err)
324
325	for expectedCursor.Next(ctx) {
326		if !actualCursor.Next(ctx) {
327			t.Fatalf("chunks has fewer documents than expectedChunks")
328		}
329
330		var actualChunk bsonx.Doc
331		var expectedChunk bsonx.Doc
332
333		err = actualCursor.Decode(&actualChunk)
334		testhelpers.RequireNil(t, err, "error decoding actual chunk: %s", err)
335		err = expectedCursor.Decode(&expectedChunk)
336		testhelpers.RequireNil(t, err, "error decoding expected chunk: %s", err)
337
338		compareGfsDoc(t, expectedChunk, actualChunk, filesID)
339	}
340}
341
342// compare files and expectedFiles collections
343func compareFiles(t *testing.T) {
344	actualCursor, err := files.Find(ctx, emptyDoc)
345	testhelpers.RequireNil(t, err, "error running Find for files: %s", err)
346	expectedCursor, err := expectedFiles.Find(ctx, emptyDoc)
347	testhelpers.RequireNil(t, err, "error running Find for expected files: %s", err)
348
349	for expectedCursor.Next(ctx) {
350		if !actualCursor.Next(ctx) {
351			t.Fatalf("files has fewer documents than expectedFiles")
352		}
353
354		var actualFile bsonx.Doc
355		var expectedFile bsonx.Doc
356
357		err = actualCursor.Decode(&actualFile)
358		testhelpers.RequireNil(t, err, "error decoding actual file: %s", err)
359		err = expectedCursor.Decode(&expectedFile)
360		testhelpers.RequireNil(t, err, "error decoding expected file: %s", err)
361
362		compareGfsDoc(t, expectedFile, actualFile, primitive.ObjectID{})
363	}
364}
365
366func convertHexToBytes(t *testing.T, hexStr string) []byte {
367	hexBytes, err := hex.DecodeString(hexStr)
368	testhelpers.RequireNil(t, err, "error decoding hex for %s: %s", t.Name(), err)
369	return hexBytes
370}
371
372func msgToDoc(t *testing.T, msg json.RawMessage) bsonx.Doc {
373	rawBytes, err := msg.MarshalJSON()
374	testhelpers.RequireNil(t, err, "error marshalling message: %s", err)
375
376	doc := bsonx.Doc{}
377	err = bson.UnmarshalExtJSON(rawBytes, true, &doc)
378	testhelpers.RequireNil(t, err, "error creating BSON doc: %s", err)
379
380	return doc
381}
382
383func runUploadAssert(t *testing.T, test test, fileID interface{}) {
384	assert := test.Assert
385
386	for _, assertData := range assert.Data {
387		// each assertData section is a single command that modifies an expected collection
388		if assertData.Insert != "" {
389			var err error
390			docs := make([]interface{}, len(assertData.Documents))
391
392			for i, docInterface := range assertData.Documents {
393				rdr, err := bson.Marshal(docInterface)
394				testhelpers.RequireNil(t, err, "error marshaling doc: %s", err)
395				doc, err := bsonx.ReadDoc(rdr)
396				testhelpers.RequireNil(t, err, "error reading doc: %s", err)
397
398				if id, err := doc.LookupErr("_id"); err == nil {
399					idStr := id.StringValue()
400					if idStr == "*result" || idStr == "*actual" {
401						// server will create _id
402						doc = doc.Delete("_id")
403					}
404				}
405
406				if data, err := doc.LookupErr("data"); err == nil {
407					hexBytes := convertHexToBytes(t, data.Document().Lookup("$hex").StringValue())
408					doc = doc.Delete("data")
409					doc = append(doc, bsonx.Elem{"data", bsonx.Binary(0x00, hexBytes)})
410				}
411
412				docs[i] = doc
413			}
414
415			switch assertData.Insert {
416			case "expected.files":
417				_, err = expectedFiles.InsertMany(ctx, docs)
418			case "expected.chunks":
419				_, err = expectedChunks.InsertMany(ctx, docs)
420			}
421
422			testhelpers.RequireNil(t, err, "error modifying expected collections: %s", err)
423		}
424
425		compareFiles(t)
426		compareChunks(t, fileID)
427	}
428}
429
430func parseUploadOptions(args bsonx.Doc) *options.UploadOptions {
431	opts := options.GridFSUpload()
432
433	if optionsVal, err := args.LookupErr("options"); err == nil {
434		for _, elem := range optionsVal.Document() {
435			val := elem.Value
436
437			switch elem.Key {
438			case "chunkSizeBytes":
439				size := val.Int32()
440				opts = opts.SetChunkSizeBytes(size)
441			case "metadata":
442				opts = opts.SetMetadata(val.Document())
443			}
444		}
445	}
446
447	return opts
448}
449
450func runUploadFromStreamTest(t *testing.T, test test, bucket *Bucket) {
451	args := msgToDoc(t, test.Act.Arguments)
452	opts := parseUploadOptions(args)
453	hexBytes := convertHexToBytes(t, args.Lookup("source", "$hex").StringValue())
454
455	fileID, err := bucket.UploadFromStream(args.Lookup("filename").StringValue(), bytes.NewBuffer(hexBytes), opts)
456	testhelpers.RequireNil(t, err, "error uploading from stream: %s", err)
457
458	runUploadAssert(t, test, fileID)
459}
460
461func runUploadTest(t *testing.T, test test, bucket *Bucket) {
462	// run operation from act section
463	args := msgToDoc(t, test.Act.Arguments)
464
465	opts := parseUploadOptions(args)
466	hexBytes := convertHexToBytes(t, args.Lookup("source", "$hex").StringValue())
467	stream, err := bucket.OpenUploadStream(args.Lookup("filename").StringValue(), opts)
468	testhelpers.RequireNil(t, err, "error opening upload stream for %s: %s", t.Name(), err)
469
470	err = stream.SetWriteDeadline(deadline)
471	testhelpers.RequireNil(t, err, "error setting write deadline: %s", err)
472	n, err := stream.Write(hexBytes)
473	if n != len(hexBytes) {
474		t.Fatalf("all bytes not written for %s. expected %d got %d", t.Name(), len(hexBytes), n)
475	}
476
477	err = stream.Close()
478	testhelpers.RequireNil(t, err, "error closing upload stream for %s: %s", t.Name(), err)
479
480	// assert section is laid out as a series of commands that modify expected.files and expected.chunks
481	runUploadAssert(t, test, stream.FileID)
482}
483
484// run a series of delete operations that are already BSON documents
485func runDeletes(t *testing.T, deletes bsonx.Arr, coll *mongo.Collection) {
486	for _, val := range deletes {
487		doc := val.Document() // has q and limit
488		filter := doc.Lookup("q").Document()
489
490		_, err := coll.DeleteOne(ctx, filter)
491		testhelpers.RequireNil(t, err, "error running deleteOne for %s: %s", t.Name(), err)
492	}
493}
494
495// run a series of updates that are already BSON documents
496func runUpdates(t *testing.T, updates bsonx.Arr, coll *mongo.Collection) {
497	for _, val := range updates {
498		updateDoc := val.Document()
499		filter := updateDoc.Lookup("q").Document()
500		update := updateDoc.Lookup("u").Document()
501
502		// update has $set -> data -> $hex
503		if hexStr, err := update.LookupErr("$set", "data", "$hex"); err == nil {
504			hexBytes := convertHexToBytes(t, hexStr.StringValue())
505			update = update.Delete("$set")
506			update = append(update, bsonx.Elem{"$set", bsonx.Document(bsonx.Doc{
507				{"data", bsonx.Binary(0x00, hexBytes)},
508			})})
509			testhelpers.RequireNil(t, err, "error concatenating data bytes to update: %s", err)
510		}
511
512		_, err := coll.UpdateOne(ctx, filter, update)
513		testhelpers.RequireNil(t, err, "error running updateOne for test %s: %s", t.Name(), err)
514	}
515}
516
517func compareDownloadAssertResult(t *testing.T, assert assertSection, copied int64) {
518	assertResult, err := assert.Result.MarshalJSON() // json.RawMessage
519	testhelpers.RequireNil(t, err, "error marshalling assert result: %s", err)
520	assertDoc := bsonx.Doc{}
521	err = bson.UnmarshalExtJSON(assertResult, true, &assertDoc)
522	testhelpers.RequireNil(t, err, "error constructing result doc: %s", err)
523
524	if hexStr, err := assertDoc.LookupErr("$hex"); err == nil {
525		hexBytes := convertHexToBytes(t, hexStr.StringValue())
526
527		if copied != int64(len(hexBytes)) {
528			t.Fatalf("bytes missing. expected %d bytes, got %d", len(hexBytes), copied)
529		}
530
531		if !bytes.Equal(hexBytes, downloadBuffer[:copied]) {
532			t.Fatalf("downloaded bytes mismatch. expected %v, got %v", hexBytes, downloadBuffer[:copied])
533		}
534	} else {
535		t.Fatalf("%v", err)
536	}
537}
538
539func compareDownloadAssert(t *testing.T, assert assertSection, stream *DownloadStream, streamErr error) {
540	var copied int
541	var copiedErr error
542
543	if streamErr == nil {
544		// files are small enough to read into memory once
545		err := stream.SetReadDeadline(deadline)
546		testhelpers.RequireNil(t, err, "error setting read deadline: %s", err)
547		copied, copiedErr = stream.Read(downloadBuffer)
548		testhelpers.RequireNil(t, err, "error reading from stream: %s", err)
549	}
550
551	// assert section
552	if assert.Result != nil {
553		testhelpers.RequireNil(t, streamErr, "error downloading to stream: %s", streamErr)
554		compareDownloadAssertResult(t, assert, int64(copied))
555	} else if assert.Error != "" {
556		var errToCompare error
557		var expectedErr error
558
559		switch assert.Error {
560		case "FileNotFound":
561			fallthrough
562		case "RevisionNotFound":
563			errToCompare = streamErr
564			expectedErr = ErrFileNotFound
565		case "ChunkIsMissing":
566			errToCompare = copiedErr
567			expectedErr = ErrWrongIndex
568		case "ChunkIsWrongSize":
569			errToCompare = copiedErr
570			expectedErr = ErrWrongSize
571		}
572
573		testhelpers.RequireNotNil(t, errToCompare, "errToCompare is nil")
574		if errToCompare != expectedErr {
575			t.Fatalf("err mismatch. expected %s got %s", expectedErr, errToCompare)
576		}
577	}
578}
579
580func compareDownloadToStreamAssert(t *testing.T, assert assertSection, n int64, err error) {
581	if assert.Result != nil {
582		testhelpers.RequireNil(t, err, "error downloading to stream: %s", err)
583		compareDownloadAssertResult(t, assert, n)
584	} else if assert.Error != "" {
585		var compareErr error
586
587		switch assert.Error {
588		case "FileNotFound":
589			fallthrough
590		case "RevisionNotFound":
591			compareErr = ErrFileNotFound
592		case "ChunkIsMissing":
593			compareErr = ErrWrongIndex
594		case "ChunkIsWrongSize":
595			compareErr = ErrWrongSize
596		}
597
598		testhelpers.RequireNotNil(t, err, "no error when downloading to stream. expected %s", compareErr)
599		if err != compareErr {
600			t.Fatalf("download to stream error mismatch. expected %s got %s", compareErr, err)
601		}
602	}
603}
604
605func runArrangeSection(t *testing.T, test test, coll *mongo.Collection) {
606	for _, msg := range test.Arrange.Data {
607		msgBytes, err := msg.MarshalJSON()
608		testhelpers.RequireNil(t, err, "error marshalling arrange data for test %s: %s", t.Name(), err)
609
610		msgDoc := bsonx.Doc{}
611		err = bson.UnmarshalExtJSON(msgBytes, true, &msgDoc)
612		testhelpers.RequireNil(t, err, "error creating arrange data doc for test %s: %s", t.Name(), err)
613
614		if _, err = msgDoc.LookupErr("delete"); err == nil {
615			// all arrange sections in the current spec tests operate on the fs.chunks collection
616			runDeletes(t, msgDoc.Lookup("deletes").Array(), coll)
617		} else if _, err = msgDoc.LookupErr("update"); err == nil {
618			runUpdates(t, msgDoc.Lookup("updates").Array(), coll)
619		}
620	}
621}
622
623func runDownloadTest(t *testing.T, test test, bucket *Bucket) {
624	runArrangeSection(t, test, chunks)
625
626	args := msgToDoc(t, test.Act.Arguments)
627	stream, streamErr := bucket.OpenDownloadStream(args.Lookup("id").ObjectID())
628	compareDownloadAssert(t, test.Assert, stream, streamErr)
629}
630
// runDownloadToStreamTest runs a download test case through DownloadToStream
// and compares the result/error against the assert section.
func runDownloadToStreamTest(t *testing.T, test test, bucket *Bucket) {
	runArrangeSection(t, test, chunks)
	args := msgToDoc(t, test.Act.Arguments)

	// NOTE(review): bytes.NewBuffer(downloadBuffer) treats the 100 bytes
	// already in the global buffer as existing content, so DownloadToStream's
	// writes land after them; yet the assert path compares the global
	// downloadBuffer[:n] directly. This appears to rely on runDownloadTest
	// having just filled downloadBuffer with the same data — TODO confirm
	// this cross-test coupling is intended.
	downloadStream := bytes.NewBuffer(downloadBuffer)
	n, err := bucket.DownloadToStream(args.Lookup("id").ObjectID(), downloadStream)

	compareDownloadToStreamAssert(t, test.Assert, n, err)
}
640
641func parseDownloadByNameOpts(t *testing.T, args bsonx.Doc) *options.NameOptions {
642	opts := options.GridFSName()
643
644	if optsVal, err := args.LookupErr("options"); err == nil {
645		optsDoc := optsVal.Document()
646
647		if revVal, err := optsDoc.LookupErr("revision"); err == nil {
648			opts = opts.SetRevision(revVal.Int32())
649		}
650	}
651
652	return opts
653}
654
655func runDownloadByNameTest(t *testing.T, test test, bucket *Bucket) {
656	// act section
657	args := msgToDoc(t, test.Act.Arguments)
658	opts := parseDownloadByNameOpts(t, args)
659	stream, streamErr := bucket.OpenDownloadStreamByName(args.Lookup("filename").StringValue(), opts)
660	compareDownloadAssert(t, test.Assert, stream, streamErr)
661}
662
// runDownloadByNameToStreamTest runs a download_by_name test case through
// DownloadToStreamByName and compares the result/error against the assert
// section.
func runDownloadByNameToStreamTest(t *testing.T, test test, bucket *Bucket) {
	args := msgToDoc(t, test.Act.Arguments)
	opts := parseDownloadByNameOpts(t, args)
	// NOTE(review): as in runDownloadToStreamTest, writes append after the
	// 100 pre-existing bytes of the global buffer while the assert path reads
	// downloadBuffer[:n] directly, relying on the preceding by-name stream
	// test having filled the global buffer — TODO confirm this is intended.
	downloadStream := bytes.NewBuffer(downloadBuffer)
	n, err := bucket.DownloadToStreamByName(args.Lookup("filename").StringValue(), downloadStream, opts)

	compareDownloadToStreamAssert(t, test.Assert, n, err)
}
671
672func runDeleteTest(t *testing.T, test test, bucket *Bucket) {
673	runArrangeSection(t, test, files)
674	args := msgToDoc(t, test.Act.Arguments)
675
676	err := bucket.Delete(args.Lookup("id").ObjectID())
677	if test.Assert.Error != "" {
678		var errToCompare error
679		switch test.Assert.Error {
680		case "FileNotFound":
681			errToCompare = ErrFileNotFound
682		}
683
684		if err != errToCompare {
685			t.Fatalf("error mismatch for delete. expected %s got %s", errToCompare, err)
686		}
687	}
688
689	if len(test.Assert.Data) != 0 {
690		for _, data := range test.Assert.Data {
691			deletes := bsonx.Arr{}
692
693			for _, deleteMsg := range data.Deletes {
694				deletes = append(deletes, bsonx.Document(msgToDoc(t, deleteMsg)))
695			}
696
697			runDeletes(t, deletes, expectedFiles)
698			compareFiles(t)
699		}
700	}
701}
702