// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by protoc-gen-go_gapic. DO NOT EDIT.

package storage

import (
	"context"
	"fmt"
	"math"
	"net/url"
	"time"

	gax "github.com/googleapis/gax-go/v2"
	"google.golang.org/api/option"
	gtransport "google.golang.org/api/transport/grpc"
	storagepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/metadata"
)

// newBigQueryReadClientHook, when non-nil, is called by NewBigQueryReadClient
// to obtain extra client options that are appended to the defaults before
// dialing (used by wrapper packages to inject options).
var newBigQueryReadClientHook clientHook

// BigQueryReadCallOptions contains the retry settings for each method of BigQueryReadClient.
type BigQueryReadCallOptions struct {
	CreateReadSession []gax.CallOption
	ReadRows          []gax.CallOption
	SplitReadStream   []gax.CallOption
}

// defaultBigQueryReadClientOptions returns the base options used to dial the
// BigQuery Storage service: the endpoint, the default OAuth scopes, and gRPC
// dial options. The server-supplied gRPC service config is disabled because
// retry behavior is configured client-side via gax call options.
func defaultBigQueryReadClientOptions() []option.ClientOption {
	return []option.ClientOption{
		option.WithEndpoint("bigquerystorage.googleapis.com:443"),
		option.WithGRPCDialOption(grpc.WithDisableServiceConfig()),
		option.WithScopes(DefaultAuthScopes()...),
		// Lift gRPC's default receive limit: ReadRows responses can carry up
		// to ~100 MiB per message.
		option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
			grpc.MaxCallRecvMsgSize(math.MaxInt32))),
	}
}

// defaultBigQueryReadCallOptions returns the per-method retry policies:
// exponential backoff starting at 100 ms, capped at 60 s, growing by 1.30x,
// retried only on the gRPC codes listed for each method.
func defaultBigQueryReadCallOptions() *BigQueryReadCallOptions {
	return &BigQueryReadCallOptions{
		CreateReadSession: []gax.CallOption{
			gax.WithRetry(func() gax.Retryer {
				return gax.OnCodes([]codes.Code{
					codes.DeadlineExceeded,
					codes.Unavailable,
				}, gax.Backoff{
					Initial:    100 * time.Millisecond,
					Max:        60000 * time.Millisecond,
					Multiplier: 1.30,
				})
			}),
		},
		// ReadRows is a stream; only Unavailable is retried (DeadlineExceeded
		// on a long-lived stream is not transient).
		ReadRows: []gax.CallOption{
			gax.WithRetry(func() gax.Retryer {
				return gax.OnCodes([]codes.Code{
					codes.Unavailable,
				}, gax.Backoff{
					Initial:    100 * time.Millisecond,
					Max:        60000 * time.Millisecond,
					Multiplier: 1.30,
				})
			}),
		},
		SplitReadStream: []gax.CallOption{
			gax.WithRetry(func() gax.Retryer {
				return gax.OnCodes([]codes.Code{
					codes.DeadlineExceeded,
					codes.Unavailable,
				}, gax.Backoff{
					Initial:    100 * time.Millisecond,
					Max:        60000 * time.Millisecond,
					Multiplier: 1.30,
				})
			}),
		},
	}
}

// BigQueryReadClient is a client for interacting with BigQuery Storage API.
//
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
type BigQueryReadClient struct {
	// Connection pool of gRPC connections to the service.
	connPool gtransport.ConnPool

	// The gRPC API client.
	bigQueryReadClient storagepb.BigQueryReadClient

	// The call options for this service.
	CallOptions *BigQueryReadCallOptions

	// The x-goog-* metadata to be sent with each request.
	xGoogMetadata metadata.MD
}

// NewBigQueryReadClient creates a new big query read client.
//
// BigQuery Read API.
//
// The Read API can be used to read data from BigQuery.
func NewBigQueryReadClient(ctx context.Context, opts ...option.ClientOption) (*BigQueryReadClient, error) {
	clientOpts := defaultBigQueryReadClientOptions()

	// Hook-supplied options go after the defaults but before the caller's
	// opts, so callers can still override them.
	if newBigQueryReadClientHook != nil {
		hookOpts, err := newBigQueryReadClientHook(ctx, clientHookParams{})
		if err != nil {
			return nil, err
		}
		clientOpts = append(clientOpts, hookOpts...)
	}

	connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
	if err != nil {
		return nil, err
	}
	c := &BigQueryReadClient{
		connPool:    connPool,
		CallOptions: defaultBigQueryReadCallOptions(),

		bigQueryReadClient: storagepb.NewBigQueryReadClient(connPool),
	}
	c.setGoogleClientInfo()

	return c, nil
}

// Connection returns a connection to the API service.
//
// Deprecated: connections are now pooled, so this method does not always
// return the same underlying connection.
func (c *BigQueryReadClient) Connection() *grpc.ClientConn {
	return c.connPool.Conn()
}

// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *BigQueryReadClient) Close() error {
	return c.connPool.Close()
}

// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *BigQueryReadClient) setGoogleClientInfo(keyval ...string) {
	kv := append([]string{"gl-go", versionGo()}, keyval...)
	kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}

// CreateReadSession creates a new read session.
A read session divides the contents of a 165// BigQuery table into one or more streams, which can then be used to read 166// data from the table. The read session also specifies properties of the 167// data to be read, such as a list of columns or a push-down filter describing 168// the rows to be returned. 169// 170// A particular row can be read by at most one stream. When the caller has 171// reached the end of each stream in the session, then all the data in the 172// table has been read. 173// 174// Data is assigned to each stream such that roughly the same number of 175// rows can be read from each stream. Because the server-side unit for 176// assigning data is collections of rows, the API does not guarantee that 177// each stream will return the same number or rows. Additionally, the 178// limits are enforced based on the number of pre-filtered rows, so some 179// filters can lead to lopsided assignments. 180// 181// Read sessions automatically expire 24 hours after they are created and do 182// not require manual clean-up by the caller. 183func (c *BigQueryReadClient) CreateReadSession(ctx context.Context, req *storagepb.CreateReadSessionRequest, opts ...gax.CallOption) (*storagepb.ReadSession, error) { 184 md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "read_session.table", url.QueryEscape(req.GetReadSession().GetTable()))) 185 ctx = insertMetadata(ctx, c.xGoogMetadata, md) 186 opts = append(c.CallOptions.CreateReadSession[0:len(c.CallOptions.CreateReadSession):len(c.CallOptions.CreateReadSession)], opts...) 187 var resp *storagepb.ReadSession 188 err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { 189 var err error 190 resp, err = c.bigQueryReadClient.CreateReadSession(ctx, req, settings.GRPC...) 191 return err 192 }, opts...) 193 if err != nil { 194 return nil, err 195 } 196 return resp, nil 197} 198 199// ReadRows reads rows from the stream in the format prescribed by the ReadSession. 
200// Each response contains one or more table rows, up to a maximum of 100 MiB 201// per response; read requests which attempt to read individual rows larger 202// than 100 MiB will fail. 203// 204// Each request also returns a set of stream statistics reflecting the current 205// state of the stream. 206func (c *BigQueryReadClient) ReadRows(ctx context.Context, req *storagepb.ReadRowsRequest, opts ...gax.CallOption) (storagepb.BigQueryRead_ReadRowsClient, error) { 207 md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "read_stream", url.QueryEscape(req.GetReadStream()))) 208 ctx = insertMetadata(ctx, c.xGoogMetadata, md) 209 opts = append(c.CallOptions.ReadRows[0:len(c.CallOptions.ReadRows):len(c.CallOptions.ReadRows)], opts...) 210 var resp storagepb.BigQueryRead_ReadRowsClient 211 err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { 212 var err error 213 resp, err = c.bigQueryReadClient.ReadRows(ctx, req, settings.GRPC...) 214 return err 215 }, opts...) 216 if err != nil { 217 return nil, err 218 } 219 return resp, nil 220} 221 222// SplitReadStream splits a given ReadStream into two ReadStream objects. These 223// ReadStream objects are referred to as the primary and the residual 224// streams of the split. The original ReadStream can still be read from in 225// the same manner as before. Both of the returned ReadStream objects can 226// also be read from, and the rows returned by both child streams will be 227// the same as the rows read from the original stream. 228// 229// Moreover, the two child streams will be allocated back-to-back in the 230// original ReadStream. Concretely, it is guaranteed that for streams 231// original, primary, and residual, that original[0-j] = primary[0-j] and 232// original[j-n] = residual[0-m] once the streams have been read to 233// completion. 
234func (c *BigQueryReadClient) SplitReadStream(ctx context.Context, req *storagepb.SplitReadStreamRequest, opts ...gax.CallOption) (*storagepb.SplitReadStreamResponse, error) { 235 md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) 236 ctx = insertMetadata(ctx, c.xGoogMetadata, md) 237 opts = append(c.CallOptions.SplitReadStream[0:len(c.CallOptions.SplitReadStream):len(c.CallOptions.SplitReadStream)], opts...) 238 var resp *storagepb.SplitReadStreamResponse 239 err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { 240 var err error 241 resp, err = c.bigQueryReadClient.SplitReadStream(ctx, req, settings.GRPC...) 242 return err 243 }, opts...) 244 if err != nil { 245 return nil, err 246 } 247 return resp, nil 248} 249