// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by protoc-gen-go_gapic. DO NOT EDIT.

package storage

import (
	"context"
	"fmt"
	"math"
	"net/url"
	"time"

	gax "github.com/googleapis/gax-go/v2"
	"google.golang.org/api/option"
	gtransport "google.golang.org/api/transport/grpc"
	storagepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta2"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/metadata"
)

var newBigQueryReadClientHook clientHook

// BigQueryReadCallOptions contains the retry settings for each method of BigQueryReadClient.
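//
// The defaults can be overridden per method after the client is constructed.
// A hedged sketch (the backoff values below are illustrative rather than the
// generated defaults, and ctx is assumed to be an existing context.Context):
//
//	c, err := NewBigQueryReadClient(ctx)
//	if err != nil {
//		// TODO: handle error
//	}
//	c.CallOptions.CreateReadSession = []gax.CallOption{
//		gax.WithRetry(func() gax.Retryer {
//			return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
//				Initial:    200 * time.Millisecond,
//				Max:        30 * time.Second,
//				Multiplier: 2.0,
//			})
//		}),
//	}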
type BigQueryReadCallOptions struct {
	CreateReadSession []gax.CallOption
	ReadRows          []gax.CallOption
	SplitReadStream   []gax.CallOption
}

func defaultBigQueryReadClientOptions() []option.ClientOption {
	return []option.ClientOption{
		option.WithEndpoint("bigquerystorage.googleapis.com:443"),
		option.WithGRPCDialOption(grpc.WithDisableServiceConfig()),
		option.WithScopes(DefaultAuthScopes()...),
		option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
			grpc.MaxCallRecvMsgSize(math.MaxInt32))),
	}
}

func defaultBigQueryReadCallOptions() *BigQueryReadCallOptions {
	return &BigQueryReadCallOptions{
		CreateReadSession: []gax.CallOption{
			gax.WithRetry(func() gax.Retryer {
				return gax.OnCodes([]codes.Code{
					codes.DeadlineExceeded,
					codes.Unavailable,
				}, gax.Backoff{
					Initial:    100 * time.Millisecond,
					Max:        60000 * time.Millisecond,
					Multiplier: 1.30,
				})
			}),
		},
		ReadRows: []gax.CallOption{
			gax.WithRetry(func() gax.Retryer {
				return gax.OnCodes([]codes.Code{
					codes.Unavailable,
				}, gax.Backoff{
					Initial:    100 * time.Millisecond,
					Max:        60000 * time.Millisecond,
					Multiplier: 1.30,
				})
			}),
		},
		SplitReadStream: []gax.CallOption{
			gax.WithRetry(func() gax.Retryer {
				return gax.OnCodes([]codes.Code{
					codes.DeadlineExceeded,
					codes.Unavailable,
				}, gax.Backoff{
					Initial:    100 * time.Millisecond,
					Max:        60000 * time.Millisecond,
					Multiplier: 1.30,
				})
			}),
		},
	}
}

// BigQueryReadClient is a client for interacting with the BigQuery Storage API.
//
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
type BigQueryReadClient struct {
	// Connection pool of gRPC connections to the service.
	connPool gtransport.ConnPool

	// Flag to opt out of default deadlines via the GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE environment variable.
	disableDeadlines bool

	// The gRPC API client.
	bigQueryReadClient storagepb.BigQueryReadClient

	// The call options for this service.
	CallOptions *BigQueryReadCallOptions

	// The x-goog-* metadata to be sent with each request.
	xGoogMetadata metadata.MD
}

// NewBigQueryReadClient creates a new BigQuery Read client.
//
// BigQuery Read API.
//
// The Read API can be used to read data from BigQuery.
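//
// A minimal usage sketch (not part of the generated code; it assumes this
// package is imported as storage, Application Default Credentials are
// available, and error handling is abbreviated):
//
//	ctx := context.Background()
//	c, err := storage.NewBigQueryReadClient(ctx)
//	if err != nil {
//		// TODO: handle error
//	}
//	defer c.Close()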
func NewBigQueryReadClient(ctx context.Context, opts ...option.ClientOption) (*BigQueryReadClient, error) {
	clientOpts := defaultBigQueryReadClientOptions()

	if newBigQueryReadClientHook != nil {
		hookOpts, err := newBigQueryReadClientHook(ctx, clientHookParams{})
		if err != nil {
			return nil, err
		}
		clientOpts = append(clientOpts, hookOpts...)
	}

	disableDeadlines, err := checkDisableDeadlines()
	if err != nil {
		return nil, err
	}

	connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
	if err != nil {
		return nil, err
	}
	c := &BigQueryReadClient{
		connPool:         connPool,
		disableDeadlines: disableDeadlines,
		CallOptions:      defaultBigQueryReadCallOptions(),

		bigQueryReadClient: storagepb.NewBigQueryReadClient(connPool),
	}
	c.setGoogleClientInfo()

	return c, nil
}

// Connection returns a connection to the API service.
//
// Deprecated: Connections are now pooled so this method does not always
// return the same resource.
func (c *BigQueryReadClient) Connection() *grpc.ClientConn {
	return c.connPool.Conn()
}

// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *BigQueryReadClient) Close() error {
	return c.connPool.Close()
}

// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *BigQueryReadClient) setGoogleClientInfo(keyval ...string) {
	kv := append([]string{"gl-go", versionGo()}, keyval...)
	kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}

// CreateReadSession creates a new read session. A read session divides the contents of a
// BigQuery table into one or more streams, which can then be used to read
// data from the table. The read session also specifies properties of the
// data to be read, such as a list of columns or a push-down filter describing
// the rows to be returned.
//
// A particular row can be read by at most one stream. When the caller has
// reached the end of each stream in the session, then all the data in the
// table has been read.
//
// Data is assigned to each stream such that roughly the same number of
// rows can be read from each stream. Because the server-side unit for
// assigning data is collections of rows, the API does not guarantee that
// each stream will return the same number of rows. Additionally, the
// limits are enforced based on the number of pre-filtered rows, so some
// filters can lead to lopsided assignments.
//
// Read sessions automatically expire 24 hours after they are created and do
// not require manual clean-up by the caller.
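//
// A hedged usage sketch (c is a *BigQueryReadClient, ctx a context.Context,
// and the project, dataset, and table names are placeholders):
//
//	req := &storagepb.CreateReadSessionRequest{
//		Parent: "projects/my-project",
//		ReadSession: &storagepb.ReadSession{
//			Table:      "projects/my-project/datasets/my_dataset/tables/my_table",
//			DataFormat: storagepb.DataFormat_AVRO,
//		},
//		MaxStreamCount: 1,
//	}
//	session, err := c.CreateReadSession(ctx, req)
//	if err != nil {
//		// TODO: handle error
//	}
//	_ = session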
func (c *BigQueryReadClient) CreateReadSession(ctx context.Context, req *storagepb.CreateReadSessionRequest, opts ...gax.CallOption) (*storagepb.ReadSession, error) {
	if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
		cctx, cancel := context.WithTimeout(ctx, 600000*time.Millisecond)
		defer cancel()
		ctx = cctx
	}
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "read_session.table", url.QueryEscape(req.GetReadSession().GetTable())))
	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
	opts = append(c.CallOptions.CreateReadSession[0:len(c.CallOptions.CreateReadSession):len(c.CallOptions.CreateReadSession)], opts...)
	var resp *storagepb.ReadSession
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.bigQueryReadClient.CreateReadSession(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// ReadRows reads rows from the stream in the format prescribed by the ReadSession.
// Each response contains one or more table rows, up to a maximum of 100 MiB
// per response; read requests which attempt to read individual rows larger
// than 100 MiB will fail.
//
// Each request also returns a set of stream statistics reflecting the current
// state of the stream.
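//
// A minimal streaming sketch (session comes from a prior CreateReadSession
// call with at least one stream; the io package is assumed to be imported by
// the caller):
//
//	stream, err := c.ReadRows(ctx, &storagepb.ReadRowsRequest{
//		ReadStream: session.GetStreams()[0].GetName(),
//	})
//	if err != nil {
//		// TODO: handle error
//	}
//	for {
//		resp, err := stream.Recv()
//		if err == io.EOF {
//			break
//		}
//		if err != nil {
//			// TODO: handle error
//		}
//		_ = resp // decode Avro or Arrow row blocks from resp
//	}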
func (c *BigQueryReadClient) ReadRows(ctx context.Context, req *storagepb.ReadRowsRequest, opts ...gax.CallOption) (storagepb.BigQueryRead_ReadRowsClient, error) {
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "read_stream", url.QueryEscape(req.GetReadStream())))
	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
	opts = append(c.CallOptions.ReadRows[0:len(c.CallOptions.ReadRows):len(c.CallOptions.ReadRows)], opts...)
	var resp storagepb.BigQueryRead_ReadRowsClient
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.bigQueryReadClient.ReadRows(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// SplitReadStream splits a given ReadStream into two ReadStream objects. These
// ReadStream objects are referred to as the primary and the residual
// streams of the split. The original ReadStream can still be read from in
// the same manner as before. Both of the returned ReadStream objects can
// also be read from, and the rows returned by both child streams will be
// the same as the rows read from the original stream.
//
// Moreover, the two child streams will be allocated back-to-back in the
// original ReadStream. Concretely, it is guaranteed that for streams
// original, primary, and residual, that original[0-j] = primary[0-j] and
// original[j-n] = residual[0-m] once the streams have been read to
// completion.
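//
// A brief sketch of a split call (the stream name comes from an existing
// session, and the fraction value is illustrative):
//
//	resp, err := c.SplitReadStream(ctx, &storagepb.SplitReadStreamRequest{
//		Name:     session.GetStreams()[0].GetName(),
//		Fraction: 0.5,
//	})
//	if err != nil {
//		// TODO: handle error
//	}
//	primary, residual := resp.GetPrimaryStream(), resp.GetRemainderStream()
//	_, _ = primary, residual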
func (c *BigQueryReadClient) SplitReadStream(ctx context.Context, req *storagepb.SplitReadStreamRequest, opts ...gax.CallOption) (*storagepb.SplitReadStreamResponse, error) {
	if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
		cctx, cancel := context.WithTimeout(ctx, 600000*time.Millisecond)
		defer cancel()
		ctx = cctx
	}
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName())))
	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
	opts = append(c.CallOptions.SplitReadStream[0:len(c.CallOptions.SplitReadStream):len(c.CallOptions.SplitReadStream)], opts...)
	var resp *storagepb.SplitReadStreamResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.bigQueryReadClient.SplitReadStream(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}