1// Copyright 2021 Google LLC.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// Code generated file. DO NOT EDIT.
6
7// Package speech provides access to the Cloud Speech-to-Text API.
8//
9// This package is DEPRECATED. Use package cloud.google.com/go/speech/apiv1 instead.
10//
11// For product documentation, see: https://cloud.google.com/speech-to-text/docs/quickstart-protocol
12//
13// Creating a client
14//
15// Usage example:
16//
17//   import "google.golang.org/api/speech/v1"
18//   ...
19//   ctx := context.Background()
20//   speechService, err := speech.NewService(ctx)
21//
22// In this example, Google Application Default Credentials are used for authentication.
23//
24// For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials.
25//
26// Other authentication options
27//
28// To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey:
29//
30//   speechService, err := speech.NewService(ctx, option.WithAPIKey("AIza..."))
31//
32// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource:
33//
34//   config := &oauth2.Config{...}
35//   // ...
36//   token, err := config.Exchange(ctx, ...)
37//   speechService, err := speech.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token)))
38//
39// See https://godoc.org/google.golang.org/api/option/ for details on options.
40package speech // import "google.golang.org/api/speech/v1"
41
42import (
43	"bytes"
44	"context"
45	"encoding/json"
46	"errors"
47	"fmt"
48	"io"
49	"net/http"
50	"net/url"
51	"strconv"
52	"strings"
53
54	googleapi "google.golang.org/api/googleapi"
55	gensupport "google.golang.org/api/internal/gensupport"
56	option "google.golang.org/api/option"
57	internaloption "google.golang.org/api/option/internaloption"
58	htransport "google.golang.org/api/transport/http"
59)
60
// Always reference these packages, just in case the auto-generated code
// below doesn't. The blank assignments keep the imports above compiling
// (and goimports from removing them) even when no generated code in this
// file happens to use a given package.
var _ = bytes.NewBuffer
var _ = strconv.Itoa
var _ = fmt.Sprintf
var _ = json.NewDecoder
var _ = io.Copy
var _ = url.Parse
var _ = gensupport.MarshalJSON
var _ = googleapi.Version
var _ = errors.New
var _ = strings.Replace
var _ = context.Canceled
var _ = internaloption.WithDefaultEndpoint
75
// Identifiers and endpoints for the speech:v1 API surface.
const (
	apiId        = "speech:v1"
	apiName      = "speech"
	apiVersion   = "v1"
	basePath     = "https://speech.googleapis.com/"
	mtlsBasePath = "https://speech.mtls.googleapis.com/"
)
81
// OAuth2 scopes used by this API.
const (
	// CloudPlatformScope grants permission to see, edit, configure, and
	// delete your Google Cloud Platform data.
	CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform"
)
87
88// NewService creates a new Service.
89func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, error) {
90	scopesOption := option.WithScopes(
91		"https://www.googleapis.com/auth/cloud-platform",
92	)
93	// NOTE: prepend, so we don't override user-specified scopes.
94	opts = append([]option.ClientOption{scopesOption}, opts...)
95	opts = append(opts, internaloption.WithDefaultEndpoint(basePath))
96	opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath))
97	client, endpoint, err := htransport.NewClient(ctx, opts...)
98	if err != nil {
99		return nil, err
100	}
101	s, err := New(client)
102	if err != nil {
103		return nil, err
104	}
105	if endpoint != "" {
106		s.BasePath = endpoint
107	}
108	return s, nil
109}
110
111// New creates a new Service. It uses the provided http.Client for requests.
112//
113// Deprecated: please use NewService instead.
114// To provide a custom HTTP client, use option.WithHTTPClient.
115// If you are using google.golang.org/api/googleapis/transport.APIKey, use option.WithAPIKey with NewService instead.
116func New(client *http.Client) (*Service, error) {
117	if client == nil {
118		return nil, errors.New("client is nil")
119	}
120	s := &Service{client: client, BasePath: basePath}
121	s.Operations = NewOperationsService(s)
122	s.Speech = NewSpeechService(s)
123	return s, nil
124}
125
// Service is the root client for the Cloud Speech-to-Text API.
// Construct one with NewService (preferred) or New.
type Service struct {
	client    *http.Client
	BasePath  string // API endpoint base URL
	UserAgent string // optional additional User-Agent fragment

	Operations *OperationsService

	Speech *SpeechService
}
135
136func (s *Service) userAgent() string {
137	if s.UserAgent == "" {
138		return googleapi.UserAgent
139	}
140	return googleapi.UserAgent + " " + s.UserAgent
141}
142
143func NewOperationsService(s *Service) *OperationsService {
144	rs := &OperationsService{s: s}
145	return rs
146}
147
// OperationsService is a handle, created by NewOperationsService, that
// carries a back-reference to the root Service for operation calls.
type OperationsService struct {
	s *Service
}
151
152func NewSpeechService(s *Service) *SpeechService {
153	rs := &SpeechService{s: s}
154	return rs
155}
156
// SpeechService is a handle, created by NewSpeechService, that carries
// a back-reference to the root Service for speech recognition calls.
type SpeechService struct {
	s *Service
}
160
// ListOperationsResponse: The response message for
// Operations.ListOperations.
type ListOperationsResponse struct {
	// NextPageToken: The standard List next-page token.
	NextPageToken string `json:"nextPageToken,omitempty"`

	// Operations: A list of operations that matches the specified filter in
	// the request.
	Operations []*Operation `json:"operations,omitempty"`

	// ServerResponse contains the HTTP response code and headers from the
	// server. It is excluded from JSON encoding (tagged "-").
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields is a list of field names (e.g. "NextPageToken") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "NextPageToken") to include
	// in API requests with the JSON null value. By default, fields with
	// empty values are omitted from API requests. However, any field with
	// an empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
191
192func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) {
193	type NoMethod ListOperationsResponse
194	raw := NoMethod(*s)
195	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
196}
197
// LongRunningRecognizeMetadata: Describes the progress of a
// long-running `LongRunningRecognize` call. It is included in the
// `metadata` field of the `Operation` returned by the `GetOperation`
// call of the `google::longrunning::Operations` service.
type LongRunningRecognizeMetadata struct {
	// LastUpdateTime: Time of the most recent processing update.
	LastUpdateTime string `json:"lastUpdateTime,omitempty"`

	// ProgressPercent: Approximate percentage of audio processed thus far.
	// Guaranteed to be 100 when the audio is fully processed and the
	// results are available.
	ProgressPercent int64 `json:"progressPercent,omitempty"`

	// StartTime: Time when the request was received.
	StartTime string `json:"startTime,omitempty"`

	// Uri: Output only. The URI of the audio file being transcribed. Empty
	// if the audio was sent as byte content.
	Uri string `json:"uri,omitempty"`

	// ForceSendFields is a list of field names (e.g. "LastUpdateTime") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "LastUpdateTime") to
	// include in API requests with the JSON null value. By default, fields
	// with empty values are omitted from API requests. However, any field
	// with an empty value appearing in NullFields will be sent to the
	// server as null. It is an error if a field in this list has a
	// non-empty value. This may be used to include null fields in Patch
	// requests.
	NullFields []string `json:"-"`
}
235
236func (s *LongRunningRecognizeMetadata) MarshalJSON() ([]byte, error) {
237	type NoMethod LongRunningRecognizeMetadata
238	raw := NoMethod(*s)
239	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
240}
241
// LongRunningRecognizeRequest: The top-level message sent by the client
// for the `LongRunningRecognize` method.
type LongRunningRecognizeRequest struct {
	// Audio: Required. The audio data to be recognized.
	Audio *RecognitionAudio `json:"audio,omitempty"`

	// Config: Required. Provides information to the recognizer that
	// specifies how to process the request.
	Config *RecognitionConfig `json:"config,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Audio") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Audio") to include in API
	// requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
268
269func (s *LongRunningRecognizeRequest) MarshalJSON() ([]byte, error) {
270	type NoMethod LongRunningRecognizeRequest
271	raw := NoMethod(*s)
272	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
273}
274
// LongRunningRecognizeResponse: The only message returned to the client
// by the `LongRunningRecognize` method. It contains the result as zero
// or more sequential `SpeechRecognitionResult` messages. It is included
// in the `result.response` field of the `Operation` returned by the
// `GetOperation` call of the `google::longrunning::Operations` service.
type LongRunningRecognizeResponse struct {
	// Results: Sequential list of transcription results corresponding to
	// sequential portions of audio.
	Results []*SpeechRecognitionResult `json:"results,omitempty"`

	// TotalBilledTime: When available, billed audio seconds for the
	// corresponding request.
	TotalBilledTime string `json:"totalBilledTime,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Results") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Results") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
305
306func (s *LongRunningRecognizeResponse) MarshalJSON() ([]byte, error) {
307	type NoMethod LongRunningRecognizeResponse
308	raw := NoMethod(*s)
309	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
310}
311
// Operation: This resource represents a long-running operation that is
// the result of a network API call.
type Operation struct {
	// Done: If the value is `false`, it means the operation is still in
	// progress. If `true`, the operation is completed, and either `error`
	// or `response` is available.
	Done bool `json:"done,omitempty"`

	// Error: The error result of the operation in case of failure or
	// cancellation.
	Error *Status `json:"error,omitempty"`

	// Metadata: Service-specific metadata associated with the operation. It
	// typically contains progress information and common metadata such as
	// create time. Some services might not provide such metadata. Any
	// method that returns a long-running operation should document the
	// metadata type, if any. Kept as raw JSON so callers can decode it
	// into the appropriate concrete type themselves.
	Metadata googleapi.RawMessage `json:"metadata,omitempty"`

	// Name: The server-assigned name, which is only unique within the same
	// service that originally returns it. If you use the default HTTP
	// mapping, the `name` should be a resource name ending with
	// `operations/{unique_id}`.
	Name string `json:"name,omitempty"`

	// Response: The normal response of the operation in case of success. If
	// the original method returns no data on success, such as `Delete`, the
	// response is `google.protobuf.Empty`. If the original method is
	// standard `Get`/`Create`/`Update`, the response should be the
	// resource. For other methods, the response should have the type
	// `XxxResponse`, where `Xxx` is the original method name. For example,
	// if the original method name is `TakeSnapshot()`, the inferred
	// response type is `TakeSnapshotResponse`.
	Response googleapi.RawMessage `json:"response,omitempty"`

	// ServerResponse contains the HTTP response code and headers from the
	// server. It is excluded from JSON encoding (tagged "-").
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields is a list of field names (e.g. "Done") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Done") to include in API
	// requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
367
368func (s *Operation) MarshalJSON() ([]byte, error) {
369	type NoMethod Operation
370	raw := NoMethod(*s)
371	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
372}
373
// RecognitionAudio: Contains audio data in the encoding specified in
// the `RecognitionConfig`. Either `content` or `uri` must be supplied.
// Supplying both or neither returns google.rpc.Code.INVALID_ARGUMENT.
// See content limits
// (https://cloud.google.com/speech-to-text/quotas#content).
type RecognitionAudio struct {
	// Content: The audio data bytes encoded as specified in
	// `RecognitionConfig`. Note: as with all bytes fields, proto buffers
	// use a pure binary representation, whereas JSON representations use
	// base64.
	Content string `json:"content,omitempty"`

	// Uri: URI that points to a file that contains audio data bytes as
	// specified in `RecognitionConfig`. The file must not be compressed
	// (for example, gzip). Currently, only Google Cloud Storage URIs are
	// supported, which must be specified in the following format:
	// `gs://bucket_name/object_name` (other URI formats return
	// google.rpc.Code.INVALID_ARGUMENT). For more information, see Request
	// URIs (https://cloud.google.com/storage/docs/reference-uris).
	Uri string `json:"uri,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Content") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Content") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
411
412func (s *RecognitionAudio) MarshalJSON() ([]byte, error) {
413	type NoMethod RecognitionAudio
414	raw := NoMethod(*s)
415	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
416}
417
// RecognitionConfig: Provides information to the recognizer that
// specifies how to process the request. All fields are optional unless
// marked Required in their comment.
type RecognitionConfig struct {
	// AudioChannelCount: The number of channels in the input audio data.
	// ONLY set this for MULTI-CHANNEL recognition. Valid values for
	// LINEAR16 and FLAC are `1`-`8`. Valid values for OGG_OPUS are
	// '1'-'254'. Valid value for MULAW, AMR, AMR_WB and
	// SPEEX_WITH_HEADER_BYTE is only `1`. If `0` or omitted, defaults to
	// one channel (mono). Note: We only recognize the first channel by
	// default. To perform independent recognition on each channel set
	// `enable_separate_recognition_per_channel` to 'true'.
	AudioChannelCount int64 `json:"audioChannelCount,omitempty"`

	// DiarizationConfig: Config to enable speaker diarization and set
	// additional parameters to make diarization better suited for your
	// application. Note: When this is enabled, we send all the words from
	// the beginning of the audio for the top alternative in every
	// consecutive STREAMING responses. This is done in order to improve our
	// speaker tags as our models learn to identify the speakers in the
	// conversation over time. For non-streaming requests, the diarization
	// results will be provided only in the top alternative of the FINAL
	// SpeechRecognitionResult.
	DiarizationConfig *SpeakerDiarizationConfig `json:"diarizationConfig,omitempty"`

	// EnableAutomaticPunctuation: If 'true', adds punctuation to
	// recognition result hypotheses. This feature is only available in
	// select languages. Setting this for requests in other languages has no
	// effect at all. The default 'false' value does not add punctuation to
	// result hypotheses.
	EnableAutomaticPunctuation bool `json:"enableAutomaticPunctuation,omitempty"`

	// EnableSeparateRecognitionPerChannel: This needs to be set to `true`
	// explicitly and `audio_channel_count` > 1 to get each channel
	// recognized separately. The recognition result will contain a
	// `channel_tag` field to state which channel that result belongs to. If
	// this is not true, we will only recognize the first channel. The
	// request is billed cumulatively for all channels recognized:
	// `audio_channel_count` multiplied by the length of the audio.
	EnableSeparateRecognitionPerChannel bool `json:"enableSeparateRecognitionPerChannel,omitempty"`

	// EnableWordTimeOffsets: If `true`, the top result includes a list of
	// words and the start and end time offsets (timestamps) for those
	// words. If `false`, no word-level time offset information is returned.
	// The default is `false`.
	EnableWordTimeOffsets bool `json:"enableWordTimeOffsets,omitempty"`

	// Encoding: Encoding of audio data sent in all `RecognitionAudio`
	// messages. This field is optional for `FLAC` and `WAV` audio files and
	// required for all other audio formats. For details, see AudioEncoding.
	//
	// Possible values:
	//   "ENCODING_UNSPECIFIED" - Not specified.
	//   "LINEAR16" - Uncompressed 16-bit signed little-endian samples
	// (Linear PCM).
	//   "FLAC" - `FLAC` (Free Lossless Audio Codec) is the recommended
	// encoding because it is lossless--therefore recognition is not
	// compromised--and requires only about half the bandwidth of
	// `LINEAR16`. `FLAC` stream encoding supports 16-bit and 24-bit
	// samples, however, not all fields in `STREAMINFO` are supported.
	//   "MULAW" - 8-bit samples that compand 14-bit audio samples using
	// G.711 PCMU/mu-law.
	//   "AMR" - Adaptive Multi-Rate Narrowband codec. `sample_rate_hertz`
	// must be 8000.
	//   "AMR_WB" - Adaptive Multi-Rate Wideband codec. `sample_rate_hertz`
	// must be 16000.
	//   "OGG_OPUS" - Opus encoded audio frames in Ogg container
	// ([OggOpus](https://wiki.xiph.org/OggOpus)). `sample_rate_hertz` must
	// be one of 8000, 12000, 16000, 24000, or 48000.
	//   "SPEEX_WITH_HEADER_BYTE" - Although the use of lossy encodings is
	// not recommended, if a very low bitrate encoding is required,
	// `OGG_OPUS` is highly preferred over Speex encoding. The
	// [Speex](https://speex.org/) encoding supported by Cloud Speech API
	// has a header byte in each block, as in MIME type
	// `audio/x-speex-with-header-byte`. It is a variant of the RTP Speex
	// encoding defined in [RFC 5574](https://tools.ietf.org/html/rfc5574).
	// The stream is a sequence of blocks, one block per RTP packet. Each
	// block starts with a byte containing the length of the block, in
	// bytes, followed by one or more frames of Speex data, padded to an
	// integral number of bytes (octets) as specified in RFC 5574. In other
	// words, each RTP header is replaced with a single byte containing the
	// block length. Only Speex wideband is supported. `sample_rate_hertz`
	// must be 16000.
	Encoding string `json:"encoding,omitempty"`

	// LanguageCode: Required. The language of the supplied audio as a
	// BCP-47 (https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
	// Example: "en-US". See Language Support
	// (https://cloud.google.com/speech-to-text/docs/languages) for a list
	// of the currently supported language codes.
	LanguageCode string `json:"languageCode,omitempty"`

	// MaxAlternatives: Maximum number of recognition hypotheses to be
	// returned. Specifically, the maximum number of
	// `SpeechRecognitionAlternative` messages within each
	// `SpeechRecognitionResult`. The server may return fewer than
	// `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1`
	// will return a maximum of one. If omitted, will return a maximum of
	// one.
	MaxAlternatives int64 `json:"maxAlternatives,omitempty"`

	// Metadata: Metadata regarding this request.
	Metadata *RecognitionMetadata `json:"metadata,omitempty"`

	// Model: Which model to select for the given request. Select the model
	// best suited to your domain to get best results. If a model is not
	// explicitly specified, then we auto-select a model based on the
	// parameters in the RecognitionConfig. *Model* *Description*
	// command_and_search Best for short queries such as voice commands or
	// voice search. phone_call Best for audio that originated from a phone
	// call (typically recorded at an 8khz sampling rate). video Best for
	// audio that originated from video or includes multiple speakers.
	// Ideally the audio is recorded at a 16khz or greater sampling rate.
	// This is a premium model that costs more than the standard rate.
	// default Best for audio that is not one of the specific audio models.
	// For example, long-form audio. Ideally the audio is high-fidelity,
	// recorded at a 16khz or greater sampling rate.
	Model string `json:"model,omitempty"`

	// ProfanityFilter: If set to `true`, the server will attempt to filter
	// out profanities, replacing all but the initial character in each
	// filtered word with asterisks, e.g. "f***". If set to `false` or
	// omitted, profanities won't be filtered out.
	ProfanityFilter bool `json:"profanityFilter,omitempty"`

	// SampleRateHertz: Sample rate in Hertz of the audio data sent in all
	// `RecognitionAudio` messages. Valid values are: 8000-48000. 16000 is
	// optimal. For best results, set the sampling rate of the audio source
	// to 16000 Hz. If that's not possible, use the native sample rate of
	// the audio source (instead of re-sampling). This field is optional for
	// FLAC and WAV audio files, but is required for all other audio
	// formats. For details, see AudioEncoding.
	SampleRateHertz int64 `json:"sampleRateHertz,omitempty"`

	// SpeechContexts: Array of SpeechContext. A means to provide context to
	// assist the speech recognition. For more information, see speech
	// adaptation (https://cloud.google.com/speech-to-text/docs/adaptation).
	SpeechContexts []*SpeechContext `json:"speechContexts,omitempty"`

	// UseEnhanced: Set to true to use an enhanced model for speech
	// recognition. If `use_enhanced` is set to true and the `model` field
	// is not set, then an appropriate enhanced model is chosen if an
	// enhanced model exists for the audio. If `use_enhanced` is true and an
	// enhanced version of the specified model does not exist, then the
	// speech is recognized using the standard version of the specified
	// model.
	UseEnhanced bool `json:"useEnhanced,omitempty"`

	// ForceSendFields is a list of field names (e.g. "AudioChannelCount")
	// to unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "AudioChannelCount") to
	// include in API requests with the JSON null value. By default, fields
	// with empty values are omitted from API requests. However, any field
	// with an empty value appearing in NullFields will be sent to the
	// server as null. It is an error if a field in this list has a
	// non-empty value. This may be used to include null fields in Patch
	// requests.
	NullFields []string `json:"-"`
}
582
583func (s *RecognitionConfig) MarshalJSON() ([]byte, error) {
584	type NoMethod RecognitionConfig
585	raw := NoMethod(*s)
586	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
587}
588
// RecognitionMetadata: Description of audio data to be recognized.
type RecognitionMetadata struct {
	// AudioTopic: Description of the content. E.g. "Recordings of federal
	// supreme court hearings from 2012".
	AudioTopic string `json:"audioTopic,omitempty"`

	// IndustryNaicsCodeOfAudio: The industry vertical to which this speech
	// recognition request most closely applies. This is most indicative of
	// the topics contained in the audio. Use the 6-digit NAICS code to
	// identify the industry vertical - see https://www.naics.com/search/.
	IndustryNaicsCodeOfAudio int64 `json:"industryNaicsCodeOfAudio,omitempty"`

	// InteractionType: The use case most closely describing the audio
	// content to be recognized.
	//
	// Possible values:
	//   "INTERACTION_TYPE_UNSPECIFIED" - Use case is either unknown or is
	// something other than one of the other values below.
	//   "DISCUSSION" - Multiple people in a conversation or discussion. For
	// example in a meeting with two or more people actively participating.
	// Typically all the primary people speaking would be in the same room
	// (if not, see PHONE_CALL)
	//   "PRESENTATION" - One or more persons lecturing or presenting to
	// others, mostly uninterrupted.
	//   "PHONE_CALL" - A phone-call or video-conference in which two or
	// more people, who are not in the same room, are actively
	// participating.
	//   "VOICEMAIL" - A recorded message intended for another person to
	// listen to.
	//   "PROFESSIONALLY_PRODUCED" - Professionally produced audio (e.g. TV
	// Show, Podcast).
	//   "VOICE_SEARCH" - Transcribe spoken questions and queries into text.
	//   "VOICE_COMMAND" - Transcribe voice commands, such as for
	// controlling a device.
	//   "DICTATION" - Transcribe speech to text to create a written
	// document, such as a text-message, email or report.
	InteractionType string `json:"interactionType,omitempty"`

	// MicrophoneDistance: The audio type that most closely describes the
	// audio being recognized.
	//
	// Possible values:
	//   "MICROPHONE_DISTANCE_UNSPECIFIED" - Audio type is not known.
	//   "NEARFIELD" - The audio was captured from a closely placed
	// microphone. E.g. phone, dictaphone, or handheld microphone. Generally
	// if the speaker is within 1 meter of the microphone.
	//   "MIDFIELD" - The speaker is within 3 meters of the microphone.
	//   "FARFIELD" - The speaker is more than 3 meters away from the
	// microphone.
	MicrophoneDistance string `json:"microphoneDistance,omitempty"`

	// OriginalMediaType: The original media the speech was recorded on.
	//
	// Possible values:
	//   "ORIGINAL_MEDIA_TYPE_UNSPECIFIED" - Unknown original media type.
	//   "AUDIO" - The speech data is an audio recording.
	//   "VIDEO" - The speech data originally recorded on a video.
	OriginalMediaType string `json:"originalMediaType,omitempty"`

	// OriginalMimeType: Mime type of the original audio file. For example
	// `audio/m4a`, `audio/x-alaw-basic`, `audio/mp3`, `audio/3gpp`. A list
	// of possible audio mime types is maintained at
	// http://www.iana.org/assignments/media-types/media-types.xhtml#audio
	OriginalMimeType string `json:"originalMimeType,omitempty"`

	// RecordingDeviceName: The device used to make the recording. Examples
	// 'Nexus 5X' or 'Polycom SoundStation IP 6000' or 'POTS' or 'VoIP' or
	// 'Cardioid Microphone'.
	RecordingDeviceName string `json:"recordingDeviceName,omitempty"`

	// RecordingDeviceType: The type of device the speech was recorded with.
	//
	// Possible values:
	//   "RECORDING_DEVICE_TYPE_UNSPECIFIED" - The recording device is
	// unknown.
	//   "SMARTPHONE" - Speech was recorded on a smartphone.
	//   "PC" - Speech was recorded using a personal computer or tablet.
	//   "PHONE_LINE" - Speech was recorded over a phone line.
	//   "VEHICLE" - Speech was recorded in a vehicle.
	//   "OTHER_OUTDOOR_DEVICE" - Speech was recorded outdoors.
	//   "OTHER_INDOOR_DEVICE" - Speech was recorded indoors.
	RecordingDeviceType string `json:"recordingDeviceType,omitempty"`

	// ForceSendFields is a list of field names (e.g. "AudioTopic") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "AudioTopic") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
688
689func (s *RecognitionMetadata) MarshalJSON() ([]byte, error) {
690	type NoMethod RecognitionMetadata
691	raw := NoMethod(*s)
692	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
693}
694
// RecognizeRequest: The top-level message sent by the client for the
// `Recognize` method. Both Audio and Config are required.
type RecognizeRequest struct {
	// Audio: Required. The audio data to be recognized.
	Audio *RecognitionAudio `json:"audio,omitempty"`

	// Config: Required. Provides information to the recognizer that
	// specifies how to process the request.
	Config *RecognitionConfig `json:"config,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Audio") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Audio") to include in API
	// requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
721
722func (s *RecognizeRequest) MarshalJSON() ([]byte, error) {
723	type NoMethod RecognizeRequest
724	raw := NoMethod(*s)
725	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
726}
727
// RecognizeResponse: The only message returned to the client by the
// `Recognize` method. It contains the result as zero or more sequential
// `SpeechRecognitionResult` messages.
type RecognizeResponse struct {
	// Results: Sequential list of transcription results corresponding to
	// sequential portions of audio.
	Results []*SpeechRecognitionResult `json:"results,omitempty"`

	// TotalBilledTime: When available, billed audio seconds for the
	// corresponding request.
	// NOTE(review): transported as a string; presumably a JSON-encoded
	// duration (e.g. "30s") — confirm against the API's Duration encoding.
	TotalBilledTime string `json:"totalBilledTime,omitempty"`

	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields is a list of field names (e.g. "Results") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Results") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
760
761func (s *RecognizeResponse) MarshalJSON() ([]byte, error) {
762	type NoMethod RecognizeResponse
763	raw := NoMethod(*s)
764	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
765}
766
// SpeakerDiarizationConfig: Config to enable speaker diarization.
// When enabled, each recognized word in the top alternative carries a
// speaker_tag identifying the detected speaker (see WordInfo).
type SpeakerDiarizationConfig struct {
	// EnableSpeakerDiarization: If 'true', enables speaker detection for
	// each recognized word in the top alternative of the recognition result
	// using a speaker_tag provided in the WordInfo.
	EnableSpeakerDiarization bool `json:"enableSpeakerDiarization,omitempty"`

	// MaxSpeakerCount: Maximum number of speakers in the conversation. This
	// range gives you more flexibility by allowing the system to
	// automatically determine the correct number of speakers. If not set,
	// the default value is 6.
	MaxSpeakerCount int64 `json:"maxSpeakerCount,omitempty"`

	// MinSpeakerCount: Minimum number of speakers in the conversation. This
	// range gives you more flexibility by allowing the system to
	// automatically determine the correct number of speakers. If not set,
	// the default value is 2.
	MinSpeakerCount int64 `json:"minSpeakerCount,omitempty"`

	// SpeakerTag: Output only. Unused.
	SpeakerTag int64 `json:"speakerTag,omitempty"`

	// ForceSendFields is a list of field names (e.g.
	// "EnableSpeakerDiarization") to unconditionally include in API
	// requests. By default, fields with empty or default values are omitted
	// from API requests. However, any non-pointer, non-interface field
	// appearing in ForceSendFields will be sent to the server regardless of
	// whether the field is empty or not. This may be used to include empty
	// fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "EnableSpeakerDiarization")
	// to include in API requests with the JSON null value. By default,
	// fields with empty values are omitted from API requests. However, any
	// field with an empty value appearing in NullFields will be sent to the
	// server as null. It is an error if a field in this list has a
	// non-empty value. This may be used to include null fields in Patch
	// requests.
	NullFields []string `json:"-"`
}
807
808func (s *SpeakerDiarizationConfig) MarshalJSON() ([]byte, error) {
809	type NoMethod SpeakerDiarizationConfig
810	raw := NoMethod(*s)
811	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
812}
813
// SpeechContext: Provides "hints" to the speech recognizer to favor
// specific words and phrases in the results.
type SpeechContext struct {
	// Phrases: A list of strings containing words and phrases "hints" so
	// that the speech recognition is more likely to recognize them. This
	// can be used to improve the accuracy for specific words and phrases,
	// for example, if specific commands are typically spoken by the user.
	// This can also be used to add additional words to the vocabulary of
	// the recognizer. See usage limits
	// (https://cloud.google.com/speech-to-text/quotas#content). List items
	// can also be set to classes for groups of words that represent common
	// concepts that occur in natural language. For example, rather than
	// providing phrase hints for every month of the year, using the $MONTH
	// class improves the likelihood of correctly transcribing audio that
	// includes months.
	Phrases []string `json:"phrases,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Phrases") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Phrases") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
847
848func (s *SpeechContext) MarshalJSON() ([]byte, error) {
849	type NoMethod SpeechContext
850	raw := NoMethod(*s)
851	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
852}
853
// SpeechRecognitionAlternative: Alternative hypotheses (a.k.a. n-best
// list). Each alternative is one candidate transcription of the same
// portion of audio.
type SpeechRecognitionAlternative struct {
	// Confidence: The confidence estimate between 0.0 and 1.0. A higher
	// number indicates an estimated greater likelihood that the recognized
	// words are correct. This field is set only for the top alternative of
	// a non-streaming result or, of a streaming result where
	// `is_final=true`. This field is not guaranteed to be accurate and
	// users should not rely on it to be always provided. The default of 0.0
	// is a sentinel value indicating `confidence` was not set.
	Confidence float64 `json:"confidence,omitempty"`

	// Transcript: Transcript text representing the words that the user
	// spoke.
	Transcript string `json:"transcript,omitempty"`

	// Words: A list of word-specific information for each recognized word.
	// Note: When `enable_speaker_diarization` is true, you will see all the
	// words from the beginning of the audio.
	Words []*WordInfo `json:"words,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Confidence") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Confidence") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
891
892func (s *SpeechRecognitionAlternative) MarshalJSON() ([]byte, error) {
893	type NoMethod SpeechRecognitionAlternative
894	raw := NoMethod(*s)
895	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
896}
897
898func (s *SpeechRecognitionAlternative) UnmarshalJSON(data []byte) error {
899	type NoMethod SpeechRecognitionAlternative
900	var s1 struct {
901		Confidence gensupport.JSONFloat64 `json:"confidence"`
902		*NoMethod
903	}
904	s1.NoMethod = (*NoMethod)(s)
905	if err := json.Unmarshal(data, &s1); err != nil {
906		return err
907	}
908	s.Confidence = float64(s1.Confidence)
909	return nil
910}
911
// SpeechRecognitionResult: A speech recognition result corresponding to
// a portion of the audio.
type SpeechRecognitionResult struct {
	// Alternatives: May contain one or more recognition hypotheses (up to
	// the maximum specified in `max_alternatives`). These alternatives are
	// ordered in terms of accuracy, with the top (first) alternative being
	// the most probable, as ranked by the recognizer.
	Alternatives []*SpeechRecognitionAlternative `json:"alternatives,omitempty"`

	// ChannelTag: For multi-channel audio, this is the channel number
	// corresponding to the recognized result for the audio from that
	// channel. For audio_channel_count = N, its output values can range
	// from '1' to 'N'.
	ChannelTag int64 `json:"channelTag,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Alternatives") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Alternatives") to include
	// in API requests with the JSON null value. By default, fields with
	// empty values are omitted from API requests. However, any field with
	// an empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
943
944func (s *SpeechRecognitionResult) MarshalJSON() ([]byte, error) {
945	type NoMethod SpeechRecognitionResult
946	raw := NoMethod(*s)
947	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
948}
949
// Status: The `Status` type defines a logical error model that is
// suitable for different programming environments, including REST APIs
// and RPC APIs. It is used by gRPC (https://github.com/grpc). Each
// `Status` message contains three pieces of data: error code, error
// message, and error details. You can find out more about this error
// model and how to work with it in the API Design Guide
// (https://cloud.google.com/apis/design/errors).
type Status struct {
	// Code: The status code, which should be an enum value of
	// google.rpc.Code.
	Code int64 `json:"code,omitempty"`

	// Details: A list of messages that carry the error details. There is a
	// common set of message types for APIs to use.
	// Each entry is kept as raw JSON so callers can decode the
	// detail type they expect.
	Details []googleapi.RawMessage `json:"details,omitempty"`

	// Message: A developer-facing error message, which should be in
	// English. Any user-facing error message should be localized and sent
	// in the google.rpc.Status.details field, or localized by the client.
	Message string `json:"message,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Code") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Code") to include in API
	// requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
987
988func (s *Status) MarshalJSON() ([]byte, error) {
989	type NoMethod Status
990	raw := NoMethod(*s)
991	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
992}
993
// WordInfo: Word-specific information for recognized words.
type WordInfo struct {
	// EndTime: Time offset relative to the beginning of the audio, and
	// corresponding to the end of the spoken word. This field is only set
	// if `enable_word_time_offsets=true` and only in the top hypothesis.
	// This is an experimental feature and the accuracy of the time offset
	// can vary.
	// NOTE(review): transported as a string; presumably a JSON-encoded
	// duration (e.g. "1.500s") — confirm against the API's Duration
	// encoding.
	EndTime string `json:"endTime,omitempty"`

	// SpeakerTag: Output only. A distinct integer value is assigned for
	// every speaker within the audio. This field specifies which one of
	// those speakers was detected to have spoken this word. Value ranges
	// from '1' to diarization_speaker_count. speaker_tag is set if
	// enable_speaker_diarization = 'true' and only in the top alternative.
	SpeakerTag int64 `json:"speakerTag,omitempty"`

	// StartTime: Time offset relative to the beginning of the audio, and
	// corresponding to the start of the spoken word. This field is only set
	// if `enable_word_time_offsets=true` and only in the top hypothesis.
	// This is an experimental feature and the accuracy of the time offset
	// can vary.
	// NOTE(review): same string-encoded duration format as EndTime.
	StartTime string `json:"startTime,omitempty"`

	// Word: The word corresponding to this set of information.
	Word string `json:"word,omitempty"`

	// ForceSendFields is a list of field names (e.g. "EndTime") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "EndTime") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
1036
1037func (s *WordInfo) MarshalJSON() ([]byte, error) {
1038	type NoMethod WordInfo
1039	raw := NoMethod(*s)
1040	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
1041}
1042
// method id "speech.operations.get":

// OperationsGetCall holds the state for a pending
// "speech.operations.get" request: the service, the operation name to
// fetch, URL query parameters, an optional ETag for If-None-Match, and
// the context and extra headers to use when the call is executed via Do.
type OperationsGetCall struct {
	s            *Service
	name         string
	urlParams_   gensupport.URLParams
	ifNoneMatch_ string
	ctx_         context.Context
	header_      http.Header
}
1053
1054// Get: Gets the latest state of a long-running operation. Clients can
1055// use this method to poll the operation result at intervals as
1056// recommended by the API service.
1057//
1058// - name: The name of the operation resource.
1059func (r *OperationsService) Get(name string) *OperationsGetCall {
1060	c := &OperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
1061	c.name = name
1062	return c
1063}
1064
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
// It stores the combined field selector in the "fields" query
// parameter and returns c to allow chaining.
func (c *OperationsGetCall) Fields(s ...googleapi.Field) *OperationsGetCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}
1072
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
// It returns c to allow chaining.
func (c *OperationsGetCall) IfNoneMatch(entityTag string) *OperationsGetCall {
	c.ifNoneMatch_ = entityTag
	return c
}
1082
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
// It returns c to allow chaining.
func (c *OperationsGetCall) Context(ctx context.Context) *OperationsGetCall {
	c.ctx_ = ctx
	return c
}
1090
1091// Header returns an http.Header that can be modified by the caller to
1092// add HTTP headers to the request.
1093func (c *OperationsGetCall) Header() http.Header {
1094	if c.header_ == nil {
1095		c.header_ = make(http.Header)
1096	}
1097	return c.header_
1098}
1099
// doRequest builds and sends the HTTP GET request for
// "speech.operations.get" and returns the raw response. alt selects the
// response wire format ("json" when invoked from Do).
func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210812")
	// Copy caller-supplied headers first ...
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	// ... then set mandatory headers so they cannot be overridden.
	reqHeaders.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
	}
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	// Build the URL from the service base path plus the templated
	// operation path, then expand {+name} with the requested operation.
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/operations/{+name}")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
1125
// Do executes the "speech.operations.get" call.
// Exactly one of *Operation or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *OperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 Not Modified (possible when IfNoneMatch was set) is reported
	// as a *googleapi.Error before err is inspected, so callers can
	// detect it with googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	// Capture the response headers/status before decoding the body.
	ret := &Operation{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.",
	//   "flatPath": "v1/operations/{operationsId}",
	//   "httpMethod": "GET",
	//   "id": "speech.operations.get",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "The name of the operation resource.",
	//       "location": "path",
	//       "pattern": "^.*$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1/operations/{+name}",
	//   "response": {
	//     "$ref": "Operation"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform"
	//   ]
	// }

}
1190
// method id "speech.operations.list":

// OperationsListCall holds the state for a pending
// "speech.operations.list" request: the service, URL query parameters
// (filter, name, pageSize, pageToken, ...), an optional ETag for
// If-None-Match, and the context and extra headers to use when the call
// is executed via Do or Pages.
type OperationsListCall struct {
	s            *Service
	urlParams_   gensupport.URLParams
	ifNoneMatch_ string
	ctx_         context.Context
	header_      http.Header
}
1200
1201// List: Lists operations that match the specified filter in the
1202// request. If the server doesn't support this method, it returns
1203// `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to
1204// override the binding to use different resource name schemes, such as
1205// `users/*/operations`. To override the binding, API services can add a
1206// binding such as "/v1/{name=users/*}/operations" to their service
1207// configuration. For backwards compatibility, the default name includes
1208// the operations collection id, however overriding users must ensure
1209// the name binding is the parent resource, without the operations
1210// collection id.
1211func (r *OperationsService) List() *OperationsListCall {
1212	c := &OperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
1213	return c
1214}
1215
// Filter sets the optional parameter "filter": The standard list
// filter.
// It returns c to allow chaining.
func (c *OperationsListCall) Filter(filter string) *OperationsListCall {
	c.urlParams_.Set("filter", filter)
	return c
}
1222
// Name sets the optional parameter "name": The name of the operation's
// parent resource.
// It returns c to allow chaining.
func (c *OperationsListCall) Name(name string) *OperationsListCall {
	c.urlParams_.Set("name", name)
	return c
}
1229
// PageSize sets the optional parameter "pageSize": The standard list
// page size.
// It returns c to allow chaining.
func (c *OperationsListCall) PageSize(pageSize int64) *OperationsListCall {
	c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
	return c
}
1236
// PageToken sets the optional parameter "pageToken": The standard list
// page token.
// It returns c to allow chaining.
func (c *OperationsListCall) PageToken(pageToken string) *OperationsListCall {
	c.urlParams_.Set("pageToken", pageToken)
	return c
}
1243
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
// It stores the combined field selector in the "fields" query
// parameter and returns c to allow chaining.
func (c *OperationsListCall) Fields(s ...googleapi.Field) *OperationsListCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}
1251
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
// It returns c to allow chaining.
func (c *OperationsListCall) IfNoneMatch(entityTag string) *OperationsListCall {
	c.ifNoneMatch_ = entityTag
	return c
}
1261
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
// It returns c to allow chaining.
func (c *OperationsListCall) Context(ctx context.Context) *OperationsListCall {
	c.ctx_ = ctx
	return c
}
1269
1270// Header returns an http.Header that can be modified by the caller to
1271// add HTTP headers to the request.
1272func (c *OperationsListCall) Header() http.Header {
1273	if c.header_ == nil {
1274		c.header_ = make(http.Header)
1275	}
1276	return c.header_
1277}
1278
// doRequest builds and sends the HTTP GET request for
// "speech.operations.list" and returns the raw response. alt selects
// the response wire format ("json" when invoked from Do).
func (c *OperationsListCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210812")
	// Copy caller-supplied headers first ...
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	// ... then set mandatory headers so they cannot be overridden.
	reqHeaders.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
	}
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/operations")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
1301
// Do executes the "speech.operations.list" call.
// Exactly one of *ListOperationsResponse or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ListOperationsResponse.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *OperationsListCall) Do(opts ...googleapi.CallOption) (*ListOperationsResponse, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 Not Modified (possible when IfNoneMatch was set) is reported
	// as a *googleapi.Error before err is inspected, so callers can
	// detect it with googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	// Capture the response headers/status before decoding the body.
	ret := &ListOperationsResponse{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `\"/v1/{name=users/*}/operations\"` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.",
	//   "flatPath": "v1/operations",
	//   "httpMethod": "GET",
	//   "id": "speech.operations.list",
	//   "parameterOrder": [],
	//   "parameters": {
	//     "filter": {
	//       "description": "The standard list filter.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "name": {
	//       "description": "The name of the operation's parent resource.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "pageSize": {
	//       "description": "The standard list page size.",
	//       "format": "int32",
	//       "location": "query",
	//       "type": "integer"
	//     },
	//     "pageToken": {
	//       "description": "The standard list page token.",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1/operations",
	//   "response": {
	//     "$ref": "ListOperationsResponse"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform"
	//   ]
	// }

}
1378
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *OperationsListCall) Pages(ctx context.Context, f func(*ListOperationsResponse) error) error {
	c.ctx_ = ctx
	// The deferred call restores whatever pageToken was set before
	// iteration began (possibly empty), so c can be reused afterwards.
	defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
	for {
		x, err := c.Do()
		if err != nil {
			return err
		}
		if err := f(x); err != nil {
			return err
		}
		// An empty NextPageToken signals the last page.
		if x.NextPageToken == "" {
			return nil
		}
		c.PageToken(x.NextPageToken)
	}
}
1399
// method id "speech.speech.longrunningrecognize":

// SpeechLongrunningrecognizeCall holds the state for a pending
// "speech.speech.longrunningrecognize" request: the service, the
// request body, URL query parameters, and the context and extra headers
// to use when the call is executed via Do.
type SpeechLongrunningrecognizeCall struct {
	s                           *Service
	longrunningrecognizerequest *LongRunningRecognizeRequest
	urlParams_                  gensupport.URLParams
	ctx_                        context.Context
	header_                     http.Header
}
1409
1410// Longrunningrecognize: Performs asynchronous speech recognition:
1411// receive results via the google.longrunning.Operations interface.
1412// Returns either an `Operation.error` or an `Operation.response` which
1413// contains a `LongRunningRecognizeResponse` message. For more
1414// information on asynchronous speech recognition, see the how-to
1415// (https://cloud.google.com/speech-to-text/docs/async-recognize).
1416func (r *SpeechService) Longrunningrecognize(longrunningrecognizerequest *LongRunningRecognizeRequest) *SpeechLongrunningrecognizeCall {
1417	c := &SpeechLongrunningrecognizeCall{s: r.s, urlParams_: make(gensupport.URLParams)}
1418	c.longrunningrecognizerequest = longrunningrecognizerequest
1419	return c
1420}
1421
1422// Fields allows partial responses to be retrieved. See
1423// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
1424// for more information.
1425func (c *SpeechLongrunningrecognizeCall) Fields(s ...googleapi.Field) *SpeechLongrunningrecognizeCall {
1426	c.urlParams_.Set("fields", googleapi.CombineFields(s))
1427	return c
1428}
1429
1430// Context sets the context to be used in this call's Do method. Any
1431// pending HTTP request will be aborted if the provided context is
1432// canceled.
1433func (c *SpeechLongrunningrecognizeCall) Context(ctx context.Context) *SpeechLongrunningrecognizeCall {
1434	c.ctx_ = ctx
1435	return c
1436}
1437
1438// Header returns an http.Header that can be modified by the caller to
1439// add HTTP headers to the request.
1440func (c *SpeechLongrunningrecognizeCall) Header() http.Header {
1441	if c.header_ == nil {
1442		c.header_ = make(http.Header)
1443	}
1444	return c.header_
1445}
1446
1447func (c *SpeechLongrunningrecognizeCall) doRequest(alt string) (*http.Response, error) {
1448	reqHeaders := make(http.Header)
1449	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210812")
1450	for k, v := range c.header_ {
1451		reqHeaders[k] = v
1452	}
1453	reqHeaders.Set("User-Agent", c.s.userAgent())
1454	var body io.Reader = nil
1455	body, err := googleapi.WithoutDataWrapper.JSONReader(c.longrunningrecognizerequest)
1456	if err != nil {
1457		return nil, err
1458	}
1459	reqHeaders.Set("Content-Type", "application/json")
1460	c.urlParams_.Set("alt", alt)
1461	c.urlParams_.Set("prettyPrint", "false")
1462	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/speech:longrunningrecognize")
1463	urls += "?" + c.urlParams_.Encode()
1464	req, err := http.NewRequest("POST", urls, body)
1465	if err != nil {
1466		return nil, err
1467	}
1468	req.Header = reqHeaders
1469	return gensupport.SendRequest(c.ctx_, c.s.client, req)
1470}
1471
// Do executes the "speech.speech.longrunningrecognize" call.
// Exactly one of *Operation or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *SpeechLongrunningrecognizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 is surfaced as a *googleapi.Error before the generic error
	// check so callers can distinguish it with googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	// Pre-populate the response wrapper so header/status survive decoding.
	ret := &Operation{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	// DecodeResponse takes **Operation so an "alt=media"-style decode
	// could replace the pointer; here it fills in the JSON body.
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Performs asynchronous speech recognition: receive results via the google.longrunning.Operations interface. Returns either an `Operation.error` or an `Operation.response` which contains a `LongRunningRecognizeResponse` message. For more information on asynchronous speech recognition, see the [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize).",
	//   "flatPath": "v1/speech:longrunningrecognize",
	//   "httpMethod": "POST",
	//   "id": "speech.speech.longrunningrecognize",
	//   "parameterOrder": [],
	//   "parameters": {},
	//   "path": "v1/speech:longrunningrecognize",
	//   "request": {
	//     "$ref": "LongRunningRecognizeRequest"
	//   },
	//   "response": {
	//     "$ref": "Operation"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform"
	//   ]
	// }

}
1529
// method id "speech.speech.recognize":

// SpeechRecognizeCall holds the state of a pending
// "speech.speech.recognize" call: the request body plus optional URL
// parameters, context, and extra HTTP headers.
type SpeechRecognizeCall struct {
	s                *Service             // parent service: HTTP client, base path, user agent
	recognizerequest *RecognizeRequest    // JSON-encoded request body sent by doRequest
	urlParams_       gensupport.URLParams // query parameters (alt, fields, prettyPrint, ...)
	ctx_             context.Context      // optional per-call context set via Context
	header_          http.Header          // extra request headers set via Header
}
1539
1540// Recognize: Performs synchronous speech recognition: receive results
1541// after all audio has been sent and processed.
1542func (r *SpeechService) Recognize(recognizerequest *RecognizeRequest) *SpeechRecognizeCall {
1543	c := &SpeechRecognizeCall{s: r.s, urlParams_: make(gensupport.URLParams)}
1544	c.recognizerequest = recognizerequest
1545	return c
1546}
1547
1548// Fields allows partial responses to be retrieved. See
1549// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
1550// for more information.
1551func (c *SpeechRecognizeCall) Fields(s ...googleapi.Field) *SpeechRecognizeCall {
1552	c.urlParams_.Set("fields", googleapi.CombineFields(s))
1553	return c
1554}
1555
1556// Context sets the context to be used in this call's Do method. Any
1557// pending HTTP request will be aborted if the provided context is
1558// canceled.
1559func (c *SpeechRecognizeCall) Context(ctx context.Context) *SpeechRecognizeCall {
1560	c.ctx_ = ctx
1561	return c
1562}
1563
1564// Header returns an http.Header that can be modified by the caller to
1565// add HTTP headers to the request.
1566func (c *SpeechRecognizeCall) Header() http.Header {
1567	if c.header_ == nil {
1568		c.header_ = make(http.Header)
1569	}
1570	return c.header_
1571}
1572
1573func (c *SpeechRecognizeCall) doRequest(alt string) (*http.Response, error) {
1574	reqHeaders := make(http.Header)
1575	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210812")
1576	for k, v := range c.header_ {
1577		reqHeaders[k] = v
1578	}
1579	reqHeaders.Set("User-Agent", c.s.userAgent())
1580	var body io.Reader = nil
1581	body, err := googleapi.WithoutDataWrapper.JSONReader(c.recognizerequest)
1582	if err != nil {
1583		return nil, err
1584	}
1585	reqHeaders.Set("Content-Type", "application/json")
1586	c.urlParams_.Set("alt", alt)
1587	c.urlParams_.Set("prettyPrint", "false")
1588	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/speech:recognize")
1589	urls += "?" + c.urlParams_.Encode()
1590	req, err := http.NewRequest("POST", urls, body)
1591	if err != nil {
1592		return nil, err
1593	}
1594	req.Header = reqHeaders
1595	return gensupport.SendRequest(c.ctx_, c.s.client, req)
1596}
1597
// Do executes the "speech.speech.recognize" call.
// Exactly one of *RecognizeResponse or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *RecognizeResponse.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *SpeechRecognizeCall) Do(opts ...googleapi.CallOption) (*RecognizeResponse, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 is surfaced as a *googleapi.Error before the generic error
	// check so callers can distinguish it with googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	// Pre-populate the response wrapper so header/status survive decoding.
	ret := &RecognizeResponse{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	// DecodeResponse takes **RecognizeResponse so an "alt=media"-style
	// decode could replace the pointer; here it fills in the JSON body.
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Performs synchronous speech recognition: receive results after all audio has been sent and processed.",
	//   "flatPath": "v1/speech:recognize",
	//   "httpMethod": "POST",
	//   "id": "speech.speech.recognize",
	//   "parameterOrder": [],
	//   "parameters": {},
	//   "path": "v1/speech:recognize",
	//   "request": {
	//     "$ref": "RecognizeRequest"
	//   },
	//   "response": {
	//     "$ref": "RecognizeResponse"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform"
	//   ]
	// }

}
1655