1// Copyright 2021 Google LLC.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// Code generated file. DO NOT EDIT.
6
7// Package speech provides access to the Cloud Speech-to-Text API.
8//
9// This package is DEPRECATED. Use package cloud.google.com/go/speech/apiv1 instead.
10//
11// For product documentation, see: https://cloud.google.com/speech-to-text/docs/quickstart-protocol
12//
13// Creating a client
14//
15// Usage example:
16//
17//   import "google.golang.org/api/speech/v1"
18//   ...
19//   ctx := context.Background()
20//   speechService, err := speech.NewService(ctx)
21//
22// In this example, Google Application Default Credentials are used for authentication.
23//
24// For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials.
25//
26// Other authentication options
27//
28// To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey:
29//
30//   speechService, err := speech.NewService(ctx, option.WithAPIKey("AIza..."))
31//
32// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource:
33//
34//   config := &oauth2.Config{...}
35//   // ...
36//   token, err := config.Exchange(ctx, ...)
37//   speechService, err := speech.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token)))
38//
39// See https://godoc.org/google.golang.org/api/option/ for details on options.
40package speech // import "google.golang.org/api/speech/v1"
41
42import (
43	"bytes"
44	"context"
45	"encoding/json"
46	"errors"
47	"fmt"
48	"io"
49	"net/http"
50	"net/url"
51	"strconv"
52	"strings"
53
54	googleapi "google.golang.org/api/googleapi"
55	gensupport "google.golang.org/api/internal/gensupport"
56	option "google.golang.org/api/option"
57	internaloption "google.golang.org/api/option/internaloption"
58	htransport "google.golang.org/api/transport/http"
59)
60
// Always reference these packages, just in case the auto-generated code
// below doesn't. This keeps the import block above compiling even if a
// regenerated body stops using some of these packages.
var _ = bytes.NewBuffer
var _ = strconv.Itoa
var _ = fmt.Sprintf
var _ = json.NewDecoder
var _ = io.Copy
var _ = url.Parse
var _ = gensupport.MarshalJSON
var _ = googleapi.Version
var _ = errors.New
var _ = strings.Replace
var _ = context.Canceled
var _ = internaloption.WithDefaultEndpoint
75
// apiId, apiName and apiVersion identify this API and version to the
// generated support code; basePath and mtlsBasePath are the default
// REST endpoints (the mtls endpoint is used for mutual-TLS transport).
const apiId = "speech:v1"
const apiName = "speech"
const apiVersion = "v1"
const basePath = "https://speech.googleapis.com/"
const mtlsBasePath = "https://speech.mtls.googleapis.com/"

// OAuth2 scopes used by this API.
const (
	// View and manage your data across Google Cloud Platform services
	CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform"
)
87
88// NewService creates a new Service.
89func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, error) {
90	scopesOption := option.WithScopes(
91		"https://www.googleapis.com/auth/cloud-platform",
92	)
93	// NOTE: prepend, so we don't override user-specified scopes.
94	opts = append([]option.ClientOption{scopesOption}, opts...)
95	opts = append(opts, internaloption.WithDefaultEndpoint(basePath))
96	opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath))
97	client, endpoint, err := htransport.NewClient(ctx, opts...)
98	if err != nil {
99		return nil, err
100	}
101	s, err := New(client)
102	if err != nil {
103		return nil, err
104	}
105	if endpoint != "" {
106		s.BasePath = endpoint
107	}
108	return s, nil
109}
110
111// New creates a new Service. It uses the provided http.Client for requests.
112//
113// Deprecated: please use NewService instead.
114// To provide a custom HTTP client, use option.WithHTTPClient.
115// If you are using google.golang.org/api/googleapis/transport.APIKey, use option.WithAPIKey with NewService instead.
116func New(client *http.Client) (*Service, error) {
117	if client == nil {
118		return nil, errors.New("client is nil")
119	}
120	s := &Service{client: client, BasePath: basePath}
121	s.Operations = NewOperationsService(s)
122	s.Speech = NewSpeechService(s)
123	return s, nil
124}
125
// Service is the root client for the Cloud Speech-to-Text API. Create
// one with NewService; the sub-services share its HTTP client and
// endpoint configuration.
type Service struct {
	client    *http.Client
	BasePath  string // API endpoint base URL
	UserAgent string // optional additional User-Agent fragment

	Operations *OperationsService

	Speech *SpeechService
}
135
136func (s *Service) userAgent() string {
137	if s.UserAgent == "" {
138		return googleapi.UserAgent
139	}
140	return googleapi.UserAgent + " " + s.UserAgent
141}
142
143func NewOperationsService(s *Service) *OperationsService {
144	rs := &OperationsService{s: s}
145	return rs
146}
147
// OperationsService provides access to long-running operation methods.
// Obtain one via Service.Operations.
type OperationsService struct {
	s *Service
}
151
152func NewSpeechService(s *Service) *SpeechService {
153	rs := &SpeechService{s: s}
154	return rs
155}
156
// SpeechService provides access to the speech recognition methods.
// Obtain one via Service.Speech.
type SpeechService struct {
	s *Service
}
160
// ListOperationsResponse: The response message for
// Operations.ListOperations.
type ListOperationsResponse struct {
	// NextPageToken: The standard List next-page token. Pass it in a
	// subsequent list call to retrieve the next page of results.
	NextPageToken string `json:"nextPageToken,omitempty"`

	// Operations: A list of operations that matches the specified filter in
	// the request.
	Operations []*Operation `json:"operations,omitempty"`

	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields is a list of field names (e.g. "NextPageToken") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "NextPageToken") to include
	// in API requests with the JSON null value. By default, fields with
	// empty values are omitted from API requests. However, any field with
	// an empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
191
192func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) {
193	type NoMethod ListOperationsResponse
194	raw := NoMethod(*s)
195	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
196}
197
// LongRunningRecognizeMetadata: Describes the progress of a
// long-running `LongRunningRecognize` call. It is included in the
// `metadata` field of the `Operation` returned by the `GetOperation`
// call of the `google::longrunning::Operations` service.
type LongRunningRecognizeMetadata struct {
	// LastUpdateTime: Time of the most recent processing update.
	LastUpdateTime string `json:"lastUpdateTime,omitempty"`

	// ProgressPercent: Approximate percentage of audio processed thus far.
	// Guaranteed to be 100 when the audio is fully processed and the
	// results are available.
	ProgressPercent int64 `json:"progressPercent,omitempty"`

	// StartTime: Time when the request was received.
	StartTime string `json:"startTime,omitempty"`

	// Uri: Output only. The URI of the audio file being transcribed. Empty
	// if the audio was sent as byte content.
	Uri string `json:"uri,omitempty"`

	// ForceSendFields is a list of field names (e.g. "LastUpdateTime") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "LastUpdateTime") to
	// include in API requests with the JSON null value. By default, fields
	// with empty values are omitted from API requests. However, any field
	// with an empty value appearing in NullFields will be sent to the
	// server as null. It is an error if a field in this list has a
	// non-empty value. This may be used to include null fields in Patch
	// requests.
	NullFields []string `json:"-"`
}
235
236func (s *LongRunningRecognizeMetadata) MarshalJSON() ([]byte, error) {
237	type NoMethod LongRunningRecognizeMetadata
238	raw := NoMethod(*s)
239	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
240}
241
// LongRunningRecognizeRequest: The top-level message sent by the client
// for the `LongRunningRecognize` method.
type LongRunningRecognizeRequest struct {
	// Audio: Required. The audio data to be recognized.
	Audio *RecognitionAudio `json:"audio,omitempty"`

	// Config: Required. Provides information to the recognizer that
	// specifies how to process the request.
	Config *RecognitionConfig `json:"config,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Audio") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Audio") to include in API
	// requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
268
269func (s *LongRunningRecognizeRequest) MarshalJSON() ([]byte, error) {
270	type NoMethod LongRunningRecognizeRequest
271	raw := NoMethod(*s)
272	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
273}
274
// LongRunningRecognizeResponse: The only message returned to the client
// by the `LongRunningRecognize` method. It contains the result as zero
// or more sequential `SpeechRecognitionResult` messages. It is included
// in the `result.response` field of the `Operation` returned by the
// `GetOperation` call of the `google::longrunning::Operations` service.
type LongRunningRecognizeResponse struct {
	// Results: Sequential list of transcription results corresponding to
	// sequential portions of audio.
	Results []*SpeechRecognitionResult `json:"results,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Results") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Results") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
301
302func (s *LongRunningRecognizeResponse) MarshalJSON() ([]byte, error) {
303	type NoMethod LongRunningRecognizeResponse
304	raw := NoMethod(*s)
305	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
306}
307
// Operation: This resource represents a long-running operation that is
// the result of a network API call.
type Operation struct {
	// Done: If the value is `false`, it means the operation is still in
	// progress. If `true`, the operation is completed, and either `error`
	// or `response` is available.
	Done bool `json:"done,omitempty"`

	// Error: The error result of the operation in case of failure or
	// cancellation.
	Error *Status `json:"error,omitempty"`

	// Metadata: Service-specific metadata associated with the operation. It
	// typically contains progress information and common metadata such as
	// create time. Some services might not provide such metadata. Any
	// method that returns a long-running operation should document the
	// metadata type, if any. Kept as raw JSON so callers can decode it
	// into the appropriate concrete type.
	Metadata googleapi.RawMessage `json:"metadata,omitempty"`

	// Name: The server-assigned name, which is only unique within the same
	// service that originally returns it. If you use the default HTTP
	// mapping, the `name` should be a resource name ending with
	// `operations/{unique_id}`.
	Name string `json:"name,omitempty"`

	// Response: The normal response of the operation in case of success. If
	// the original method returns no data on success, such as `Delete`, the
	// response is `google.protobuf.Empty`. If the original method is
	// standard `Get`/`Create`/`Update`, the response should be the
	// resource. For other methods, the response should have the type
	// `XxxResponse`, where `Xxx` is the original method name. For example,
	// if the original method name is `TakeSnapshot()`, the inferred
	// response type is `TakeSnapshotResponse`.
	Response googleapi.RawMessage `json:"response,omitempty"`

	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields is a list of field names (e.g. "Done") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Done") to include in API
	// requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
363
364func (s *Operation) MarshalJSON() ([]byte, error) {
365	type NoMethod Operation
366	raw := NoMethod(*s)
367	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
368}
369
// RecognitionAudio: Contains audio data in the encoding specified in
// the `RecognitionConfig`. Either `content` or `uri` must be supplied.
// Supplying both or neither returns google.rpc.Code.INVALID_ARGUMENT.
// See content limits
// (https://cloud.google.com/speech-to-text/quotas#content).
type RecognitionAudio struct {
	// Content: The audio data bytes encoded as specified in
	// `RecognitionConfig`. Note: as with all bytes fields, proto buffers
	// use a pure binary representation, whereas JSON representations use
	// base64.
	Content string `json:"content,omitempty"`

	// Uri: URI that points to a file that contains audio data bytes as
	// specified in `RecognitionConfig`. The file must not be compressed
	// (for example, gzip). Currently, only Google Cloud Storage URIs are
	// supported, which must be specified in the following format:
	// `gs://bucket_name/object_name` (other URI formats return
	// google.rpc.Code.INVALID_ARGUMENT). For more information, see Request
	// URIs (https://cloud.google.com/storage/docs/reference-uris).
	Uri string `json:"uri,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Content") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Content") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
407
408func (s *RecognitionAudio) MarshalJSON() ([]byte, error) {
409	type NoMethod RecognitionAudio
410	raw := NoMethod(*s)
411	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
412}
413
// RecognitionConfig: Provides information to the recognizer that
// specifies how to process the request.
type RecognitionConfig struct {
	// AudioChannelCount: The number of channels in the input audio data.
	// ONLY set this for MULTI-CHANNEL recognition. Valid values for
	// LINEAR16 and FLAC are `1`-`8`. Valid values for OGG_OPUS are
	// '1'-'254'. Valid value for MULAW, AMR, AMR_WB and
	// SPEEX_WITH_HEADER_BYTE is only `1`. If `0` or omitted, defaults to
	// one channel (mono). Note: We only recognize the first channel by
	// default. To perform independent recognition on each channel set
	// `enable_separate_recognition_per_channel` to 'true'.
	AudioChannelCount int64 `json:"audioChannelCount,omitempty"`

	// DiarizationConfig: Config to enable speaker diarization and set
	// additional parameters to make diarization better suited for your
	// application. Note: When this is enabled, we send all the words from
	// the beginning of the audio for the top alternative in every
	// consecutive STREAMING responses. This is done in order to improve our
	// speaker tags as our models learn to identify the speakers in the
	// conversation over time. For non-streaming requests, the diarization
	// results will be provided only in the top alternative of the FINAL
	// SpeechRecognitionResult.
	DiarizationConfig *SpeakerDiarizationConfig `json:"diarizationConfig,omitempty"`

	// EnableAutomaticPunctuation: If 'true', adds punctuation to
	// recognition result hypotheses. This feature is only available in
	// select languages. Setting this for requests in other languages has no
	// effect at all. The default 'false' value does not add punctuation to
	// result hypotheses.
	EnableAutomaticPunctuation bool `json:"enableAutomaticPunctuation,omitempty"`

	// EnableSeparateRecognitionPerChannel: This needs to be set to `true`
	// explicitly and `audio_channel_count` > 1 to get each channel
	// recognized separately. The recognition result will contain a
	// `channel_tag` field to state which channel that result belongs to. If
	// this is not true, we will only recognize the first channel. The
	// request is billed cumulatively for all channels recognized:
	// `audio_channel_count` multiplied by the length of the audio.
	EnableSeparateRecognitionPerChannel bool `json:"enableSeparateRecognitionPerChannel,omitempty"`

	// EnableWordTimeOffsets: If `true`, the top result includes a list of
	// words and the start and end time offsets (timestamps) for those
	// words. If `false`, no word-level time offset information is returned.
	// The default is `false`.
	EnableWordTimeOffsets bool `json:"enableWordTimeOffsets,omitempty"`

	// Encoding: Encoding of audio data sent in all `RecognitionAudio`
	// messages. This field is optional for `FLAC` and `WAV` audio files and
	// required for all other audio formats. For details, see AudioEncoding.
	//
	// Possible values:
	//   "ENCODING_UNSPECIFIED" - Not specified.
	//   "LINEAR16" - Uncompressed 16-bit signed little-endian samples
	// (Linear PCM).
	//   "FLAC" - `FLAC` (Free Lossless Audio Codec) is the recommended
	// encoding because it is lossless--therefore recognition is not
	// compromised--and requires only about half the bandwidth of
	// `LINEAR16`. `FLAC` stream encoding supports 16-bit and 24-bit
	// samples, however, not all fields in `STREAMINFO` are supported.
	//   "MULAW" - 8-bit samples that compand 14-bit audio samples using
	// G.711 PCMU/mu-law.
	//   "AMR" - Adaptive Multi-Rate Narrowband codec. `sample_rate_hertz`
	// must be 8000.
	//   "AMR_WB" - Adaptive Multi-Rate Wideband codec. `sample_rate_hertz`
	// must be 16000.
	//   "OGG_OPUS" - Opus encoded audio frames in Ogg container
	// ([OggOpus](https://wiki.xiph.org/OggOpus)). `sample_rate_hertz` must
	// be one of 8000, 12000, 16000, 24000, or 48000.
	//   "SPEEX_WITH_HEADER_BYTE" - Although the use of lossy encodings is
	// not recommended, if a very low bitrate encoding is required,
	// `OGG_OPUS` is highly preferred over Speex encoding. The
	// [Speex](https://speex.org/) encoding supported by Cloud Speech API
	// has a header byte in each block, as in MIME type
	// `audio/x-speex-with-header-byte`. It is a variant of the RTP Speex
	// encoding defined in [RFC 5574](https://tools.ietf.org/html/rfc5574).
	// The stream is a sequence of blocks, one block per RTP packet. Each
	// block starts with a byte containing the length of the block, in
	// bytes, followed by one or more frames of Speex data, padded to an
	// integral number of bytes (octets) as specified in RFC 5574. In other
	// words, each RTP header is replaced with a single byte containing the
	// block length. Only Speex wideband is supported. `sample_rate_hertz`
	// must be 16000.
	Encoding string `json:"encoding,omitempty"`

	// LanguageCode: Required. The language of the supplied audio as a
	// BCP-47 (https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
	// Example: "en-US". See Language Support
	// (https://cloud.google.com/speech-to-text/docs/languages) for a list
	// of the currently supported language codes.
	LanguageCode string `json:"languageCode,omitempty"`

	// MaxAlternatives: Maximum number of recognition hypotheses to be
	// returned. Specifically, the maximum number of
	// `SpeechRecognitionAlternative` messages within each
	// `SpeechRecognitionResult`. The server may return fewer than
	// `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1`
	// will return a maximum of one. If omitted, will return a maximum of
	// one.
	MaxAlternatives int64 `json:"maxAlternatives,omitempty"`

	// Metadata: Metadata regarding this request.
	Metadata *RecognitionMetadata `json:"metadata,omitempty"`

	// Model: Which model to select for the given request. Select the model
	// best suited to your domain to get best results. If a model is not
	// explicitly specified, then we auto-select a model based on the
	// parameters in the RecognitionConfig. *Model* *Description*
	// command_and_search Best for short queries such as voice commands or
	// voice search. phone_call Best for audio that originated from a phone
	// call (typically recorded at an 8khz sampling rate). video Best for
	// audio that originated from from video or includes multiple speakers.
	// Ideally the audio is recorded at a 16khz or greater sampling rate.
	// This is a premium model that costs more than the standard rate.
	// default Best for audio that is not one of the specific audio models.
	// For example, long-form audio. Ideally the audio is high-fidelity,
	// recorded at a 16khz or greater sampling rate.
	Model string `json:"model,omitempty"`

	// ProfanityFilter: If set to `true`, the server will attempt to filter
	// out profanities, replacing all but the initial character in each
	// filtered word with asterisks, e.g. "f***". If set to `false` or
	// omitted, profanities won't be filtered out.
	ProfanityFilter bool `json:"profanityFilter,omitempty"`

	// SampleRateHertz: Sample rate in Hertz of the audio data sent in all
	// `RecognitionAudio` messages. Valid values are: 8000-48000. 16000 is
	// optimal. For best results, set the sampling rate of the audio source
	// to 16000 Hz. If that's not possible, use the native sample rate of
	// the audio source (instead of re-sampling). This field is optional for
	// FLAC and WAV audio files, but is required for all other audio
	// formats. For details, see AudioEncoding.
	SampleRateHertz int64 `json:"sampleRateHertz,omitempty"`

	// SpeechContexts: Array of SpeechContext. A means to provide context to
	// assist the speech recognition. For more information, see speech
	// adaptation
	// (https://cloud.google.com/speech-to-text/docs/context-strength).
	SpeechContexts []*SpeechContext `json:"speechContexts,omitempty"`

	// UseEnhanced: Set to true to use an enhanced model for speech
	// recognition. If `use_enhanced` is set to true and the `model` field
	// is not set, then an appropriate enhanced model is chosen if an
	// enhanced model exists for the audio. If `use_enhanced` is true and an
	// enhanced version of the specified model does not exist, then the
	// speech is recognized using the standard version of the specified
	// model.
	UseEnhanced bool `json:"useEnhanced,omitempty"`

	// ForceSendFields is a list of field names (e.g. "AudioChannelCount")
	// to unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "AudioChannelCount") to
	// include in API requests with the JSON null value. By default, fields
	// with empty values are omitted from API requests. However, any field
	// with an empty value appearing in NullFields will be sent to the
	// server as null. It is an error if a field in this list has a
	// non-empty value. This may be used to include null fields in Patch
	// requests.
	NullFields []string `json:"-"`
}
579
580func (s *RecognitionConfig) MarshalJSON() ([]byte, error) {
581	type NoMethod RecognitionConfig
582	raw := NoMethod(*s)
583	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
584}
585
// RecognitionMetadata: Description of audio data to be recognized.
type RecognitionMetadata struct {
	// AudioTopic: Description of the content. Eg. "Recordings of federal
	// supreme court hearings from 2012".
	AudioTopic string `json:"audioTopic,omitempty"`

	// IndustryNaicsCodeOfAudio: The industry vertical to which this speech
	// recognition request most closely applies. This is most indicative of
	// the topics contained in the audio. Use the 6-digit NAICS code to
	// identify the industry vertical - see https://www.naics.com/search/.
	IndustryNaicsCodeOfAudio int64 `json:"industryNaicsCodeOfAudio,omitempty"`

	// InteractionType: The use case most closely describing the audio
	// content to be recognized.
	//
	// Possible values:
	//   "INTERACTION_TYPE_UNSPECIFIED" - Use case is either unknown or is
	// something other than one of the other values below.
	//   "DISCUSSION" - Multiple people in a conversation or discussion. For
	// example in a meeting with two or more people actively participating.
	// Typically all the primary people speaking would be in the same room
	// (if not, see PHONE_CALL)
	//   "PRESENTATION" - One or more persons lecturing or presenting to
	// others, mostly uninterrupted.
	//   "PHONE_CALL" - A phone-call or video-conference in which two or
	// more people, who are not in the same room, are actively
	// participating.
	//   "VOICEMAIL" - A recorded message intended for another person to
	// listen to.
	//   "PROFESSIONALLY_PRODUCED" - Professionally produced audio (eg. TV
	// Show, Podcast).
	//   "VOICE_SEARCH" - Transcribe spoken questions and queries into text.
	//   "VOICE_COMMAND" - Transcribe voice commands, such as for
	// controlling a device.
	//   "DICTATION" - Transcribe speech to text to create a written
	// document, such as a text-message, email or report.
	InteractionType string `json:"interactionType,omitempty"`

	// MicrophoneDistance: The audio type that most closely describes the
	// audio being recognized.
	//
	// Possible values:
	//   "MICROPHONE_DISTANCE_UNSPECIFIED" - Audio type is not known.
	//   "NEARFIELD" - The audio was captured from a closely placed
	// microphone. Eg. phone, dictaphone, or handheld microphone. Generally
	// if there speaker is within 1 meter of the microphone.
	//   "MIDFIELD" - The speaker if within 3 meters of the microphone.
	//   "FARFIELD" - The speaker is more than 3 meters away from the
	// microphone.
	MicrophoneDistance string `json:"microphoneDistance,omitempty"`

	// OriginalMediaType: The original media the speech was recorded on.
	//
	// Possible values:
	//   "ORIGINAL_MEDIA_TYPE_UNSPECIFIED" - Unknown original media type.
	//   "AUDIO" - The speech data is an audio recording.
	//   "VIDEO" - The speech data originally recorded on a video.
	OriginalMediaType string `json:"originalMediaType,omitempty"`

	// OriginalMimeType: Mime type of the original audio file. For example
	// `audio/m4a`, `audio/x-alaw-basic`, `audio/mp3`, `audio/3gpp`. A list
	// of possible audio mime types is maintained at
	// http://www.iana.org/assignments/media-types/media-types.xhtml#audio
	OriginalMimeType string `json:"originalMimeType,omitempty"`

	// RecordingDeviceName: The device used to make the recording. Examples
	// 'Nexus 5X' or 'Polycom SoundStation IP 6000' or 'POTS' or 'VoIP' or
	// 'Cardioid Microphone'.
	RecordingDeviceName string `json:"recordingDeviceName,omitempty"`

	// RecordingDeviceType: The type of device the speech was recorded with.
	//
	// Possible values:
	//   "RECORDING_DEVICE_TYPE_UNSPECIFIED" - The recording device is
	// unknown.
	//   "SMARTPHONE" - Speech was recorded on a smartphone.
	//   "PC" - Speech was recorded using a personal computer or tablet.
	//   "PHONE_LINE" - Speech was recorded over a phone line.
	//   "VEHICLE" - Speech was recorded in a vehicle.
	//   "OTHER_OUTDOOR_DEVICE" - Speech was recorded outdoors.
	//   "OTHER_INDOOR_DEVICE" - Speech was recorded indoors.
	RecordingDeviceType string `json:"recordingDeviceType,omitempty"`

	// ForceSendFields is a list of field names (e.g. "AudioTopic") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "AudioTopic") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
685
686func (s *RecognitionMetadata) MarshalJSON() ([]byte, error) {
687	type NoMethod RecognitionMetadata
688	raw := NoMethod(*s)
689	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
690}
691
// RecognizeRequest: The top-level message sent by the client for the
// `Recognize` method. Both `audio` and `config` are required by the
// API.
type RecognizeRequest struct {
	// Audio: Required. The audio data to be recognized.
	Audio *RecognitionAudio `json:"audio,omitempty"`

	// Config: Required. Provides information to the recognizer that
	// specifies how to process the request.
	Config *RecognitionConfig `json:"config,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Audio") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Audio") to include in API
	// requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
718
719func (s *RecognizeRequest) MarshalJSON() ([]byte, error) {
720	type NoMethod RecognizeRequest
721	raw := NoMethod(*s)
722	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
723}
724
// RecognizeResponse: The only message returned to the client by the
// `Recognize` method. It contains the result as zero or more sequential
// `SpeechRecognitionResult` messages.
type RecognizeResponse struct {
	// Results: Sequential list of transcription results corresponding to
	// sequential portions of audio.
	Results []*SpeechRecognitionResult `json:"results,omitempty"`

	// ServerResponse contains the HTTP response code and headers from the
	// server. It is excluded from JSON serialization (tag "-").
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields is a list of field names (e.g. "Results") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Results") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
753
754func (s *RecognizeResponse) MarshalJSON() ([]byte, error) {
755	type NoMethod RecognizeResponse
756	raw := NoMethod(*s)
757	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
758}
759
// SpeakerDiarizationConfig: Config to enable speaker diarization and
// bound the expected number of distinct speakers.
type SpeakerDiarizationConfig struct {
	// EnableSpeakerDiarization: If 'true', enables speaker detection for
	// each recognized word in the top alternative of the recognition result
	// using a speaker_tag provided in the WordInfo.
	EnableSpeakerDiarization bool `json:"enableSpeakerDiarization,omitempty"`

	// MaxSpeakerCount: Maximum number of speakers in the conversation. This
	// range gives you more flexibility by allowing the system to
	// automatically determine the correct number of speakers. If not set,
	// the default value is 6.
	MaxSpeakerCount int64 `json:"maxSpeakerCount,omitempty"`

	// MinSpeakerCount: Minimum number of speakers in the conversation. This
	// range gives you more flexibility by allowing the system to
	// automatically determine the correct number of speakers. If not set,
	// the default value is 2.
	MinSpeakerCount int64 `json:"minSpeakerCount,omitempty"`

	// SpeakerTag: Output only. Unused (ignored on input).
	SpeakerTag int64 `json:"speakerTag,omitempty"`

	// ForceSendFields is a list of field names (e.g.
	// "EnableSpeakerDiarization") to unconditionally include in API
	// requests. By default, fields with empty values are omitted from API
	// requests. However, any non-pointer, non-interface field appearing in
	// ForceSendFields will be sent to the server regardless of whether the
	// field is empty or not. This may be used to include empty fields in
	// Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "EnableSpeakerDiarization")
	// to include in API requests with the JSON null value. By default,
	// fields with empty values are omitted from API requests. However, any
	// field with an empty value appearing in NullFields will be sent to the
	// server as null. It is an error if a field in this list has a
	// non-empty value. This may be used to include null fields in Patch
	// requests.
	NullFields []string `json:"-"`
}
800
801func (s *SpeakerDiarizationConfig) MarshalJSON() ([]byte, error) {
802	type NoMethod SpeakerDiarizationConfig
803	raw := NoMethod(*s)
804	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
805}
806
// SpeechContext: Provides "hints" to the speech recognizer to favor
// specific words and phrases in the results.
type SpeechContext struct {
	// Phrases: A list of strings containing words and phrases "hints" so
	// that the speech recognition is more likely to recognize them. This
	// can be used to improve the accuracy for specific words and phrases,
	// for example, if specific commands are typically spoken by the user.
	// This can also be used to add additional words to the vocabulary of
	// the recognizer. See usage limits
	// (https://cloud.google.com/speech-to-text/quotas#content). List items
	// can also be set to classes for groups of words that represent common
	// concepts that occur in natural language. For example, rather than
	// providing phrase hints for every month of the year, using the $MONTH
	// class improves the likelihood of correctly transcribing audio that
	// includes months.
	Phrases []string `json:"phrases,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Phrases") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Phrases") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
840
841func (s *SpeechContext) MarshalJSON() ([]byte, error) {
842	type NoMethod SpeechContext
843	raw := NoMethod(*s)
844	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
845}
846
// SpeechRecognitionAlternative: Alternative hypotheses (a.k.a. n-best
// list).
type SpeechRecognitionAlternative struct {
	// Confidence: The confidence estimate between 0.0 and 1.0. A higher
	// number indicates an estimated greater likelihood that the recognized
	// words are correct. This field is set only for the top alternative of
	// a non-streaming result or, of a streaming result where
	// `is_final=true`. This field is not guaranteed to be accurate and
	// users should not rely on it to be always provided. The default of 0.0
	// is a sentinel value indicating `confidence` was not set.
	// (Decoded through gensupport.JSONFloat64 in UnmarshalJSON.)
	Confidence float64 `json:"confidence,omitempty"`

	// Transcript: Transcript text representing the words that the user
	// spoke.
	Transcript string `json:"transcript,omitempty"`

	// Words: A list of word-specific information for each recognized word.
	// Note: When `enable_speaker_diarization` is true, you will see all the
	// words from the beginning of the audio.
	Words []*WordInfo `json:"words,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Confidence") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Confidence") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
884
885func (s *SpeechRecognitionAlternative) MarshalJSON() ([]byte, error) {
886	type NoMethod SpeechRecognitionAlternative
887	raw := NoMethod(*s)
888	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
889}
890
// UnmarshalJSON decodes data into s, routing the `confidence` field
// through gensupport.JSONFloat64 (presumably so non-plain-number JSON
// encodings of the float are accepted — see gensupport for the exact
// semantics); every other field decodes with the standard rules.
func (s *SpeechRecognitionAlternative) UnmarshalJSON(data []byte) error {
	// Alias the type so the unmarshal below does not recurse into this
	// method. The embedded *NoMethod aliases s, so all other fields are
	// written directly into the receiver; only Confidence is shadowed.
	type NoMethod SpeechRecognitionAlternative
	var s1 struct {
		Confidence gensupport.JSONFloat64 `json:"confidence"`
		*NoMethod
	}
	s1.NoMethod = (*NoMethod)(s)
	if err := json.Unmarshal(data, &s1); err != nil {
		return err
	}
	s.Confidence = float64(s1.Confidence)
	return nil
}
904
// SpeechRecognitionResult: A speech recognition result corresponding to
// a portion of the audio.
type SpeechRecognitionResult struct {
	// Alternatives: May contain one or more recognition hypotheses (up to
	// the maximum specified in `max_alternatives`). These alternatives are
	// ordered in terms of accuracy, with the top (first) alternative being
	// the most probable, as ranked by the recognizer.
	Alternatives []*SpeechRecognitionAlternative `json:"alternatives,omitempty"`

	// ChannelTag: For multi-channel audio, this is the channel number
	// corresponding to the recognized result for the audio from that
	// channel. For audio_channel_count = N, its output values can range
	// from '1' to 'N'.
	ChannelTag int64 `json:"channelTag,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Alternatives") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Alternatives") to include
	// in API requests with the JSON null value. By default, fields with
	// empty values are omitted from API requests. However, any field with
	// an empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
936
937func (s *SpeechRecognitionResult) MarshalJSON() ([]byte, error) {
938	type NoMethod SpeechRecognitionResult
939	raw := NoMethod(*s)
940	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
941}
942
// Status: The `Status` type defines a logical error model that is
// suitable for different programming environments, including REST APIs
// and RPC APIs. It is used by gRPC (https://github.com/grpc). Each
// `Status` message contains three pieces of data: error code, error
// message, and error details. You can find out more about this error
// model and how to work with it in the API Design Guide
// (https://cloud.google.com/apis/design/errors).
type Status struct {
	// Code: The status code, which should be an enum value of
	// google.rpc.Code.
	Code int64 `json:"code,omitempty"`

	// Details: A list of messages that carry the error details. There is a
	// common set of message types for APIs to use. Each entry is kept as
	// raw, undecoded JSON (googleapi.RawMessage).
	Details []googleapi.RawMessage `json:"details,omitempty"`

	// Message: A developer-facing error message, which should be in
	// English. Any user-facing error message should be localized and sent
	// in the google.rpc.Status.details field, or localized by the client.
	Message string `json:"message,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Code") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Code") to include in API
	// requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
980
981func (s *Status) MarshalJSON() ([]byte, error) {
982	type NoMethod Status
983	raw := NoMethod(*s)
984	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
985}
986
// WordInfo: Word-specific information for recognized words.
type WordInfo struct {
	// EndTime: Time offset relative to the beginning of the audio, and
	// corresponding to the end of the spoken word. This field is only set
	// if `enable_word_time_offsets=true` and only in the top hypothesis.
	// This is an experimental feature and the accuracy of the time offset
	// can vary.
	// NOTE(review): carried as a JSON string here, not a numeric type —
	// presumably a duration encoding; confirm the exact format with the
	// API reference before parsing.
	EndTime string `json:"endTime,omitempty"`

	// SpeakerTag: Output only. A distinct integer value is assigned for
	// every speaker within the audio. This field specifies which one of
	// those speakers was detected to have spoken this word. Value ranges
	// from '1' to diarization_speaker_count. speaker_tag is set if
	// enable_speaker_diarization = 'true' and only in the top alternative.
	SpeakerTag int64 `json:"speakerTag,omitempty"`

	// StartTime: Time offset relative to the beginning of the audio, and
	// corresponding to the start of the spoken word. This field is only set
	// if `enable_word_time_offsets=true` and only in the top hypothesis.
	// This is an experimental feature and the accuracy of the time offset
	// can vary.
	StartTime string `json:"startTime,omitempty"`

	// Word: The word corresponding to this set of information.
	Word string `json:"word,omitempty"`

	// ForceSendFields is a list of field names (e.g. "EndTime") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "EndTime") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
1029
1030func (s *WordInfo) MarshalJSON() ([]byte, error) {
1031	type NoMethod WordInfo
1032	raw := NoMethod(*s)
1033	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
1034}
1035
1036// method id "speech.operations.get":
1037
// OperationsGetCall accumulates the parameters of a
// "speech.operations.get" call. Build it with OperationsService.Get,
// configure it with the chainable setters, and execute it with Do.
type OperationsGetCall struct {
	s *Service
	// name is the operation resource name; doRequest expands it into the
	// "v1/operations/{+name}" URL path.
	name       string
	urlParams_ gensupport.URLParams
	// ifNoneMatch_, when non-empty, is sent as the If-None-Match header.
	ifNoneMatch_ string
	// ctx_ is held until Do/doRequest issues the HTTP request.
	ctx_    context.Context
	header_ http.Header
}
1046
1047// Get: Gets the latest state of a long-running operation. Clients can
1048// use this method to poll the operation result at intervals as
1049// recommended by the API service.
1050func (r *OperationsService) Get(name string) *OperationsGetCall {
1051	c := &OperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
1052	c.name = name
1053	return c
1054}
1055
1056// Fields allows partial responses to be retrieved. See
1057// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
1058// for more information.
1059func (c *OperationsGetCall) Fields(s ...googleapi.Field) *OperationsGetCall {
1060	c.urlParams_.Set("fields", googleapi.CombineFields(s))
1061	return c
1062}
1063
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *OperationsGetCall) IfNoneMatch(entityTag string) *OperationsGetCall {
	c.ifNoneMatch_ = entityTag
	return c
}
1073
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled. The context is stored on the call and passed to the HTTP
// transport when doRequest sends the request.
func (c *OperationsGetCall) Context(ctx context.Context) *OperationsGetCall {
	c.ctx_ = ctx
	return c
}
1081
1082// Header returns an http.Header that can be modified by the caller to
1083// add HTTP headers to the request.
1084func (c *OperationsGetCall) Header() http.Header {
1085	if c.header_ == nil {
1086		c.header_ = make(http.Header)
1087	}
1088	return c.header_
1089}
1090
// doRequest builds and sends the HTTP GET request for this call and
// returns the raw *http.Response.
func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
	// Merge caller-supplied headers (see Header); these can override the
	// x-goog-api-client value set above, but not the headers set below.
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
	}
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/operations/{+name}")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	// Substitute the {+name} path template with the operation name.
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
1116
1117// Do executes the "speech.operations.get" call.
1118// Exactly one of *Operation or error will be non-nil. Any non-2xx
1119// status code is an error. Response headers are in either
1120// *Operation.ServerResponse.Header or (if a response was returned at
1121// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
1122// to check whether the returned error was because
1123// http.StatusNotModified was returned.
1124func (c *OperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
1125	gensupport.SetOptions(c.urlParams_, opts...)
1126	res, err := c.doRequest("json")
1127	if res != nil && res.StatusCode == http.StatusNotModified {
1128		if res.Body != nil {
1129			res.Body.Close()
1130		}
1131		return nil, &googleapi.Error{
1132			Code:   res.StatusCode,
1133			Header: res.Header,
1134		}
1135	}
1136	if err != nil {
1137		return nil, err
1138	}
1139	defer googleapi.CloseBody(res)
1140	if err := googleapi.CheckResponse(res); err != nil {
1141		return nil, err
1142	}
1143	ret := &Operation{
1144		ServerResponse: googleapi.ServerResponse{
1145			Header:         res.Header,
1146			HTTPStatusCode: res.StatusCode,
1147		},
1148	}
1149	target := &ret
1150	if err := gensupport.DecodeResponse(target, res); err != nil {
1151		return nil, err
1152	}
1153	return ret, nil
1154	// {
1155	//   "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.",
1156	//   "flatPath": "v1/operations/{operationsId}",
1157	//   "httpMethod": "GET",
1158	//   "id": "speech.operations.get",
1159	//   "parameterOrder": [
1160	//     "name"
1161	//   ],
1162	//   "parameters": {
1163	//     "name": {
1164	//       "description": "The name of the operation resource.",
1165	//       "location": "path",
1166	//       "pattern": "^.*$",
1167	//       "required": true,
1168	//       "type": "string"
1169	//     }
1170	//   },
1171	//   "path": "v1/operations/{+name}",
1172	//   "response": {
1173	//     "$ref": "Operation"
1174	//   },
1175	//   "scopes": [
1176	//     "https://www.googleapis.com/auth/cloud-platform"
1177	//   ]
1178	// }
1179
1180}
1181
1182// method id "speech.operations.list":
1183
// OperationsListCall accumulates the parameters of a
// "speech.operations.list" call. Build it with OperationsService.List,
// configure it with the chainable setters, and execute it with Do or
// iterate with Pages.
type OperationsListCall struct {
	s          *Service
	urlParams_ gensupport.URLParams
	// ifNoneMatch_, when non-empty, is sent as the If-None-Match header.
	ifNoneMatch_ string
	// ctx_ is held until Do/doRequest issues the HTTP request.
	ctx_    context.Context
	header_ http.Header
}
1191
1192// List: Lists operations that match the specified filter in the
1193// request. If the server doesn't support this method, it returns
1194// `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to
1195// override the binding to use different resource name schemes, such as
1196// `users/*/operations`. To override the binding, API services can add a
1197// binding such as "/v1/{name=users/*}/operations" to their service
1198// configuration. For backwards compatibility, the default name includes
1199// the operations collection id, however overriding users must ensure
1200// the name binding is the parent resource, without the operations
1201// collection id.
1202func (r *OperationsService) List() *OperationsListCall {
1203	c := &OperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
1204	return c
1205}
1206
1207// Filter sets the optional parameter "filter": The standard list
1208// filter.
1209func (c *OperationsListCall) Filter(filter string) *OperationsListCall {
1210	c.urlParams_.Set("filter", filter)
1211	return c
1212}
1213
1214// Name sets the optional parameter "name": The name of the operation's
1215// parent resource.
1216func (c *OperationsListCall) Name(name string) *OperationsListCall {
1217	c.urlParams_.Set("name", name)
1218	return c
1219}
1220
1221// PageSize sets the optional parameter "pageSize": The standard list
1222// page size.
1223func (c *OperationsListCall) PageSize(pageSize int64) *OperationsListCall {
1224	c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
1225	return c
1226}
1227
1228// PageToken sets the optional parameter "pageToken": The standard list
1229// page token.
1230func (c *OperationsListCall) PageToken(pageToken string) *OperationsListCall {
1231	c.urlParams_.Set("pageToken", pageToken)
1232	return c
1233}
1234
1235// Fields allows partial responses to be retrieved. See
1236// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
1237// for more information.
1238func (c *OperationsListCall) Fields(s ...googleapi.Field) *OperationsListCall {
1239	c.urlParams_.Set("fields", googleapi.CombineFields(s))
1240	return c
1241}
1242
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *OperationsListCall) IfNoneMatch(entityTag string) *OperationsListCall {
	c.ifNoneMatch_ = entityTag
	return c
}
1252
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled. The context is stored on the call and passed to the HTTP
// transport when doRequest sends the request.
func (c *OperationsListCall) Context(ctx context.Context) *OperationsListCall {
	c.ctx_ = ctx
	return c
}
1260
1261// Header returns an http.Header that can be modified by the caller to
1262// add HTTP headers to the request.
1263func (c *OperationsListCall) Header() http.Header {
1264	if c.header_ == nil {
1265		c.header_ = make(http.Header)
1266	}
1267	return c.header_
1268}
1269
// doRequest builds and sends the HTTP GET request for this call and
// returns the raw *http.Response.
func (c *OperationsListCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
	// Merge caller-supplied headers (see Header); these can override the
	// x-goog-api-client value set above, but not the headers set below.
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
	}
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/operations")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
1292
1293// Do executes the "speech.operations.list" call.
1294// Exactly one of *ListOperationsResponse or error will be non-nil. Any
1295// non-2xx status code is an error. Response headers are in either
1296// *ListOperationsResponse.ServerResponse.Header or (if a response was
1297// returned at all) in error.(*googleapi.Error).Header. Use
1298// googleapi.IsNotModified to check whether the returned error was
1299// because http.StatusNotModified was returned.
1300func (c *OperationsListCall) Do(opts ...googleapi.CallOption) (*ListOperationsResponse, error) {
1301	gensupport.SetOptions(c.urlParams_, opts...)
1302	res, err := c.doRequest("json")
1303	if res != nil && res.StatusCode == http.StatusNotModified {
1304		if res.Body != nil {
1305			res.Body.Close()
1306		}
1307		return nil, &googleapi.Error{
1308			Code:   res.StatusCode,
1309			Header: res.Header,
1310		}
1311	}
1312	if err != nil {
1313		return nil, err
1314	}
1315	defer googleapi.CloseBody(res)
1316	if err := googleapi.CheckResponse(res); err != nil {
1317		return nil, err
1318	}
1319	ret := &ListOperationsResponse{
1320		ServerResponse: googleapi.ServerResponse{
1321			Header:         res.Header,
1322			HTTPStatusCode: res.StatusCode,
1323		},
1324	}
1325	target := &ret
1326	if err := gensupport.DecodeResponse(target, res); err != nil {
1327		return nil, err
1328	}
1329	return ret, nil
1330	// {
1331	//   "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `\"/v1/{name=users/*}/operations\"` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.",
1332	//   "flatPath": "v1/operations",
1333	//   "httpMethod": "GET",
1334	//   "id": "speech.operations.list",
1335	//   "parameterOrder": [],
1336	//   "parameters": {
1337	//     "filter": {
1338	//       "description": "The standard list filter.",
1339	//       "location": "query",
1340	//       "type": "string"
1341	//     },
1342	//     "name": {
1343	//       "description": "The name of the operation's parent resource.",
1344	//       "location": "query",
1345	//       "type": "string"
1346	//     },
1347	//     "pageSize": {
1348	//       "description": "The standard list page size.",
1349	//       "format": "int32",
1350	//       "location": "query",
1351	//       "type": "integer"
1352	//     },
1353	//     "pageToken": {
1354	//       "description": "The standard list page token.",
1355	//       "location": "query",
1356	//       "type": "string"
1357	//     }
1358	//   },
1359	//   "path": "v1/operations",
1360	//   "response": {
1361	//     "$ref": "ListOperationsResponse"
1362	//   },
1363	//   "scopes": [
1364	//     "https://www.googleapis.com/auth/cloud-platform"
1365	//   ]
1366	// }
1367
1368}
1369
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *OperationsListCall) Pages(ctx context.Context, f func(*ListOperationsResponse) error) error {
	c.ctx_ = ctx
	// Restore the caller's original pageToken on exit so the call can be
	// reused; iteration below mutates it page by page.
	defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
	for {
		x, err := c.Do()
		if err != nil {
			return err
		}
		if err := f(x); err != nil {
			return err
		}
		// An empty next-page token marks the final page.
		if x.NextPageToken == "" {
			return nil
		}
		c.PageToken(x.NextPageToken)
	}
}
1390
1391// method id "speech.speech.longrunningrecognize":
1392
// SpeechLongrunningrecognizeCall accumulates the parameters of a
// "speech.speech.longrunningrecognize" call. Build it with
// SpeechService.Longrunningrecognize and configure it with the
// chainable setters.
type SpeechLongrunningrecognizeCall struct {
	s *Service
	// longrunningrecognizerequest is serialized as the JSON request body.
	longrunningrecognizerequest *LongRunningRecognizeRequest
	urlParams_                  gensupport.URLParams
	// ctx_ is held until the request is issued.
	ctx_    context.Context
	header_ http.Header
}
1400
1401// Longrunningrecognize: Performs asynchronous speech recognition:
1402// receive results via the google.longrunning.Operations interface.
1403// Returns either an `Operation.error` or an `Operation.response` which
1404// contains a `LongRunningRecognizeResponse` message. For more
1405// information on asynchronous speech recognition, see the how-to
1406// (https://cloud.google.com/speech-to-text/docs/async-recognize).
1407func (r *SpeechService) Longrunningrecognize(longrunningrecognizerequest *LongRunningRecognizeRequest) *SpeechLongrunningrecognizeCall {
1408	c := &SpeechLongrunningrecognizeCall{s: r.s, urlParams_: make(gensupport.URLParams)}
1409	c.longrunningrecognizerequest = longrunningrecognizerequest
1410	return c
1411}
1412
1413// Fields allows partial responses to be retrieved. See
1414// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
1415// for more information.
1416func (c *SpeechLongrunningrecognizeCall) Fields(s ...googleapi.Field) *SpeechLongrunningrecognizeCall {
1417	c.urlParams_.Set("fields", googleapi.CombineFields(s))
1418	return c
1419}
1420
1421// Context sets the context to be used in this call's Do method. Any
1422// pending HTTP request will be aborted if the provided context is
1423// canceled.
1424func (c *SpeechLongrunningrecognizeCall) Context(ctx context.Context) *SpeechLongrunningrecognizeCall {
1425	c.ctx_ = ctx
1426	return c
1427}
1428
1429// Header returns an http.Header that can be modified by the caller to
1430// add HTTP headers to the request.
1431func (c *SpeechLongrunningrecognizeCall) Header() http.Header {
1432	if c.header_ == nil {
1433		c.header_ = make(http.Header)
1434	}
1435	return c.header_
1436}
1437
1438func (c *SpeechLongrunningrecognizeCall) doRequest(alt string) (*http.Response, error) {
1439	reqHeaders := make(http.Header)
1440	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
1441	for k, v := range c.header_ {
1442		reqHeaders[k] = v
1443	}
1444	reqHeaders.Set("User-Agent", c.s.userAgent())
1445	var body io.Reader = nil
1446	body, err := googleapi.WithoutDataWrapper.JSONReader(c.longrunningrecognizerequest)
1447	if err != nil {
1448		return nil, err
1449	}
1450	reqHeaders.Set("Content-Type", "application/json")
1451	c.urlParams_.Set("alt", alt)
1452	c.urlParams_.Set("prettyPrint", "false")
1453	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/speech:longrunningrecognize")
1454	urls += "?" + c.urlParams_.Encode()
1455	req, err := http.NewRequest("POST", urls, body)
1456	if err != nil {
1457		return nil, err
1458	}
1459	req.Header = reqHeaders
1460	return gensupport.SendRequest(c.ctx_, c.s.client, req)
1461}
1462
1463// Do executes the "speech.speech.longrunningrecognize" call.
1464// Exactly one of *Operation or error will be non-nil. Any non-2xx
1465// status code is an error. Response headers are in either
1466// *Operation.ServerResponse.Header or (if a response was returned at
1467// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
1468// to check whether the returned error was because
1469// http.StatusNotModified was returned.
1470func (c *SpeechLongrunningrecognizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
1471	gensupport.SetOptions(c.urlParams_, opts...)
1472	res, err := c.doRequest("json")
1473	if res != nil && res.StatusCode == http.StatusNotModified {
1474		if res.Body != nil {
1475			res.Body.Close()
1476		}
1477		return nil, &googleapi.Error{
1478			Code:   res.StatusCode,
1479			Header: res.Header,
1480		}
1481	}
1482	if err != nil {
1483		return nil, err
1484	}
1485	defer googleapi.CloseBody(res)
1486	if err := googleapi.CheckResponse(res); err != nil {
1487		return nil, err
1488	}
1489	ret := &Operation{
1490		ServerResponse: googleapi.ServerResponse{
1491			Header:         res.Header,
1492			HTTPStatusCode: res.StatusCode,
1493		},
1494	}
1495	target := &ret
1496	if err := gensupport.DecodeResponse(target, res); err != nil {
1497		return nil, err
1498	}
1499	return ret, nil
1500	// {
1501	//   "description": "Performs asynchronous speech recognition: receive results via the google.longrunning.Operations interface. Returns either an `Operation.error` or an `Operation.response` which contains a `LongRunningRecognizeResponse` message. For more information on asynchronous speech recognition, see the [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize).",
1502	//   "flatPath": "v1/speech:longrunningrecognize",
1503	//   "httpMethod": "POST",
1504	//   "id": "speech.speech.longrunningrecognize",
1505	//   "parameterOrder": [],
1506	//   "parameters": {},
1507	//   "path": "v1/speech:longrunningrecognize",
1508	//   "request": {
1509	//     "$ref": "LongRunningRecognizeRequest"
1510	//   },
1511	//   "response": {
1512	//     "$ref": "Operation"
1513	//   },
1514	//   "scopes": [
1515	//     "https://www.googleapis.com/auth/cloud-platform"
1516	//   ]
1517	// }
1518
1519}
1520
1521// method id "speech.speech.recognize":
1522
// SpeechRecognizeCall holds the state of a pending
// "speech.speech.recognize" API call, built by SpeechService.Recognize
// and executed by Do.
type SpeechRecognizeCall struct {
	s                *Service          // client service this call was created from
	recognizerequest *RecognizeRequest // JSON-encoded as the request body
	urlParams_       gensupport.URLParams // query parameters (fields, alt, prettyPrint, ...)
	ctx_             context.Context   // optional context set via Context
	header_          http.Header       // extra HTTP headers, lazily created by Header
}
1530
1531// Recognize: Performs synchronous speech recognition: receive results
1532// after all audio has been sent and processed.
1533func (r *SpeechService) Recognize(recognizerequest *RecognizeRequest) *SpeechRecognizeCall {
1534	c := &SpeechRecognizeCall{s: r.s, urlParams_: make(gensupport.URLParams)}
1535	c.recognizerequest = recognizerequest
1536	return c
1537}
1538
1539// Fields allows partial responses to be retrieved. See
1540// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
1541// for more information.
1542func (c *SpeechRecognizeCall) Fields(s ...googleapi.Field) *SpeechRecognizeCall {
1543	c.urlParams_.Set("fields", googleapi.CombineFields(s))
1544	return c
1545}
1546
1547// Context sets the context to be used in this call's Do method. Any
1548// pending HTTP request will be aborted if the provided context is
1549// canceled.
1550func (c *SpeechRecognizeCall) Context(ctx context.Context) *SpeechRecognizeCall {
1551	c.ctx_ = ctx
1552	return c
1553}
1554
1555// Header returns an http.Header that can be modified by the caller to
1556// add HTTP headers to the request.
1557func (c *SpeechRecognizeCall) Header() http.Header {
1558	if c.header_ == nil {
1559		c.header_ = make(http.Header)
1560	}
1561	return c.header_
1562}
1563
1564func (c *SpeechRecognizeCall) doRequest(alt string) (*http.Response, error) {
1565	reqHeaders := make(http.Header)
1566	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
1567	for k, v := range c.header_ {
1568		reqHeaders[k] = v
1569	}
1570	reqHeaders.Set("User-Agent", c.s.userAgent())
1571	var body io.Reader = nil
1572	body, err := googleapi.WithoutDataWrapper.JSONReader(c.recognizerequest)
1573	if err != nil {
1574		return nil, err
1575	}
1576	reqHeaders.Set("Content-Type", "application/json")
1577	c.urlParams_.Set("alt", alt)
1578	c.urlParams_.Set("prettyPrint", "false")
1579	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/speech:recognize")
1580	urls += "?" + c.urlParams_.Encode()
1581	req, err := http.NewRequest("POST", urls, body)
1582	if err != nil {
1583		return nil, err
1584	}
1585	req.Header = reqHeaders
1586	return gensupport.SendRequest(c.ctx_, c.s.client, req)
1587}
1588
1589// Do executes the "speech.speech.recognize" call.
1590// Exactly one of *RecognizeResponse or error will be non-nil. Any
1591// non-2xx status code is an error. Response headers are in either
1592// *RecognizeResponse.ServerResponse.Header or (if a response was
1593// returned at all) in error.(*googleapi.Error).Header. Use
1594// googleapi.IsNotModified to check whether the returned error was
1595// because http.StatusNotModified was returned.
1596func (c *SpeechRecognizeCall) Do(opts ...googleapi.CallOption) (*RecognizeResponse, error) {
1597	gensupport.SetOptions(c.urlParams_, opts...)
1598	res, err := c.doRequest("json")
1599	if res != nil && res.StatusCode == http.StatusNotModified {
1600		if res.Body != nil {
1601			res.Body.Close()
1602		}
1603		return nil, &googleapi.Error{
1604			Code:   res.StatusCode,
1605			Header: res.Header,
1606		}
1607	}
1608	if err != nil {
1609		return nil, err
1610	}
1611	defer googleapi.CloseBody(res)
1612	if err := googleapi.CheckResponse(res); err != nil {
1613		return nil, err
1614	}
1615	ret := &RecognizeResponse{
1616		ServerResponse: googleapi.ServerResponse{
1617			Header:         res.Header,
1618			HTTPStatusCode: res.StatusCode,
1619		},
1620	}
1621	target := &ret
1622	if err := gensupport.DecodeResponse(target, res); err != nil {
1623		return nil, err
1624	}
1625	return ret, nil
1626	// {
1627	//   "description": "Performs synchronous speech recognition: receive results after all audio has been sent and processed.",
1628	//   "flatPath": "v1/speech:recognize",
1629	//   "httpMethod": "POST",
1630	//   "id": "speech.speech.recognize",
1631	//   "parameterOrder": [],
1632	//   "parameters": {},
1633	//   "path": "v1/speech:recognize",
1634	//   "request": {
1635	//     "$ref": "RecognizeRequest"
1636	//   },
1637	//   "response": {
1638	//     "$ref": "RecognizeResponse"
1639	//   },
1640	//   "scopes": [
1641	//     "https://www.googleapis.com/auth/cloud-platform"
1642	//   ]
1643	// }
1644
1645}
1646