1// Copyright 2021 Google LLC.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// Code generated file. DO NOT EDIT.
6
7// Package speech provides access to the Cloud Speech-to-Text API.
8//
9// This package is DEPRECATED. Use package cloud.google.com/go/speech/apiv1 instead.
10//
11// For product documentation, see: https://cloud.google.com/speech-to-text/docs/quickstart-protocol
12//
13// Creating a client
14//
15// Usage example:
16//
17//   import "google.golang.org/api/speech/v1"
18//   ...
19//   ctx := context.Background()
20//   speechService, err := speech.NewService(ctx)
21//
22// In this example, Google Application Default Credentials are used for authentication.
23//
24// For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials.
25//
26// Other authentication options
27//
28// To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey:
29//
30//   speechService, err := speech.NewService(ctx, option.WithAPIKey("AIza..."))
31//
32// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource:
33//
34//   config := &oauth2.Config{...}
35//   // ...
36//   token, err := config.Exchange(ctx, ...)
37//   speechService, err := speech.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token)))
38//
39// See https://godoc.org/google.golang.org/api/option/ for details on options.
40package speech // import "google.golang.org/api/speech/v1"
41
42import (
43	"bytes"
44	"context"
45	"encoding/json"
46	"errors"
47	"fmt"
48	"io"
49	"net/http"
50	"net/url"
51	"strconv"
52	"strings"
53
54	googleapi "google.golang.org/api/googleapi"
55	gensupport "google.golang.org/api/internal/gensupport"
56	option "google.golang.org/api/option"
57	internaloption "google.golang.org/api/option/internaloption"
58	htransport "google.golang.org/api/transport/http"
59)
60
61// Always reference these packages, just in case the auto-generated code
62// below doesn't.
63var _ = bytes.NewBuffer
64var _ = strconv.Itoa
65var _ = fmt.Sprintf
66var _ = json.NewDecoder
67var _ = io.Copy
68var _ = url.Parse
69var _ = gensupport.MarshalJSON
70var _ = googleapi.Version
71var _ = errors.New
72var _ = strings.Replace
73var _ = context.Canceled
74var _ = internaloption.WithDefaultEndpoint
75
// Identity and endpoint constants for the Cloud Speech-to-Text API.
const (
	apiId        = "speech:v1"
	apiName      = "speech"
	apiVersion   = "v1"
	basePath     = "https://speech.googleapis.com/"
	mtlsBasePath = "https://speech.mtls.googleapis.com/"
)
81
// OAuth2 scopes used by this API.
const (
	// CloudPlatformScope grants permission to see, edit, configure, and
	// delete your Google Cloud data and see the email address for your
	// Google Account.
	CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform"
)
88
89// NewService creates a new Service.
90func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, error) {
91	scopesOption := option.WithScopes(
92		"https://www.googleapis.com/auth/cloud-platform",
93	)
94	// NOTE: prepend, so we don't override user-specified scopes.
95	opts = append([]option.ClientOption{scopesOption}, opts...)
96	opts = append(opts, internaloption.WithDefaultEndpoint(basePath))
97	opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath))
98	client, endpoint, err := htransport.NewClient(ctx, opts...)
99	if err != nil {
100		return nil, err
101	}
102	s, err := New(client)
103	if err != nil {
104		return nil, err
105	}
106	if endpoint != "" {
107		s.BasePath = endpoint
108	}
109	return s, nil
110}
111
112// New creates a new Service. It uses the provided http.Client for requests.
113//
114// Deprecated: please use NewService instead.
115// To provide a custom HTTP client, use option.WithHTTPClient.
116// If you are using google.golang.org/api/googleapis/transport.APIKey, use option.WithAPIKey with NewService instead.
117func New(client *http.Client) (*Service, error) {
118	if client == nil {
119		return nil, errors.New("client is nil")
120	}
121	s := &Service{client: client, BasePath: basePath}
122	s.Operations = NewOperationsService(s)
123	s.Speech = NewSpeechService(s)
124	return s, nil
125}
126
// Service is the root client for the Cloud Speech-to-Text API.
// Construct it with NewService (or the deprecated New); the exported
// sub-service fields group the API's methods.
type Service struct {
	client    *http.Client
	BasePath  string // API endpoint base URL
	UserAgent string // optional additional User-Agent fragment

	// Operations provides access to long-running operation methods.
	Operations *OperationsService

	// Speech provides access to speech recognition methods.
	Speech *SpeechService
}
136
137func (s *Service) userAgent() string {
138	if s.UserAgent == "" {
139		return googleapi.UserAgent
140	}
141	return googleapi.UserAgent + " " + s.UserAgent
142}
143
144func NewOperationsService(s *Service) *OperationsService {
145	rs := &OperationsService{s: s}
146	return rs
147}
148
149type OperationsService struct {
150	s *Service
151}
152
153func NewSpeechService(s *Service) *SpeechService {
154	rs := &SpeechService{s: s}
155	return rs
156}
157
158type SpeechService struct {
159	s *Service
160}
161
// ListOperationsResponse: The response message for
// Operations.ListOperations.
//
// ForceSendFields and NullFields adjust how the struct is serialized;
// see this type's MarshalJSON method.
type ListOperationsResponse struct {
	// NextPageToken: The standard List next-page token.
	NextPageToken string `json:"nextPageToken,omitempty"`

	// Operations: A list of operations that matches the specified filter in
	// the request.
	Operations []*Operation `json:"operations,omitempty"`

	// ServerResponse contains the HTTP response code and headers from the
	// server. It is excluded from the JSON payload via the `json:"-"` tag.
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields is a list of field names (e.g. "NextPageToken") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "NextPageToken") to include
	// in API requests with the JSON null value. By default, fields with
	// empty values are omitted from API requests. However, any field with
	// an empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
192
193func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) {
194	type NoMethod ListOperationsResponse
195	raw := NoMethod(*s)
196	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
197}
198
// LongRunningRecognizeMetadata: Describes the progress of a
// long-running `LongRunningRecognize` call. It is included in the
// `metadata` field of the `Operation` returned by the `GetOperation`
// call of the `google::longrunning::Operations` service.
//
// ForceSendFields and NullFields adjust how the struct is serialized;
// see this type's MarshalJSON method.
type LongRunningRecognizeMetadata struct {
	// LastUpdateTime: Time of the most recent processing update.
	LastUpdateTime string `json:"lastUpdateTime,omitempty"`

	// ProgressPercent: Approximate percentage of audio processed thus far.
	// Guaranteed to be 100 when the audio is fully processed and the
	// results are available.
	ProgressPercent int64 `json:"progressPercent,omitempty"`

	// StartTime: Time when the request was received.
	StartTime string `json:"startTime,omitempty"`

	// Uri: Output only. The URI of the audio file being transcribed. Empty
	// if the audio was sent as byte content.
	Uri string `json:"uri,omitempty"`

	// ForceSendFields is a list of field names (e.g. "LastUpdateTime") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "LastUpdateTime") to
	// include in API requests with the JSON null value. By default, fields
	// with empty values are omitted from API requests. However, any field
	// with an empty value appearing in NullFields will be sent to the
	// server as null. It is an error if a field in this list has a
	// non-empty value. This may be used to include null fields in Patch
	// requests.
	NullFields []string `json:"-"`
}
236
237func (s *LongRunningRecognizeMetadata) MarshalJSON() ([]byte, error) {
238	type NoMethod LongRunningRecognizeMetadata
239	raw := NoMethod(*s)
240	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
241}
242
// LongRunningRecognizeRequest: The top-level message sent by the client
// for the `LongRunningRecognize` method.
//
// ForceSendFields and NullFields adjust how the struct is serialized;
// see this type's MarshalJSON method.
type LongRunningRecognizeRequest struct {
	// Audio: Required. The audio data to be recognized.
	Audio *RecognitionAudio `json:"audio,omitempty"`

	// Config: Required. Provides information to the recognizer that
	// specifies how to process the request.
	Config *RecognitionConfig `json:"config,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Audio") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Audio") to include in API
	// requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
269
270func (s *LongRunningRecognizeRequest) MarshalJSON() ([]byte, error) {
271	type NoMethod LongRunningRecognizeRequest
272	raw := NoMethod(*s)
273	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
274}
275
// LongRunningRecognizeResponse: The only message returned to the client
// by the `LongRunningRecognize` method. It contains the result as zero
// or more sequential `SpeechRecognitionResult` messages. It is included
// in the `result.response` field of the `Operation` returned by the
// `GetOperation` call of the `google::longrunning::Operations` service.
//
// ForceSendFields and NullFields adjust how the struct is serialized;
// see this type's MarshalJSON method.
type LongRunningRecognizeResponse struct {
	// Results: Sequential list of transcription results corresponding to
	// sequential portions of audio.
	Results []*SpeechRecognitionResult `json:"results,omitempty"`

	// TotalBilledTime: When available, billed audio seconds for the
	// corresponding request.
	TotalBilledTime string `json:"totalBilledTime,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Results") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Results") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
306
307func (s *LongRunningRecognizeResponse) MarshalJSON() ([]byte, error) {
308	type NoMethod LongRunningRecognizeResponse
309	raw := NoMethod(*s)
310	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
311}
312
// Operation: This resource represents a long-running operation that is
// the result of a network API call.
//
// ForceSendFields and NullFields adjust how the struct is serialized;
// see this type's MarshalJSON method.
type Operation struct {
	// Done: If the value is `false`, it means the operation is still in
	// progress. If `true`, the operation is completed, and either `error`
	// or `response` is available.
	Done bool `json:"done,omitempty"`

	// Error: The error result of the operation in case of failure or
	// cancellation.
	Error *Status `json:"error,omitempty"`

	// Metadata: Service-specific metadata associated with the operation. It
	// typically contains progress information and common metadata such as
	// create time. Some services might not provide such metadata. Any
	// method that returns a long-running operation should document the
	// metadata type, if any. Kept as raw JSON so the caller can decode it
	// into the appropriate service-specific type.
	Metadata googleapi.RawMessage `json:"metadata,omitempty"`

	// Name: The server-assigned name, which is only unique within the same
	// service that originally returns it. If you use the default HTTP
	// mapping, the `name` should be a resource name ending with
	// `operations/{unique_id}`.
	Name string `json:"name,omitempty"`

	// Response: The normal response of the operation in case of success. If
	// the original method returns no data on success, such as `Delete`, the
	// response is `google.protobuf.Empty`. If the original method is
	// standard `Get`/`Create`/`Update`, the response should be the
	// resource. For other methods, the response should have the type
	// `XxxResponse`, where `Xxx` is the original method name. For example,
	// if the original method name is `TakeSnapshot()`, the inferred
	// response type is `TakeSnapshotResponse`. Kept as raw JSON so the
	// caller can decode it into the appropriate type.
	Response googleapi.RawMessage `json:"response,omitempty"`

	// ServerResponse contains the HTTP response code and headers from the
	// server. It is excluded from the JSON payload via the `json:"-"` tag.
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields is a list of field names (e.g. "Done") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Done") to include in API
	// requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
368
369func (s *Operation) MarshalJSON() ([]byte, error) {
370	type NoMethod Operation
371	raw := NoMethod(*s)
372	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
373}
374
// RecognitionAudio: Contains audio data in the encoding specified in
// the `RecognitionConfig`. Either `content` or `uri` must be supplied.
// Supplying both or neither returns google.rpc.Code.INVALID_ARGUMENT.
// See content limits
// (https://cloud.google.com/speech-to-text/quotas#content).
//
// ForceSendFields and NullFields adjust how the struct is serialized;
// see this type's MarshalJSON method.
type RecognitionAudio struct {
	// Content: The audio data bytes encoded as specified in
	// `RecognitionConfig`. Note: as with all bytes fields, proto buffers
	// use a pure binary representation, whereas JSON representations use
	// base64.
	Content string `json:"content,omitempty"`

	// Uri: URI that points to a file that contains audio data bytes as
	// specified in `RecognitionConfig`. The file must not be compressed
	// (for example, gzip). Currently, only Google Cloud Storage URIs are
	// supported, which must be specified in the following format:
	// `gs://bucket_name/object_name` (other URI formats return
	// google.rpc.Code.INVALID_ARGUMENT). For more information, see Request
	// URIs (https://cloud.google.com/storage/docs/reference-uris).
	Uri string `json:"uri,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Content") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Content") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
412
413func (s *RecognitionAudio) MarshalJSON() ([]byte, error) {
414	type NoMethod RecognitionAudio
415	raw := NoMethod(*s)
416	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
417}
418
// RecognitionConfig: Provides information to the recognizer that
// specifies how to process the request.
//
// ForceSendFields and NullFields adjust how the struct is serialized;
// see this type's MarshalJSON method.
type RecognitionConfig struct {
	// AudioChannelCount: The number of channels in the input audio data.
	// ONLY set this for MULTI-CHANNEL recognition. Valid values for
	// LINEAR16 and FLAC are `1`-`8`. Valid values for OGG_OPUS are
	// '1'-'254'. Valid value for MULAW, AMR, AMR_WB and
	// SPEEX_WITH_HEADER_BYTE is only `1`. If `0` or omitted, defaults to
	// one channel (mono). Note: We only recognize the first channel by
	// default. To perform independent recognition on each channel set
	// `enable_separate_recognition_per_channel` to 'true'.
	AudioChannelCount int64 `json:"audioChannelCount,omitempty"`

	// DiarizationConfig: Config to enable speaker diarization and set
	// additional parameters to make diarization better suited for your
	// application. Note: When this is enabled, we send all the words from
	// the beginning of the audio for the top alternative in every
	// consecutive STREAMING responses. This is done in order to improve our
	// speaker tags as our models learn to identify the speakers in the
	// conversation over time. For non-streaming requests, the diarization
	// results will be provided only in the top alternative of the FINAL
	// SpeechRecognitionResult.
	DiarizationConfig *SpeakerDiarizationConfig `json:"diarizationConfig,omitempty"`

	// EnableAutomaticPunctuation: If 'true', adds punctuation to
	// recognition result hypotheses. This feature is only available in
	// select languages. Setting this for requests in other languages has no
	// effect at all. The default 'false' value does not add punctuation to
	// result hypotheses.
	EnableAutomaticPunctuation bool `json:"enableAutomaticPunctuation,omitempty"`

	// EnableSeparateRecognitionPerChannel: This needs to be set to `true`
	// explicitly and `audio_channel_count` > 1 to get each channel
	// recognized separately. The recognition result will contain a
	// `channel_tag` field to state which channel that result belongs to. If
	// this is not true, we will only recognize the first channel. The
	// request is billed cumulatively for all channels recognized:
	// `audio_channel_count` multiplied by the length of the audio.
	EnableSeparateRecognitionPerChannel bool `json:"enableSeparateRecognitionPerChannel,omitempty"`

	// EnableWordTimeOffsets: If `true`, the top result includes a list of
	// words and the start and end time offsets (timestamps) for those
	// words. If `false`, no word-level time offset information is returned.
	// The default is `false`.
	EnableWordTimeOffsets bool `json:"enableWordTimeOffsets,omitempty"`

	// Encoding: Encoding of audio data sent in all `RecognitionAudio`
	// messages. This field is optional for `FLAC` and `WAV` audio files and
	// required for all other audio formats. For details, see AudioEncoding.
	//
	// Possible values:
	//   "ENCODING_UNSPECIFIED" - Not specified.
	//   "LINEAR16" - Uncompressed 16-bit signed little-endian samples
	// (Linear PCM).
	//   "FLAC" - `FLAC` (Free Lossless Audio Codec) is the recommended
	// encoding because it is lossless--therefore recognition is not
	// compromised--and requires only about half the bandwidth of
	// `LINEAR16`. `FLAC` stream encoding supports 16-bit and 24-bit
	// samples, however, not all fields in `STREAMINFO` are supported.
	//   "MULAW" - 8-bit samples that compand 14-bit audio samples using
	// G.711 PCMU/mu-law.
	//   "AMR" - Adaptive Multi-Rate Narrowband codec. `sample_rate_hertz`
	// must be 8000.
	//   "AMR_WB" - Adaptive Multi-Rate Wideband codec. `sample_rate_hertz`
	// must be 16000.
	//   "OGG_OPUS" - Opus encoded audio frames in Ogg container
	// ([OggOpus](https://wiki.xiph.org/OggOpus)). `sample_rate_hertz` must
	// be one of 8000, 12000, 16000, 24000, or 48000.
	//   "SPEEX_WITH_HEADER_BYTE" - Although the use of lossy encodings is
	// not recommended, if a very low bitrate encoding is required,
	// `OGG_OPUS` is highly preferred over Speex encoding. The
	// [Speex](https://speex.org/) encoding supported by Cloud Speech API
	// has a header byte in each block, as in MIME type
	// `audio/x-speex-with-header-byte`. It is a variant of the RTP Speex
	// encoding defined in [RFC 5574](https://tools.ietf.org/html/rfc5574).
	// The stream is a sequence of blocks, one block per RTP packet. Each
	// block starts with a byte containing the length of the block, in
	// bytes, followed by one or more frames of Speex data, padded to an
	// integral number of bytes (octets) as specified in RFC 5574. In other
	// words, each RTP header is replaced with a single byte containing the
	// block length. Only Speex wideband is supported. `sample_rate_hertz`
	// must be 16000.
	Encoding string `json:"encoding,omitempty"`

	// LanguageCode: Required. The language of the supplied audio as a
	// BCP-47 (https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
	// Example: "en-US". See Language Support
	// (https://cloud.google.com/speech-to-text/docs/languages) for a list
	// of the currently supported language codes.
	LanguageCode string `json:"languageCode,omitempty"`

	// MaxAlternatives: Maximum number of recognition hypotheses to be
	// returned. Specifically, the maximum number of
	// `SpeechRecognitionAlternative` messages within each
	// `SpeechRecognitionResult`. The server may return fewer than
	// `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1`
	// will return a maximum of one. If omitted, will return a maximum of
	// one.
	MaxAlternatives int64 `json:"maxAlternatives,omitempty"`

	// Metadata: Metadata regarding this request.
	Metadata *RecognitionMetadata `json:"metadata,omitempty"`

	// Model: Which model to select for the given request. Select the model
	// best suited to your domain to get best results. If a model is not
	// explicitly specified, then we auto-select a model based on the
	// parameters in the RecognitionConfig. *Model* *Description*
	// command_and_search Best for short queries such as voice commands or
	// voice search. phone_call Best for audio that originated from a phone
	// call (typically recorded at an 8khz sampling rate). video Best for
	// audio that originated from video or includes multiple speakers.
	// Ideally the audio is recorded at a 16khz or greater sampling rate.
	// This is a premium model that costs more than the standard rate.
	// default Best for audio that is not one of the specific audio models.
	// For example, long-form audio. Ideally the audio is high-fidelity,
	// recorded at a 16khz or greater sampling rate.
	Model string `json:"model,omitempty"`

	// ProfanityFilter: If set to `true`, the server will attempt to filter
	// out profanities, replacing all but the initial character in each
	// filtered word with asterisks, e.g. "f***". If set to `false` or
	// omitted, profanities won't be filtered out.
	ProfanityFilter bool `json:"profanityFilter,omitempty"`

	// SampleRateHertz: Sample rate in Hertz of the audio data sent in all
	// `RecognitionAudio` messages. Valid values are: 8000-48000. 16000 is
	// optimal. For best results, set the sampling rate of the audio source
	// to 16000 Hz. If that's not possible, use the native sample rate of
	// the audio source (instead of re-sampling). This field is optional for
	// FLAC and WAV audio files, but is required for all other audio
	// formats. For details, see AudioEncoding.
	SampleRateHertz int64 `json:"sampleRateHertz,omitempty"`

	// SpeechContexts: Array of SpeechContext. A means to provide context to
	// assist the speech recognition. For more information, see speech
	// adaptation (https://cloud.google.com/speech-to-text/docs/adaptation).
	SpeechContexts []*SpeechContext `json:"speechContexts,omitempty"`

	// UseEnhanced: Set to true to use an enhanced model for speech
	// recognition. If `use_enhanced` is set to true and the `model` field
	// is not set, then an appropriate enhanced model is chosen if an
	// enhanced model exists for the audio. If `use_enhanced` is true and an
	// enhanced version of the specified model does not exist, then the
	// speech is recognized using the standard version of the specified
	// model.
	UseEnhanced bool `json:"useEnhanced,omitempty"`

	// ForceSendFields is a list of field names (e.g. "AudioChannelCount")
	// to unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "AudioChannelCount") to
	// include in API requests with the JSON null value. By default, fields
	// with empty values are omitted from API requests. However, any field
	// with an empty value appearing in NullFields will be sent to the
	// server as null. It is an error if a field in this list has a
	// non-empty value. This may be used to include null fields in Patch
	// requests.
	NullFields []string `json:"-"`
}
583
584func (s *RecognitionConfig) MarshalJSON() ([]byte, error) {
585	type NoMethod RecognitionConfig
586	raw := NoMethod(*s)
587	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
588}
589
// RecognitionMetadata: Description of audio data to be recognized.
//
// ForceSendFields and NullFields adjust how the struct is serialized;
// see this type's MarshalJSON method.
type RecognitionMetadata struct {
	// AudioTopic: Description of the content. Eg. "Recordings of federal
	// supreme court hearings from 2012".
	AudioTopic string `json:"audioTopic,omitempty"`

	// IndustryNaicsCodeOfAudio: The industry vertical to which this speech
	// recognition request most closely applies. This is most indicative of
	// the topics contained in the audio. Use the 6-digit NAICS code to
	// identify the industry vertical - see https://www.naics.com/search/.
	IndustryNaicsCodeOfAudio int64 `json:"industryNaicsCodeOfAudio,omitempty"`

	// InteractionType: The use case most closely describing the audio
	// content to be recognized.
	//
	// Possible values:
	//   "INTERACTION_TYPE_UNSPECIFIED" - Use case is either unknown or is
	// something other than one of the other values below.
	//   "DISCUSSION" - Multiple people in a conversation or discussion. For
	// example in a meeting with two or more people actively participating.
	// Typically all the primary people speaking would be in the same room
	// (if not, see PHONE_CALL)
	//   "PRESENTATION" - One or more persons lecturing or presenting to
	// others, mostly uninterrupted.
	//   "PHONE_CALL" - A phone-call or video-conference in which two or
	// more people, who are not in the same room, are actively
	// participating.
	//   "VOICEMAIL" - A recorded message intended for another person to
	// listen to.
	//   "PROFESSIONALLY_PRODUCED" - Professionally produced audio (eg. TV
	// Show, Podcast).
	//   "VOICE_SEARCH" - Transcribe spoken questions and queries into text.
	//   "VOICE_COMMAND" - Transcribe voice commands, such as for
	// controlling a device.
	//   "DICTATION" - Transcribe speech to text to create a written
	// document, such as a text-message, email or report.
	InteractionType string `json:"interactionType,omitempty"`

	// MicrophoneDistance: The audio type that most closely describes the
	// audio being recognized.
	//
	// Possible values:
	//   "MICROPHONE_DISTANCE_UNSPECIFIED" - Audio type is not known.
	//   "NEARFIELD" - The audio was captured from a closely placed
	// microphone. Eg. phone, dictaphone, or handheld microphone. Generally
	// if there speaker is within 1 meter of the microphone.
	//   "MIDFIELD" - The speaker if within 3 meters of the microphone.
	//   "FARFIELD" - The speaker is more than 3 meters away from the
	// microphone.
	MicrophoneDistance string `json:"microphoneDistance,omitempty"`

	// OriginalMediaType: The original media the speech was recorded on.
	//
	// Possible values:
	//   "ORIGINAL_MEDIA_TYPE_UNSPECIFIED" - Unknown original media type.
	//   "AUDIO" - The speech data is an audio recording.
	//   "VIDEO" - The speech data originally recorded on a video.
	OriginalMediaType string `json:"originalMediaType,omitempty"`

	// OriginalMimeType: Mime type of the original audio file. For example
	// `audio/m4a`, `audio/x-alaw-basic`, `audio/mp3`, `audio/3gpp`. A list
	// of possible audio mime types is maintained at
	// http://www.iana.org/assignments/media-types/media-types.xhtml#audio
	OriginalMimeType string `json:"originalMimeType,omitempty"`

	// RecordingDeviceName: The device used to make the recording. Examples
	// 'Nexus 5X' or 'Polycom SoundStation IP 6000' or 'POTS' or 'VoIP' or
	// 'Cardioid Microphone'.
	RecordingDeviceName string `json:"recordingDeviceName,omitempty"`

	// RecordingDeviceType: The type of device the speech was recorded with.
	//
	// Possible values:
	//   "RECORDING_DEVICE_TYPE_UNSPECIFIED" - The recording device is
	// unknown.
	//   "SMARTPHONE" - Speech was recorded on a smartphone.
	//   "PC" - Speech was recorded using a personal computer or tablet.
	//   "PHONE_LINE" - Speech was recorded over a phone line.
	//   "VEHICLE" - Speech was recorded in a vehicle.
	//   "OTHER_OUTDOOR_DEVICE" - Speech was recorded outdoors.
	//   "OTHER_INDOOR_DEVICE" - Speech was recorded indoors.
	RecordingDeviceType string `json:"recordingDeviceType,omitempty"`

	// ForceSendFields is a list of field names (e.g. "AudioTopic") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "AudioTopic") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
689
690func (s *RecognitionMetadata) MarshalJSON() ([]byte, error) {
691	type NoMethod RecognitionMetadata
692	raw := NoMethod(*s)
693	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
694}
695
// RecognizeRequest: The top-level message sent by the client for the
// `Recognize` method. Both Audio and Config are required by the API.
type RecognizeRequest struct {
	// Audio: Required. The audio data to be recognized.
	Audio *RecognitionAudio `json:"audio,omitempty"`

	// Config: Required. Provides information to the recognizer that
	// specifies how to process the request.
	Config *RecognitionConfig `json:"config,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Audio") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Audio") to include in API
	// requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
722
723func (s *RecognizeRequest) MarshalJSON() ([]byte, error) {
724	type NoMethod RecognizeRequest
725	raw := NoMethod(*s)
726	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
727}
728
// RecognizeResponse: The only message returned to the client by the
// `Recognize` method. It contains the result as zero or more sequential
// `SpeechRecognitionResult` messages.
type RecognizeResponse struct {
	// Results: Sequential list of transcription results corresponding to
	// sequential portions of audio.
	Results []*SpeechRecognitionResult `json:"results,omitempty"`

	// TotalBilledTime: When available, billed audio seconds for the
	// corresponding request.
	// NOTE(review): carried as a string — presumably a JSON-encoded
	// duration (e.g. "30s"); confirm against the discovery document.
	TotalBilledTime string `json:"totalBilledTime,omitempty"`

	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields is a list of field names (e.g. "Results") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Results") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
761
762func (s *RecognizeResponse) MarshalJSON() ([]byte, error) {
763	type NoMethod RecognizeResponse
764	raw := NoMethod(*s)
765	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
766}
767
// SpeakerDiarizationConfig: Config to enable speaker diarization.
// Diarization is applied only when EnableSpeakerDiarization is true;
// the Min/MaxSpeakerCount fields bound the speaker search.
type SpeakerDiarizationConfig struct {
	// EnableSpeakerDiarization: If 'true', enables speaker detection for
	// each recognized word in the top alternative of the recognition result
	// using a speaker_tag provided in the WordInfo.
	EnableSpeakerDiarization bool `json:"enableSpeakerDiarization,omitempty"`

	// MaxSpeakerCount: Maximum number of speakers in the conversation. This
	// range gives you more flexibility by allowing the system to
	// automatically determine the correct number of speakers. If not set,
	// the default value is 6.
	MaxSpeakerCount int64 `json:"maxSpeakerCount,omitempty"`

	// MinSpeakerCount: Minimum number of speakers in the conversation. This
	// range gives you more flexibility by allowing the system to
	// automatically determine the correct number of speakers. If not set,
	// the default value is 2.
	MinSpeakerCount int64 `json:"minSpeakerCount,omitempty"`

	// SpeakerTag: Output only. Unused.
	SpeakerTag int64 `json:"speakerTag,omitempty"`

	// ForceSendFields is a list of field names (e.g.
	// "EnableSpeakerDiarization") to unconditionally include in API
	// requests. By default, fields with empty or default values are omitted
	// from API requests. However, any non-pointer, non-interface field
	// appearing in ForceSendFields will be sent to the server regardless of
	// whether the field is empty or not. This may be used to include empty
	// fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "EnableSpeakerDiarization")
	// to include in API requests with the JSON null value. By default,
	// fields with empty values are omitted from API requests. However, any
	// field with an empty value appearing in NullFields will be sent to the
	// server as null. It is an error if a field in this list has a
	// non-empty value. This may be used to include null fields in Patch
	// requests.
	NullFields []string `json:"-"`
}
808
809func (s *SpeakerDiarizationConfig) MarshalJSON() ([]byte, error) {
810	type NoMethod SpeakerDiarizationConfig
811	raw := NoMethod(*s)
812	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
813}
814
// SpeechContext: Provides "hints" to the speech recognizer to favor
// specific words and phrases in the results. See the Phrases field for
// details and the documented usage limits.
type SpeechContext struct {
	// Phrases: A list of strings containing words and phrases "hints" so
	// that the speech recognition is more likely to recognize them. This
	// can be used to improve the accuracy for specific words and phrases,
	// for example, if specific commands are typically spoken by the user.
	// This can also be used to add additional words to the vocabulary of
	// the recognizer. See usage limits
	// (https://cloud.google.com/speech-to-text/quotas#content). List items
	// can also be set to classes for groups of words that represent common
	// concepts that occur in natural language. For example, rather than
	// providing phrase hints for every month of the year, using the $MONTH
	// class improves the likelihood of correctly transcribing audio that
	// includes months.
	Phrases []string `json:"phrases,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Phrases") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Phrases") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
848
849func (s *SpeechContext) MarshalJSON() ([]byte, error) {
850	type NoMethod SpeechContext
851	raw := NoMethod(*s)
852	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
853}
854
// SpeechRecognitionAlternative: Alternative hypotheses (a.k.a. n-best
// list).
type SpeechRecognitionAlternative struct {
	// Confidence: The confidence estimate between 0.0 and 1.0. A higher
	// number indicates an estimated greater likelihood that the recognized
	// words are correct. This field is set only for the top alternative of
	// a non-streaming result or, of a streaming result where
	// `is_final=true`. This field is not guaranteed to be accurate and
	// users should not rely on it to be always provided. The default of 0.0
	// is a sentinel value indicating `confidence` was not set.
	// Decoded via a custom UnmarshalJSON (gensupport.JSONFloat64) so
	// string-encoded JSON floats are accepted.
	Confidence float64 `json:"confidence,omitempty"`

	// Transcript: Transcript text representing the words that the user
	// spoke.
	Transcript string `json:"transcript,omitempty"`

	// Words: A list of word-specific information for each recognized word.
	// Note: When `enable_speaker_diarization` is true, you will see all the
	// words from the beginning of the audio.
	Words []*WordInfo `json:"words,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Confidence") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Confidence") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
892
893func (s *SpeechRecognitionAlternative) MarshalJSON() ([]byte, error) {
894	type NoMethod SpeechRecognitionAlternative
895	raw := NoMethod(*s)
896	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
897}
898
899func (s *SpeechRecognitionAlternative) UnmarshalJSON(data []byte) error {
900	type NoMethod SpeechRecognitionAlternative
901	var s1 struct {
902		Confidence gensupport.JSONFloat64 `json:"confidence"`
903		*NoMethod
904	}
905	s1.NoMethod = (*NoMethod)(s)
906	if err := json.Unmarshal(data, &s1); err != nil {
907		return err
908	}
909	s.Confidence = float64(s1.Confidence)
910	return nil
911}
912
// SpeechRecognitionResult: A speech recognition result corresponding to
// a portion of the audio. Alternatives holds the ranked hypotheses for
// that portion, best first.
type SpeechRecognitionResult struct {
	// Alternatives: May contain one or more recognition hypotheses (up to
	// the maximum specified in `max_alternatives`). These alternatives are
	// ordered in terms of accuracy, with the top (first) alternative being
	// the most probable, as ranked by the recognizer.
	Alternatives []*SpeechRecognitionAlternative `json:"alternatives,omitempty"`

	// ChannelTag: For multi-channel audio, this is the channel number
	// corresponding to the recognized result for the audio from that
	// channel. For audio_channel_count = N, its output values can range
	// from '1' to 'N'.
	ChannelTag int64 `json:"channelTag,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Alternatives") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Alternatives") to include
	// in API requests with the JSON null value. By default, fields with
	// empty values are omitted from API requests. However, any field with
	// an empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
944
945func (s *SpeechRecognitionResult) MarshalJSON() ([]byte, error) {
946	type NoMethod SpeechRecognitionResult
947	raw := NoMethod(*s)
948	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
949}
950
// Status: The `Status` type defines a logical error model that is
// suitable for different programming environments, including REST APIs
// and RPC APIs. It is used by gRPC (https://github.com/grpc). Each
// `Status` message contains three pieces of data: error code, error
// message, and error details. You can find out more about this error
// model and how to work with it in the API Design Guide
// (https://cloud.google.com/apis/design/errors).
type Status struct {
	// Code: The status code, which should be an enum value of
	// google.rpc.Code.
	Code int64 `json:"code,omitempty"`

	// Details: A list of messages that carry the error details. There is a
	// common set of message types for APIs to use. Each entry is kept as
	// raw, unparsed JSON (googleapi.RawMessage).
	Details []googleapi.RawMessage `json:"details,omitempty"`

	// Message: A developer-facing error message, which should be in
	// English. Any user-facing error message should be localized and sent
	// in the google.rpc.Status.details field, or localized by the client.
	Message string `json:"message,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Code") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Code") to include in API
	// requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
988
989func (s *Status) MarshalJSON() ([]byte, error) {
990	type NoMethod Status
991	raw := NoMethod(*s)
992	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
993}
994
// WordInfo: Word-specific information for recognized words.
type WordInfo struct {
	// EndTime: Time offset relative to the beginning of the audio, and
	// corresponding to the end of the spoken word. This field is only set
	// if `enable_word_time_offsets=true` and only in the top hypothesis.
	// This is an experimental feature and the accuracy of the time offset
	// can vary.
	// NOTE(review): carried as a string — presumably a JSON-encoded
	// duration (e.g. "1.500s"); confirm against the discovery document.
	EndTime string `json:"endTime,omitempty"`

	// SpeakerTag: Output only. A distinct integer value is assigned for
	// every speaker within the audio. This field specifies which one of
	// those speakers was detected to have spoken this word. Value ranges
	// from '1' to diarization_speaker_count. speaker_tag is set if
	// enable_speaker_diarization = 'true' and only in the top alternative.
	SpeakerTag int64 `json:"speakerTag,omitempty"`

	// StartTime: Time offset relative to the beginning of the audio, and
	// corresponding to the start of the spoken word. This field is only set
	// if `enable_word_time_offsets=true` and only in the top hypothesis.
	// This is an experimental feature and the accuracy of the time offset
	// can vary.
	// NOTE(review): same string encoding as EndTime.
	StartTime string `json:"startTime,omitempty"`

	// Word: The word corresponding to this set of information.
	Word string `json:"word,omitempty"`

	// ForceSendFields is a list of field names (e.g. "EndTime") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "EndTime") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
1037
1038func (s *WordInfo) MarshalJSON() ([]byte, error) {
1039	type NoMethod WordInfo
1040	raw := NoMethod(*s)
1041	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
1042}
1043
1044// method id "speech.operations.get":
1045
// OperationsGetCall holds the in-progress state of a
// "speech.operations.get" call being built. Create one with
// OperationsService.Get and configure it via its fluent methods before
// calling Do.
type OperationsGetCall struct {
	// s is the parent service; supplies client, base path, and user agent.
	s *Service
	// name is the operation resource name, expanded into the URL path.
	name string
	// urlParams_ collects query parameters (fields, alt, etc.).
	urlParams_ gensupport.URLParams
	// ifNoneMatch_, when non-empty, is sent as an If-None-Match header.
	ifNoneMatch_ string
	// ctx_ is the optional context set via Context.
	ctx_ context.Context
	// header_ holds extra request headers set via Header.
	header_ http.Header
}
1054
1055// Get: Gets the latest state of a long-running operation. Clients can
1056// use this method to poll the operation result at intervals as
1057// recommended by the API service.
1058//
1059// - name: The name of the operation resource.
1060func (r *OperationsService) Get(name string) *OperationsGetCall {
1061	c := &OperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
1062	c.name = name
1063	return c
1064}
1065
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information. The selector is sent as the "fields" query
// parameter; calling Fields again replaces the previous selection.
func (c *OperationsGetCall) Fields(s ...googleapi.Field) *OperationsGetCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}
1073
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *OperationsGetCall) IfNoneMatch(entityTag string) *OperationsGetCall {
	c.ifNoneMatch_ = entityTag
	return c
}
1083
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *OperationsGetCall) Context(ctx context.Context) *OperationsGetCall {
	c.ctx_ = ctx
	return c
}
1091
1092// Header returns an http.Header that can be modified by the caller to
1093// add HTTP headers to the request.
1094func (c *OperationsGetCall) Header() http.Header {
1095	if c.header_ == nil {
1096		c.header_ = make(http.Header)
1097	}
1098	return c.header_
1099}
1100
// doRequest builds and sends the HTTP GET request for this call and
// returns the raw response. alt selects the response wire format (the
// generated Do method always passes "json").
func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
	// Merge caller-supplied headers; these may override x-goog-api-client
	// but User-Agent is (re)set unconditionally below.
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
	}
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/operations/{+name}")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	// Substitute the {+name} template in the path with the operation name.
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
1126
// Do executes the "speech.operations.get" call.
// Exactly one of *Operation or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *OperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 Not Modified (possible when IfNoneMatch was set) has no usable
	// body; report it as a *googleapi.Error so callers can detect it with
	// googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &Operation{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	// DecodeResponse is handed &ret (a **Operation) per the generated-code
	// convention used by gensupport.
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.",
	//   "flatPath": "v1/operations/{operationsId}",
	//   "httpMethod": "GET",
	//   "id": "speech.operations.get",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "The name of the operation resource.",
	//       "location": "path",
	//       "pattern": "^.*$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1/operations/{+name}",
	//   "response": {
	//     "$ref": "Operation"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform"
	//   ]
	// }

}
1191
1192// method id "speech.operations.list":
1193
// OperationsListCall holds the in-progress state of a
// "speech.operations.list" call being built. Create one with
// OperationsService.List and configure it via its fluent methods before
// calling Do or Pages.
type OperationsListCall struct {
	// s is the parent service; supplies client, base path, and user agent.
	s *Service
	// urlParams_ collects query parameters (filter, name, paging, etc.).
	urlParams_ gensupport.URLParams
	// ifNoneMatch_, when non-empty, is sent as an If-None-Match header.
	ifNoneMatch_ string
	// ctx_ is the optional context set via Context.
	ctx_ context.Context
	// header_ holds extra request headers set via Header.
	header_ http.Header
}
1201
// List: Lists operations that match the specified filter in the
// request. If the server doesn't support this method, it returns
// `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to
// override the binding to use different resource name schemes, such as
// `users/*/operations`. To override the binding, API services can add a
// binding such as "/v1/{name=users/*}/operations" to their service
// configuration. For backwards compatibility, the default name includes
// the operations collection id, however overriding users must ensure
// the name binding is the parent resource, without the operations
// collection id.
//
// All parameters are optional; set them with the fluent methods on the
// returned *OperationsListCall.
func (r *OperationsService) List() *OperationsListCall {
	c := &OperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	return c
}
1216
// Filter sets the optional parameter "filter": The standard list
// filter. Calling Filter again replaces the previous value.
func (c *OperationsListCall) Filter(filter string) *OperationsListCall {
	c.urlParams_.Set("filter", filter)
	return c
}
1223
// Name sets the optional parameter "name": The name of the operation's
// parent resource. Sent as a query parameter, not a path segment.
func (c *OperationsListCall) Name(name string) *OperationsListCall {
	c.urlParams_.Set("name", name)
	return c
}
1230
1231// PageSize sets the optional parameter "pageSize": The standard list
1232// page size.
1233func (c *OperationsListCall) PageSize(pageSize int64) *OperationsListCall {
1234	c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
1235	return c
1236}
1237
// PageToken sets the optional parameter "pageToken": The standard list
// page token. Pass the NextPageToken from a previous response to fetch
// the next page.
func (c *OperationsListCall) PageToken(pageToken string) *OperationsListCall {
	c.urlParams_.Set("pageToken", pageToken)
	return c
}
1244
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information. The selector is sent as the "fields" query
// parameter; calling Fields again replaces the previous selection.
func (c *OperationsListCall) Fields(s ...googleapi.Field) *OperationsListCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}
1252
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *OperationsListCall) IfNoneMatch(entityTag string) *OperationsListCall {
	c.ifNoneMatch_ = entityTag
	return c
}
1262
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *OperationsListCall) Context(ctx context.Context) *OperationsListCall {
	c.ctx_ = ctx
	return c
}
1270
1271// Header returns an http.Header that can be modified by the caller to
1272// add HTTP headers to the request.
1273func (c *OperationsListCall) Header() http.Header {
1274	if c.header_ == nil {
1275		c.header_ = make(http.Header)
1276	}
1277	return c.header_
1278}
1279
// doRequest builds and sends the HTTP GET request for this call and
// returns the raw response. alt selects the response wire format (the
// generated Do method always passes "json").
func (c *OperationsListCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
	// Merge caller-supplied headers; these may override x-goog-api-client
	// but User-Agent is (re)set unconditionally below.
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
	}
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/operations")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
1302
// Do executes the "speech.operations.list" call.
// Exactly one of *ListOperationsResponse or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ListOperationsResponse.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *OperationsListCall) Do(opts ...googleapi.CallOption) (*ListOperationsResponse, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 Not Modified (possible when IfNoneMatch was set) has no usable
	// body; report it as a *googleapi.Error so callers can detect it with
	// googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &ListOperationsResponse{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	// DecodeResponse is handed &ret (a **ListOperationsResponse) per the
	// generated-code convention used by gensupport.
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `\"/v1/{name=users/*}/operations\"` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.",
	//   "flatPath": "v1/operations",
	//   "httpMethod": "GET",
	//   "id": "speech.operations.list",
	//   "parameterOrder": [],
	//   "parameters": {
	//     "filter": {
	//       "description": "The standard list filter.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "name": {
	//       "description": "The name of the operation's parent resource.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "pageSize": {
	//       "description": "The standard list page size.",
	//       "format": "int32",
	//       "location": "query",
	//       "type": "integer"
	//     },
	//     "pageToken": {
	//       "description": "The standard list page token.",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1/operations",
	//   "response": {
	//     "$ref": "ListOperationsResponse"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform"
	//   ]
	// }

}
1379
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *OperationsListCall) Pages(ctx context.Context, f func(*ListOperationsResponse) error) error {
	c.ctx_ = ctx
	// Restore the originally requested page token when iteration finishes,
	// so the call value can be reused from its starting point.
	defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
	for {
		x, err := c.Do()
		if err != nil {
			return err
		}
		if err := f(x); err != nil {
			return err
		}
		// An empty next-page token marks the final page.
		if x.NextPageToken == "" {
			return nil
		}
		c.PageToken(x.NextPageToken)
	}
}
1400
1401// method id "speech.speech.longrunningrecognize":
1402
// SpeechLongrunningrecognizeCall holds the in-progress state of a
// "speech.speech.longrunningrecognize" call being built. Create one
// with SpeechService.Longrunningrecognize.
type SpeechLongrunningrecognizeCall struct {
	// s is the parent service; supplies client, base path, and user agent.
	s *Service
	// longrunningrecognizerequest is the JSON request body for the call.
	longrunningrecognizerequest *LongRunningRecognizeRequest
	// urlParams_ collects query parameters (fields, alt, etc.).
	urlParams_ gensupport.URLParams
	// ctx_ is the optional context set via Context.
	ctx_ context.Context
	// header_ holds extra request headers set via Header.
	header_ http.Header
}
1410
1411// Longrunningrecognize: Performs asynchronous speech recognition:
1412// receive results via the google.longrunning.Operations interface.
1413// Returns either an `Operation.error` or an `Operation.response` which
1414// contains a `LongRunningRecognizeResponse` message. For more
1415// information on asynchronous speech recognition, see the how-to
1416// (https://cloud.google.com/speech-to-text/docs/async-recognize).
1417func (r *SpeechService) Longrunningrecognize(longrunningrecognizerequest *LongRunningRecognizeRequest) *SpeechLongrunningrecognizeCall {
1418	c := &SpeechLongrunningrecognizeCall{s: r.s, urlParams_: make(gensupport.URLParams)}
1419	c.longrunningrecognizerequest = longrunningrecognizerequest
1420	return c
1421}
1422
1423// Fields allows partial responses to be retrieved. See
1424// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
1425// for more information.
1426func (c *SpeechLongrunningrecognizeCall) Fields(s ...googleapi.Field) *SpeechLongrunningrecognizeCall {
1427	c.urlParams_.Set("fields", googleapi.CombineFields(s))
1428	return c
1429}
1430
1431// Context sets the context to be used in this call's Do method. Any
1432// pending HTTP request will be aborted if the provided context is
1433// canceled.
1434func (c *SpeechLongrunningrecognizeCall) Context(ctx context.Context) *SpeechLongrunningrecognizeCall {
1435	c.ctx_ = ctx
1436	return c
1437}
1438
1439// Header returns an http.Header that can be modified by the caller to
1440// add HTTP headers to the request.
1441func (c *SpeechLongrunningrecognizeCall) Header() http.Header {
1442	if c.header_ == nil {
1443		c.header_ = make(http.Header)
1444	}
1445	return c.header_
1446}
1447
1448func (c *SpeechLongrunningrecognizeCall) doRequest(alt string) (*http.Response, error) {
1449	reqHeaders := make(http.Header)
1450	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
1451	for k, v := range c.header_ {
1452		reqHeaders[k] = v
1453	}
1454	reqHeaders.Set("User-Agent", c.s.userAgent())
1455	var body io.Reader = nil
1456	body, err := googleapi.WithoutDataWrapper.JSONReader(c.longrunningrecognizerequest)
1457	if err != nil {
1458		return nil, err
1459	}
1460	reqHeaders.Set("Content-Type", "application/json")
1461	c.urlParams_.Set("alt", alt)
1462	c.urlParams_.Set("prettyPrint", "false")
1463	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/speech:longrunningrecognize")
1464	urls += "?" + c.urlParams_.Encode()
1465	req, err := http.NewRequest("POST", urls, body)
1466	if err != nil {
1467		return nil, err
1468	}
1469	req.Header = reqHeaders
1470	return gensupport.SendRequest(c.ctx_, c.s.client, req)
1471}
1472
1473// Do executes the "speech.speech.longrunningrecognize" call.
1474// Exactly one of *Operation or error will be non-nil. Any non-2xx
1475// status code is an error. Response headers are in either
1476// *Operation.ServerResponse.Header or (if a response was returned at
1477// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
1478// to check whether the returned error was because
1479// http.StatusNotModified was returned.
1480func (c *SpeechLongrunningrecognizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
1481	gensupport.SetOptions(c.urlParams_, opts...)
1482	res, err := c.doRequest("json")
1483	if res != nil && res.StatusCode == http.StatusNotModified {
1484		if res.Body != nil {
1485			res.Body.Close()
1486		}
1487		return nil, &googleapi.Error{
1488			Code:   res.StatusCode,
1489			Header: res.Header,
1490		}
1491	}
1492	if err != nil {
1493		return nil, err
1494	}
1495	defer googleapi.CloseBody(res)
1496	if err := googleapi.CheckResponse(res); err != nil {
1497		return nil, err
1498	}
1499	ret := &Operation{
1500		ServerResponse: googleapi.ServerResponse{
1501			Header:         res.Header,
1502			HTTPStatusCode: res.StatusCode,
1503		},
1504	}
1505	target := &ret
1506	if err := gensupport.DecodeResponse(target, res); err != nil {
1507		return nil, err
1508	}
1509	return ret, nil
1510	// {
1511	//   "description": "Performs asynchronous speech recognition: receive results via the google.longrunning.Operations interface. Returns either an `Operation.error` or an `Operation.response` which contains a `LongRunningRecognizeResponse` message. For more information on asynchronous speech recognition, see the [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize).",
1512	//   "flatPath": "v1/speech:longrunningrecognize",
1513	//   "httpMethod": "POST",
1514	//   "id": "speech.speech.longrunningrecognize",
1515	//   "parameterOrder": [],
1516	//   "parameters": {},
1517	//   "path": "v1/speech:longrunningrecognize",
1518	//   "request": {
1519	//     "$ref": "LongRunningRecognizeRequest"
1520	//   },
1521	//   "response": {
1522	//     "$ref": "Operation"
1523	//   },
1524	//   "scopes": [
1525	//     "https://www.googleapis.com/auth/cloud-platform"
1526	//   ]
1527	// }
1528
1529}
1530
1531// method id "speech.speech.recognize":
1532
// SpeechRecognizeCall holds the state for a single
// "speech.speech.recognize" call: the request payload plus per-call
// URL parameters, context, and extra HTTP headers.
type SpeechRecognizeCall struct {
	s                *Service
	recognizerequest *RecognizeRequest
	urlParams_       gensupport.URLParams
	ctx_             context.Context
	header_          http.Header
}
1540
1541// Recognize: Performs synchronous speech recognition: receive results
1542// after all audio has been sent and processed.
1543func (r *SpeechService) Recognize(recognizerequest *RecognizeRequest) *SpeechRecognizeCall {
1544	c := &SpeechRecognizeCall{s: r.s, urlParams_: make(gensupport.URLParams)}
1545	c.recognizerequest = recognizerequest
1546	return c
1547}
1548
1549// Fields allows partial responses to be retrieved. See
1550// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
1551// for more information.
1552func (c *SpeechRecognizeCall) Fields(s ...googleapi.Field) *SpeechRecognizeCall {
1553	c.urlParams_.Set("fields", googleapi.CombineFields(s))
1554	return c
1555}
1556
1557// Context sets the context to be used in this call's Do method. Any
1558// pending HTTP request will be aborted if the provided context is
1559// canceled.
1560func (c *SpeechRecognizeCall) Context(ctx context.Context) *SpeechRecognizeCall {
1561	c.ctx_ = ctx
1562	return c
1563}
1564
1565// Header returns an http.Header that can be modified by the caller to
1566// add HTTP headers to the request.
1567func (c *SpeechRecognizeCall) Header() http.Header {
1568	if c.header_ == nil {
1569		c.header_ = make(http.Header)
1570	}
1571	return c.header_
1572}
1573
1574func (c *SpeechRecognizeCall) doRequest(alt string) (*http.Response, error) {
1575	reqHeaders := make(http.Header)
1576	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
1577	for k, v := range c.header_ {
1578		reqHeaders[k] = v
1579	}
1580	reqHeaders.Set("User-Agent", c.s.userAgent())
1581	var body io.Reader = nil
1582	body, err := googleapi.WithoutDataWrapper.JSONReader(c.recognizerequest)
1583	if err != nil {
1584		return nil, err
1585	}
1586	reqHeaders.Set("Content-Type", "application/json")
1587	c.urlParams_.Set("alt", alt)
1588	c.urlParams_.Set("prettyPrint", "false")
1589	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/speech:recognize")
1590	urls += "?" + c.urlParams_.Encode()
1591	req, err := http.NewRequest("POST", urls, body)
1592	if err != nil {
1593		return nil, err
1594	}
1595	req.Header = reqHeaders
1596	return gensupport.SendRequest(c.ctx_, c.s.client, req)
1597}
1598
1599// Do executes the "speech.speech.recognize" call.
1600// Exactly one of *RecognizeResponse or error will be non-nil. Any
1601// non-2xx status code is an error. Response headers are in either
1602// *RecognizeResponse.ServerResponse.Header or (if a response was
1603// returned at all) in error.(*googleapi.Error).Header. Use
1604// googleapi.IsNotModified to check whether the returned error was
1605// because http.StatusNotModified was returned.
1606func (c *SpeechRecognizeCall) Do(opts ...googleapi.CallOption) (*RecognizeResponse, error) {
1607	gensupport.SetOptions(c.urlParams_, opts...)
1608	res, err := c.doRequest("json")
1609	if res != nil && res.StatusCode == http.StatusNotModified {
1610		if res.Body != nil {
1611			res.Body.Close()
1612		}
1613		return nil, &googleapi.Error{
1614			Code:   res.StatusCode,
1615			Header: res.Header,
1616		}
1617	}
1618	if err != nil {
1619		return nil, err
1620	}
1621	defer googleapi.CloseBody(res)
1622	if err := googleapi.CheckResponse(res); err != nil {
1623		return nil, err
1624	}
1625	ret := &RecognizeResponse{
1626		ServerResponse: googleapi.ServerResponse{
1627			Header:         res.Header,
1628			HTTPStatusCode: res.StatusCode,
1629		},
1630	}
1631	target := &ret
1632	if err := gensupport.DecodeResponse(target, res); err != nil {
1633		return nil, err
1634	}
1635	return ret, nil
1636	// {
1637	//   "description": "Performs synchronous speech recognition: receive results after all audio has been sent and processed.",
1638	//   "flatPath": "v1/speech:recognize",
1639	//   "httpMethod": "POST",
1640	//   "id": "speech.speech.recognize",
1641	//   "parameterOrder": [],
1642	//   "parameters": {},
1643	//   "path": "v1/speech:recognize",
1644	//   "request": {
1645	//     "$ref": "RecognizeRequest"
1646	//   },
1647	//   "response": {
1648	//     "$ref": "RecognizeResponse"
1649	//   },
1650	//   "scopes": [
1651	//     "https://www.googleapis.com/auth/cloud-platform"
1652	//   ]
1653	// }
1654
1655}
1656