1// Copyright 2019 Google LLC.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// Code generated file. DO NOT EDIT.
6
7// Package speech provides access to the Cloud Speech-to-Text API.
8//
9// This package is DEPRECATED. Use package cloud.google.com/go/speech/apiv1 instead.
10//
11// For product documentation, see: https://cloud.google.com/speech-to-text/docs/quickstart-protocol
12//
13// Creating a client
14//
15// Usage example:
16//
17//   import "google.golang.org/api/speech/v1p1beta1"
18//   ...
19//   ctx := context.Background()
20//   speechService, err := speech.NewService(ctx)
21//
22// In this example, Google Application Default Credentials are used for authentication.
23//
24// For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials.
25//
26// Other authentication options
27//
28// To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey:
29//
30//   speechService, err := speech.NewService(ctx, option.WithAPIKey("AIza..."))
31//
32// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource:
33//
34//   config := &oauth2.Config{...}
35//   // ...
36//   token, err := config.Exchange(ctx, ...)
37//   speechService, err := speech.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token)))
38//
39// See https://godoc.org/google.golang.org/api/option/ for details on options.
40package speech // import "google.golang.org/api/speech/v1p1beta1"
41
42import (
43	"bytes"
44	"context"
45	"encoding/json"
46	"errors"
47	"fmt"
48	"io"
49	"net/http"
50	"net/url"
51	"strconv"
52	"strings"
53
54	googleapi "google.golang.org/api/googleapi"
55	gensupport "google.golang.org/api/internal/gensupport"
56	option "google.golang.org/api/option"
57	htransport "google.golang.org/api/transport/http"
58)
59
// Keep every imported package referenced so this file always compiles,
// even if the auto-generated code below happens not to use one of them.
var _ = bytes.NewBuffer
var _ = strconv.Itoa
var _ = fmt.Sprintf
var _ = json.NewDecoder
var _ = io.Copy
var _ = url.Parse
var _ = gensupport.MarshalJSON
var _ = googleapi.Version
var _ = errors.New
var _ = strings.Replace
var _ = context.Canceled
73
// Identity of this API surface: the discovery-document ID, the service
// name, the API version, and the default endpoint requests are sent to.
const apiId = "speech:v1p1beta1"
const apiName = "speech"
const apiVersion = "v1p1beta1"
const basePath = "https://speech.googleapis.com/"
78
// OAuth2 scopes used by this API.
const (
	// CloudPlatformScope grants permission to view and manage your data
	// across Google Cloud Platform services.
	CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform"
)
84
85// NewService creates a new Service.
86func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, error) {
87	scopesOption := option.WithScopes(
88		"https://www.googleapis.com/auth/cloud-platform",
89	)
90	// NOTE: prepend, so we don't override user-specified scopes.
91	opts = append([]option.ClientOption{scopesOption}, opts...)
92	client, endpoint, err := htransport.NewClient(ctx, opts...)
93	if err != nil {
94		return nil, err
95	}
96	s, err := New(client)
97	if err != nil {
98		return nil, err
99	}
100	if endpoint != "" {
101		s.BasePath = endpoint
102	}
103	return s, nil
104}
105
106// New creates a new Service. It uses the provided http.Client for requests.
107//
108// Deprecated: please use NewService instead.
109// To provide a custom HTTP client, use option.WithHTTPClient.
110// If you are using google.golang.org/api/googleapis/transport.APIKey, use option.WithAPIKey with NewService instead.
111func New(client *http.Client) (*Service, error) {
112	if client == nil {
113		return nil, errors.New("client is nil")
114	}
115	s := &Service{client: client, BasePath: basePath}
116	s.Operations = NewOperationsService(s)
117	s.Projects = NewProjectsService(s)
118	s.Speech = NewSpeechService(s)
119	return s, nil
120}
121
// Service is the root client for the Cloud Speech-to-Text API. Construct
// one with NewService; the exported sub-service fields give access to the
// per-resource method groups.
type Service struct {
	client    *http.Client
	BasePath  string // API endpoint base URL
	UserAgent string // optional additional User-Agent fragment

	Operations *OperationsService

	Projects *ProjectsService

	Speech *SpeechService
}
133
134func (s *Service) userAgent() string {
135	if s.UserAgent == "" {
136		return googleapi.UserAgent
137	}
138	return googleapi.UserAgent + " " + s.UserAgent
139}
140
141func NewOperationsService(s *Service) *OperationsService {
142	rs := &OperationsService{s: s}
143	return rs
144}
145
146type OperationsService struct {
147	s *Service
148}
149
150func NewProjectsService(s *Service) *ProjectsService {
151	rs := &ProjectsService{s: s}
152	rs.Locations = NewProjectsLocationsService(s)
153	return rs
154}
155
156type ProjectsService struct {
157	s *Service
158
159	Locations *ProjectsLocationsService
160}
161
162func NewProjectsLocationsService(s *Service) *ProjectsLocationsService {
163	rs := &ProjectsLocationsService{s: s}
164	rs.Operations = NewProjectsLocationsOperationsService(s)
165	return rs
166}
167
168type ProjectsLocationsService struct {
169	s *Service
170
171	Operations *ProjectsLocationsOperationsService
172}
173
174func NewProjectsLocationsOperationsService(s *Service) *ProjectsLocationsOperationsService {
175	rs := &ProjectsLocationsOperationsService{s: s}
176	return rs
177}
178
179type ProjectsLocationsOperationsService struct {
180	s *Service
181}
182
183func NewSpeechService(s *Service) *SpeechService {
184	rs := &SpeechService{s: s}
185	return rs
186}
187
188type SpeechService struct {
189	s *Service
190}
191
// ListOperationsResponse: The response message for
// Operations.ListOperations.
type ListOperationsResponse struct {
	// NextPageToken: The standard List next-page token.
	NextPageToken string `json:"nextPageToken,omitempty"`

	// Operations: A list of operations that matches the specified filter
	// in the request.
	Operations []*Operation `json:"operations,omitempty"`

	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields is a list of field names (e.g. "NextPageToken") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. Any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to
	// the server regardless of whether it is empty. This may be used to
	// include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "NextPageToken") to send
	// with the JSON null value. By default, fields with empty values are
	// omitted from API requests. It is an error if a field in this list
	// has a non-empty value. This may be used to include null fields in
	// Patch requests.
	NullFields []string `json:"-"`
}
222
223func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) {
224	type NoMethod ListOperationsResponse
225	raw := NoMethod(*s)
226	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
227}
228
// LongRunningRecognizeMetadata: Describes the progress of a long-running
// `LongRunningRecognize` call. It is included in the `metadata` field of
// the `Operation` returned by the `GetOperation` call of the
// `google::longrunning::Operations` service.
type LongRunningRecognizeMetadata struct {
	// LastUpdateTime: Time of the most recent processing update.
	LastUpdateTime string `json:"lastUpdateTime,omitempty"`

	// ProgressPercent: Approximate percentage of audio processed thus
	// far. Guaranteed to be 100 when the audio is fully processed and the
	// results are available.
	ProgressPercent int64 `json:"progressPercent,omitempty"`

	// StartTime: Time when the request was received.
	StartTime string `json:"startTime,omitempty"`

	// ForceSendFields is a list of field names (e.g. "LastUpdateTime") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. Any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to
	// the server regardless of whether it is empty. This may be used to
	// include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "LastUpdateTime") to send
	// with the JSON null value. By default, fields with empty values are
	// omitted from API requests. It is an error if a field in this list
	// has a non-empty value. This may be used to include null fields in
	// Patch requests.
	NullFields []string `json:"-"`
}
263
264func (s *LongRunningRecognizeMetadata) MarshalJSON() ([]byte, error) {
265	type NoMethod LongRunningRecognizeMetadata
266	raw := NoMethod(*s)
267	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
268}
269
// LongRunningRecognizeRequest: The top-level message sent by the client
// for the `LongRunningRecognize` method.
type LongRunningRecognizeRequest struct {
	// Audio: Required. The audio data to be recognized.
	Audio *RecognitionAudio `json:"audio,omitempty"`

	// Config: Required. Provides information to the recognizer that
	// specifies how to process the request.
	Config *RecognitionConfig `json:"config,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Audio") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. Any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to
	// the server regardless of whether it is empty. This may be used to
	// include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Audio") to send with the
	// JSON null value. By default, fields with empty values are omitted
	// from API requests. It is an error if a field in this list has a
	// non-empty value. This may be used to include null fields in Patch
	// requests.
	NullFields []string `json:"-"`
}
298
299func (s *LongRunningRecognizeRequest) MarshalJSON() ([]byte, error) {
300	type NoMethod LongRunningRecognizeRequest
301	raw := NoMethod(*s)
302	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
303}
304
// LongRunningRecognizeResponse: The only message returned to the client
// by the `LongRunningRecognize` method. It contains the result as zero or
// more sequential `SpeechRecognitionResult` messages. It is included in
// the `result.response` field of the `Operation` returned by the
// `GetOperation` call of the `google::longrunning::Operations` service.
type LongRunningRecognizeResponse struct {
	// Results: Sequential list of transcription results corresponding to
	// sequential portions of audio.
	Results []*SpeechRecognitionResult `json:"results,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Results") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. Any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to
	// the server regardless of whether it is empty. This may be used to
	// include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Results") to send with
	// the JSON null value. By default, fields with empty values are
	// omitted from API requests. It is an error if a field in this list
	// has a non-empty value. This may be used to include null fields in
	// Patch requests.
	NullFields []string `json:"-"`
}
336
337func (s *LongRunningRecognizeResponse) MarshalJSON() ([]byte, error) {
338	type NoMethod LongRunningRecognizeResponse
339	raw := NoMethod(*s)
340	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
341}
342
// Operation: This resource represents a long-running operation that is
// the result of a network API call.
type Operation struct {
	// Done: If the value is `false`, it means the operation is still in
	// progress. If `true`, the operation is completed, and either `error`
	// or `response` is available.
	Done bool `json:"done,omitempty"`

	// Error: The error result of the operation in case of failure or
	// cancellation.
	Error *Status `json:"error,omitempty"`

	// Metadata: Service-specific metadata associated with the operation.
	// It typically contains progress information and common metadata such
	// as create time. Some services might not provide such metadata. Any
	// method that returns a long-running operation should document the
	// metadata type, if any.
	Metadata googleapi.RawMessage `json:"metadata,omitempty"`

	// Name: The server-assigned name, which is only unique within the
	// same service that originally returns it. If you use the default
	// HTTP mapping, the `name` should be a resource name ending with
	// `operations/{unique_id}`.
	Name string `json:"name,omitempty"`

	// Response: The normal response of the operation in case of success.
	// If the original method returns no data on success, such as
	// `Delete`, the response is `google.protobuf.Empty`. If the original
	// method is standard `Get`/`Create`/`Update`, the response should be
	// the resource. For other methods, the response should have the type
	// `XxxResponse`, where `Xxx` is the original method name. For
	// example, if the original method name is `TakeSnapshot()`, the
	// inferred response type is `TakeSnapshotResponse`.
	Response googleapi.RawMessage `json:"response,omitempty"`

	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields is a list of field names (e.g. "Done") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. Any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to
	// the server regardless of whether it is empty. This may be used to
	// include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Done") to send with the
	// JSON null value. By default, fields with empty values are omitted
	// from API requests. It is an error if a field in this list has a
	// non-empty value. This may be used to include null fields in Patch
	// requests.
	NullFields []string `json:"-"`
}
412
413func (s *Operation) MarshalJSON() ([]byte, error) {
414	type NoMethod Operation
415	raw := NoMethod(*s)
416	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
417}
418
// RecognitionAudio: Contains audio data in the encoding specified in the
// `RecognitionConfig`. Either `content` or `uri` must be supplied.
// Supplying both or neither returns google.rpc.Code.INVALID_ARGUMENT. See
// [content limits](https://cloud.google.com/speech-to-text/quotas#content).
type RecognitionAudio struct {
	// Content: The audio data bytes encoded as specified in
	// `RecognitionConfig`. Note: as with all bytes fields, proto buffers
	// use a pure binary representation, whereas JSON representations use
	// base64.
	Content string `json:"content,omitempty"`

	// Uri: URI that points to a file that contains audio data bytes as
	// specified in `RecognitionConfig`. The file must not be compressed
	// (for example, gzip). Currently, only Google Cloud Storage URIs are
	// supported, which must be specified in the following format:
	// `gs://bucket_name/object_name` (other URI formats return
	// google.rpc.Code.INVALID_ARGUMENT). For more information, see
	// [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
	Uri string `json:"uri,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Content") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. Any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to
	// the server regardless of whether it is empty. This may be used to
	// include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Content") to send with
	// the JSON null value. By default, fields with empty values are
	// omitted from API requests. It is an error if a field in this list
	// has a non-empty value. This may be used to include null fields in
	// Patch requests.
	NullFields []string `json:"-"`
}
463
464func (s *RecognitionAudio) MarshalJSON() ([]byte, error) {
465	type NoMethod RecognitionAudio
466	raw := NoMethod(*s)
467	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
468}
469
// RecognitionConfig: Provides information to the recognizer that
// specifies how to process the request.
type RecognitionConfig struct {
	// AlternativeLanguageCodes: A list of up to 3 additional
	// [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language
	// tags, listing possible alternative languages of the supplied audio.
	// See [Language
	// Support](https://cloud.google.com/speech-to-text/docs/languages)
	// for a list of the currently supported language codes. If
	// alternative languages are listed, the recognition result will
	// contain recognition in the most likely language detected, including
	// the main language_code, and will include the language tag of the
	// language detected in the audio. Note: This feature is only
	// supported for Voice Command and Voice Search use cases and
	// performance may vary for other use cases (e.g., phone call
	// transcription).
	AlternativeLanguageCodes []string `json:"alternativeLanguageCodes,omitempty"`

	// AudioChannelCount: The number of channels in the input audio data.
	// ONLY set this for MULTI-CHANNEL recognition. Valid values for
	// LINEAR16 and FLAC are `1`-`8`; for OGG_OPUS, '1'-'254'; for MULAW,
	// AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE, only `1`. If `0` or
	// omitted, defaults to one channel (mono). Note: only the first
	// channel is recognized by default; to perform independent
	// recognition on each channel, set
	// `enable_separate_recognition_per_channel` to 'true'.
	AudioChannelCount int64 `json:"audioChannelCount,omitempty"`

	// DiarizationConfig: Config to enable speaker diarization and set
	// additional parameters to make diarization better suited for your
	// application. Note: When this is enabled, all the words from the
	// beginning of the audio are sent for the top alternative in every
	// consecutive STREAMING response, to improve speaker tags as the
	// models learn to identify the speakers over time. For non-streaming
	// requests, the diarization results will be provided only in the top
	// alternative of the FINAL SpeechRecognitionResult.
	DiarizationConfig *SpeakerDiarizationConfig `json:"diarizationConfig,omitempty"`

	// DiarizationSpeakerCount: If set, specifies the estimated number of
	// speakers in the conversation. Defaults to '2'. Ignored unless
	// enable_speaker_diarization is set to true. Note: Use
	// diarization_config instead.
	DiarizationSpeakerCount int64 `json:"diarizationSpeakerCount,omitempty"`

	// EnableAutomaticPunctuation: If 'true', adds punctuation to
	// recognition result hypotheses. This feature is only available in
	// select languages; setting it for requests in other languages has no
	// effect. The default 'false' value does not add punctuation. Note:
	// This is currently offered as an experimental service, complimentary
	// to all users. In the future this may be exclusively available as a
	// premium feature.
	EnableAutomaticPunctuation bool `json:"enableAutomaticPunctuation,omitempty"`

	// EnableSeparateRecognitionPerChannel: This needs to be set to `true`
	// explicitly and `audio_channel_count` > 1 to get each channel
	// recognized separately. The recognition result will contain a
	// `channel_tag` field stating which channel the result belongs to. If
	// this is not true, only the first channel is recognized. The request
	// is billed cumulatively for all channels recognized:
	// `audio_channel_count` multiplied by the length of the audio.
	EnableSeparateRecognitionPerChannel bool `json:"enableSeparateRecognitionPerChannel,omitempty"`

	// EnableSpeakerDiarization: If 'true', enables speaker detection for
	// each recognized word in the top alternative of the recognition
	// result using a speaker_tag provided in the WordInfo. Note: Use
	// diarization_config instead.
	EnableSpeakerDiarization bool `json:"enableSpeakerDiarization,omitempty"`

	// EnableWordConfidence: If `true`, the top result includes a list of
	// words and the confidence for those words. If `false`, no word-level
	// confidence information is returned. The default is `false`.
	EnableWordConfidence bool `json:"enableWordConfidence,omitempty"`

	// EnableWordTimeOffsets: If `true`, the top result includes a list of
	// words and the start and end time offsets (timestamps) for those
	// words. If `false`, no word-level time offset information is
	// returned. The default is `false`.
	EnableWordTimeOffsets bool `json:"enableWordTimeOffsets,omitempty"`

	// Encoding: Encoding of audio data sent in all `RecognitionAudio`
	// messages. This field is optional for `FLAC` and `WAV` audio files
	// and required for all other audio formats. For details, see
	// AudioEncoding.
	//
	// Possible values:
	//   "ENCODING_UNSPECIFIED" - Not specified.
	//   "LINEAR16" - Uncompressed 16-bit signed little-endian samples
	// (Linear PCM).
	//   "FLAC" - `FLAC` (Free Lossless Audio Codec) is the recommended
	// encoding because it is lossless--therefore recognition is not
	// compromised--and requires only about half the bandwidth of
	// `LINEAR16`. `FLAC` stream encoding supports 16-bit and 24-bit
	// samples; however, not all fields in `STREAMINFO` are supported.
	//   "MULAW" - 8-bit samples that compand 14-bit audio samples using
	// G.711 PCMU/mu-law.
	//   "AMR" - Adaptive Multi-Rate Narrowband codec. `sample_rate_hertz`
	// must be 8000.
	//   "AMR_WB" - Adaptive Multi-Rate Wideband codec. `sample_rate_hertz`
	// must be 16000.
	//   "OGG_OPUS" - Opus encoded audio frames in Ogg container
	// ([OggOpus](https://wiki.xiph.org/OggOpus)). `sample_rate_hertz`
	// must be one of 8000, 12000, 16000, 24000, or 48000.
	//   "SPEEX_WITH_HEADER_BYTE" - Although the use of lossy encodings is
	// not recommended, if a very low bitrate encoding is required,
	// `OGG_OPUS` is highly preferred over Speex encoding. The
	// [Speex](https://speex.org/) encoding supported by Cloud Speech API
	// has a header byte in each block, as in MIME type
	// `audio/x-speex-with-header-byte`. It is a variant of the RTP Speex
	// encoding defined in [RFC 5574](https://tools.ietf.org/html/rfc5574).
	// The stream is a sequence of blocks, one block per RTP packet. Each
	// block starts with a byte containing the length of the block, in
	// bytes, followed by one or more frames of Speex data, padded to an
	// integral number of bytes (octets) as specified in RFC 5574. In
	// other words, each RTP header is replaced with a single byte
	// containing the block length. Only Speex wideband is supported.
	// `sample_rate_hertz` must be 16000.
	//   "MP3" - MP3 audio. Support all standard MP3 bitrates (which range
	// from 32-320 kbps). When using this encoding, `sample_rate_hertz`
	// can be optionally unset if not known.
	Encoding string `json:"encoding,omitempty"`

	// LanguageCode: Required. The language of the supplied audio as a
	// [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language
	// tag. Example: "en-US". See [Language
	// Support](https://cloud.google.com/speech-to-text/docs/languages)
	// for a list of the currently supported language codes.
	LanguageCode string `json:"languageCode,omitempty"`

	// MaxAlternatives: Maximum number of recognition hypotheses to be
	// returned. Specifically, the maximum number of
	// `SpeechRecognitionAlternative` messages within each
	// `SpeechRecognitionResult`. The server may return fewer than
	// `max_alternatives`. Valid values are `0`-`30`. A value of `0` or
	// `1` will return a maximum of one. If omitted, will return a maximum
	// of one.
	MaxAlternatives int64 `json:"maxAlternatives,omitempty"`

	// Metadata: Metadata regarding this request.
	Metadata *RecognitionMetadata `json:"metadata,omitempty"`

	// Model: Which model to select for the given request. Select the
	// model best suited to your domain to get best results. If a model is
	// not explicitly specified, then a model is auto-selected based on
	// the parameters in the RecognitionConfig.
	// <table>
	//   <tr>
	//     <td><b>Model</b></td>
	//     <td><b>Description</b></td>
	//   </tr>
	//   <tr>
	//     <td><code>command_and_search</code></td>
	//     <td>Best for short queries such as voice commands or voice
	// search.</td>
	//   </tr>
	//   <tr>
	//     <td><code>phone_call</code></td>
	//     <td>Best for audio that originated from a phone call (typically
	//     recorded at an 8khz sampling rate).</td>
	//   </tr>
	//   <tr>
	//     <td><code>video</code></td>
	//     <td>Best for audio that originated from video or includes
	// multiple speakers. Ideally the audio is recorded at a 16khz or
	// greater sampling rate. This is a premium model that costs more than
	// the standard rate.</td>
	//   </tr>
	//   <tr>
	//     <td><code>default</code></td>
	//     <td>Best for audio that is not one of the specific audio
	// models. For example, long-form audio. Ideally the audio is
	// high-fidelity, recorded at a 16khz or greater sampling rate.</td>
	//   </tr>
	// </table>
	Model string `json:"model,omitempty"`

	// ProfanityFilter: If set to `true`, the server will attempt to
	// filter out profanities, replacing all but the initial character in
	// each filtered word with asterisks, e.g. "f***". If set to `false`
	// or omitted, profanities won't be filtered out.
	ProfanityFilter bool `json:"profanityFilter,omitempty"`

	// SampleRateHertz: Sample rate in Hertz of the audio data sent in all
	// `RecognitionAudio` messages. Valid values are: 8000-48000. 16000 is
	// optimal. For best results, set the sampling rate of the audio
	// source to 16000 Hz. If that's not possible, use the native sample
	// rate of the audio source (instead of re-sampling). This field is
	// optional for FLAC and WAV audio files, but is required for all
	// other audio formats. For details, see AudioEncoding.
	SampleRateHertz int64 `json:"sampleRateHertz,omitempty"`

	// SpeechContexts: Array of SpeechContext. A means to provide context
	// to assist the speech recognition. For more information, see [speech
	// adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
	SpeechContexts []*SpeechContext `json:"speechContexts,omitempty"`

	// UseEnhanced: Set to true to use an enhanced model for speech
	// recognition. If `use_enhanced` is set to true and the `model` field
	// is not set, then an appropriate enhanced model is chosen if an
	// enhanced model exists for the audio.
	//
	// If `use_enhanced` is true and an enhanced version of the specified
	// model does not exist, then the speech is recognized using the
	// standard version of the specified model.
	UseEnhanced bool `json:"useEnhanced,omitempty"`

	// ForceSendFields is a list of field names (e.g.
	// "AlternativeLanguageCodes") to unconditionally include in API
	// requests. By default, fields with empty values are omitted from API
	// requests. Any non-pointer, non-interface field appearing in
	// ForceSendFields will be sent to the server regardless of whether it
	// is empty. This may be used to include empty fields in Patch
	// requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "AlternativeLanguageCodes")
	// to send with the JSON null value. By default, fields with empty
	// values are omitted from API requests. It is an error if a field in
	// this list has a non-empty value. This may be used to include null
	// fields in Patch requests.
	NullFields []string `json:"-"`
}
776
777func (s *RecognitionConfig) MarshalJSON() ([]byte, error) {
778	type NoMethod RecognitionConfig
779	raw := NoMethod(*s)
780	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
781}
782
// RecognitionMetadata: Description of audio data to be recognized.
type RecognitionMetadata struct {
	// AudioTopic: Description of the content. Eg. "Recordings of federal
	// supreme court hearings from 2012".
	AudioTopic string `json:"audioTopic,omitempty"`

	// IndustryNaicsCodeOfAudio: The industry vertical to which this speech
	// recognition request most closely applies. This is most indicative of
	// the topics contained in the audio. Use the 6-digit NAICS code to
	// identify the industry vertical - see https://www.naics.com/search/.
	IndustryNaicsCodeOfAudio int64 `json:"industryNaicsCodeOfAudio,omitempty"`

	// InteractionType: The use case most closely describing the audio
	// content to be recognized.
	//
	// Possible values:
	//   "INTERACTION_TYPE_UNSPECIFIED" - Use case is either unknown or is
	// something other than one of the other values below.
	//   "DISCUSSION" - Multiple people in a conversation or discussion. For
	// example in a meeting with two or more people actively participating.
	// Typically all the primary people speaking would be in the same room
	// (if not, see PHONE_CALL)
	//   "PRESENTATION" - One or more persons lecturing or presenting to
	// others, mostly uninterrupted.
	//   "PHONE_CALL" - A phone-call or video-conference in which two or
	// more people, who are not in the same room, are actively
	// participating.
	//   "VOICEMAIL" - A recorded message intended for another person to
	// listen to.
	//   "PROFESSIONALLY_PRODUCED" - Professionally produced audio (eg. TV
	// Show, Podcast).
	//   "VOICE_SEARCH" - Transcribe spoken questions and queries into text.
	//   "VOICE_COMMAND" - Transcribe voice commands, such as for
	// controlling a device.
	//   "DICTATION" - Transcribe speech to text to create a written
	// document, such as a text-message, email or report.
	InteractionType string `json:"interactionType,omitempty"`

	// MicrophoneDistance: The audio type that most closely describes the
	// audio being recognized.
	//
	// Possible values:
	//   "MICROPHONE_DISTANCE_UNSPECIFIED" - Audio type is not known.
	//   "NEARFIELD" - The audio was captured from a closely placed
	// microphone. Eg. phone, dictaphone, or handheld microphone. Generally
	// if the speaker is within 1 meter of the microphone.
	//   "MIDFIELD" - The speaker is within 3 meters of the microphone.
	//   "FARFIELD" - The speaker is more than 3 meters away from the
	// microphone.
	MicrophoneDistance string `json:"microphoneDistance,omitempty"`

	// ObfuscatedId: Obfuscated (privacy-protected) ID of the user, to
	// identify the number of unique users using the service.
	// Note: serialized as a JSON string (",string" tag) since the value may
	// exceed JavaScript's safe integer range.
	ObfuscatedId int64 `json:"obfuscatedId,omitempty,string"`

	// OriginalMediaType: The original media the speech was recorded on.
	//
	// Possible values:
	//   "ORIGINAL_MEDIA_TYPE_UNSPECIFIED" - Unknown original media type.
	//   "AUDIO" - The speech data is an audio recording.
	//   "VIDEO" - The speech data originally recorded on a video.
	OriginalMediaType string `json:"originalMediaType,omitempty"`

	// OriginalMimeType: Mime type of the original audio file. For example
	// `audio/m4a`, `audio/x-alaw-basic`, `audio/mp3`, `audio/3gpp`.
	// A list of possible audio mime types is maintained at
	// http://www.iana.org/assignments/media-types/media-types.xhtml#audio
	OriginalMimeType string `json:"originalMimeType,omitempty"`

	// RecordingDeviceName: The device used to make the recording. Examples
	// 'Nexus 5X' or 'Polycom SoundStation IP 6000' or 'POTS' or 'VoIP' or
	// 'Cardioid Microphone'.
	RecordingDeviceName string `json:"recordingDeviceName,omitempty"`

	// RecordingDeviceType: The type of device the speech was recorded with.
	//
	// Possible values:
	//   "RECORDING_DEVICE_TYPE_UNSPECIFIED" - The recording device is
	// unknown.
	//   "SMARTPHONE" - Speech was recorded on a smartphone.
	//   "PC" - Speech was recorded using a personal computer or tablet.
	//   "PHONE_LINE" - Speech was recorded over a phone line.
	//   "VEHICLE" - Speech was recorded in a vehicle.
	//   "OTHER_OUTDOOR_DEVICE" - Speech was recorded outdoors.
	//   "OTHER_INDOOR_DEVICE" - Speech was recorded indoors.
	RecordingDeviceType string `json:"recordingDeviceType,omitempty"`

	// ForceSendFields is a list of field names (e.g. "AudioTopic") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "AudioTopic") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
900
901func (s *RecognitionMetadata) MarshalJSON() ([]byte, error) {
902	type NoMethod RecognitionMetadata
903	raw := NoMethod(*s)
904	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
905}
906
// RecognizeRequest: The top-level message sent by the client for the
// `Recognize` method.
type RecognizeRequest struct {
	// Audio: Required. The audio data to be recognized.
	Audio *RecognitionAudio `json:"audio,omitempty"`

	// Config: Required. Provides information to the recognizer that
	// specifies how to process the request.
	Config *RecognitionConfig `json:"config,omitempty"`

	// Name: Use `model` field in RecognitionConfig instead.
	// Deprecated per the service's own description; kept for wire
	// compatibility.
	Name string `json:"name,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Audio") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Audio") to include in API
	// requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
937
938func (s *RecognizeRequest) MarshalJSON() ([]byte, error) {
939	type NoMethod RecognizeRequest
940	raw := NoMethod(*s)
941	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
942}
943
// RecognizeResponse: The only message returned to the client by the
// `Recognize` method. It contains the result as zero or more sequential
// `SpeechRecognitionResult` messages.
type RecognizeResponse struct {
	// Results: Sequential list of transcription results corresponding to
	// sequential portions of audio.
	Results []*SpeechRecognitionResult `json:"results,omitempty"`

	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields is a list of field names (e.g. "Results") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Results") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
975
976func (s *RecognizeResponse) MarshalJSON() ([]byte, error) {
977	type NoMethod RecognizeResponse
978	raw := NoMethod(*s)
979	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
980}
981
// SpeakerDiarizationConfig: Config to enable speaker diarization.
type SpeakerDiarizationConfig struct {
	// EnableSpeakerDiarization: If 'true', enables speaker detection for
	// each recognized word in the top alternative of the recognition result
	// using a speaker_tag provided in the WordInfo.
	EnableSpeakerDiarization bool `json:"enableSpeakerDiarization,omitempty"`

	// MaxSpeakerCount: Maximum number of speakers in the conversation. This
	// range gives you more flexibility by allowing the system to
	// automatically determine the correct number of speakers. If not set,
	// the default value is 6.
	MaxSpeakerCount int64 `json:"maxSpeakerCount,omitempty"`

	// MinSpeakerCount: Minimum number of speakers in the conversation. This
	// range gives you more flexibility by allowing the system to
	// automatically determine the correct number of speakers. If not set,
	// the default value is 2.
	MinSpeakerCount int64 `json:"minSpeakerCount,omitempty"`

	// ForceSendFields is a list of field names (e.g.
	// "EnableSpeakerDiarization") to unconditionally include in API
	// requests. By default, fields with empty values are omitted from API
	// requests. However, any non-pointer, non-interface field appearing in
	// ForceSendFields will be sent to the server regardless of whether the
	// field is empty or not. This may be used to include empty fields in
	// Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "EnableSpeakerDiarization")
	// to include in API requests with the JSON null value. By default,
	// fields with empty values are omitted from API requests. However, any
	// field with an empty value appearing in NullFields will be sent to the
	// server as null. It is an error if a field in this list has a
	// non-empty value. This may be used to include null fields in Patch
	// requests.
	NullFields []string `json:"-"`
}
1023
1024func (s *SpeakerDiarizationConfig) MarshalJSON() ([]byte, error) {
1025	type NoMethod SpeakerDiarizationConfig
1026	raw := NoMethod(*s)
1027	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
1028}
1029
// SpeechContext: Provides "hints" to the speech recognizer to favor
// specific words and phrases in the results.
type SpeechContext struct {
	// Boost: Hint Boost. Positive value will increase the probability that
	// a specific phrase will be recognized over other similar sounding
	// phrases. The higher the boost, the higher the chance of false
	// positive recognition as well. Negative boost values would correspond
	// to anti-biasing. Anti-biasing is not enabled, so negative boost will
	// simply be ignored. Though `boost` can accept a wide range of positive
	// values, most use cases are best served with values between 0 and 20.
	// We recommend using a binary search approach to finding the optimal
	// value for your use case.
	Boost float64 `json:"boost,omitempty"`

	// Phrases: A list of strings containing words and phrases "hints" so
	// that the speech recognition is more likely to recognize them. This
	// can be used to improve the accuracy for specific words and phrases,
	// for example, if specific commands are typically spoken by the user.
	// This can also be used to add additional words to the vocabulary of
	// the recognizer. See [usage
	// limits](https://cloud.google.com/speech-to-text/quotas#content).
	//
	// List items can also be set to classes for groups of words that
	// represent common concepts that occur in natural language. For
	// example, rather than providing phrase hints for every month of the
	// year, using the $MONTH class improves the likelihood of correctly
	// transcribing audio that includes months.
	Phrases []string `json:"phrases,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Boost") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Boost") to include in API
	// requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
1092
1093func (s *SpeechContext) MarshalJSON() ([]byte, error) {
1094	type NoMethod SpeechContext
1095	raw := NoMethod(*s)
1096	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
1097}
1098
1099func (s *SpeechContext) UnmarshalJSON(data []byte) error {
1100	type NoMethod SpeechContext
1101	var s1 struct {
1102		Boost gensupport.JSONFloat64 `json:"boost"`
1103		*NoMethod
1104	}
1105	s1.NoMethod = (*NoMethod)(s)
1106	if err := json.Unmarshal(data, &s1); err != nil {
1107		return err
1108	}
1109	s.Boost = float64(s1.Boost)
1110	return nil
1111}
1112
// SpeechRecognitionAlternative: Alternative hypotheses (a.k.a. n-best
// list).
type SpeechRecognitionAlternative struct {
	// Confidence: The confidence estimate between 0.0 and 1.0. A higher
	// number indicates an estimated greater likelihood that the recognized
	// words are correct. This field is set only for the top alternative of
	// a non-streaming result or, of a streaming result where
	// `is_final=true`. This field is not guaranteed to be accurate and
	// users should not rely on it to be always provided. The default of 0.0
	// is a sentinel value indicating `confidence` was not set.
	Confidence float64 `json:"confidence,omitempty"`

	// Transcript: Transcript text representing the words that the user
	// spoke.
	Transcript string `json:"transcript,omitempty"`

	// Words: A list of word-specific information for each recognized word.
	// Note: When `enable_speaker_diarization` is true, you will see all the
	// words from the beginning of the audio.
	Words []*WordInfo `json:"words,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Confidence") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Confidence") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
1157
1158func (s *SpeechRecognitionAlternative) MarshalJSON() ([]byte, error) {
1159	type NoMethod SpeechRecognitionAlternative
1160	raw := NoMethod(*s)
1161	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
1162}
1163
1164func (s *SpeechRecognitionAlternative) UnmarshalJSON(data []byte) error {
1165	type NoMethod SpeechRecognitionAlternative
1166	var s1 struct {
1167		Confidence gensupport.JSONFloat64 `json:"confidence"`
1168		*NoMethod
1169	}
1170	s1.NoMethod = (*NoMethod)(s)
1171	if err := json.Unmarshal(data, &s1); err != nil {
1172		return err
1173	}
1174	s.Confidence = float64(s1.Confidence)
1175	return nil
1176}
1177
// SpeechRecognitionResult: A speech recognition result corresponding to
// a portion of the audio.
type SpeechRecognitionResult struct {
	// Alternatives: May contain one or more recognition hypotheses (up to
	// the maximum specified in `max_alternatives`). These alternatives are
	// ordered in terms of accuracy, with the top (first) alternative being
	// the most probable, as ranked by the recognizer.
	Alternatives []*SpeechRecognitionAlternative `json:"alternatives,omitempty"`

	// ChannelTag: For multi-channel audio, this is the channel number
	// corresponding to the recognized result for the audio from that
	// channel. For audio_channel_count = N, its output values can range
	// from '1' to 'N'.
	ChannelTag int64 `json:"channelTag,omitempty"`

	// LanguageCode: The
	// [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag
	// of the language in this result. This language code was detected to
	// have the most likelihood of being spoken in the audio.
	LanguageCode string `json:"languageCode,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Alternatives") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Alternatives") to include
	// in API requests with the JSON null value. By default, fields with
	// empty values are omitted from API requests. However, any field with
	// an empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
1220
1221func (s *SpeechRecognitionResult) MarshalJSON() ([]byte, error) {
1222	type NoMethod SpeechRecognitionResult
1223	raw := NoMethod(*s)
1224	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
1225}
1226
// Status: The `Status` type defines a logical error model that is
// suitable for different programming environments, including REST APIs
// and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each
// `Status` message contains three pieces of data: error code, error
// message, and error details.
//
// You can find out more about this error model and how to work with it
// in the [API Design Guide](https://cloud.google.com/apis/design/errors).
type Status struct {
	// Code: The status code, which should be an enum value of
	// google.rpc.Code.
	Code int64 `json:"code,omitempty"`

	// Details: A list of messages that carry the error details. There is a
	// common set of message types for APIs to use. Each entry is kept as
	// raw JSON; callers decode the type they expect.
	Details []googleapi.RawMessage `json:"details,omitempty"`

	// Message: A developer-facing error message, which should be in
	// English. Any user-facing error message should be localized and sent
	// in the google.rpc.Status.details field, or localized by the client.
	Message string `json:"message,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Code") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Code") to include in API
	// requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
1272
1273func (s *Status) MarshalJSON() ([]byte, error) {
1274	type NoMethod Status
1275	raw := NoMethod(*s)
1276	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
1277}
1278
// WordInfo: Word-specific information for recognized words.
type WordInfo struct {
	// Confidence: The confidence estimate between 0.0 and 1.0. A higher
	// number indicates an estimated greater likelihood that the recognized
	// words are correct. This field is set only for the top alternative of
	// a non-streaming result or, of a streaming result where
	// `is_final=true`. This field is not guaranteed to be accurate and
	// users should not rely on it to be always provided. The default of 0.0
	// is a sentinel value indicating `confidence` was not set.
	Confidence float64 `json:"confidence,omitempty"`

	// EndTime: Time offset relative to the beginning of the audio, and
	// corresponding to the end of the spoken word. This field is only set
	// if `enable_word_time_offsets=true` and only in the top hypothesis.
	// This is an experimental feature and the accuracy of the time offset
	// can vary.
	EndTime string `json:"endTime,omitempty"`

	// SpeakerTag: A distinct integer value is assigned for every speaker
	// within the audio. This field specifies which one of those speakers
	// was detected to have spoken this word. Value ranges from '1' to
	// diarization_speaker_count. speaker_tag is set if
	// enable_speaker_diarization = 'true' and only in the top alternative.
	SpeakerTag int64 `json:"speakerTag,omitempty"`

	// StartTime: Time offset relative to the beginning of the audio, and
	// corresponding to the start of the spoken word. This field is only set
	// if `enable_word_time_offsets=true` and only in the top hypothesis.
	// This is an experimental feature and the accuracy of the time offset
	// can vary.
	StartTime string `json:"startTime,omitempty"`

	// Word: The word corresponding to this set of information.
	Word string `json:"word,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Confidence") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Confidence") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
1343
1344func (s *WordInfo) MarshalJSON() ([]byte, error) {
1345	type NoMethod WordInfo
1346	raw := NoMethod(*s)
1347	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
1348}
1349
1350func (s *WordInfo) UnmarshalJSON(data []byte) error {
1351	type NoMethod WordInfo
1352	var s1 struct {
1353		Confidence gensupport.JSONFloat64 `json:"confidence"`
1354		*NoMethod
1355	}
1356	s1.NoMethod = (*NoMethod)(s)
1357	if err := json.Unmarshal(data, &s1); err != nil {
1358		return err
1359	}
1360	s.Confidence = float64(s1.Confidence)
1361	return nil
1362}
1363
// method id "speech.operations.get":

// OperationsGetCall holds the state for a single "speech.operations.get"
// request built by OperationsService.Get and executed by Do.
type OperationsGetCall struct {
	s            *Service              // parent service (client, base path, user agent)
	name         string                // operation resource name, expanded into the URL path
	urlParams_   gensupport.URLParams  // accumulated query parameters
	ifNoneMatch_ string                // optional If-None-Match header value
	ctx_         context.Context       // optional context controlling the HTTP request
	header_      http.Header           // optional extra request headers
}
1374
1375// Get: Gets the latest state of a long-running operation.  Clients can
1376// use this
1377// method to poll the operation result at intervals as recommended by
1378// the API
1379// service.
1380func (r *OperationsService) Get(name string) *OperationsGetCall {
1381	c := &OperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
1382	c.name = name
1383	return c
1384}
1385
1386// Fields allows partial responses to be retrieved. See
1387// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
1388// for more information.
1389func (c *OperationsGetCall) Fields(s ...googleapi.Field) *OperationsGetCall {
1390	c.urlParams_.Set("fields", googleapi.CombineFields(s))
1391	return c
1392}
1393
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *OperationsGetCall) IfNoneMatch(entityTag string) *OperationsGetCall {
	c.ifNoneMatch_ = entityTag
	return c
}
1403
1404// Context sets the context to be used in this call's Do method. Any
1405// pending HTTP request will be aborted if the provided context is
1406// canceled.
1407func (c *OperationsGetCall) Context(ctx context.Context) *OperationsGetCall {
1408	c.ctx_ = ctx
1409	return c
1410}
1411
1412// Header returns an http.Header that can be modified by the caller to
1413// add HTTP headers to the request.
1414func (c *OperationsGetCall) Header() http.Header {
1415	if c.header_ == nil {
1416		c.header_ = make(http.Header)
1417	}
1418	return c.header_
1419}
1420
1421func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) {
1422	reqHeaders := make(http.Header)
1423	reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190926")
1424	for k, v := range c.header_ {
1425		reqHeaders[k] = v
1426	}
1427	reqHeaders.Set("User-Agent", c.s.userAgent())
1428	if c.ifNoneMatch_ != "" {
1429		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
1430	}
1431	var body io.Reader = nil
1432	c.urlParams_.Set("alt", alt)
1433	c.urlParams_.Set("prettyPrint", "false")
1434	urls := googleapi.ResolveRelative(c.s.BasePath, "v1p1beta1/operations/{+name}")
1435	urls += "?" + c.urlParams_.Encode()
1436	req, err := http.NewRequest("GET", urls, body)
1437	if err != nil {
1438		return nil, err
1439	}
1440	req.Header = reqHeaders
1441	googleapi.Expand(req.URL, map[string]string{
1442		"name": c.name,
1443	})
1444	return gensupport.SendRequest(c.ctx_, c.s.client, req)
1445}
1446
// Do executes the "speech.operations.get" call.
// Exactly one of *Operation or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *OperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// 304 Not Modified is surfaced as a *googleapi.Error (checked before
	// err, since a response may accompany a transport-level error).
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	// Pre-populate ServerResponse so callers can inspect headers/status.
	ret := &Operation{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Gets the latest state of a long-running operation.  Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.",
	//   "flatPath": "v1p1beta1/operations/{operationsId}",
	//   "httpMethod": "GET",
	//   "id": "speech.operations.get",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "The name of the operation resource.",
	//       "location": "path",
	//       "pattern": "^.+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1p1beta1/operations/{+name}",
	//   "response": {
	//     "$ref": "Operation"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform"
	//   ]
	// }

}
1511
// method id "speech.operations.list":

// OperationsListCall holds the state for a single "speech.operations.list"
// request built by OperationsService.List and executed by Do.
type OperationsListCall struct {
	s            *Service              // parent service (client, base path, user agent)
	urlParams_   gensupport.URLParams  // accumulated query parameters (filter, name, pageSize, pageToken, ...)
	ifNoneMatch_ string                // optional If-None-Match header value
	ctx_         context.Context       // optional context controlling the HTTP request
	header_      http.Header           // optional extra request headers
}
1521
1522// List: Lists operations that match the specified filter in the
1523// request. If the
1524// server doesn't support this method, it returns
1525// `UNIMPLEMENTED`.
1526//
1527// NOTE: the `name` binding allows API services to override the
1528// binding
1529// to use different resource name schemes, such as `users/*/operations`.
1530// To
1531// override the binding, API services can add a binding such
1532// as
1533// "/v1/{name=users/*}/operations" to their service configuration.
1534// For backwards compatibility, the default name includes the
1535// operations
1536// collection id, however overriding users must ensure the name
1537// binding
1538// is the parent resource, without the operations collection id.
1539func (r *OperationsService) List() *OperationsListCall {
1540	c := &OperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
1541	return c
1542}
1543
1544// Filter sets the optional parameter "filter": The standard list
1545// filter.
1546func (c *OperationsListCall) Filter(filter string) *OperationsListCall {
1547	c.urlParams_.Set("filter", filter)
1548	return c
1549}
1550
// Name sets the optional parameter "name": The name of the operation's
// parent resource. It is sent as the "name" URL query parameter.
func (c *OperationsListCall) Name(name string) *OperationsListCall {
	c.urlParams_.Set("name", name)
	return c
}
1557
1558// PageSize sets the optional parameter "pageSize": The standard list
1559// page size.
1560func (c *OperationsListCall) PageSize(pageSize int64) *OperationsListCall {
1561	c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
1562	return c
1563}
1564
// PageToken sets the optional parameter "pageToken": The standard list
// page token. It is sent as the "pageToken" URL query parameter.
func (c *OperationsListCall) PageToken(pageToken string) *OperationsListCall {
	c.urlParams_.Set("pageToken", pageToken)
	return c
}
1571
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information. The selectors are combined into the standard
// "fields" URL query parameter.
func (c *OperationsListCall) Fields(s ...googleapi.Field) *OperationsListCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}
1579
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *OperationsListCall) IfNoneMatch(entityTag string) *OperationsListCall {
	c.ifNoneMatch_ = entityTag
	return c
}
1589
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled. It replaces any context previously set on the call.
func (c *OperationsListCall) Context(ctx context.Context) *OperationsListCall {
	c.ctx_ = ctx
	return c
}
1597
1598// Header returns an http.Header that can be modified by the caller to
1599// add HTTP headers to the request.
1600func (c *OperationsListCall) Header() http.Header {
1601	if c.header_ == nil {
1602		c.header_ = make(http.Header)
1603	}
1604	return c.header_
1605}
1606
1607func (c *OperationsListCall) doRequest(alt string) (*http.Response, error) {
1608	reqHeaders := make(http.Header)
1609	reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190926")
1610	for k, v := range c.header_ {
1611		reqHeaders[k] = v
1612	}
1613	reqHeaders.Set("User-Agent", c.s.userAgent())
1614	if c.ifNoneMatch_ != "" {
1615		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
1616	}
1617	var body io.Reader = nil
1618	c.urlParams_.Set("alt", alt)
1619	c.urlParams_.Set("prettyPrint", "false")
1620	urls := googleapi.ResolveRelative(c.s.BasePath, "v1p1beta1/operations")
1621	urls += "?" + c.urlParams_.Encode()
1622	req, err := http.NewRequest("GET", urls, body)
1623	if err != nil {
1624		return nil, err
1625	}
1626	req.Header = reqHeaders
1627	return gensupport.SendRequest(c.ctx_, c.s.client, req)
1628}
1629
1630// Do executes the "speech.operations.list" call.
1631// Exactly one of *ListOperationsResponse or error will be non-nil. Any
1632// non-2xx status code is an error. Response headers are in either
1633// *ListOperationsResponse.ServerResponse.Header or (if a response was
1634// returned at all) in error.(*googleapi.Error).Header. Use
1635// googleapi.IsNotModified to check whether the returned error was
1636// because http.StatusNotModified was returned.
1637func (c *OperationsListCall) Do(opts ...googleapi.CallOption) (*ListOperationsResponse, error) {
1638	gensupport.SetOptions(c.urlParams_, opts...)
1639	res, err := c.doRequest("json")
1640	if res != nil && res.StatusCode == http.StatusNotModified {
1641		if res.Body != nil {
1642			res.Body.Close()
1643		}
1644		return nil, &googleapi.Error{
1645			Code:   res.StatusCode,
1646			Header: res.Header,
1647		}
1648	}
1649	if err != nil {
1650		return nil, err
1651	}
1652	defer googleapi.CloseBody(res)
1653	if err := googleapi.CheckResponse(res); err != nil {
1654		return nil, err
1655	}
1656	ret := &ListOperationsResponse{
1657		ServerResponse: googleapi.ServerResponse{
1658			Header:         res.Header,
1659			HTTPStatusCode: res.StatusCode,
1660		},
1661	}
1662	target := &ret
1663	if err := gensupport.DecodeResponse(target, res); err != nil {
1664		return nil, err
1665	}
1666	return ret, nil
1667	// {
1668	//   "description": "Lists operations that match the specified filter in the request. If the\nserver doesn't support this method, it returns `UNIMPLEMENTED`.\n\nNOTE: the `name` binding allows API services to override the binding\nto use different resource name schemes, such as `users/*/operations`. To\noverride the binding, API services can add a binding such as\n`\"/v1/{name=users/*}/operations\"` to their service configuration.\nFor backwards compatibility, the default name includes the operations\ncollection id, however overriding users must ensure the name binding\nis the parent resource, without the operations collection id.",
1669	//   "flatPath": "v1p1beta1/operations",
1670	//   "httpMethod": "GET",
1671	//   "id": "speech.operations.list",
1672	//   "parameterOrder": [],
1673	//   "parameters": {
1674	//     "filter": {
1675	//       "description": "The standard list filter.",
1676	//       "location": "query",
1677	//       "type": "string"
1678	//     },
1679	//     "name": {
1680	//       "description": "The name of the operation's parent resource.",
1681	//       "location": "query",
1682	//       "type": "string"
1683	//     },
1684	//     "pageSize": {
1685	//       "description": "The standard list page size.",
1686	//       "format": "int32",
1687	//       "location": "query",
1688	//       "type": "integer"
1689	//     },
1690	//     "pageToken": {
1691	//       "description": "The standard list page token.",
1692	//       "location": "query",
1693	//       "type": "string"
1694	//     }
1695	//   },
1696	//   "path": "v1p1beta1/operations",
1697	//   "response": {
1698	//     "$ref": "ListOperationsResponse"
1699	//   },
1700	//   "scopes": [
1701	//     "https://www.googleapis.com/auth/cloud-platform"
1702	//   ]
1703	// }
1704
1705}
1706
1707// Pages invokes f for each page of results.
1708// A non-nil error returned from f will halt the iteration.
1709// The provided context supersedes any context provided to the Context method.
1710func (c *OperationsListCall) Pages(ctx context.Context, f func(*ListOperationsResponse) error) error {
1711	c.ctx_ = ctx
1712	defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
1713	for {
1714		x, err := c.Do()
1715		if err != nil {
1716			return err
1717		}
1718		if err := f(x); err != nil {
1719			return err
1720		}
1721		if x.NextPageToken == "" {
1722			return nil
1723		}
1724		c.PageToken(x.NextPageToken)
1725	}
1726}
1727
1728// method id "speech.projects.locations.operations.get":
1729
// ProjectsLocationsOperationsGetCall holds the state of a pending
// "speech.projects.locations.operations.get" request. Construct it via
// ProjectsLocationsOperationsService.Get and execute it with Do.
type ProjectsLocationsOperationsGetCall struct {
	s            *Service             // API service used to send the request
	name         string               // operation resource name, expanded into the URL path
	urlParams_   gensupport.URLParams // URL query parameters accumulated by the setters
	ifNoneMatch_ string               // optional ETag sent as If-None-Match
	ctx_         context.Context      // optional context controlling the HTTP request
	header_      http.Header          // extra caller-supplied HTTP headers
}
1738
1739// Get: Gets the latest state of a long-running operation.  Clients can
1740// use this
1741// method to poll the operation result at intervals as recommended by
1742// the API
1743// service.
1744func (r *ProjectsLocationsOperationsService) Get(name string) *ProjectsLocationsOperationsGetCall {
1745	c := &ProjectsLocationsOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
1746	c.name = name
1747	return c
1748}
1749
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information. The selectors are combined into the standard
// "fields" URL query parameter.
func (c *ProjectsLocationsOperationsGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsOperationsGetCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}
1757
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsLocationsOperationsGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsOperationsGetCall {
	c.ifNoneMatch_ = entityTag
	return c
}
1767
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled. It replaces any context previously set on the call.
func (c *ProjectsLocationsOperationsGetCall) Context(ctx context.Context) *ProjectsLocationsOperationsGetCall {
	c.ctx_ = ctx
	return c
}
1775
1776// Header returns an http.Header that can be modified by the caller to
1777// add HTTP headers to the request.
1778func (c *ProjectsLocationsOperationsGetCall) Header() http.Header {
1779	if c.header_ == nil {
1780		c.header_ = make(http.Header)
1781	}
1782	return c.header_
1783}
1784
1785func (c *ProjectsLocationsOperationsGetCall) doRequest(alt string) (*http.Response, error) {
1786	reqHeaders := make(http.Header)
1787	reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190926")
1788	for k, v := range c.header_ {
1789		reqHeaders[k] = v
1790	}
1791	reqHeaders.Set("User-Agent", c.s.userAgent())
1792	if c.ifNoneMatch_ != "" {
1793		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
1794	}
1795	var body io.Reader = nil
1796	c.urlParams_.Set("alt", alt)
1797	c.urlParams_.Set("prettyPrint", "false")
1798	urls := googleapi.ResolveRelative(c.s.BasePath, "v1p1beta1/{+name}")
1799	urls += "?" + c.urlParams_.Encode()
1800	req, err := http.NewRequest("GET", urls, body)
1801	if err != nil {
1802		return nil, err
1803	}
1804	req.Header = reqHeaders
1805	googleapi.Expand(req.URL, map[string]string{
1806		"name": c.name,
1807	})
1808	return gensupport.SendRequest(c.ctx_, c.s.client, req)
1809}
1810
1811// Do executes the "speech.projects.locations.operations.get" call.
1812// Exactly one of *Operation or error will be non-nil. Any non-2xx
1813// status code is an error. Response headers are in either
1814// *Operation.ServerResponse.Header or (if a response was returned at
1815// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
1816// to check whether the returned error was because
1817// http.StatusNotModified was returned.
1818func (c *ProjectsLocationsOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
1819	gensupport.SetOptions(c.urlParams_, opts...)
1820	res, err := c.doRequest("json")
1821	if res != nil && res.StatusCode == http.StatusNotModified {
1822		if res.Body != nil {
1823			res.Body.Close()
1824		}
1825		return nil, &googleapi.Error{
1826			Code:   res.StatusCode,
1827			Header: res.Header,
1828		}
1829	}
1830	if err != nil {
1831		return nil, err
1832	}
1833	defer googleapi.CloseBody(res)
1834	if err := googleapi.CheckResponse(res); err != nil {
1835		return nil, err
1836	}
1837	ret := &Operation{
1838		ServerResponse: googleapi.ServerResponse{
1839			Header:         res.Header,
1840			HTTPStatusCode: res.StatusCode,
1841		},
1842	}
1843	target := &ret
1844	if err := gensupport.DecodeResponse(target, res); err != nil {
1845		return nil, err
1846	}
1847	return ret, nil
1848	// {
1849	//   "description": "Gets the latest state of a long-running operation.  Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.",
1850	//   "flatPath": "v1p1beta1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}",
1851	//   "httpMethod": "GET",
1852	//   "id": "speech.projects.locations.operations.get",
1853	//   "parameterOrder": [
1854	//     "name"
1855	//   ],
1856	//   "parameters": {
1857	//     "name": {
1858	//       "description": "The name of the operation resource.",
1859	//       "location": "path",
1860	//       "pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$",
1861	//       "required": true,
1862	//       "type": "string"
1863	//     }
1864	//   },
1865	//   "path": "v1p1beta1/{+name}",
1866	//   "response": {
1867	//     "$ref": "Operation"
1868	//   },
1869	//   "scopes": [
1870	//     "https://www.googleapis.com/auth/cloud-platform"
1871	//   ]
1872	// }
1873
1874}
1875
1876// method id "speech.projects.locations.operations.list":
1877
// ProjectsLocationsOperationsListCall holds the state of a pending
// "speech.projects.locations.operations.list" request. Construct it via
// ProjectsLocationsOperationsService.List and execute it with Do.
type ProjectsLocationsOperationsListCall struct {
	s            *Service             // API service used to send the request
	name         string               // parent resource name, expanded into the URL path
	urlParams_   gensupport.URLParams // URL query parameters accumulated by the setters
	ifNoneMatch_ string               // optional ETag sent as If-None-Match
	ctx_         context.Context      // optional context controlling the HTTP request
	header_      http.Header          // extra caller-supplied HTTP headers
}
1886
1887// List: Lists operations that match the specified filter in the
1888// request. If the
1889// server doesn't support this method, it returns
1890// `UNIMPLEMENTED`.
1891//
1892// NOTE: the `name` binding allows API services to override the
1893// binding
1894// to use different resource name schemes, such as `users/*/operations`.
1895// To
1896// override the binding, API services can add a binding such
1897// as
1898// "/v1/{name=users/*}/operations" to their service configuration.
1899// For backwards compatibility, the default name includes the
1900// operations
1901// collection id, however overriding users must ensure the name
1902// binding
1903// is the parent resource, without the operations collection id.
1904func (r *ProjectsLocationsOperationsService) List(name string) *ProjectsLocationsOperationsListCall {
1905	c := &ProjectsLocationsOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
1906	c.name = name
1907	return c
1908}
1909
// Filter sets the optional parameter "filter": The standard list filter.
// It is sent as the "filter" URL query parameter.
func (c *ProjectsLocationsOperationsListCall) Filter(filter string) *ProjectsLocationsOperationsListCall {
	c.urlParams_.Set("filter", filter)
	return c
}
1916
1917// PageSize sets the optional parameter "pageSize": The standard list
1918// page size.
1919func (c *ProjectsLocationsOperationsListCall) PageSize(pageSize int64) *ProjectsLocationsOperationsListCall {
1920	c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
1921	return c
1922}
1923
// PageToken sets the optional parameter "pageToken": The standard list
// page token. It is sent as the "pageToken" URL query parameter.
func (c *ProjectsLocationsOperationsListCall) PageToken(pageToken string) *ProjectsLocationsOperationsListCall {
	c.urlParams_.Set("pageToken", pageToken)
	return c
}
1930
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information. The selectors are combined into the standard
// "fields" URL query parameter.
func (c *ProjectsLocationsOperationsListCall) Fields(s ...googleapi.Field) *ProjectsLocationsOperationsListCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}
1938
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsLocationsOperationsListCall) IfNoneMatch(entityTag string) *ProjectsLocationsOperationsListCall {
	c.ifNoneMatch_ = entityTag
	return c
}
1948
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled. It replaces any context previously set on the call.
func (c *ProjectsLocationsOperationsListCall) Context(ctx context.Context) *ProjectsLocationsOperationsListCall {
	c.ctx_ = ctx
	return c
}
1956
1957// Header returns an http.Header that can be modified by the caller to
1958// add HTTP headers to the request.
1959func (c *ProjectsLocationsOperationsListCall) Header() http.Header {
1960	if c.header_ == nil {
1961		c.header_ = make(http.Header)
1962	}
1963	return c.header_
1964}
1965
1966func (c *ProjectsLocationsOperationsListCall) doRequest(alt string) (*http.Response, error) {
1967	reqHeaders := make(http.Header)
1968	reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190926")
1969	for k, v := range c.header_ {
1970		reqHeaders[k] = v
1971	}
1972	reqHeaders.Set("User-Agent", c.s.userAgent())
1973	if c.ifNoneMatch_ != "" {
1974		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
1975	}
1976	var body io.Reader = nil
1977	c.urlParams_.Set("alt", alt)
1978	c.urlParams_.Set("prettyPrint", "false")
1979	urls := googleapi.ResolveRelative(c.s.BasePath, "v1p1beta1/{+name}/operations")
1980	urls += "?" + c.urlParams_.Encode()
1981	req, err := http.NewRequest("GET", urls, body)
1982	if err != nil {
1983		return nil, err
1984	}
1985	req.Header = reqHeaders
1986	googleapi.Expand(req.URL, map[string]string{
1987		"name": c.name,
1988	})
1989	return gensupport.SendRequest(c.ctx_, c.s.client, req)
1990}
1991
1992// Do executes the "speech.projects.locations.operations.list" call.
1993// Exactly one of *ListOperationsResponse or error will be non-nil. Any
1994// non-2xx status code is an error. Response headers are in either
1995// *ListOperationsResponse.ServerResponse.Header or (if a response was
1996// returned at all) in error.(*googleapi.Error).Header. Use
1997// googleapi.IsNotModified to check whether the returned error was
1998// because http.StatusNotModified was returned.
1999func (c *ProjectsLocationsOperationsListCall) Do(opts ...googleapi.CallOption) (*ListOperationsResponse, error) {
2000	gensupport.SetOptions(c.urlParams_, opts...)
2001	res, err := c.doRequest("json")
2002	if res != nil && res.StatusCode == http.StatusNotModified {
2003		if res.Body != nil {
2004			res.Body.Close()
2005		}
2006		return nil, &googleapi.Error{
2007			Code:   res.StatusCode,
2008			Header: res.Header,
2009		}
2010	}
2011	if err != nil {
2012		return nil, err
2013	}
2014	defer googleapi.CloseBody(res)
2015	if err := googleapi.CheckResponse(res); err != nil {
2016		return nil, err
2017	}
2018	ret := &ListOperationsResponse{
2019		ServerResponse: googleapi.ServerResponse{
2020			Header:         res.Header,
2021			HTTPStatusCode: res.StatusCode,
2022		},
2023	}
2024	target := &ret
2025	if err := gensupport.DecodeResponse(target, res); err != nil {
2026		return nil, err
2027	}
2028	return ret, nil
2029	// {
2030	//   "description": "Lists operations that match the specified filter in the request. If the\nserver doesn't support this method, it returns `UNIMPLEMENTED`.\n\nNOTE: the `name` binding allows API services to override the binding\nto use different resource name schemes, such as `users/*/operations`. To\noverride the binding, API services can add a binding such as\n`\"/v1/{name=users/*}/operations\"` to their service configuration.\nFor backwards compatibility, the default name includes the operations\ncollection id, however overriding users must ensure the name binding\nis the parent resource, without the operations collection id.",
2031	//   "flatPath": "v1p1beta1/projects/{projectsId}/locations/{locationsId}/operations",
2032	//   "httpMethod": "GET",
2033	//   "id": "speech.projects.locations.operations.list",
2034	//   "parameterOrder": [
2035	//     "name"
2036	//   ],
2037	//   "parameters": {
2038	//     "filter": {
2039	//       "description": "The standard list filter.",
2040	//       "location": "query",
2041	//       "type": "string"
2042	//     },
2043	//     "name": {
2044	//       "description": "The name of the operation's parent resource.",
2045	//       "location": "path",
2046	//       "pattern": "^projects/[^/]+/locations/[^/]+$",
2047	//       "required": true,
2048	//       "type": "string"
2049	//     },
2050	//     "pageSize": {
2051	//       "description": "The standard list page size.",
2052	//       "format": "int32",
2053	//       "location": "query",
2054	//       "type": "integer"
2055	//     },
2056	//     "pageToken": {
2057	//       "description": "The standard list page token.",
2058	//       "location": "query",
2059	//       "type": "string"
2060	//     }
2061	//   },
2062	//   "path": "v1p1beta1/{+name}/operations",
2063	//   "response": {
2064	//     "$ref": "ListOperationsResponse"
2065	//   },
2066	//   "scopes": [
2067	//     "https://www.googleapis.com/auth/cloud-platform"
2068	//   ]
2069	// }
2070
2071}
2072
2073// Pages invokes f for each page of results.
2074// A non-nil error returned from f will halt the iteration.
2075// The provided context supersedes any context provided to the Context method.
2076func (c *ProjectsLocationsOperationsListCall) Pages(ctx context.Context, f func(*ListOperationsResponse) error) error {
2077	c.ctx_ = ctx
2078	defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
2079	for {
2080		x, err := c.Do()
2081		if err != nil {
2082			return err
2083		}
2084		if err := f(x); err != nil {
2085			return err
2086		}
2087		if x.NextPageToken == "" {
2088			return nil
2089		}
2090		c.PageToken(x.NextPageToken)
2091	}
2092}
2093
2094// method id "speech.speech.longrunningrecognize":
2095
// SpeechLongrunningrecognizeCall holds the state of a pending
// "speech.speech.longrunningrecognize" request. Construct it via
// SpeechService.Longrunningrecognize and execute it with Do.
type SpeechLongrunningrecognizeCall struct {
	s                           *Service                     // API service used to send the request
	longrunningrecognizerequest *LongRunningRecognizeRequest // JSON-encoded request body
	urlParams_                  gensupport.URLParams         // URL query parameters accumulated by the setters
	ctx_                        context.Context              // optional context controlling the HTTP request
	header_                     http.Header                  // extra caller-supplied HTTP headers
}
2103
2104// Longrunningrecognize: Performs asynchronous speech recognition:
2105// receive results via the
2106// google.longrunning.Operations interface. Returns either
2107// an
2108// `Operation.error` or an `Operation.response` which contains
2109// a `LongRunningRecognizeResponse` message.
2110// For more information on asynchronous speech recognition, see
2111// the
2112// [how-to](https://cloud.google.com/speech-to-text/docs/async-recogn
2113// ize).
2114func (r *SpeechService) Longrunningrecognize(longrunningrecognizerequest *LongRunningRecognizeRequest) *SpeechLongrunningrecognizeCall {
2115	c := &SpeechLongrunningrecognizeCall{s: r.s, urlParams_: make(gensupport.URLParams)}
2116	c.longrunningrecognizerequest = longrunningrecognizerequest
2117	return c
2118}
2119
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information. The selectors are combined into the standard
// "fields" URL query parameter.
func (c *SpeechLongrunningrecognizeCall) Fields(s ...googleapi.Field) *SpeechLongrunningrecognizeCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}
2127
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled. It replaces any context previously set on the call.
func (c *SpeechLongrunningrecognizeCall) Context(ctx context.Context) *SpeechLongrunningrecognizeCall {
	c.ctx_ = ctx
	return c
}
2135
2136// Header returns an http.Header that can be modified by the caller to
2137// add HTTP headers to the request.
2138func (c *SpeechLongrunningrecognizeCall) Header() http.Header {
2139	if c.header_ == nil {
2140		c.header_ = make(http.Header)
2141	}
2142	return c.header_
2143}
2144
2145func (c *SpeechLongrunningrecognizeCall) doRequest(alt string) (*http.Response, error) {
2146	reqHeaders := make(http.Header)
2147	reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190926")
2148	for k, v := range c.header_ {
2149		reqHeaders[k] = v
2150	}
2151	reqHeaders.Set("User-Agent", c.s.userAgent())
2152	var body io.Reader = nil
2153	body, err := googleapi.WithoutDataWrapper.JSONReader(c.longrunningrecognizerequest)
2154	if err != nil {
2155		return nil, err
2156	}
2157	reqHeaders.Set("Content-Type", "application/json")
2158	c.urlParams_.Set("alt", alt)
2159	c.urlParams_.Set("prettyPrint", "false")
2160	urls := googleapi.ResolveRelative(c.s.BasePath, "v1p1beta1/speech:longrunningrecognize")
2161	urls += "?" + c.urlParams_.Encode()
2162	req, err := http.NewRequest("POST", urls, body)
2163	if err != nil {
2164		return nil, err
2165	}
2166	req.Header = reqHeaders
2167	return gensupport.SendRequest(c.ctx_, c.s.client, req)
2168}
2169
2170// Do executes the "speech.speech.longrunningrecognize" call.
2171// Exactly one of *Operation or error will be non-nil. Any non-2xx
2172// status code is an error. Response headers are in either
2173// *Operation.ServerResponse.Header or (if a response was returned at
2174// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
2175// to check whether the returned error was because
2176// http.StatusNotModified was returned.
2177func (c *SpeechLongrunningrecognizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
2178	gensupport.SetOptions(c.urlParams_, opts...)
2179	res, err := c.doRequest("json")
2180	if res != nil && res.StatusCode == http.StatusNotModified {
2181		if res.Body != nil {
2182			res.Body.Close()
2183		}
2184		return nil, &googleapi.Error{
2185			Code:   res.StatusCode,
2186			Header: res.Header,
2187		}
2188	}
2189	if err != nil {
2190		return nil, err
2191	}
2192	defer googleapi.CloseBody(res)
2193	if err := googleapi.CheckResponse(res); err != nil {
2194		return nil, err
2195	}
2196	ret := &Operation{
2197		ServerResponse: googleapi.ServerResponse{
2198			Header:         res.Header,
2199			HTTPStatusCode: res.StatusCode,
2200		},
2201	}
2202	target := &ret
2203	if err := gensupport.DecodeResponse(target, res); err != nil {
2204		return nil, err
2205	}
2206	return ret, nil
2207	// {
2208	//   "description": "Performs asynchronous speech recognition: receive results via the\ngoogle.longrunning.Operations interface. Returns either an\n`Operation.error` or an `Operation.response` which contains\na `LongRunningRecognizeResponse` message.\nFor more information on asynchronous speech recognition, see the\n[how-to](https://cloud.google.com/speech-to-text/docs/async-recognize).",
2209	//   "flatPath": "v1p1beta1/speech:longrunningrecognize",
2210	//   "httpMethod": "POST",
2211	//   "id": "speech.speech.longrunningrecognize",
2212	//   "parameterOrder": [],
2213	//   "parameters": {},
2214	//   "path": "v1p1beta1/speech:longrunningrecognize",
2215	//   "request": {
2216	//     "$ref": "LongRunningRecognizeRequest"
2217	//   },
2218	//   "response": {
2219	//     "$ref": "Operation"
2220	//   },
2221	//   "scopes": [
2222	//     "https://www.googleapis.com/auth/cloud-platform"
2223	//   ]
2224	// }
2225
2226}
2227
2228// method id "speech.speech.recognize":
2229
// SpeechRecognizeCall holds the state of a pending
// "speech.speech.recognize" request. Construct it via
// SpeechService.Recognize and execute it with Do.
type SpeechRecognizeCall struct {
	s                *Service             // API service used to send the request
	recognizerequest *RecognizeRequest    // JSON-encoded request body
	urlParams_       gensupport.URLParams // URL query parameters accumulated by the setters
	ctx_             context.Context      // optional context controlling the HTTP request
	header_          http.Header          // extra caller-supplied HTTP headers
}
2237
2238// Recognize: Performs synchronous speech recognition: receive results
2239// after all audio
2240// has been sent and processed.
2241func (r *SpeechService) Recognize(recognizerequest *RecognizeRequest) *SpeechRecognizeCall {
2242	c := &SpeechRecognizeCall{s: r.s, urlParams_: make(gensupport.URLParams)}
2243	c.recognizerequest = recognizerequest
2244	return c
2245}
2246
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information. The selectors are combined into the standard
// "fields" URL query parameter.
func (c *SpeechRecognizeCall) Fields(s ...googleapi.Field) *SpeechRecognizeCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}
2254
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled. It replaces any context previously set on the call.
func (c *SpeechRecognizeCall) Context(ctx context.Context) *SpeechRecognizeCall {
	c.ctx_ = ctx
	return c
}
2262
2263// Header returns an http.Header that can be modified by the caller to
2264// add HTTP headers to the request.
2265func (c *SpeechRecognizeCall) Header() http.Header {
2266	if c.header_ == nil {
2267		c.header_ = make(http.Header)
2268	}
2269	return c.header_
2270}
2271
2272func (c *SpeechRecognizeCall) doRequest(alt string) (*http.Response, error) {
2273	reqHeaders := make(http.Header)
2274	reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190926")
2275	for k, v := range c.header_ {
2276		reqHeaders[k] = v
2277	}
2278	reqHeaders.Set("User-Agent", c.s.userAgent())
2279	var body io.Reader = nil
2280	body, err := googleapi.WithoutDataWrapper.JSONReader(c.recognizerequest)
2281	if err != nil {
2282		return nil, err
2283	}
2284	reqHeaders.Set("Content-Type", "application/json")
2285	c.urlParams_.Set("alt", alt)
2286	c.urlParams_.Set("prettyPrint", "false")
2287	urls := googleapi.ResolveRelative(c.s.BasePath, "v1p1beta1/speech:recognize")
2288	urls += "?" + c.urlParams_.Encode()
2289	req, err := http.NewRequest("POST", urls, body)
2290	if err != nil {
2291		return nil, err
2292	}
2293	req.Header = reqHeaders
2294	return gensupport.SendRequest(c.ctx_, c.s.client, req)
2295}
2296
2297// Do executes the "speech.speech.recognize" call.
2298// Exactly one of *RecognizeResponse or error will be non-nil. Any
2299// non-2xx status code is an error. Response headers are in either
2300// *RecognizeResponse.ServerResponse.Header or (if a response was
2301// returned at all) in error.(*googleapi.Error).Header. Use
2302// googleapi.IsNotModified to check whether the returned error was
2303// because http.StatusNotModified was returned.
2304func (c *SpeechRecognizeCall) Do(opts ...googleapi.CallOption) (*RecognizeResponse, error) {
2305	gensupport.SetOptions(c.urlParams_, opts...)
2306	res, err := c.doRequest("json")
2307	if res != nil && res.StatusCode == http.StatusNotModified {
2308		if res.Body != nil {
2309			res.Body.Close()
2310		}
2311		return nil, &googleapi.Error{
2312			Code:   res.StatusCode,
2313			Header: res.Header,
2314		}
2315	}
2316	if err != nil {
2317		return nil, err
2318	}
2319	defer googleapi.CloseBody(res)
2320	if err := googleapi.CheckResponse(res); err != nil {
2321		return nil, err
2322	}
2323	ret := &RecognizeResponse{
2324		ServerResponse: googleapi.ServerResponse{
2325			Header:         res.Header,
2326			HTTPStatusCode: res.StatusCode,
2327		},
2328	}
2329	target := &ret
2330	if err := gensupport.DecodeResponse(target, res); err != nil {
2331		return nil, err
2332	}
2333	return ret, nil
2334	// {
2335	//   "description": "Performs synchronous speech recognition: receive results after all audio\nhas been sent and processed.",
2336	//   "flatPath": "v1p1beta1/speech:recognize",
2337	//   "httpMethod": "POST",
2338	//   "id": "speech.speech.recognize",
2339	//   "parameterOrder": [],
2340	//   "parameters": {},
2341	//   "path": "v1p1beta1/speech:recognize",
2342	//   "request": {
2343	//     "$ref": "RecognizeRequest"
2344	//   },
2345	//   "response": {
2346	//     "$ref": "RecognizeResponse"
2347	//   },
2348	//   "scopes": [
2349	//     "https://www.googleapis.com/auth/cloud-platform"
2350	//   ]
2351	// }
2352
2353}
2354