1// Copyright 2019 Google LLC.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// Code generated file. DO NOT EDIT.
6
7// Package speech provides access to the Cloud Speech-to-Text API.
8//
9// This package is DEPRECATED. Use package cloud.google.com/go/speech/apiv1 instead.
10//
11// For product documentation, see: https://cloud.google.com/speech-to-text/docs/quickstart-protocol
12//
13// Creating a client
14//
15// Usage example:
16//
17//   import "google.golang.org/api/speech/v1p1beta1"
18//   ...
19//   ctx := context.Background()
20//   speechService, err := speech.NewService(ctx)
21//
22// In this example, Google Application Default Credentials are used for authentication.
23//
24// For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials.
25//
26// Other authentication options
27//
28// To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey:
29//
30//   speechService, err := speech.NewService(ctx, option.WithAPIKey("AIza..."))
31//
32// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource:
33//
34//   config := &oauth2.Config{...}
35//   // ...
36//   token, err := config.Exchange(ctx, ...)
37//   speechService, err := speech.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token)))
38//
39// See https://godoc.org/google.golang.org/api/option/ for details on options.
40package speech // import "google.golang.org/api/speech/v1p1beta1"
41
42import (
43	"bytes"
44	"context"
45	"encoding/json"
46	"errors"
47	"fmt"
48	"io"
49	"net/http"
50	"net/url"
51	"strconv"
52	"strings"
53
54	googleapi "google.golang.org/api/googleapi"
55	gensupport "google.golang.org/api/internal/gensupport"
56	option "google.golang.org/api/option"
57	htransport "google.golang.org/api/transport/http"
58)
59
60// Always reference these packages, just in case the auto-generated code
61// below doesn't.
62var _ = bytes.NewBuffer
63var _ = strconv.Itoa
64var _ = fmt.Sprintf
65var _ = json.NewDecoder
66var _ = io.Copy
67var _ = url.Parse
68var _ = gensupport.MarshalJSON
69var _ = googleapi.Version
70var _ = errors.New
71var _ = strings.Replace
72var _ = context.Canceled
73
// Identity and endpoint of the Cloud Speech-to-Text API served by this
// package.
const (
	apiId      = "speech:v1p1beta1"
	apiName    = "speech"
	apiVersion = "v1p1beta1"
	basePath   = "https://speech.googleapis.com/"
)
78
// OAuth2 scopes used by this API.

// CloudPlatformScope grants permission to view and manage your data
// across Google Cloud Platform services.
const CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform"
84
85// NewService creates a new Service.
86func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, error) {
87	scopesOption := option.WithScopes(
88		"https://www.googleapis.com/auth/cloud-platform",
89	)
90	// NOTE: prepend, so we don't override user-specified scopes.
91	opts = append([]option.ClientOption{scopesOption}, opts...)
92	client, endpoint, err := htransport.NewClient(ctx, opts...)
93	if err != nil {
94		return nil, err
95	}
96	s, err := New(client)
97	if err != nil {
98		return nil, err
99	}
100	if endpoint != "" {
101		s.BasePath = endpoint
102	}
103	return s, nil
104}
105
106// New creates a new Service. It uses the provided http.Client for requests.
107//
108// Deprecated: please use NewService instead.
109// To provide a custom HTTP client, use option.WithHTTPClient.
110// If you are using google.golang.org/api/googleapis/transport.APIKey, use option.WithAPIKey with NewService instead.
111func New(client *http.Client) (*Service, error) {
112	if client == nil {
113		return nil, errors.New("client is nil")
114	}
115	s := &Service{client: client, BasePath: basePath}
116	s.Operations = NewOperationsService(s)
117	s.Projects = NewProjectsService(s)
118	s.Speech = NewSpeechService(s)
119	return s, nil
120}
121
// Service is the root client for this API. Construct it with NewService
// (or the deprecated New); the sub-service fields are populated there.
type Service struct {
	client    *http.Client // HTTP client used for all requests
	BasePath  string // API endpoint base URL
	UserAgent string // optional additional User-Agent fragment

	Operations *OperationsService

	Projects *ProjectsService

	Speech *SpeechService
}
133
134func (s *Service) userAgent() string {
135	if s.UserAgent == "" {
136		return googleapi.UserAgent
137	}
138	return googleapi.UserAgent + " " + s.UserAgent
139}
140
141func NewOperationsService(s *Service) *OperationsService {
142	rs := &OperationsService{s: s}
143	return rs
144}
145
146type OperationsService struct {
147	s *Service
148}
149
150func NewProjectsService(s *Service) *ProjectsService {
151	rs := &ProjectsService{s: s}
152	rs.Locations = NewProjectsLocationsService(s)
153	return rs
154}
155
156type ProjectsService struct {
157	s *Service
158
159	Locations *ProjectsLocationsService
160}
161
162func NewProjectsLocationsService(s *Service) *ProjectsLocationsService {
163	rs := &ProjectsLocationsService{s: s}
164	rs.Operations = NewProjectsLocationsOperationsService(s)
165	return rs
166}
167
168type ProjectsLocationsService struct {
169	s *Service
170
171	Operations *ProjectsLocationsOperationsService
172}
173
174func NewProjectsLocationsOperationsService(s *Service) *ProjectsLocationsOperationsService {
175	rs := &ProjectsLocationsOperationsService{s: s}
176	return rs
177}
178
179type ProjectsLocationsOperationsService struct {
180	s *Service
181}
182
183func NewSpeechService(s *Service) *SpeechService {
184	rs := &SpeechService{s: s}
185	return rs
186}
187
188type SpeechService struct {
189	s *Service
190}
191
// ListOperationsResponse: The response message for
// Operations.ListOperations.
type ListOperationsResponse struct {
	// NextPageToken: The standard List next-page token.
	NextPageToken string `json:"nextPageToken,omitempty"`

	// Operations: A list of operations that matches the specified filter
	// in the request.
	Operations []*Operation `json:"operations,omitempty"`

	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields is a list of field names (e.g. "NextPageToken") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will
	// be sent to the server regardless of whether the field is empty or
	// not. This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "NextPageToken") to
	// include in API requests with the JSON null value. By default,
	// fields with empty values are omitted from API requests. However,
	// any field with an empty value appearing in NullFields will be sent
	// to the server as null. It is an error if a field in this list has a
	// non-empty value. This may be used to include null fields in Patch
	// requests.
	NullFields []string `json:"-"`
}
222
223func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) {
224	type NoMethod ListOperationsResponse
225	raw := NoMethod(*s)
226	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
227}
228
// LongRunningRecognizeMetadata: Describes the progress of a
// long-running `LongRunningRecognize` call. It is included in the
// `metadata` field of the `Operation` returned by the `GetOperation`
// call of the `google::longrunning::Operations` service.
type LongRunningRecognizeMetadata struct {
	// LastUpdateTime: Time of the most recent processing update.
	LastUpdateTime string `json:"lastUpdateTime,omitempty"`

	// ProgressPercent: Approximate percentage of audio processed thus far.
	// Guaranteed to be 100 when the audio is fully processed and the
	// results are available.
	ProgressPercent int64 `json:"progressPercent,omitempty"`

	// StartTime: Time when the request was received.
	StartTime string `json:"startTime,omitempty"`

	// ForceSendFields is a list of field names (e.g. "LastUpdateTime") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will
	// be sent to the server regardless of whether the field is empty or
	// not. This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "LastUpdateTime") to
	// include in API requests with the JSON null value. By default,
	// fields with empty values are omitted from API requests. However,
	// any field with an empty value appearing in NullFields will be sent
	// to the server as null. It is an error if a field in this list has a
	// non-empty value. This may be used to include null fields in Patch
	// requests.
	NullFields []string `json:"-"`
}
263
264func (s *LongRunningRecognizeMetadata) MarshalJSON() ([]byte, error) {
265	type NoMethod LongRunningRecognizeMetadata
266	raw := NoMethod(*s)
267	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
268}
269
// LongRunningRecognizeRequest: The top-level message sent by the client
// for the `LongRunningRecognize` method.
type LongRunningRecognizeRequest struct {
	// Audio: Required. The audio data to be recognized.
	Audio *RecognitionAudio `json:"audio,omitempty"`

	// Config: Required. Provides information to the recognizer that
	// specifies how to process the request.
	Config *RecognitionConfig `json:"config,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Audio") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will
	// be sent to the server regardless of whether the field is empty or
	// not. This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Audio") to include in
	// API requests with the JSON null value. By default, fields with
	// empty values are omitted from API requests. However, any field with
	// an empty value appearing in NullFields will be sent to the server
	// as null. It is an error if a field in this list has a non-empty
	// value. This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
298
299func (s *LongRunningRecognizeRequest) MarshalJSON() ([]byte, error) {
300	type NoMethod LongRunningRecognizeRequest
301	raw := NoMethod(*s)
302	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
303}
304
// LongRunningRecognizeResponse: The only message returned to the client
// by the `LongRunningRecognize` method. It contains the result as zero
// or more sequential `SpeechRecognitionResult` messages. It is included
// in the `result.response` field of the `Operation` returned by the
// `GetOperation` call of the `google::longrunning::Operations` service.
type LongRunningRecognizeResponse struct {
	// Results: Sequential list of transcription results corresponding to
	// sequential portions of audio.
	Results []*SpeechRecognitionResult `json:"results,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Results") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will
	// be sent to the server regardless of whether the field is empty or
	// not. This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Results") to include in
	// API requests with the JSON null value. By default, fields with
	// empty values are omitted from API requests. However, any field with
	// an empty value appearing in NullFields will be sent to the server
	// as null. It is an error if a field in this list has a non-empty
	// value. This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
336
337func (s *LongRunningRecognizeResponse) MarshalJSON() ([]byte, error) {
338	type NoMethod LongRunningRecognizeResponse
339	raw := NoMethod(*s)
340	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
341}
342
// Operation: This resource represents a long-running operation that is
// the result of a network API call.
type Operation struct {
	// Done: If the value is `false`, it means the operation is still in
	// progress. If `true`, the operation is completed, and either `error`
	// or `response` is available.
	Done bool `json:"done,omitempty"`

	// Error: The error result of the operation in case of failure or
	// cancellation.
	Error *Status `json:"error,omitempty"`

	// Metadata: Service-specific metadata associated with the operation.
	// It typically contains progress information and common metadata such
	// as create time. Some services might not provide such metadata. Any
	// method that returns a long-running operation should document the
	// metadata type, if any.
	Metadata googleapi.RawMessage `json:"metadata,omitempty"`

	// Name: The server-assigned name, which is only unique within the
	// same service that originally returns it. If you use the default
	// HTTP mapping, the `name` should be a resource name ending with
	// `operations/{unique_id}`.
	Name string `json:"name,omitempty"`

	// Response: The normal response of the operation in case of success.
	// If the original method returns no data on success, such as
	// `Delete`, the response is `google.protobuf.Empty`. If the original
	// method is standard `Get`/`Create`/`Update`, the response should be
	// the resource. For other methods, the response should have the type
	// `XxxResponse`, where `Xxx` is the original method name. For
	// example, if the original method name is `TakeSnapshot()`, the
	// inferred response type is `TakeSnapshotResponse`.
	Response googleapi.RawMessage `json:"response,omitempty"`

	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields is a list of field names (e.g. "Done") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will
	// be sent to the server regardless of whether the field is empty or
	// not. This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Done") to include in API
	// requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
412
413func (s *Operation) MarshalJSON() ([]byte, error) {
414	type NoMethod Operation
415	raw := NoMethod(*s)
416	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
417}
418
// RecognitionAudio: Contains audio data in the encoding specified in
// the `RecognitionConfig`. Either `content` or `uri` must be supplied.
// Supplying both or neither returns google.rpc.Code.INVALID_ARGUMENT.
// See [content
// limits](https://cloud.google.com/speech-to-text/quotas#content).
type RecognitionAudio struct {
	// Content: The audio data bytes encoded as specified in
	// `RecognitionConfig`. Note: as with all bytes fields, proto buffers
	// use a pure binary representation, whereas JSON representations use
	// base64.
	Content string `json:"content,omitempty"`

	// Uri: URI that points to a file that contains audio data bytes as
	// specified in `RecognitionConfig`. The file must not be compressed
	// (for example, gzip). Currently, only Google Cloud Storage URIs are
	// supported, which must be specified in the following format:
	// `gs://bucket_name/object_name` (other URI formats return
	// google.rpc.Code.INVALID_ARGUMENT). For more information, see
	// [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
	Uri string `json:"uri,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Content") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will
	// be sent to the server regardless of whether the field is empty or
	// not. This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Content") to include in
	// API requests with the JSON null value. By default, fields with
	// empty values are omitted from API requests. However, any field with
	// an empty value appearing in NullFields will be sent to the server
	// as null. It is an error if a field in this list has a non-empty
	// value. This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
463
464func (s *RecognitionAudio) MarshalJSON() ([]byte, error) {
465	type NoMethod RecognitionAudio
466	raw := NoMethod(*s)
467	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
468}
469
// RecognitionConfig: Provides information to the recognizer that
// specifies how to process the request.
type RecognitionConfig struct {
	// AlternativeLanguageCodes: A list of up to 3 additional
	// [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language
	// tags, listing possible alternative languages of the supplied audio.
	// See [Language
	// Support](https://cloud.google.com/speech-to-text/docs/languages)
	// for a list of the currently supported language codes. If
	// alternative languages are listed, recognition result will contain
	// recognition in the most likely language detected including the main
	// language_code. The recognition result will include the language tag
	// of the language detected in the audio. Note: This feature is only
	// supported for Voice Command and Voice Search use cases and
	// performance may vary for other use cases (e.g., phone call
	// transcription).
	AlternativeLanguageCodes []string `json:"alternativeLanguageCodes,omitempty"`

	// AudioChannelCount: The number of channels in the input audio data.
	// ONLY set this for MULTI-CHANNEL recognition. Valid values for
	// LINEAR16 and FLAC are `1`-`8`. Valid values for OGG_OPUS are
	// '1'-'254'. Valid value for MULAW, AMR, AMR_WB and
	// SPEEX_WITH_HEADER_BYTE is only `1`. If `0` or omitted, defaults to
	// one channel (mono). Note: We only recognize the first channel by
	// default. To perform independent recognition on each channel set
	// `enable_separate_recognition_per_channel` to 'true'.
	AudioChannelCount int64 `json:"audioChannelCount,omitempty"`

	// DiarizationConfig: Config to enable speaker diarization and set
	// additional parameters to make diarization better suited for your
	// application. Note: When this is enabled, we send all the words from
	// the beginning of the audio for the top alternative in every
	// consecutive STREAMING responses. This is done in order to improve
	// our speaker tags as our models learn to identify the speakers in
	// the conversation over time. For non-streaming requests, the
	// diarization results will be provided only in the top alternative of
	// the FINAL SpeechRecognitionResult.
	DiarizationConfig *SpeakerDiarizationConfig `json:"diarizationConfig,omitempty"`

	// DiarizationSpeakerCount: If set, specifies the estimated number of
	// speakers in the conversation. Defaults to '2'. Ignored unless
	// enable_speaker_diarization is set to true. Note: Use
	// diarization_config instead.
	DiarizationSpeakerCount int64 `json:"diarizationSpeakerCount,omitempty"`

	// EnableAutomaticPunctuation: If 'true', adds punctuation to
	// recognition result hypotheses. This feature is only available in
	// select languages. Setting this for requests in other languages has
	// no effect at all. The default 'false' value does not add
	// punctuation to result hypotheses. Note: This is currently offered
	// as an experimental service, complimentary to all users. In the
	// future this may be exclusively available as a premium feature.
	EnableAutomaticPunctuation bool `json:"enableAutomaticPunctuation,omitempty"`

	// EnableSeparateRecognitionPerChannel: This needs to be set to `true`
	// explicitly and `audio_channel_count` > 1 to get each channel
	// recognized separately. The recognition result will contain a
	// `channel_tag` field to state which channel that result belongs to.
	// If this is not true, we will only recognize the first channel. The
	// request is billed cumulatively for all channels recognized:
	// `audio_channel_count` multiplied by the length of the audio.
	EnableSeparateRecognitionPerChannel bool `json:"enableSeparateRecognitionPerChannel,omitempty"`

	// EnableSpeakerDiarization: If 'true', enables speaker detection for
	// each recognized word in the top alternative of the recognition
	// result using a speaker_tag provided in the WordInfo. Note: Use
	// diarization_config instead.
	EnableSpeakerDiarization bool `json:"enableSpeakerDiarization,omitempty"`

	// EnableWordConfidence: If `true`, the top result includes a list of
	// words and the confidence for those words. If `false`, no word-level
	// confidence information is returned. The default is `false`.
	EnableWordConfidence bool `json:"enableWordConfidence,omitempty"`

	// EnableWordTimeOffsets: If `true`, the top result includes a list of
	// words and the start and end time offsets (timestamps) for those
	// words. If `false`, no word-level time offset information is
	// returned. The default is `false`.
	EnableWordTimeOffsets bool `json:"enableWordTimeOffsets,omitempty"`

	// Encoding: Encoding of audio data sent in all `RecognitionAudio`
	// messages. This field is optional for `FLAC` and `WAV` audio files
	// and required for all other audio formats. For details, see
	// AudioEncoding.
	//
	// Possible values:
	//   "ENCODING_UNSPECIFIED" - Not specified.
	//   "LINEAR16" - Uncompressed 16-bit signed little-endian samples
	// (Linear PCM).
	//   "FLAC" - `FLAC` (Free Lossless Audio Codec) is the recommended
	// encoding because it is lossless--therefore recognition is not
	// compromised--and requires only about half the bandwidth of
	// `LINEAR16`. `FLAC` stream encoding supports 16-bit and 24-bit
	// samples, however, not all fields in `STREAMINFO` are supported.
	//   "MULAW" - 8-bit samples that compand 14-bit audio samples using
	// G.711 PCMU/mu-law.
	//   "AMR" - Adaptive Multi-Rate Narrowband codec. `sample_rate_hertz`
	// must be 8000.
	//   "AMR_WB" - Adaptive Multi-Rate Wideband codec.
	// `sample_rate_hertz` must be 16000.
	//   "OGG_OPUS" - Opus encoded audio frames in Ogg container
	// ([OggOpus](https://wiki.xiph.org/OggOpus)). `sample_rate_hertz`
	// must be one of 8000, 12000, 16000, 24000, or 48000.
	//   "SPEEX_WITH_HEADER_BYTE" - Although the use of lossy encodings is
	// not recommended, if a very low bitrate encoding is required,
	// `OGG_OPUS` is highly preferred over Speex encoding. The
	// [Speex](https://speex.org/) encoding supported by Cloud Speech API
	// has a header byte in each block, as in MIME type
	// `audio/x-speex-with-header-byte`. It is a variant of the RTP Speex
	// encoding defined in [RFC
	// 5574](https://tools.ietf.org/html/rfc5574). The stream is a
	// sequence of blocks, one block per RTP packet. Each block starts
	// with a byte containing the length of the block, in bytes, followed
	// by one or more frames of Speex data, padded to an integral number
	// of bytes (octets) as specified in RFC 5574. In other words, each
	// RTP header is replaced with a single byte containing the block
	// length. Only Speex wideband is supported. `sample_rate_hertz` must
	// be 16000.
	//   "MP3" - MP3 audio. Support all standard MP3 bitrates (which range
	// from 32-320 kbps). When using this encoding, `sample_rate_hertz`
	// can be optionally unset if not known.
	Encoding string `json:"encoding,omitempty"`

	// LanguageCode: Required. The language of the supplied audio as a
	// [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language
	// tag. Example: "en-US". See [Language
	// Support](https://cloud.google.com/speech-to-text/docs/languages)
	// for a list of the currently supported language codes.
	LanguageCode string `json:"languageCode,omitempty"`

	// MaxAlternatives: Maximum number of recognition hypotheses to be
	// returned. Specifically, the maximum number of
	// `SpeechRecognitionAlternative` messages within each
	// `SpeechRecognitionResult`. The server may return fewer than
	// `max_alternatives`. Valid values are `0`-`30`. A value of `0` or
	// `1` will return a maximum of one. If omitted, will return a maximum
	// of one.
	MaxAlternatives int64 `json:"maxAlternatives,omitempty"`

	// Metadata: Metadata regarding this request.
	Metadata *RecognitionMetadata `json:"metadata,omitempty"`

	// Model: Which model to select for the given request. Select the
	// model best suited to your domain to get best results. If a model is
	// not explicitly specified, then we auto-select a model based on the
	// parameters in the RecognitionConfig.
	// <table>
	//   <tr>
	//     <td><b>Model</b></td>
	//     <td><b>Description</b></td>
	//   </tr>
	//   <tr>
	//     <td><code>command_and_search</code></td>
	//     <td>Best for short queries such as voice commands or voice
	//     search.</td>
	//   </tr>
	//   <tr>
	//     <td><code>phone_call</code></td>
	//     <td>Best for audio that originated from a phone call (typically
	//     recorded at an 8khz sampling rate).</td>
	//   </tr>
	//   <tr>
	//     <td><code>video</code></td>
	//     <td>Best for audio that originated from from video or includes
	//         multiple speakers. Ideally the audio is recorded at a 16khz
	//         or greater sampling rate. This is a premium model that
	//         costs more than the standard rate.</td>
	//   </tr>
	//   <tr>
	//     <td><code>default</code></td>
	//     <td>Best for audio that is not one of the specific audio models.
	//         For example, long-form audio. Ideally the audio is
	//         high-fidelity, recorded at a 16khz or greater sampling
	//         rate.</td>
	//   </tr>
	// </table>
	Model string `json:"model,omitempty"`

	// ProfanityFilter: If set to `true`, the server will attempt to
	// filter out profanities, replacing all but the initial character in
	// each filtered word with asterisks, e.g. "f***". If set to `false`
	// or omitted, profanities won't be filtered out.
	ProfanityFilter bool `json:"profanityFilter,omitempty"`

	// SampleRateHertz: Sample rate in Hertz of the audio data sent in all
	// `RecognitionAudio` messages. Valid values are: 8000-48000. 16000 is
	// optimal. For best results, set the sampling rate of the audio
	// source to 16000 Hz. If that's not possible, use the native sample
	// rate of the audio source (instead of re-sampling). This field is
	// optional for FLAC and WAV audio files, but is required for all
	// other audio formats. For details, see AudioEncoding.
	SampleRateHertz int64 `json:"sampleRateHertz,omitempty"`

	// SpeechContexts: Array of SpeechContext. A means to provide context
	// to assist the speech recognition. For more information, see [speech
	// adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
	SpeechContexts []*SpeechContext `json:"speechContexts,omitempty"`

	// UseEnhanced: Set to true to use an enhanced model for speech
	// recognition. If `use_enhanced` is set to true and the `model` field
	// is not set, then an appropriate enhanced model is chosen if an
	// enhanced model exists for the audio.
	//
	// If `use_enhanced` is true and an enhanced version of the specified
	// model does not exist, then the speech is recognized using the
	// standard version of the specified model.
	UseEnhanced bool `json:"useEnhanced,omitempty"`

	// ForceSendFields is a list of field names (e.g.
	// "AlternativeLanguageCodes") to unconditionally include in API
	// requests. By default, fields with empty values are omitted from API
	// requests. However, any non-pointer, non-interface field appearing
	// in ForceSendFields will be sent to the server regardless of whether
	// the field is empty or not. This may be used to include empty fields
	// in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g.
	// "AlternativeLanguageCodes") to include in API requests with the
	// JSON null value. By default, fields with empty values are omitted
	// from API requests. However, any field with an empty value appearing
	// in NullFields will be sent to the server as null. It is an error if
	// a field in this list has a non-empty value. This may be used to
	// include null fields in Patch requests.
	NullFields []string `json:"-"`
}
776
777func (s *RecognitionConfig) MarshalJSON() ([]byte, error) {
778	type NoMethod RecognitionConfig
779	raw := NoMethod(*s)
780	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
781}
782
// RecognitionMetadata: Description of audio data to be recognized.
type RecognitionMetadata struct {
	// AudioTopic: Description of the content. Eg. "Recordings of federal
	// supreme court hearings from 2012".
	AudioTopic string `json:"audioTopic,omitempty"`

	// IndustryNaicsCodeOfAudio: The industry vertical to which this speech
	// recognition request most closely applies. This is most indicative of
	// the topics contained in the audio. Use the 6-digit NAICS code to
	// identify the industry vertical - see https://www.naics.com/search/.
	IndustryNaicsCodeOfAudio int64 `json:"industryNaicsCodeOfAudio,omitempty"`

	// InteractionType: The use case most closely describing the audio
	// content to be recognized.
	//
	// Possible values:
	//   "INTERACTION_TYPE_UNSPECIFIED" - Use case is either unknown or is
	// something other than one of the other values below.
	//   "DISCUSSION" - Multiple people in a conversation or discussion. For
	// example in a meeting with two or more people actively participating.
	// Typically all the primary people speaking would be in the same room
	// (if not, see PHONE_CALL)
	//   "PRESENTATION" - One or more persons lecturing or presenting to
	// others, mostly uninterrupted.
	//   "PHONE_CALL" - A phone-call or video-conference in which two or
	// more people, who are not in the same room, are actively
	// participating.
	//   "VOICEMAIL" - A recorded message intended for another person to
	// listen to.
	//   "PROFESSIONALLY_PRODUCED" - Professionally produced audio (eg. TV
	// Show, Podcast).
	//   "VOICE_SEARCH" - Transcribe spoken questions and queries into text.
	//   "VOICE_COMMAND" - Transcribe voice commands, such as for
	// controlling a device.
	//   "DICTATION" - Transcribe speech to text to create a written
	// document, such as a text-message, email or report.
	InteractionType string `json:"interactionType,omitempty"`

	// MicrophoneDistance: The audio type that most closely describes the
	// audio being recognized.
	//
	// Possible values:
	//   "MICROPHONE_DISTANCE_UNSPECIFIED" - Audio type is not known.
	//   "NEARFIELD" - The audio was captured from a closely placed
	// microphone. Eg. phone, dictaphone, or handheld microphone. Generally
	// if the speaker is within 1 meter of the microphone.
	//   "MIDFIELD" - The speaker is within 3 meters of the microphone.
	//   "FARFIELD" - The speaker is more than 3 meters away from the
	// microphone.
	MicrophoneDistance string `json:"microphoneDistance,omitempty"`

	// ObfuscatedId: Obfuscated (privacy-protected) ID of the user, to
	// identify the number of unique users using the service.
	// (Serialized as a JSON string - note the ",string" tag option -
	// so 64-bit values survive JavaScript JSON parsers.)
	ObfuscatedId int64 `json:"obfuscatedId,omitempty,string"`

	// OriginalMediaType: The original media the speech was recorded on.
	//
	// Possible values:
	//   "ORIGINAL_MEDIA_TYPE_UNSPECIFIED" - Unknown original media type.
	//   "AUDIO" - The speech data is an audio recording.
	//   "VIDEO" - The speech data originally recorded on a video.
	OriginalMediaType string `json:"originalMediaType,omitempty"`

	// OriginalMimeType: Mime type of the original audio file. For example
	// `audio/m4a`, `audio/x-alaw-basic`, `audio/mp3`, `audio/3gpp`.
	// A list of possible audio mime types is maintained at
	// http://www.iana.org/assignments/media-types/media-types.xhtml#audio
	OriginalMimeType string `json:"originalMimeType,omitempty"`

	// RecordingDeviceName: The device used to make the recording. Examples
	// 'Nexus 5X' or 'Polycom SoundStation IP 6000' or 'POTS' or 'VoIP' or
	// 'Cardioid Microphone'.
	RecordingDeviceName string `json:"recordingDeviceName,omitempty"`

	// RecordingDeviceType: The type of device the speech was recorded with.
	//
	// Possible values:
	//   "RECORDING_DEVICE_TYPE_UNSPECIFIED" - The recording device is
	// unknown.
	//   "SMARTPHONE" - Speech was recorded on a smartphone.
	//   "PC" - Speech was recorded using a personal computer or tablet.
	//   "PHONE_LINE" - Speech was recorded over a phone line.
	//   "VEHICLE" - Speech was recorded in a vehicle.
	//   "OTHER_OUTDOOR_DEVICE" - Speech was recorded outdoors.
	//   "OTHER_INDOOR_DEVICE" - Speech was recorded indoors.
	RecordingDeviceType string `json:"recordingDeviceType,omitempty"`

	// ForceSendFields is a list of field names (e.g. "AudioTopic") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "AudioTopic") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
900
901func (s *RecognitionMetadata) MarshalJSON() ([]byte, error) {
902	type NoMethod RecognitionMetadata
903	raw := NoMethod(*s)
904	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
905}
906
// RecognizeRequest: The top-level message sent by the client for the
// `Recognize` method.
type RecognizeRequest struct {
	// Audio: Required. The audio data to be recognized.
	Audio *RecognitionAudio `json:"audio,omitempty"`

	// Config: Required. Provides information to the recognizer that
	// specifies how to process the request.
	Config *RecognitionConfig `json:"config,omitempty"`

	// Name: Use the `model` field in RecognitionConfig instead.
	Name string `json:"name,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Audio") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Audio") to include in API
	// requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
937
938func (s *RecognizeRequest) MarshalJSON() ([]byte, error) {
939	type NoMethod RecognizeRequest
940	raw := NoMethod(*s)
941	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
942}
943
// RecognizeResponse: The only message returned to the client by the
// `Recognize` method. It contains the result as zero or more sequential
// `SpeechRecognitionResult` messages.
type RecognizeResponse struct {
	// Results: Sequential list of transcription results corresponding to
	// sequential portions of audio.
	Results []*SpeechRecognitionResult `json:"results,omitempty"`

	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields is a list of field names (e.g. "Results") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Results") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
975
976func (s *RecognizeResponse) MarshalJSON() ([]byte, error) {
977	type NoMethod RecognizeResponse
978	raw := NoMethod(*s)
979	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
980}
981
// SpeakerDiarizationConfig: Config to enable speaker diarization.
type SpeakerDiarizationConfig struct {
	// EnableSpeakerDiarization: If 'true', enables speaker detection for
	// each recognized word in the top alternative of the recognition
	// result using a speaker_tag provided in the WordInfo.
	EnableSpeakerDiarization bool `json:"enableSpeakerDiarization,omitempty"`

	// MaxSpeakerCount: Maximum number of speakers in the conversation.
	// This range gives you more flexibility by allowing the system to
	// automatically determine the correct number of speakers. If not set,
	// the default value is 6.
	MaxSpeakerCount int64 `json:"maxSpeakerCount,omitempty"`

	// MinSpeakerCount: Minimum number of speakers in the conversation.
	// This range gives you more flexibility by allowing the system to
	// automatically determine the correct number of speakers. If not set,
	// the default value is 2.
	MinSpeakerCount int64 `json:"minSpeakerCount,omitempty"`

	// SpeakerTag: Output only. Unused.
	SpeakerTag int64 `json:"speakerTag,omitempty"`

	// ForceSendFields is a list of field names (e.g.
	// "EnableSpeakerDiarization") to unconditionally include in API
	// requests. By default, fields with empty values are omitted from API
	// requests. However, any non-pointer, non-interface field appearing in
	// ForceSendFields will be sent to the server regardless of whether the
	// field is empty or not. This may be used to include empty fields in
	// Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "EnableSpeakerDiarization")
	// to include in API requests with the JSON null value. By default,
	// fields with empty values are omitted from API requests. However, any
	// field with an empty value appearing in NullFields will be sent to the
	// server as null. It is an error if a field in this list has a
	// non-empty value. This may be used to include null fields in Patch
	// requests.
	NullFields []string `json:"-"`
}
1026
1027func (s *SpeakerDiarizationConfig) MarshalJSON() ([]byte, error) {
1028	type NoMethod SpeakerDiarizationConfig
1029	raw := NoMethod(*s)
1030	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
1031}
1032
// SpeechContext: Provides "hints" to the speech recognizer to favor
// specific words and phrases in the results.
type SpeechContext struct {
	// Boost: Hint Boost. Positive value will increase the probability that
	// a specific phrase will be recognized over other similar sounding
	// phrases. The higher the boost, the higher the chance of false
	// positive recognition as well. Negative boost values would correspond
	// to anti-biasing. Anti-biasing is not enabled, so negative boost will
	// simply be ignored. Though `boost` can accept a wide range of
	// positive values, most use cases are best served with values between
	// 0 and 20. We recommend using a binary search approach to finding the
	// optimal value for your use case.
	Boost float64 `json:"boost,omitempty"`

	// Phrases: A list of strings containing words and phrases "hints" so
	// that the speech recognition is more likely to recognize them. This
	// can be used to improve the accuracy for specific words and phrases,
	// for example, if specific commands are typically spoken by the user.
	// This can also be used to add additional words to the vocabulary of
	// the recognizer. See [usage
	// limits](https://cloud.google.com/speech-to-text/quotas#content).
	//
	// List items can also be set to classes for groups of words that
	// represent common concepts that occur in natural language. For
	// example, rather than providing phrase hints for every month of the
	// year, using the $MONTH class improves the likelihood of correctly
	// transcribing audio that includes months.
	Phrases []string `json:"phrases,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Boost") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Boost") to include in API
	// requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
1095
1096func (s *SpeechContext) MarshalJSON() ([]byte, error) {
1097	type NoMethod SpeechContext
1098	raw := NoMethod(*s)
1099	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
1100}
1101
1102func (s *SpeechContext) UnmarshalJSON(data []byte) error {
1103	type NoMethod SpeechContext
1104	var s1 struct {
1105		Boost gensupport.JSONFloat64 `json:"boost"`
1106		*NoMethod
1107	}
1108	s1.NoMethod = (*NoMethod)(s)
1109	if err := json.Unmarshal(data, &s1); err != nil {
1110		return err
1111	}
1112	s.Boost = float64(s1.Boost)
1113	return nil
1114}
1115
// SpeechRecognitionAlternative: Alternative hypotheses (a.k.a. n-best
// list).
type SpeechRecognitionAlternative struct {
	// Confidence: The confidence estimate between 0.0 and 1.0. A higher
	// number indicates an estimated greater likelihood that the recognized
	// words are correct. This field is set only for the top alternative of
	// a non-streaming result or, of a streaming result where
	// `is_final=true`. This field is not guaranteed to be accurate and
	// users should not rely on it to be always provided. The default of
	// 0.0 is a sentinel value indicating `confidence` was not set.
	Confidence float64 `json:"confidence,omitempty"`

	// Transcript: Transcript text representing the words that the user
	// spoke.
	Transcript string `json:"transcript,omitempty"`

	// Words: A list of word-specific information for each recognized word.
	// Note: When `enable_speaker_diarization` is true, you will see all
	// the words from the beginning of the audio.
	Words []*WordInfo `json:"words,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Confidence") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Confidence") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
1160
1161func (s *SpeechRecognitionAlternative) MarshalJSON() ([]byte, error) {
1162	type NoMethod SpeechRecognitionAlternative
1163	raw := NoMethod(*s)
1164	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
1165}
1166
1167func (s *SpeechRecognitionAlternative) UnmarshalJSON(data []byte) error {
1168	type NoMethod SpeechRecognitionAlternative
1169	var s1 struct {
1170		Confidence gensupport.JSONFloat64 `json:"confidence"`
1171		*NoMethod
1172	}
1173	s1.NoMethod = (*NoMethod)(s)
1174	if err := json.Unmarshal(data, &s1); err != nil {
1175		return err
1176	}
1177	s.Confidence = float64(s1.Confidence)
1178	return nil
1179}
1180
// SpeechRecognitionResult: A speech recognition result corresponding to
// a portion of the audio.
type SpeechRecognitionResult struct {
	// Alternatives: May contain one or more recognition hypotheses (up to
	// the maximum specified in `max_alternatives`). These alternatives are
	// ordered in terms of accuracy, with the top (first) alternative being
	// the most probable, as ranked by the recognizer.
	Alternatives []*SpeechRecognitionAlternative `json:"alternatives,omitempty"`

	// ChannelTag: For multi-channel audio, this is the channel number
	// corresponding to the recognized result for the audio from that
	// channel. For audio_channel_count = N, its output values can range
	// from '1' to 'N'.
	ChannelTag int64 `json:"channelTag,omitempty"`

	// LanguageCode: The
	// [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag
	// of the language in this result. This language code was detected to
	// have the most likelihood of being spoken in the audio.
	LanguageCode string `json:"languageCode,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Alternatives") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Alternatives") to include
	// in API requests with the JSON null value. By default, fields with
	// empty values are omitted from API requests. However, any field with
	// an empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
1223
1224func (s *SpeechRecognitionResult) MarshalJSON() ([]byte, error) {
1225	type NoMethod SpeechRecognitionResult
1226	raw := NoMethod(*s)
1227	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
1228}
1229
// Status: The `Status` type defines a logical error model that is
// suitable for different programming environments, including REST APIs
// and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each
// `Status` message contains three pieces of data: error code, error
// message, and error details.
//
// You can find out more about this error model and how to work with it
// in the [API Design Guide](https://cloud.google.com/apis/design/errors).
type Status struct {
	// Code: The status code, which should be an enum value of
	// google.rpc.Code.
	Code int64 `json:"code,omitempty"`

	// Details: A list of messages that carry the error details. There is a
	// common set of message types for APIs to use.
	Details []googleapi.RawMessage `json:"details,omitempty"`

	// Message: A developer-facing error message, which should be in
	// English. Any user-facing error message should be localized and sent
	// in the google.rpc.Status.details field, or localized by the client.
	Message string `json:"message,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Code") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Code") to include in API
	// requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
1275
1276func (s *Status) MarshalJSON() ([]byte, error) {
1277	type NoMethod Status
1278	raw := NoMethod(*s)
1279	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
1280}
1281
// WordInfo: Word-specific information for recognized words.
type WordInfo struct {
	// Confidence: The confidence estimate between 0.0 and 1.0. A higher
	// number indicates an estimated greater likelihood that the recognized
	// words are correct. This field is set only for the top alternative of
	// a non-streaming result or, of a streaming result where
	// `is_final=true`. This field is not guaranteed to be accurate and
	// users should not rely on it to be always provided. The default of
	// 0.0 is a sentinel value indicating `confidence` was not set.
	Confidence float64 `json:"confidence,omitempty"`

	// EndTime: Time offset relative to the beginning of the audio, and
	// corresponding to the end of the spoken word. This field is only set
	// if `enable_word_time_offsets=true` and only in the top hypothesis.
	// This is an experimental feature and the accuracy of the time offset
	// can vary.
	EndTime string `json:"endTime,omitempty"`

	// SpeakerTag: A distinct integer value is assigned for every speaker
	// within the audio. This field specifies which one of those speakers
	// was detected to have spoken this word. Value ranges from '1' to
	// diarization_speaker_count. speaker_tag is set if
	// enable_speaker_diarization = 'true' and only in the top alternative.
	SpeakerTag int64 `json:"speakerTag,omitempty"`

	// StartTime: Time offset relative to the beginning of the audio, and
	// corresponding to the start of the spoken word. This field is only
	// set if `enable_word_time_offsets=true` and only in the top
	// hypothesis. This is an experimental feature and the accuracy of the
	// time offset can vary.
	StartTime string `json:"startTime,omitempty"`

	// Word: The word corresponding to this set of information.
	Word string `json:"word,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Confidence") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Confidence") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
1346
1347func (s *WordInfo) MarshalJSON() ([]byte, error) {
1348	type NoMethod WordInfo
1349	raw := NoMethod(*s)
1350	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
1351}
1352
1353func (s *WordInfo) UnmarshalJSON(data []byte) error {
1354	type NoMethod WordInfo
1355	var s1 struct {
1356		Confidence gensupport.JSONFloat64 `json:"confidence"`
1357		*NoMethod
1358	}
1359	s1.NoMethod = (*NoMethod)(s)
1360	if err := json.Unmarshal(data, &s1); err != nil {
1361		return err
1362	}
1363	s.Confidence = float64(s1.Confidence)
1364	return nil
1365}
1366
1367// method id "speech.operations.get":
1368
// OperationsGetCall holds the request state for the
// "speech.operations.get" method. It is created by
// OperationsService.Get, optionally configured via its chainable
// setters, and executed by Do.
type OperationsGetCall struct {
	s            *Service
	name         string
	urlParams_   gensupport.URLParams
	ifNoneMatch_ string
	ctx_         context.Context
	header_      http.Header
}
1377
1378// Get: Gets the latest state of a long-running operation.  Clients can
1379// use this
1380// method to poll the operation result at intervals as recommended by
1381// the API
1382// service.
1383func (r *OperationsService) Get(name string) *OperationsGetCall {
1384	c := &OperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
1385	c.name = name
1386	return c
1387}
1388
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information. Calling it again replaces the previously set
// field selector.
func (c *OperationsGetCall) Fields(s ...googleapi.Field) *OperationsGetCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}
1396
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *OperationsGetCall) IfNoneMatch(entityTag string) *OperationsGetCall {
	c.ifNoneMatch_ = entityTag
	return c
}
1406
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *OperationsGetCall) Context(ctx context.Context) *OperationsGetCall {
	c.ctx_ = ctx
	return c
}
1414
1415// Header returns an http.Header that can be modified by the caller to
1416// add HTTP headers to the request.
1417func (c *OperationsGetCall) Header() http.Header {
1418	if c.header_ == nil {
1419		c.header_ = make(http.Header)
1420	}
1421	return c.header_
1422}
1423
// doRequest builds and sends the HTTP GET request for this call.
// alt selects the response wire format ("json" when invoked from Do).
func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216")
	// Copy caller-supplied headers first; the generated headers below
	// (User-Agent, If-None-Match) are set afterwards and so win on
	// conflict.
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
	}
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1p1beta1/operations/{+name}")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	// Substitute the operation name into the {+name} path template.
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
1449
// Do executes the "speech.operations.get" call.
// Exactly one of *Operation or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *OperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// 304 Not Modified is surfaced as a *googleapi.Error so callers can
	// detect it with googleapi.IsNotModified. This check runs before the
	// err check because the transport may return a response alongside an
	// error in this case.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	// Attach the HTTP status and headers to the decoded result so callers
	// can inspect them via Operation.ServerResponse.
	ret := &Operation{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Gets the latest state of a long-running operation.  Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.",
	//   "flatPath": "v1p1beta1/operations/{operationsId}",
	//   "httpMethod": "GET",
	//   "id": "speech.operations.get",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "The name of the operation resource.",
	//       "location": "path",
	//       "pattern": "^.+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1p1beta1/operations/{+name}",
	//   "response": {
	//     "$ref": "Operation"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform"
	//   ]
	// }

}
1514
1515// method id "speech.operations.list":
1516
1517type OperationsListCall struct {
1518	s            *Service
1519	urlParams_   gensupport.URLParams
1520	ifNoneMatch_ string
1521	ctx_         context.Context
1522	header_      http.Header
1523}
1524
1525// List: Lists operations that match the specified filter in the
1526// request. If the
1527// server doesn't support this method, it returns
1528// `UNIMPLEMENTED`.
1529//
1530// NOTE: the `name` binding allows API services to override the
1531// binding
1532// to use different resource name schemes, such as `users/*/operations`.
1533// To
1534// override the binding, API services can add a binding such
1535// as
1536// "/v1/{name=users/*}/operations" to their service configuration.
1537// For backwards compatibility, the default name includes the
1538// operations
1539// collection id, however overriding users must ensure the name
1540// binding
1541// is the parent resource, without the operations collection id.
1542func (r *OperationsService) List() *OperationsListCall {
1543	c := &OperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
1544	return c
1545}
1546
// Filter sets the optional parameter "filter": The standard list
// filter. Calling it again replaces the previously set value.
func (c *OperationsListCall) Filter(filter string) *OperationsListCall {
	c.urlParams_.Set("filter", filter)
	return c
}
1553
// Name sets the optional parameter "name": The name of the operation's
// parent resource. Calling it again replaces the previously set value.
func (c *OperationsListCall) Name(name string) *OperationsListCall {
	c.urlParams_.Set("name", name)
	return c
}
1560
1561// PageSize sets the optional parameter "pageSize": The standard list
1562// page size.
1563func (c *OperationsListCall) PageSize(pageSize int64) *OperationsListCall {
1564	c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
1565	return c
1566}
1567
1568// PageToken sets the optional parameter "pageToken": The standard list
1569// page token.
1570func (c *OperationsListCall) PageToken(pageToken string) *OperationsListCall {
1571	c.urlParams_.Set("pageToken", pageToken)
1572	return c
1573}
1574
1575// Fields allows partial responses to be retrieved. See
1576// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
1577// for more information.
1578func (c *OperationsListCall) Fields(s ...googleapi.Field) *OperationsListCall {
1579	c.urlParams_.Set("fields", googleapi.CombineFields(s))
1580	return c
1581}
1582
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *OperationsListCall) IfNoneMatch(entityTag string) *OperationsListCall {
	c.ifNoneMatch_ = entityTag
	return c
}
1592
1593// Context sets the context to be used in this call's Do method. Any
1594// pending HTTP request will be aborted if the provided context is
1595// canceled.
1596func (c *OperationsListCall) Context(ctx context.Context) *OperationsListCall {
1597	c.ctx_ = ctx
1598	return c
1599}
1600
1601// Header returns an http.Header that can be modified by the caller to
1602// add HTTP headers to the request.
1603func (c *OperationsListCall) Header() http.Header {
1604	if c.header_ == nil {
1605		c.header_ = make(http.Header)
1606	}
1607	return c.header_
1608}
1609
// doRequest assembles and sends the HTTP GET request for
// "speech.operations.list", honoring c.ctx_ for cancellation.
func (c *OperationsListCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216")
	// Copy caller-supplied headers first; User-Agent below intentionally
	// overrides any caller-set value.
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
	}
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1p1beta1/operations")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
1632
// Do executes the "speech.operations.list" call.
// Exactly one of *ListOperationsResponse or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ListOperationsResponse.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *OperationsListCall) Do(opts ...googleapi.CallOption) (*ListOperationsResponse, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 Not Modified has no decodable body; surface it as a
	// *googleapi.Error so callers can detect it via googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &ListOperationsResponse{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Lists operations that match the specified filter in the request. If the\nserver doesn't support this method, it returns `UNIMPLEMENTED`.\n\nNOTE: the `name` binding allows API services to override the binding\nto use different resource name schemes, such as `users/*/operations`. To\noverride the binding, API services can add a binding such as\n`\"/v1/{name=users/*}/operations\"` to their service configuration.\nFor backwards compatibility, the default name includes the operations\ncollection id, however overriding users must ensure the name binding\nis the parent resource, without the operations collection id.",
	//   "flatPath": "v1p1beta1/operations",
	//   "httpMethod": "GET",
	//   "id": "speech.operations.list",
	//   "parameterOrder": [],
	//   "parameters": {
	//     "filter": {
	//       "description": "The standard list filter.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "name": {
	//       "description": "The name of the operation's parent resource.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "pageSize": {
	//       "description": "The standard list page size.",
	//       "format": "int32",
	//       "location": "query",
	//       "type": "integer"
	//     },
	//     "pageToken": {
	//       "description": "The standard list page token.",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1p1beta1/operations",
	//   "response": {
	//     "$ref": "ListOperationsResponse"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform"
	//   ]
	// }

}
1709
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *OperationsListCall) Pages(ctx context.Context, f func(*ListOperationsResponse) error) error {
	c.ctx_ = ctx
	defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
	for {
		x, err := c.Do()
		if err != nil {
			return err
		}
		if err := f(x); err != nil {
			return err
		}
		// An empty next-page token marks the final page.
		if x.NextPageToken == "" {
			return nil
		}
		c.PageToken(x.NextPageToken)
	}
}
1730
1731// method id "speech.projects.locations.operations.get":
1732
// ProjectsLocationsOperationsGetCall holds the state of a pending
// "speech.projects.locations.operations.get" request: the target
// operation name plus per-call parameters, ETag precondition, context
// and headers.
type ProjectsLocationsOperationsGetCall struct {
	s            *Service
	name         string
	urlParams_   gensupport.URLParams
	ifNoneMatch_ string
	ctx_         context.Context
	header_      http.Header
}
1741
1742// Get: Gets the latest state of a long-running operation.  Clients can
1743// use this
1744// method to poll the operation result at intervals as recommended by
1745// the API
1746// service.
1747func (r *ProjectsLocationsOperationsService) Get(name string) *ProjectsLocationsOperationsGetCall {
1748	c := &ProjectsLocationsOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
1749	c.name = name
1750	return c
1751}
1752
1753// Fields allows partial responses to be retrieved. See
1754// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
1755// for more information.
1756func (c *ProjectsLocationsOperationsGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsOperationsGetCall {
1757	c.urlParams_.Set("fields", googleapi.CombineFields(s))
1758	return c
1759}
1760
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsLocationsOperationsGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsOperationsGetCall {
	c.ifNoneMatch_ = entityTag
	return c
}
1770
1771// Context sets the context to be used in this call's Do method. Any
1772// pending HTTP request will be aborted if the provided context is
1773// canceled.
1774func (c *ProjectsLocationsOperationsGetCall) Context(ctx context.Context) *ProjectsLocationsOperationsGetCall {
1775	c.ctx_ = ctx
1776	return c
1777}
1778
1779// Header returns an http.Header that can be modified by the caller to
1780// add HTTP headers to the request.
1781func (c *ProjectsLocationsOperationsGetCall) Header() http.Header {
1782	if c.header_ == nil {
1783		c.header_ = make(http.Header)
1784	}
1785	return c.header_
1786}
1787
// doRequest assembles and sends the HTTP GET request for
// "speech.projects.locations.operations.get", honoring c.ctx_ for
// cancellation.
func (c *ProjectsLocationsOperationsGetCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216")
	// Copy caller-supplied headers first; User-Agent below intentionally
	// overrides any caller-set value.
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
	}
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1p1beta1/{+name}")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	// Substitute the {+name} path template with the escaped operation name.
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
1813
// Do executes the "speech.projects.locations.operations.get" call.
// Exactly one of *Operation or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *ProjectsLocationsOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 Not Modified has no decodable body; surface it as a
	// *googleapi.Error so callers can detect it via googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &Operation{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Gets the latest state of a long-running operation.  Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.",
	//   "flatPath": "v1p1beta1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}",
	//   "httpMethod": "GET",
	//   "id": "speech.projects.locations.operations.get",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "The name of the operation resource.",
	//       "location": "path",
	//       "pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1p1beta1/{+name}",
	//   "response": {
	//     "$ref": "Operation"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform"
	//   ]
	// }

}
1878
1879// method id "speech.projects.locations.operations.list":
1880
// ProjectsLocationsOperationsListCall holds the state of a pending
// "speech.projects.locations.operations.list" request: the parent
// resource name plus per-call parameters, ETag precondition, context
// and headers.
type ProjectsLocationsOperationsListCall struct {
	s            *Service
	name         string
	urlParams_   gensupport.URLParams
	ifNoneMatch_ string
	ctx_         context.Context
	header_      http.Header
}
1889
1890// List: Lists operations that match the specified filter in the
1891// request. If the
1892// server doesn't support this method, it returns
1893// `UNIMPLEMENTED`.
1894//
1895// NOTE: the `name` binding allows API services to override the
1896// binding
1897// to use different resource name schemes, such as `users/*/operations`.
1898// To
1899// override the binding, API services can add a binding such
1900// as
1901// "/v1/{name=users/*}/operations" to their service configuration.
1902// For backwards compatibility, the default name includes the
1903// operations
1904// collection id, however overriding users must ensure the name
1905// binding
1906// is the parent resource, without the operations collection id.
1907func (r *ProjectsLocationsOperationsService) List(name string) *ProjectsLocationsOperationsListCall {
1908	c := &ProjectsLocationsOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
1909	c.name = name
1910	return c
1911}
1912
1913// Filter sets the optional parameter "filter": The standard list
1914// filter.
1915func (c *ProjectsLocationsOperationsListCall) Filter(filter string) *ProjectsLocationsOperationsListCall {
1916	c.urlParams_.Set("filter", filter)
1917	return c
1918}
1919
1920// PageSize sets the optional parameter "pageSize": The standard list
1921// page size.
1922func (c *ProjectsLocationsOperationsListCall) PageSize(pageSize int64) *ProjectsLocationsOperationsListCall {
1923	c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
1924	return c
1925}
1926
1927// PageToken sets the optional parameter "pageToken": The standard list
1928// page token.
1929func (c *ProjectsLocationsOperationsListCall) PageToken(pageToken string) *ProjectsLocationsOperationsListCall {
1930	c.urlParams_.Set("pageToken", pageToken)
1931	return c
1932}
1933
1934// Fields allows partial responses to be retrieved. See
1935// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
1936// for more information.
1937func (c *ProjectsLocationsOperationsListCall) Fields(s ...googleapi.Field) *ProjectsLocationsOperationsListCall {
1938	c.urlParams_.Set("fields", googleapi.CombineFields(s))
1939	return c
1940}
1941
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsLocationsOperationsListCall) IfNoneMatch(entityTag string) *ProjectsLocationsOperationsListCall {
	c.ifNoneMatch_ = entityTag
	return c
}
1951
1952// Context sets the context to be used in this call's Do method. Any
1953// pending HTTP request will be aborted if the provided context is
1954// canceled.
1955func (c *ProjectsLocationsOperationsListCall) Context(ctx context.Context) *ProjectsLocationsOperationsListCall {
1956	c.ctx_ = ctx
1957	return c
1958}
1959
1960// Header returns an http.Header that can be modified by the caller to
1961// add HTTP headers to the request.
1962func (c *ProjectsLocationsOperationsListCall) Header() http.Header {
1963	if c.header_ == nil {
1964		c.header_ = make(http.Header)
1965	}
1966	return c.header_
1967}
1968
// doRequest assembles and sends the HTTP GET request for
// "speech.projects.locations.operations.list", honoring c.ctx_ for
// cancellation.
func (c *ProjectsLocationsOperationsListCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216")
	// Copy caller-supplied headers first; User-Agent below intentionally
	// overrides any caller-set value.
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
	}
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1p1beta1/{+name}/operations")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	// Substitute the {+name} path template with the escaped parent name.
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
1994
// Do executes the "speech.projects.locations.operations.list" call.
// Exactly one of *ListOperationsResponse or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ListOperationsResponse.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ProjectsLocationsOperationsListCall) Do(opts ...googleapi.CallOption) (*ListOperationsResponse, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 Not Modified has no decodable body; surface it as a
	// *googleapi.Error so callers can detect it via googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &ListOperationsResponse{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Lists operations that match the specified filter in the request. If the\nserver doesn't support this method, it returns `UNIMPLEMENTED`.\n\nNOTE: the `name` binding allows API services to override the binding\nto use different resource name schemes, such as `users/*/operations`. To\noverride the binding, API services can add a binding such as\n`\"/v1/{name=users/*}/operations\"` to their service configuration.\nFor backwards compatibility, the default name includes the operations\ncollection id, however overriding users must ensure the name binding\nis the parent resource, without the operations collection id.",
	//   "flatPath": "v1p1beta1/projects/{projectsId}/locations/{locationsId}/operations",
	//   "httpMethod": "GET",
	//   "id": "speech.projects.locations.operations.list",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "filter": {
	//       "description": "The standard list filter.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "name": {
	//       "description": "The name of the operation's parent resource.",
	//       "location": "path",
	//       "pattern": "^projects/[^/]+/locations/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "pageSize": {
	//       "description": "The standard list page size.",
	//       "format": "int32",
	//       "location": "query",
	//       "type": "integer"
	//     },
	//     "pageToken": {
	//       "description": "The standard list page token.",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1p1beta1/{+name}/operations",
	//   "response": {
	//     "$ref": "ListOperationsResponse"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform"
	//   ]
	// }

}
2075
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *ProjectsLocationsOperationsListCall) Pages(ctx context.Context, f func(*ListOperationsResponse) error) error {
	c.ctx_ = ctx
	defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
	for {
		x, err := c.Do()
		if err != nil {
			return err
		}
		if err := f(x); err != nil {
			return err
		}
		// An empty next-page token marks the final page.
		if x.NextPageToken == "" {
			return nil
		}
		c.PageToken(x.NextPageToken)
	}
}
2096
2097// method id "speech.speech.longrunningrecognize":
2098
// SpeechLongrunningrecognizeCall holds the state of a pending
// "speech.speech.longrunningrecognize" request: the request body to
// POST plus per-call parameters, context and headers.
type SpeechLongrunningrecognizeCall struct {
	s                           *Service
	longrunningrecognizerequest *LongRunningRecognizeRequest
	urlParams_                  gensupport.URLParams
	ctx_                        context.Context
	header_                     http.Header
}
2106
2107// Longrunningrecognize: Performs asynchronous speech recognition:
2108// receive results via the
2109// google.longrunning.Operations interface. Returns either
2110// an
2111// `Operation.error` or an `Operation.response` which contains
2112// a `LongRunningRecognizeResponse` message.
2113// For more information on asynchronous speech recognition, see
2114// the
2115// [how-to](https://cloud.google.com/speech-to-text/docs/async-recogn
2116// ize).
2117func (r *SpeechService) Longrunningrecognize(longrunningrecognizerequest *LongRunningRecognizeRequest) *SpeechLongrunningrecognizeCall {
2118	c := &SpeechLongrunningrecognizeCall{s: r.s, urlParams_: make(gensupport.URLParams)}
2119	c.longrunningrecognizerequest = longrunningrecognizerequest
2120	return c
2121}
2122
2123// Fields allows partial responses to be retrieved. See
2124// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
2125// for more information.
2126func (c *SpeechLongrunningrecognizeCall) Fields(s ...googleapi.Field) *SpeechLongrunningrecognizeCall {
2127	c.urlParams_.Set("fields", googleapi.CombineFields(s))
2128	return c
2129}
2130
2131// Context sets the context to be used in this call's Do method. Any
2132// pending HTTP request will be aborted if the provided context is
2133// canceled.
2134func (c *SpeechLongrunningrecognizeCall) Context(ctx context.Context) *SpeechLongrunningrecognizeCall {
2135	c.ctx_ = ctx
2136	return c
2137}
2138
2139// Header returns an http.Header that can be modified by the caller to
2140// add HTTP headers to the request.
2141func (c *SpeechLongrunningrecognizeCall) Header() http.Header {
2142	if c.header_ == nil {
2143		c.header_ = make(http.Header)
2144	}
2145	return c.header_
2146}
2147
// doRequest JSON-encodes the LongRunningRecognizeRequest and sends it
// as an HTTP POST to "v1p1beta1/speech:longrunningrecognize", honoring
// c.ctx_ for cancellation.
func (c *SpeechLongrunningrecognizeCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216")
	// Copy caller-supplied headers first; User-Agent below intentionally
	// overrides any caller-set value.
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.longrunningrecognizerequest)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1p1beta1/speech:longrunningrecognize")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
2172
// Do executes the "speech.speech.longrunningrecognize" call.
// Exactly one of *Operation or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *SpeechLongrunningrecognizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 Not Modified has no decodable body; surface it as a
	// *googleapi.Error so callers can detect it via googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &Operation{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Performs asynchronous speech recognition: receive results via the\ngoogle.longrunning.Operations interface. Returns either an\n`Operation.error` or an `Operation.response` which contains\na `LongRunningRecognizeResponse` message.\nFor more information on asynchronous speech recognition, see the\n[how-to](https://cloud.google.com/speech-to-text/docs/async-recognize).",
	//   "flatPath": "v1p1beta1/speech:longrunningrecognize",
	//   "httpMethod": "POST",
	//   "id": "speech.speech.longrunningrecognize",
	//   "parameterOrder": [],
	//   "parameters": {},
	//   "path": "v1p1beta1/speech:longrunningrecognize",
	//   "request": {
	//     "$ref": "LongRunningRecognizeRequest"
	//   },
	//   "response": {
	//     "$ref": "Operation"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform"
	//   ]
	// }

}
2230
2231// method id "speech.speech.recognize":
2232
// SpeechRecognizeCall holds the state of a pending
// "speech.speech.recognize" request: the request body to POST plus
// per-call parameters, context and headers.
type SpeechRecognizeCall struct {
	s                *Service
	recognizerequest *RecognizeRequest
	urlParams_       gensupport.URLParams
	ctx_             context.Context
	header_          http.Header
}
2240
2241// Recognize: Performs synchronous speech recognition: receive results
2242// after all audio
2243// has been sent and processed.
2244func (r *SpeechService) Recognize(recognizerequest *RecognizeRequest) *SpeechRecognizeCall {
2245	c := &SpeechRecognizeCall{s: r.s, urlParams_: make(gensupport.URLParams)}
2246	c.recognizerequest = recognizerequest
2247	return c
2248}
2249
2250// Fields allows partial responses to be retrieved. See
2251// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
2252// for more information.
2253func (c *SpeechRecognizeCall) Fields(s ...googleapi.Field) *SpeechRecognizeCall {
2254	c.urlParams_.Set("fields", googleapi.CombineFields(s))
2255	return c
2256}
2257
2258// Context sets the context to be used in this call's Do method. Any
2259// pending HTTP request will be aborted if the provided context is
2260// canceled.
2261func (c *SpeechRecognizeCall) Context(ctx context.Context) *SpeechRecognizeCall {
2262	c.ctx_ = ctx
2263	return c
2264}
2265
2266// Header returns an http.Header that can be modified by the caller to
2267// add HTTP headers to the request.
2268func (c *SpeechRecognizeCall) Header() http.Header {
2269	if c.header_ == nil {
2270		c.header_ = make(http.Header)
2271	}
2272	return c.header_
2273}
2274
// doRequest JSON-encodes the RecognizeRequest and sends it as an HTTP
// POST to "v1p1beta1/speech:recognize", honoring c.ctx_ for
// cancellation.
func (c *SpeechRecognizeCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191216")
	// Copy caller-supplied headers first; User-Agent below intentionally
	// overrides any caller-set value.
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.recognizerequest)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1p1beta1/speech:recognize")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
2299
// Do executes the "speech.speech.recognize" call.
// Exactly one of *RecognizeResponse or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *RecognizeResponse.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *SpeechRecognizeCall) Do(opts ...googleapi.CallOption) (*RecognizeResponse, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 Not Modified has no decodable body; surface it as a
	// *googleapi.Error so callers can detect it via googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &RecognizeResponse{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Performs synchronous speech recognition: receive results after all audio\nhas been sent and processed.",
	//   "flatPath": "v1p1beta1/speech:recognize",
	//   "httpMethod": "POST",
	//   "id": "speech.speech.recognize",
	//   "parameterOrder": [],
	//   "parameters": {},
	//   "path": "v1p1beta1/speech:recognize",
	//   "request": {
	//     "$ref": "RecognizeRequest"
	//   },
	//   "response": {
	//     "$ref": "RecognizeResponse"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform"
	//   ]
	// }

}
2357