1// Code generated by protoc-gen-go. DO NOT EDIT.
2// source: google/cloud/speech/v1p1beta1/cloud_speech.proto
3
4package speech
5
6import (
7	context "context"
8	fmt "fmt"
9	math "math"
10
11	proto "github.com/golang/protobuf/proto"
12	_ "github.com/golang/protobuf/ptypes/any"
13	duration "github.com/golang/protobuf/ptypes/duration"
14	timestamp "github.com/golang/protobuf/ptypes/timestamp"
15	_ "google.golang.org/genproto/googleapis/api/annotations"
16	longrunning "google.golang.org/genproto/googleapis/longrunning"
17	status "google.golang.org/genproto/googleapis/rpc/status"
18	grpc "google.golang.org/grpc"
19	codes "google.golang.org/grpc/codes"
20	status1 "google.golang.org/grpc/status"
21)
22
// Reference imports to suppress errors if they are not otherwise used.
// (They keep the imported packages alive even if no generated code below
// happens to reference them directly.)
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
33
// The encoding of the audio data sent in the request.
//
// All encodings support only 1 channel (mono) audio, unless the
// `audio_channel_count` and `enable_separate_recognition_per_channel` fields
// are set.
//
// For best results, the audio source should be captured and transmitted using
// a lossless encoding (`FLAC` or `LINEAR16`). The accuracy of the speech
// recognition can be reduced if lossy codecs are used to capture or transmit
// audio, particularly if background noise is present. Lossy codecs include
// `MULAW`, `AMR`, `AMR_WB`, `OGG_OPUS`, `SPEEX_WITH_HEADER_BYTE`, and `MP3`.
//
// The `FLAC` and `WAV` audio file formats include a header that describes the
// included audio content. You can request recognition for `WAV` files that
// contain either `LINEAR16` or `MULAW` encoded audio.
// If you send `FLAC` or `WAV` audio file format in
// your request, you do not need to specify an `AudioEncoding`; the audio
// encoding format is determined from the file header. If you specify
// an `AudioEncoding` when you send `FLAC` or `WAV` audio, the
// encoding configuration must match the encoding described in the audio
// header; otherwise the request returns an
// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT] error code.
type RecognitionConfig_AudioEncoding int32
57
const (
	// Not specified.
	RecognitionConfig_ENCODING_UNSPECIFIED RecognitionConfig_AudioEncoding = 0
	// Uncompressed 16-bit signed little-endian samples (Linear PCM).
	RecognitionConfig_LINEAR16 RecognitionConfig_AudioEncoding = 1
	// `FLAC` (Free Lossless Audio
	// Codec) is the recommended encoding because it is
	// lossless--therefore recognition is not compromised--and
	// requires only about half the bandwidth of `LINEAR16`. `FLAC` stream
	// encoding supports 16-bit and 24-bit samples, however, not all fields in
	// `STREAMINFO` are supported.
	RecognitionConfig_FLAC RecognitionConfig_AudioEncoding = 2
	// 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
	RecognitionConfig_MULAW RecognitionConfig_AudioEncoding = 3
	// Adaptive Multi-Rate Narrowband codec. `sample_rate_hertz` must be 8000.
	RecognitionConfig_AMR RecognitionConfig_AudioEncoding = 4
	// Adaptive Multi-Rate Wideband codec. `sample_rate_hertz` must be 16000.
	RecognitionConfig_AMR_WB RecognitionConfig_AudioEncoding = 5
	// Opus encoded audio frames in Ogg container
	// ([OggOpus](https://wiki.xiph.org/OggOpus)).
	// `sample_rate_hertz` must be one of 8000, 12000, 16000, 24000, or 48000.
	RecognitionConfig_OGG_OPUS RecognitionConfig_AudioEncoding = 6
	// Although the use of lossy encodings is not recommended, if a very low
	// bitrate encoding is required, `OGG_OPUS` is highly preferred over
	// Speex encoding. The [Speex](https://speex.org/) encoding supported by
	// Cloud Speech API has a header byte in each block, as in MIME type
	// `audio/x-speex-with-header-byte`.
	// It is a variant of the RTP Speex encoding defined in
	// [RFC 5574](https://tools.ietf.org/html/rfc5574).
	// The stream is a sequence of blocks, one block per RTP packet. Each block
	// starts with a byte containing the length of the block, in bytes, followed
	// by one or more frames of Speex data, padded to an integral number of
	// bytes (octets) as specified in RFC 5574. In other words, each RTP header
	// is replaced with a single byte containing the block length. Only Speex
	// wideband is supported. `sample_rate_hertz` must be 16000.
	RecognitionConfig_SPEEX_WITH_HEADER_BYTE RecognitionConfig_AudioEncoding = 7
	// MP3 audio. Supports all standard MP3 bitrates (which range from 32-320
	// kbps). When using this encoding, `sample_rate_hertz` can be optionally
	// unset if not known.
	RecognitionConfig_MP3 RecognitionConfig_AudioEncoding = 8
)
99
// RecognitionConfig_AudioEncoding_name maps each audio-encoding enum value to
// its proto name; RecognitionConfig_AudioEncoding_value is the exact inverse
// lookup from proto name back to the enum value.
var RecognitionConfig_AudioEncoding_name = map[int32]string{
	8: "MP3",
	7: "SPEEX_WITH_HEADER_BYTE",
	6: "OGG_OPUS",
	5: "AMR_WB",
	4: "AMR",
	3: "MULAW",
	2: "FLAC",
	1: "LINEAR16",
	0: "ENCODING_UNSPECIFIED",
}

var RecognitionConfig_AudioEncoding_value = map[string]int32{
	"MP3":                    8,
	"SPEEX_WITH_HEADER_BYTE": 7,
	"OGG_OPUS":               6,
	"AMR_WB":                 5,
	"AMR":                    4,
	"MULAW":                  3,
	"FLAC":                   2,
	"LINEAR16":               1,
	"ENCODING_UNSPECIFIED":   0,
}
123
// String returns the proto name of the enum value via the generated name
// table (unknown values fall through to proto.EnumName's default rendering).
func (x RecognitionConfig_AudioEncoding) String() string {
	return proto.EnumName(RecognitionConfig_AudioEncoding_name, int32(x))
}

// EnumDescriptor returns the raw file-descriptor bytes and the path
// ([]int{4, 0}) locating this enum within them.
func (RecognitionConfig_AudioEncoding) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{4, 0}
}
131
// Use case categories that the audio recognition request can be described
// by.
type RecognitionMetadata_InteractionType int32

const (
	// Use case is either unknown or is something other than one of the other
	// values below.
	RecognitionMetadata_INTERACTION_TYPE_UNSPECIFIED RecognitionMetadata_InteractionType = 0
	// Multiple people in a conversation or discussion. For example in a
	// meeting with two or more people actively participating. Typically
	// all the primary people speaking would be in the same room (if not,
	// see PHONE_CALL).
	RecognitionMetadata_DISCUSSION RecognitionMetadata_InteractionType = 1
	// One or more persons lecturing or presenting to others, mostly
	// uninterrupted.
	RecognitionMetadata_PRESENTATION RecognitionMetadata_InteractionType = 2
	// A phone-call or video-conference in which two or more people, who are
	// not in the same room, are actively participating.
	RecognitionMetadata_PHONE_CALL RecognitionMetadata_InteractionType = 3
	// A recorded message intended for another person to listen to.
	RecognitionMetadata_VOICEMAIL RecognitionMetadata_InteractionType = 4
	// Professionally produced audio (e.g. TV Show, Podcast).
	RecognitionMetadata_PROFESSIONALLY_PRODUCED RecognitionMetadata_InteractionType = 5
	// Transcribe spoken questions and queries into text.
	RecognitionMetadata_VOICE_SEARCH RecognitionMetadata_InteractionType = 6
	// Transcribe voice commands, such as for controlling a device.
	RecognitionMetadata_VOICE_COMMAND RecognitionMetadata_InteractionType = 7
	// Transcribe speech to text to create a written document, such as a
	// text-message, email or report.
	RecognitionMetadata_DICTATION RecognitionMetadata_InteractionType = 8
)
163
// RecognitionMetadata_InteractionType_name maps each interaction-type enum
// value to its proto name; RecognitionMetadata_InteractionType_value is the
// exact inverse lookup.
var RecognitionMetadata_InteractionType_name = map[int32]string{
	8: "DICTATION",
	7: "VOICE_COMMAND",
	6: "VOICE_SEARCH",
	5: "PROFESSIONALLY_PRODUCED",
	4: "VOICEMAIL",
	3: "PHONE_CALL",
	2: "PRESENTATION",
	1: "DISCUSSION",
	0: "INTERACTION_TYPE_UNSPECIFIED",
}

var RecognitionMetadata_InteractionType_value = map[string]int32{
	"DICTATION":                    8,
	"VOICE_COMMAND":                7,
	"VOICE_SEARCH":                 6,
	"PROFESSIONALLY_PRODUCED":      5,
	"VOICEMAIL":                    4,
	"PHONE_CALL":                   3,
	"PRESENTATION":                 2,
	"DISCUSSION":                   1,
	"INTERACTION_TYPE_UNSPECIFIED": 0,
}
187
// String returns the proto name of the enum value via the generated name
// table.
func (x RecognitionMetadata_InteractionType) String() string {
	return proto.EnumName(RecognitionMetadata_InteractionType_name, int32(x))
}

// EnumDescriptor returns the raw file-descriptor bytes and the path
// ([]int{6, 0}) locating this enum within them.
func (RecognitionMetadata_InteractionType) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{6, 0}
}
195
// Enumerates the types of capture settings describing an audio file.
type RecognitionMetadata_MicrophoneDistance int32

const (
	// Audio type is not known.
	RecognitionMetadata_MICROPHONE_DISTANCE_UNSPECIFIED RecognitionMetadata_MicrophoneDistance = 0
	// The audio was captured from a closely placed microphone. Eg. phone,
	// dictaphone, or handheld microphone. Generally if the speaker is within
	// 1 meter of the microphone.
	RecognitionMetadata_NEARFIELD RecognitionMetadata_MicrophoneDistance = 1
	// The speaker is within 3 meters of the microphone.
	RecognitionMetadata_MIDFIELD RecognitionMetadata_MicrophoneDistance = 2
	// The speaker is more than 3 meters away from the microphone.
	RecognitionMetadata_FARFIELD RecognitionMetadata_MicrophoneDistance = 3
)
211
// RecognitionMetadata_MicrophoneDistance_name maps each microphone-distance
// enum value to its proto name; RecognitionMetadata_MicrophoneDistance_value
// is the exact inverse lookup.
var RecognitionMetadata_MicrophoneDistance_name = map[int32]string{
	3: "FARFIELD",
	2: "MIDFIELD",
	1: "NEARFIELD",
	0: "MICROPHONE_DISTANCE_UNSPECIFIED",
}

var RecognitionMetadata_MicrophoneDistance_value = map[string]int32{
	"FARFIELD":                        3,
	"MIDFIELD":                        2,
	"NEARFIELD":                       1,
	"MICROPHONE_DISTANCE_UNSPECIFIED": 0,
}
225
// String returns the proto name of the enum value via the generated name
// table.
func (x RecognitionMetadata_MicrophoneDistance) String() string {
	return proto.EnumName(RecognitionMetadata_MicrophoneDistance_name, int32(x))
}

// EnumDescriptor returns the raw file-descriptor bytes and the path
// ([]int{6, 1}) locating this enum within them.
func (RecognitionMetadata_MicrophoneDistance) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{6, 1}
}
233
// The original media the speech was recorded on.
type RecognitionMetadata_OriginalMediaType int32

const (
	// Unknown original media type.
	RecognitionMetadata_ORIGINAL_MEDIA_TYPE_UNSPECIFIED RecognitionMetadata_OriginalMediaType = 0
	// The speech data is an audio recording.
	RecognitionMetadata_AUDIO RecognitionMetadata_OriginalMediaType = 1
	// The speech data was originally recorded on a video.
	RecognitionMetadata_VIDEO RecognitionMetadata_OriginalMediaType = 2
)
245
// RecognitionMetadata_OriginalMediaType_name maps each original-media enum
// value to its proto name; RecognitionMetadata_OriginalMediaType_value is the
// exact inverse lookup.
var RecognitionMetadata_OriginalMediaType_name = map[int32]string{
	2: "VIDEO",
	1: "AUDIO",
	0: "ORIGINAL_MEDIA_TYPE_UNSPECIFIED",
}

var RecognitionMetadata_OriginalMediaType_value = map[string]int32{
	"VIDEO":                           2,
	"AUDIO":                           1,
	"ORIGINAL_MEDIA_TYPE_UNSPECIFIED": 0,
}
257
// String returns the proto name of the enum value via the generated name
// table.
func (x RecognitionMetadata_OriginalMediaType) String() string {
	return proto.EnumName(RecognitionMetadata_OriginalMediaType_name, int32(x))
}

// EnumDescriptor returns the raw file-descriptor bytes and the path
// ([]int{6, 2}) locating this enum within them.
func (RecognitionMetadata_OriginalMediaType) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{6, 2}
}
265
// The type of device the speech was recorded with.
type RecognitionMetadata_RecordingDeviceType int32

const (
	// The recording device is unknown.
	RecognitionMetadata_RECORDING_DEVICE_TYPE_UNSPECIFIED RecognitionMetadata_RecordingDeviceType = 0
	// Speech was recorded on a smartphone.
	RecognitionMetadata_SMARTPHONE RecognitionMetadata_RecordingDeviceType = 1
	// Speech was recorded using a personal computer or tablet.
	RecognitionMetadata_PC RecognitionMetadata_RecordingDeviceType = 2
	// Speech was recorded over a phone line.
	RecognitionMetadata_PHONE_LINE RecognitionMetadata_RecordingDeviceType = 3
	// Speech was recorded in a vehicle.
	RecognitionMetadata_VEHICLE RecognitionMetadata_RecordingDeviceType = 4
	// Speech was recorded outdoors (on a device other than the above).
	RecognitionMetadata_OTHER_OUTDOOR_DEVICE RecognitionMetadata_RecordingDeviceType = 5
	// Speech was recorded indoors (on a device other than the above).
	RecognitionMetadata_OTHER_INDOOR_DEVICE RecognitionMetadata_RecordingDeviceType = 6
)
285
// RecognitionMetadata_RecordingDeviceType_name maps each recording-device
// enum value to its proto name;
// RecognitionMetadata_RecordingDeviceType_value is the exact inverse lookup.
var RecognitionMetadata_RecordingDeviceType_name = map[int32]string{
	6: "OTHER_INDOOR_DEVICE",
	5: "OTHER_OUTDOOR_DEVICE",
	4: "VEHICLE",
	3: "PHONE_LINE",
	2: "PC",
	1: "SMARTPHONE",
	0: "RECORDING_DEVICE_TYPE_UNSPECIFIED",
}

var RecognitionMetadata_RecordingDeviceType_value = map[string]int32{
	"OTHER_INDOOR_DEVICE":               6,
	"OTHER_OUTDOOR_DEVICE":              5,
	"VEHICLE":                           4,
	"PHONE_LINE":                        3,
	"PC":                                2,
	"SMARTPHONE":                        1,
	"RECORDING_DEVICE_TYPE_UNSPECIFIED": 0,
}
305
// String returns the proto name of the enum value via the generated name
// table.
func (x RecognitionMetadata_RecordingDeviceType) String() string {
	return proto.EnumName(RecognitionMetadata_RecordingDeviceType_name, int32(x))
}

// EnumDescriptor returns the raw file-descriptor bytes and the path
// ([]int{6, 3}) locating this enum within them.
func (RecognitionMetadata_RecordingDeviceType) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{6, 3}
}
313
// Indicates the type of speech event reported in a
// `StreamingRecognizeResponse`.
type StreamingRecognizeResponse_SpeechEventType int32

const (
	// No speech event specified.
	StreamingRecognizeResponse_SPEECH_EVENT_UNSPECIFIED StreamingRecognizeResponse_SpeechEventType = 0
	// This event indicates that the server has detected the end of the user's
	// speech utterance and expects no additional speech. Therefore, the server
	// will not process additional audio (although it may subsequently return
	// additional results). The client should stop sending additional audio
	// data, half-close the gRPC connection, and wait for any additional results
	// until the server closes the gRPC connection. This event is only sent if
	// `single_utterance` was set to `true`, and is not used otherwise.
	StreamingRecognizeResponse_END_OF_SINGLE_UTTERANCE StreamingRecognizeResponse_SpeechEventType = 1
)
329
// StreamingRecognizeResponse_SpeechEventType_name maps each speech-event enum
// value to its proto name;
// StreamingRecognizeResponse_SpeechEventType_value is the exact inverse
// lookup.
var StreamingRecognizeResponse_SpeechEventType_name = map[int32]string{
	1: "END_OF_SINGLE_UTTERANCE",
	0: "SPEECH_EVENT_UNSPECIFIED",
}

var StreamingRecognizeResponse_SpeechEventType_value = map[string]int32{
	"END_OF_SINGLE_UTTERANCE":  1,
	"SPEECH_EVENT_UNSPECIFIED": 0,
}
339
// String returns the proto name of the enum value via the generated name
// table.
func (x StreamingRecognizeResponse_SpeechEventType) String() string {
	return proto.EnumName(StreamingRecognizeResponse_SpeechEventType_name, int32(x))
}

// EnumDescriptor returns the raw file-descriptor bytes and the path
// ([]int{12, 0}) locating this enum within them.
func (StreamingRecognizeResponse_SpeechEventType) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{12, 0}
}
347
// The top-level message sent by the client for the `Recognize` method.
type RecognizeRequest struct {
	// Required. Provides information to the recognizer that specifies how to
	// process the request.
	Config *RecognitionConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
	// Required. The audio data to be recognized.
	Audio                *RecognitionAudio `protobuf:"bytes,2,opt,name=audio,proto3" json:"audio,omitempty"`
	// Internal proto bookkeeping fields; not for direct use.
	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
	XXX_unrecognized     []byte            `json:"-"`
	XXX_sizecache        int32             `json:"-"`
}
359
// Standard generated proto plumbing for RecognizeRequest: Reset, String, and
// ProtoMessage implement proto.Message; Descriptor locates the message
// (index 0) inside the raw file descriptor; the XXX_* methods delegate
// (un)marshalling, merging, sizing, and unknown-field handling to the shared
// message-info table below.
func (m *RecognizeRequest) Reset()         { *m = RecognizeRequest{} }
func (m *RecognizeRequest) String() string { return proto.CompactTextString(m) }
func (*RecognizeRequest) ProtoMessage()    {}
func (*RecognizeRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{0}
}

func (m *RecognizeRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_RecognizeRequest.Unmarshal(m, b)
}
func (m *RecognizeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_RecognizeRequest.Marshal(b, m, deterministic)
}
func (m *RecognizeRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RecognizeRequest.Merge(m, src)
}
func (m *RecognizeRequest) XXX_Size() int {
	return xxx_messageInfo_RecognizeRequest.Size(m)
}
func (m *RecognizeRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_RecognizeRequest.DiscardUnknown(m)
}

// xxx_messageInfo_RecognizeRequest caches reflection/marshalling state for
// RecognizeRequest.
var xxx_messageInfo_RecognizeRequest proto.InternalMessageInfo
384
385func (m *RecognizeRequest) GetConfig() *RecognitionConfig {
386	if m != nil {
387		return m.Config
388	}
389	return nil
390}
391
392func (m *RecognizeRequest) GetAudio() *RecognitionAudio {
393	if m != nil {
394		return m.Audio
395	}
396	return nil
397}
398
// The top-level message sent by the client for the `LongRunningRecognize`
// method.
type LongRunningRecognizeRequest struct {
	// Required. Provides information to the recognizer that specifies how to
	// process the request.
	Config *RecognitionConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
	// Required. The audio data to be recognized.
	Audio                *RecognitionAudio `protobuf:"bytes,2,opt,name=audio,proto3" json:"audio,omitempty"`
	// Internal proto bookkeeping fields; not for direct use.
	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
	XXX_unrecognized     []byte            `json:"-"`
	XXX_sizecache        int32             `json:"-"`
}
411
// Standard generated proto plumbing for LongRunningRecognizeRequest: Reset,
// String, and ProtoMessage implement proto.Message; Descriptor locates the
// message (index 1) inside the raw file descriptor; the XXX_* methods
// delegate to the shared message-info table below.
func (m *LongRunningRecognizeRequest) Reset()         { *m = LongRunningRecognizeRequest{} }
func (m *LongRunningRecognizeRequest) String() string { return proto.CompactTextString(m) }
func (*LongRunningRecognizeRequest) ProtoMessage()    {}
func (*LongRunningRecognizeRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{1}
}

func (m *LongRunningRecognizeRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_LongRunningRecognizeRequest.Unmarshal(m, b)
}
func (m *LongRunningRecognizeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_LongRunningRecognizeRequest.Marshal(b, m, deterministic)
}
func (m *LongRunningRecognizeRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_LongRunningRecognizeRequest.Merge(m, src)
}
func (m *LongRunningRecognizeRequest) XXX_Size() int {
	return xxx_messageInfo_LongRunningRecognizeRequest.Size(m)
}
func (m *LongRunningRecognizeRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_LongRunningRecognizeRequest.DiscardUnknown(m)
}

// xxx_messageInfo_LongRunningRecognizeRequest caches reflection/marshalling
// state for LongRunningRecognizeRequest.
var xxx_messageInfo_LongRunningRecognizeRequest proto.InternalMessageInfo
436
437func (m *LongRunningRecognizeRequest) GetConfig() *RecognitionConfig {
438	if m != nil {
439		return m.Config
440	}
441	return nil
442}
443
444func (m *LongRunningRecognizeRequest) GetAudio() *RecognitionAudio {
445	if m != nil {
446		return m.Audio
447	}
448	return nil
449}
450
// The top-level message sent by the client for the `StreamingRecognize` method.
// Multiple `StreamingRecognizeRequest` messages are sent. The first message
// must contain a `streaming_config` message and must not contain
// `audio_content`. All subsequent messages must contain `audio_content` and
// must not contain a `streaming_config` message.
type StreamingRecognizeRequest struct {
	// The streaming request, which is either a streaming config or audio content.
	//
	// Types that are valid to be assigned to StreamingRequest:
	//	*StreamingRecognizeRequest_StreamingConfig
	//	*StreamingRecognizeRequest_AudioContent
	StreamingRequest     isStreamingRecognizeRequest_StreamingRequest `protobuf_oneof:"streaming_request"`
	// Internal proto bookkeeping fields; not for direct use.
	XXX_NoUnkeyedLiteral struct{}                                     `json:"-"`
	XXX_unrecognized     []byte                                       `json:"-"`
	XXX_sizecache        int32                                        `json:"-"`
}
467
// Standard generated proto plumbing for StreamingRecognizeRequest: Reset,
// String, and ProtoMessage implement proto.Message; Descriptor locates the
// message (index 2) inside the raw file descriptor; the XXX_* methods
// delegate to the shared message-info table below.
func (m *StreamingRecognizeRequest) Reset()         { *m = StreamingRecognizeRequest{} }
func (m *StreamingRecognizeRequest) String() string { return proto.CompactTextString(m) }
func (*StreamingRecognizeRequest) ProtoMessage()    {}
func (*StreamingRecognizeRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{2}
}

func (m *StreamingRecognizeRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_StreamingRecognizeRequest.Unmarshal(m, b)
}
func (m *StreamingRecognizeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_StreamingRecognizeRequest.Marshal(b, m, deterministic)
}
func (m *StreamingRecognizeRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_StreamingRecognizeRequest.Merge(m, src)
}
func (m *StreamingRecognizeRequest) XXX_Size() int {
	return xxx_messageInfo_StreamingRecognizeRequest.Size(m)
}
func (m *StreamingRecognizeRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_StreamingRecognizeRequest.DiscardUnknown(m)
}

// xxx_messageInfo_StreamingRecognizeRequest caches reflection/marshalling
// state for StreamingRecognizeRequest.
var xxx_messageInfo_StreamingRecognizeRequest proto.InternalMessageInfo
492
// isStreamingRecognizeRequest_StreamingRequest is the interface satisfied by
// the generated wrapper types for the `streaming_request` oneof.
type isStreamingRecognizeRequest_StreamingRequest interface {
	isStreamingRecognizeRequest_StreamingRequest()
}

// StreamingRecognizeRequest_StreamingConfig wraps the `streaming_config`
// variant of the oneof.
type StreamingRecognizeRequest_StreamingConfig struct {
	StreamingConfig *StreamingRecognitionConfig `protobuf:"bytes,1,opt,name=streaming_config,json=streamingConfig,proto3,oneof"`
}

// StreamingRecognizeRequest_AudioContent wraps the `audio_content` variant of
// the oneof.
type StreamingRecognizeRequest_AudioContent struct {
	AudioContent []byte `protobuf:"bytes,2,opt,name=audio_content,json=audioContent,proto3,oneof"`
}

func (*StreamingRecognizeRequest_StreamingConfig) isStreamingRecognizeRequest_StreamingRequest() {}

func (*StreamingRecognizeRequest_AudioContent) isStreamingRecognizeRequest_StreamingRequest() {}

// GetStreamingRequest returns whichever oneof variant is populated, or nil
// when none is set or the receiver itself is nil.
func (m *StreamingRecognizeRequest) GetStreamingRequest() isStreamingRecognizeRequest_StreamingRequest {
	if m != nil {
		return m.StreamingRequest
	}
	return nil
}
515
516func (m *StreamingRecognizeRequest) GetStreamingConfig() *StreamingRecognitionConfig {
517	if x, ok := m.GetStreamingRequest().(*StreamingRecognizeRequest_StreamingConfig); ok {
518		return x.StreamingConfig
519	}
520	return nil
521}
522
523func (m *StreamingRecognizeRequest) GetAudioContent() []byte {
524	if x, ok := m.GetStreamingRequest().(*StreamingRecognizeRequest_AudioContent); ok {
525		return x.AudioContent
526	}
527	return nil
528}
529
// XXX_OneofWrappers is for the internal use of the proto package.
// It lists the wrapper types of the `streaming_request` oneof so the runtime
// can (un)marshal them.
func (*StreamingRecognizeRequest) XXX_OneofWrappers() []interface{} {
	return []interface{}{
		(*StreamingRecognizeRequest_StreamingConfig)(nil),
		(*StreamingRecognizeRequest_AudioContent)(nil),
	}
}
537
// Provides information to the recognizer that specifies how to process the
// request.
type StreamingRecognitionConfig struct {
	// Required. Provides information to the recognizer that specifies how to
	// process the request.
	Config *RecognitionConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
	// If `false` or omitted, the recognizer will perform continuous
	// recognition (continuing to wait for and process audio even if the user
	// pauses speaking) until the client closes the input stream (gRPC API) or
	// until the maximum time limit has been reached. May return multiple
	// `StreamingRecognitionResult`s with the `is_final` flag set to `true`.
	//
	// If `true`, the recognizer will detect a single spoken utterance. When it
	// detects that the user has paused or stopped speaking, it will return an
	// `END_OF_SINGLE_UTTERANCE` event and cease recognition. It will return no
	// more than one `StreamingRecognitionResult` with the `is_final` flag set to
	// `true`.
	SingleUtterance bool `protobuf:"varint,2,opt,name=single_utterance,json=singleUtterance,proto3" json:"single_utterance,omitempty"`
	// If `true`, interim results (tentative hypotheses) may be
	// returned as they become available (these interim results are indicated with
	// the `is_final=false` flag).
	// If `false` or omitted, only `is_final=true` result(s) are returned.
	InterimResults       bool     `protobuf:"varint,3,opt,name=interim_results,json=interimResults,proto3" json:"interim_results,omitempty"`
	// Internal proto bookkeeping fields; not for direct use.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
565
// Standard generated proto plumbing for StreamingRecognitionConfig: Reset,
// String, and ProtoMessage implement proto.Message; Descriptor locates the
// message (index 3) inside the raw file descriptor; the XXX_* methods
// delegate to the shared message-info table below.
func (m *StreamingRecognitionConfig) Reset()         { *m = StreamingRecognitionConfig{} }
func (m *StreamingRecognitionConfig) String() string { return proto.CompactTextString(m) }
func (*StreamingRecognitionConfig) ProtoMessage()    {}
func (*StreamingRecognitionConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{3}
}

func (m *StreamingRecognitionConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_StreamingRecognitionConfig.Unmarshal(m, b)
}
func (m *StreamingRecognitionConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_StreamingRecognitionConfig.Marshal(b, m, deterministic)
}
func (m *StreamingRecognitionConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_StreamingRecognitionConfig.Merge(m, src)
}
func (m *StreamingRecognitionConfig) XXX_Size() int {
	return xxx_messageInfo_StreamingRecognitionConfig.Size(m)
}
func (m *StreamingRecognitionConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_StreamingRecognitionConfig.DiscardUnknown(m)
}

// xxx_messageInfo_StreamingRecognitionConfig caches reflection/marshalling
// state for StreamingRecognitionConfig.
var xxx_messageInfo_StreamingRecognitionConfig proto.InternalMessageInfo
590
591func (m *StreamingRecognitionConfig) GetConfig() *RecognitionConfig {
592	if m != nil {
593		return m.Config
594	}
595	return nil
596}
597
598func (m *StreamingRecognitionConfig) GetSingleUtterance() bool {
599	if m != nil {
600		return m.SingleUtterance
601	}
602	return false
603}
604
605func (m *StreamingRecognitionConfig) GetInterimResults() bool {
606	if m != nil {
607		return m.InterimResults
608	}
609	return false
610}
611
612// Provides information to the recognizer that specifies how to process the
613// request.
614type RecognitionConfig struct {
615	// Encoding of audio data sent in all `RecognitionAudio` messages.
616	// This field is optional for `FLAC` and `WAV` audio files and required
617	// for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
618	Encoding RecognitionConfig_AudioEncoding `protobuf:"varint,1,opt,name=encoding,proto3,enum=google.cloud.speech.v1p1beta1.RecognitionConfig_AudioEncoding" json:"encoding,omitempty"`
619	// Sample rate in Hertz of the audio data sent in all
620	// `RecognitionAudio` messages. Valid values are: 8000-48000.
621	// 16000 is optimal. For best results, set the sampling rate of the audio
622	// source to 16000 Hz. If that's not possible, use the native sample rate of
623	// the audio source (instead of re-sampling).
624	// This field is optional for FLAC and WAV audio files, but is
625	// required for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
626	SampleRateHertz int32 `protobuf:"varint,2,opt,name=sample_rate_hertz,json=sampleRateHertz,proto3" json:"sample_rate_hertz,omitempty"`
627	// The number of channels in the input audio data.
628	// ONLY set this for MULTI-CHANNEL recognition.
629	// Valid values for LINEAR16 and FLAC are `1`-`8`.
630	// Valid values for OGG_OPUS are '1'-'254'.
631	// Valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is only `1`.
632	// If `0` or omitted, defaults to one channel (mono).
633	// Note: We only recognize the first channel by default.
634	// To perform independent recognition on each channel set
635	// `enable_separate_recognition_per_channel` to 'true'.
636	AudioChannelCount int32 `protobuf:"varint,7,opt,name=audio_channel_count,json=audioChannelCount,proto3" json:"audio_channel_count,omitempty"`
637	// This needs to be set to `true` explicitly and `audio_channel_count` > 1
638	// to get each channel recognized separately. The recognition result will
639	// contain a `channel_tag` field to state which channel that result belongs
640	// to. If this is not true, we will only recognize the first channel. The
641	// request is billed cumulatively for all channels recognized:
642	// `audio_channel_count` multiplied by the length of the audio.
643	EnableSeparateRecognitionPerChannel bool `protobuf:"varint,12,opt,name=enable_separate_recognition_per_channel,json=enableSeparateRecognitionPerChannel,proto3" json:"enable_separate_recognition_per_channel,omitempty"`
644	// Required. The language of the supplied audio as a
645	// [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
646	// Example: "en-US".
647	// See [Language
648	// Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
649	// of the currently supported language codes.
650	LanguageCode string `protobuf:"bytes,3,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
651	// A list of up to 3 additional
652	// [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
653	// listing possible alternative languages of the supplied audio.
654	// See [Language
655	// Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
656	// of the currently supported language codes. If alternative languages are
657	// listed, recognition result will contain recognition in the most likely
658	// language detected including the main language_code. The recognition result
659	// will include the language tag of the language detected in the audio. Note:
660	// This feature is only supported for Voice Command and Voice Search use cases
661	// and performance may vary for other use cases (e.g., phone call
662	// transcription).
663	AlternativeLanguageCodes []string `protobuf:"bytes,18,rep,name=alternative_language_codes,json=alternativeLanguageCodes,proto3" json:"alternative_language_codes,omitempty"`
664	// Maximum number of recognition hypotheses to be returned.
665	// Specifically, the maximum number of `SpeechRecognitionAlternative` messages
666	// within each `SpeechRecognitionResult`.
667	// The server may return fewer than `max_alternatives`.
668	// Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
669	// one. If omitted, will return a maximum of one.
670	MaxAlternatives int32 `protobuf:"varint,4,opt,name=max_alternatives,json=maxAlternatives,proto3" json:"max_alternatives,omitempty"`
671	// If set to `true`, the server will attempt to filter out
672	// profanities, replacing all but the initial character in each filtered word
673	// with asterisks, e.g. "f***". If set to `false` or omitted, profanities
674	// won't be filtered out.
675	ProfanityFilter bool `protobuf:"varint,5,opt,name=profanity_filter,json=profanityFilter,proto3" json:"profanity_filter,omitempty"`
676	// Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
677	// A means to provide context to assist the speech recognition. For more
678	// information, see
679	// [speech
680	// adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
681	SpeechContexts []*SpeechContext `protobuf:"bytes,6,rep,name=speech_contexts,json=speechContexts,proto3" json:"speech_contexts,omitempty"`
682	// If `true`, the top result includes a list of words and
683	// the start and end time offsets (timestamps) for those words. If
684	// `false`, no word-level time offset information is returned. The default is
685	// `false`.
686	EnableWordTimeOffsets bool `protobuf:"varint,8,opt,name=enable_word_time_offsets,json=enableWordTimeOffsets,proto3" json:"enable_word_time_offsets,omitempty"`
687	// If `true`, the top result includes a list of words and the
688	// confidence for those words. If `false`, no word-level confidence
689	// information is returned. The default is `false`.
690	EnableWordConfidence bool `protobuf:"varint,15,opt,name=enable_word_confidence,json=enableWordConfidence,proto3" json:"enable_word_confidence,omitempty"`
691	// If 'true', adds punctuation to recognition result hypotheses.
692	// This feature is only available in select languages. Setting this for
693	// requests in other languages has no effect at all.
694	// The default 'false' value does not add punctuation to result hypotheses.
695	// Note: This is currently offered as an experimental service, complimentary
696	// to all users. In the future this may be exclusively available as a
697	// premium feature.
698	EnableAutomaticPunctuation bool `protobuf:"varint,11,opt,name=enable_automatic_punctuation,json=enableAutomaticPunctuation,proto3" json:"enable_automatic_punctuation,omitempty"`
699	// If 'true', enables speaker detection for each recognized word in
700	// the top alternative of the recognition result using a speaker_tag provided
701	// in the WordInfo.
702	// Note: Use diarization_config instead.
703	EnableSpeakerDiarization bool `protobuf:"varint,16,opt,name=enable_speaker_diarization,json=enableSpeakerDiarization,proto3" json:"enable_speaker_diarization,omitempty"` // Deprecated: Do not use.
704	// If set, specifies the estimated number of speakers in the conversation.
705	// Defaults to '2'. Ignored unless enable_speaker_diarization is set to true.
706	// Note: Use diarization_config instead.
707	DiarizationSpeakerCount int32 `protobuf:"varint,17,opt,name=diarization_speaker_count,json=diarizationSpeakerCount,proto3" json:"diarization_speaker_count,omitempty"` // Deprecated: Do not use.
708	// Config to enable speaker diarization and set additional
709	// parameters to make diarization better suited for your application.
710	// Note: When this is enabled, we send all the words from the beginning of the
711	// audio for the top alternative in every consecutive STREAMING responses.
712	// This is done in order to improve our speaker tags as our models learn to
713	// identify the speakers in the conversation over time.
714	// For non-streaming requests, the diarization results will be provided only
715	// in the top alternative of the FINAL SpeechRecognitionResult.
716	DiarizationConfig *SpeakerDiarizationConfig `protobuf:"bytes,19,opt,name=diarization_config,json=diarizationConfig,proto3" json:"diarization_config,omitempty"`
717	// Metadata regarding this request.
718	Metadata *RecognitionMetadata `protobuf:"bytes,9,opt,name=metadata,proto3" json:"metadata,omitempty"`
719	// Which model to select for the given request. Select the model
720	// best suited to your domain to get best results. If a model is not
721	// explicitly specified, then we auto-select a model based on the parameters
722	// in the RecognitionConfig.
723	// <table>
724	//   <tr>
725	//     <td><b>Model</b></td>
726	//     <td><b>Description</b></td>
727	//   </tr>
728	//   <tr>
729	//     <td><code>command_and_search</code></td>
730	//     <td>Best for short queries such as voice commands or voice search.</td>
731	//   </tr>
732	//   <tr>
733	//     <td><code>phone_call</code></td>
734	//     <td>Best for audio that originated from a phone call (typically
735	//     recorded at an 8khz sampling rate).</td>
736	//   </tr>
737	//   <tr>
738	//     <td><code>video</code></td>
//     <td>Best for audio that originated from video or includes multiple
740	//         speakers. Ideally the audio is recorded at a 16khz or greater
741	//         sampling rate. This is a premium model that costs more than the
742	//         standard rate.</td>
743	//   </tr>
744	//   <tr>
745	//     <td><code>default</code></td>
746	//     <td>Best for audio that is not one of the specific audio models.
747	//         For example, long-form audio. Ideally the audio is high-fidelity,
748	//         recorded at a 16khz or greater sampling rate.</td>
749	//   </tr>
750	// </table>
751	Model string `protobuf:"bytes,13,opt,name=model,proto3" json:"model,omitempty"`
752	// Set to true to use an enhanced model for speech recognition.
753	// If `use_enhanced` is set to true and the `model` field is not set, then
754	// an appropriate enhanced model is chosen if an enhanced model exists for
755	// the audio.
756	//
757	// If `use_enhanced` is true and an enhanced version of the specified model
758	// does not exist, then the speech is recognized using the standard version
759	// of the specified model.
760	UseEnhanced          bool     `protobuf:"varint,14,opt,name=use_enhanced,json=useEnhanced,proto3" json:"use_enhanced,omitempty"`
761	XXX_NoUnkeyedLiteral struct{} `json:"-"`
762	XXX_unrecognized     []byte   `json:"-"`
763	XXX_sizecache        int32    `json:"-"`
764}
765
// Reset restores m to its zero value.
func (m *RecognitionConfig) Reset()         { *m = RecognitionConfig{} }

// String returns the compact text-format representation of m.
func (m *RecognitionConfig) String() string { return proto.CompactTextString(m) }

// ProtoMessage tags *RecognitionConfig as a protobuf message.
func (*RecognitionConfig) ProtoMessage()    {}

// Descriptor returns the gzipped file descriptor bytes and the index
// path of this message within that descriptor.
func (*RecognitionConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{4}
}

// XXX_Unmarshal decodes the wire-format bytes b into m.
func (m *RecognitionConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_RecognitionConfig.Unmarshal(m, b)
}

// XXX_Marshal appends the wire-format encoding of m to b.
func (m *RecognitionConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_RecognitionConfig.Marshal(b, m, deterministic)
}

// XXX_Merge merges src into m.
func (m *RecognitionConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RecognitionConfig.Merge(m, src)
}

// XXX_Size reports the size of the encoded form of m in bytes.
func (m *RecognitionConfig) XXX_Size() int {
	return xxx_messageInfo_RecognitionConfig.Size(m)
}

// XXX_DiscardUnknown drops any unrecognized fields retained on m.
func (m *RecognitionConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_RecognitionConfig.DiscardUnknown(m)
}

// xxx_messageInfo_RecognitionConfig caches reflection-derived marshaling
// metadata for RecognitionConfig; it backs the XXX_* methods above.
var xxx_messageInfo_RecognitionConfig proto.InternalMessageInfo

// GetEncoding returns the audio encoding, or ENCODING_UNSPECIFIED if m is nil.
func (m *RecognitionConfig) GetEncoding() RecognitionConfig_AudioEncoding {
	if m != nil {
		return m.Encoding
	}
	return RecognitionConfig_ENCODING_UNSPECIFIED
}

// GetSampleRateHertz returns the sample rate in hertz, or 0 if m is nil.
func (m *RecognitionConfig) GetSampleRateHertz() int32 {
	if m != nil {
		return m.SampleRateHertz
	}
	return 0
}

// GetAudioChannelCount returns the channel count, or 0 if m is nil.
func (m *RecognitionConfig) GetAudioChannelCount() int32 {
	if m != nil {
		return m.AudioChannelCount
	}
	return 0
}

// GetEnableSeparateRecognitionPerChannel reports whether per-channel
// recognition is enabled; false if m is nil.
func (m *RecognitionConfig) GetEnableSeparateRecognitionPerChannel() bool {
	if m != nil {
		return m.EnableSeparateRecognitionPerChannel
	}
	return false
}

// GetLanguageCode returns the language code, or "" if m is nil.
func (m *RecognitionConfig) GetLanguageCode() string {
	if m != nil {
		return m.LanguageCode
	}
	return ""
}

// GetAlternativeLanguageCodes returns the alternative language codes,
// or nil if m is nil.
func (m *RecognitionConfig) GetAlternativeLanguageCodes() []string {
	if m != nil {
		return m.AlternativeLanguageCodes
	}
	return nil
}

// GetMaxAlternatives returns the maximum number of hypotheses to
// return, or 0 if m is nil.
func (m *RecognitionConfig) GetMaxAlternatives() int32 {
	if m != nil {
		return m.MaxAlternatives
	}
	return 0
}

// GetProfanityFilter reports whether profanity filtering is enabled;
// false if m is nil.
func (m *RecognitionConfig) GetProfanityFilter() bool {
	if m != nil {
		return m.ProfanityFilter
	}
	return false
}

// GetSpeechContexts returns the speech adaptation contexts, or nil if
// m is nil.
func (m *RecognitionConfig) GetSpeechContexts() []*SpeechContext {
	if m != nil {
		return m.SpeechContexts
	}
	return nil
}

// GetEnableWordTimeOffsets reports whether word time offsets are
// requested; false if m is nil.
func (m *RecognitionConfig) GetEnableWordTimeOffsets() bool {
	if m != nil {
		return m.EnableWordTimeOffsets
	}
	return false
}

// GetEnableWordConfidence reports whether word-level confidence is
// requested; false if m is nil.
func (m *RecognitionConfig) GetEnableWordConfidence() bool {
	if m != nil {
		return m.EnableWordConfidence
	}
	return false
}

// GetEnableAutomaticPunctuation reports whether automatic punctuation
// is enabled; false if m is nil.
func (m *RecognitionConfig) GetEnableAutomaticPunctuation() bool {
	if m != nil {
		return m.EnableAutomaticPunctuation
	}
	return false
}

// GetEnableSpeakerDiarization reports the legacy diarization flag;
// false if m is nil. Use GetDiarizationConfig instead.
//
// Deprecated: Do not use.
func (m *RecognitionConfig) GetEnableSpeakerDiarization() bool {
	if m != nil {
		return m.EnableSpeakerDiarization
	}
	return false
}

// GetDiarizationSpeakerCount returns the legacy speaker count; 0 if m
// is nil. Use GetDiarizationConfig instead.
//
// Deprecated: Do not use.
func (m *RecognitionConfig) GetDiarizationSpeakerCount() int32 {
	if m != nil {
		return m.DiarizationSpeakerCount
	}
	return 0
}

// GetDiarizationConfig returns the diarization config, or nil if m is nil.
func (m *RecognitionConfig) GetDiarizationConfig() *SpeakerDiarizationConfig {
	if m != nil {
		return m.DiarizationConfig
	}
	return nil
}

// GetMetadata returns the request metadata, or nil if m is nil.
func (m *RecognitionConfig) GetMetadata() *RecognitionMetadata {
	if m != nil {
		return m.Metadata
	}
	return nil
}

// GetModel returns the requested model name, or "" if m is nil.
func (m *RecognitionConfig) GetModel() string {
	if m != nil {
		return m.Model
	}
	return ""
}

// GetUseEnhanced reports whether an enhanced model is requested; false
// if m is nil.
func (m *RecognitionConfig) GetUseEnhanced() bool {
	if m != nil {
		return m.UseEnhanced
	}
	return false
}
918
// Config to enable speaker diarization.
type SpeakerDiarizationConfig struct {
	// If 'true', enables speaker detection for each recognized word in
	// the top alternative of the recognition result using a speaker_tag provided
	// in the WordInfo.
	EnableSpeakerDiarization bool `protobuf:"varint,1,opt,name=enable_speaker_diarization,json=enableSpeakerDiarization,proto3" json:"enable_speaker_diarization,omitempty"`
	// Minimum number of speakers in the conversation. This range gives you more
	// flexibility by allowing the system to automatically determine the correct
	// number of speakers. If not set, the default value is 2.
	MinSpeakerCount int32 `protobuf:"varint,2,opt,name=min_speaker_count,json=minSpeakerCount,proto3" json:"min_speaker_count,omitempty"`
	// Maximum number of speakers in the conversation. This range gives you more
	// flexibility by allowing the system to automatically determine the correct
	// number of speakers. If not set, the default value is 6.
	MaxSpeakerCount      int32    `protobuf:"varint,3,opt,name=max_speaker_count,json=maxSpeakerCount,proto3" json:"max_speaker_count,omitempty"`
	// XXX_* fields are internal bookkeeping used by the proto runtime.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset restores m to its zero value.
func (m *SpeakerDiarizationConfig) Reset()         { *m = SpeakerDiarizationConfig{} }

// String returns the compact text-format representation of m.
func (m *SpeakerDiarizationConfig) String() string { return proto.CompactTextString(m) }

// ProtoMessage tags *SpeakerDiarizationConfig as a protobuf message.
func (*SpeakerDiarizationConfig) ProtoMessage()    {}

// Descriptor returns the gzipped file descriptor bytes and the index
// path of this message within that descriptor.
func (*SpeakerDiarizationConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{5}
}

// XXX_Unmarshal decodes the wire-format bytes b into m.
func (m *SpeakerDiarizationConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_SpeakerDiarizationConfig.Unmarshal(m, b)
}

// XXX_Marshal appends the wire-format encoding of m to b.
func (m *SpeakerDiarizationConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_SpeakerDiarizationConfig.Marshal(b, m, deterministic)
}

// XXX_Merge merges src into m.
func (m *SpeakerDiarizationConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SpeakerDiarizationConfig.Merge(m, src)
}

// XXX_Size reports the size of the encoded form of m in bytes.
func (m *SpeakerDiarizationConfig) XXX_Size() int {
	return xxx_messageInfo_SpeakerDiarizationConfig.Size(m)
}

// XXX_DiscardUnknown drops any unrecognized fields retained on m.
func (m *SpeakerDiarizationConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_SpeakerDiarizationConfig.DiscardUnknown(m)
}

// xxx_messageInfo_SpeakerDiarizationConfig caches reflection-derived
// marshaling metadata for SpeakerDiarizationConfig.
var xxx_messageInfo_SpeakerDiarizationConfig proto.InternalMessageInfo

// GetEnableSpeakerDiarization reports whether diarization is enabled;
// false if m is nil.
func (m *SpeakerDiarizationConfig) GetEnableSpeakerDiarization() bool {
	if m != nil {
		return m.EnableSpeakerDiarization
	}
	return false
}

// GetMinSpeakerCount returns the minimum speaker count, or 0 if m is nil.
func (m *SpeakerDiarizationConfig) GetMinSpeakerCount() int32 {
	if m != nil {
		return m.MinSpeakerCount
	}
	return 0
}

// GetMaxSpeakerCount returns the maximum speaker count, or 0 if m is nil.
func (m *SpeakerDiarizationConfig) GetMaxSpeakerCount() int32 {
	if m != nil {
		return m.MaxSpeakerCount
	}
	return 0
}
983
// Description of audio data to be recognized.
type RecognitionMetadata struct {
	// The use case most closely describing the audio content to be recognized.
	InteractionType RecognitionMetadata_InteractionType `protobuf:"varint,1,opt,name=interaction_type,json=interactionType,proto3,enum=google.cloud.speech.v1p1beta1.RecognitionMetadata_InteractionType" json:"interaction_type,omitempty"`
	// The industry vertical to which this speech recognition request most
	// closely applies. This is most indicative of the topics contained
	// in the audio.  Use the 6-digit NAICS code to identify the industry
	// vertical - see https://www.naics.com/search/.
	IndustryNaicsCodeOfAudio uint32 `protobuf:"varint,3,opt,name=industry_naics_code_of_audio,json=industryNaicsCodeOfAudio,proto3" json:"industry_naics_code_of_audio,omitempty"`
	// The audio type that most closely describes the audio being recognized.
	MicrophoneDistance RecognitionMetadata_MicrophoneDistance `protobuf:"varint,4,opt,name=microphone_distance,json=microphoneDistance,proto3,enum=google.cloud.speech.v1p1beta1.RecognitionMetadata_MicrophoneDistance" json:"microphone_distance,omitempty"`
	// The original media the speech was recorded on.
	OriginalMediaType RecognitionMetadata_OriginalMediaType `protobuf:"varint,5,opt,name=original_media_type,json=originalMediaType,proto3,enum=google.cloud.speech.v1p1beta1.RecognitionMetadata_OriginalMediaType" json:"original_media_type,omitempty"`
	// The type of device the speech was recorded with.
	RecordingDeviceType RecognitionMetadata_RecordingDeviceType `protobuf:"varint,6,opt,name=recording_device_type,json=recordingDeviceType,proto3,enum=google.cloud.speech.v1p1beta1.RecognitionMetadata_RecordingDeviceType" json:"recording_device_type,omitempty"`
	// The device used to make the recording.  Examples 'Nexus 5X' or
	// 'Polycom SoundStation IP 6000' or 'POTS' or 'VoIP' or
	// 'Cardioid Microphone'.
	RecordingDeviceName string `protobuf:"bytes,7,opt,name=recording_device_name,json=recordingDeviceName,proto3" json:"recording_device_name,omitempty"`
	// Mime type of the original audio file.  For example `audio/m4a`,
	// `audio/x-alaw-basic`, `audio/mp3`, `audio/3gpp`.
	// A list of possible audio mime types is maintained at
	// http://www.iana.org/assignments/media-types/media-types.xhtml#audio
	OriginalMimeType string `protobuf:"bytes,8,opt,name=original_mime_type,json=originalMimeType,proto3" json:"original_mime_type,omitempty"`
	// Obfuscated (privacy-protected) ID of the user, to identify number of
	// unique users using the service.
	ObfuscatedId int64 `protobuf:"varint,9,opt,name=obfuscated_id,json=obfuscatedId,proto3" json:"obfuscated_id,omitempty"` // Deprecated: Do not use.
	// Description of the content. E.g. "Recordings of federal supreme court
	// hearings from 2012".
	AudioTopic           string   `protobuf:"bytes,10,opt,name=audio_topic,json=audioTopic,proto3" json:"audio_topic,omitempty"`
	// XXX_* fields are internal bookkeeping used by the proto runtime.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset restores m to its zero value.
func (m *RecognitionMetadata) Reset()         { *m = RecognitionMetadata{} }

// String returns the compact text-format representation of m.
func (m *RecognitionMetadata) String() string { return proto.CompactTextString(m) }

// ProtoMessage tags *RecognitionMetadata as a protobuf message.
func (*RecognitionMetadata) ProtoMessage()    {}

// Descriptor returns the gzipped file descriptor bytes and the index
// path of this message within that descriptor.
func (*RecognitionMetadata) Descriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{6}
}

// XXX_Unmarshal decodes the wire-format bytes b into m.
func (m *RecognitionMetadata) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_RecognitionMetadata.Unmarshal(m, b)
}

// XXX_Marshal appends the wire-format encoding of m to b.
func (m *RecognitionMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_RecognitionMetadata.Marshal(b, m, deterministic)
}

// XXX_Merge merges src into m.
func (m *RecognitionMetadata) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RecognitionMetadata.Merge(m, src)
}

// XXX_Size reports the size of the encoded form of m in bytes.
func (m *RecognitionMetadata) XXX_Size() int {
	return xxx_messageInfo_RecognitionMetadata.Size(m)
}

// XXX_DiscardUnknown drops any unrecognized fields retained on m.
func (m *RecognitionMetadata) XXX_DiscardUnknown() {
	xxx_messageInfo_RecognitionMetadata.DiscardUnknown(m)
}

// xxx_messageInfo_RecognitionMetadata caches reflection-derived
// marshaling metadata for RecognitionMetadata.
var xxx_messageInfo_RecognitionMetadata proto.InternalMessageInfo

// GetInteractionType returns the interaction type, or
// INTERACTION_TYPE_UNSPECIFIED if m is nil.
func (m *RecognitionMetadata) GetInteractionType() RecognitionMetadata_InteractionType {
	if m != nil {
		return m.InteractionType
	}
	return RecognitionMetadata_INTERACTION_TYPE_UNSPECIFIED
}

// GetIndustryNaicsCodeOfAudio returns the NAICS industry code, or 0 if
// m is nil.
func (m *RecognitionMetadata) GetIndustryNaicsCodeOfAudio() uint32 {
	if m != nil {
		return m.IndustryNaicsCodeOfAudio
	}
	return 0
}

// GetMicrophoneDistance returns the microphone distance, or
// MICROPHONE_DISTANCE_UNSPECIFIED if m is nil.
func (m *RecognitionMetadata) GetMicrophoneDistance() RecognitionMetadata_MicrophoneDistance {
	if m != nil {
		return m.MicrophoneDistance
	}
	return RecognitionMetadata_MICROPHONE_DISTANCE_UNSPECIFIED
}

// GetOriginalMediaType returns the original media type, or
// ORIGINAL_MEDIA_TYPE_UNSPECIFIED if m is nil.
func (m *RecognitionMetadata) GetOriginalMediaType() RecognitionMetadata_OriginalMediaType {
	if m != nil {
		return m.OriginalMediaType
	}
	return RecognitionMetadata_ORIGINAL_MEDIA_TYPE_UNSPECIFIED
}

// GetRecordingDeviceType returns the recording device type, or
// RECORDING_DEVICE_TYPE_UNSPECIFIED if m is nil.
func (m *RecognitionMetadata) GetRecordingDeviceType() RecognitionMetadata_RecordingDeviceType {
	if m != nil {
		return m.RecordingDeviceType
	}
	return RecognitionMetadata_RECORDING_DEVICE_TYPE_UNSPECIFIED
}

// GetRecordingDeviceName returns the recording device name, or "" if m
// is nil.
func (m *RecognitionMetadata) GetRecordingDeviceName() string {
	if m != nil {
		return m.RecordingDeviceName
	}
	return ""
}

// GetOriginalMimeType returns the original mime type, or "" if m is nil.
func (m *RecognitionMetadata) GetOriginalMimeType() string {
	if m != nil {
		return m.OriginalMimeType
	}
	return ""
}

// GetObfuscatedId returns the obfuscated user ID, or 0 if m is nil.
//
// Deprecated: Do not use.
func (m *RecognitionMetadata) GetObfuscatedId() int64 {
	if m != nil {
		return m.ObfuscatedId
	}
	return 0
}

// GetAudioTopic returns the audio topic description, or "" if m is nil.
func (m *RecognitionMetadata) GetAudioTopic() string {
	if m != nil {
		return m.AudioTopic
	}
	return ""
}
1107
// Provides "hints" to the speech recognizer to favor specific words and phrases
// in the results.
type SpeechContext struct {
	// A list of strings containing words and phrases "hints" so that
	// the speech recognition is more likely to recognize them. This can be used
	// to improve the accuracy for specific words and phrases, for example, if
	// specific commands are typically spoken by the user. This can also be used
	// to add additional words to the vocabulary of the recognizer. See
	// [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
	//
	// List items can also be set to classes for groups of words that represent
	// common concepts that occur in natural language. For example, rather than
	// providing phrase hints for every month of the year, using the $MONTH class
	// improves the likelihood of correctly transcribing audio that includes
	// months.
	Phrases []string `protobuf:"bytes,1,rep,name=phrases,proto3" json:"phrases,omitempty"`
	// Hint Boost. Positive value will increase the probability that a specific
	// phrase will be recognized over other similar sounding phrases. The higher
	// the boost, the higher the chance of false positive recognition as well.
	// Negative boost values would correspond to anti-biasing. Anti-biasing is not
	// enabled, so negative boost will simply be ignored. Though `boost` can
	// accept a wide range of positive values, most use cases are best served with
	// values between 0 and 20. We recommend using a binary search approach to
	// finding the optimal value for your use case.
	Boost                float32  `protobuf:"fixed32,4,opt,name=boost,proto3" json:"boost,omitempty"`
	// XXX_* fields are internal bookkeeping used by the proto runtime.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset restores m to its zero value.
func (m *SpeechContext) Reset()         { *m = SpeechContext{} }

// String returns the compact text-format representation of m.
func (m *SpeechContext) String() string { return proto.CompactTextString(m) }

// ProtoMessage tags *SpeechContext as a protobuf message.
func (*SpeechContext) ProtoMessage()    {}

// Descriptor returns the gzipped file descriptor bytes and the index
// path of this message within that descriptor.
func (*SpeechContext) Descriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{7}
}

// XXX_Unmarshal decodes the wire-format bytes b into m.
func (m *SpeechContext) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_SpeechContext.Unmarshal(m, b)
}

// XXX_Marshal appends the wire-format encoding of m to b.
func (m *SpeechContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_SpeechContext.Marshal(b, m, deterministic)
}

// XXX_Merge merges src into m.
func (m *SpeechContext) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SpeechContext.Merge(m, src)
}

// XXX_Size reports the size of the encoded form of m in bytes.
func (m *SpeechContext) XXX_Size() int {
	return xxx_messageInfo_SpeechContext.Size(m)
}

// XXX_DiscardUnknown drops any unrecognized fields retained on m.
func (m *SpeechContext) XXX_DiscardUnknown() {
	xxx_messageInfo_SpeechContext.DiscardUnknown(m)
}

// xxx_messageInfo_SpeechContext caches reflection-derived marshaling
// metadata for SpeechContext.
var xxx_messageInfo_SpeechContext proto.InternalMessageInfo

// GetPhrases returns the hint phrases, or nil if m is nil.
func (m *SpeechContext) GetPhrases() []string {
	if m != nil {
		return m.Phrases
	}
	return nil
}

// GetBoost returns the hint boost value, or 0 if m is nil.
func (m *SpeechContext) GetBoost() float32 {
	if m != nil {
		return m.Boost
	}
	return 0
}
1176
// Contains audio data in the encoding specified in the `RecognitionConfig`.
// Either `content` or `uri` must be supplied. Supplying both or neither
// returns [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. See
// [content limits](https://cloud.google.com/speech-to-text/quotas#content).
type RecognitionAudio struct {
	// The audio source, which is either inline content or a Google Cloud
	// Storage uri.
	//
	// Types that are valid to be assigned to AudioSource:
	//	*RecognitionAudio_Content
	//	*RecognitionAudio_Uri
	AudioSource          isRecognitionAudio_AudioSource `protobuf_oneof:"audio_source"`
	// XXX_* fields are internal bookkeeping used by the proto runtime.
	XXX_NoUnkeyedLiteral struct{}                       `json:"-"`
	XXX_unrecognized     []byte                         `json:"-"`
	XXX_sizecache        int32                          `json:"-"`
}

// Reset restores m to its zero value.
func (m *RecognitionAudio) Reset()         { *m = RecognitionAudio{} }

// String returns the compact text-format representation of m.
func (m *RecognitionAudio) String() string { return proto.CompactTextString(m) }

// ProtoMessage tags *RecognitionAudio as a protobuf message.
func (*RecognitionAudio) ProtoMessage()    {}

// Descriptor returns the gzipped file descriptor bytes and the index
// path of this message within that descriptor.
func (*RecognitionAudio) Descriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{8}
}

// XXX_Unmarshal decodes the wire-format bytes b into m.
func (m *RecognitionAudio) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_RecognitionAudio.Unmarshal(m, b)
}

// XXX_Marshal appends the wire-format encoding of m to b.
func (m *RecognitionAudio) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_RecognitionAudio.Marshal(b, m, deterministic)
}

// XXX_Merge merges src into m.
func (m *RecognitionAudio) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RecognitionAudio.Merge(m, src)
}

// XXX_Size reports the size of the encoded form of m in bytes.
func (m *RecognitionAudio) XXX_Size() int {
	return xxx_messageInfo_RecognitionAudio.Size(m)
}

// XXX_DiscardUnknown drops any unrecognized fields retained on m.
func (m *RecognitionAudio) XXX_DiscardUnknown() {
	xxx_messageInfo_RecognitionAudio.DiscardUnknown(m)
}

// xxx_messageInfo_RecognitionAudio caches reflection-derived marshaling
// metadata for RecognitionAudio.
var xxx_messageInfo_RecognitionAudio proto.InternalMessageInfo

// isRecognitionAudio_AudioSource is the closed interface implemented by
// the audio_source oneof wrapper types below.
type isRecognitionAudio_AudioSource interface {
	isRecognitionAudio_AudioSource()
}

// RecognitionAudio_Content wraps inline audio bytes for the
// audio_source oneof.
type RecognitionAudio_Content struct {
	Content []byte `protobuf:"bytes,1,opt,name=content,proto3,oneof"`
}

// RecognitionAudio_Uri wraps a storage URI for the audio_source oneof.
type RecognitionAudio_Uri struct {
	Uri string `protobuf:"bytes,2,opt,name=uri,proto3,oneof"`
}

func (*RecognitionAudio_Content) isRecognitionAudio_AudioSource() {}

func (*RecognitionAudio_Uri) isRecognitionAudio_AudioSource() {}

// GetAudioSource returns the oneof wrapper currently set, or nil if m
// is nil or no source is set.
func (m *RecognitionAudio) GetAudioSource() isRecognitionAudio_AudioSource {
	if m != nil {
		return m.AudioSource
	}
	return nil
}

// GetContent returns the inline audio bytes, or nil if the oneof holds
// a different case (or m is nil).
func (m *RecognitionAudio) GetContent() []byte {
	if x, ok := m.GetAudioSource().(*RecognitionAudio_Content); ok {
		return x.Content
	}
	return nil
}

// GetUri returns the audio URI, or "" if the oneof holds a different
// case (or m is nil).
func (m *RecognitionAudio) GetUri() string {
	if x, ok := m.GetAudioSource().(*RecognitionAudio_Uri); ok {
		return x.Uri
	}
	return ""
}

// XXX_OneofWrappers is for the internal use of the proto package.
func (*RecognitionAudio) XXX_OneofWrappers() []interface{} {
	return []interface{}{
		(*RecognitionAudio_Content)(nil),
		(*RecognitionAudio_Uri)(nil),
	}
}
1263
// The only message returned to the client by the `Recognize` method. It
// contains the result as zero or more sequential `SpeechRecognitionResult`
// messages.
type RecognizeResponse struct {
	// Sequential list of transcription results corresponding to
	// sequential portions of audio.
	Results              []*SpeechRecognitionResult `protobuf:"bytes,2,rep,name=results,proto3" json:"results,omitempty"`
	// XXX_* fields are internal bookkeeping used by the proto runtime.
	XXX_NoUnkeyedLiteral struct{}                   `json:"-"`
	XXX_unrecognized     []byte                     `json:"-"`
	XXX_sizecache        int32                      `json:"-"`
}

// Reset restores m to its zero value.
func (m *RecognizeResponse) Reset()         { *m = RecognizeResponse{} }

// String returns the compact text-format representation of m.
func (m *RecognizeResponse) String() string { return proto.CompactTextString(m) }

// ProtoMessage tags *RecognizeResponse as a protobuf message.
func (*RecognizeResponse) ProtoMessage()    {}

// Descriptor returns the gzipped file descriptor bytes and the index
// path of this message within that descriptor.
func (*RecognizeResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{9}
}

// XXX_Unmarshal decodes the wire-format bytes b into m.
func (m *RecognizeResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_RecognizeResponse.Unmarshal(m, b)
}

// XXX_Marshal appends the wire-format encoding of m to b.
func (m *RecognizeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_RecognizeResponse.Marshal(b, m, deterministic)
}

// XXX_Merge merges src into m.
func (m *RecognizeResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RecognizeResponse.Merge(m, src)
}

// XXX_Size reports the size of the encoded form of m in bytes.
func (m *RecognizeResponse) XXX_Size() int {
	return xxx_messageInfo_RecognizeResponse.Size(m)
}

// XXX_DiscardUnknown drops any unrecognized fields retained on m.
func (m *RecognizeResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_RecognizeResponse.DiscardUnknown(m)
}

// xxx_messageInfo_RecognizeResponse caches reflection-derived marshaling
// metadata for RecognizeResponse.
var xxx_messageInfo_RecognizeResponse proto.InternalMessageInfo

// GetResults returns the transcription results, or nil if m is nil.
func (m *RecognizeResponse) GetResults() []*SpeechRecognitionResult {
	if m != nil {
		return m.Results
	}
	return nil
}
1307
// The only message returned to the client by the `LongRunningRecognize` method.
// It contains the result as zero or more sequential `SpeechRecognitionResult`
// messages. It is included in the `result.response` field of the `Operation`
// returned by the `GetOperation` call of the `google::longrunning::Operations`
// service.
type LongRunningRecognizeResponse struct {
	// Sequential list of transcription results corresponding to
	// sequential portions of audio.
	Results              []*SpeechRecognitionResult `protobuf:"bytes,2,rep,name=results,proto3" json:"results,omitempty"`
	// XXX_* fields are internal bookkeeping used by the proto runtime.
	XXX_NoUnkeyedLiteral struct{}                   `json:"-"`
	XXX_unrecognized     []byte                     `json:"-"`
	XXX_sizecache        int32                      `json:"-"`
}

// Reset restores m to its zero value.
func (m *LongRunningRecognizeResponse) Reset()         { *m = LongRunningRecognizeResponse{} }

// String returns the compact text-format representation of m.
func (m *LongRunningRecognizeResponse) String() string { return proto.CompactTextString(m) }

// ProtoMessage tags *LongRunningRecognizeResponse as a protobuf message.
func (*LongRunningRecognizeResponse) ProtoMessage()    {}

// Descriptor returns the gzipped file descriptor bytes and the index
// path of this message within that descriptor.
func (*LongRunningRecognizeResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{10}
}

// XXX_Unmarshal decodes the wire-format bytes b into m.
func (m *LongRunningRecognizeResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_LongRunningRecognizeResponse.Unmarshal(m, b)
}

// XXX_Marshal appends the wire-format encoding of m to b.
func (m *LongRunningRecognizeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_LongRunningRecognizeResponse.Marshal(b, m, deterministic)
}

// XXX_Merge merges src into m.
func (m *LongRunningRecognizeResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_LongRunningRecognizeResponse.Merge(m, src)
}

// XXX_Size reports the size of the encoded form of m in bytes.
func (m *LongRunningRecognizeResponse) XXX_Size() int {
	return xxx_messageInfo_LongRunningRecognizeResponse.Size(m)
}

// XXX_DiscardUnknown drops any unrecognized fields retained on m.
func (m *LongRunningRecognizeResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_LongRunningRecognizeResponse.DiscardUnknown(m)
}

// xxx_messageInfo_LongRunningRecognizeResponse caches reflection-derived
// marshaling metadata for LongRunningRecognizeResponse.
var xxx_messageInfo_LongRunningRecognizeResponse proto.InternalMessageInfo

// GetResults returns the transcription results, or nil if m is nil.
func (m *LongRunningRecognizeResponse) GetResults() []*SpeechRecognitionResult {
	if m != nil {
		return m.Results
	}
	return nil
}
1353
// Describes the progress of a long-running `LongRunningRecognize` call. It is
// included in the `metadata` field of the `Operation` returned by the
// `GetOperation` call of the `google::longrunning::Operations` service.
type LongRunningRecognizeMetadata struct {
	// Approximate percentage of audio processed thus far. Guaranteed to be 100
	// when the audio is fully processed and the results are available.
	ProgressPercent int32 `protobuf:"varint,1,opt,name=progress_percent,json=progressPercent,proto3" json:"progress_percent,omitempty"`
	// Time when the request was received.
	StartTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
	// Time of the most recent processing update.
	LastUpdateTime       *timestamp.Timestamp `protobuf:"bytes,3,opt,name=last_update_time,json=lastUpdateTime,proto3" json:"last_update_time,omitempty"`
	// XXX_* fields are internal bookkeeping used by the proto runtime.
	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
	XXX_unrecognized     []byte               `json:"-"`
	XXX_sizecache        int32                `json:"-"`
}

// Reset restores m to its zero value.
func (m *LongRunningRecognizeMetadata) Reset()         { *m = LongRunningRecognizeMetadata{} }

// String returns the compact text-format representation of m.
func (m *LongRunningRecognizeMetadata) String() string { return proto.CompactTextString(m) }

// ProtoMessage tags *LongRunningRecognizeMetadata as a protobuf message.
func (*LongRunningRecognizeMetadata) ProtoMessage()    {}

// Descriptor returns the gzipped file descriptor bytes and the index
// path of this message within that descriptor.
func (*LongRunningRecognizeMetadata) Descriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{11}
}

// XXX_Unmarshal decodes the wire-format bytes b into m.
func (m *LongRunningRecognizeMetadata) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_LongRunningRecognizeMetadata.Unmarshal(m, b)
}

// XXX_Marshal appends the wire-format encoding of m to b.
func (m *LongRunningRecognizeMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_LongRunningRecognizeMetadata.Marshal(b, m, deterministic)
}

// XXX_Merge merges src into m.
func (m *LongRunningRecognizeMetadata) XXX_Merge(src proto.Message) {
	xxx_messageInfo_LongRunningRecognizeMetadata.Merge(m, src)
}

// XXX_Size reports the size of the encoded form of m in bytes.
func (m *LongRunningRecognizeMetadata) XXX_Size() int {
	return xxx_messageInfo_LongRunningRecognizeMetadata.Size(m)
}

// XXX_DiscardUnknown drops any unrecognized fields retained on m.
func (m *LongRunningRecognizeMetadata) XXX_DiscardUnknown() {
	xxx_messageInfo_LongRunningRecognizeMetadata.DiscardUnknown(m)
}

// xxx_messageInfo_LongRunningRecognizeMetadata caches reflection-derived
// marshaling metadata for LongRunningRecognizeMetadata.
var xxx_messageInfo_LongRunningRecognizeMetadata proto.InternalMessageInfo

// GetProgressPercent returns the processing progress percentage, or 0
// if m is nil.
func (m *LongRunningRecognizeMetadata) GetProgressPercent() int32 {
	if m != nil {
		return m.ProgressPercent
	}
	return 0
}

// GetStartTime returns the request start time, or nil if m is nil.
func (m *LongRunningRecognizeMetadata) GetStartTime() *timestamp.Timestamp {
	if m != nil {
		return m.StartTime
	}
	return nil
}

// GetLastUpdateTime returns the most recent update time, or nil if m
// is nil.
func (m *LongRunningRecognizeMetadata) GetLastUpdateTime() *timestamp.Timestamp {
	if m != nil {
		return m.LastUpdateTime
	}
	return nil
}
1415
1416// `StreamingRecognizeResponse` is the only message returned to the client by
1417// `StreamingRecognize`. A series of zero or more `StreamingRecognizeResponse`
1418// messages are streamed back to the client. If there is no recognizable
1419// audio, and `single_utterance` is set to false, then no messages are streamed
1420// back to the client.
1421//
1422// Here's an example of a series of ten `StreamingRecognizeResponse`s that might
1423// be returned while processing audio:
1424//
1425// 1. results { alternatives { transcript: "tube" } stability: 0.01 }
1426//
1427// 2. results { alternatives { transcript: "to be a" } stability: 0.01 }
1428//
1429// 3. results { alternatives { transcript: "to be" } stability: 0.9 }
1430//    results { alternatives { transcript: " or not to be" } stability: 0.01 }
1431//
1432// 4. results { alternatives { transcript: "to be or not to be"
1433//                             confidence: 0.92 }
1434//              alternatives { transcript: "to bee or not to bee" }
1435//              is_final: true }
1436//
1437// 5. results { alternatives { transcript: " that's" } stability: 0.01 }
1438//
1439// 6. results { alternatives { transcript: " that is" } stability: 0.9 }
1440//    results { alternatives { transcript: " the question" } stability: 0.01 }
1441//
1442// 7. results { alternatives { transcript: " that is the question"
1443//                             confidence: 0.98 }
1444//              alternatives { transcript: " that was the question" }
1445//              is_final: true }
1446//
1447// Notes:
1448//
1449// - Only two of the above responses #4 and #7 contain final results; they are
1450//   indicated by `is_final: true`. Concatenating these together generates the
1451//   full transcript: "to be or not to be that is the question".
1452//
1453// - The others contain interim `results`. #3 and #6 contain two interim
1454//   `results`: the first portion has a high stability and is less likely to
1455//   change; the second portion has a low stability and is very likely to
1456//   change. A UI designer might choose to show only high stability `results`.
1457//
1458// - The specific `stability` and `confidence` values shown above are only for
1459//   illustrative purposes. Actual values may vary.
1460//
1461// - In each response, only one of these fields will be set:
1462//     `error`,
1463//     `speech_event_type`, or
1464//     one or more (repeated) `results`.
type StreamingRecognizeResponse struct {
	// If set, returns a [google.rpc.Status][google.rpc.Status] message that
	// specifies the error for the operation.
	Error *status.Status `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
	// This repeated list contains zero or more results that
	// correspond to consecutive portions of the audio currently being processed.
	// It contains zero or one `is_final=true` result (the newly settled portion),
	// followed by zero or more `is_final=false` results (the interim results).
	Results []*StreamingRecognitionResult `protobuf:"bytes,2,rep,name=results,proto3" json:"results,omitempty"`
	// Indicates the type of speech event.
	SpeechEventType      StreamingRecognizeResponse_SpeechEventType `protobuf:"varint,4,opt,name=speech_event_type,json=speechEventType,proto3,enum=google.cloud.speech.v1p1beta1.StreamingRecognizeResponse_SpeechEventType" json:"speech_event_type,omitempty"`
	// XXX_* fields are internal bookkeeping used by the protobuf runtime;
	// they are excluded from JSON output via the `json:"-"` tags.
	XXX_NoUnkeyedLiteral struct{}                                   `json:"-"`
	XXX_unrecognized     []byte                                     `json:"-"`
	XXX_sizecache        int32                                      `json:"-"`
}
1480
// Reset clears the message to its zero value.
func (m *StreamingRecognizeResponse) Reset()         { *m = StreamingRecognizeResponse{} }
// String returns the compact text-format rendering of the message.
func (m *StreamingRecognizeResponse) String() string { return proto.CompactTextString(m) }
// ProtoMessage tags the type as a protocol buffer message.
func (*StreamingRecognizeResponse) ProtoMessage()    {}
// Descriptor returns the gzipped .proto file descriptor together with this
// message's index path ([]int{12}) within that file.
func (*StreamingRecognizeResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{12}
}

// XXX_Unmarshal decodes the wire-format bytes b into m.
func (m *StreamingRecognizeResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_StreamingRecognizeResponse.Unmarshal(m, b)
}
// XXX_Marshal appends the wire-format encoding of m to b.
func (m *StreamingRecognizeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_StreamingRecognizeResponse.Marshal(b, m, deterministic)
}
// XXX_Merge merges the fields of src into m via the table-driven codec.
func (m *StreamingRecognizeResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_StreamingRecognizeResponse.Merge(m, src)
}
// XXX_Size reports the size in bytes of the wire-format encoding of m.
func (m *StreamingRecognizeResponse) XXX_Size() int {
	return xxx_messageInfo_StreamingRecognizeResponse.Size(m)
}
// XXX_DiscardUnknown drops any unrecognized fields retained on m.
func (m *StreamingRecognizeResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_StreamingRecognizeResponse.DiscardUnknown(m)
}

// xxx_messageInfo_StreamingRecognizeResponse caches the table-driven
// marshal/unmarshal state shared by the XXX_* methods of this type.
var xxx_messageInfo_StreamingRecognizeResponse proto.InternalMessageInfo
1505
// GetError returns the Error field, or nil if the receiver is nil.
func (m *StreamingRecognizeResponse) GetError() *status.Status {
	if m != nil {
		return m.Error
	}
	return nil
}

// GetResults returns the Results slice, or nil if the receiver is nil.
func (m *StreamingRecognizeResponse) GetResults() []*StreamingRecognitionResult {
	if m != nil {
		return m.Results
	}
	return nil
}

// GetSpeechEventType returns the SpeechEventType field; a nil receiver
// yields the SPEECH_EVENT_UNSPECIFIED enum value.
func (m *StreamingRecognizeResponse) GetSpeechEventType() StreamingRecognizeResponse_SpeechEventType {
	if m != nil {
		return m.SpeechEventType
	}
	return StreamingRecognizeResponse_SPEECH_EVENT_UNSPECIFIED
}
1526
1527// A streaming speech recognition result corresponding to a portion of the audio
1528// that is currently being processed.
type StreamingRecognitionResult struct {
	// May contain one or more recognition hypotheses (up to the
	// maximum specified in `max_alternatives`).
	// These alternatives are ordered in terms of accuracy, with the top (first)
	// alternative being the most probable, as ranked by the recognizer.
	Alternatives []*SpeechRecognitionAlternative `protobuf:"bytes,1,rep,name=alternatives,proto3" json:"alternatives,omitempty"`
	// If `false`, this `StreamingRecognitionResult` represents an
	// interim result that may change. If `true`, this is the final time the
	// speech service will return this particular `StreamingRecognitionResult`,
	// the recognizer will not return any further hypotheses for this portion of
	// the transcript and corresponding audio.
	IsFinal bool `protobuf:"varint,2,opt,name=is_final,json=isFinal,proto3" json:"is_final,omitempty"`
	// An estimate of the likelihood that the recognizer will not
	// change its guess about this interim result. Values range from 0.0
	// (completely unstable) to 1.0 (completely stable).
	// This field is only provided for interim results (`is_final=false`).
	// The default of 0.0 is a sentinel value indicating `stability` was not set.
	Stability float32 `protobuf:"fixed32,3,opt,name=stability,proto3" json:"stability,omitempty"`
	// Time offset of the end of this result relative to the
	// beginning of the audio.
	ResultEndTime *duration.Duration `protobuf:"bytes,4,opt,name=result_end_time,json=resultEndTime,proto3" json:"result_end_time,omitempty"`
	// For multi-channel audio, this is the channel number corresponding to the
	// recognized result for the audio from that channel.
	// For audio_channel_count = N, its output values can range from '1' to 'N'.
	ChannelTag int32 `protobuf:"varint,5,opt,name=channel_tag,json=channelTag,proto3" json:"channel_tag,omitempty"`
	// The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag
	// of the language in this result. This language code was detected to have
	// the most likelihood of being spoken in the audio.
	LanguageCode         string   `protobuf:"bytes,6,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
	// XXX_* fields are internal bookkeeping used by the protobuf runtime;
	// they are excluded from JSON output via the `json:"-"` tags.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
1562
// Reset clears the message to its zero value.
func (m *StreamingRecognitionResult) Reset()         { *m = StreamingRecognitionResult{} }
// String returns the compact text-format rendering of the message.
func (m *StreamingRecognitionResult) String() string { return proto.CompactTextString(m) }
// ProtoMessage tags the type as a protocol buffer message.
func (*StreamingRecognitionResult) ProtoMessage()    {}
// Descriptor returns the gzipped .proto file descriptor together with this
// message's index path ([]int{13}) within that file.
func (*StreamingRecognitionResult) Descriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{13}
}

// XXX_Unmarshal decodes the wire-format bytes b into m.
func (m *StreamingRecognitionResult) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_StreamingRecognitionResult.Unmarshal(m, b)
}
// XXX_Marshal appends the wire-format encoding of m to b.
func (m *StreamingRecognitionResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_StreamingRecognitionResult.Marshal(b, m, deterministic)
}
// XXX_Merge merges the fields of src into m via the table-driven codec.
func (m *StreamingRecognitionResult) XXX_Merge(src proto.Message) {
	xxx_messageInfo_StreamingRecognitionResult.Merge(m, src)
}
// XXX_Size reports the size in bytes of the wire-format encoding of m.
func (m *StreamingRecognitionResult) XXX_Size() int {
	return xxx_messageInfo_StreamingRecognitionResult.Size(m)
}
// XXX_DiscardUnknown drops any unrecognized fields retained on m.
func (m *StreamingRecognitionResult) XXX_DiscardUnknown() {
	xxx_messageInfo_StreamingRecognitionResult.DiscardUnknown(m)
}

// xxx_messageInfo_StreamingRecognitionResult caches the table-driven
// marshal/unmarshal state shared by the XXX_* methods of this type.
var xxx_messageInfo_StreamingRecognitionResult proto.InternalMessageInfo
1587
// GetAlternatives returns the Alternatives slice, or nil if the receiver is nil.
func (m *StreamingRecognitionResult) GetAlternatives() []*SpeechRecognitionAlternative {
	if m != nil {
		return m.Alternatives
	}
	return nil
}

// GetIsFinal returns the IsFinal field, or false if the receiver is nil.
func (m *StreamingRecognitionResult) GetIsFinal() bool {
	if m != nil {
		return m.IsFinal
	}
	return false
}

// GetStability returns the Stability field, or 0 if the receiver is nil.
func (m *StreamingRecognitionResult) GetStability() float32 {
	if m != nil {
		return m.Stability
	}
	return 0
}

// GetResultEndTime returns the ResultEndTime field, or nil if the receiver is nil.
func (m *StreamingRecognitionResult) GetResultEndTime() *duration.Duration {
	if m != nil {
		return m.ResultEndTime
	}
	return nil
}

// GetChannelTag returns the ChannelTag field, or 0 if the receiver is nil.
func (m *StreamingRecognitionResult) GetChannelTag() int32 {
	if m != nil {
		return m.ChannelTag
	}
	return 0
}

// GetLanguageCode returns the LanguageCode field, or "" if the receiver is nil.
func (m *StreamingRecognitionResult) GetLanguageCode() string {
	if m != nil {
		return m.LanguageCode
	}
	return ""
}
1629
1630// A speech recognition result corresponding to a portion of the audio.
type SpeechRecognitionResult struct {
	// May contain one or more recognition hypotheses (up to the
	// maximum specified in `max_alternatives`).
	// These alternatives are ordered in terms of accuracy, with the top (first)
	// alternative being the most probable, as ranked by the recognizer.
	Alternatives []*SpeechRecognitionAlternative `protobuf:"bytes,1,rep,name=alternatives,proto3" json:"alternatives,omitempty"`
	// For multi-channel audio, this is the channel number corresponding to the
	// recognized result for the audio from that channel.
	// For audio_channel_count = N, its output values can range from '1' to 'N'.
	ChannelTag int32 `protobuf:"varint,2,opt,name=channel_tag,json=channelTag,proto3" json:"channel_tag,omitempty"`
	// The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag
	// of the language in this result. This language code was detected to have
	// the most likelihood of being spoken in the audio.
	LanguageCode         string   `protobuf:"bytes,5,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
	// XXX_* fields are internal bookkeeping used by the protobuf runtime;
	// they are excluded from JSON output via the `json:"-"` tags.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
1649
// Reset clears the message to its zero value.
func (m *SpeechRecognitionResult) Reset()         { *m = SpeechRecognitionResult{} }
// String returns the compact text-format rendering of the message.
func (m *SpeechRecognitionResult) String() string { return proto.CompactTextString(m) }
// ProtoMessage tags the type as a protocol buffer message.
func (*SpeechRecognitionResult) ProtoMessage()    {}
// Descriptor returns the gzipped .proto file descriptor together with this
// message's index path ([]int{14}) within that file.
func (*SpeechRecognitionResult) Descriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{14}
}

// XXX_Unmarshal decodes the wire-format bytes b into m.
func (m *SpeechRecognitionResult) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_SpeechRecognitionResult.Unmarshal(m, b)
}
// XXX_Marshal appends the wire-format encoding of m to b.
func (m *SpeechRecognitionResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_SpeechRecognitionResult.Marshal(b, m, deterministic)
}
// XXX_Merge merges the fields of src into m via the table-driven codec.
func (m *SpeechRecognitionResult) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SpeechRecognitionResult.Merge(m, src)
}
// XXX_Size reports the size in bytes of the wire-format encoding of m.
func (m *SpeechRecognitionResult) XXX_Size() int {
	return xxx_messageInfo_SpeechRecognitionResult.Size(m)
}
// XXX_DiscardUnknown drops any unrecognized fields retained on m.
func (m *SpeechRecognitionResult) XXX_DiscardUnknown() {
	xxx_messageInfo_SpeechRecognitionResult.DiscardUnknown(m)
}

// xxx_messageInfo_SpeechRecognitionResult caches the table-driven
// marshal/unmarshal state shared by the XXX_* methods of this type.
var xxx_messageInfo_SpeechRecognitionResult proto.InternalMessageInfo
1674
// GetAlternatives returns the Alternatives slice, or nil if the receiver is nil.
func (m *SpeechRecognitionResult) GetAlternatives() []*SpeechRecognitionAlternative {
	if m != nil {
		return m.Alternatives
	}
	return nil
}

// GetChannelTag returns the ChannelTag field, or 0 if the receiver is nil.
func (m *SpeechRecognitionResult) GetChannelTag() int32 {
	if m != nil {
		return m.ChannelTag
	}
	return 0
}

// GetLanguageCode returns the LanguageCode field, or "" if the receiver is nil.
func (m *SpeechRecognitionResult) GetLanguageCode() string {
	if m != nil {
		return m.LanguageCode
	}
	return ""
}
1695
1696// Alternative hypotheses (a.k.a. n-best list).
type SpeechRecognitionAlternative struct {
	// Transcript text representing the words that the user spoke.
	Transcript string `protobuf:"bytes,1,opt,name=transcript,proto3" json:"transcript,omitempty"`
	// The confidence estimate between 0.0 and 1.0. A higher number
	// indicates an estimated greater likelihood that the recognized words are
	// correct. This field is set only for the top alternative of a non-streaming
	// result or, of a streaming result where `is_final=true`.
	// This field is not guaranteed to be accurate and users should not rely on it
	// to be always provided.
	// The default of 0.0 is a sentinel value indicating `confidence` was not set.
	Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
	// A list of word-specific information for each recognized word.
	// Note: When `enable_speaker_diarization` is true, you will see all the words
	// from the beginning of the audio.
	Words                []*WordInfo `protobuf:"bytes,3,rep,name=words,proto3" json:"words,omitempty"`
	// XXX_* fields are internal bookkeeping used by the protobuf runtime;
	// they are excluded from JSON output via the `json:"-"` tags.
	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
	XXX_unrecognized     []byte      `json:"-"`
	XXX_sizecache        int32       `json:"-"`
}
1716
// Reset clears the message to its zero value.
func (m *SpeechRecognitionAlternative) Reset()         { *m = SpeechRecognitionAlternative{} }
// String returns the compact text-format rendering of the message.
func (m *SpeechRecognitionAlternative) String() string { return proto.CompactTextString(m) }
// ProtoMessage tags the type as a protocol buffer message.
func (*SpeechRecognitionAlternative) ProtoMessage()    {}
// Descriptor returns the gzipped .proto file descriptor together with this
// message's index path ([]int{15}) within that file.
func (*SpeechRecognitionAlternative) Descriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{15}
}

// XXX_Unmarshal decodes the wire-format bytes b into m.
func (m *SpeechRecognitionAlternative) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_SpeechRecognitionAlternative.Unmarshal(m, b)
}
// XXX_Marshal appends the wire-format encoding of m to b.
func (m *SpeechRecognitionAlternative) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_SpeechRecognitionAlternative.Marshal(b, m, deterministic)
}
// XXX_Merge merges the fields of src into m via the table-driven codec.
func (m *SpeechRecognitionAlternative) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SpeechRecognitionAlternative.Merge(m, src)
}
// XXX_Size reports the size in bytes of the wire-format encoding of m.
func (m *SpeechRecognitionAlternative) XXX_Size() int {
	return xxx_messageInfo_SpeechRecognitionAlternative.Size(m)
}
// XXX_DiscardUnknown drops any unrecognized fields retained on m.
func (m *SpeechRecognitionAlternative) XXX_DiscardUnknown() {
	xxx_messageInfo_SpeechRecognitionAlternative.DiscardUnknown(m)
}

// xxx_messageInfo_SpeechRecognitionAlternative caches the table-driven
// marshal/unmarshal state shared by the XXX_* methods of this type.
var xxx_messageInfo_SpeechRecognitionAlternative proto.InternalMessageInfo
1741
// GetTranscript returns the Transcript field, or "" if the receiver is nil.
func (m *SpeechRecognitionAlternative) GetTranscript() string {
	if m != nil {
		return m.Transcript
	}
	return ""
}

// GetConfidence returns the Confidence field, or 0 if the receiver is nil.
func (m *SpeechRecognitionAlternative) GetConfidence() float32 {
	if m != nil {
		return m.Confidence
	}
	return 0
}

// GetWords returns the Words slice, or nil if the receiver is nil.
func (m *SpeechRecognitionAlternative) GetWords() []*WordInfo {
	if m != nil {
		return m.Words
	}
	return nil
}
1762
1763// Word-specific information for recognized words.
type WordInfo struct {
	// Time offset relative to the beginning of the audio,
	// and corresponding to the start of the spoken word.
	// This field is only set if `enable_word_time_offsets=true` and only
	// in the top hypothesis.
	// This is an experimental feature and the accuracy of the time offset can
	// vary.
	StartTime *duration.Duration `protobuf:"bytes,1,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
	// Time offset relative to the beginning of the audio,
	// and corresponding to the end of the spoken word.
	// This field is only set if `enable_word_time_offsets=true` and only
	// in the top hypothesis.
	// This is an experimental feature and the accuracy of the time offset can
	// vary.
	EndTime *duration.Duration `protobuf:"bytes,2,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
	// The word corresponding to this set of information.
	Word string `protobuf:"bytes,3,opt,name=word,proto3" json:"word,omitempty"`
	// The confidence estimate between 0.0 and 1.0. A higher number
	// indicates an estimated greater likelihood that the recognized words are
	// correct. This field is set only for the top alternative of a non-streaming
	// result or, of a streaming result where `is_final=true`.
	// This field is not guaranteed to be accurate and users should not rely on it
	// to be always provided.
	// The default of 0.0 is a sentinel value indicating `confidence` was not set.
	Confidence float32 `protobuf:"fixed32,4,opt,name=confidence,proto3" json:"confidence,omitempty"`
	// A distinct integer value is assigned for every speaker within
	// the audio. This field specifies which one of those speakers was detected to
	// have spoken this word. Value ranges from '1' to diarization_speaker_count.
	// speaker_tag is set if enable_speaker_diarization = 'true' and only in the
	// top alternative.
	SpeakerTag           int32    `protobuf:"varint,5,opt,name=speaker_tag,json=speakerTag,proto3" json:"speaker_tag,omitempty"`
	// XXX_* fields are internal bookkeeping used by the protobuf runtime;
	// they are excluded from JSON output via the `json:"-"` tags.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
1799
// Reset clears the message to its zero value.
func (m *WordInfo) Reset()         { *m = WordInfo{} }
// String returns the compact text-format rendering of the message.
func (m *WordInfo) String() string { return proto.CompactTextString(m) }
// ProtoMessage tags the type as a protocol buffer message.
func (*WordInfo) ProtoMessage()    {}
// Descriptor returns the gzipped .proto file descriptor together with this
// message's index path ([]int{16}) within that file.
func (*WordInfo) Descriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{16}
}

// XXX_Unmarshal decodes the wire-format bytes b into m.
func (m *WordInfo) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_WordInfo.Unmarshal(m, b)
}
// XXX_Marshal appends the wire-format encoding of m to b.
func (m *WordInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_WordInfo.Marshal(b, m, deterministic)
}
// XXX_Merge merges the fields of src into m via the table-driven codec.
func (m *WordInfo) XXX_Merge(src proto.Message) {
	xxx_messageInfo_WordInfo.Merge(m, src)
}
// XXX_Size reports the size in bytes of the wire-format encoding of m.
func (m *WordInfo) XXX_Size() int {
	return xxx_messageInfo_WordInfo.Size(m)
}
// XXX_DiscardUnknown drops any unrecognized fields retained on m.
func (m *WordInfo) XXX_DiscardUnknown() {
	xxx_messageInfo_WordInfo.DiscardUnknown(m)
}

// xxx_messageInfo_WordInfo caches the table-driven marshal/unmarshal
// state shared by the XXX_* methods of this type.
var xxx_messageInfo_WordInfo proto.InternalMessageInfo
1824
// GetStartTime returns the StartTime field, or nil if the receiver is nil.
func (m *WordInfo) GetStartTime() *duration.Duration {
	if m != nil {
		return m.StartTime
	}
	return nil
}

// GetEndTime returns the EndTime field, or nil if the receiver is nil.
func (m *WordInfo) GetEndTime() *duration.Duration {
	if m != nil {
		return m.EndTime
	}
	return nil
}

// GetWord returns the Word field, or "" if the receiver is nil.
func (m *WordInfo) GetWord() string {
	if m != nil {
		return m.Word
	}
	return ""
}

// GetConfidence returns the Confidence field, or 0 if the receiver is nil.
func (m *WordInfo) GetConfidence() float32 {
	if m != nil {
		return m.Confidence
	}
	return 0
}

// GetSpeakerTag returns the SpeakerTag field, or 0 if the receiver is nil.
func (m *WordInfo) GetSpeakerTag() int32 {
	if m != nil {
		return m.SpeakerTag
	}
	return 0
}
1859
// init registers every enum and message type generated from
// cloud_speech.proto with the proto runtime's global registry, keyed by
// fully-qualified proto name, so they can be resolved by name at runtime.
func init() {
	proto.RegisterEnum("google.cloud.speech.v1p1beta1.RecognitionConfig_AudioEncoding", RecognitionConfig_AudioEncoding_name, RecognitionConfig_AudioEncoding_value)
	proto.RegisterEnum("google.cloud.speech.v1p1beta1.RecognitionMetadata_InteractionType", RecognitionMetadata_InteractionType_name, RecognitionMetadata_InteractionType_value)
	proto.RegisterEnum("google.cloud.speech.v1p1beta1.RecognitionMetadata_MicrophoneDistance", RecognitionMetadata_MicrophoneDistance_name, RecognitionMetadata_MicrophoneDistance_value)
	proto.RegisterEnum("google.cloud.speech.v1p1beta1.RecognitionMetadata_OriginalMediaType", RecognitionMetadata_OriginalMediaType_name, RecognitionMetadata_OriginalMediaType_value)
	proto.RegisterEnum("google.cloud.speech.v1p1beta1.RecognitionMetadata_RecordingDeviceType", RecognitionMetadata_RecordingDeviceType_name, RecognitionMetadata_RecordingDeviceType_value)
	proto.RegisterEnum("google.cloud.speech.v1p1beta1.StreamingRecognizeResponse_SpeechEventType", StreamingRecognizeResponse_SpeechEventType_name, StreamingRecognizeResponse_SpeechEventType_value)
	proto.RegisterType((*RecognizeRequest)(nil), "google.cloud.speech.v1p1beta1.RecognizeRequest")
	proto.RegisterType((*LongRunningRecognizeRequest)(nil), "google.cloud.speech.v1p1beta1.LongRunningRecognizeRequest")
	proto.RegisterType((*StreamingRecognizeRequest)(nil), "google.cloud.speech.v1p1beta1.StreamingRecognizeRequest")
	proto.RegisterType((*StreamingRecognitionConfig)(nil), "google.cloud.speech.v1p1beta1.StreamingRecognitionConfig")
	proto.RegisterType((*RecognitionConfig)(nil), "google.cloud.speech.v1p1beta1.RecognitionConfig")
	proto.RegisterType((*SpeakerDiarizationConfig)(nil), "google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig")
	proto.RegisterType((*RecognitionMetadata)(nil), "google.cloud.speech.v1p1beta1.RecognitionMetadata")
	proto.RegisterType((*SpeechContext)(nil), "google.cloud.speech.v1p1beta1.SpeechContext")
	proto.RegisterType((*RecognitionAudio)(nil), "google.cloud.speech.v1p1beta1.RecognitionAudio")
	proto.RegisterType((*RecognizeResponse)(nil), "google.cloud.speech.v1p1beta1.RecognizeResponse")
	proto.RegisterType((*LongRunningRecognizeResponse)(nil), "google.cloud.speech.v1p1beta1.LongRunningRecognizeResponse")
	proto.RegisterType((*LongRunningRecognizeMetadata)(nil), "google.cloud.speech.v1p1beta1.LongRunningRecognizeMetadata")
	proto.RegisterType((*StreamingRecognizeResponse)(nil), "google.cloud.speech.v1p1beta1.StreamingRecognizeResponse")
	proto.RegisterType((*StreamingRecognitionResult)(nil), "google.cloud.speech.v1p1beta1.StreamingRecognitionResult")
	proto.RegisterType((*SpeechRecognitionResult)(nil), "google.cloud.speech.v1p1beta1.SpeechRecognitionResult")
	proto.RegisterType((*SpeechRecognitionAlternative)(nil), "google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative")
	proto.RegisterType((*WordInfo)(nil), "google.cloud.speech.v1p1beta1.WordInfo")
}
1885
// init registers the gzipped file descriptor under the source .proto path
// so descriptor lookups by filename succeed at runtime.
func init() {
	proto.RegisterFile("google/cloud/speech/v1p1beta1/cloud_speech.proto", fileDescriptor_6adcab595cc29495)
}
1889
1890var fileDescriptor_6adcab595cc29495 = []byte{
1891	// 2366 bytes of a gzipped FileDescriptorProto
1892	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 0x3d, 0x93, 0xdb, 0xc6,
1893	0xf9, 0x17, 0xc8, 0xe3, 0xdd, 0xf1, 0xb9, 0x37, 0xdc, 0x9e, 0x6c, 0x41, 0xd4, 0xd9, 0x92, 0xa1,
1894	0xf1, 0x5f, 0xfa, 0x3b, 0x0e, 0x29, 0x9d, 0x1d, 0xbf, 0xc8, 0x8e, 0x63, 0x1c, 0x88, 0xd3, 0x21,
1895	0x43, 0x12, 0x9c, 0x25, 0x4f, 0x8a, 0xdd, 0x60, 0xf6, 0x80, 0x25, 0x0f, 0x13, 0x12, 0x40, 0x80,
1896	0xa5, 0xde, 0x9a, 0x64, 0xfc, 0x05, 0x52, 0x24, 0x6d, 0x9a, 0x4c, 0x52, 0x66, 0x26, 0x65, 0x3a,
1897	0xa7, 0x49, 0xa3, 0x26, 0x33, 0x49, 0x97, 0xca, 0x45, 0xbe, 0x40, 0x52, 0xa5, 0x4b, 0x66, 0x77,
1898	0x01, 0x12, 0xe4, 0x9d, 0x74, 0x92, 0x26, 0x2e, 0xd2, 0x71, 0x9f, 0x97, 0xdf, 0xf3, 0xb2, 0x0f,
1899	0x9e, 0xdd, 0x67, 0x09, 0xb7, 0x86, 0x51, 0x34, 0x1c, 0xd1, 0x86, 0x37, 0x8a, 0x26, 0x7e, 0x23,
1900	0x8d, 0x29, 0xf5, 0x4e, 0x1a, 0x0f, 0x6e, 0xc7, 0xb7, 0x8f, 0x29, 0x23, 0xb7, 0x25, 0xd9, 0x95,
1901	0xe4, 0x7a, 0x9c, 0x44, 0x2c, 0x42, 0x6f, 0x48, 0x8d, 0xba, 0x60, 0xd5, 0x33, 0xd6, 0x54, 0xa3,
1902	0xb6, 0x9b, 0x01, 0x92, 0x38, 0x68, 0x90, 0x30, 0x8c, 0x18, 0x61, 0x41, 0x14, 0xa6, 0x52, 0xb9,
1903	0x76, 0xa9, 0xc0, 0xf5, 0x46, 0x01, 0x0d, 0x59, 0xc6, 0xb8, 0x5a, 0x60, 0x0c, 0x02, 0x3a, 0xf2,
1904	0xdd, 0x63, 0x7a, 0x42, 0x1e, 0x04, 0x51, 0x92, 0x09, 0x5c, 0xcf, 0x04, 0x46, 0x51, 0x38, 0x4c,
1905	0x26, 0x61, 0x18, 0x84, 0xc3, 0x46, 0x14, 0xd3, 0x64, 0x0e, 0xfe, 0x72, 0x26, 0x24, 0x56, 0xc7,
1906	0x93, 0x41, 0x83, 0x84, 0x8f, 0x33, 0xd6, 0x9b, 0x8b, 0x2c, 0x7f, 0x22, 0x75, 0x17, 0x1c, 0x98,
1907	0xf2, 0x59, 0x30, 0xa6, 0x29, 0x23, 0xe3, 0x78, 0xc1, 0xf5, 0x24, 0xf6, 0x1a, 0x29, 0x23, 0x6c,
1908	0x92, 0x19, 0xd5, 0x7f, 0xa7, 0x80, 0x8a, 0xa9, 0x17, 0x0d, 0xc3, 0xe0, 0x09, 0xc5, 0xf4, 0x27,
1909	0x13, 0x9a, 0x32, 0xd4, 0x86, 0x65, 0x2f, 0x0a, 0x07, 0xc1, 0x50, 0x53, 0xae, 0x29, 0x37, 0xd7,
1910	0xf6, 0x6e, 0xd5, 0x9f, 0x9b, 0xb6, 0x7a, 0x06, 0xc0, 0x1d, 0x32, 0x85, 0xde, 0x7e, 0xf9, 0x1b,
1911	0xa3, 0x84, 0x33, 0x10, 0xf4, 0x43, 0xa8, 0x90, 0x89, 0x1f, 0x44, 0x5a, 0x49, 0xa0, 0x35, 0x5e,
1912	0x1c, 0xcd, 0xe0, 0x6a, 0x12, 0x4c, 0x42, 0xe8, 0x7f, 0x50, 0xe0, 0x4a, 0x2b, 0x0a, 0x87, 0x58,
1913	0x66, 0xf1, 0x7f, 0xc9, 0xf5, 0xaf, 0x15, 0xb8, 0xdc, 0x63, 0x09, 0x25, 0xe3, 0xb3, 0x1c, 0x1f,
1914	0x80, 0x9a, 0xe6, 0x4c, 0x77, 0x2e, 0x84, 0x8f, 0xcf, 0x31, 0xba, 0x88, 0x39, 0x8b, 0xe5, 0xf0,
1915	0x02, 0xde, 0x9a, 0x82, 0x4a, 0x12, 0x7a, 0x1b, 0x36, 0x84, 0x3b, 0xdc, 0x06, 0xa3, 0x21, 0x13,
1916	0x91, 0xad, 0x1f, 0x5e, 0xc0, 0xeb, 0x82, 0x6c, 0x4a, 0xea, 0xfe, 0x0e, 0x6c, 0xcf, 0xdc, 0x49,
1917	0xa4, 0x8f, 0x3c, 0x82, 0xda, 0xb3, 0xad, 0xfd, 0xb7, 0x73, 0xff, 0xff, 0xa0, 0xa6, 0x41, 0x38,
1918	0x1c, 0x51, 0x77, 0xc2, 0x18, 0x4d, 0x48, 0xe8, 0x51, 0xe1, 0xec, 0x2a, 0xde, 0x92, 0xf4, 0xa3,
1919	0x9c, 0x8c, 0x6e, 0xc0, 0x56, 0x10, 0x32, 0x9a, 0x04, 0x63, 0x37, 0xa1, 0xe9, 0x64, 0xc4, 0x52,
1920	0xad, 0x2c, 0x24, 0x37, 0x33, 0x32, 0x96, 0x54, 0xfd, 0x1f, 0x55, 0xd8, 0x3e, 0xed, 0xf8, 0x97,
1921	0xb0, 0x4a, 0x43, 0x2f, 0xf2, 0x83, 0x50, 0xba, 0xbe, 0xb9, 0xf7, 0xd9, 0xcb, 0xba, 0x5e, 0x17,
1922	0xfb, 0x6d, 0x65, 0x28, 0x78, 0x8a, 0x87, 0xde, 0x81, 0xed, 0x94, 0x8c, 0xe3, 0x11, 0x75, 0x13,
1923	0xc2, 0xa8, 0x7b, 0x42, 0x13, 0xf6, 0x44, 0x84, 0x51, 0xc1, 0x5b, 0x92, 0x81, 0x09, 0xa3, 0x87,
1924	0x9c, 0x8c, 0xea, 0xb0, 0x93, 0xed, 0xcd, 0x09, 0x09, 0x43, 0x3a, 0x72, 0xbd, 0x68, 0x12, 0x32,
1925	0x6d, 0x45, 0x48, 0x6f, 0xcb, 0xfd, 0x91, 0x1c, 0x93, 0x33, 0x50, 0x1f, 0x6e, 0xd0, 0x90, 0x1c,
1926	0x8f, 0xa8, 0x9b, 0xd2, 0x98, 0x08, 0xfc, 0x64, 0xe6, 0x98, 0x1b, 0xd3, 0x24, 0x47, 0xd2, 0xd6,
1927	0x45, 0x3a, 0xae, 0x4b, 0xf1, 0x5e, 0x26, 0x5d, 0x88, 0xa2, 0x4b, 0x93, 0x0c, 0x1a, 0xdd, 0x84,
1928	0x8d, 0x11, 0x09, 0x87, 0x13, 0x32, 0xa4, 0xae, 0x17, 0xf9, 0x54, 0xa4, 0xb2, 0x2a, 0xf7, 0x66,
1929	0x3d, 0xe7, 0x98, 0x91, 0x4f, 0xd1, 0xa7, 0x50, 0x23, 0x23, 0x46, 0x93, 0x90, 0xb0, 0xe0, 0x01,
1930	0x75, 0xe7, 0xb4, 0x52, 0x0d, 0x5d, 0x2b, 0xdf, 0xac, 0x62, 0xad, 0x20, 0xd1, 0x2a, 0x28, 0xa7,
1931	0x7c, 0x7f, 0xc7, 0xe4, 0x91, 0x5b, 0xe0, 0xa7, 0xda, 0x92, 0x4c, 0xcc, 0x98, 0x3c, 0x32, 0x0a,
1932	0x64, 0x2e, 0x1a, 0x27, 0xd1, 0x80, 0x84, 0x01, 0x7b, 0xec, 0x0e, 0x02, 0xce, 0xd2, 0x2a, 0xb2,
1933	0x14, 0xa6, 0xf4, 0x03, 0x41, 0x46, 0x47, 0xb0, 0x25, 0x77, 0x4b, 0x16, 0xf8, 0x23, 0x96, 0x6a,
1934	0xcb, 0xd7, 0xca, 0x37, 0xd7, 0xf6, 0xde, 0x3d, 0xef, 0x33, 0x12, 0x04, 0x53, 0x2a, 0xe1, 0xcd,
1935	0xb4, 0xb8, 0x4c, 0xd1, 0x87, 0xa0, 0x65, 0xa9, 0x7e, 0x18, 0x25, 0xbe, 0xcb, 0xfb, 0xab, 0x1b,
1936	0x0d, 0x06, 0x29, 0x65, 0xa9, 0xb6, 0x2a, 0x3c, 0x79, 0x4d, 0xf2, 0xef, 0x47, 0x89, 0xdf, 0x0f,
1937	0xc6, 0xd4, 0x91, 0x4c, 0xf4, 0x3e, 0xbc, 0x5e, 0x54, 0x14, 0xb5, 0xed, 0x53, 0x5e, 0xcb, 0x5b,
1938	0x42, 0xed, 0xe2, 0x4c, 0xcd, 0x9c, 0xf2, 0xd0, 0xe7, 0xb0, 0x9b, 0x69, 0x91, 0x09, 0x8b, 0xc6,
1939	0x84, 0x05, 0x9e, 0x1b, 0x4f, 0x42, 0x8f, 0x4d, 0x44, 0xdb, 0xd7, 0xd6, 0x84, 0x6e, 0x4d, 0xca,
1940	0x18, 0xb9, 0x48, 0x77, 0x26, 0x81, 0x3e, 0x87, 0x5a, 0x5e, 0x1b, 0x31, 0x25, 0x3f, 0xa6, 0x89,
1941	0xeb, 0x07, 0x24, 0x09, 0x9e, 0x48, 0x7d, 0x95, 0xeb, 0xef, 0x97, 0x34, 0x05, 0x67, 0x61, 0xf5,
1942	0xa4, 0x50, 0x73, 0x26, 0x83, 0x3e, 0x83, 0xcb, 0x05, 0x95, 0x29, 0x8c, 0xac, 0xc9, 0x6d, 0xbe,
1943	0x51, 0x02, 0xe0, 0x52, 0x41, 0x28, 0x43, 0x91, 0xd5, 0x39, 0x00, 0x54, 0xd4, 0xcf, 0x5a, 0xc3,
1944	0x8e, 0x68, 0x0d, 0x1f, 0x9e, 0xbf, 0x19, 0x0b, 0xee, 0xc8, 0xcf, 0x0c, 0x6f, 0xfb, 0x8b, 0x24,
1945	0xd4, 0x81, 0xd5, 0x31, 0x65, 0xc4, 0x27, 0x8c, 0x68, 0x55, 0x81, 0xbe, 0xf7, 0xe2, 0x5f, 0x6f,
1946	0x3b, 0xd3, 0xc4, 0x53, 0x0c, 0x74, 0x11, 0x2a, 0xe3, 0xc8, 0xa7, 0x23, 0x6d, 0x83, 0xd7, 0x3d,
1947	0x96, 0x0b, 0xf4, 0x16, 0xac, 0x4f, 0x52, 0xea, 0xd2, 0xf0, 0x84, 0x77, 0x1c, 0x5f, 0xdb, 0x14,
1948	0x3b, 0xb0, 0x36, 0x49, 0xa9, 0x95, 0x91, 0xf4, 0x5f, 0x2a, 0xb0, 0x31, 0xd7, 0x06, 0x90, 0x06,
1949	0x17, 0xad, 0x8e, 0xe9, 0x34, 0xed, 0xce, 0x5d, 0xf7, 0xa8, 0xd3, 0xeb, 0x5a, 0xa6, 0x7d, 0x60,
1950	0x5b, 0x4d, 0xf5, 0x02, 0x5a, 0x87, 0xd5, 0x96, 0xdd, 0xb1, 0x0c, 0x7c, 0xfb, 0x03, 0x55, 0x41,
1951	0xab, 0xb0, 0x74, 0xd0, 0x32, 0x4c, 0xb5, 0x84, 0xaa, 0x50, 0x69, 0x1f, 0xb5, 0x8c, 0xfb, 0x6a,
1952	0x19, 0xad, 0x40, 0xd9, 0x68, 0x63, 0x75, 0x09, 0x01, 0x2c, 0x1b, 0x6d, 0xec, 0xde, 0xdf, 0x57,
1953	0x2b, 0x5c, 0xcf, 0xb9, 0x7b, 0xd7, 0x75, 0xba, 0x47, 0x3d, 0x75, 0x19, 0xd5, 0xe0, 0xf5, 0x5e,
1954	0xd7, 0xb2, 0x7e, 0xe4, 0xde, 0xb7, 0xfb, 0x87, 0xee, 0xa1, 0x65, 0x34, 0x2d, 0xec, 0xee, 0x7f,
1955	0xd1, 0xb7, 0xd4, 0x15, 0xae, 0xde, 0xee, 0xbe, 0xa7, 0xae, 0xea, 0xbf, 0x57, 0x40, 0x7b, 0x56,
1956	0x3e, 0xf9, 0x27, 0xfc, 0x9c, 0x32, 0x51, 0x44, 0x90, 0xcf, 0x2e, 0x91, 0x77, 0x60, 0x7b, 0x1c,
1957	0x2c, 0x96, 0x46, 0xd6, 0xdc, 0xc6, 0xc1, 0x7c, 0x39, 0x70, 0x59, 0xf2, 0x68, 0x41, 0xb6, 0x3c,
1958	0xfd, 0xde, 0x8b, 0xb2, 0xfa, 0xbf, 0xab, 0xb0, 0x73, 0xc6, 0x26, 0xa1, 0x31, 0xa8, 0xa2, 0xa1,
1959	0x13, 0x4f, 0x94, 0x14, 0x7b, 0x1c, 0xd3, 0xac, 0x61, 0xef, 0xbf, 0xfc, 0x96, 0xd7, 0xed, 0x19,
1960	0x54, 0xff, 0x71, 0x4c, 0xf1, 0x56, 0x30, 0x4f, 0x40, 0x9f, 0xc1, 0x6e, 0x10, 0xfa, 0x93, 0x94,
1961	0x25, 0x8f, 0xdd, 0x90, 0x04, 0x5e, 0x2a, 0x3a, 0x9b, 0x1b, 0x0d, 0x5c, 0x79, 0x29, 0xe0, 0xde,
1962	0x6f, 0x60, 0x2d, 0x97, 0xe9, 0x70, 0x11, 0xde, 0xdb, 0x9c, 0x81, 0x28, 0x03, 0xf4, 0x00, 0x76,
1963	0xc6, 0x81, 0x97, 0x44, 0xf1, 0x49, 0x14, 0x52, 0xd7, 0x0f, 0x52, 0x26, 0x0e, 0xb1, 0x25, 0xe1,
1964	0xb1, 0xf5, 0x0a, 0x1e, 0xb7, 0xa7, 0x68, 0xcd, 0x0c, 0x0c, 0xa3, 0xf1, 0x29, 0x1a, 0x62, 0xb0,
1965	0x13, 0x25, 0xc1, 0x30, 0x08, 0xc9, 0xc8, 0x1d, 0x53, 0x3f, 0x20, 0x32, 0x53, 0x15, 0x61, 0xb7,
1966	0xf9, 0x0a, 0x76, 0x9d, 0x0c, 0xad, 0xcd, 0xc1, 0x44, 0xae, 0xb6, 0xa3, 0x45, 0x12, 0x7a, 0x02,
1967	0xaf, 0xf1, 0xd3, 0x27, 0xe1, 0x95, 0xef, 0xfa, 0xf4, 0x41, 0xe0, 0x51, 0x69, 0x77, 0x59, 0xd8,
1968	0x3d, 0x78, 0x05, 0xbb, 0x38, 0xc7, 0x6b, 0x0a, 0x38, 0x61, 0x79, 0x27, 0x39, 0x4d, 0x44, 0x7b,
1969	0x67, 0xd8, 0x0e, 0xc9, 0x98, 0x8a, 0xb3, 0xb3, 0x7a, 0x4a, 0xa7, 0x43, 0xc6, 0x14, 0xbd, 0x0b,
1970	0x68, 0x96, 0x25, 0xde, 0xcf, 0x85, 0xb3, 0xab, 0x42, 0x41, 0x9d, 0x86, 0x17, 0x8c, 0xa5, 0x85,
1971	0x1b, 0xb0, 0x11, 0x1d, 0x0f, 0x26, 0xa9, 0x47, 0x18, 0xf5, 0xdd, 0xc0, 0x17, 0xad, 0xa6, 0x2c,
1972	0x3a, 0xe0, 0xfa, 0x8c, 0x61, 0xfb, 0xe8, 0x2a, 0xac, 0xc9, 0x43, 0x9c, 0x45, 0x71, 0xe0, 0x69,
1973	0x20, 0xf0, 0x40, 0x90, 0xfa, 0x9c, 0xa2, 0xff, 0x49, 0x81, 0xad, 0x85, 0xd2, 0x43, 0xd7, 0x60,
1974	0xd7, 0xee, 0xf4, 0x2d, 0x6c, 0x98, 0x7d, 0xdb, 0xe9, 0xb8, 0xfd, 0x2f, 0xba, 0xd6, 0x42, 0xc3,
1975	0xd8, 0x04, 0x68, 0xda, 0x3d, 0xf3, 0xa8, 0xd7, 0xb3, 0x9d, 0x8e, 0xaa, 0x20, 0x15, 0xd6, 0xbb,
1976	0xd8, 0xea, 0x59, 0x9d, 0xbe, 0xc1, 0x55, 0xd4, 0x12, 0x97, 0xe8, 0x1e, 0x3a, 0x1d, 0xcb, 0x35,
1977	0x8d, 0x56, 0x4b, 0x2d, 0xa3, 0x0d, 0xa8, 0xde, 0x73, 0x6c, 0xd3, 0x6a, 0x1b, 0x76, 0x4b, 0x5d,
1978	0x42, 0x57, 0xe0, 0x52, 0x17, 0x3b, 0x07, 0x96, 0x00, 0x30, 0x5a, 0xad, 0x2f, 0xdc, 0x2e, 0x76,
1979	0x9a, 0x47, 0xa6, 0xd5, 0x54, 0x2b, 0x1c, 0x4d, 0xc8, 0xba, 0x3d, 0xcb, 0xc0, 0xe6, 0xa1, 0xba,
1980	0x8c, 0xb6, 0x61, 0x43, 0x52, 0x4c, 0xa7, 0xdd, 0x36, 0x3a, 0x4d, 0x75, 0x85, 0x03, 0x36, 0x6d,
1981	0x33, 0xb3, 0xb7, 0xaa, 0xfb, 0x80, 0x4e, 0xd7, 0x23, 0xba, 0x0e, 0x57, 0xdb, 0xb6, 0x89, 0x1d,
1982	0xe9, 0x4a, 0xd3, 0xee, 0xf5, 0x8d, 0x8e, 0xb9, 0x18, 0xcc, 0x06, 0x54, 0x79, 0xef, 0x3b, 0xb0,
1983	0xad, 0x56, 0x53, 0x55, 0x78, 0x53, 0x6b, 0xdb, 0x4d, 0xb9, 0x2a, 0xf1, 0xd5, 0x41, 0xce, 0x2b,
1984	0xeb, 0x1d, 0xd8, 0x3e, 0x55, 0x7d, 0xdc, 0x88, 0x83, 0xed, 0xbb, 0x76, 0xc7, 0x68, 0xb9, 0x6d,
1985	0xab, 0x69, 0x1b, 0x67, 0x65, 0xac, 0x0a, 0x15, 0xe3, 0xa8, 0x69, 0x3b, 0xaa, 0xc2, 0x7f, 0xde,
1986	0xb3, 0x9b, 0x96, 0xa3, 0x96, 0xf4, 0xdf, 0x28, 0xb2, 0xb5, 0x2c, 0x56, 0xd0, 0xdb, 0xf0, 0x16,
1987	0xb6, 0x4c, 0x07, 0x8b, 0x5e, 0xdd, 0xb4, 0xee, 0xf1, 0xd0, 0xcf, 0xde, 0x86, 0x5e, 0xdb, 0xc0,
1988	0x7d, 0x11, 0x9e, 0xaa, 0xa0, 0x65, 0x28, 0x75, 0xcd, 0x62, 0xf2, 0x79, 0x57, 0x57, 0xcb, 0x68,
1989	0x0d, 0x56, 0xee, 0x59, 0x87, 0xb6, 0xd9, 0xb2, 0xd4, 0x25, 0x7e, 0x0c, 0x38, 0xfd, 0x43, 0x0b,
1990	0xbb, 0xce, 0x51, 0xbf, 0xe9, 0x38, 0x38, 0xc3, 0x57, 0x2b, 0xe8, 0x12, 0xec, 0x48, 0x8e, 0xdd,
1991	0x29, 0x32, 0x96, 0xf5, 0x1f, 0xc0, 0xc6, 0xdc, 0x85, 0x04, 0x69, 0xb0, 0x12, 0x9f, 0x24, 0x24,
1992	0xa5, 0xa9, 0xa6, 0x88, 0x8b, 0x55, 0xbe, 0xe4, 0xe7, 0xd5, 0x71, 0x14, 0xa5, 0x4c, 0xf4, 0x95,
1993	0x12, 0x96, 0x0b, 0x1d, 0x4f, 0xe7, 0xba, 0xe9, 0x34, 0x82, 0x6a, 0xb0, 0x92, 0xdf, 0xfa, 0x95,
1994	0xec, 0xd6, 0x9f, 0x13, 0x10, 0x82, 0xf2, 0x24, 0x09, 0x44, 0xf3, 0xae, 0x1e, 0x5e, 0xc0, 0x7c,
1995	0xb1, 0xbf, 0x09, 0x72, 0x28, 0x70, 0xd3, 0x68, 0x92, 0x78, 0x54, 0xa7, 0xd3, 0xcb, 0x33, 0x9f,
1996	0x5b, 0xd2, 0x38, 0x0a, 0x53, 0x8a, 0xba, 0xb0, 0x92, 0xdf, 0xb9, 0x4b, 0xe2, 0xa2, 0xf5, 0xc1,
1997	0x0b, 0x5d, 0xb4, 0x0a, 0xce, 0xc9, 0xcb, 0x39, 0xce, 0x61, 0xf4, 0x18, 0x76, 0xcf, 0x1e, 0xf1,
1998	0xbe, 0x35, 0x8b, 0x4f, 0x95, 0xb3, 0x4d, 0x4e, 0x0f, 0x1e, 0x79, 0x01, 0x1d, 0x26, 0x34, 0x4d,
1999	0xf9, 0xb5, 0xda, 0xcb, 0x53, 0x58, 0x11, 0x17, 0x50, 0x41, 0xef, 0x4a, 0x32, 0xfa, 0x18, 0x20,
2000	0x65, 0x24, 0x61, 0xe2, 0x8e, 0x98, 0xcd, 0x8d, 0xb5, 0xdc, 0xc1, 0x7c, 0x40, 0xaf, 0xf7, 0xf3,
2001	0x01, 0x1d, 0x57, 0x85, 0x34, 0x5f, 0xa3, 0x26, 0xa8, 0x23, 0x92, 0x32, 0x77, 0x12, 0xfb, 0xfc,
2002	0x2e, 0x2f, 0x00, 0xca, 0xe7, 0x02, 0x6c, 0x72, 0x9d, 0x23, 0xa1, 0xc2, 0x89, 0xfa, 0x37, 0xa5,
2003	0xd3, 0x53, 0x5a, 0x21, 0x7b, 0x37, 0xa1, 0x42, 0x93, 0x24, 0x4a, 0xb2, 0x21, 0x0d, 0xe5, 0xc8,
2004	0x49, 0xec, 0xd5, 0x7b, 0xe2, 0x69, 0x00, 0x4b, 0x01, 0xd4, 0x5b, 0xcc, 0xf3, 0xab, 0x4c, 0xa2,
2005	0x0b, 0xa9, 0x46, 0x13, 0xd8, 0xce, 0xee, 0xe7, 0xf4, 0x01, 0x0d, 0x99, 0x6c, 0xba, 0xf2, 0x44,
2006	0xb4, 0x5f, 0x12, 0x7e, 0x16, 0x54, 0xb6, 0xc3, 0x16, 0x47, 0x94, 0x47, 0x79, 0x3a, 0x4f, 0xd0,
2007	0x5b, 0xb0, 0xb5, 0x20, 0x83, 0x76, 0x41, 0xe3, 0x97, 0x27, 0xf3, 0xd0, 0xb5, 0xee, 0x59, 0x9d,
2008	0xfe, 0xc2, 0x87, 0x7e, 0x05, 0x2e, 0x59, 0x9d, 0xa6, 0xeb, 0x1c, 0xb8, 0x3d, 0xbb, 0x73, 0xb7,
2009	0x65, 0xb9, 0x47, 0x7d, 0xde, 0x9f, 0x3b, 0xa6, 0xa5, 0x2a, 0xfa, 0xd7, 0xa5, 0xb3, 0x07, 0x61,
2010	0x19, 0x2c, 0x72, 0x61, 0x7d, 0x6e, 0xaa, 0x51, 0x44, 0xf6, 0x3e, 0x79, 0xd9, 0x2a, 0x2d, 0x8c,
2011	0x40, 0x78, 0x0e, 0x10, 0x5d, 0x86, 0xd5, 0x20, 0x75, 0x07, 0xbc, 0x29, 0x66, 0x23, 0xf1, 0x4a,
2012	0x90, 0x1e, 0xf0, 0x25, 0xda, 0x05, 0x5e, 0x50, 0xc7, 0xc1, 0x28, 0x60, 0x8f, 0x45, 0xf1, 0x94,
2013	0xf0, 0x8c, 0x80, 0x0c, 0xd8, 0x92, 0x1b, 0xe1, 0xd2, 0x50, 0x4e, 0x31, 0x22, 0xf7, 0x6b, 0x7b,
2014	0x97, 0x4f, 0x15, 0x58, 0x33, 0x7b, 0x62, 0xc2, 0x1b, 0x52, 0xc3, 0x0a, 0xc5, 0x5c, 0xc3, 0xcf,
2015	0xb7, 0x7c, 0x3c, 0x65, 0x64, 0x28, 0x2e, 0x15, 0x15, 0x0c, 0x19, 0xa9, 0x4f, 0x86, 0xe8, 0xfa,
2016	0xe2, 0xfc, 0xb8, 0x2c, 0x8e, 0xc0, 0xb9, 0xd1, 0x51, 0xff, 0xa3, 0x02, 0x97, 0x9e, 0xf1, 0x59,
2017	0x7e, 0xfb, 0xe9, 0x5b, 0x08, 0xa1, 0x74, 0x7e, 0x08, 0x95, 0x33, 0x42, 0xf8, 0x95, 0x02, 0xbb,
2018	0xcf, 0x33, 0x8a, 0xde, 0x04, 0x60, 0x09, 0x09, 0x53, 0x2f, 0x09, 0x62, 0xd9, 0x2e, 0xaa, 0xb8,
2019	0x40, 0xe1, 0xfc, 0xc2, 0x38, 0x58, 0x12, 0x7b, 0x55, 0xa0, 0xa0, 0xef, 0x43, 0x85, 0xcf, 0x8c,
2020	0xa9, 0x56, 0x16, 0x09, 0xb8, 0x71, 0x4e, 0x02, 0xf8, 0x08, 0x69, 0x87, 0x83, 0x08, 0x4b, 0x2d,
2021	0xfd, 0xcf, 0x0a, 0xac, 0xe6, 0x34, 0xf4, 0xd1, 0x5c, 0x57, 0x52, 0xce, 0xdb, 0xf3, 0x42, 0x53,
2022	0x7a, 0x1f, 0x56, 0xa7, 0xb5, 0x52, 0x3a, 0x4f, 0x6f, 0x85, 0x66, 0x55, 0x82, 0x60, 0x89, 0x7b,
2023	0x21, 0xdf, 0x0e, 0xb0, 0xf8, 0xbd, 0x10, 0xef, 0xd2, 0xa9, 0x78, 0xaf, 0xc2, 0x5a, 0x3e, 0x1d,
2024	0x14, 0x2a, 0x2b, 0x23, 0xf5, 0xc9, 0x70, 0xef, 0xab, 0x0a, 0x2c, 0xcb, 0x8c, 0xa3, 0xdf, 0x2a,
2025	0x50, 0x9d, 0xb6, 0x01, 0xf4, 0x82, 0xef, 0x72, 0xd3, 0xd7, 0xb6, 0xda, 0xad, 0x17, 0x57, 0x90,
2026	0x1d, 0x46, 0xff, 0xde, 0xdf, 0x8c, 0x75, 0x39, 0xc2, 0xbe, 0x2b, 0xce, 0xc4, 0xaf, 0xfe, 0xfa,
2027	0xf7, 0x5f, 0x94, 0xae, 0xe9, 0x57, 0x0a, 0xef, 0xcd, 0x12, 0xe5, 0x4e, 0x92, 0xeb, 0xde, 0x51,
2028	0xde, 0x41, 0xff, 0x54, 0xe0, 0xe2, 0x59, 0x27, 0x0b, 0xba, 0x73, 0x8e, 0x07, 0xcf, 0x79, 0xe4,
2029	0xac, 0xbd, 0x91, 0xeb, 0x16, 0xde, 0x93, 0xeb, 0x4e, 0xfe, 0x9e, 0xac, 0xff, 0xf4, 0xa9, 0xf1,
2030	0xe9, 0x39, 0x47, 0xe8, 0x73, 0x4f, 0xbb, 0xb3, 0x22, 0xfd, 0x8e, 0xfe, 0x7f, 0xa7, 0x23, 0x2d,
2031	0x98, 0x9f, 0x0b, 0xfa, 0xe7, 0x0a, 0xa0, 0xd3, 0xcd, 0x1a, 0x7d, 0xf4, 0x0a, 0xfd, 0x5d, 0x06,
2032	0xfc, 0xf1, 0x2b, 0x9f, 0x0c, 0xfa, 0x85, 0x9b, 0xca, 0x2d, 0xa5, 0x66, 0x3f, 0x35, 0x5e, 0xcb,
2033	0xb4, 0x24, 0x12, 0x89, 0x83, 0xb4, 0xee, 0x45, 0xe3, 0xbf, 0x18, 0xf5, 0x13, 0xc6, 0xe2, 0xf4,
2034	0x4e, 0xa3, 0xf1, 0xf0, 0xe1, 0xc3, 0x05, 0x66, 0x83, 0x4c, 0xd8, 0x89, 0xfc, 0x1b, 0xe1, 0xbb,
2035	0xf1, 0x88, 0xb0, 0x41, 0x94, 0x8c, 0xf7, 0x7f, 0xa6, 0xc0, 0x5b, 0x5e, 0x34, 0x7e, 0xbe, 0x47,
2036	0xfb, 0x6b, 0xb2, 0x4e, 0xbb, 0xfc, 0x0b, 0xe9, 0x2a, 0x5f, 0x9a, 0x99, 0xf4, 0x30, 0xe2, 0x2d,
2037	0xa4, 0x1e, 0x25, 0xc3, 0xc6, 0x90, 0x86, 0xe2, 0xfb, 0x69, 0xcc, 0x6c, 0x3e, 0xe3, 0x8f, 0x8c,
2038	0x4f, 0x24, 0xe1, 0x5f, 0x8a, 0xf2, 0xeb, 0x52, 0xf9, 0xae, 0xd9, 0x3b, 0x5e, 0x16, 0x8a, 0xef,
2039	0xfd, 0x27, 0x00, 0x00, 0xff, 0xff, 0xee, 0x00, 0xf7, 0xe7, 0x00, 0x19, 0x00, 0x00,
2040}
2041
// Reference imports to suppress errors if they are not otherwise used.
// These blank identifiers keep context and grpc in the import set even if
// a build configuration compiles out every other use of them.
var _ context.Context
var _ grpc.ClientConn

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
2049
2050// SpeechClient is the client API for Speech service.
2051//
2052// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
// SpeechClient is the client API for Speech service.
//
// The concrete implementation is obtained from NewSpeechClient; all methods
// issue RPCs over the underlying *grpc.ClientConn.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type SpeechClient interface {
	// Performs synchronous speech recognition: receive results after all audio
	// has been sent and processed.
	Recognize(ctx context.Context, in *RecognizeRequest, opts ...grpc.CallOption) (*RecognizeResponse, error)
	// Performs asynchronous speech recognition: receive results via the
	// google.longrunning.Operations interface. Returns either an
	// `Operation.error` or an `Operation.response` which contains
	// a `LongRunningRecognizeResponse` message.
	// For more information on asynchronous speech recognition, see the
	// [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize).
	LongRunningRecognize(ctx context.Context, in *LongRunningRecognizeRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
	// Performs bidirectional streaming speech recognition: receive results while
	// sending audio. This method is only available via the gRPC API (not REST).
	StreamingRecognize(ctx context.Context, opts ...grpc.CallOption) (Speech_StreamingRecognizeClient, error)
}
2068
// speechClient implements SpeechClient by invoking RPCs on cc.
type speechClient struct {
	cc *grpc.ClientConn
}
2072
2073func NewSpeechClient(cc *grpc.ClientConn) SpeechClient {
2074	return &speechClient{cc}
2075}
2076
2077func (c *speechClient) Recognize(ctx context.Context, in *RecognizeRequest, opts ...grpc.CallOption) (*RecognizeResponse, error) {
2078	out := new(RecognizeResponse)
2079	err := c.cc.Invoke(ctx, "/google.cloud.speech.v1p1beta1.Speech/Recognize", in, out, opts...)
2080	if err != nil {
2081		return nil, err
2082	}
2083	return out, nil
2084}
2085
2086func (c *speechClient) LongRunningRecognize(ctx context.Context, in *LongRunningRecognizeRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) {
2087	out := new(longrunning.Operation)
2088	err := c.cc.Invoke(ctx, "/google.cloud.speech.v1p1beta1.Speech/LongRunningRecognize", in, out, opts...)
2089	if err != nil {
2090		return nil, err
2091	}
2092	return out, nil
2093}
2094
2095func (c *speechClient) StreamingRecognize(ctx context.Context, opts ...grpc.CallOption) (Speech_StreamingRecognizeClient, error) {
2096	stream, err := c.cc.NewStream(ctx, &_Speech_serviceDesc.Streams[0], "/google.cloud.speech.v1p1beta1.Speech/StreamingRecognize", opts...)
2097	if err != nil {
2098		return nil, err
2099	}
2100	x := &speechStreamingRecognizeClient{stream}
2101	return x, nil
2102}
2103
// Speech_StreamingRecognizeClient is the client-side view of the
// bidirectional StreamingRecognize stream: requests go out via Send,
// responses come back via Recv.
type Speech_StreamingRecognizeClient interface {
	Send(*StreamingRecognizeRequest) error
	Recv() (*StreamingRecognizeResponse, error)
	grpc.ClientStream
}
2109
// speechStreamingRecognizeClient adapts a raw grpc.ClientStream to the typed
// Speech_StreamingRecognizeClient interface.
type speechStreamingRecognizeClient struct {
	grpc.ClientStream
}
2113
// Send marshals m onto the stream; it returns any error from SendMsg.
func (x *speechStreamingRecognizeClient) Send(m *StreamingRecognizeRequest) error {
	return x.ClientStream.SendMsg(m)
}
2117
2118func (x *speechStreamingRecognizeClient) Recv() (*StreamingRecognizeResponse, error) {
2119	m := new(StreamingRecognizeResponse)
2120	if err := x.ClientStream.RecvMsg(m); err != nil {
2121		return nil, err
2122	}
2123	return m, nil
2124}
2125
// SpeechServer is the server API for Speech service.
// Implementations are registered with RegisterSpeechServer.
type SpeechServer interface {
	// Performs synchronous speech recognition: receive results after all audio
	// has been sent and processed.
	Recognize(context.Context, *RecognizeRequest) (*RecognizeResponse, error)
	// Performs asynchronous speech recognition: receive results via the
	// google.longrunning.Operations interface. Returns either an
	// `Operation.error` or an `Operation.response` which contains
	// a `LongRunningRecognizeResponse` message.
	// For more information on asynchronous speech recognition, see the
	// [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize).
	LongRunningRecognize(context.Context, *LongRunningRecognizeRequest) (*longrunning.Operation, error)
	// Performs bidirectional streaming speech recognition: receive results while
	// sending audio. This method is only available via the gRPC API (not REST).
	StreamingRecognize(Speech_StreamingRecognizeServer) error
}
2142
// UnimplementedSpeechServer can be embedded to have forward compatible implementations.
// Every method returns codes.Unimplemented, so embedders only need to
// override the RPCs they actually support.
type UnimplementedSpeechServer struct {
}
2146
// Recognize always fails with codes.Unimplemented.
func (*UnimplementedSpeechServer) Recognize(ctx context.Context, req *RecognizeRequest) (*RecognizeResponse, error) {
	return nil, status1.Errorf(codes.Unimplemented, "method Recognize not implemented")
}
// LongRunningRecognize always fails with codes.Unimplemented.
func (*UnimplementedSpeechServer) LongRunningRecognize(ctx context.Context, req *LongRunningRecognizeRequest) (*longrunning.Operation, error) {
	return nil, status1.Errorf(codes.Unimplemented, "method LongRunningRecognize not implemented")
}
// StreamingRecognize always fails with codes.Unimplemented.
func (*UnimplementedSpeechServer) StreamingRecognize(srv Speech_StreamingRecognizeServer) error {
	return status1.Errorf(codes.Unimplemented, "method StreamingRecognize not implemented")
}
2156
// RegisterSpeechServer registers srv's handlers for the Speech service on s,
// using the generated service descriptor to wire method names to handlers.
func RegisterSpeechServer(s *grpc.Server, srv SpeechServer) {
	s.RegisterService(&_Speech_serviceDesc, srv)
}
2160
2161func _Speech_Recognize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
2162	in := new(RecognizeRequest)
2163	if err := dec(in); err != nil {
2164		return nil, err
2165	}
2166	if interceptor == nil {
2167		return srv.(SpeechServer).Recognize(ctx, in)
2168	}
2169	info := &grpc.UnaryServerInfo{
2170		Server:     srv,
2171		FullMethod: "/google.cloud.speech.v1p1beta1.Speech/Recognize",
2172	}
2173	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
2174		return srv.(SpeechServer).Recognize(ctx, req.(*RecognizeRequest))
2175	}
2176	return interceptor(ctx, in, info, handler)
2177}
2178
2179func _Speech_LongRunningRecognize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
2180	in := new(LongRunningRecognizeRequest)
2181	if err := dec(in); err != nil {
2182		return nil, err
2183	}
2184	if interceptor == nil {
2185		return srv.(SpeechServer).LongRunningRecognize(ctx, in)
2186	}
2187	info := &grpc.UnaryServerInfo{
2188		Server:     srv,
2189		FullMethod: "/google.cloud.speech.v1p1beta1.Speech/LongRunningRecognize",
2190	}
2191	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
2192		return srv.(SpeechServer).LongRunningRecognize(ctx, req.(*LongRunningRecognizeRequest))
2193	}
2194	return interceptor(ctx, in, info, handler)
2195}
2196
// _Speech_StreamingRecognize_Handler adapts the raw grpc.ServerStream to the
// typed server stream and hands it to the service implementation.
func _Speech_StreamingRecognize_Handler(srv interface{}, stream grpc.ServerStream) error {
	return srv.(SpeechServer).StreamingRecognize(&speechStreamingRecognizeServer{stream})
}
2200
// Speech_StreamingRecognizeServer is the server-side view of the
// bidirectional StreamingRecognize stream: responses go out via Send,
// requests come in via Recv.
type Speech_StreamingRecognizeServer interface {
	Send(*StreamingRecognizeResponse) error
	Recv() (*StreamingRecognizeRequest, error)
	grpc.ServerStream
}
2206
// speechStreamingRecognizeServer adapts a raw grpc.ServerStream to the typed
// Speech_StreamingRecognizeServer interface.
type speechStreamingRecognizeServer struct {
	grpc.ServerStream
}
2210
// Send marshals m onto the stream; it returns any error from SendMsg.
func (x *speechStreamingRecognizeServer) Send(m *StreamingRecognizeResponse) error {
	return x.ServerStream.SendMsg(m)
}
2214
2215func (x *speechStreamingRecognizeServer) Recv() (*StreamingRecognizeRequest, error) {
2216	m := new(StreamingRecognizeRequest)
2217	if err := x.ServerStream.RecvMsg(m); err != nil {
2218		return nil, err
2219	}
2220	return m, nil
2221}
2222
// _Speech_serviceDesc describes the Speech service to the gRPC runtime:
// it maps the unary methods and the bidirectional stream to their generated
// handler shims. Consumed by RegisterSpeechServer.
var _Speech_serviceDesc = grpc.ServiceDesc{
	ServiceName: "google.cloud.speech.v1p1beta1.Speech",
	HandlerType: (*SpeechServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "Recognize",
			Handler:    _Speech_Recognize_Handler,
		},
		{
			MethodName: "LongRunningRecognize",
			Handler:    _Speech_LongRunningRecognize_Handler,
		},
	},
	Streams: []grpc.StreamDesc{
		{
			StreamName:    "StreamingRecognize",
			Handler:       _Speech_StreamingRecognize_Handler,
			// Both directions stream: client sends audio chunks while the
			// server sends interim/final results.
			ServerStreams: true,
			ClientStreams: true,
		},
	},
	Metadata: "google/cloud/speech/v1p1beta1/cloud_speech.proto",
}
2246