// Code generated by smithy-go-codegen DO NOT EDIT.

package lexruntimev2

import (
	"context"
	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
	"github.com/aws/smithy-go/middleware"
	smithyhttp "github.com/aws/smithy-go/transport/http"
	"io"
)

// Sends user input to Amazon Lex. You can send text or speech. Clients use this
// API to send text and audio requests to Amazon Lex at runtime. Amazon Lex
// interprets the user input using the machine learning model built for the bot.
func (c *Client) RecognizeUtterance(ctx context.Context, params *RecognizeUtteranceInput, optFns ...func(*Options)) (*RecognizeUtteranceOutput, error) {
	if params == nil {
		params = &RecognizeUtteranceInput{}
	}

	result, metadata, err := c.invokeOperation(ctx, "RecognizeUtterance", params, optFns, addOperationRecognizeUtteranceMiddlewares)
	if err != nil {
		return nil, err
	}

	out := result.(*RecognizeUtteranceOutput)
	out.ResultMetadata = metadata
	return out, nil
}
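
// Example (editorial sketch, not generated code): a minimal text request to
// RecognizeUtterance. The identifiers are placeholders, aws.String is assumed
// from github.com/aws/aws-sdk-go-v2/aws, and strings is the standard library.
//
//	out, err := client.RecognizeUtterance(ctx, &RecognizeUtteranceInput{
//		BotId:              aws.String("ABCDE12345"),   // placeholder bot ID
//		BotAliasId:         aws.String("TSTALIASID"),   // placeholder alias ID
//		LocaleId:           aws.String("en_US"),
//		SessionId:          aws.String("session-0001"), // placeholder session ID
//		RequestContentType: aws.String("text/plain; charset=utf-8"),
//		InputStream:        strings.NewReader("I want to book a hotel"),
//	})
//	if err != nil {
//		return err
//	}
//	if out.AudioStream != nil {
//		defer out.AudioStream.Close() // caller owns the response stream
//	}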

type RecognizeUtteranceInput struct {

	// The alias identifier in use for the bot that should receive the request.
	//
	// This member is required.
	BotAliasId *string

	// The identifier of the bot that should receive the request.
	//
	// This member is required.
	BotId *string

	// The locale where the session is in use.
	//
	// This member is required.
	LocaleId *string

	// Indicates the format for audio input or that the content is text. The header
	// must start with one of the following prefixes:
	//
	// * PCM format, audio data must be in little-endian byte order:
	//
	// * audio/l16; rate=16000; channels=1
	//
	// * audio/x-l16; sample-rate=16000; channel-count=1
	//
	// * audio/lpcm; sample-rate=8000; sample-size-bits=16; channel-count=1;
	// is-big-endian=false
	//
	// * Opus format:
	//
	// * audio/x-cbr-opus-with-preamble;preamble-size=0;bit-rate=256000;frame-size-milliseconds=4
	//
	// * Text format:
	//
	// * text/plain; charset=utf-8
	//
	// This member is required.
	RequestContentType *string

	// The identifier of the session in use.
	//
	// This member is required.
	SessionId *string

	// User input in PCM or Opus audio format or text format as described in the
	// requestContentType parameter.
	InputStream io.Reader

	// Request-specific information passed between the client application and Amazon
	// Lex. The namespace x-amz-lex: is reserved for special attributes. Don't create
	// any request attributes with the prefix x-amz-lex:.
	RequestAttributes *string

	// The message that Amazon Lex returns in the response can be either text or
	// speech based on the responseContentType value.
	//
	// * If the value is text/plain;charset=utf-8, Amazon Lex returns text in the
	// response.
	//
	// * If the value begins with audio/, Amazon Lex returns speech in the response.
	// Amazon Lex uses Amazon Polly to generate the speech using the configuration
	// that you specified in the responseContentType parameter. For example, if you
	// specify audio/mpeg as the value, Amazon Lex returns speech in the MPEG format.
	//
	// * If the value is audio/pcm, the speech returned is audio/pcm at 16 KHz in
	// 16-bit, little-endian format.
	//
	// The following are the accepted values:
	//
	// * audio/mpeg
	//
	// * audio/ogg
	//
	// * audio/pcm (16 KHz)
	//
	// * audio/* (defaults to mpeg)
	//
	// * text/plain; charset=utf-8
	ResponseContentType *string

	// Sets the state of the session with the user. You can use this to set the
	// current intent, attributes, context, and dialog action. Use the dialog action
	// to determine the next step that Amazon Lex should use in the conversation
	// with the user.
	SessionState *string
}
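
// Editorial note, not generated code: the sessionState and requestAttributes
// values above are carried as HTTP headers, and the Amazon Lex V2 API
// reference states they must be gzip-compressed and then base64-encoded before
// sending. A sketch of one way to prepare such a value (the helper name and
// JSON payload are illustrative only; requires bytes, compress/gzip, and
// encoding/base64):
//
//	func compressAndEncode(jsonValue string) (string, error) {
//		var buf bytes.Buffer
//		zw := gzip.NewWriter(&buf)
//		if _, err := zw.Write([]byte(jsonValue)); err != nil {
//			return "", err
//		}
//		if err := zw.Close(); err != nil { // flush the gzip trailer before encoding
//			return "", err
//		}
//		return base64.StdEncoding.EncodeToString(buf.Bytes()), nil
//	}
//
//	// Usage sketch:
//	encoded, err := compressAndEncode(`{"dialogAction":{"type":"ElicitIntent"}}`)
//	// ... then set params.SessionState = &encoded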

type RecognizeUtteranceOutput struct {

	// The prompt or statement to send to the user. This is based on the bot
	// configuration and context. For example, if Amazon Lex did not understand the
	// user intent, it sends the clarificationPrompt configured for the bot. If the
	// intent requires confirmation before taking the fulfillment action, it sends
	// the confirmationPrompt. As another example, if the Lambda function
	// successfully fulfilled the intent and returned a message to convey to the
	// user, Amazon Lex sends that message in the response.
	AudioStream io.ReadCloser

	// Content type as specified in the responseContentType in the request.
	ContentType *string

	// Indicates whether the input mode to the operation was text or speech.
	InputMode *string

	// The text used to process the request. If the input was an audio stream, the
	// inputTranscript field contains the text extracted from the audio stream. This
	// is the text that is actually processed to recognize intents and slot values.
	// You can use this information to determine if Amazon Lex is correctly
	// processing the audio that you send.
	InputTranscript *string

	// A list of intents that Amazon Lex determined might satisfy the user's
	// utterance. Each interpretation includes the intent, a score that indicates
	// how confident Amazon Lex is that the interpretation is the correct one, and
	// an optional sentiment response that indicates the sentiment expressed in the
	// utterance.
	Interpretations *string

	// A list of messages that were last sent to the user. The messages are ordered
	// based on the order that you returned the messages from your Lambda function
	// or the order that the messages are defined in the bot.
	Messages *string

	// The attributes sent in the request.
	RequestAttributes *string

	// The identifier of the session in use.
	SessionId *string

	// Represents the current state of the dialog between the user and the bot. Use
	// this to determine the progress of the conversation and what the next action
	// might be.
	SessionState *string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}
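
// Editorial note, not generated code: per the Amazon Lex V2 API reference, the
// string fields above that arrive as HTTP headers (inputTranscript,
// interpretations, messages, requestAttributes, sessionState) are
// gzip-compressed and base64-encoded by the service. A decoding sketch (the
// helper name is illustrative only; requires bytes, compress/gzip,
// encoding/base64, and io):
//
//	func decodeAndDecompress(encoded string) (string, error) {
//		compressed, err := base64.StdEncoding.DecodeString(encoded)
//		if err != nil {
//			return "", err
//		}
//		zr, err := gzip.NewReader(bytes.NewReader(compressed))
//		if err != nil {
//			return "", err
//		}
//		defer zr.Close()
//		decoded, err := io.ReadAll(zr)
//		if err != nil {
//			return "", err
//		}
//		return string(decoded), nil
//	}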

func addOperationRecognizeUtteranceMiddlewares(stack *middleware.Stack, options Options) (err error) {
	err = stack.Serialize.Add(&awsRestjson1_serializeOpRecognizeUtterance{}, middleware.After)
	if err != nil {
		return err
	}
	err = stack.Deserialize.Add(&awsRestjson1_deserializeOpRecognizeUtterance{}, middleware.After)
	if err != nil {
		return err
	}
	if err = addSetLoggerMiddleware(stack, options); err != nil {
		return err
	}
	if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
		return err
	}
	if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
		return err
	}
	if err = addResolveEndpointMiddleware(stack, options); err != nil {
		return err
	}
	if err = v4.AddUnsignedPayloadMiddleware(stack); err != nil {
		return err
	}
	if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
		return err
	}
	if err = addRetryMiddlewares(stack, options); err != nil {
		return err
	}
	if err = addHTTPSignerV4Middleware(stack, options); err != nil {
		return err
	}
	if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
		return err
	}
	if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
		return err
	}
	if err = addClientUserAgent(stack); err != nil {
		return err
	}
	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
		return err
	}
	if err = addOpRecognizeUtteranceValidationMiddleware(stack); err != nil {
		return err
	}
	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRecognizeUtterance(options.Region), middleware.Before); err != nil {
		return err
	}
	if err = addRequestIDRetrieverMiddleware(stack); err != nil {
		return err
	}
	if err = addResponseErrorMiddleware(stack); err != nil {
		return err
	}
	if err = addRequestResponseLogging(stack, options); err != nil {
		return err
	}
	return nil
}

func newServiceMetadataMiddleware_opRecognizeUtterance(region string) *awsmiddleware.RegisterServiceMetadata {
	return &awsmiddleware.RegisterServiceMetadata{
		Region:        region,
		ServiceID:     ServiceID,
		SigningName:   "lex",
		OperationName: "RecognizeUtterance",
	}
}