// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.

package rekognition

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awsutil"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/private/protocol"
	"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
)

const opCompareFaces = "CompareFaces"

// CompareFacesRequest generates a "aws/request.Request" representing the
// client's request for the CompareFaces operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the
// service. The "output" return value is not valid until after Send returns
// without error.
//
// See CompareFaces for more information on using the CompareFaces
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
//    // Example sending a request using the CompareFacesRequest method.
//    req, resp := client.CompareFacesRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *Rekognition) CompareFacesRequest(input *CompareFacesInput) (req *request.Request, output *CompareFacesOutput) {
	op := &request.Operation{
		Name:       opCompareFaces,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &CompareFacesInput{}
	}

	output = &CompareFacesOutput{}
	req = c.newRequest(op, input, output)
	return
}

// CompareFaces API operation for Amazon Rekognition.
//
// Compares a face in the source input image with each of the 100 largest faces
// detected in the target input image.
//
// If the source image contains multiple faces, the service detects the largest
// face and compares it with each face detected in the target image.
//
// You pass the input and target images either as base64-encoded image bytes
// or as references to images in an Amazon S3 bucket. If you use the AWS CLI
// to call Amazon Rekognition operations, passing image bytes isn't supported.
// The image must be formatted as a PNG or JPEG file.
//
// In response, the operation returns an array of face matches ordered by similarity
// score in descending order. For each face match, the response provides a bounding
// box of the face, facial landmarks, pose details (pitch, roll, and yaw), quality
// (brightness and sharpness), and confidence value (indicating the level of
// confidence that the bounding box contains a face). The response also provides
// a similarity score, which indicates how closely the faces match.
//
// By default, only faces with a similarity score of greater than or equal to
// 80% are returned in the response. You can change this value by specifying
// the SimilarityThreshold parameter.
//
// CompareFaces also returns an array of faces that don't match the source image.
// For each face, it returns a bounding box, confidence value, landmarks, pose
// details, and quality. The response also returns information about the face
// in the source image, including the bounding box of the face and confidence
// value.
//
// The QualityFilter input parameter allows you to filter out detected faces
// that don't meet a required quality bar. The quality bar is based on a variety
// of common use cases. Use QualityFilter to set the quality bar by specifying
// LOW, MEDIUM, or HIGH. If you do not want to filter detected faces, specify
// NONE. The default value is NONE.
//
// If the image doesn't contain Exif metadata, CompareFaces returns orientation
// information for the source and target images. Use these values to display
// the images with the correct image orientation.
//
// If no faces are detected in the source or target images, CompareFaces returns
// an InvalidParameterException error.
//
// This is a stateless API operation. That is, data returned by this operation
// doesn't persist.
//
// For an example, see Comparing Faces in Images in the Amazon Rekognition Developer
// Guide.
//
// This operation requires permissions to perform the rekognition:CompareFaces
// action.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Rekognition's
// API operation CompareFaces for usage and error information.
//
// Returned Error Types:
//   * InvalidParameterException
//   Input parameter violated a constraint. Validate your parameter before calling
//   the API operation again.
//
//   * InvalidS3ObjectException
//   Amazon Rekognition is unable to access the S3 object specified in the request.
//
//   * ImageTooLargeException
//   The input image size exceeds the allowed limit. For more information, see
//   Limits in Amazon Rekognition in the Amazon Rekognition Developer Guide.
//
//   * AccessDeniedException
//   You are not authorized to perform the action.
//
//   * InternalServerError
//   Amazon Rekognition experienced a service issue. Try your call again.
//
//   * ThrottlingException
//   Amazon Rekognition is temporarily unable to process the request. Try your
//   call again.
//
//   * ProvisionedThroughputExceededException
//   The number of requests exceeded your throughput limit. If you want to increase
//   this limit, contact Amazon Rekognition.
//
//   * InvalidImageFormatException
//   The provided image format is not supported.
//
func (c *Rekognition) CompareFaces(input *CompareFacesInput) (*CompareFacesOutput, error) {
	req, out := c.CompareFacesRequest(input)
	return out, req.Send()
}

// CompareFacesWithContext is the same as CompareFaces with the addition of
// the ability to pass a context and additional request options.
//
// See CompareFaces for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Rekognition) CompareFacesWithContext(ctx aws.Context, input *CompareFacesInput, opts ...request.Option) (*CompareFacesOutput, error) {
	req, out := c.CompareFacesRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
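
// Example use of CompareFaces: a minimal sketch that compares the largest
// face in a source image against faces in a target image, both stored in
// Amazon S3. The client value "svc", the bucket, and the object names are
// placeholders, not values defined by this package.
//
//    out, err := svc.CompareFaces(&rekognition.CompareFacesInput{
//        SourceImage: &rekognition.Image{
//            S3Object: &rekognition.S3Object{
//                Bucket: aws.String("my-bucket"),  // placeholder
//                Name:   aws.String("source.jpg"), // placeholder
//            },
//        },
//        TargetImage: &rekognition.Image{
//            S3Object: &rekognition.S3Object{
//                Bucket: aws.String("my-bucket"),  // placeholder
//                Name:   aws.String("target.jpg"), // placeholder
//            },
//        },
//        SimilarityThreshold: aws.Float64(90), // only return matches >= 90%
//    })
//    if err == nil {
//        for _, m := range out.FaceMatches {
//            fmt.Println(aws.Float64Value(m.Similarity))
//        }
//    }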

const opCreateCollection = "CreateCollection"

// CreateCollectionRequest generates a "aws/request.Request" representing the
// client's request for the CreateCollection operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the
// service. The "output" return value is not valid until after Send returns
// without error.
//
// See CreateCollection for more information on using the CreateCollection
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
//    // Example sending a request using the CreateCollectionRequest method.
//    req, resp := client.CreateCollectionRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *Rekognition) CreateCollectionRequest(input *CreateCollectionInput) (req *request.Request, output *CreateCollectionOutput) {
	op := &request.Operation{
		Name:       opCreateCollection,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &CreateCollectionInput{}
	}

	output = &CreateCollectionOutput{}
	req = c.newRequest(op, input, output)
	return
}

// CreateCollection API operation for Amazon Rekognition.
//
// Creates a collection in an AWS Region. You can add faces to the collection
// using the IndexFaces operation.
//
// For example, you might create collections, one for each of your application
// users. A user can then index faces using the IndexFaces operation and persist
// results in a specific collection. Then, a user can search the collection
// for faces in the user-specific container.
//
// When you create a collection, it is associated with the latest version of
// the face model.
//
// Collection names are case-sensitive.
//
// This operation requires permissions to perform the rekognition:CreateCollection
// action.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Rekognition's
// API operation CreateCollection for usage and error information.
//
// Returned Error Types:
//   * InvalidParameterException
//   Input parameter violated a constraint. Validate your parameter before calling
//   the API operation again.
//
//   * AccessDeniedException
//   You are not authorized to perform the action.
//
//   * InternalServerError
//   Amazon Rekognition experienced a service issue. Try your call again.
//
//   * ThrottlingException
//   Amazon Rekognition is temporarily unable to process the request. Try your
//   call again.
//
//   * ProvisionedThroughputExceededException
//   The number of requests exceeded your throughput limit. If you want to increase
//   this limit, contact Amazon Rekognition.
//
//   * ResourceAlreadyExistsException
//   A collection with the specified ID already exists.
//
func (c *Rekognition) CreateCollection(input *CreateCollectionInput) (*CreateCollectionOutput, error) {
	req, out := c.CreateCollectionRequest(input)
	return out, req.Send()
}

// CreateCollectionWithContext is the same as CreateCollection with the addition of
// the ability to pass a context and additional request options.
//
// See CreateCollection for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Rekognition) CreateCollectionWithContext(ctx aws.Context, input *CreateCollectionInput, opts ...request.Option) (*CreateCollectionOutput, error) {
	req, out := c.CreateCollectionRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
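
// Example use of CreateCollection: a minimal sketch that creates a collection
// and prints its ARN. The client value "svc" and the collection ID are
// placeholders.
//
//    out, err := svc.CreateCollection(&rekognition.CreateCollectionInput{
//        CollectionId: aws.String("my-app-faces"), // placeholder ID
//    })
//    if err == nil {
//        fmt.Println(aws.StringValue(out.CollectionArn))
//    }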

const opCreateProject = "CreateProject"

// CreateProjectRequest generates a "aws/request.Request" representing the
// client's request for the CreateProject operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the
// service. The "output" return value is not valid until after Send returns
// without error.
//
// See CreateProject for more information on using the CreateProject
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
//    // Example sending a request using the CreateProjectRequest method.
//    req, resp := client.CreateProjectRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *Rekognition) CreateProjectRequest(input *CreateProjectInput) (req *request.Request, output *CreateProjectOutput) {
	op := &request.Operation{
		Name:       opCreateProject,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &CreateProjectInput{}
	}

	output = &CreateProjectOutput{}
	req = c.newRequest(op, input, output)
	return
}

// CreateProject API operation for Amazon Rekognition.
//
// Creates a new Amazon Rekognition Custom Labels project. A project is a logical
// grouping of resources (images, Labels, models) and operations (training,
// evaluation, and detection).
//
// This operation requires permissions to perform the rekognition:CreateProject
// action.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Rekognition's
// API operation CreateProject for usage and error information.
//
// Returned Error Types:
//   * ResourceInUseException
//   The specified resource is already being used.
//
//   * LimitExceededException
//   An Amazon Rekognition service limit was exceeded. For example, if you start
//   too many Amazon Rekognition Video jobs concurrently, calls to start operations
//   (StartLabelDetection, for example) will raise a LimitExceededException exception
//   (HTTP status code: 400) until the number of concurrently running jobs is
//   below the Amazon Rekognition service limit.
//
//   * InvalidParameterException
//   Input parameter violated a constraint. Validate your parameter before calling
//   the API operation again.
//
//   * AccessDeniedException
//   You are not authorized to perform the action.
//
//   * InternalServerError
//   Amazon Rekognition experienced a service issue. Try your call again.
//
//   * ThrottlingException
//   Amazon Rekognition is temporarily unable to process the request. Try your
//   call again.
//
//   * ProvisionedThroughputExceededException
//   The number of requests exceeded your throughput limit. If you want to increase
//   this limit, contact Amazon Rekognition.
//
func (c *Rekognition) CreateProject(input *CreateProjectInput) (*CreateProjectOutput, error) {
	req, out := c.CreateProjectRequest(input)
	return out, req.Send()
}

// CreateProjectWithContext is the same as CreateProject with the addition of
// the ability to pass a context and additional request options.
//
// See CreateProject for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Rekognition) CreateProjectWithContext(ctx aws.Context, input *CreateProjectInput, opts ...request.Option) (*CreateProjectOutput, error) {
	req, out := c.CreateProjectRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
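
// Example use of CreateProject: a minimal sketch that creates a Custom Labels
// project and keeps the returned project ARN for later calls such as
// CreateProjectVersion. The client value "svc" and the project name are
// placeholders.
//
//    out, err := svc.CreateProject(&rekognition.CreateProjectInput{
//        ProjectName: aws.String("my-custom-labels-project"), // placeholder
//    })
//    if err == nil {
//        fmt.Println(aws.StringValue(out.ProjectArn))
//    }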

const opCreateProjectVersion = "CreateProjectVersion"

// CreateProjectVersionRequest generates a "aws/request.Request" representing the
// client's request for the CreateProjectVersion operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the
// service. The "output" return value is not valid until after Send returns
// without error.
//
// See CreateProjectVersion for more information on using the CreateProjectVersion
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
//    // Example sending a request using the CreateProjectVersionRequest method.
//    req, resp := client.CreateProjectVersionRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *Rekognition) CreateProjectVersionRequest(input *CreateProjectVersionInput) (req *request.Request, output *CreateProjectVersionOutput) {
	op := &request.Operation{
		Name:       opCreateProjectVersion,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &CreateProjectVersionInput{}
	}

	output = &CreateProjectVersionOutput{}
	req = c.newRequest(op, input, output)
	return
}

// CreateProjectVersion API operation for Amazon Rekognition.
//
// Creates a new version of a model and begins training. Models are managed
// as part of an Amazon Rekognition Custom Labels project. You can specify one
// training dataset and one testing dataset. The response from CreateProjectVersion
// is an Amazon Resource Name (ARN) for the version of the model.
//
// Training takes a while to complete. You can get the current status by calling
// DescribeProjectVersions.
//
// Once training has successfully completed, call DescribeProjectVersions to
// get the training results and evaluate the model.
//
// After evaluating the model, you start the model by calling StartProjectVersion.
//
// This operation requires permissions to perform the rekognition:CreateProjectVersion
// action.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Rekognition's
// API operation CreateProjectVersion for usage and error information.
//
// Returned Error Types:
//   * ResourceInUseException
//   The specified resource is already being used.
//
//   * ResourceNotFoundException
//   The collection specified in the request cannot be found.
//
//   * LimitExceededException
//   An Amazon Rekognition service limit was exceeded. For example, if you start
//   too many Amazon Rekognition Video jobs concurrently, calls to start operations
//   (StartLabelDetection, for example) will raise a LimitExceededException exception
//   (HTTP status code: 400) until the number of concurrently running jobs is
//   below the Amazon Rekognition service limit.
//
//   * InvalidParameterException
//   Input parameter violated a constraint. Validate your parameter before calling
//   the API operation again.
//
//   * AccessDeniedException
//   You are not authorized to perform the action.
//
//   * InternalServerError
//   Amazon Rekognition experienced a service issue. Try your call again.
//
//   * ThrottlingException
//   Amazon Rekognition is temporarily unable to process the request. Try your
//   call again.
//
//   * ProvisionedThroughputExceededException
//   The number of requests exceeded your throughput limit. If you want to increase
//   this limit, contact Amazon Rekognition.
//
func (c *Rekognition) CreateProjectVersion(input *CreateProjectVersionInput) (*CreateProjectVersionOutput, error) {
	req, out := c.CreateProjectVersionRequest(input)
	return out, req.Send()
}

// CreateProjectVersionWithContext is the same as CreateProjectVersion with the addition of
// the ability to pass a context and additional request options.
//
// See CreateProjectVersion for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Rekognition) CreateProjectVersionWithContext(ctx aws.Context, input *CreateProjectVersionInput, opts ...request.Option) (*CreateProjectVersionOutput, error) {
	req, out := c.CreateProjectVersionRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
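
// Example use of CreateProjectVersion: a minimal sketch that starts training
// a new model version. The ARN, bucket, and version name are placeholders;
// the training and testing dataset assets are elided for brevity.
//
//    out, err := svc.CreateProjectVersion(&rekognition.CreateProjectVersionInput{
//        ProjectArn:  aws.String("arn:aws:rekognition:..."), // placeholder project ARN
//        VersionName: aws.String("v1"),                      // placeholder
//        OutputConfig: &rekognition.OutputConfig{
//            S3Bucket:    aws.String("my-training-output"), // placeholder
//            S3KeyPrefix: aws.String("results/"),
//        },
//        TrainingData: &rekognition.TrainingData{ /* Assets elided */ },
//        TestingData:  &rekognition.TestingData{AutoCreate: aws.Bool(true)},
//    })
//    if err == nil {
//        // Poll DescribeProjectVersions with this ARN until training finishes.
//        fmt.Println(aws.StringValue(out.ProjectVersionArn))
//    }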

const opCreateStreamProcessor = "CreateStreamProcessor"

// CreateStreamProcessorRequest generates a "aws/request.Request" representing the
// client's request for the CreateStreamProcessor operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the
// service. The "output" return value is not valid until after Send returns
// without error.
//
// See CreateStreamProcessor for more information on using the CreateStreamProcessor
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
//    // Example sending a request using the CreateStreamProcessorRequest method.
//    req, resp := client.CreateStreamProcessorRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *Rekognition) CreateStreamProcessorRequest(input *CreateStreamProcessorInput) (req *request.Request, output *CreateStreamProcessorOutput) {
	op := &request.Operation{
		Name:       opCreateStreamProcessor,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &CreateStreamProcessorInput{}
	}

	output = &CreateStreamProcessorOutput{}
	req = c.newRequest(op, input, output)
	return
}

// CreateStreamProcessor API operation for Amazon Rekognition.
//
// Creates an Amazon Rekognition stream processor that you can use to detect
// and recognize faces in a streaming video.
//
// Amazon Rekognition Video is a consumer of live video from Amazon Kinesis
// Video Streams. Amazon Rekognition Video sends analysis results to Amazon
// Kinesis Data Streams.
//
// You provide as input a Kinesis video stream (Input) and a Kinesis data stream
// (Output). You also specify the face recognition criteria in Settings; for
// example, the collection containing faces that you want to recognize.
// Use Name to assign an identifier for the stream processor. You use Name to
// manage the stream processor. For example, you can start processing the source
// video by calling StartStreamProcessor with the Name field.
//
// After you have finished analyzing a streaming video, use StopStreamProcessor
// to stop processing. You can delete the stream processor by calling DeleteStreamProcessor.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Rekognition's
// API operation CreateStreamProcessor for usage and error information.
//
// Returned Error Types:
//   * AccessDeniedException
//   You are not authorized to perform the action.
//
//   * InternalServerError
//   Amazon Rekognition experienced a service issue. Try your call again.
//
//   * ThrottlingException
//   Amazon Rekognition is temporarily unable to process the request. Try your
//   call again.
//
//   * InvalidParameterException
//   Input parameter violated a constraint. Validate your parameter before calling
//   the API operation again.
//
//   * LimitExceededException
//   An Amazon Rekognition service limit was exceeded. For example, if you start
//   too many Amazon Rekognition Video jobs concurrently, calls to start operations
//   (StartLabelDetection, for example) will raise a LimitExceededException exception
//   (HTTP status code: 400) until the number of concurrently running jobs is
//   below the Amazon Rekognition service limit.
//
//   * ResourceInUseException
//   The specified resource is already being used.
//
//   * ProvisionedThroughputExceededException
//   The number of requests exceeded your throughput limit. If you want to increase
//   this limit, contact Amazon Rekognition.
//
func (c *Rekognition) CreateStreamProcessor(input *CreateStreamProcessorInput) (*CreateStreamProcessorOutput, error) {
	req, out := c.CreateStreamProcessorRequest(input)
	return out, req.Send()
}

// CreateStreamProcessorWithContext is the same as CreateStreamProcessor with the addition of
// the ability to pass a context and additional request options.
//
// See CreateStreamProcessor for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Rekognition) CreateStreamProcessorWithContext(ctx aws.Context, input *CreateStreamProcessorInput, opts ...request.Option) (*CreateStreamProcessorOutput, error) {
	req, out := c.CreateStreamProcessorRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
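
// Example use of CreateStreamProcessor: a minimal sketch that wires a Kinesis
// video stream to a Kinesis data stream and searches for faces from an
// existing collection. All ARNs, the role, the processor name, and the
// collection ID are placeholders.
//
//    _, err := svc.CreateStreamProcessor(&rekognition.CreateStreamProcessorInput{
//        Name:    aws.String("my-stream-processor"), // placeholder
//        RoleArn: aws.String("arn:aws:iam::..."),    // placeholder role ARN
//        Input: &rekognition.StreamProcessorInput{
//            KinesisVideoStream: &rekognition.KinesisVideoStream{
//                Arn: aws.String("arn:aws:kinesisvideo:..."), // placeholder
//            },
//        },
//        Output: &rekognition.StreamProcessorOutput{
//            KinesisDataStream: &rekognition.KinesisDataStream{
//                Arn: aws.String("arn:aws:kinesis:..."), // placeholder
//            },
//        },
//        Settings: &rekognition.StreamProcessorSettings{
//            FaceSearch: &rekognition.FaceSearchSettings{
//                CollectionId:       aws.String("my-app-faces"), // placeholder
//                FaceMatchThreshold: aws.Float64(80),
//            },
//        },
//    })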

const opDeleteCollection = "DeleteCollection"

// DeleteCollectionRequest generates a "aws/request.Request" representing the
// client's request for the DeleteCollection operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the
// service. The "output" return value is not valid until after Send returns
// without error.
//
// See DeleteCollection for more information on using the DeleteCollection
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
//    // Example sending a request using the DeleteCollectionRequest method.
//    req, resp := client.DeleteCollectionRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *Rekognition) DeleteCollectionRequest(input *DeleteCollectionInput) (req *request.Request, output *DeleteCollectionOutput) {
	op := &request.Operation{
		Name:       opDeleteCollection,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &DeleteCollectionInput{}
	}

	output = &DeleteCollectionOutput{}
	req = c.newRequest(op, input, output)
	return
}

// DeleteCollection API operation for Amazon Rekognition.
//
// Deletes the specified collection. Note that this operation removes all faces
// in the collection. For an example, see delete-collection-procedure.
//
// This operation requires permissions to perform the rekognition:DeleteCollection
// action.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Rekognition's
// API operation DeleteCollection for usage and error information.
//
// Returned Error Types:
//   * InvalidParameterException
//   Input parameter violated a constraint. Validate your parameter before calling
//   the API operation again.
//
//   * AccessDeniedException
//   You are not authorized to perform the action.
//
//   * InternalServerError
//   Amazon Rekognition experienced a service issue. Try your call again.
//
//   * ThrottlingException
//   Amazon Rekognition is temporarily unable to process the request. Try your
//   call again.
//
//   * ProvisionedThroughputExceededException
//   The number of requests exceeded your throughput limit. If you want to increase
//   this limit, contact Amazon Rekognition.
//
//   * ResourceNotFoundException
//   The collection specified in the request cannot be found.
//
func (c *Rekognition) DeleteCollection(input *DeleteCollectionInput) (*DeleteCollectionOutput, error) {
	req, out := c.DeleteCollectionRequest(input)
	return out, req.Send()
}

// DeleteCollectionWithContext is the same as DeleteCollection with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteCollection for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Rekognition) DeleteCollectionWithContext(ctx aws.Context, input *DeleteCollectionInput, opts ...request.Option) (*DeleteCollectionOutput, error) {
	req, out := c.DeleteCollectionRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
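
// Example use of DeleteCollection: a minimal sketch that deletes a collection
// and all faces stored in it. The collection ID is a placeholder.
//
//    out, err := svc.DeleteCollection(&rekognition.DeleteCollectionInput{
//        CollectionId: aws.String("my-app-faces"), // placeholder
//    })
//    if err == nil {
//        fmt.Println(aws.Int64Value(out.StatusCode)) // 200 on success
//    }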

const opDeleteFaces = "DeleteFaces"

// DeleteFacesRequest generates a "aws/request.Request" representing the
// client's request for the DeleteFaces operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the
// service. The "output" return value is not valid until after Send returns
// without error.
//
// See DeleteFaces for more information on using the DeleteFaces
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
//    // Example sending a request using the DeleteFacesRequest method.
//    req, resp := client.DeleteFacesRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *Rekognition) DeleteFacesRequest(input *DeleteFacesInput) (req *request.Request, output *DeleteFacesOutput) {
	op := &request.Operation{
		Name:       opDeleteFaces,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &DeleteFacesInput{}
	}

	output = &DeleteFacesOutput{}
	req = c.newRequest(op, input, output)
	return
}

// DeleteFaces API operation for Amazon Rekognition.
//
// Deletes faces from a collection. You specify a collection ID and an array
// of face IDs to remove from the collection.
//
// This operation requires permissions to perform the rekognition:DeleteFaces
// action.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Rekognition's
// API operation DeleteFaces for usage and error information.
//
// Returned Error Types:
//   * InvalidParameterException
//   Input parameter violated a constraint. Validate your parameter before calling
//   the API operation again.
//
//   * AccessDeniedException
//   You are not authorized to perform the action.
//
//   * InternalServerError
//   Amazon Rekognition experienced a service issue. Try your call again.
//
//   * ThrottlingException
//   Amazon Rekognition is temporarily unable to process the request. Try your
//   call again.
//
//   * ProvisionedThroughputExceededException
//   The number of requests exceeded your throughput limit. If you want to increase
//   this limit, contact Amazon Rekognition.
//
//   * ResourceNotFoundException
//   The collection specified in the request cannot be found.
//
func (c *Rekognition) DeleteFaces(input *DeleteFacesInput) (*DeleteFacesOutput, error) {
	req, out := c.DeleteFacesRequest(input)
	return out, req.Send()
}

// DeleteFacesWithContext is the same as DeleteFaces with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteFaces for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Rekognition) DeleteFacesWithContext(ctx aws.Context, input *DeleteFacesInput, opts ...request.Option) (*DeleteFacesOutput, error) {
	req, out := c.DeleteFacesRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
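
// Example use of DeleteFaces: a minimal sketch that removes two indexed faces
// from a collection. The collection ID and face IDs are placeholders; face
// IDs are returned by IndexFaces.
//
//    out, err := svc.DeleteFaces(&rekognition.DeleteFacesInput{
//        CollectionId: aws.String("my-app-faces"), // placeholder
//        FaceIds: []*string{
//            aws.String("11111111-1111-1111-1111-111111111111"), // placeholder
//            aws.String("22222222-2222-2222-2222-222222222222"), // placeholder
//        },
//    })
//    if err == nil {
//        fmt.Println(aws.StringValueSlice(out.DeletedFaces))
//    }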

const opDeleteProject = "DeleteProject"

// DeleteProjectRequest generates a "aws/request.Request" representing the
// client's request for the DeleteProject operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the
// service. The "output" return value is not valid until after Send returns
// without error.
//
// See DeleteProject for more information on using the DeleteProject
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
//    // Example sending a request using the DeleteProjectRequest method.
//    req, resp := client.DeleteProjectRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *Rekognition) DeleteProjectRequest(input *DeleteProjectInput) (req *request.Request, output *DeleteProjectOutput) {
	op := &request.Operation{
		Name:       opDeleteProject,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &DeleteProjectInput{}
	}

	output = &DeleteProjectOutput{}
	req = c.newRequest(op, input, output)
	return
}

// DeleteProject API operation for Amazon Rekognition.
//
// Deletes an Amazon Rekognition Custom Labels project. To delete a project,
// you must first delete all models associated with the project. To delete a
// model, see DeleteProjectVersion.
//
// This operation requires permissions to perform the rekognition:DeleteProject
// action.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Rekognition's
// API operation DeleteProject for usage and error information.
//
// Returned Error Types:
//   * ResourceInUseException
//   The specified resource is already being used.
//
//   * ResourceNotFoundException
//   The collection specified in the request cannot be found.
//
//   * InvalidParameterException
//   Input parameter violated a constraint. Validate your parameter before calling
//   the API operation again.
//
//   * AccessDeniedException
//   You are not authorized to perform the action.
//
//   * InternalServerError
//   Amazon Rekognition experienced a service issue. Try your call again.
//
//   * ThrottlingException
//   Amazon Rekognition is temporarily unable to process the request. Try your
//   call again.
//
//   * ProvisionedThroughputExceededException
//   The number of requests exceeded your throughput limit. If you want to increase
//   this limit, contact Amazon Rekognition.
//
func (c *Rekognition) DeleteProject(input *DeleteProjectInput) (*DeleteProjectOutput, error) {
	req, out := c.DeleteProjectRequest(input)
	return out, req.Send()
}

// DeleteProjectWithContext is the same as DeleteProject with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteProject for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Rekognition) DeleteProjectWithContext(ctx aws.Context, input *DeleteProjectInput, opts ...request.Option) (*DeleteProjectOutput, error) {
	req, out := c.DeleteProjectRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
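
// Example use of DeleteProject: a minimal sketch that deletes a project after
// all of its model versions have been deleted. The project ARN is a
// placeholder.
//
//    out, err := svc.DeleteProject(&rekognition.DeleteProjectInput{
//        ProjectArn: aws.String("arn:aws:rekognition:..."), // placeholder
//    })
//    if err == nil {
//        fmt.Println(aws.StringValue(out.Status)) // current project status
//    }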

const opDeleteProjectVersion = "DeleteProjectVersion"

// DeleteProjectVersionRequest generates a "aws/request.Request" representing the
// client's request for the DeleteProjectVersion operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the
// service. The "output" return value is not valid until after Send returns
// without error.
//
// See DeleteProjectVersion for more information on using the DeleteProjectVersion
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
//    // Example sending a request using the DeleteProjectVersionRequest method.
//    req, resp := client.DeleteProjectVersionRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *Rekognition) DeleteProjectVersionRequest(input *DeleteProjectVersionInput) (req *request.Request, output *DeleteProjectVersionOutput) {
	op := &request.Operation{
		Name:       opDeleteProjectVersion,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &DeleteProjectVersionInput{}
	}

	output = &DeleteProjectVersionOutput{}
	req = c.newRequest(op, input, output)
	return
}

// DeleteProjectVersion API operation for Amazon Rekognition.
//
// Deletes an Amazon Rekognition Custom Labels model.
//
// You can't delete a model if it is running or if it is training. To check
// the status of a model, use the Status field returned from DescribeProjectVersions.
// To stop a running model, call StopProjectVersion. If the model is training,
// wait until it finishes.
//
// This operation requires permissions to perform the rekognition:DeleteProjectVersion
// action.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Rekognition's
// API operation DeleteProjectVersion for usage and error information.
//
// Returned Error Types:
//   * ResourceNotFoundException
//   The collection specified in the request cannot be found.
//
//   * ResourceInUseException
//   The specified resource is already being used.
//
//   * InvalidParameterException
//   Input parameter violated a constraint. Validate your parameter before calling
//   the API operation again.
//
//   * AccessDeniedException
//   You are not authorized to perform the action.
//
//   * InternalServerError
//   Amazon Rekognition experienced a service issue. Try your call again.
//
//   * ThrottlingException
//   Amazon Rekognition is temporarily unable to process the request. Try your
//   call again.
//
//   * ProvisionedThroughputExceededException
//   The number of requests exceeded your throughput limit. If you want to increase
//   this limit, contact Amazon Rekognition.
//
func (c *Rekognition) DeleteProjectVersion(input *DeleteProjectVersionInput) (*DeleteProjectVersionOutput, error) {
	req, out := c.DeleteProjectVersionRequest(input)
	return out, req.Send()
}

// DeleteProjectVersionWithContext is the same as DeleteProjectVersion with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteProjectVersion for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Rekognition) DeleteProjectVersionWithContext(ctx aws.Context, input *DeleteProjectVersionInput, opts ...request.Option) (*DeleteProjectVersionOutput, error) {
	req, out := c.DeleteProjectVersionRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
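
// Example use of DeleteProjectVersion: a minimal sketch that deletes a model
// version that is neither running nor training. The version ARN is a
// placeholder.
//
//    out, err := svc.DeleteProjectVersion(&rekognition.DeleteProjectVersionInput{
//        ProjectVersionArn: aws.String("arn:aws:rekognition:..."), // placeholder
//    })
//    if err == nil {
//        fmt.Println(aws.StringValue(out.Status))
//    }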

const opDeleteStreamProcessor = "DeleteStreamProcessor"

// DeleteStreamProcessorRequest generates a "aws/request.Request" representing the
// client's request for the DeleteStreamProcessor operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the
// service. The "output" return value is not valid until after Send returns
// without error.
//
// See DeleteStreamProcessor for more information on using the DeleteStreamProcessor
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
//    // Example sending a request using the DeleteStreamProcessorRequest method.
//    req, resp := client.DeleteStreamProcessorRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *Rekognition) DeleteStreamProcessorRequest(input *DeleteStreamProcessorInput) (req *request.Request, output *DeleteStreamProcessorOutput) {
	op := &request.Operation{
		Name:       opDeleteStreamProcessor,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &DeleteStreamProcessorInput{}
	}

	output = &DeleteStreamProcessorOutput{}
	req = c.newRequest(op, input, output)
	req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
	return
}

// DeleteStreamProcessor API operation for Amazon Rekognition.
//
// Deletes the stream processor identified by Name. You assign the value for
// Name when you create the stream processor with CreateStreamProcessor. You
// might not be able to use the same name for a stream processor for a few seconds
// after calling DeleteStreamProcessor.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Rekognition's
// API operation DeleteStreamProcessor for usage and error information.
//
// Returned Error Types:
//   * AccessDeniedException
//   You are not authorized to perform the action.
//
//   * InternalServerError
//   Amazon Rekognition experienced a service issue. Try your call again.
//
//   * ThrottlingException
//   Amazon Rekognition is temporarily unable to process the request. Try your
//   call again.
//
//   * InvalidParameterException
//   Input parameter violated a constraint. Validate your parameter before calling
//   the API operation again.
//
//   * ResourceNotFoundException
//   The collection specified in the request cannot be found.
//
//   * ResourceInUseException
//   The specified resource is already being used.
//
//   * ProvisionedThroughputExceededException
//   The number of requests exceeded your throughput limit. If you want to increase
//   this limit, contact Amazon Rekognition.
//
func (c *Rekognition) DeleteStreamProcessor(input *DeleteStreamProcessorInput) (*DeleteStreamProcessorOutput, error) {
	req, out := c.DeleteStreamProcessorRequest(input)
	return out, req.Send()
}

// DeleteStreamProcessorWithContext is the same as DeleteStreamProcessor with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteStreamProcessor for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Rekognition) DeleteStreamProcessorWithContext(ctx aws.Context, input *DeleteStreamProcessorInput, opts ...request.Option) (*DeleteStreamProcessorOutput, error) {
	req, out := c.DeleteStreamProcessorRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
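
// Example use of DeleteStreamProcessorWithContext: a minimal sketch that
// deletes a stream processor with a request timeout, illustrating the
// WithContext pattern shared by all operations in this package. The processor
// name is a placeholder.
//
//    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
//    defer cancel()
//    _, err := svc.DeleteStreamProcessorWithContext(ctx, &rekognition.DeleteStreamProcessorInput{
//        Name: aws.String("my-stream-processor"), // placeholder
//    })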

const opDescribeCollection = "DescribeCollection"

// DescribeCollectionRequest generates a "aws/request.Request" representing the
// client's request for the DescribeCollection operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the
// service. The "output" return value is not valid until after Send returns
// without error.
//
// See DescribeCollection for more information on using the DescribeCollection
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
//    // Example sending a request using the DescribeCollectionRequest method.
//    req, resp := client.DescribeCollectionRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *Rekognition) DescribeCollectionRequest(input *DescribeCollectionInput) (req *request.Request, output *DescribeCollectionOutput) {
	op := &request.Operation{
		Name:       opDescribeCollection,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &DescribeCollectionInput{}
	}

	output = &DescribeCollectionOutput{}
	req = c.newRequest(op, input, output)
	return
}

// DescribeCollection API operation for Amazon Rekognition.
//
// Describes the specified collection. You can use DescribeCollection to get
// information, such as the number of faces indexed into a collection and the
// version of the model used by the collection for face detection.
//
// For more information, see Describing a Collection in the Amazon Rekognition
// Developer Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Rekognition's
// API operation DescribeCollection for usage and error information.
//
// Returned Error Types:
//   * InvalidParameterException
//   Input parameter violated a constraint. Validate your parameter before calling
//   the API operation again.
//
//   * AccessDeniedException
//   You are not authorized to perform the action.
//
//   * InternalServerError
//   Amazon Rekognition experienced a service issue. Try your call again.
//
//   * ThrottlingException
//   Amazon Rekognition is temporarily unable to process the request. Try your
//   call again.
//
//   * ProvisionedThroughputExceededException
//   The number of requests exceeded your throughput limit. If you want to increase
//   this limit, contact Amazon Rekognition.
//
//   * ResourceNotFoundException
//   The collection specified in the request cannot be found.
//
func (c *Rekognition) DescribeCollection(input *DescribeCollectionInput) (*DescribeCollectionOutput, error) {
	req, out := c.DescribeCollectionRequest(input)
	return out, req.Send()
}

// DescribeCollectionWithContext is the same as DescribeCollection with the addition of
// the ability to pass a context and additional request options.
//
// See DescribeCollection for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Rekognition) DescribeCollectionWithContext(ctx aws.Context, input *DescribeCollectionInput, opts ...request.Option) (*DescribeCollectionOutput, error) {
	req, out := c.DescribeCollectionRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
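
// Example use of DescribeCollection: a minimal sketch that prints the number
// of indexed faces and the face model version for a collection. The
// collection ID is a placeholder.
//
//    out, err := svc.DescribeCollection(&rekognition.DescribeCollectionInput{
//        CollectionId: aws.String("my-app-faces"), // placeholder
//    })
//    if err == nil {
//        fmt.Println(aws.Int64Value(out.FaceCount), aws.StringValue(out.FaceModelVersion))
//    }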
1215
1216const opDescribeProjectVersions = "DescribeProjectVersions"
1217
1218// DescribeProjectVersionsRequest generates a "aws/request.Request" representing the
1219// client's request for the DescribeProjectVersions operation. The "output" return
1220// value will be populated with the request's response once the request completes
1221// successfully.
1222//
1223// Use "Send" method on the returned Request to send the API call to the service.
1224// the "output" return value is not valid until after Send returns without error.
1225//
1226// See DescribeProjectVersions for more information on using the DescribeProjectVersions
1227// API call, and error handling.
1228//
1229// This method is useful when you want to inject custom logic or configuration
1230// into the SDK's request lifecycle. Such as custom headers, or retry logic.
1231//
1232//
1233//    // Example sending a request using the DescribeProjectVersionsRequest method.
1234//    req, resp := client.DescribeProjectVersionsRequest(params)
1235//
1236//    err := req.Send()
1237//    if err == nil { // resp is now filled
1238//        fmt.Println(resp)
1239//    }
1240func (c *Rekognition) DescribeProjectVersionsRequest(input *DescribeProjectVersionsInput) (req *request.Request, output *DescribeProjectVersionsOutput) {
1241	op := &request.Operation{
1242		Name:       opDescribeProjectVersions,
1243		HTTPMethod: "POST",
1244		HTTPPath:   "/",
1245		Paginator: &request.Paginator{
1246			InputTokens:     []string{"NextToken"},
1247			OutputTokens:    []string{"NextToken"},
1248			LimitToken:      "MaxResults",
1249			TruncationToken: "",
1250		},
1251	}
1252
1253	if input == nil {
1254		input = &DescribeProjectVersionsInput{}
1255	}
1256
1257	output = &DescribeProjectVersionsOutput{}
1258	req = c.newRequest(op, input, output)
1259	return
1260}
1261
1262// DescribeProjectVersions API operation for Amazon Rekognition.
1263//
1264// Lists and describes the models in an Amazon Rekognition Custom Labels project.
1265// You can specify up to 10 model versions in ProjectVersionArns. If you don't
1266// specify a value, descriptions for all models are returned.
1267//
1268// This operation requires permissions to perform the rekognition:DescribeProjectVersions
1269// action.
1270//
1271// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
1272// with awserr.Error's Code and Message methods to get detailed information about
1273// the error.
1274//
1275// See the AWS API reference guide for Amazon Rekognition's
1276// API operation DescribeProjectVersions for usage and error information.
1277//
1278// Returned Error Types:
1279//   * ResourceNotFoundException
1280//   The collection specified in the request cannot be found.
1281//
1282//   * InvalidPaginationTokenException
1283//   Pagination token in the request is not valid.
1284//
1285//   * InvalidParameterException
1286//   Input parameter violated a constraint. Validate your parameter before calling
1287//   the API operation again.
1288//
1289//   * AccessDeniedException
1290//   You are not authorized to perform the action.
1291//
1292//   * InternalServerError
1293//   Amazon Rekognition experienced a service issue. Try your call again.
1294//
1295//   * ThrottlingException
1296//   Amazon Rekognition is temporarily unable to process the request. Try your
1297//   call again.
1298//
1299//   * ProvisionedThroughputExceededException
1300//   The number of requests exceeded your throughput limit. If you want to increase
1301//   this limit, contact Amazon Rekognition.
1302//
1303func (c *Rekognition) DescribeProjectVersions(input *DescribeProjectVersionsInput) (*DescribeProjectVersionsOutput, error) {
1304	req, out := c.DescribeProjectVersionsRequest(input)
1305	return out, req.Send()
1306}
1307
1308// DescribeProjectVersionsWithContext is the same as DescribeProjectVersions with the addition of
1309// the ability to pass a context and additional request options.
1310//
1311// See DescribeProjectVersions for details on how to use this API operation.
1312//
1313// The context must be non-nil and will be used for request cancellation. If
1314// the context is nil a panic will occur. In the future the SDK may create
1315// sub-contexts for http.Requests. See https://golang.org/pkg/context/
1316// for more information on using Contexts.
1317func (c *Rekognition) DescribeProjectVersionsWithContext(ctx aws.Context, input *DescribeProjectVersionsInput, opts ...request.Option) (*DescribeProjectVersionsOutput, error) {
1318	req, out := c.DescribeProjectVersionsRequest(input)
1319	req.SetContext(ctx)
1320	req.ApplyOptions(opts...)
1321	return out, req.Send()
1322}
1323
1324// DescribeProjectVersionsPages iterates over the pages of a DescribeProjectVersions operation,
1325// calling the "fn" function with the response data for each page. To stop
1326// iterating, return false from the fn function.
1327//
1328// See DescribeProjectVersions method for more information on how to use this operation.
1329//
1330// Note: This operation can generate multiple requests to a service.
1331//
1332//    // Example iterating over at most 3 pages of a DescribeProjectVersions operation.
1333//    pageNum := 0
1334//    err := client.DescribeProjectVersionsPages(params,
1335//        func(page *rekognition.DescribeProjectVersionsOutput, lastPage bool) bool {
1336//            pageNum++
1337//            fmt.Println(page)
1338//            return pageNum <= 3
1339//        })
1340//
1341func (c *Rekognition) DescribeProjectVersionsPages(input *DescribeProjectVersionsInput, fn func(*DescribeProjectVersionsOutput, bool) bool) error {
1342	return c.DescribeProjectVersionsPagesWithContext(aws.BackgroundContext(), input, fn)
1343}
1344
// DescribeProjectVersionsPagesWithContext is the same as DescribeProjectVersionsPages
// except it takes a Context and allows setting request options on the pages.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
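//
//    // Example (illustrative sketch): paginating DescribeProjectVersions under
//    // a context deadline. The project ARN and timeout are placeholder values,
//    // and the "context" and "time" packages are assumed to be imported.
//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//    defer cancel()
//    err := client.DescribeProjectVersionsPagesWithContext(ctx,
//        &rekognition.DescribeProjectVersionsInput{
//            ProjectArn: aws.String("arn:aws:rekognition:..."), // placeholder ARN
//        },
//        func(page *rekognition.DescribeProjectVersionsOutput, lastPage bool) bool {
//            fmt.Println(len(page.ProjectVersionDescriptions))
//            return true // keep iterating until the last page
//        })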
func (c *Rekognition) DescribeProjectVersionsPagesWithContext(ctx aws.Context, input *DescribeProjectVersionsInput, fn func(*DescribeProjectVersionsOutput, bool) bool, opts ...request.Option) error {
	p := request.Pagination{
		NewRequest: func() (*request.Request, error) {
			var inCpy *DescribeProjectVersionsInput
			if input != nil {
				tmp := *input
				inCpy = &tmp
			}
			req, _ := c.DescribeProjectVersionsRequest(inCpy)
			req.SetContext(ctx)
			req.ApplyOptions(opts...)
			return req, nil
		},
	}

	for p.Next() {
		if !fn(p.Page().(*DescribeProjectVersionsOutput), !p.HasNextPage()) {
			break
		}
	}

	return p.Err()
}

const opDescribeProjects = "DescribeProjects"

// DescribeProjectsRequest generates a "aws/request.Request" representing the
// client's request for the DescribeProjects operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See DescribeProjects for more information on using the DescribeProjects
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
//    // Example sending a request using the DescribeProjectsRequest method.
//    req, resp := client.DescribeProjectsRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *Rekognition) DescribeProjectsRequest(input *DescribeProjectsInput) (req *request.Request, output *DescribeProjectsOutput) {
	op := &request.Operation{
		Name:       opDescribeProjects,
		HTTPMethod: "POST",
		HTTPPath:   "/",
		Paginator: &request.Paginator{
			InputTokens:     []string{"NextToken"},
			OutputTokens:    []string{"NextToken"},
			LimitToken:      "MaxResults",
			TruncationToken: "",
		},
	}

	if input == nil {
		input = &DescribeProjectsInput{}
	}

	output = &DescribeProjectsOutput{}
	req = c.newRequest(op, input, output)
	return
}

// DescribeProjects API operation for Amazon Rekognition.
//
// Lists and gets information about your Amazon Rekognition Custom Labels projects.
//
// This operation requires permissions to perform the rekognition:DescribeProjects
// action.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Rekognition's
// API operation DescribeProjects for usage and error information.
//
// Returned Error Types:
//   * InvalidPaginationTokenException
//   Pagination token in the request is not valid.
//
//   * InvalidParameterException
//   Input parameter violated a constraint. Validate your parameter before calling
//   the API operation again.
//
//   * AccessDeniedException
//   You are not authorized to perform the action.
//
//   * InternalServerError
//   Amazon Rekognition experienced a service issue. Try your call again.
//
//   * ThrottlingException
//   Amazon Rekognition is temporarily unable to process the request. Try your
//   call again.
//
//   * ProvisionedThroughputExceededException
//   The number of requests exceeded your throughput limit. If you want to increase
//   this limit, contact Amazon Rekognition.
//
func (c *Rekognition) DescribeProjects(input *DescribeProjectsInput) (*DescribeProjectsOutput, error) {
	req, out := c.DescribeProjectsRequest(input)
	return out, req.Send()
}

// DescribeProjectsWithContext is the same as DescribeProjects with the addition of
// the ability to pass a context and additional request options.
//
// See DescribeProjects for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Rekognition) DescribeProjectsWithContext(ctx aws.Context, input *DescribeProjectsInput, opts ...request.Option) (*DescribeProjectsOutput, error) {
	req, out := c.DescribeProjectsRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

// DescribeProjectsPages iterates over the pages of a DescribeProjects operation,
// calling the "fn" function with the response data for each page. To stop
// iterating, return false from the fn function.
//
// See DescribeProjects method for more information on how to use this operation.
//
// Note: This operation can generate multiple requests to a service.
//
//    // Example iterating over at most 3 pages of a DescribeProjects operation.
//    pageNum := 0
//    err := client.DescribeProjectsPages(params,
//        func(page *rekognition.DescribeProjectsOutput, lastPage bool) bool {
//            pageNum++
//            fmt.Println(page)
//            return pageNum <= 3
//        })
//
func (c *Rekognition) DescribeProjectsPages(input *DescribeProjectsInput, fn func(*DescribeProjectsOutput, bool) bool) error {
	return c.DescribeProjectsPagesWithContext(aws.BackgroundContext(), input, fn)
}

// DescribeProjectsPagesWithContext is the same as DescribeProjectsPages except
// it takes a Context and allows setting request options on the pages.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Rekognition) DescribeProjectsPagesWithContext(ctx aws.Context, input *DescribeProjectsInput, fn func(*DescribeProjectsOutput, bool) bool, opts ...request.Option) error {
	p := request.Pagination{
		NewRequest: func() (*request.Request, error) {
			var inCpy *DescribeProjectsInput
			if input != nil {
				tmp := *input
				inCpy = &tmp
			}
			req, _ := c.DescribeProjectsRequest(inCpy)
			req.SetContext(ctx)
			req.ApplyOptions(opts...)
			return req, nil
		},
	}

	for p.Next() {
		if !fn(p.Page().(*DescribeProjectsOutput), !p.HasNextPage()) {
			break
		}
	}

	return p.Err()
}

const opDescribeStreamProcessor = "DescribeStreamProcessor"

// DescribeStreamProcessorRequest generates a "aws/request.Request" representing the
// client's request for the DescribeStreamProcessor operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See DescribeStreamProcessor for more information on using the DescribeStreamProcessor
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
//    // Example sending a request using the DescribeStreamProcessorRequest method.
//    req, resp := client.DescribeStreamProcessorRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *Rekognition) DescribeStreamProcessorRequest(input *DescribeStreamProcessorInput) (req *request.Request, output *DescribeStreamProcessorOutput) {
	op := &request.Operation{
		Name:       opDescribeStreamProcessor,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &DescribeStreamProcessorInput{}
	}

	output = &DescribeStreamProcessorOutput{}
	req = c.newRequest(op, input, output)
	return
}

// DescribeStreamProcessor API operation for Amazon Rekognition.
//
// Provides information about a stream processor created by CreateStreamProcessor.
// You can get information about the input and output streams, the input parameters
// for the face recognition being performed, and the current status of the stream
// processor.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Rekognition's
// API operation DescribeStreamProcessor for usage and error information.
//
// Returned Error Types:
//   * AccessDeniedException
//   You are not authorized to perform the action.
//
//   * InternalServerError
//   Amazon Rekognition experienced a service issue. Try your call again.
//
//   * ThrottlingException
//   Amazon Rekognition is temporarily unable to process the request. Try your
//   call again.
//
//   * InvalidParameterException
//   Input parameter violated a constraint. Validate your parameter before calling
//   the API operation again.
//
//   * ResourceNotFoundException
//   The collection specified in the request cannot be found.
//
//   * ProvisionedThroughputExceededException
//   The number of requests exceeded your throughput limit. If you want to increase
//   this limit, contact Amazon Rekognition.
//
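//    // Example (illustrative sketch): describing a stream processor by name.
//    // "my-stream-processor" is a placeholder, not a value defined by this SDK.
//    out, err := client.DescribeStreamProcessor(&rekognition.DescribeStreamProcessorInput{
//        Name: aws.String("my-stream-processor"),
//    })
//    if err == nil {
//        fmt.Println(aws.StringValue(out.Status))
//    }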
func (c *Rekognition) DescribeStreamProcessor(input *DescribeStreamProcessorInput) (*DescribeStreamProcessorOutput, error) {
	req, out := c.DescribeStreamProcessorRequest(input)
	return out, req.Send()
}

// DescribeStreamProcessorWithContext is the same as DescribeStreamProcessor with the addition of
// the ability to pass a context and additional request options.
//
// See DescribeStreamProcessor for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Rekognition) DescribeStreamProcessorWithContext(ctx aws.Context, input *DescribeStreamProcessorInput, opts ...request.Option) (*DescribeStreamProcessorOutput, error) {
	req, out := c.DescribeStreamProcessorRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opDetectCustomLabels = "DetectCustomLabels"

// DetectCustomLabelsRequest generates a "aws/request.Request" representing the
// client's request for the DetectCustomLabels operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See DetectCustomLabels for more information on using the DetectCustomLabels
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
//    // Example sending a request using the DetectCustomLabelsRequest method.
//    req, resp := client.DetectCustomLabelsRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *Rekognition) DetectCustomLabelsRequest(input *DetectCustomLabelsInput) (req *request.Request, output *DetectCustomLabelsOutput) {
	op := &request.Operation{
		Name:       opDetectCustomLabels,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &DetectCustomLabelsInput{}
	}

	output = &DetectCustomLabelsOutput{}
	req = c.newRequest(op, input, output)
	return
}

// DetectCustomLabels API operation for Amazon Rekognition.
//
// Detects custom labels in a supplied image by using an Amazon Rekognition
// Custom Labels model.
//
// You specify which version of the model to use by using the ProjectVersionArn
// input parameter.
//
// You pass the input image as base64-encoded image bytes or as a reference
// to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon
// Rekognition operations, passing image bytes is not supported. The image must
// be either a PNG or JPEG formatted file.
//
// For each object that the model version detects on an image, the API returns
// a (CustomLabel) object in an array (CustomLabels). Each CustomLabel object
// provides the label name (Name), the level of confidence that the image contains
// the object (Confidence), and object location information, if it exists, for
// the label on the image (Geometry).
//
// During training, the model calculates a threshold value that determines whether
// a prediction for a label is true. By default, DetectCustomLabels doesn't return
// labels whose confidence value is below the model's calculated threshold value.
// To filter labels that are returned, specify a value for MinConfidence that is
// higher than the model's calculated threshold. You can get the model's calculated
// threshold from the model's training results shown in the Amazon Rekognition
// Custom Labels console. To get all labels, regardless of confidence, specify
// a MinConfidence value of 0.
//
// You can also add the MaxResults parameter to limit the number of labels returned.
//
// This is a stateless API operation. That is, the operation does not persist
// any data.
//
// This operation requires permissions to perform the rekognition:DetectCustomLabels
// action.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Rekognition's
// API operation DetectCustomLabels for usage and error information.
//
// Returned Error Types:
//   * ResourceNotFoundException
//   The collection specified in the request cannot be found.
//
//   * ResourceNotReadyException
//   The requested resource isn't ready. For example, this exception occurs when
//   you call DetectCustomLabels with a model version that isn't deployed.
//
//   * InvalidS3ObjectException
//   Amazon Rekognition is unable to access the S3 object specified in the request.
//
//   * InvalidParameterException
//   Input parameter violated a constraint. Validate your parameter before calling
//   the API operation again.
//
//   * ImageTooLargeException
//   The input image size exceeds the allowed limit. For more information, see
//   Limits in Amazon Rekognition in the Amazon Rekognition Developer Guide.
//
//   * LimitExceededException
//   An Amazon Rekognition service limit was exceeded. For example, if you start
//   too many Amazon Rekognition Video jobs concurrently, calls to start operations
//   (StartLabelDetection, for example) will raise a LimitExceededException exception
//   (HTTP status code: 400) until the number of concurrently running jobs is
//   below the Amazon Rekognition service limit.
//
//   * AccessDeniedException
//   You are not authorized to perform the action.
//
//   * InternalServerError
//   Amazon Rekognition experienced a service issue. Try your call again.
//
//   * ThrottlingException
//   Amazon Rekognition is temporarily unable to process the request. Try your
//   call again.
//
//   * ProvisionedThroughputExceededException
//   The number of requests exceeded your throughput limit. If you want to increase
//   this limit, contact Amazon Rekognition.
//
//   * InvalidImageFormatException
//   The provided image format is not supported.
//
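//    // Example (illustrative sketch): detecting custom labels with an explicit
//    // MinConfidence. The ARN, bucket, and object key are placeholders.
//    out, err := client.DetectCustomLabels(&rekognition.DetectCustomLabelsInput{
//        ProjectVersionArn: aws.String("arn:aws:rekognition:..."), // placeholder ARN
//        Image: &rekognition.Image{
//            S3Object: &rekognition.S3Object{
//                Bucket: aws.String("my-bucket"),
//                Name:   aws.String("photo.jpg"),
//            },
//        },
//        MinConfidence: aws.Float64(70),
//    })
//    if err == nil {
//        for _, label := range out.CustomLabels {
//            fmt.Println(aws.StringValue(label.Name), aws.Float64Value(label.Confidence))
//        }
//    }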
func (c *Rekognition) DetectCustomLabels(input *DetectCustomLabelsInput) (*DetectCustomLabelsOutput, error) {
	req, out := c.DetectCustomLabelsRequest(input)
	return out, req.Send()
}

// DetectCustomLabelsWithContext is the same as DetectCustomLabels with the addition of
// the ability to pass a context and additional request options.
//
// See DetectCustomLabels for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Rekognition) DetectCustomLabelsWithContext(ctx aws.Context, input *DetectCustomLabelsInput, opts ...request.Option) (*DetectCustomLabelsOutput, error) {
	req, out := c.DetectCustomLabelsRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opDetectFaces = "DetectFaces"

// DetectFacesRequest generates a "aws/request.Request" representing the
// client's request for the DetectFaces operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See DetectFaces for more information on using the DetectFaces
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
//    // Example sending a request using the DetectFacesRequest method.
//    req, resp := client.DetectFacesRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *Rekognition) DetectFacesRequest(input *DetectFacesInput) (req *request.Request, output *DetectFacesOutput) {
	op := &request.Operation{
		Name:       opDetectFaces,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &DetectFacesInput{}
	}

	output = &DetectFacesOutput{}
	req = c.newRequest(op, input, output)
	return
}

// DetectFaces API operation for Amazon Rekognition.
//
// Detects faces within an image that is provided as input.
//
// DetectFaces detects the 100 largest faces in the image. For each face detected,
// the operation returns face details. These details include a bounding box
// of the face, a confidence value (that the bounding box contains a face),
// and a fixed set of attributes such as facial landmarks (for example, coordinates
// of eye and mouth), presence of beard, sunglasses, and so on.
//
// The face-detection algorithm is most effective on frontal faces. For non-frontal
// or obscured faces, the algorithm might not detect the faces or might detect
// faces with lower confidence.
//
// You pass the input image either as base64-encoded image bytes or as a reference
// to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon
// Rekognition operations, passing image bytes is not supported. The image must
// be either a PNG or JPEG formatted file.
//
// This is a stateless API operation. That is, the operation does not persist
// any data.
//
// This operation requires permissions to perform the rekognition:DetectFaces
// action.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Rekognition's
// API operation DetectFaces for usage and error information.
//
// Returned Error Types:
//   * InvalidS3ObjectException
//   Amazon Rekognition is unable to access the S3 object specified in the request.
//
//   * InvalidParameterException
//   Input parameter violated a constraint. Validate your parameter before calling
//   the API operation again.
//
//   * ImageTooLargeException
//   The input image size exceeds the allowed limit. For more information, see
//   Limits in Amazon Rekognition in the Amazon Rekognition Developer Guide.
//
//   * AccessDeniedException
//   You are not authorized to perform the action.
//
//   * InternalServerError
//   Amazon Rekognition experienced a service issue. Try your call again.
//
//   * ThrottlingException
//   Amazon Rekognition is temporarily unable to process the request. Try your
//   call again.
//
//   * ProvisionedThroughputExceededException
//   The number of requests exceeded your throughput limit. If you want to increase
//   this limit, contact Amazon Rekognition.
//
//   * InvalidImageFormatException
//   The provided image format is not supported.
//
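//    // Example (illustrative sketch): detecting faces and requesting all facial
//    // attributes. The bucket and object key are placeholders.
//    out, err := client.DetectFaces(&rekognition.DetectFacesInput{
//        Image: &rekognition.Image{
//            S3Object: &rekognition.S3Object{
//                Bucket: aws.String("my-bucket"),
//                Name:   aws.String("group-photo.jpg"),
//            },
//        },
//        Attributes: aws.StringSlice([]string{"ALL"}),
//    })
//    if err == nil {
//        fmt.Println(len(out.FaceDetails), "faces detected")
//    }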
func (c *Rekognition) DetectFaces(input *DetectFacesInput) (*DetectFacesOutput, error) {
	req, out := c.DetectFacesRequest(input)
	return out, req.Send()
}

// DetectFacesWithContext is the same as DetectFaces with the addition of
// the ability to pass a context and additional request options.
//
// See DetectFaces for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Rekognition) DetectFacesWithContext(ctx aws.Context, input *DetectFacesInput, opts ...request.Option) (*DetectFacesOutput, error) {
	req, out := c.DetectFacesRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opDetectLabels = "DetectLabels"

// DetectLabelsRequest generates a "aws/request.Request" representing the
// client's request for the DetectLabels operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See DetectLabels for more information on using the DetectLabels
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
//    // Example sending a request using the DetectLabelsRequest method.
//    req, resp := client.DetectLabelsRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *Rekognition) DetectLabelsRequest(input *DetectLabelsInput) (req *request.Request, output *DetectLabelsOutput) {
	op := &request.Operation{
		Name:       opDetectLabels,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &DetectLabelsInput{}
	}

	output = &DetectLabelsOutput{}
	req = c.newRequest(op, input, output)
	return
}

// DetectLabels API operation for Amazon Rekognition.
//
// Detects instances of real-world entities within an image (JPEG or PNG) provided
// as input. This includes objects like flower, tree, and table; events like
// wedding, graduation, and birthday party; and concepts like landscape, evening,
// and nature.
//
// For an example, see Analyzing Images Stored in an Amazon S3 Bucket in the
// Amazon Rekognition Developer Guide.
//
// DetectLabels does not support the detection of activities. However, activity
// detection is supported for label detection in videos. For more information,
// see StartLabelDetection in the Amazon Rekognition Developer Guide.
//
// You pass the input image as base64-encoded image bytes or as a reference
// to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon
// Rekognition operations, passing image bytes is not supported. The image must
// be either a PNG or JPEG formatted file.
//
// For each object, scene, and concept, the API returns one or more labels. Each
// label provides the object name, and the level of confidence that the image
// contains the object. For example, suppose the input image has a lighthouse,
// the sea, and a rock. The response includes all three labels, one for each
// object.
//
// {Name: lighthouse, Confidence: 98.4629}
//
// {Name: rock, Confidence: 79.2097}
//
// {Name: sea, Confidence: 75.061}
//
// In the preceding example, the operation returns one label for each of the
// three objects. The operation can also return multiple labels for the same
// object in the image. For example, if the input image shows a flower (for
// example, a tulip), the operation might return the following three labels.
//
// {Name: flower, Confidence: 99.0562}
//
// {Name: plant, Confidence: 99.0562}
//
// {Name: tulip, Confidence: 99.0562}
//
// In this example, the detection algorithm more precisely identifies the flower
// as a tulip.
//
// In response, the API returns an array of labels. In addition, the response
// also includes the orientation correction. Optionally, you can specify MinConfidence
// to control the confidence threshold for the labels returned. The default
// is 55%. You can also add the MaxLabels parameter to limit the number of labels
// returned.
//
// If the object detected is a person, the operation doesn't provide the same
// facial details that the DetectFaces operation provides.
//
// DetectLabels returns bounding boxes for instances of common object labels
// in an array of Instance objects. An Instance object contains a BoundingBox
// object, for the location of the label on the image. It also includes the
// confidence by which the bounding box was detected.
//
// DetectLabels also returns a hierarchical taxonomy of detected labels. For
// example, a detected car might be assigned the label car. The label car has
// two ancestor labels: Vehicle (its parent) and Transportation (its grandparent).
// The response returns the entire list of ancestors for a label. Each ancestor
// is a unique label in the response. In the previous example, Car, Vehicle,
// and Transportation are returned as unique labels in the response.
//
// This is a stateless API operation. That is, the operation does not persist
// any data.
//
// This operation requires permissions to perform the rekognition:DetectLabels
// action.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Rekognition's
// API operation DetectLabels for usage and error information.
//
// Returned Error Types:
//   * InvalidS3ObjectException
//   Amazon Rekognition is unable to access the S3 object specified in the request.
//
//   * InvalidParameterException
//   Input parameter violated a constraint. Validate your parameter before calling
//   the API operation again.
//
//   * ImageTooLargeException
//   The input image size exceeds the allowed limit. For more information, see
//   Limits in Amazon Rekognition in the Amazon Rekognition Developer Guide.
//
//   * AccessDeniedException
//   You are not authorized to perform the action.
//
//   * InternalServerError
//   Amazon Rekognition experienced a service issue. Try your call again.
//
//   * ThrottlingException
//   Amazon Rekognition is temporarily unable to process the request. Try your
//   call again.
//
//   * ProvisionedThroughputExceededException
//   The number of requests exceeded your throughput limit. If you want to increase
//   this limit, contact Amazon Rekognition.
//
//   * InvalidImageFormatException
//   The provided image format is not supported.
//
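//    // Example (illustrative sketch): detecting labels with MaxLabels and
//    // MinConfidence set. The bucket and object key are placeholders.
//    out, err := client.DetectLabels(&rekognition.DetectLabelsInput{
//        Image: &rekognition.Image{
//            S3Object: &rekognition.S3Object{
//                Bucket: aws.String("my-bucket"),
//                Name:   aws.String("beach.jpg"),
//            },
//        },
//        MaxLabels:     aws.Int64(10),
//        MinConfidence: aws.Float64(75),
//    })
//    if err == nil {
//        for _, label := range out.Labels {
//            fmt.Println(aws.StringValue(label.Name), aws.Float64Value(label.Confidence))
//        }
//    }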
func (c *Rekognition) DetectLabels(input *DetectLabelsInput) (*DetectLabelsOutput, error) {
	req, out := c.DetectLabelsRequest(input)
	return out, req.Send()
}

// DetectLabelsWithContext is the same as DetectLabels with the addition of
// the ability to pass a context and additional request options.
//
// See DetectLabels for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Rekognition) DetectLabelsWithContext(ctx aws.Context, input *DetectLabelsInput, opts ...request.Option) (*DetectLabelsOutput, error) {
	req, out := c.DetectLabelsRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opDetectModerationLabels = "DetectModerationLabels"

// DetectModerationLabelsRequest generates a "aws/request.Request" representing the
// client's request for the DetectModerationLabels operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See DetectModerationLabels for more information on using the DetectModerationLabels
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
//    // Example sending a request using the DetectModerationLabelsRequest method.
//    req, resp := client.DetectModerationLabelsRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *Rekognition) DetectModerationLabelsRequest(input *DetectModerationLabelsInput) (req *request.Request, output *DetectModerationLabelsOutput) {
	op := &request.Operation{
		Name:       opDetectModerationLabels,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &DetectModerationLabelsInput{}
	}

	output = &DetectModerationLabelsOutput{}
	req = c.newRequest(op, input, output)
	return
}

// DetectModerationLabels API operation for Amazon Rekognition.
//
// Detects unsafe content in a specified JPEG or PNG format image. Use DetectModerationLabels
// to moderate images depending on your requirements. For example, you might
// want to filter images that contain nudity, but not images containing suggestive
// content.
//
// To filter images, use the labels returned by DetectModerationLabels to determine
// which types of content are appropriate.
//
// For information about moderation labels, see Detecting Unsafe Content in
// the Amazon Rekognition Developer Guide.
//
// You pass the input image either as base64-encoded image bytes or as a reference
// to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon
// Rekognition operations, passing image bytes is not supported. The image must
// be either a PNG or JPEG formatted file.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Rekognition's
// API operation DetectModerationLabels for usage and error information.
//
// Returned Error Types:
//   * InvalidS3ObjectException
//   Amazon Rekognition is unable to access the S3 object specified in the request.
//
//   * InvalidParameterException
//   Input parameter violated a constraint. Validate your parameter before calling
//   the API operation again.
//
//   * ImageTooLargeException
//   The input image size exceeds the allowed limit. For more information, see
//   Limits in Amazon Rekognition in the Amazon Rekognition Developer Guide.
//
//   * AccessDeniedException
//   You are not authorized to perform the action.
//
//   * InternalServerError
//   Amazon Rekognition experienced a service issue. Try your call again.
//
//   * ThrottlingException
//   Amazon Rekognition is temporarily unable to process the request. Try your
//   call again.
//
//   * ProvisionedThroughputExceededException
//   The number of requests exceeded your throughput limit. If you want to increase
//   this limit, contact Amazon Rekognition.
//
//   * InvalidImageFormatException
//   The provided image format is not supported.
//
//   * HumanLoopQuotaExceededException
//   The number of in-progress human reviews you have has exceeded the number
//   allowed.
//
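//    // Example (illustrative sketch): moderating an image with a MinConfidence
//    // threshold. The bucket and object key are placeholders.
//    out, err := client.DetectModerationLabels(&rekognition.DetectModerationLabelsInput{
//        Image: &rekognition.Image{
//            S3Object: &rekognition.S3Object{
//                Bucket: aws.String("my-bucket"),
//                Name:   aws.String("upload.jpg"),
//            },
//        },
//        MinConfidence: aws.Float64(60),
//    })
//    if err == nil {
//        for _, l := range out.ModerationLabels {
//            fmt.Println(aws.StringValue(l.Name), aws.StringValue(l.ParentName))
//        }
//    }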
func (c *Rekognition) DetectModerationLabels(input *DetectModerationLabelsInput) (*DetectModerationLabelsOutput, error) {
	req, out := c.DetectModerationLabelsRequest(input)
	return out, req.Send()
}

// DetectModerationLabelsWithContext is the same as DetectModerationLabels with the addition of
// the ability to pass a context and additional request options.
//
// See DetectModerationLabels for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Rekognition) DetectModerationLabelsWithContext(ctx aws.Context, input *DetectModerationLabelsInput, opts ...request.Option) (*DetectModerationLabelsOutput, error) {
	req, out := c.DetectModerationLabelsRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opDetectProtectiveEquipment = "DetectProtectiveEquipment"

// DetectProtectiveEquipmentRequest generates a "aws/request.Request" representing the
// client's request for the DetectProtectiveEquipment operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See DetectProtectiveEquipment for more information on using the DetectProtectiveEquipment
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
//    // Example sending a request using the DetectProtectiveEquipmentRequest method.
//    req, resp := client.DetectProtectiveEquipmentRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *Rekognition) DetectProtectiveEquipmentRequest(input *DetectProtectiveEquipmentInput) (req *request.Request, output *DetectProtectiveEquipmentOutput) {
	op := &request.Operation{
		Name:       opDetectProtectiveEquipment,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &DetectProtectiveEquipmentInput{}
	}

	output = &DetectProtectiveEquipmentOutput{}
	req = c.newRequest(op, input, output)
	return
}

// DetectProtectiveEquipment API operation for Amazon Rekognition.
//
// Detects Personal Protective Equipment (PPE) worn by people detected in an
// image. Amazon Rekognition can detect the following types of PPE.
//
//    * Face cover
//
//    * Hand cover
//
//    * Head cover
//
// You pass the input image as base64-encoded image bytes or as a reference
// to an image in an Amazon S3 bucket. The image must be either a PNG or JPG
// formatted file.
//
// DetectProtectiveEquipment detects PPE worn by up to 15 persons detected in
// an image.
//
// For each person detected in the image, the API returns an array of body parts
// (face, head, left-hand, right-hand). For each body part, an array of detected
// items of PPE is returned, including an indicator of whether or not the PPE
// covers the body part. The API returns the confidence it has in each detection
// (person, PPE, body part, and body part coverage). It also returns a bounding
// box (BoundingBox) for each detected person and each detected item of PPE.
//
// You can optionally request a summary of detected PPE items with the SummarizationAttributes
// input parameter. The summary provides the following information.
//
//    * The persons detected as wearing all of the types of PPE that you specify.
//
//    * The persons detected as not wearing all of the types of PPE that you specify.
//
//    * The persons detected where PPE adornment could not be determined.
//
// This is a stateless API operation. That is, the operation does not persist
// any data.
//
// This operation requires permissions to perform the rekognition:DetectProtectiveEquipment
// action.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Rekognition's
// API operation DetectProtectiveEquipment for usage and error information.
//
// Returned Error Types:
//   * InvalidS3ObjectException
//   Amazon Rekognition is unable to access the S3 object specified in the request.
//
//   * InvalidParameterException
//   Input parameter violated a constraint. Validate your parameter before calling
//   the API operation again.
//
//   * ImageTooLargeException
//   The input image size exceeds the allowed limit. For more information, see
//   Limits in Amazon Rekognition in the Amazon Rekognition Developer Guide.
//
//   * AccessDeniedException
//   You are not authorized to perform the action.
//
//   * InternalServerError
//   Amazon Rekognition experienced a service issue. Try your call again.
//
//   * ThrottlingException
//   Amazon Rekognition is temporarily unable to process the request. Try your
//   call again.
//
//   * ProvisionedThroughputExceededException
//   The number of requests exceeded your throughput limit. If you want to increase
//   this limit, contact Amazon Rekognition.
//
//   * InvalidImageFormatException
//   The provided image format is not supported.
//
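//    // Example (illustrative sketch): detecting PPE and requesting a summary
//    // for required face and head covers. The bucket and key are placeholders.
//    out, err := client.DetectProtectiveEquipment(&rekognition.DetectProtectiveEquipmentInput{
//        Image: &rekognition.Image{
//            S3Object: &rekognition.S3Object{
//                Bucket: aws.String("my-bucket"),
//                Name:   aws.String("worksite.jpg"),
//            },
//        },
//        SummarizationAttributes: &rekognition.ProtectiveEquipmentSummarizationAttributes{
//            MinConfidence:          aws.Float64(80),
//            RequiredEquipmentTypes: aws.StringSlice([]string{"FACE_COVER", "HEAD_COVER"}),
//        },
//    })
//    if err == nil {
//        fmt.Println(len(out.Persons), "persons analyzed")
//    }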
func (c *Rekognition) DetectProtectiveEquipment(input *DetectProtectiveEquipmentInput) (*DetectProtectiveEquipmentOutput, error) {
	req, out := c.DetectProtectiveEquipmentRequest(input)
	return out, req.Send()
}

// DetectProtectiveEquipmentWithContext is the same as DetectProtectiveEquipment with the addition of
// the ability to pass a context and additional request options.
//
// See DetectProtectiveEquipment for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Rekognition) DetectProtectiveEquipmentWithContext(ctx aws.Context, input *DetectProtectiveEquipmentInput, opts ...request.Option) (*DetectProtectiveEquipmentOutput, error) {
	req, out := c.DetectProtectiveEquipmentRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opDetectText = "DetectText"

// DetectTextRequest generates a "aws/request.Request" representing the
// client's request for the DetectText operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See DetectText for more information on using the DetectText
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
//    // Example sending a request using the DetectTextRequest method.
//    req, resp := client.DetectTextRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *Rekognition) DetectTextRequest(input *DetectTextInput) (req *request.Request, output *DetectTextOutput) {
	op := &request.Operation{
		Name:       opDetectText,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &DetectTextInput{}
	}

	output = &DetectTextOutput{}
	req = c.newRequest(op, input, output)
	return
}

// DetectText API operation for Amazon Rekognition.
//
// Detects text in the input image and converts it into machine-readable text.
//
// Pass the input image as base64-encoded image bytes or as a reference to an
// image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition
// operations, passing image bytes is not supported; you must pass a reference
// to an image in an Amazon S3 bucket. The image must be either a .png or .jpeg
// formatted file.
//
// The DetectText operation returns text in an array of TextDetection elements,
// TextDetections. Each TextDetection element provides information about a single
// word or line of text that was detected in the image.
//
// A word is one or more ISO basic Latin script characters that are not separated
// by spaces. DetectText can detect up to 50 words in an image.
//
// A line is a string of equally spaced words. A line isn't necessarily a complete
// sentence. For example, a driver's license number is detected as a line. A
// line ends when there is no aligned text after it. Also, a line ends when
// there is a large gap between words, relative to the length of the words.
// This means, depending on the gap between words, Amazon Rekognition may detect
// multiple lines in text aligned in the same direction. Periods don't represent
// the end of a line. If a sentence spans multiple lines, the DetectText operation
// returns multiple lines.
//
// To determine whether a TextDetection element is a line of text or a word,
// use the TextDetection object Type field.
//
// To be detected, text must be within +/- 90 degrees orientation of the horizontal
// axis.
//
// For more information, see DetectText in the Amazon Rekognition Developer
// Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Rekognition's
// API operation DetectText for usage and error information.
//
// Returned Error Types:
//   * InvalidS3ObjectException
//   Amazon Rekognition is unable to access the S3 object specified in the request.
//
//   * InvalidParameterException
//   Input parameter violated a constraint. Validate your parameter before calling
//   the API operation again.
//
//   * ImageTooLargeException
//   The input image size exceeds the allowed limit. For more information, see
//   Limits in Amazon Rekognition in the Amazon Rekognition Developer Guide.
//
//   * AccessDeniedException
//   You are not authorized to perform the action.
//
//   * InternalServerError
//   Amazon Rekognition experienced a service issue. Try your call again.
//
//   * ThrottlingException
//   Amazon Rekognition is temporarily unable to process the request. Try your
//   call again.
//
//   * ProvisionedThroughputExceededException
//   The number of requests exceeded your throughput limit. If you want to increase
//   this limit, contact Amazon Rekognition.
//
//   * InvalidImageFormatException
//   The provided image format is not supported.
//
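//    // Example (illustrative sketch): detecting text and printing only lines,
//    // using the TextDetection Type field. The bucket and key are placeholders.
//    out, err := client.DetectText(&rekognition.DetectTextInput{
//        Image: &rekognition.Image{
//            S3Object: &rekognition.S3Object{
//                Bucket: aws.String("my-bucket"),
//                Name:   aws.String("sign.jpg"),
//            },
//        },
//    })
//    if err == nil {
//        for _, td := range out.TextDetections {
//            if aws.StringValue(td.Type) == "LINE" {
//                fmt.Println(aws.StringValue(td.DetectedText))
//            }
//        }
//    }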
func (c *Rekognition) DetectText(input *DetectTextInput) (*DetectTextOutput, error) {
	req, out := c.DetectTextRequest(input)
	return out, req.Send()
}

// DetectTextWithContext is the same as DetectText with the addition of
// the ability to pass a context and additional request options.
//
// See DetectText for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Rekognition) DetectTextWithContext(ctx aws.Context, input *DetectTextInput, opts ...request.Option) (*DetectTextOutput, error) {
	req, out := c.DetectTextRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opGetCelebrityInfo = "GetCelebrityInfo"

// GetCelebrityInfoRequest generates a "aws/request.Request" representing the
// client's request for the GetCelebrityInfo operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See GetCelebrityInfo for more information on using the GetCelebrityInfo
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
//    // Example sending a request using the GetCelebrityInfoRequest method.
//    req, resp := client.GetCelebrityInfoRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *Rekognition) GetCelebrityInfoRequest(input *GetCelebrityInfoInput) (req *request.Request, output *GetCelebrityInfoOutput) {
	op := &request.Operation{
		Name:       opGetCelebrityInfo,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &GetCelebrityInfoInput{}
	}

	output = &GetCelebrityInfoOutput{}
	req = c.newRequest(op, input, output)
	return
}

// GetCelebrityInfo API operation for Amazon Rekognition.
//
// Gets the name and additional information about a celebrity based on his or
// her Amazon Rekognition ID. The additional information is returned as an array
// of URLs. If there is no additional information about the celebrity, this
// list is empty.
//
// For more information, see Recognizing Celebrities in an Image in the Amazon
// Rekognition Developer Guide.
//
// This operation requires permissions to perform the rekognition:GetCelebrityInfo
// action.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Rekognition's
// API operation GetCelebrityInfo for usage and error information.
//
// Returned Error Types:
//   * InvalidParameterException
//   Input parameter violated a constraint. Validate your parameter before calling
//   the API operation again.
//
//   * AccessDeniedException
//   You are not authorized to perform the action.
//
//   * InternalServerError
//   Amazon Rekognition experienced a service issue. Try your call again.
//
//   * ThrottlingException
//   Amazon Rekognition is temporarily unable to process the request. Try your
//   call again.
//
//   * ProvisionedThroughputExceededException
//   The number of requests exceeded your throughput limit. If you want to increase
//   this limit, contact Amazon Rekognition.
//
//   * ResourceNotFoundException
//   The collection specified in the request cannot be found.
//
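//    // Example (illustrative sketch): looking up a celebrity by the ID returned
//    // from a previous RecognizeCelebrities call. "celebrity-id" is a placeholder.
//    out, err := client.GetCelebrityInfo(&rekognition.GetCelebrityInfoInput{
//        Id: aws.String("celebrity-id"),
//    })
//    if err == nil {
//        fmt.Println(aws.StringValue(out.Name), aws.StringValueSlice(out.Urls))
//    }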
func (c *Rekognition) GetCelebrityInfo(input *GetCelebrityInfoInput) (*GetCelebrityInfoOutput, error) {
	req, out := c.GetCelebrityInfoRequest(input)
	return out, req.Send()
}

// GetCelebrityInfoWithContext is the same as GetCelebrityInfo with the addition of
// the ability to pass a context and additional request options.
//
// See GetCelebrityInfo for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Rekognition) GetCelebrityInfoWithContext(ctx aws.Context, input *GetCelebrityInfoInput, opts ...request.Option) (*GetCelebrityInfoOutput, error) {
	req, out := c.GetCelebrityInfoRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opGetCelebrityRecognition = "GetCelebrityRecognition"

// GetCelebrityRecognitionRequest generates a "aws/request.Request" representing the
// client's request for the GetCelebrityRecognition operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See GetCelebrityRecognition for more information on using the GetCelebrityRecognition
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
//    // Example sending a request using the GetCelebrityRecognitionRequest method.
//    req, resp := client.GetCelebrityRecognitionRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *Rekognition) GetCelebrityRecognitionRequest(input *GetCelebrityRecognitionInput) (req *request.Request, output *GetCelebrityRecognitionOutput) {
	op := &request.Operation{
		Name:       opGetCelebrityRecognition,
		HTTPMethod: "POST",
		HTTPPath:   "/",
		Paginator: &request.Paginator{
			InputTokens:     []string{"NextToken"},
			OutputTokens:    []string{"NextToken"},
			LimitToken:      "MaxResults",
			TruncationToken: "",
		},
	}

	if input == nil {
		input = &GetCelebrityRecognitionInput{}
	}

	output = &GetCelebrityRecognitionOutput{}
	req = c.newRequest(op, input, output)
	return
}

// GetCelebrityRecognition API operation for Amazon Rekognition.
//
// Gets the celebrity recognition results for an Amazon Rekognition Video analysis
// started by StartCelebrityRecognition.
//
// Celebrity recognition in a video is an asynchronous operation. Analysis is
// started by a call to StartCelebrityRecognition which returns a job identifier
// (JobId). When the celebrity recognition operation finishes, Amazon Rekognition
// Video publishes a completion status to the Amazon Simple Notification Service
// topic registered in the initial call to StartCelebrityRecognition. To get
// the results of the celebrity recognition analysis, first check that the status
// value published to the Amazon SNS topic is SUCCEEDED. If so, call GetCelebrityRecognition
// and pass the job identifier (JobId) from the initial call to StartCelebrityRecognition.
//
// For more information, see Working With Stored Videos in the Amazon Rekognition
// Developer Guide.
//
// GetCelebrityRecognition returns detected celebrities and the time(s) they
// are detected in an array (Celebrities) of CelebrityRecognition objects. Each
// CelebrityRecognition contains information about the celebrity in a CelebrityDetail
// object and the time, Timestamp, the celebrity was detected.
//
// GetCelebrityRecognition only returns the default facial attributes (BoundingBox,
// Confidence, Landmarks, Pose, and Quality). The other facial attributes listed
// in the Face object of the following response syntax are not returned. For
// more information, see FaceDetail in the Amazon Rekognition Developer Guide.
//
// By default, the Celebrities array is sorted by time (milliseconds from the
// start of the video). You can also sort the array by celebrity by specifying
// the value ID in the SortBy input parameter.
//
// The CelebrityDetail object includes the celebrity identifier and additional
// information URLs. If you don't store the additional information URLs, you
// can get them later by calling GetCelebrityInfo with the celebrity identifier.
2637//
2638// No information is returned for faces not recognized as celebrities.
2639//
// Use the MaxResults parameter to limit the number of celebrities returned.
// If there are more results than specified in MaxResults, the value of NextToken
// in the operation response contains a pagination token for getting the next set
// of results. To get the next page of results, call GetCelebrityRecognition and
// populate the NextToken request parameter with the token value returned from
// the previous call to GetCelebrityRecognition.
2646//
2647// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
2648// with awserr.Error's Code and Message methods to get detailed information about
2649// the error.
2650//
2651// See the AWS API reference guide for Amazon Rekognition's
2652// API operation GetCelebrityRecognition for usage and error information.
2653//
2654// Returned Error Types:
2655//   * AccessDeniedException
2656//   You are not authorized to perform the action.
2657//
2658//   * InternalServerError
2659//   Amazon Rekognition experienced a service issue. Try your call again.
2660//
2661//   * InvalidParameterException
2662//   Input parameter violated a constraint. Validate your parameter before calling
2663//   the API operation again.
2664//
2665//   * InvalidPaginationTokenException
2666//   Pagination token in the request is not valid.
2667//
2668//   * ProvisionedThroughputExceededException
2669//   The number of requests exceeded your throughput limit. If you want to increase
2670//   this limit, contact Amazon Rekognition.
2671//
2672//   * ResourceNotFoundException
2673//   The collection specified in the request cannot be found.
2674//
2675//   * ThrottlingException
2676//   Amazon Rekognition is temporarily unable to process the request. Try your
2677//   call again.
2678//
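// A minimal sketch (illustrative, not part of the generated API) of fetching
// results once the Amazon SNS topic reports SUCCEEDED. The job ID is a
// placeholder and "client" is assumed to be an existing Rekognition client.
//
//    out, err := client.GetCelebrityRecognition(&rekognition.GetCelebrityRecognitionInput{
//        JobId:      aws.String("job-id-from-StartCelebrityRecognition"),
//        MaxResults: aws.Int64(10),
//    })
//    if err == nil {
//        for _, c := range out.Celebrities {
//            fmt.Println(aws.Int64Value(c.Timestamp), aws.StringValue(c.Celebrity.Name))
//        }
//    }
//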
2679func (c *Rekognition) GetCelebrityRecognition(input *GetCelebrityRecognitionInput) (*GetCelebrityRecognitionOutput, error) {
2680	req, out := c.GetCelebrityRecognitionRequest(input)
2681	return out, req.Send()
2682}
2683
2684// GetCelebrityRecognitionWithContext is the same as GetCelebrityRecognition with the addition of
2685// the ability to pass a context and additional request options.
2686//
2687// See GetCelebrityRecognition for details on how to use this API operation.
2688//
2689// The context must be non-nil and will be used for request cancellation. If
2690// the context is nil a panic will occur. In the future the SDK may create
2691// sub-contexts for http.Requests. See https://golang.org/pkg/context/
2692// for more information on using Contexts.
2693func (c *Rekognition) GetCelebrityRecognitionWithContext(ctx aws.Context, input *GetCelebrityRecognitionInput, opts ...request.Option) (*GetCelebrityRecognitionOutput, error) {
2694	req, out := c.GetCelebrityRecognitionRequest(input)
2695	req.SetContext(ctx)
2696	req.ApplyOptions(opts...)
2697	return out, req.Send()
2698}
2699
2700// GetCelebrityRecognitionPages iterates over the pages of a GetCelebrityRecognition operation,
2701// calling the "fn" function with the response data for each page. To stop
2702// iterating, return false from the fn function.
2703//
2704// See GetCelebrityRecognition method for more information on how to use this operation.
2705//
2706// Note: This operation can generate multiple requests to a service.
2707//
2708//    // Example iterating over at most 3 pages of a GetCelebrityRecognition operation.
2709//    pageNum := 0
2710//    err := client.GetCelebrityRecognitionPages(params,
2711//        func(page *rekognition.GetCelebrityRecognitionOutput, lastPage bool) bool {
2712//            pageNum++
2713//            fmt.Println(page)
2714//            return pageNum <= 3
2715//        })
2716//
2717func (c *Rekognition) GetCelebrityRecognitionPages(input *GetCelebrityRecognitionInput, fn func(*GetCelebrityRecognitionOutput, bool) bool) error {
2718	return c.GetCelebrityRecognitionPagesWithContext(aws.BackgroundContext(), input, fn)
2719}
2720
2721// GetCelebrityRecognitionPagesWithContext same as GetCelebrityRecognitionPages except
2722// it takes a Context and allows setting request options on the pages.
2723//
2724// The context must be non-nil and will be used for request cancellation. If
2725// the context is nil a panic will occur. In the future the SDK may create
2726// sub-contexts for http.Requests. See https://golang.org/pkg/context/
2727// for more information on using Contexts.
2728func (c *Rekognition) GetCelebrityRecognitionPagesWithContext(ctx aws.Context, input *GetCelebrityRecognitionInput, fn func(*GetCelebrityRecognitionOutput, bool) bool, opts ...request.Option) error {
2729	p := request.Pagination{
2730		NewRequest: func() (*request.Request, error) {
2731			var inCpy *GetCelebrityRecognitionInput
2732			if input != nil {
2733				tmp := *input
2734				inCpy = &tmp
2735			}
2736			req, _ := c.GetCelebrityRecognitionRequest(inCpy)
2737			req.SetContext(ctx)
2738			req.ApplyOptions(opts...)
2739			return req, nil
2740		},
2741	}
2742
2743	for p.Next() {
2744		if !fn(p.Page().(*GetCelebrityRecognitionOutput), !p.HasNextPage()) {
2745			break
2746		}
2747	}
2748
2749	return p.Err()
2750}
2751
2752const opGetContentModeration = "GetContentModeration"
2753
2754// GetContentModerationRequest generates a "aws/request.Request" representing the
2755// client's request for the GetContentModeration operation. The "output" return
2756// value will be populated with the request's response once the request completes
2757// successfully.
2758//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
2761//
2762// See GetContentModeration for more information on using the GetContentModeration
2763// API call, and error handling.
2764//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
2767//
2768//
2769//    // Example sending a request using the GetContentModerationRequest method.
2770//    req, resp := client.GetContentModerationRequest(params)
2771//
2772//    err := req.Send()
2773//    if err == nil { // resp is now filled
2774//        fmt.Println(resp)
2775//    }
2776func (c *Rekognition) GetContentModerationRequest(input *GetContentModerationInput) (req *request.Request, output *GetContentModerationOutput) {
2777	op := &request.Operation{
2778		Name:       opGetContentModeration,
2779		HTTPMethod: "POST",
2780		HTTPPath:   "/",
2781		Paginator: &request.Paginator{
2782			InputTokens:     []string{"NextToken"},
2783			OutputTokens:    []string{"NextToken"},
2784			LimitToken:      "MaxResults",
2785			TruncationToken: "",
2786		},
2787	}
2788
2789	if input == nil {
2790		input = &GetContentModerationInput{}
2791	}
2792
2793	output = &GetContentModerationOutput{}
2794	req = c.newRequest(op, input, output)
2795	return
2796}
2797
2798// GetContentModeration API operation for Amazon Rekognition.
2799//
// Gets the unsafe content analysis results for an Amazon Rekognition Video analysis
2801// started by StartContentModeration.
2802//
2803// Unsafe content analysis of a video is an asynchronous operation. You start
2804// analysis by calling StartContentModeration which returns a job identifier
2805// (JobId). When analysis finishes, Amazon Rekognition Video publishes a completion
2806// status to the Amazon Simple Notification Service topic registered in the
2807// initial call to StartContentModeration. To get the results of the unsafe
2808// content analysis, first check that the status value published to the Amazon
2809// SNS topic is SUCCEEDED. If so, call GetContentModeration and pass the job
2810// identifier (JobId) from the initial call to StartContentModeration.
2811//
// For more information, see Working with Stored Videos in the Amazon Rekognition
// Developer Guide.
2814//
2815// GetContentModeration returns detected unsafe content labels, and the time
2816// they are detected, in an array, ModerationLabels, of ContentModerationDetection
2817// objects.
2818//
2819// By default, the moderated labels are returned sorted by time, in milliseconds
2820// from the start of the video. You can also sort them by moderated label by
2821// specifying NAME for the SortBy input parameter.
2822//
2823// Since video analysis can return a large number of results, use the MaxResults
2824// parameter to limit the number of labels returned in a single call to GetContentModeration.
2825// If there are more results than specified in MaxResults, the value of NextToken
2826// in the operation response contains a pagination token for getting the next
2827// set of results. To get the next page of results, call GetContentModeration
2828// and populate the NextToken request parameter with the value of NextToken
2829// returned from the previous call to GetContentModeration.
2830//
2831// For more information, see Detecting Unsafe Content in the Amazon Rekognition
2832// Developer Guide.
2833//
2834// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
2835// with awserr.Error's Code and Message methods to get detailed information about
2836// the error.
2837//
2838// See the AWS API reference guide for Amazon Rekognition's
2839// API operation GetContentModeration for usage and error information.
2840//
2841// Returned Error Types:
2842//   * AccessDeniedException
2843//   You are not authorized to perform the action.
2844//
2845//   * InternalServerError
2846//   Amazon Rekognition experienced a service issue. Try your call again.
2847//
2848//   * InvalidParameterException
2849//   Input parameter violated a constraint. Validate your parameter before calling
2850//   the API operation again.
2851//
2852//   * InvalidPaginationTokenException
2853//   Pagination token in the request is not valid.
2854//
2855//   * ProvisionedThroughputExceededException
2856//   The number of requests exceeded your throughput limit. If you want to increase
2857//   this limit, contact Amazon Rekognition.
2858//
2859//   * ResourceNotFoundException
2860//   The collection specified in the request cannot be found.
2861//
2862//   * ThrottlingException
2863//   Amazon Rekognition is temporarily unable to process the request. Try your
2864//   call again.
2865//
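// A minimal sketch (illustrative only) of manual pagination with NextToken;
// the job ID is a placeholder and "client" is assumed to be an existing
// Rekognition client.
//
//    input := &rekognition.GetContentModerationInput{
//        JobId:      aws.String("job-id-from-StartContentModeration"),
//        MaxResults: aws.Int64(100),
//    }
//    for {
//        out, err := client.GetContentModeration(input)
//        if err != nil {
//            break // handle the error in real code
//        }
//        for _, d := range out.ModerationLabels {
//            fmt.Println(aws.Int64Value(d.Timestamp), aws.StringValue(d.ModerationLabel.Name))
//        }
//        if out.NextToken == nil {
//            break // no more pages
//        }
//        input.NextToken = out.NextToken
//    }
//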
2866func (c *Rekognition) GetContentModeration(input *GetContentModerationInput) (*GetContentModerationOutput, error) {
2867	req, out := c.GetContentModerationRequest(input)
2868	return out, req.Send()
2869}
2870
2871// GetContentModerationWithContext is the same as GetContentModeration with the addition of
2872// the ability to pass a context and additional request options.
2873//
2874// See GetContentModeration for details on how to use this API operation.
2875//
2876// The context must be non-nil and will be used for request cancellation. If
2877// the context is nil a panic will occur. In the future the SDK may create
2878// sub-contexts for http.Requests. See https://golang.org/pkg/context/
2879// for more information on using Contexts.
2880func (c *Rekognition) GetContentModerationWithContext(ctx aws.Context, input *GetContentModerationInput, opts ...request.Option) (*GetContentModerationOutput, error) {
2881	req, out := c.GetContentModerationRequest(input)
2882	req.SetContext(ctx)
2883	req.ApplyOptions(opts...)
2884	return out, req.Send()
2885}
2886
2887// GetContentModerationPages iterates over the pages of a GetContentModeration operation,
2888// calling the "fn" function with the response data for each page. To stop
2889// iterating, return false from the fn function.
2890//
2891// See GetContentModeration method for more information on how to use this operation.
2892//
2893// Note: This operation can generate multiple requests to a service.
2894//
2895//    // Example iterating over at most 3 pages of a GetContentModeration operation.
2896//    pageNum := 0
2897//    err := client.GetContentModerationPages(params,
2898//        func(page *rekognition.GetContentModerationOutput, lastPage bool) bool {
2899//            pageNum++
2900//            fmt.Println(page)
2901//            return pageNum <= 3
2902//        })
2903//
2904func (c *Rekognition) GetContentModerationPages(input *GetContentModerationInput, fn func(*GetContentModerationOutput, bool) bool) error {
2905	return c.GetContentModerationPagesWithContext(aws.BackgroundContext(), input, fn)
2906}
2907
2908// GetContentModerationPagesWithContext same as GetContentModerationPages except
2909// it takes a Context and allows setting request options on the pages.
2910//
2911// The context must be non-nil and will be used for request cancellation. If
2912// the context is nil a panic will occur. In the future the SDK may create
2913// sub-contexts for http.Requests. See https://golang.org/pkg/context/
2914// for more information on using Contexts.
2915func (c *Rekognition) GetContentModerationPagesWithContext(ctx aws.Context, input *GetContentModerationInput, fn func(*GetContentModerationOutput, bool) bool, opts ...request.Option) error {
2916	p := request.Pagination{
2917		NewRequest: func() (*request.Request, error) {
2918			var inCpy *GetContentModerationInput
2919			if input != nil {
2920				tmp := *input
2921				inCpy = &tmp
2922			}
2923			req, _ := c.GetContentModerationRequest(inCpy)
2924			req.SetContext(ctx)
2925			req.ApplyOptions(opts...)
2926			return req, nil
2927		},
2928	}
2929
2930	for p.Next() {
2931		if !fn(p.Page().(*GetContentModerationOutput), !p.HasNextPage()) {
2932			break
2933		}
2934	}
2935
2936	return p.Err()
2937}
2938
2939const opGetFaceDetection = "GetFaceDetection"
2940
2941// GetFaceDetectionRequest generates a "aws/request.Request" representing the
2942// client's request for the GetFaceDetection operation. The "output" return
2943// value will be populated with the request's response once the request completes
2944// successfully.
2945//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
2948//
2949// See GetFaceDetection for more information on using the GetFaceDetection
2950// API call, and error handling.
2951//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
2954//
2955//
2956//    // Example sending a request using the GetFaceDetectionRequest method.
2957//    req, resp := client.GetFaceDetectionRequest(params)
2958//
2959//    err := req.Send()
2960//    if err == nil { // resp is now filled
2961//        fmt.Println(resp)
2962//    }
2963func (c *Rekognition) GetFaceDetectionRequest(input *GetFaceDetectionInput) (req *request.Request, output *GetFaceDetectionOutput) {
2964	op := &request.Operation{
2965		Name:       opGetFaceDetection,
2966		HTTPMethod: "POST",
2967		HTTPPath:   "/",
2968		Paginator: &request.Paginator{
2969			InputTokens:     []string{"NextToken"},
2970			OutputTokens:    []string{"NextToken"},
2971			LimitToken:      "MaxResults",
2972			TruncationToken: "",
2973		},
2974	}
2975
2976	if input == nil {
2977		input = &GetFaceDetectionInput{}
2978	}
2979
2980	output = &GetFaceDetectionOutput{}
2981	req = c.newRequest(op, input, output)
2982	return
2983}
2984
2985// GetFaceDetection API operation for Amazon Rekognition.
2986//
// Gets face detection results for an Amazon Rekognition Video analysis started
2988// by StartFaceDetection.
2989//
2990// Face detection with Amazon Rekognition Video is an asynchronous operation.
2991// You start face detection by calling StartFaceDetection which returns a job
2992// identifier (JobId). When the face detection operation finishes, Amazon Rekognition
2993// Video publishes a completion status to the Amazon Simple Notification Service
2994// topic registered in the initial call to StartFaceDetection. To get the results
2995// of the face detection operation, first check that the status value published
2996// to the Amazon SNS topic is SUCCEEDED. If so, call GetFaceDetection and pass
2997// the job identifier (JobId) from the initial call to StartFaceDetection.
2998//
2999// GetFaceDetection returns an array of detected faces (Faces) sorted by the
3000// time the faces were detected.
3001//
// Use the MaxResults parameter to limit the number of faces returned. If there
3003// are more results than specified in MaxResults, the value of NextToken in
3004// the operation response contains a pagination token for getting the next set
3005// of results. To get the next page of results, call GetFaceDetection and populate
3006// the NextToken request parameter with the token value returned from the previous
3007// call to GetFaceDetection.
3008//
3009// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
3010// with awserr.Error's Code and Message methods to get detailed information about
3011// the error.
3012//
3013// See the AWS API reference guide for Amazon Rekognition's
3014// API operation GetFaceDetection for usage and error information.
3015//
3016// Returned Error Types:
3017//   * AccessDeniedException
3018//   You are not authorized to perform the action.
3019//
3020//   * InternalServerError
3021//   Amazon Rekognition experienced a service issue. Try your call again.
3022//
3023//   * InvalidParameterException
3024//   Input parameter violated a constraint. Validate your parameter before calling
3025//   the API operation again.
3026//
3027//   * InvalidPaginationTokenException
3028//   Pagination token in the request is not valid.
3029//
3030//   * ProvisionedThroughputExceededException
3031//   The number of requests exceeded your throughput limit. If you want to increase
3032//   this limit, contact Amazon Rekognition.
3033//
3034//   * ResourceNotFoundException
3035//   The collection specified in the request cannot be found.
3036//
3037//   * ThrottlingException
3038//   Amazon Rekognition is temporarily unable to process the request. Try your
3039//   call again.
3040//
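// A minimal sketch (illustrative only) of calling the operation with a
// cancellation deadline via GetFaceDetectionWithContext; the job ID is a
// placeholder, the timeout is arbitrary, and "client" is assumed to be an
// existing Rekognition client.
//
//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//    defer cancel()
//    out, err := client.GetFaceDetectionWithContext(ctx, &rekognition.GetFaceDetectionInput{
//        JobId: aws.String("job-id-from-StartFaceDetection"),
//    })
//    if err == nil {
//        fmt.Println(len(out.Faces), "faces detected")
//    }
//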
3041func (c *Rekognition) GetFaceDetection(input *GetFaceDetectionInput) (*GetFaceDetectionOutput, error) {
3042	req, out := c.GetFaceDetectionRequest(input)
3043	return out, req.Send()
3044}
3045
3046// GetFaceDetectionWithContext is the same as GetFaceDetection with the addition of
3047// the ability to pass a context and additional request options.
3048//
3049// See GetFaceDetection for details on how to use this API operation.
3050//
3051// The context must be non-nil and will be used for request cancellation. If
3052// the context is nil a panic will occur. In the future the SDK may create
3053// sub-contexts for http.Requests. See https://golang.org/pkg/context/
3054// for more information on using Contexts.
3055func (c *Rekognition) GetFaceDetectionWithContext(ctx aws.Context, input *GetFaceDetectionInput, opts ...request.Option) (*GetFaceDetectionOutput, error) {
3056	req, out := c.GetFaceDetectionRequest(input)
3057	req.SetContext(ctx)
3058	req.ApplyOptions(opts...)
3059	return out, req.Send()
3060}
3061
3062// GetFaceDetectionPages iterates over the pages of a GetFaceDetection operation,
3063// calling the "fn" function with the response data for each page. To stop
3064// iterating, return false from the fn function.
3065//
3066// See GetFaceDetection method for more information on how to use this operation.
3067//
3068// Note: This operation can generate multiple requests to a service.
3069//
3070//    // Example iterating over at most 3 pages of a GetFaceDetection operation.
3071//    pageNum := 0
3072//    err := client.GetFaceDetectionPages(params,
3073//        func(page *rekognition.GetFaceDetectionOutput, lastPage bool) bool {
3074//            pageNum++
3075//            fmt.Println(page)
3076//            return pageNum <= 3
3077//        })
3078//
3079func (c *Rekognition) GetFaceDetectionPages(input *GetFaceDetectionInput, fn func(*GetFaceDetectionOutput, bool) bool) error {
3080	return c.GetFaceDetectionPagesWithContext(aws.BackgroundContext(), input, fn)
3081}
3082
3083// GetFaceDetectionPagesWithContext same as GetFaceDetectionPages except
3084// it takes a Context and allows setting request options on the pages.
3085//
3086// The context must be non-nil and will be used for request cancellation. If
3087// the context is nil a panic will occur. In the future the SDK may create
3088// sub-contexts for http.Requests. See https://golang.org/pkg/context/
3089// for more information on using Contexts.
3090func (c *Rekognition) GetFaceDetectionPagesWithContext(ctx aws.Context, input *GetFaceDetectionInput, fn func(*GetFaceDetectionOutput, bool) bool, opts ...request.Option) error {
3091	p := request.Pagination{
3092		NewRequest: func() (*request.Request, error) {
3093			var inCpy *GetFaceDetectionInput
3094			if input != nil {
3095				tmp := *input
3096				inCpy = &tmp
3097			}
3098			req, _ := c.GetFaceDetectionRequest(inCpy)
3099			req.SetContext(ctx)
3100			req.ApplyOptions(opts...)
3101			return req, nil
3102		},
3103	}
3104
3105	for p.Next() {
3106		if !fn(p.Page().(*GetFaceDetectionOutput), !p.HasNextPage()) {
3107			break
3108		}
3109	}
3110
3111	return p.Err()
3112}
3113
3114const opGetFaceSearch = "GetFaceSearch"
3115
3116// GetFaceSearchRequest generates a "aws/request.Request" representing the
3117// client's request for the GetFaceSearch operation. The "output" return
3118// value will be populated with the request's response once the request completes
3119// successfully.
3120//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
3123//
3124// See GetFaceSearch for more information on using the GetFaceSearch
3125// API call, and error handling.
3126//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
3129//
3130//
3131//    // Example sending a request using the GetFaceSearchRequest method.
3132//    req, resp := client.GetFaceSearchRequest(params)
3133//
3134//    err := req.Send()
3135//    if err == nil { // resp is now filled
3136//        fmt.Println(resp)
3137//    }
3138func (c *Rekognition) GetFaceSearchRequest(input *GetFaceSearchInput) (req *request.Request, output *GetFaceSearchOutput) {
3139	op := &request.Operation{
3140		Name:       opGetFaceSearch,
3141		HTTPMethod: "POST",
3142		HTTPPath:   "/",
3143		Paginator: &request.Paginator{
3144			InputTokens:     []string{"NextToken"},
3145			OutputTokens:    []string{"NextToken"},
3146			LimitToken:      "MaxResults",
3147			TruncationToken: "",
3148		},
3149	}
3150
3151	if input == nil {
3152		input = &GetFaceSearchInput{}
3153	}
3154
3155	output = &GetFaceSearchOutput{}
3156	req = c.newRequest(op, input, output)
3157	return
3158}
3159
3160// GetFaceSearch API operation for Amazon Rekognition.
3161//
3162// Gets the face search results for Amazon Rekognition Video face search started
3163// by StartFaceSearch. The search returns faces in a collection that match the
3164// faces of persons detected in a video. It also includes the time(s) that faces
3165// are matched in the video.
3166//
// Face search in a video is an asynchronous operation. You start face search
// by calling StartFaceSearch which returns a job identifier (JobId). When
3169// the search operation finishes, Amazon Rekognition Video publishes a completion
3170// status to the Amazon Simple Notification Service topic registered in the
3171// initial call to StartFaceSearch. To get the search results, first check that
3172// the status value published to the Amazon SNS topic is SUCCEEDED. If so, call
3173// GetFaceSearch and pass the job identifier (JobId) from the initial call to
3174// StartFaceSearch.
3175//
3176// For more information, see Searching Faces in a Collection in the Amazon Rekognition
3177// Developer Guide.
3178//
// The search results are returned in an array, Persons, of PersonMatch objects.
// Each PersonMatch element contains details about the matching faces in the
// input collection, person information (facial attributes, bounding boxes,
// and person identifier) for the matched person, and the time the person was
// matched in the video.
3184//
3185// GetFaceSearch only returns the default facial attributes (BoundingBox, Confidence,
3186// Landmarks, Pose, and Quality). The other facial attributes listed in the
3187// Face object of the following response syntax are not returned. For more information,
3188// see FaceDetail in the Amazon Rekognition Developer Guide.
3189//
// By default, the Persons array is sorted by the time, in milliseconds from
// the start of the video, at which persons are matched. You can also sort by
// person by specifying INDEX for the SortBy input parameter.
3193//
3194// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
3195// with awserr.Error's Code and Message methods to get detailed information about
3196// the error.
3197//
3198// See the AWS API reference guide for Amazon Rekognition's
3199// API operation GetFaceSearch for usage and error information.
3200//
3201// Returned Error Types:
3202//   * AccessDeniedException
3203//   You are not authorized to perform the action.
3204//
3205//   * InternalServerError
3206//   Amazon Rekognition experienced a service issue. Try your call again.
3207//
3208//   * InvalidParameterException
3209//   Input parameter violated a constraint. Validate your parameter before calling
3210//   the API operation again.
3211//
3212//   * InvalidPaginationTokenException
3213//   Pagination token in the request is not valid.
3214//
3215//   * ProvisionedThroughputExceededException
3216//   The number of requests exceeded your throughput limit. If you want to increase
3217//   this limit, contact Amazon Rekognition.
3218//
3219//   * ResourceNotFoundException
3220//   The collection specified in the request cannot be found.
3221//
3222//   * ThrottlingException
3223//   Amazon Rekognition is temporarily unable to process the request. Try your
3224//   call again.
3225//
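// A minimal sketch (illustrative only) of sorting the results by person index;
// the job ID is a placeholder and "client" is assumed to be an existing
// Rekognition client.
//
//    out, err := client.GetFaceSearch(&rekognition.GetFaceSearchInput{
//        JobId:  aws.String("job-id-from-StartFaceSearch"),
//        SortBy: aws.String("INDEX"),
//    })
//    if err == nil {
//        for _, p := range out.Persons {
//            fmt.Println(aws.Int64Value(p.Timestamp), len(p.FaceMatches), "matching faces")
//        }
//    }
//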
3226func (c *Rekognition) GetFaceSearch(input *GetFaceSearchInput) (*GetFaceSearchOutput, error) {
3227	req, out := c.GetFaceSearchRequest(input)
3228	return out, req.Send()
3229}
3230
3231// GetFaceSearchWithContext is the same as GetFaceSearch with the addition of
3232// the ability to pass a context and additional request options.
3233//
3234// See GetFaceSearch for details on how to use this API operation.
3235//
3236// The context must be non-nil and will be used for request cancellation. If
3237// the context is nil a panic will occur. In the future the SDK may create
3238// sub-contexts for http.Requests. See https://golang.org/pkg/context/
3239// for more information on using Contexts.
3240func (c *Rekognition) GetFaceSearchWithContext(ctx aws.Context, input *GetFaceSearchInput, opts ...request.Option) (*GetFaceSearchOutput, error) {
3241	req, out := c.GetFaceSearchRequest(input)
3242	req.SetContext(ctx)
3243	req.ApplyOptions(opts...)
3244	return out, req.Send()
3245}
3246
3247// GetFaceSearchPages iterates over the pages of a GetFaceSearch operation,
3248// calling the "fn" function with the response data for each page. To stop
3249// iterating, return false from the fn function.
3250//
3251// See GetFaceSearch method for more information on how to use this operation.
3252//
3253// Note: This operation can generate multiple requests to a service.
3254//
3255//    // Example iterating over at most 3 pages of a GetFaceSearch operation.
3256//    pageNum := 0
3257//    err := client.GetFaceSearchPages(params,
3258//        func(page *rekognition.GetFaceSearchOutput, lastPage bool) bool {
3259//            pageNum++
3260//            fmt.Println(page)
3261//            return pageNum <= 3
3262//        })
3263//
3264func (c *Rekognition) GetFaceSearchPages(input *GetFaceSearchInput, fn func(*GetFaceSearchOutput, bool) bool) error {
3265	return c.GetFaceSearchPagesWithContext(aws.BackgroundContext(), input, fn)
3266}
3267
3268// GetFaceSearchPagesWithContext same as GetFaceSearchPages except
3269// it takes a Context and allows setting request options on the pages.
3270//
3271// The context must be non-nil and will be used for request cancellation. If
3272// the context is nil a panic will occur. In the future the SDK may create
3273// sub-contexts for http.Requests. See https://golang.org/pkg/context/
3274// for more information on using Contexts.
3275func (c *Rekognition) GetFaceSearchPagesWithContext(ctx aws.Context, input *GetFaceSearchInput, fn func(*GetFaceSearchOutput, bool) bool, opts ...request.Option) error {
3276	p := request.Pagination{
3277		NewRequest: func() (*request.Request, error) {
3278			var inCpy *GetFaceSearchInput
3279			if input != nil {
3280				tmp := *input
3281				inCpy = &tmp
3282			}
3283			req, _ := c.GetFaceSearchRequest(inCpy)
3284			req.SetContext(ctx)
3285			req.ApplyOptions(opts...)
3286			return req, nil
3287		},
3288	}
3289
3290	for p.Next() {
3291		if !fn(p.Page().(*GetFaceSearchOutput), !p.HasNextPage()) {
3292			break
3293		}
3294	}
3295
3296	return p.Err()
3297}
3298
3299const opGetLabelDetection = "GetLabelDetection"
3300
3301// GetLabelDetectionRequest generates a "aws/request.Request" representing the
3302// client's request for the GetLabelDetection operation. The "output" return
3303// value will be populated with the request's response once the request completes
3304// successfully.
3305//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
3308//
3309// See GetLabelDetection for more information on using the GetLabelDetection
3310// API call, and error handling.
3311//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
3314//
3315//
3316//    // Example sending a request using the GetLabelDetectionRequest method.
3317//    req, resp := client.GetLabelDetectionRequest(params)
3318//
3319//    err := req.Send()
3320//    if err == nil { // resp is now filled
3321//        fmt.Println(resp)
3322//    }
3323func (c *Rekognition) GetLabelDetectionRequest(input *GetLabelDetectionInput) (req *request.Request, output *GetLabelDetectionOutput) {
3324	op := &request.Operation{
3325		Name:       opGetLabelDetection,
3326		HTTPMethod: "POST",
3327		HTTPPath:   "/",
3328		Paginator: &request.Paginator{
3329			InputTokens:     []string{"NextToken"},
3330			OutputTokens:    []string{"NextToken"},
3331			LimitToken:      "MaxResults",
3332			TruncationToken: "",
3333		},
3334	}
3335
3336	if input == nil {
3337		input = &GetLabelDetectionInput{}
3338	}
3339
3340	output = &GetLabelDetectionOutput{}
3341	req = c.newRequest(op, input, output)
3342	return
3343}
3344
3345// GetLabelDetection API operation for Amazon Rekognition.
3346//
// Gets the label detection results of an Amazon Rekognition Video analysis started
3348// by StartLabelDetection.
3349//
3350// The label detection operation is started by a call to StartLabelDetection
3351// which returns a job identifier (JobId). When the label detection operation
3352// finishes, Amazon Rekognition publishes a completion status to the Amazon
// Simple Notification Service topic registered in the initial call to StartLabelDetection.
3354// To get the results of the label detection operation, first check that the
3355// status value published to the Amazon SNS topic is SUCCEEDED. If so, call
3356// GetLabelDetection and pass the job identifier (JobId) from the initial call
3357// to StartLabelDetection.
3358//
3359// GetLabelDetection returns an array of detected labels (Labels) sorted by
3360// the time the labels were detected. You can also sort by the label name by
3361// specifying NAME for the SortBy input parameter.
3362//
3363// The labels returned include the label name, the percentage confidence in
3364// the accuracy of the detected label, and the time the label was detected in
3365// the video.
3366//
3367// The returned labels also include bounding box information for common objects,
3368// a hierarchical taxonomy of detected labels, and the version of the label
3369// model used for detection.
3370//
// Use the MaxResults parameter to limit the number of labels returned. If there
// are more results than specified in MaxResults, the value of NextToken in
// the operation response contains a pagination token for getting the next set
// of results. To get the next page of results, call GetLabelDetection and populate
3375// the NextToken request parameter with the token value returned from the previous
3376// call to GetLabelDetection.
3377//
3378// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
3379// with awserr.Error's Code and Message methods to get detailed information about
3380// the error.
3381//
3382// See the AWS API reference guide for Amazon Rekognition's
3383// API operation GetLabelDetection for usage and error information.
3384//
3385// Returned Error Types:
3386//   * AccessDeniedException
3387//   You are not authorized to perform the action.
3388//
3389//   * InternalServerError
3390//   Amazon Rekognition experienced a service issue. Try your call again.
3391//
3392//   * InvalidParameterException
3393//   Input parameter violated a constraint. Validate your parameter before calling
3394//   the API operation again.
3395//
3396//   * InvalidPaginationTokenException
3397//   Pagination token in the request is not valid.
3398//
3399//   * ProvisionedThroughputExceededException
3400//   The number of requests exceeded your throughput limit. If you want to increase
3401//   this limit, contact Amazon Rekognition.
3402//
3403//   * ResourceNotFoundException
3404//   The collection specified in the request cannot be found.
3405//
3406//   * ThrottlingException
3407//   Amazon Rekognition is temporarily unable to process the request. Try your
3408//   call again.
3409//
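// A minimal sketch (illustrative only) of paging through all labels with the
// GetLabelDetectionPages helper, sorted by label name; the job ID is a
// placeholder and "client" is assumed to be an existing Rekognition client.
//
//    err := client.GetLabelDetectionPages(
//        &rekognition.GetLabelDetectionInput{
//            JobId:  aws.String("job-id-from-StartLabelDetection"),
//            SortBy: aws.String("NAME"),
//        },
//        func(page *rekognition.GetLabelDetectionOutput, lastPage bool) bool {
//            for _, l := range page.Labels {
//                fmt.Println(aws.StringValue(l.Label.Name), aws.Float64Value(l.Label.Confidence))
//            }
//            return true // continue to the next page
//        })
//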
3410func (c *Rekognition) GetLabelDetection(input *GetLabelDetectionInput) (*GetLabelDetectionOutput, error) {
3411	req, out := c.GetLabelDetectionRequest(input)
3412	return out, req.Send()
3413}
3414
3415// GetLabelDetectionWithContext is the same as GetLabelDetection with the addition of
3416// the ability to pass a context and additional request options.
3417//
3418// See GetLabelDetection for details on how to use this API operation.
3419//
3420// The context must be non-nil and will be used for request cancellation. If
3421// the context is nil a panic will occur. In the future the SDK may create
3422// sub-contexts for http.Requests. See https://golang.org/pkg/context/
3423// for more information on using Contexts.
3424func (c *Rekognition) GetLabelDetectionWithContext(ctx aws.Context, input *GetLabelDetectionInput, opts ...request.Option) (*GetLabelDetectionOutput, error) {
3425	req, out := c.GetLabelDetectionRequest(input)
3426	req.SetContext(ctx)
3427	req.ApplyOptions(opts...)
3428	return out, req.Send()
3429}
3430
3431// GetLabelDetectionPages iterates over the pages of a GetLabelDetection operation,
3432// calling the "fn" function with the response data for each page. To stop
3433// iterating, return false from the fn function.
3434//
3435// See GetLabelDetection method for more information on how to use this operation.
3436//
3437// Note: This operation can generate multiple requests to a service.
3438//
3439//    // Example iterating over at most 3 pages of a GetLabelDetection operation.
3440//    pageNum := 0
3441//    err := client.GetLabelDetectionPages(params,
3442//        func(page *rekognition.GetLabelDetectionOutput, lastPage bool) bool {
3443//            pageNum++
3444//            fmt.Println(page)
3445//            return pageNum <= 3
3446//        })
3447//
3448func (c *Rekognition) GetLabelDetectionPages(input *GetLabelDetectionInput, fn func(*GetLabelDetectionOutput, bool) bool) error {
3449	return c.GetLabelDetectionPagesWithContext(aws.BackgroundContext(), input, fn)
3450}
3451
3452// GetLabelDetectionPagesWithContext same as GetLabelDetectionPages except
3453// it takes a Context and allows setting request options on the pages.
3454//
3455// The context must be non-nil and will be used for request cancellation. If
3456// the context is nil a panic will occur. In the future the SDK may create
3457// sub-contexts for http.Requests. See https://golang.org/pkg/context/
3458// for more information on using Contexts.
3459func (c *Rekognition) GetLabelDetectionPagesWithContext(ctx aws.Context, input *GetLabelDetectionInput, fn func(*GetLabelDetectionOutput, bool) bool, opts ...request.Option) error {
3460	p := request.Pagination{
3461		NewRequest: func() (*request.Request, error) {
3462			var inCpy *GetLabelDetectionInput
3463			if input != nil {
3464				tmp := *input
3465				inCpy = &tmp
3466			}
3467			req, _ := c.GetLabelDetectionRequest(inCpy)
3468			req.SetContext(ctx)
3469			req.ApplyOptions(opts...)
3470			return req, nil
3471		},
3472	}
3473
3474	for p.Next() {
3475		if !fn(p.Page().(*GetLabelDetectionOutput), !p.HasNextPage()) {
3476			break
3477		}
3478	}
3479
3480	return p.Err()
3481}
3482
3483const opGetPersonTracking = "GetPersonTracking"
3484
3485// GetPersonTrackingRequest generates a "aws/request.Request" representing the
3486// client's request for the GetPersonTracking operation. The "output" return
3487// value will be populated with the request's response once the request completes
3488// successfully.
3489//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
3492//
3493// See GetPersonTracking for more information on using the GetPersonTracking
3494// API call, and error handling.
3495//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
3498//
3499//
3500//    // Example sending a request using the GetPersonTrackingRequest method.
3501//    req, resp := client.GetPersonTrackingRequest(params)
3502//
3503//    err := req.Send()
3504//    if err == nil { // resp is now filled
3505//        fmt.Println(resp)
3506//    }
3507func (c *Rekognition) GetPersonTrackingRequest(input *GetPersonTrackingInput) (req *request.Request, output *GetPersonTrackingOutput) {
3508	op := &request.Operation{
3509		Name:       opGetPersonTracking,
3510		HTTPMethod: "POST",
3511		HTTPPath:   "/",
3512		Paginator: &request.Paginator{
3513			InputTokens:     []string{"NextToken"},
3514			OutputTokens:    []string{"NextToken"},
3515			LimitToken:      "MaxResults",
3516			TruncationToken: "",
3517		},
3518	}
3519
3520	if input == nil {
3521		input = &GetPersonTrackingInput{}
3522	}
3523
3524	output = &GetPersonTrackingOutput{}
3525	req = c.newRequest(op, input, output)
3526	return
3527}
3528
3529// GetPersonTracking API operation for Amazon Rekognition.
3530//
// Gets the path tracking results of an Amazon Rekognition Video analysis started
3532// by StartPersonTracking.
3533//
3534// The person path tracking operation is started by a call to StartPersonTracking
3535// which returns a job identifier (JobId). When the operation finishes, Amazon
3536// Rekognition Video publishes a completion status to the Amazon Simple Notification
3537// Service topic registered in the initial call to StartPersonTracking.
3538//
3539// To get the results of the person path tracking operation, first check that
3540// the status value published to the Amazon SNS topic is SUCCEEDED. If so, call
3541// GetPersonTracking and pass the job identifier (JobId) from the initial call
3542// to StartPersonTracking.
3543//
3544// GetPersonTracking returns an array, Persons, of tracked persons and the time(s)
3545// their paths were tracked in the video.
3546//
3547// GetPersonTracking only returns the default facial attributes (BoundingBox,
3548// Confidence, Landmarks, Pose, and Quality). The other facial attributes listed
3549// in the Face object of the following response syntax are not returned.
3550//
3551// For more information, see FaceDetail in the Amazon Rekognition Developer
3552// Guide.
3553//
3554// By default, the array is sorted by the time(s) a person's path is tracked
3555// in the video. You can sort by tracked persons by specifying INDEX for the
3556// SortBy input parameter.
3557//
3558// Use the MaxResults parameter to limit the number of items returned. If there
3559// are more results than specified in MaxResults, the value of NextToken in
3560// the operation response contains a pagination token for getting the next set
3561// of results. To get the next page of results, call GetPersonTracking and populate
3562// the NextToken request parameter with the token value returned from the previous
3563// call to GetPersonTracking.
3564//
3565// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
3566// with awserr.Error's Code and Message methods to get detailed information about
3567// the error.
3568//
3569// See the AWS API reference guide for Amazon Rekognition's
3570// API operation GetPersonTracking for usage and error information.
3571//
3572// Returned Error Types:
3573//   * AccessDeniedException
3574//   You are not authorized to perform the action.
3575//
3576//   * InternalServerError
3577//   Amazon Rekognition experienced a service issue. Try your call again.
3578//
3579//   * InvalidParameterException
3580//   Input parameter violated a constraint. Validate your parameter before calling
3581//   the API operation again.
3582//
3583//   * InvalidPaginationTokenException
3584//   Pagination token in the request is not valid.
3585//
3586//   * ProvisionedThroughputExceededException
3587//   The number of requests exceeded your throughput limit. If you want to increase
3588//   this limit, contact Amazon Rekognition.
3589//
3590//   * ResourceNotFoundException
3591//   The collection specified in the request cannot be found.
3592//
3593//   * ThrottlingException
3594//   Amazon Rekognition is temporarily unable to process the request. Try your
3595//   call again.
3596//
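// A minimal sketch (illustrative only) that checks the job status before
// reading tracked persons; the job ID is a placeholder and "client" is
// assumed to be an existing Rekognition client.
//
//    out, err := client.GetPersonTracking(&rekognition.GetPersonTrackingInput{
//        JobId: aws.String("job-id-from-StartPersonTracking"),
//    })
//    if err == nil && aws.StringValue(out.JobStatus) == "SUCCEEDED" {
//        for _, p := range out.Persons {
//            fmt.Println(aws.Int64Value(p.Timestamp), aws.Int64Value(p.Person.Index))
//        }
//    }
//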
3597func (c *Rekognition) GetPersonTracking(input *GetPersonTrackingInput) (*GetPersonTrackingOutput, error) {
3598	req, out := c.GetPersonTrackingRequest(input)
3599	return out, req.Send()
3600}
3601
3602// GetPersonTrackingWithContext is the same as GetPersonTracking with the addition of
3603// the ability to pass a context and additional request options.
3604//
3605// See GetPersonTracking for details on how to use this API operation.
3606//
3607// The context must be non-nil and will be used for request cancellation. If
3608// the context is nil a panic will occur. In the future the SDK may create
3609// sub-contexts for http.Requests. See https://golang.org/pkg/context/
3610// for more information on using Contexts.
3611func (c *Rekognition) GetPersonTrackingWithContext(ctx aws.Context, input *GetPersonTrackingInput, opts ...request.Option) (*GetPersonTrackingOutput, error) {
3612	req, out := c.GetPersonTrackingRequest(input)
3613	req.SetContext(ctx)
3614	req.ApplyOptions(opts...)
3615	return out, req.Send()
3616}
3617
3618// GetPersonTrackingPages iterates over the pages of a GetPersonTracking operation,
3619// calling the "fn" function with the response data for each page. To stop
3620// iterating, return false from the fn function.
3621//
3622// See GetPersonTracking method for more information on how to use this operation.
3623//
3624// Note: This operation can generate multiple requests to a service.
3625//
3626//    // Example iterating over at most 3 pages of a GetPersonTracking operation.
3627//    pageNum := 0
3628//    err := client.GetPersonTrackingPages(params,
3629//        func(page *rekognition.GetPersonTrackingOutput, lastPage bool) bool {
3630//            pageNum++
3631//            fmt.Println(page)
3632//            return pageNum <= 3
3633//        })
3634//
3635func (c *Rekognition) GetPersonTrackingPages(input *GetPersonTrackingInput, fn func(*GetPersonTrackingOutput, bool) bool) error {
3636	return c.GetPersonTrackingPagesWithContext(aws.BackgroundContext(), input, fn)
3637}
3638
3639// GetPersonTrackingPagesWithContext same as GetPersonTrackingPages except
3640// it takes a Context and allows setting request options on the pages.
3641//
3642// The context must be non-nil and will be used for request cancellation. If
3643// the context is nil a panic will occur. In the future the SDK may create
3644// sub-contexts for http.Requests. See https://golang.org/pkg/context/
3645// for more information on using Contexts.
3646func (c *Rekognition) GetPersonTrackingPagesWithContext(ctx aws.Context, input *GetPersonTrackingInput, fn func(*GetPersonTrackingOutput, bool) bool, opts ...request.Option) error {
3647	p := request.Pagination{
3648		NewRequest: func() (*request.Request, error) {
3649			var inCpy *GetPersonTrackingInput
3650			if input != nil {
3651				tmp := *input
3652				inCpy = &tmp
3653			}
3654			req, _ := c.GetPersonTrackingRequest(inCpy)
3655			req.SetContext(ctx)
3656			req.ApplyOptions(opts...)
3657			return req, nil
3658		},
3659	}
3660
3661	for p.Next() {
3662		if !fn(p.Page().(*GetPersonTrackingOutput), !p.HasNextPage()) {
3663			break
3664		}
3665	}
3666
3667	return p.Err()
3668}
3669
3670const opGetSegmentDetection = "GetSegmentDetection"
3671
3672// GetSegmentDetectionRequest generates a "aws/request.Request" representing the
3673// client's request for the GetSegmentDetection operation. The "output" return
3674// value will be populated with the request's response once the request completes
3675// successfully.
3676//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
3679//
3680// See GetSegmentDetection for more information on using the GetSegmentDetection
3681// API call, and error handling.
3682//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
3685//
3686//
3687//    // Example sending a request using the GetSegmentDetectionRequest method.
3688//    req, resp := client.GetSegmentDetectionRequest(params)
3689//
3690//    err := req.Send()
3691//    if err == nil { // resp is now filled
3692//        fmt.Println(resp)
3693//    }
3694func (c *Rekognition) GetSegmentDetectionRequest(input *GetSegmentDetectionInput) (req *request.Request, output *GetSegmentDetectionOutput) {
3695	op := &request.Operation{
3696		Name:       opGetSegmentDetection,
3697		HTTPMethod: "POST",
3698		HTTPPath:   "/",
3699		Paginator: &request.Paginator{
3700			InputTokens:     []string{"NextToken"},
3701			OutputTokens:    []string{"NextToken"},
3702			LimitToken:      "MaxResults",
3703			TruncationToken: "",
3704		},
3705	}
3706
3707	if input == nil {
3708		input = &GetSegmentDetectionInput{}
3709	}
3710
3711	output = &GetSegmentDetectionOutput{}
3712	req = c.newRequest(op, input, output)
3713	return
3714}
3715
3716// GetSegmentDetection API operation for Amazon Rekognition.
3717//
// Gets the segment detection results of an Amazon Rekognition Video analysis
3719// started by StartSegmentDetection.
3720//
3721// Segment detection with Amazon Rekognition Video is an asynchronous operation.
3722// You start segment detection by calling StartSegmentDetection which returns
3723// a job identifier (JobId). When the segment detection operation finishes,
3724// Amazon Rekognition publishes a completion status to the Amazon Simple Notification
3725// Service topic registered in the initial call to StartSegmentDetection. To
// get the results of the segment detection operation, first check that the
// status value published to the Amazon SNS topic is SUCCEEDED. If so, call
// GetSegmentDetection and pass the job identifier (JobId) from the initial
// call to StartSegmentDetection.
3730//
3731// GetSegmentDetection returns detected segments in an array (Segments) of SegmentDetection
3732// objects. Segments is sorted by the segment types specified in the SegmentTypes
3733// input parameter of StartSegmentDetection. Each element of the array includes
// the detected segment, the percentage confidence in the accuracy of the detected
3735// segment, the type of the segment, and the frame in which the segment was
3736// detected.
3737//
3738// Use SelectedSegmentTypes to find out the type of segment detection requested
3739// in the call to StartSegmentDetection.
3740//
3741// Use the MaxResults parameter to limit the number of segment detections returned.
3742// If there are more results than specified in MaxResults, the value of NextToken
3743// in the operation response contains a pagination token for getting the next
3744// set of results. To get the next page of results, call GetSegmentDetection
3745// and populate the NextToken request parameter with the token value returned
3746// from the previous call to GetSegmentDetection.
3747//
3748// For more information, see Detecting Video Segments in Stored Video in the
3749// Amazon Rekognition Developer Guide.
3750//
3751// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
3752// with awserr.Error's Code and Message methods to get detailed information about
3753// the error.
3754//
3755// See the AWS API reference guide for Amazon Rekognition's
3756// API operation GetSegmentDetection for usage and error information.
3757//
3758// Returned Error Types:
3759//   * AccessDeniedException
3760//   You are not authorized to perform the action.
3761//
3762//   * InternalServerError
3763//   Amazon Rekognition experienced a service issue. Try your call again.
3764//
3765//   * InvalidParameterException
3766//   Input parameter violated a constraint. Validate your parameter before calling
3767//   the API operation again.
3768//
3769//   * InvalidPaginationTokenException
3770//   Pagination token in the request is not valid.
3771//
3772//   * ProvisionedThroughputExceededException
3773//   The number of requests exceeded your throughput limit. If you want to increase
3774//   this limit, contact Amazon Rekognition.
3775//
3776//   * ResourceNotFoundException
3777//   The collection specified in the request cannot be found.
3778//
3779//   * ThrottlingException
3780//   Amazon Rekognition is temporarily unable to process the request. Try your
3781//   call again.
3782//
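// A minimal sketch (illustrative only) of reading segment types and their
// start/end times; the job ID is a placeholder and "client" is assumed to be
// an existing Rekognition client.
//
//    out, err := client.GetSegmentDetection(&rekognition.GetSegmentDetectionInput{
//        JobId: aws.String("job-id-from-StartSegmentDetection"),
//    })
//    if err == nil {
//        for _, s := range out.Segments {
//            fmt.Println(aws.StringValue(s.Type),
//                aws.Int64Value(s.StartTimestampMillis),
//                aws.Int64Value(s.EndTimestampMillis))
//        }
//    }
//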
3783func (c *Rekognition) GetSegmentDetection(input *GetSegmentDetectionInput) (*GetSegmentDetectionOutput, error) {
3784	req, out := c.GetSegmentDetectionRequest(input)
3785	return out, req.Send()
3786}
3787
3788// GetSegmentDetectionWithContext is the same as GetSegmentDetection with the addition of
3789// the ability to pass a context and additional request options.
3790//
3791// See GetSegmentDetection for details on how to use this API operation.
3792//
3793// The context must be non-nil and will be used for request cancellation. If
3794// the context is nil a panic will occur. In the future the SDK may create
3795// sub-contexts for http.Requests. See https://golang.org/pkg/context/
3796// for more information on using Contexts.
3797func (c *Rekognition) GetSegmentDetectionWithContext(ctx aws.Context, input *GetSegmentDetectionInput, opts ...request.Option) (*GetSegmentDetectionOutput, error) {
3798	req, out := c.GetSegmentDetectionRequest(input)
3799	req.SetContext(ctx)
3800	req.ApplyOptions(opts...)
3801	return out, req.Send()
3802}
3803
3804// GetSegmentDetectionPages iterates over the pages of a GetSegmentDetection operation,
3805// calling the "fn" function with the response data for each page. To stop
3806// iterating, return false from the fn function.
3807//
3808// See GetSegmentDetection method for more information on how to use this operation.
3809//
3810// Note: This operation can generate multiple requests to a service.
3811//
3812//    // Example iterating over at most 3 pages of a GetSegmentDetection operation.
3813//    pageNum := 0
3814//    err := client.GetSegmentDetectionPages(params,
3815//        func(page *rekognition.GetSegmentDetectionOutput, lastPage bool) bool {
3816//            pageNum++
3817//            fmt.Println(page)
3818//            return pageNum <= 3
3819//        })
3820//
3821func (c *Rekognition) GetSegmentDetectionPages(input *GetSegmentDetectionInput, fn func(*GetSegmentDetectionOutput, bool) bool) error {
3822	return c.GetSegmentDetectionPagesWithContext(aws.BackgroundContext(), input, fn)
3823}
3824
3825// GetSegmentDetectionPagesWithContext same as GetSegmentDetectionPages except
3826// it takes a Context and allows setting request options on the pages.
3827//
3828// The context must be non-nil and will be used for request cancellation. If
3829// the context is nil a panic will occur. In the future the SDK may create
3830// sub-contexts for http.Requests. See https://golang.org/pkg/context/
3831// for more information on using Contexts.
3832func (c *Rekognition) GetSegmentDetectionPagesWithContext(ctx aws.Context, input *GetSegmentDetectionInput, fn func(*GetSegmentDetectionOutput, bool) bool, opts ...request.Option) error {
3833	p := request.Pagination{
3834		NewRequest: func() (*request.Request, error) {
3835			var inCpy *GetSegmentDetectionInput
3836			if input != nil {
3837				tmp := *input
3838				inCpy = &tmp
3839			}
3840			req, _ := c.GetSegmentDetectionRequest(inCpy)
3841			req.SetContext(ctx)
3842			req.ApplyOptions(opts...)
3843			return req, nil
3844		},
3845	}
3846
3847	for p.Next() {
3848		if !fn(p.Page().(*GetSegmentDetectionOutput), !p.HasNextPage()) {
3849			break
3850		}
3851	}
3852
3853	return p.Err()
3854}
3855
3856const opGetTextDetection = "GetTextDetection"
3857
3858// GetTextDetectionRequest generates a "aws/request.Request" representing the
3859// client's request for the GetTextDetection operation. The "output" return
3860// value will be populated with the request's response once the request completes
3861// successfully.
3862//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
3865//
3866// See GetTextDetection for more information on using the GetTextDetection
3867// API call, and error handling.
3868//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
3871//
3872//
3873//    // Example sending a request using the GetTextDetectionRequest method.
3874//    req, resp := client.GetTextDetectionRequest(params)
3875//
3876//    err := req.Send()
3877//    if err == nil { // resp is now filled
3878//        fmt.Println(resp)
3879//    }
3880func (c *Rekognition) GetTextDetectionRequest(input *GetTextDetectionInput) (req *request.Request, output *GetTextDetectionOutput) {
3881	op := &request.Operation{
3882		Name:       opGetTextDetection,
3883		HTTPMethod: "POST",
3884		HTTPPath:   "/",
3885		Paginator: &request.Paginator{
3886			InputTokens:     []string{"NextToken"},
3887			OutputTokens:    []string{"NextToken"},
3888			LimitToken:      "MaxResults",
3889			TruncationToken: "",
3890		},
3891	}
3892
3893	if input == nil {
3894		input = &GetTextDetectionInput{}
3895	}
3896
3897	output = &GetTextDetectionOutput{}
3898	req = c.newRequest(op, input, output)
3899	return
3900}
3901
3902// GetTextDetection API operation for Amazon Rekognition.
3903//
// Gets the text detection results of an Amazon Rekognition Video analysis started
3905// by StartTextDetection.
3906//
3907// Text detection with Amazon Rekognition Video is an asynchronous operation.
// You start text detection by calling StartTextDetection which returns a job
// identifier (JobId). When the text detection operation finishes, Amazon Rekognition
// publishes a completion status to the Amazon Simple Notification Service topic
// registered in the initial call to StartTextDetection. To get the results
// of the text detection operation, first check that the status value published
// to the Amazon SNS topic is SUCCEEDED. If so, call GetTextDetection and pass
// the job identifier (JobId) from the initial call to StartTextDetection.
3915//
3916// GetTextDetection returns an array of detected text (TextDetections) sorted
3917// by the time the text was detected, up to 50 words per frame of video.
3918//
// Each element of the array includes the detected text, the percentage confidence
// in the accuracy of the detected text, the time the text was detected, bounding
3921// box information for where the text was located, and unique identifiers for
3922// words and their lines.
3923//
// Use the MaxResults parameter to limit the number of text detections returned.
3925// If there are more results than specified in MaxResults, the value of NextToken
3926// in the operation response contains a pagination token for getting the next
3927// set of results. To get the next page of results, call GetTextDetection and
3928// populate the NextToken request parameter with the token value returned from
3929// the previous call to GetTextDetection.
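//
// A minimal sketch of collecting every detected word across all pages, using
// the paginated helper below (the JobId value is a placeholder; a real
// identifier comes from StartTextDetection):
//
//    input := &rekognition.GetTextDetectionInput{
//        JobId:      aws.String("example-job-id"), // hypothetical JobId
//        MaxResults: aws.Int64(50),
//    }
//    err := client.GetTextDetectionPages(input,
//        func(page *rekognition.GetTextDetectionOutput, lastPage bool) bool {
//            for _, result := range page.TextDetections {
//                fmt.Println(aws.StringValue(result.TextDetection.DetectedText))
//            }
//            return true // keep paging until NextToken is exhausted
//        })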
3930//
3931// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
3932// with awserr.Error's Code and Message methods to get detailed information about
3933// the error.
3934//
3935// See the AWS API reference guide for Amazon Rekognition's
3936// API operation GetTextDetection for usage and error information.
3937//
3938// Returned Error Types:
3939//   * AccessDeniedException
3940//   You are not authorized to perform the action.
3941//
3942//   * InternalServerError
3943//   Amazon Rekognition experienced a service issue. Try your call again.
3944//
3945//   * InvalidParameterException
3946//   Input parameter violated a constraint. Validate your parameter before calling
3947//   the API operation again.
3948//
3949//   * InvalidPaginationTokenException
3950//   Pagination token in the request is not valid.
3951//
3952//   * ProvisionedThroughputExceededException
3953//   The number of requests exceeded your throughput limit. If you want to increase
3954//   this limit, contact Amazon Rekognition.
3955//
3956//   * ResourceNotFoundException
3957//   The collection specified in the request cannot be found.
3958//
3959//   * ThrottlingException
3960//   Amazon Rekognition is temporarily unable to process the request. Try your
3961//   call again.
3962//
3963func (c *Rekognition) GetTextDetection(input *GetTextDetectionInput) (*GetTextDetectionOutput, error) {
3964	req, out := c.GetTextDetectionRequest(input)
3965	return out, req.Send()
3966}
3967
3968// GetTextDetectionWithContext is the same as GetTextDetection with the addition of
3969// the ability to pass a context and additional request options.
3970//
3971// See GetTextDetection for details on how to use this API operation.
3972//
3973// The context must be non-nil and will be used for request cancellation. If
3974// the context is nil a panic will occur. In the future the SDK may create
3975// sub-contexts for http.Requests. See https://golang.org/pkg/context/
3976// for more information on using Contexts.
3977func (c *Rekognition) GetTextDetectionWithContext(ctx aws.Context, input *GetTextDetectionInput, opts ...request.Option) (*GetTextDetectionOutput, error) {
3978	req, out := c.GetTextDetectionRequest(input)
3979	req.SetContext(ctx)
3980	req.ApplyOptions(opts...)
3981	return out, req.Send()
3982}
3983
3984// GetTextDetectionPages iterates over the pages of a GetTextDetection operation,
3985// calling the "fn" function with the response data for each page. To stop
3986// iterating, return false from the fn function.
3987//
3988// See GetTextDetection method for more information on how to use this operation.
3989//
3990// Note: This operation can generate multiple requests to a service.
3991//
3992//    // Example iterating over at most 3 pages of a GetTextDetection operation.
3993//    pageNum := 0
3994//    err := client.GetTextDetectionPages(params,
3995//        func(page *rekognition.GetTextDetectionOutput, lastPage bool) bool {
3996//            pageNum++
3997//            fmt.Println(page)
3998//            return pageNum <= 3
3999//        })
4000//
4001func (c *Rekognition) GetTextDetectionPages(input *GetTextDetectionInput, fn func(*GetTextDetectionOutput, bool) bool) error {
4002	return c.GetTextDetectionPagesWithContext(aws.BackgroundContext(), input, fn)
4003}
4004
4005// GetTextDetectionPagesWithContext same as GetTextDetectionPages except
4006// it takes a Context and allows setting request options on the pages.
4007//
4008// The context must be non-nil and will be used for request cancellation. If
4009// the context is nil a panic will occur. In the future the SDK may create
4010// sub-contexts for http.Requests. See https://golang.org/pkg/context/
4011// for more information on using Contexts.
4012func (c *Rekognition) GetTextDetectionPagesWithContext(ctx aws.Context, input *GetTextDetectionInput, fn func(*GetTextDetectionOutput, bool) bool, opts ...request.Option) error {
4013	p := request.Pagination{
4014		NewRequest: func() (*request.Request, error) {
4015			var inCpy *GetTextDetectionInput
4016			if input != nil {
4017				tmp := *input
4018				inCpy = &tmp
4019			}
4020			req, _ := c.GetTextDetectionRequest(inCpy)
4021			req.SetContext(ctx)
4022			req.ApplyOptions(opts...)
4023			return req, nil
4024		},
4025	}
4026
4027	for p.Next() {
4028		if !fn(p.Page().(*GetTextDetectionOutput), !p.HasNextPage()) {
4029			break
4030		}
4031	}
4032
4033	return p.Err()
4034}
4035
4036const opIndexFaces = "IndexFaces"
4037
4038// IndexFacesRequest generates a "aws/request.Request" representing the
4039// client's request for the IndexFaces operation. The "output" return
4040// value will be populated with the request's response once the request completes
4041// successfully.
4042//
4043// Use "Send" method on the returned Request to send the API call to the service.
4044// the "output" return value is not valid until after Send returns without error.
4045//
4046// See IndexFaces for more information on using the IndexFaces
4047// API call, and error handling.
4048//
4049// This method is useful when you want to inject custom logic or configuration
4050// into the SDK's request lifecycle. Such as custom headers, or retry logic.
4051//
4052//
4053//    // Example sending a request using the IndexFacesRequest method.
4054//    req, resp := client.IndexFacesRequest(params)
4055//
4056//    err := req.Send()
4057//    if err == nil { // resp is now filled
4058//        fmt.Println(resp)
4059//    }
4060func (c *Rekognition) IndexFacesRequest(input *IndexFacesInput) (req *request.Request, output *IndexFacesOutput) {
4061	op := &request.Operation{
4062		Name:       opIndexFaces,
4063		HTTPMethod: "POST",
4064		HTTPPath:   "/",
4065	}
4066
4067	if input == nil {
4068		input = &IndexFacesInput{}
4069	}
4070
4071	output = &IndexFacesOutput{}
4072	req = c.newRequest(op, input, output)
4073	return
4074}
4075
4076// IndexFaces API operation for Amazon Rekognition.
4077//
4078// Detects faces in the input image and adds them to the specified collection.
4079//
4080// Amazon Rekognition doesn't save the actual faces that are detected. Instead,
4081// the underlying detection algorithm first detects the faces in the input image.
4082// For each face, the algorithm extracts facial features into a feature vector,
4083// and stores it in the backend database. Amazon Rekognition uses feature vectors
4084// when it performs face match and search operations using the SearchFaces and
4085// SearchFacesByImage operations.
4086//
4087// For more information, see Adding Faces to a Collection in the Amazon Rekognition
4088// Developer Guide.
4089//
4090// To get the number of faces in a collection, call DescribeCollection.
4091//
4092// If you're using version 1.0 of the face detection model, IndexFaces indexes
4093// the 15 largest faces in the input image. Later versions of the face detection
4094// model index the 100 largest faces in the input image.
4095//
4096// If you're using version 4 or later of the face model, image orientation information
4097// is not returned in the OrientationCorrection field.
4098//
4099// To determine which version of the model you're using, call DescribeCollection
4100// and supply the collection ID. You can also get the model version from the
// value of FaceModelVersion in the response from IndexFaces.
4102//
4103// For more information, see Model Versioning in the Amazon Rekognition Developer
4104// Guide.
4105//
// If you provide the optional ExternalImageId for the input image,
4107// Amazon Rekognition associates this ID with all faces that it detects. When
4108// you call the ListFaces operation, the response returns the external ID. You
4109// can use this external image ID to create a client-side index to associate
4110// the faces with each image. You can then use the index to find all faces in
4111// an image.
4112//
4113// You can specify the maximum number of faces to index with the MaxFaces input
4114// parameter. This is useful when you want to index the largest faces in an
4115// image and don't want to index smaller faces, such as those belonging to people
4116// standing in the background.
4117//
4118// The QualityFilter input parameter allows you to filter out detected faces
4119// that don’t meet a required quality bar. The quality bar is based on a variety
4120// of common use cases. By default, IndexFaces chooses the quality bar that's
4121// used to filter faces. You can also explicitly choose the quality bar. Use
// QualityFilter to set the quality bar by specifying LOW, MEDIUM, or HIGH.
4123// If you do not want to filter detected faces, specify NONE.
4124//
4125// To use quality filtering, you need a collection associated with version 3
4126// of the face model or higher. To get the version of the face model associated
4127// with a collection, call DescribeCollection.
4128//
4129// Information about faces detected in an image, but not indexed, is returned
4130// in an array of UnindexedFace objects, UnindexedFaces. Faces aren't indexed
4131// for reasons such as:
4132//
4133//    * The number of faces detected exceeds the value of the MaxFaces request
4134//    parameter.
4135//
4136//    * The face is too small compared to the image dimensions.
4137//
4138//    * The face is too blurry.
4139//
4140//    * The image is too dark.
4141//
4142//    * The face has an extreme pose.
4143//
4144//    * The face doesn’t have enough detail to be suitable for face search.
4145//
4146// In response, the IndexFaces operation returns an array of metadata for all
4147// detected faces, FaceRecords. This includes:
4148//
4149//    * The bounding box, BoundingBox, of the detected face.
4150//
4151//    * A confidence value, Confidence, which indicates the confidence that
4152//    the bounding box contains a face.
4153//
4154//    * A face ID, FaceId, assigned by the service for each face that's detected
4155//    and stored.
4156//
4157//    * An image ID, ImageId, assigned by the service for the input image.
4158//
4159// If you request all facial attributes (by using the detectionAttributes parameter),
4160// Amazon Rekognition returns detailed facial attributes, such as facial landmarks
4161// (for example, location of eye and mouth) and other facial attributes. If
4162// you provide the same image, specify the same collection, and use the same
4163// external ID in the IndexFaces operation, Amazon Rekognition doesn't save
4164// duplicate face metadata.
4165//
4166// The input image is passed either as base64-encoded image bytes, or as a reference
4167// to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon
4168// Rekognition operations, passing image bytes isn't supported. The image must
4169// be formatted as a PNG or JPEG file.
4170//
4171// This operation requires permissions to perform the rekognition:IndexFaces
4172// action.
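//
// A minimal sketch of indexing faces from an S3 object (the bucket, key,
// collection ID, and external image ID below are placeholders):
//
//    out, err := client.IndexFaces(&rekognition.IndexFacesInput{
//        CollectionId:    aws.String("my-collection"), // hypothetical
//        ExternalImageId: aws.String("photo-01"),      // hypothetical
//        MaxFaces:        aws.Int64(5),
//        QualityFilter:   aws.String("HIGH"),
//        Image: &rekognition.Image{
//            S3Object: &rekognition.S3Object{
//                Bucket: aws.String("my-bucket"),
//                Name:   aws.String("photo.jpg"),
//            },
//        },
//    })
//    if err == nil {
//        fmt.Printf("indexed %d faces\n", len(out.FaceRecords))
//    }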
4173//
4174// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
4175// with awserr.Error's Code and Message methods to get detailed information about
4176// the error.
4177//
4178// See the AWS API reference guide for Amazon Rekognition's
4179// API operation IndexFaces for usage and error information.
4180//
4181// Returned Error Types:
4182//   * InvalidS3ObjectException
4183//   Amazon Rekognition is unable to access the S3 object specified in the request.
4184//
4185//   * InvalidParameterException
4186//   Input parameter violated a constraint. Validate your parameter before calling
4187//   the API operation again.
4188//
4189//   * ImageTooLargeException
4190//   The input image size exceeds the allowed limit. For more information, see
4191//   Limits in Amazon Rekognition in the Amazon Rekognition Developer Guide.
4192//
4193//   * AccessDeniedException
4194//   You are not authorized to perform the action.
4195//
4196//   * InternalServerError
4197//   Amazon Rekognition experienced a service issue. Try your call again.
4198//
4199//   * ThrottlingException
4200//   Amazon Rekognition is temporarily unable to process the request. Try your
4201//   call again.
4202//
4203//   * ProvisionedThroughputExceededException
4204//   The number of requests exceeded your throughput limit. If you want to increase
4205//   this limit, contact Amazon Rekognition.
4206//
4207//   * ResourceNotFoundException
4208//   The collection specified in the request cannot be found.
4209//
4210//   * InvalidImageFormatException
4211//   The provided image format is not supported.
4212//
4213//   * ServiceQuotaExceededException
4214//   The size of the collection exceeds the allowed limit. For more information,
4215//   see Limits in Amazon Rekognition in the Amazon Rekognition Developer Guide.
4216//
4217func (c *Rekognition) IndexFaces(input *IndexFacesInput) (*IndexFacesOutput, error) {
4218	req, out := c.IndexFacesRequest(input)
4219	return out, req.Send()
4220}
4221
4222// IndexFacesWithContext is the same as IndexFaces with the addition of
4223// the ability to pass a context and additional request options.
4224//
4225// See IndexFaces for details on how to use this API operation.
4226//
4227// The context must be non-nil and will be used for request cancellation. If
4228// the context is nil a panic will occur. In the future the SDK may create
4229// sub-contexts for http.Requests. See https://golang.org/pkg/context/
4230// for more information on using Contexts.
4231func (c *Rekognition) IndexFacesWithContext(ctx aws.Context, input *IndexFacesInput, opts ...request.Option) (*IndexFacesOutput, error) {
4232	req, out := c.IndexFacesRequest(input)
4233	req.SetContext(ctx)
4234	req.ApplyOptions(opts...)
4235	return out, req.Send()
4236}
4237
4238const opListCollections = "ListCollections"
4239
4240// ListCollectionsRequest generates a "aws/request.Request" representing the
4241// client's request for the ListCollections operation. The "output" return
4242// value will be populated with the request's response once the request completes
4243// successfully.
4244//
4245// Use "Send" method on the returned Request to send the API call to the service.
4246// the "output" return value is not valid until after Send returns without error.
4247//
4248// See ListCollections for more information on using the ListCollections
4249// API call, and error handling.
4250//
4251// This method is useful when you want to inject custom logic or configuration
4252// into the SDK's request lifecycle. Such as custom headers, or retry logic.
4253//
4254//
4255//    // Example sending a request using the ListCollectionsRequest method.
4256//    req, resp := client.ListCollectionsRequest(params)
4257//
4258//    err := req.Send()
4259//    if err == nil { // resp is now filled
4260//        fmt.Println(resp)
4261//    }
4262func (c *Rekognition) ListCollectionsRequest(input *ListCollectionsInput) (req *request.Request, output *ListCollectionsOutput) {
4263	op := &request.Operation{
4264		Name:       opListCollections,
4265		HTTPMethod: "POST",
4266		HTTPPath:   "/",
4267		Paginator: &request.Paginator{
4268			InputTokens:     []string{"NextToken"},
4269			OutputTokens:    []string{"NextToken"},
4270			LimitToken:      "MaxResults",
4271			TruncationToken: "",
4272		},
4273	}
4274
4275	if input == nil {
4276		input = &ListCollectionsInput{}
4277	}
4278
4279	output = &ListCollectionsOutput{}
4280	req = c.newRequest(op, input, output)
4281	return
4282}
4283
4284// ListCollections API operation for Amazon Rekognition.
4285//
// Returns a list of collection IDs in your account. If the result is truncated,
4287// the response also provides a NextToken that you can use in the subsequent
4288// request to fetch the next set of collection IDs.
4289//
4290// For an example, see Listing Collections in the Amazon Rekognition Developer
4291// Guide.
4292//
4293// This operation requires permissions to perform the rekognition:ListCollections
4294// action.
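//
// A minimal sketch of printing every collection ID with the paginated helper
// (no placeholders are needed beyond an initialized client):
//
//    err := client.ListCollectionsPages(&rekognition.ListCollectionsInput{},
//        func(page *rekognition.ListCollectionsOutput, lastPage bool) bool {
//            for _, id := range page.CollectionIds {
//                fmt.Println(aws.StringValue(id))
//            }
//            return true // continue until all pages are consumed
//        })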
4295//
4296// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
4297// with awserr.Error's Code and Message methods to get detailed information about
4298// the error.
4299//
4300// See the AWS API reference guide for Amazon Rekognition's
4301// API operation ListCollections for usage and error information.
4302//
4303// Returned Error Types:
4304//   * InvalidParameterException
4305//   Input parameter violated a constraint. Validate your parameter before calling
4306//   the API operation again.
4307//
4308//   * AccessDeniedException
4309//   You are not authorized to perform the action.
4310//
4311//   * InternalServerError
4312//   Amazon Rekognition experienced a service issue. Try your call again.
4313//
4314//   * ThrottlingException
4315//   Amazon Rekognition is temporarily unable to process the request. Try your
4316//   call again.
4317//
4318//   * ProvisionedThroughputExceededException
4319//   The number of requests exceeded your throughput limit. If you want to increase
4320//   this limit, contact Amazon Rekognition.
4321//
4322//   * InvalidPaginationTokenException
4323//   Pagination token in the request is not valid.
4324//
4325//   * ResourceNotFoundException
4326//   The collection specified in the request cannot be found.
4327//
4328func (c *Rekognition) ListCollections(input *ListCollectionsInput) (*ListCollectionsOutput, error) {
4329	req, out := c.ListCollectionsRequest(input)
4330	return out, req.Send()
4331}
4332
4333// ListCollectionsWithContext is the same as ListCollections with the addition of
4334// the ability to pass a context and additional request options.
4335//
4336// See ListCollections for details on how to use this API operation.
4337//
4338// The context must be non-nil and will be used for request cancellation. If
4339// the context is nil a panic will occur. In the future the SDK may create
4340// sub-contexts for http.Requests. See https://golang.org/pkg/context/
4341// for more information on using Contexts.
4342func (c *Rekognition) ListCollectionsWithContext(ctx aws.Context, input *ListCollectionsInput, opts ...request.Option) (*ListCollectionsOutput, error) {
4343	req, out := c.ListCollectionsRequest(input)
4344	req.SetContext(ctx)
4345	req.ApplyOptions(opts...)
4346	return out, req.Send()
4347}
4348
4349// ListCollectionsPages iterates over the pages of a ListCollections operation,
4350// calling the "fn" function with the response data for each page. To stop
4351// iterating, return false from the fn function.
4352//
4353// See ListCollections method for more information on how to use this operation.
4354//
4355// Note: This operation can generate multiple requests to a service.
4356//
4357//    // Example iterating over at most 3 pages of a ListCollections operation.
4358//    pageNum := 0
4359//    err := client.ListCollectionsPages(params,
4360//        func(page *rekognition.ListCollectionsOutput, lastPage bool) bool {
4361//            pageNum++
4362//            fmt.Println(page)
4363//            return pageNum <= 3
4364//        })
4365//
4366func (c *Rekognition) ListCollectionsPages(input *ListCollectionsInput, fn func(*ListCollectionsOutput, bool) bool) error {
4367	return c.ListCollectionsPagesWithContext(aws.BackgroundContext(), input, fn)
4368}
4369
4370// ListCollectionsPagesWithContext same as ListCollectionsPages except
4371// it takes a Context and allows setting request options on the pages.
4372//
4373// The context must be non-nil and will be used for request cancellation. If
4374// the context is nil a panic will occur. In the future the SDK may create
4375// sub-contexts for http.Requests. See https://golang.org/pkg/context/
4376// for more information on using Contexts.
4377func (c *Rekognition) ListCollectionsPagesWithContext(ctx aws.Context, input *ListCollectionsInput, fn func(*ListCollectionsOutput, bool) bool, opts ...request.Option) error {
4378	p := request.Pagination{
4379		NewRequest: func() (*request.Request, error) {
4380			var inCpy *ListCollectionsInput
4381			if input != nil {
4382				tmp := *input
4383				inCpy = &tmp
4384			}
4385			req, _ := c.ListCollectionsRequest(inCpy)
4386			req.SetContext(ctx)
4387			req.ApplyOptions(opts...)
4388			return req, nil
4389		},
4390	}
4391
4392	for p.Next() {
4393		if !fn(p.Page().(*ListCollectionsOutput), !p.HasNextPage()) {
4394			break
4395		}
4396	}
4397
4398	return p.Err()
4399}
4400
4401const opListFaces = "ListFaces"
4402
4403// ListFacesRequest generates a "aws/request.Request" representing the
4404// client's request for the ListFaces operation. The "output" return
4405// value will be populated with the request's response once the request completes
4406// successfully.
4407//
4408// Use "Send" method on the returned Request to send the API call to the service.
4409// the "output" return value is not valid until after Send returns without error.
4410//
4411// See ListFaces for more information on using the ListFaces
4412// API call, and error handling.
4413//
4414// This method is useful when you want to inject custom logic or configuration
4415// into the SDK's request lifecycle. Such as custom headers, or retry logic.
4416//
4417//
4418//    // Example sending a request using the ListFacesRequest method.
4419//    req, resp := client.ListFacesRequest(params)
4420//
4421//    err := req.Send()
4422//    if err == nil { // resp is now filled
4423//        fmt.Println(resp)
4424//    }
4425func (c *Rekognition) ListFacesRequest(input *ListFacesInput) (req *request.Request, output *ListFacesOutput) {
4426	op := &request.Operation{
4427		Name:       opListFaces,
4428		HTTPMethod: "POST",
4429		HTTPPath:   "/",
4430		Paginator: &request.Paginator{
4431			InputTokens:     []string{"NextToken"},
4432			OutputTokens:    []string{"NextToken"},
4433			LimitToken:      "MaxResults",
4434			TruncationToken: "",
4435		},
4436	}
4437
4438	if input == nil {
4439		input = &ListFacesInput{}
4440	}
4441
4442	output = &ListFacesOutput{}
4443	req = c.newRequest(op, input, output)
4444	return
4445}
4446
4447// ListFaces API operation for Amazon Rekognition.
4448//
4449// Returns metadata for faces in the specified collection. This metadata includes
4450// information such as the bounding box coordinates, the confidence (that the
4451// bounding box contains a face), and face ID. For an example, see Listing Faces
4452// in a Collection in the Amazon Rekognition Developer Guide.
4453//
4454// This operation requires permissions to perform the rekognition:ListFaces
4455// action.
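//
// A minimal sketch of listing the face IDs stored in a collection (the
// collection ID is a placeholder):
//
//    err := client.ListFacesPages(&rekognition.ListFacesInput{
//        CollectionId: aws.String("my-collection"), // hypothetical
//    }, func(page *rekognition.ListFacesOutput, lastPage bool) bool {
//        for _, face := range page.Faces {
//            fmt.Println(aws.StringValue(face.FaceId))
//        }
//        return true
//    })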
4456//
4457// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
4458// with awserr.Error's Code and Message methods to get detailed information about
4459// the error.
4460//
4461// See the AWS API reference guide for Amazon Rekognition's
4462// API operation ListFaces for usage and error information.
4463//
4464// Returned Error Types:
4465//   * InvalidParameterException
4466//   Input parameter violated a constraint. Validate your parameter before calling
4467//   the API operation again.
4468//
4469//   * AccessDeniedException
4470//   You are not authorized to perform the action.
4471//
4472//   * InternalServerError
4473//   Amazon Rekognition experienced a service issue. Try your call again.
4474//
4475//   * ThrottlingException
4476//   Amazon Rekognition is temporarily unable to process the request. Try your
4477//   call again.
4478//
4479//   * ProvisionedThroughputExceededException
4480//   The number of requests exceeded your throughput limit. If you want to increase
4481//   this limit, contact Amazon Rekognition.
4482//
4483//   * InvalidPaginationTokenException
4484//   Pagination token in the request is not valid.
4485//
4486//   * ResourceNotFoundException
4487//   The collection specified in the request cannot be found.
4488//
4489func (c *Rekognition) ListFaces(input *ListFacesInput) (*ListFacesOutput, error) {
4490	req, out := c.ListFacesRequest(input)
4491	return out, req.Send()
4492}
4493
4494// ListFacesWithContext is the same as ListFaces with the addition of
4495// the ability to pass a context and additional request options.
4496//
4497// See ListFaces for details on how to use this API operation.
4498//
4499// The context must be non-nil and will be used for request cancellation. If
4500// the context is nil a panic will occur. In the future the SDK may create
4501// sub-contexts for http.Requests. See https://golang.org/pkg/context/
4502// for more information on using Contexts.
4503func (c *Rekognition) ListFacesWithContext(ctx aws.Context, input *ListFacesInput, opts ...request.Option) (*ListFacesOutput, error) {
4504	req, out := c.ListFacesRequest(input)
4505	req.SetContext(ctx)
4506	req.ApplyOptions(opts...)
4507	return out, req.Send()
4508}
4509
4510// ListFacesPages iterates over the pages of a ListFaces operation,
4511// calling the "fn" function with the response data for each page. To stop
4512// iterating, return false from the fn function.
4513//
4514// See ListFaces method for more information on how to use this operation.
4515//
4516// Note: This operation can generate multiple requests to a service.
4517//
4518//    // Example iterating over at most 3 pages of a ListFaces operation.
4519//    pageNum := 0
4520//    err := client.ListFacesPages(params,
4521//        func(page *rekognition.ListFacesOutput, lastPage bool) bool {
4522//            pageNum++
4523//            fmt.Println(page)
4524//            return pageNum <= 3
4525//        })
4526//
4527func (c *Rekognition) ListFacesPages(input *ListFacesInput, fn func(*ListFacesOutput, bool) bool) error {
4528	return c.ListFacesPagesWithContext(aws.BackgroundContext(), input, fn)
4529}
4530
4531// ListFacesPagesWithContext same as ListFacesPages except
4532// it takes a Context and allows setting request options on the pages.
4533//
4534// The context must be non-nil and will be used for request cancellation. If
4535// the context is nil a panic will occur. In the future the SDK may create
4536// sub-contexts for http.Requests. See https://golang.org/pkg/context/
4537// for more information on using Contexts.
4538func (c *Rekognition) ListFacesPagesWithContext(ctx aws.Context, input *ListFacesInput, fn func(*ListFacesOutput, bool) bool, opts ...request.Option) error {
4539	p := request.Pagination{
4540		NewRequest: func() (*request.Request, error) {
4541			var inCpy *ListFacesInput
4542			if input != nil {
4543				tmp := *input
4544				inCpy = &tmp
4545			}
4546			req, _ := c.ListFacesRequest(inCpy)
4547			req.SetContext(ctx)
4548			req.ApplyOptions(opts...)
4549			return req, nil
4550		},
4551	}
4552
4553	for p.Next() {
4554		if !fn(p.Page().(*ListFacesOutput), !p.HasNextPage()) {
4555			break
4556		}
4557	}
4558
4559	return p.Err()
4560}
4561
4562const opListStreamProcessors = "ListStreamProcessors"
4563
4564// ListStreamProcessorsRequest generates a "aws/request.Request" representing the
4565// client's request for the ListStreamProcessors operation. The "output" return
4566// value will be populated with the request's response once the request completes
4567// successfully.
4568//
4569// Use "Send" method on the returned Request to send the API call to the service.
4570// the "output" return value is not valid until after Send returns without error.
4571//
4572// See ListStreamProcessors for more information on using the ListStreamProcessors
4573// API call, and error handling.
4574//
4575// This method is useful when you want to inject custom logic or configuration
4576// into the SDK's request lifecycle. Such as custom headers, or retry logic.
4577//
4578//
4579//    // Example sending a request using the ListStreamProcessorsRequest method.
4580//    req, resp := client.ListStreamProcessorsRequest(params)
4581//
4582//    err := req.Send()
4583//    if err == nil { // resp is now filled
4584//        fmt.Println(resp)
4585//    }
4586func (c *Rekognition) ListStreamProcessorsRequest(input *ListStreamProcessorsInput) (req *request.Request, output *ListStreamProcessorsOutput) {
4587	op := &request.Operation{
4588		Name:       opListStreamProcessors,
4589		HTTPMethod: "POST",
4590		HTTPPath:   "/",
4591		Paginator: &request.Paginator{
4592			InputTokens:     []string{"NextToken"},
4593			OutputTokens:    []string{"NextToken"},
4594			LimitToken:      "MaxResults",
4595			TruncationToken: "",
4596		},
4597	}
4598
4599	if input == nil {
4600		input = &ListStreamProcessorsInput{}
4601	}
4602
4603	output = &ListStreamProcessorsOutput{}
4604	req = c.newRequest(op, input, output)
4605	return
4606}
4607
4608// ListStreamProcessors API operation for Amazon Rekognition.
4609//
4610// Gets a list of stream processors that you have created with CreateStreamProcessor.
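//
// A minimal sketch of listing stream processors with a request timeout,
// using the context-aware pagination helper (aws.Context is satisfied by a
// standard context.Context):
//
//    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
//    defer cancel()
//    err := client.ListStreamProcessorsPagesWithContext(ctx,
//        &rekognition.ListStreamProcessorsInput{},
//        func(page *rekognition.ListStreamProcessorsOutput, lastPage bool) bool {
//            for _, sp := range page.StreamProcessors {
//                fmt.Println(aws.StringValue(sp.Name), aws.StringValue(sp.Status))
//            }
//            return true
//        })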
4611//
4612// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
4613// with awserr.Error's Code and Message methods to get detailed information about
4614// the error.
4615//
4616// See the AWS API reference guide for Amazon Rekognition's
4617// API operation ListStreamProcessors for usage and error information.
4618//
4619// Returned Error Types:
4620//   * AccessDeniedException
4621//   You are not authorized to perform the action.
4622//
4623//   * InternalServerError
4624//   Amazon Rekognition experienced a service issue. Try your call again.
4625//
4626//   * ThrottlingException
4627//   Amazon Rekognition is temporarily unable to process the request. Try your
4628//   call again.
4629//
4630//   * InvalidParameterException
4631//   Input parameter violated a constraint. Validate your parameter before calling
4632//   the API operation again.
4633//
4634//   * InvalidPaginationTokenException
4635//   Pagination token in the request is not valid.
4636//
4637//   * ProvisionedThroughputExceededException
4638//   The number of requests exceeded your throughput limit. If you want to increase
4639//   this limit, contact Amazon Rekognition.
4640//
4641func (c *Rekognition) ListStreamProcessors(input *ListStreamProcessorsInput) (*ListStreamProcessorsOutput, error) {
4642	req, out := c.ListStreamProcessorsRequest(input)
4643	return out, req.Send()
4644}
4645
4646// ListStreamProcessorsWithContext is the same as ListStreamProcessors with the addition of
4647// the ability to pass a context and additional request options.
4648//
4649// See ListStreamProcessors for details on how to use this API operation.
4650//
4651// The context must be non-nil and will be used for request cancellation. If
4652// the context is nil a panic will occur. In the future the SDK may create
4653// sub-contexts for http.Requests. See https://golang.org/pkg/context/
4654// for more information on using Contexts.
4655func (c *Rekognition) ListStreamProcessorsWithContext(ctx aws.Context, input *ListStreamProcessorsInput, opts ...request.Option) (*ListStreamProcessorsOutput, error) {
4656	req, out := c.ListStreamProcessorsRequest(input)
4657	req.SetContext(ctx)
4658	req.ApplyOptions(opts...)
4659	return out, req.Send()
4660}
4661
4662// ListStreamProcessorsPages iterates over the pages of a ListStreamProcessors operation,
4663// calling the "fn" function with the response data for each page. To stop
4664// iterating, return false from the fn function.
4665//
4666// See ListStreamProcessors method for more information on how to use this operation.
4667//
4668// Note: This operation can generate multiple requests to a service.
4669//
4670//    // Example iterating over at most 3 pages of a ListStreamProcessors operation.
4671//    pageNum := 0
4672//    err := client.ListStreamProcessorsPages(params,
4673//        func(page *rekognition.ListStreamProcessorsOutput, lastPage bool) bool {
4674//            pageNum++
4675//            fmt.Println(page)
4676//            return pageNum <= 3
4677//        })
4678//
4679func (c *Rekognition) ListStreamProcessorsPages(input *ListStreamProcessorsInput, fn func(*ListStreamProcessorsOutput, bool) bool) error {
4680	return c.ListStreamProcessorsPagesWithContext(aws.BackgroundContext(), input, fn)
4681}
4682
4683// ListStreamProcessorsPagesWithContext same as ListStreamProcessorsPages except
4684// it takes a Context and allows setting request options on the pages.
4685//
4686// The context must be non-nil and will be used for request cancellation. If
4687// the context is nil a panic will occur. In the future the SDK may create
4688// sub-contexts for http.Requests. See https://golang.org/pkg/context/
4689// for more information on using Contexts.
4690func (c *Rekognition) ListStreamProcessorsPagesWithContext(ctx aws.Context, input *ListStreamProcessorsInput, fn func(*ListStreamProcessorsOutput, bool) bool, opts ...request.Option) error {
4691	p := request.Pagination{
4692		NewRequest: func() (*request.Request, error) {
4693			var inCpy *ListStreamProcessorsInput
4694			if input != nil {
4695				tmp := *input
4696				inCpy = &tmp
4697			}
4698			req, _ := c.ListStreamProcessorsRequest(inCpy)
4699			req.SetContext(ctx)
4700			req.ApplyOptions(opts...)
4701			return req, nil
4702		},
4703	}
4704
4705	for p.Next() {
4706		if !fn(p.Page().(*ListStreamProcessorsOutput), !p.HasNextPage()) {
4707			break
4708		}
4709	}
4710
4711	return p.Err()
4712}
4713
4714const opRecognizeCelebrities = "RecognizeCelebrities"
4715
4716// RecognizeCelebritiesRequest generates a "aws/request.Request" representing the
4717// client's request for the RecognizeCelebrities operation. The "output" return
4718// value will be populated with the request's response once the request completes
4719// successfully.
4720//
4721// Use "Send" method on the returned Request to send the API call to the service.
4722// the "output" return value is not valid until after Send returns without error.
4723//
4724// See RecognizeCelebrities for more information on using the RecognizeCelebrities
4725// API call, and error handling.
4726//
4727// This method is useful when you want to inject custom logic or configuration
4728// into the SDK's request lifecycle. Such as custom headers, or retry logic.
4729//
4730//
4731//    // Example sending a request using the RecognizeCelebritiesRequest method.
4732//    req, resp := client.RecognizeCelebritiesRequest(params)
4733//
4734//    err := req.Send()
4735//    if err == nil { // resp is now filled
4736//        fmt.Println(resp)
4737//    }
4738func (c *Rekognition) RecognizeCelebritiesRequest(input *RecognizeCelebritiesInput) (req *request.Request, output *RecognizeCelebritiesOutput) {
4739	op := &request.Operation{
4740		Name:       opRecognizeCelebrities,
4741		HTTPMethod: "POST",
4742		HTTPPath:   "/",
4743	}
4744
4745	if input == nil {
4746		input = &RecognizeCelebritiesInput{}
4747	}
4748
4749	output = &RecognizeCelebritiesOutput{}
4750	req = c.newRequest(op, input, output)
4751	return
4752}
4753
4754// RecognizeCelebrities API operation for Amazon Rekognition.
4755//
4756// Returns an array of celebrities recognized in the input image. For more information,
4757// see Recognizing Celebrities in the Amazon Rekognition Developer Guide.
4758//
4759// RecognizeCelebrities returns the 64 largest faces in the image. It lists
4760// recognized celebrities in the CelebrityFaces array and unrecognized faces
4761// in the UnrecognizedFaces array. RecognizeCelebrities doesn't return celebrities
4762// whose faces aren't among the largest 64 faces in the image.
4763//
4764// For each celebrity recognized, RecognizeCelebrities returns a Celebrity object.
4765// The Celebrity object contains the celebrity name, ID, URL links to additional
4766// information, match confidence, and a ComparedFace object that you can use
4767// to locate the celebrity's face on the image.
4768//
4769// Amazon Rekognition doesn't retain information about which images a celebrity
4770// has been recognized in. Your application must store this information and
4771// use the Celebrity ID property as a unique identifier for the celebrity. If
4772// you don't store the celebrity name or additional information URLs returned
4773// by RecognizeCelebrities, you will need the ID to identify the celebrity in
4774// a call to the GetCelebrityInfo operation.
4775//
4776// You pass the input image either as base64-encoded image bytes or as a reference
4777// to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon
4778// Rekognition operations, passing image bytes is not supported. The image must
4779// be either a PNG or JPEG formatted file.
4780//
4781// For an example, see Recognizing Celebrities in an Image in the Amazon Rekognition
4782// Developer Guide.
4783//
4784// This operation requires permissions to perform the rekognition:RecognizeCelebrities
4785// operation.
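//
// A minimal sketch of recognizing celebrities in an S3-hosted image (the
// bucket and key are placeholders):
//
//    out, err := client.RecognizeCelebrities(&rekognition.RecognizeCelebritiesInput{
//        Image: &rekognition.Image{
//            S3Object: &rekognition.S3Object{
//                Bucket: aws.String("my-bucket"), // hypothetical
//                Name:   aws.String("red-carpet.jpg"),
//            },
//        },
//    })
//    if err == nil {
//        for _, celeb := range out.CelebrityFaces {
//            fmt.Println(aws.StringValue(celeb.Name), aws.StringValue(celeb.Id))
//        }
//    }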
4786//
4787// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
4788// with awserr.Error's Code and Message methods to get detailed information about
4789// the error.
4790//
4791// See the AWS API reference guide for Amazon Rekognition's
4792// API operation RecognizeCelebrities for usage and error information.
4793//
4794// Returned Error Types:
4795//   * InvalidS3ObjectException
4796//   Amazon Rekognition is unable to access the S3 object specified in the request.
4797//
4798//   * InvalidParameterException
4799//   Input parameter violated a constraint. Validate your parameter before calling
4800//   the API operation again.
4801//
4802//   * InvalidImageFormatException
4803//   The provided image format is not supported.
4804//
4805//   * ImageTooLargeException
4806//   The input image size exceeds the allowed limit. For more information, see
4807//   Limits in Amazon Rekognition in the Amazon Rekognition Developer Guide.
4808//
4809//   * AccessDeniedException
4810//   You are not authorized to perform the action.
4811//
4812//   * InternalServerError
4813//   Amazon Rekognition experienced a service issue. Try your call again.
4814//
4815//   * ThrottlingException
4816//   Amazon Rekognition is temporarily unable to process the request. Try your
4817//   call again.
4818//
4819//   * ProvisionedThroughputExceededException
4820//   The number of requests exceeded your throughput limit. If you want to increase
4821//   this limit, contact Amazon Rekognition.
4825//
4826func (c *Rekognition) RecognizeCelebrities(input *RecognizeCelebritiesInput) (*RecognizeCelebritiesOutput, error) {
4827	req, out := c.RecognizeCelebritiesRequest(input)
4828	return out, req.Send()
4829}
4830
4831// RecognizeCelebritiesWithContext is the same as RecognizeCelebrities with the addition of
4832// the ability to pass a context and additional request options.
4833//
4834// See RecognizeCelebrities for details on how to use this API operation.
4835//
4836// The context must be non-nil and will be used for request cancellation. If
4837// the context is nil a panic will occur. In the future the SDK may create
4838// sub-contexts for http.Requests. See https://golang.org/pkg/context/
4839// for more information on using Contexts.
4840func (c *Rekognition) RecognizeCelebritiesWithContext(ctx aws.Context, input *RecognizeCelebritiesInput, opts ...request.Option) (*RecognizeCelebritiesOutput, error) {
4841	req, out := c.RecognizeCelebritiesRequest(input)
4842	req.SetContext(ctx)
4843	req.ApplyOptions(opts...)
4844	return out, req.Send()
4845}
4846
4847const opSearchFaces = "SearchFaces"
4848
4849// SearchFacesRequest generates a "aws/request.Request" representing the
4850// client's request for the SearchFaces operation. The "output" return
4851// value will be populated with the request's response once the request completes
4852// successfully.
4853//
4854// Use "Send" method on the returned Request to send the API call to the service.
4855// the "output" return value is not valid until after Send returns without error.
4856//
4857// See SearchFaces for more information on using the SearchFaces
4858// API call, and error handling.
4859//
4860// This method is useful when you want to inject custom logic or configuration
4861// into the SDK's request lifecycle. Such as custom headers, or retry logic.
4862//
4863//
4864//    // Example sending a request using the SearchFacesRequest method.
4865//    req, resp := client.SearchFacesRequest(params)
4866//
4867//    err := req.Send()
4868//    if err == nil { // resp is now filled
4869//        fmt.Println(resp)
4870//    }
4871func (c *Rekognition) SearchFacesRequest(input *SearchFacesInput) (req *request.Request, output *SearchFacesOutput) {
4872	op := &request.Operation{
4873		Name:       opSearchFaces,
4874		HTTPMethod: "POST",
4875		HTTPPath:   "/",
4876	}
4877
4878	if input == nil {
4879		input = &SearchFacesInput{}
4880	}
4881
4882	output = &SearchFacesOutput{}
4883	req = c.newRequest(op, input, output)
4884	return
4885}
4886
4887// SearchFaces API operation for Amazon Rekognition.
4888//
4889// For a given input face ID, searches for matching faces in the collection
4890// the face belongs to. You get a face ID when you add a face to the collection
4891// using the IndexFaces operation. The operation compares the features of the
4892// input face with faces in the specified collection.
4893//
4894// You can also search faces without indexing faces by using the SearchFacesByImage
4895// operation.
4896//
4897// The operation response returns an array of faces that match, ordered by similarity
4898// score with the highest similarity first. More specifically, it is an array
4899// of metadata for each face match that is found. Along with the metadata, the
4900// response also includes a confidence value for each face match, indicating
4901// the confidence that the specific face matches the input face.
4902//
4903// For an example, see Searching for a Face Using Its Face ID in the Amazon
4904// Rekognition Developer Guide.
4905//
4906// This operation requires permissions to perform the rekognition:SearchFaces
4907// action.
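//
// A minimal sketch of searching a collection by face ID (the collection ID
// and FaceId are placeholders; a real FaceId comes from IndexFaces):
//
//    out, err := client.SearchFaces(&rekognition.SearchFacesInput{
//        CollectionId:       aws.String("my-collection"), // hypothetical
//        FaceId:             aws.String("11111111-2222-3333-4444-555555555555"),
//        FaceMatchThreshold: aws.Float64(90),
//        MaxFaces:           aws.Int64(10),
//    })
//    if err == nil {
//        for _, m := range out.FaceMatches {
//            fmt.Printf("%s similarity %.1f\n",
//                aws.StringValue(m.Face.FaceId), aws.Float64Value(m.Similarity))
//        }
//    }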
4908//
4909// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
4910// with awserr.Error's Code and Message methods to get detailed information about
4911// the error.
4912//
4913// See the AWS API reference guide for Amazon Rekognition's
4914// API operation SearchFaces for usage and error information.
4915//
4916// Returned Error Types:
4917//   * InvalidParameterException
4918//   Input parameter violated a constraint. Validate your parameter before calling
4919//   the API operation again.
4920//
4921//   * AccessDeniedException
4922//   You are not authorized to perform the action.
4923//
4924//   * InternalServerError
4925//   Amazon Rekognition experienced a service issue. Try your call again.
4926//
4927//   * ThrottlingException
4928//   Amazon Rekognition is temporarily unable to process the request. Try your
4929//   call again.
4930//
4931//   * ProvisionedThroughputExceededException
4932//   The number of requests exceeded your throughput limit. If you want to increase
4933//   this limit, contact Amazon Rekognition.
4934//
4935//   * ResourceNotFoundException
4936//   The collection specified in the request cannot be found.
4937//
4938func (c *Rekognition) SearchFaces(input *SearchFacesInput) (*SearchFacesOutput, error) {
4939	req, out := c.SearchFacesRequest(input)
4940	return out, req.Send()
4941}
4942
4943// SearchFacesWithContext is the same as SearchFaces with the addition of
4944// the ability to pass a context and additional request options.
4945//
4946// See SearchFaces for details on how to use this API operation.
4947//
4948// The context must be non-nil and will be used for request cancellation. If
4949// the context is nil a panic will occur. In the future the SDK may create
4950// sub-contexts for http.Requests. See https://golang.org/pkg/context/
4951// for more information on using Contexts.
4952func (c *Rekognition) SearchFacesWithContext(ctx aws.Context, input *SearchFacesInput, opts ...request.Option) (*SearchFacesOutput, error) {
4953	req, out := c.SearchFacesRequest(input)
4954	req.SetContext(ctx)
4955	req.ApplyOptions(opts...)
4956	return out, req.Send()
4957}
4958
4959const opSearchFacesByImage = "SearchFacesByImage"
4960
4961// SearchFacesByImageRequest generates a "aws/request.Request" representing the
4962// client's request for the SearchFacesByImage operation. The "output" return
4963// value will be populated with the request's response once the request completes
4964// successfully.
4965//
4966// Use "Send" method on the returned Request to send the API call to the service.
4967// the "output" return value is not valid until after Send returns without error.
4968//
4969// See SearchFacesByImage for more information on using the SearchFacesByImage
4970// API call, and error handling.
4971//
4972// This method is useful when you want to inject custom logic or configuration
4973// into the SDK's request lifecycle. Such as custom headers, or retry logic.
4974//
4975//
4976//    // Example sending a request using the SearchFacesByImageRequest method.
4977//    req, resp := client.SearchFacesByImageRequest(params)
4978//
4979//    err := req.Send()
4980//    if err == nil { // resp is now filled
4981//        fmt.Println(resp)
4982//    }
4983func (c *Rekognition) SearchFacesByImageRequest(input *SearchFacesByImageInput) (req *request.Request, output *SearchFacesByImageOutput) {
4984	op := &request.Operation{
4985		Name:       opSearchFacesByImage,
4986		HTTPMethod: "POST",
4987		HTTPPath:   "/",
4988	}
4989
4990	if input == nil {
4991		input = &SearchFacesByImageInput{}
4992	}
4993
4994	output = &SearchFacesByImageOutput{}
4995	req = c.newRequest(op, input, output)
4996	return
4997}
4998
4999// SearchFacesByImage API operation for Amazon Rekognition.
5000//
5001// For a given input image, first detects the largest face in the image, and
5002// then searches the specified collection for matching faces. The operation
5003// compares the features of the input face with faces in the specified collection.
5004//
5005// To search for all faces in an input image, you might first call the IndexFaces
5006// operation, and then use the face IDs returned in subsequent calls to the
5007// SearchFaces operation.
5008//
5009// You can also call the DetectFaces operation and use the bounding boxes in
5010// the response to make face crops, which then you can pass in to the SearchFacesByImage
5011// operation.
5012//
5013// You pass the input image either as base64-encoded image bytes or as a reference
5014// to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon
5015// Rekognition operations, passing image bytes is not supported. The image must
5016// be either a PNG or JPEG formatted file.
5017//
5018// The response returns an array of faces that match, ordered by similarity
5019// score with the highest similarity first. More specifically, it is an array
5020// of metadata for each face match found. Along with the metadata, the response
// also includes a similarity score indicating how similar the face is to the input
5022// face. In the response, the operation also returns the bounding box (and a
5023// confidence level that the bounding box contains a face) of the face that
5024// Amazon Rekognition used for the input image.
5025//
// For an example, see Searching for a Face Using an Image in the Amazon Rekognition
5027// Developer Guide.
5028//
5029// The QualityFilter input parameter allows you to filter out detected faces
5030// that don’t meet a required quality bar. The quality bar is based on a variety
5031// of common use cases. Use QualityFilter to set the quality bar for filtering
5032// by specifying LOW, MEDIUM, or HIGH. If you do not want to filter detected
5033// faces, specify NONE. The default value is NONE.
5034//
5035// To use quality filtering, you need a collection associated with version 3
5036// of the face model or higher. To get the version of the face model associated
5037// with a collection, call DescribeCollection.
5038//
5039// This operation requires permissions to perform the rekognition:SearchFacesByImage
5040// action.
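//
// A minimal sketch of searching a collection with an S3-hosted query image
// (the bucket, key, and collection ID are placeholders):
//
//    out, err := client.SearchFacesByImage(&rekognition.SearchFacesByImageInput{
//        CollectionId:       aws.String("my-collection"), // hypothetical
//        FaceMatchThreshold: aws.Float64(95),
//        Image: &rekognition.Image{
//            S3Object: &rekognition.S3Object{
//                Bucket: aws.String("my-bucket"),
//                Name:   aws.String("query.jpg"),
//            },
//        },
//    })
//    if err == nil {
//        fmt.Printf("found %d matching faces\n", len(out.FaceMatches))
//    }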
5041//
5042// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
5043// with awserr.Error's Code and Message methods to get detailed information about
5044// the error.
5045//
5046// See the AWS API reference guide for Amazon Rekognition's
5047// API operation SearchFacesByImage for usage and error information.
5048//
5049// Returned Error Types:
5050//   * InvalidS3ObjectException
5051//   Amazon Rekognition is unable to access the S3 object specified in the request.
5052//
5053//   * InvalidParameterException
5054//   Input parameter violated a constraint. Validate your parameter before calling
5055//   the API operation again.
5056//
5057//   * ImageTooLargeException
5058//   The input image size exceeds the allowed limit. For more information, see
5059//   Limits in Amazon Rekognition in the Amazon Rekognition Developer Guide.
5060//
5061//   * AccessDeniedException
5062//   You are not authorized to perform the action.
5063//
5064//   * InternalServerError
5065//   Amazon Rekognition experienced a service issue. Try your call again.
5066//
5067//   * ThrottlingException
5068//   Amazon Rekognition is temporarily unable to process the request. Try your
5069//   call again.
5070//
5071//   * ProvisionedThroughputExceededException
5072//   The number of requests exceeded your throughput limit. If you want to increase
5073//   this limit, contact Amazon Rekognition.
5074//
5075//   * ResourceNotFoundException
5076//   The collection specified in the request cannot be found.
5077//
5078//   * InvalidImageFormatException
5079//   The provided image format is not supported.
5080//
5081func (c *Rekognition) SearchFacesByImage(input *SearchFacesByImageInput) (*SearchFacesByImageOutput, error) {
5082	req, out := c.SearchFacesByImageRequest(input)
5083	return out, req.Send()
5084}
5085
5086// SearchFacesByImageWithContext is the same as SearchFacesByImage with the addition of
5087// the ability to pass a context and additional request options.
5088//
5089// See SearchFacesByImage for details on how to use this API operation.
5090//
5091// The context must be non-nil and will be used for request cancellation. If
5092// the context is nil a panic will occur. In the future the SDK may create
5093// sub-contexts for http.Requests. See https://golang.org/pkg/context/
5094// for more information on using Contexts.
5095func (c *Rekognition) SearchFacesByImageWithContext(ctx aws.Context, input *SearchFacesByImageInput, opts ...request.Option) (*SearchFacesByImageOutput, error) {
5096	req, out := c.SearchFacesByImageRequest(input)
5097	req.SetContext(ctx)
5098	req.ApplyOptions(opts...)
5099	return out, req.Send()
5100}
5101
5102const opStartCelebrityRecognition = "StartCelebrityRecognition"
5103
5104// StartCelebrityRecognitionRequest generates a "aws/request.Request" representing the
5105// client's request for the StartCelebrityRecognition operation. The "output" return
5106// value will be populated with the request's response once the request completes
5107// successfully.
5108//
5109// Use "Send" method on the returned Request to send the API call to the service.
5110// the "output" return value is not valid until after Send returns without error.
5111//
5112// See StartCelebrityRecognition for more information on using the StartCelebrityRecognition
5113// API call, and error handling.
5114//
5115// This method is useful when you want to inject custom logic or configuration
5116// into the SDK's request lifecycle. Such as custom headers, or retry logic.
5117//
5118//
5119//    // Example sending a request using the StartCelebrityRecognitionRequest method.
5120//    req, resp := client.StartCelebrityRecognitionRequest(params)
5121//
5122//    err := req.Send()
5123//    if err == nil { // resp is now filled
5124//        fmt.Println(resp)
5125//    }
5126func (c *Rekognition) StartCelebrityRecognitionRequest(input *StartCelebrityRecognitionInput) (req *request.Request, output *StartCelebrityRecognitionOutput) {
5127	op := &request.Operation{
5128		Name:       opStartCelebrityRecognition,
5129		HTTPMethod: "POST",
5130		HTTPPath:   "/",
5131	}
5132
5133	if input == nil {
5134		input = &StartCelebrityRecognitionInput{}
5135	}
5136
5137	output = &StartCelebrityRecognitionOutput{}
5138	req = c.newRequest(op, input, output)
5139	return
5140}
5141
5142// StartCelebrityRecognition API operation for Amazon Rekognition.
5143//
5144// Starts asynchronous recognition of celebrities in a stored video.
5145//
// Amazon Rekognition Video can detect celebrities in a video that is stored
// in an Amazon S3 bucket. Use Video to specify the bucket name and the filename
5148// of the video. StartCelebrityRecognition returns a job identifier (JobId)
5149// which you use to get the results of the analysis. When celebrity recognition
5150// analysis is finished, Amazon Rekognition Video publishes a completion status
5151// to the Amazon Simple Notification Service topic that you specify in NotificationChannel.
5152// To get the results of the celebrity recognition analysis, first check that
5153// the status value published to the Amazon SNS topic is SUCCEEDED. If so, call
5154// GetCelebrityRecognition and pass the job identifier (JobId) from the initial
5155// call to StartCelebrityRecognition.
5156//
5157// For more information, see Recognizing Celebrities in the Amazon Rekognition
5158// Developer Guide.
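//
// A minimal sketch of starting celebrity recognition on a stored video (the
// bucket, key, topic ARN, and role ARN are placeholders):
//
//    out, err := client.StartCelebrityRecognition(&rekognition.StartCelebrityRecognitionInput{
//        Video: &rekognition.Video{
//            S3Object: &rekognition.S3Object{
//                Bucket: aws.String("my-bucket"), // hypothetical
//                Name:   aws.String("awards.mp4"),
//            },
//        },
//        NotificationChannel: &rekognition.NotificationChannel{
//            SNSTopicArn: aws.String("arn:aws:sns:us-east-1:111122223333:rek-status"), // hypothetical
//            RoleArn:     aws.String("arn:aws:iam::111122223333:role/rek-sns"),        // hypothetical
//        },
//    })
//    if err == nil {
//        fmt.Println("JobId:", aws.StringValue(out.JobId))
//    }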
5159//
5160// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
5161// with awserr.Error's Code and Message methods to get detailed information about
5162// the error.
5163//
5164// See the AWS API reference guide for Amazon Rekognition's
5165// API operation StartCelebrityRecognition for usage and error information.
5166//
5167// Returned Error Types:
5168//   * AccessDeniedException
5169//   You are not authorized to perform the action.
5170//
5171//   * IdempotentParameterMismatchException
5172//   A ClientRequestToken input parameter was reused with an operation, but at
5173//   least one of the other input parameters is different from the previous call
5174//   to the operation.
5175//
5176//   * InvalidParameterException
5177//   Input parameter violated a constraint. Validate your parameter before calling
5178//   the API operation again.
5179//
5180//   * InvalidS3ObjectException
5181//   Amazon Rekognition is unable to access the S3 object specified in the request.
5182//
5183//   * InternalServerError
5184//   Amazon Rekognition experienced a service issue. Try your call again.
5185//
5186//   * VideoTooLargeException
5187//   The file size or duration of the supplied media is too large. The maximum
5188//   file size is 10GB. The maximum duration is 6 hours.
5189//
5190//   * ProvisionedThroughputExceededException
5191//   The number of requests exceeded your throughput limit. If you want to increase
5192//   this limit, contact Amazon Rekognition.
5193//
5194//   * LimitExceededException
5195//   An Amazon Rekognition service limit was exceeded. For example, if you start
5196//   too many Amazon Rekognition Video jobs concurrently, calls to start operations
5197//   (StartLabelDetection, for example) will raise a LimitExceededException exception
5198//   (HTTP status code: 400) until the number of concurrently running jobs is
5199//   below the Amazon Rekognition service limit.
5200//
5201//   * ThrottlingException
5202//   Amazon Rekognition is temporarily unable to process the request. Try your
5203//   call again.
5204//
5205func (c *Rekognition) StartCelebrityRecognition(input *StartCelebrityRecognitionInput) (*StartCelebrityRecognitionOutput, error) {
5206	req, out := c.StartCelebrityRecognitionRequest(input)
5207	return out, req.Send()
5208}
5209
5210// StartCelebrityRecognitionWithContext is the same as StartCelebrityRecognition with the addition of
5211// the ability to pass a context and additional request options.
5212//
5213// See StartCelebrityRecognition for details on how to use this API operation.
5214//
5215// The context must be non-nil and will be used for request cancellation. If
5216// the context is nil a panic will occur. In the future the SDK may create
5217// sub-contexts for http.Requests. See https://golang.org/pkg/context/
5218// for more information on using Contexts.
5219func (c *Rekognition) StartCelebrityRecognitionWithContext(ctx aws.Context, input *StartCelebrityRecognitionInput, opts ...request.Option) (*StartCelebrityRecognitionOutput, error) {
5220	req, out := c.StartCelebrityRecognitionRequest(input)
5221	req.SetContext(ctx)
5222	req.ApplyOptions(opts...)
5223	return out, req.Send()
5224}
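
// A minimal caller-side sketch (not generated code) of the start/notify/get
// flow described above. It assumes an initialized client, svc := rekognition.New(sess);
// the bucket, object key, SNS topic ARN, and IAM role ARN are placeholder values.
//
//    start, err := svc.StartCelebrityRecognition(&rekognition.StartCelebrityRecognitionInput{
//        Video: &rekognition.Video{
//            S3Object: &rekognition.S3Object{
//                Bucket: aws.String("my-bucket"), // assumed bucket
//                Name:   aws.String("video.mp4"), // assumed object key
//            },
//        },
//        NotificationChannel: &rekognition.NotificationChannel{
//            SNSTopicArn: aws.String("arn:aws:sns:us-east-1:111122223333:RekognitionVideo"), // assumed
//            RoleArn:     aws.String("arn:aws:iam::111122223333:role/RekognitionSNS"),       // assumed
//        },
//    })
//    if err != nil {
//        return err
//    }
//    // After the SNS topic reports SUCCEEDED, fetch the results with the JobId:
//    res, err := svc.GetCelebrityRecognition(&rekognition.GetCelebrityRecognitionInput{
//        JobId: start.JobId,
//    })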

const opStartContentModeration = "StartContentModeration"

// StartContentModerationRequest generates a "aws/request.Request" representing the
// client's request for the StartContentModeration operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See StartContentModeration for more information on using the StartContentModeration
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
//    // Example sending a request using the StartContentModerationRequest method.
//    req, resp := client.StartContentModerationRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *Rekognition) StartContentModerationRequest(input *StartContentModerationInput) (req *request.Request, output *StartContentModerationOutput) {
	op := &request.Operation{
		Name:       opStartContentModeration,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &StartContentModerationInput{}
	}

	output = &StartContentModerationOutput{}
	req = c.newRequest(op, input, output)
	return
}

// StartContentModeration API operation for Amazon Rekognition.
//
// Starts asynchronous detection of unsafe content in a stored video.
//
// Amazon Rekognition Video can moderate content in a video stored in an Amazon
// S3 bucket. Use Video to specify the bucket name and the filename of the video.
// StartContentModeration returns a job identifier (JobId) which you use to
// get the results of the analysis. When unsafe content analysis is finished,
// Amazon Rekognition Video publishes a completion status to the Amazon Simple
// Notification Service topic that you specify in NotificationChannel.
//
// To get the results of the unsafe content analysis, first check that the status
// value published to the Amazon SNS topic is SUCCEEDED. If so, call GetContentModeration
// and pass the job identifier (JobId) from the initial call to StartContentModeration.
//
// For more information, see Detecting Unsafe Content in the Amazon Rekognition
// Developer Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Rekognition's
// API operation StartContentModeration for usage and error information.
//
// Returned Error Types:
//   * AccessDeniedException
//   You are not authorized to perform the action.
//
//   * IdempotentParameterMismatchException
//   A ClientRequestToken input parameter was reused with an operation, but at
//   least one of the other input parameters is different from the previous call
//   to the operation.
//
//   * InvalidParameterException
//   Input parameter violated a constraint. Validate your parameter before calling
//   the API operation again.
//
//   * InvalidS3ObjectException
//   Amazon Rekognition is unable to access the S3 object specified in the request.
//
//   * InternalServerError
//   Amazon Rekognition experienced a service issue. Try your call again.
//
//   * VideoTooLargeException
//   The file size or duration of the supplied media is too large. The maximum
//   file size is 10GB. The maximum duration is 6 hours.
//
//   * ProvisionedThroughputExceededException
//   The number of requests exceeded your throughput limit. If you want to increase
//   this limit, contact Amazon Rekognition.
//
//   * LimitExceededException
//   An Amazon Rekognition service limit was exceeded. For example, if you start
//   too many Amazon Rekognition Video jobs concurrently, calls to start operations
//   (StartLabelDetection, for example) will raise a LimitExceededException exception
//   (HTTP status code: 400) until the number of concurrently running jobs is
//   below the Amazon Rekognition service limit.
//
//   * ThrottlingException
//   Amazon Rekognition is temporarily unable to process the request. Try your
//   call again.
//
func (c *Rekognition) StartContentModeration(input *StartContentModerationInput) (*StartContentModerationOutput, error) {
	req, out := c.StartContentModerationRequest(input)
	return out, req.Send()
}

// StartContentModerationWithContext is the same as StartContentModeration with the addition of
// the ability to pass a context and additional request options.
//
// See StartContentModeration for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Rekognition) StartContentModerationWithContext(ctx aws.Context, input *StartContentModerationInput, opts ...request.Option) (*StartContentModerationOutput, error) {
	req, out := c.StartContentModerationRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
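
// A hand-written sketch (not generated code) of the WithContext variant with a
// deadline, plus the optional MinConfidence threshold; the timeout value and
// S3 location are placeholder assumptions, and the caller is assumed to import
// "context" and "time".
//
//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//    defer cancel()
//    start, err := svc.StartContentModerationWithContext(ctx, &rekognition.StartContentModerationInput{
//        MinConfidence: aws.Float64(60), // assumed threshold; omit to use the service default
//        Video: &rekognition.Video{
//            S3Object: &rekognition.S3Object{
//                Bucket: aws.String("my-bucket"), // assumed
//                Name:   aws.String("video.mp4"), // assumed
//            },
//        },
//    })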

const opStartFaceDetection = "StartFaceDetection"

// StartFaceDetectionRequest generates a "aws/request.Request" representing the
// client's request for the StartFaceDetection operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See StartFaceDetection for more information on using the StartFaceDetection
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
//    // Example sending a request using the StartFaceDetectionRequest method.
//    req, resp := client.StartFaceDetectionRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *Rekognition) StartFaceDetectionRequest(input *StartFaceDetectionInput) (req *request.Request, output *StartFaceDetectionOutput) {
	op := &request.Operation{
		Name:       opStartFaceDetection,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &StartFaceDetectionInput{}
	}

	output = &StartFaceDetectionOutput{}
	req = c.newRequest(op, input, output)
	return
}

// StartFaceDetection API operation for Amazon Rekognition.
//
// Starts asynchronous detection of faces in a stored video.
//
// Amazon Rekognition Video can detect faces in a video stored in an Amazon
// S3 bucket. Use Video to specify the bucket name and the filename of the video.
// StartFaceDetection returns a job identifier (JobId) that you use to get the
// results of the operation. When face detection is finished, Amazon Rekognition
// Video publishes a completion status to the Amazon Simple Notification Service
// topic that you specify in NotificationChannel. To get the results of the
// face detection operation, first check that the status value published to
// the Amazon SNS topic is SUCCEEDED. If so, call GetFaceDetection and pass
// the job identifier (JobId) from the initial call to StartFaceDetection.
//
// For more information, see Detecting Faces in a Stored Video in the Amazon
// Rekognition Developer Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Rekognition's
// API operation StartFaceDetection for usage and error information.
//
// Returned Error Types:
//   * AccessDeniedException
//   You are not authorized to perform the action.
//
//   * IdempotentParameterMismatchException
//   A ClientRequestToken input parameter was reused with an operation, but at
//   least one of the other input parameters is different from the previous call
//   to the operation.
//
//   * InvalidParameterException
//   Input parameter violated a constraint. Validate your parameter before calling
//   the API operation again.
//
//   * InvalidS3ObjectException
//   Amazon Rekognition is unable to access the S3 object specified in the request.
//
//   * InternalServerError
//   Amazon Rekognition experienced a service issue. Try your call again.
//
//   * VideoTooLargeException
//   The file size or duration of the supplied media is too large. The maximum
//   file size is 10GB. The maximum duration is 6 hours.
//
//   * ProvisionedThroughputExceededException
//   The number of requests exceeded your throughput limit. If you want to increase
//   this limit, contact Amazon Rekognition.
//
//   * LimitExceededException
//   An Amazon Rekognition service limit was exceeded. For example, if you start
//   too many Amazon Rekognition Video jobs concurrently, calls to start operations
//   (StartLabelDetection, for example) will raise a LimitExceededException exception
//   (HTTP status code: 400) until the number of concurrently running jobs is
//   below the Amazon Rekognition service limit.
//
//   * ThrottlingException
//   Amazon Rekognition is temporarily unable to process the request. Try your
//   call again.
//
func (c *Rekognition) StartFaceDetection(input *StartFaceDetectionInput) (*StartFaceDetectionOutput, error) {
	req, out := c.StartFaceDetectionRequest(input)
	return out, req.Send()
}

// StartFaceDetectionWithContext is the same as StartFaceDetection with the addition of
// the ability to pass a context and additional request options.
//
// See StartFaceDetection for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Rekognition) StartFaceDetectionWithContext(ctx aws.Context, input *StartFaceDetectionInput, opts ...request.Option) (*StartFaceDetectionOutput, error) {
	req, out := c.StartFaceDetectionRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
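
// A brief sketch (not generated code) of requesting the full set of facial
// attributes via the FaceAttributes parameter; the S3 location is a placeholder.
//
//    start, err := svc.StartFaceDetection(&rekognition.StartFaceDetectionInput{
//        FaceAttributes: aws.String(rekognition.FaceAttributesAll), // "ALL" instead of the default subset
//        Video: &rekognition.Video{
//            S3Object: &rekognition.S3Object{
//                Bucket: aws.String("my-bucket"), // assumed
//                Name:   aws.String("video.mp4"), // assumed
//            },
//        },
//    })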

const opStartFaceSearch = "StartFaceSearch"

// StartFaceSearchRequest generates a "aws/request.Request" representing the
// client's request for the StartFaceSearch operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See StartFaceSearch for more information on using the StartFaceSearch
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
//    // Example sending a request using the StartFaceSearchRequest method.
//    req, resp := client.StartFaceSearchRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *Rekognition) StartFaceSearchRequest(input *StartFaceSearchInput) (req *request.Request, output *StartFaceSearchOutput) {
	op := &request.Operation{
		Name:       opStartFaceSearch,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &StartFaceSearchInput{}
	}

	output = &StartFaceSearchOutput{}
	req = c.newRequest(op, input, output)
	return
}

// StartFaceSearch API operation for Amazon Rekognition.
//
// Starts the asynchronous search for faces in a collection that match the faces
// of persons detected in a stored video.
//
// The video must be stored in an Amazon S3 bucket. Use Video to specify the
// bucket name and the filename of the video. StartFaceSearch returns a job
// identifier (JobId) which you use to get the search results once the search
// has completed. When searching is finished, Amazon Rekognition Video publishes
// a completion status to the Amazon Simple Notification Service topic that
// you specify in NotificationChannel. To get the search results, first check
// that the status value published to the Amazon SNS topic is SUCCEEDED. If
// so, call GetFaceSearch and pass the job identifier (JobId) from the initial
// call to StartFaceSearch. For more information, see procedure-person-search-videos.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Rekognition's
// API operation StartFaceSearch for usage and error information.
//
// Returned Error Types:
//   * AccessDeniedException
//   You are not authorized to perform the action.
//
//   * IdempotentParameterMismatchException
//   A ClientRequestToken input parameter was reused with an operation, but at
//   least one of the other input parameters is different from the previous call
//   to the operation.
//
//   * InvalidParameterException
//   Input parameter violated a constraint. Validate your parameter before calling
//   the API operation again.
//
//   * InvalidS3ObjectException
//   Amazon Rekognition is unable to access the S3 object specified in the request.
//
//   * InternalServerError
//   Amazon Rekognition experienced a service issue. Try your call again.
//
//   * VideoTooLargeException
//   The file size or duration of the supplied media is too large. The maximum
//   file size is 10GB. The maximum duration is 6 hours.
//
//   * ProvisionedThroughputExceededException
//   The number of requests exceeded your throughput limit. If you want to increase
//   this limit, contact Amazon Rekognition.
//
//   * LimitExceededException
//   An Amazon Rekognition service limit was exceeded. For example, if you start
//   too many Amazon Rekognition Video jobs concurrently, calls to start operations
//   (StartLabelDetection, for example) will raise a LimitExceededException exception
//   (HTTP status code: 400) until the number of concurrently running jobs is
//   below the Amazon Rekognition service limit.
//
//   * ResourceNotFoundException
//   The collection specified in the request cannot be found.
//
//   * ThrottlingException
//   Amazon Rekognition is temporarily unable to process the request. Try your
//   call again.
//
func (c *Rekognition) StartFaceSearch(input *StartFaceSearchInput) (*StartFaceSearchOutput, error) {
	req, out := c.StartFaceSearchRequest(input)
	return out, req.Send()
}

// StartFaceSearchWithContext is the same as StartFaceSearch with the addition of
// the ability to pass a context and additional request options.
//
// See StartFaceSearch for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Rekognition) StartFaceSearchWithContext(ctx aws.Context, input *StartFaceSearchInput, opts ...request.Option) (*StartFaceSearchOutput, error) {
	req, out := c.StartFaceSearchRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
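
// A sketch (not generated code) of searching a stored video against an existing
// collection; the collection ID and match threshold are placeholder assumptions.
//
//    start, err := svc.StartFaceSearch(&rekognition.StartFaceSearchInput{
//        CollectionId:       aws.String("my-collection"), // assumed existing collection
//        FaceMatchThreshold: aws.Float64(90),             // assumed minimum match confidence
//        Video: &rekognition.Video{
//            S3Object: &rekognition.S3Object{
//                Bucket: aws.String("my-bucket"), // assumed
//                Name:   aws.String("video.mp4"), // assumed
//            },
//        },
//    })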

const opStartLabelDetection = "StartLabelDetection"

// StartLabelDetectionRequest generates a "aws/request.Request" representing the
// client's request for the StartLabelDetection operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See StartLabelDetection for more information on using the StartLabelDetection
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
//    // Example sending a request using the StartLabelDetectionRequest method.
//    req, resp := client.StartLabelDetectionRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *Rekognition) StartLabelDetectionRequest(input *StartLabelDetectionInput) (req *request.Request, output *StartLabelDetectionOutput) {
	op := &request.Operation{
		Name:       opStartLabelDetection,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &StartLabelDetectionInput{}
	}

	output = &StartLabelDetectionOutput{}
	req = c.newRequest(op, input, output)
	return
}

// StartLabelDetection API operation for Amazon Rekognition.
//
// Starts asynchronous detection of labels in a stored video.
//
// Amazon Rekognition Video can detect labels in a video. Labels are instances
// of real-world entities. This includes objects like flower, tree, and table;
// events like wedding, graduation, and birthday party; concepts like landscape,
// evening, and nature; and activities like a person getting out of a car or
// a person skiing.
//
// The video must be stored in an Amazon S3 bucket. Use Video to specify the
// bucket name and the filename of the video. StartLabelDetection returns a
// job identifier (JobId) which you use to get the results of the operation.
// When label detection is finished, Amazon Rekognition Video publishes a completion
// status to the Amazon Simple Notification Service topic that you specify in
// NotificationChannel.
//
// To get the results of the label detection operation, first check that the
// status value published to the Amazon SNS topic is SUCCEEDED. If so, call
// GetLabelDetection and pass the job identifier (JobId) from the initial call
// to StartLabelDetection.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Rekognition's
// API operation StartLabelDetection for usage and error information.
//
// Returned Error Types:
//   * AccessDeniedException
//   You are not authorized to perform the action.
//
//   * IdempotentParameterMismatchException
//   A ClientRequestToken input parameter was reused with an operation, but at
//   least one of the other input parameters is different from the previous call
//   to the operation.
//
//   * InvalidParameterException
//   Input parameter violated a constraint. Validate your parameter before calling
//   the API operation again.
//
//   * InvalidS3ObjectException
//   Amazon Rekognition is unable to access the S3 object specified in the request.
//
//   * InternalServerError
//   Amazon Rekognition experienced a service issue. Try your call again.
//
//   * VideoTooLargeException
//   The file size or duration of the supplied media is too large. The maximum
//   file size is 10GB. The maximum duration is 6 hours.
//
//   * ProvisionedThroughputExceededException
//   The number of requests exceeded your throughput limit. If you want to increase
//   this limit, contact Amazon Rekognition.
//
//   * LimitExceededException
//   An Amazon Rekognition service limit was exceeded. For example, if you start
//   too many Amazon Rekognition Video jobs concurrently, calls to start operations
//   (StartLabelDetection, for example) will raise a LimitExceededException exception
//   (HTTP status code: 400) until the number of concurrently running jobs is
//   below the Amazon Rekognition service limit.
//
//   * ThrottlingException
//   Amazon Rekognition is temporarily unable to process the request. Try your
//   call again.
//
func (c *Rekognition) StartLabelDetection(input *StartLabelDetectionInput) (*StartLabelDetectionOutput, error) {
	req, out := c.StartLabelDetectionRequest(input)
	return out, req.Send()
}

// StartLabelDetectionWithContext is the same as StartLabelDetection with the addition of
// the ability to pass a context and additional request options.
//
// See StartLabelDetection for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Rekognition) StartLabelDetectionWithContext(ctx aws.Context, input *StartLabelDetectionInput, opts ...request.Option) (*StartLabelDetectionOutput, error) {
	req, out := c.StartLabelDetectionRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
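
// A sketch (not generated code) of polling GetLabelDetection directly instead
// of subscribing to the SNS topic; the MinConfidence value and sleep interval
// are arbitrary assumptions, and "time" is assumed to be imported.
//
//    start, err := svc.StartLabelDetection(&rekognition.StartLabelDetectionInput{
//        MinConfidence: aws.Float64(70), // assumed; omit to use the service default
//        Video:         video,           // *rekognition.Video, as in the sketches above
//    })
//    if err != nil {
//        return err
//    }
//    for {
//        res, err := svc.GetLabelDetection(&rekognition.GetLabelDetectionInput{JobId: start.JobId})
//        if err != nil {
//            return err
//        }
//        if aws.StringValue(res.JobStatus) != rekognition.VideoJobStatusInProgress {
//            break // SUCCEEDED or FAILED
//        }
//        time.Sleep(5 * time.Second)
//    }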

const opStartPersonTracking = "StartPersonTracking"

// StartPersonTrackingRequest generates a "aws/request.Request" representing the
// client's request for the StartPersonTracking operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See StartPersonTracking for more information on using the StartPersonTracking
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
//    // Example sending a request using the StartPersonTrackingRequest method.
//    req, resp := client.StartPersonTrackingRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *Rekognition) StartPersonTrackingRequest(input *StartPersonTrackingInput) (req *request.Request, output *StartPersonTrackingOutput) {
	op := &request.Operation{
		Name:       opStartPersonTracking,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &StartPersonTrackingInput{}
	}

	output = &StartPersonTrackingOutput{}
	req = c.newRequest(op, input, output)
	return
}

// StartPersonTracking API operation for Amazon Rekognition.
//
// Starts the asynchronous tracking of a person's path in a stored video.
//
// Amazon Rekognition Video can track the path of people in a video stored in
// an Amazon S3 bucket. Use Video to specify the bucket name and the filename
// of the video. StartPersonTracking returns a job identifier (JobId) which
// you use to get the results of the operation. When person tracking is finished,
// Amazon Rekognition publishes a completion status to the Amazon Simple Notification
// Service topic that you specify in NotificationChannel.
//
// To get the results of the person tracking operation, first check that the
// status value published to the Amazon SNS topic is SUCCEEDED. If so, call
// GetPersonTracking and pass the job identifier (JobId) from the initial call
// to StartPersonTracking.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Rekognition's
// API operation StartPersonTracking for usage and error information.
//
// Returned Error Types:
//   * AccessDeniedException
//   You are not authorized to perform the action.
//
//   * IdempotentParameterMismatchException
//   A ClientRequestToken input parameter was reused with an operation, but at
//   least one of the other input parameters is different from the previous call
//   to the operation.
//
//   * InvalidParameterException
//   Input parameter violated a constraint. Validate your parameter before calling
//   the API operation again.
//
//   * InvalidS3ObjectException
//   Amazon Rekognition is unable to access the S3 object specified in the request.
//
//   * InternalServerError
//   Amazon Rekognition experienced a service issue. Try your call again.
//
//   * VideoTooLargeException
//   The file size or duration of the supplied media is too large. The maximum
//   file size is 10GB. The maximum duration is 6 hours.
//
//   * ProvisionedThroughputExceededException
//   The number of requests exceeded your throughput limit. If you want to increase
//   this limit, contact Amazon Rekognition.
//
//   * LimitExceededException
//   An Amazon Rekognition service limit was exceeded. For example, if you start
//   too many Amazon Rekognition Video jobs concurrently, calls to start operations
//   (StartLabelDetection, for example) will raise a LimitExceededException exception
//   (HTTP status code: 400) until the number of concurrently running jobs is
//   below the Amazon Rekognition service limit.
//
//   * ThrottlingException
//   Amazon Rekognition is temporarily unable to process the request. Try your
//   call again.
//
func (c *Rekognition) StartPersonTracking(input *StartPersonTrackingInput) (*StartPersonTrackingOutput, error) {
	req, out := c.StartPersonTrackingRequest(input)
	return out, req.Send()
}

// StartPersonTrackingWithContext is the same as StartPersonTracking with the addition of
// the ability to pass a context and additional request options.
//
// See StartPersonTracking for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Rekognition) StartPersonTrackingWithContext(ctx aws.Context, input *StartPersonTrackingInput, opts ...request.Option) (*StartPersonTrackingOutput, error) {
	req, out := c.StartPersonTrackingRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
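
// A sketch (not generated code) of paging through person-tracking results with
// NextToken once the job has succeeded; jobID is assumed to come from a prior
// StartPersonTracking call.
//
//    var token *string
//    for {
//        res, err := svc.GetPersonTracking(&rekognition.GetPersonTrackingInput{
//            JobId:      jobID,
//            MaxResults: aws.Int64(1000),
//            NextToken:  token,
//            SortBy:     aws.String(rekognition.PersonTrackingSortByTimestamp),
//        })
//        if err != nil {
//            return err
//        }
//        // ... consume res.Persons ...
//        if res.NextToken == nil {
//            break
//        }
//        token = res.NextToken
//    }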

const opStartProjectVersion = "StartProjectVersion"

// StartProjectVersionRequest generates a "aws/request.Request" representing the
// client's request for the StartProjectVersion operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See StartProjectVersion for more information on using the StartProjectVersion
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
//    // Example sending a request using the StartProjectVersionRequest method.
//    req, resp := client.StartProjectVersionRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *Rekognition) StartProjectVersionRequest(input *StartProjectVersionInput) (req *request.Request, output *StartProjectVersionOutput) {
	op := &request.Operation{
		Name:       opStartProjectVersion,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &StartProjectVersionInput{}
	}

	output = &StartProjectVersionOutput{}
	req = c.newRequest(op, input, output)
	return
}

// StartProjectVersion API operation for Amazon Rekognition.
//
// Starts the specified version of a model. Starting a model takes a while
// to complete. To check the current state of the model, use DescribeProjectVersions.
//
// Once the model is running, you can detect custom labels in new images by
// calling DetectCustomLabels.
//
// You are charged for the amount of time that the model is running. To stop
// a running model, call StopProjectVersion.
//
// This operation requires permissions to perform the rekognition:StartProjectVersion
// action.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Rekognition's
// API operation StartProjectVersion for usage and error information.
//
// Returned Error Types:
//   * ResourceNotFoundException
//   The collection specified in the request cannot be found.
//
//   * ResourceInUseException
//   The specified resource is already being used.
//
//   * LimitExceededException
//   An Amazon Rekognition service limit was exceeded. For example, if you start
//   too many Amazon Rekognition Video jobs concurrently, calls to start operations
//   (StartLabelDetection, for example) will raise a LimitExceededException exception
//   (HTTP status code: 400) until the number of concurrently running jobs is
//   below the Amazon Rekognition service limit.
//
//   * InvalidParameterException
//   Input parameter violated a constraint. Validate your parameter before calling
//   the API operation again.
//
//   * AccessDeniedException
//   You are not authorized to perform the action.
//
//   * InternalServerError
//   Amazon Rekognition experienced a service issue. Try your call again.
//
//   * ThrottlingException
//   Amazon Rekognition is temporarily unable to process the request. Try your
//   call again.
//
//   * ProvisionedThroughputExceededException
//   The number of requests exceeded your throughput limit. If you want to increase
//   this limit, contact Amazon Rekognition.
//
func (c *Rekognition) StartProjectVersion(input *StartProjectVersionInput) (*StartProjectVersionOutput, error) {
	req, out := c.StartProjectVersionRequest(input)
	return out, req.Send()
}

// StartProjectVersionWithContext is the same as StartProjectVersion with the addition of
// the ability to pass a context and additional request options.
//
// See StartProjectVersion for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Rekognition) StartProjectVersionWithContext(ctx aws.Context, input *StartProjectVersionInput, opts ...request.Option) (*StartProjectVersionOutput, error) {
	req, out := c.StartProjectVersionRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
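
// A sketch (not generated code) of starting a trained model version; the project
// version ARN is a placeholder, and MinInferenceUnits drives billing while the
// model runs.
//
//    out, err := svc.StartProjectVersion(&rekognition.StartProjectVersionInput{
//        ProjectVersionArn: aws.String("arn:aws:rekognition:us-east-1:111122223333:project/my-project/version/my-model/1"), // assumed ARN
//        MinInferenceUnits: aws.Int64(1), // one inference unit
//    })
//    // Poll DescribeProjectVersions until the status is RUNNING before calling
//    // DetectCustomLabels.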

const opStartSegmentDetection = "StartSegmentDetection"

// StartSegmentDetectionRequest generates a "aws/request.Request" representing the
// client's request for the StartSegmentDetection operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See StartSegmentDetection for more information on using the StartSegmentDetection
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
//    // Example sending a request using the StartSegmentDetectionRequest method.
//    req, resp := client.StartSegmentDetectionRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *Rekognition) StartSegmentDetectionRequest(input *StartSegmentDetectionInput) (req *request.Request, output *StartSegmentDetectionOutput) {
	op := &request.Operation{
		Name:       opStartSegmentDetection,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &StartSegmentDetectionInput{}
	}

	output = &StartSegmentDetectionOutput{}
	req = c.newRequest(op, input, output)
	return
}

// StartSegmentDetection API operation for Amazon Rekognition.
//
// Starts asynchronous detection of segments in a stored video.
//
// Amazon Rekognition Video can detect segments in a video stored in an Amazon
// S3 bucket. Use Video to specify the bucket name and the filename of the video.
// StartSegmentDetection returns a job identifier (JobId) which you use to get
// the results of the operation. When segment detection is finished, Amazon
// Rekognition Video publishes a completion status to the Amazon Simple Notification
// Service topic that you specify in NotificationChannel.
//
// You can use the Filters (StartSegmentDetectionFilters) input parameter to
// specify the minimum detection confidence returned in the response. Within
// Filters, use ShotFilter (StartShotDetectionFilter) to filter detected shots.
// Use TechnicalCueFilter (StartTechnicalCueDetectionFilter) to filter technical
// cues.
//
// To get the results of the segment detection operation, first check that the
// status value published to the Amazon SNS topic is SUCCEEDED. If so, call
// GetSegmentDetection and pass the job identifier (JobId) from the initial
// call to StartSegmentDetection.
//
// For more information, see Detecting Video Segments in Stored Video in the
// Amazon Rekognition Developer Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Rekognition's
// API operation StartSegmentDetection for usage and error information.
//
// Returned Error Types:
//   * AccessDeniedException
//   You are not authorized to perform the action.
//
//   * IdempotentParameterMismatchException
//   A ClientRequestToken input parameter was reused with an operation, but at
//   least one of the other input parameters is different from the previous call
//   to the operation.
//
//   * InvalidParameterException
//   Input parameter violated a constraint. Validate your parameter before calling
//   the API operation again.
//
//   * InvalidS3ObjectException
//   Amazon Rekognition is unable to access the S3 object specified in the request.
//
//   * InternalServerError
//   Amazon Rekognition experienced a service issue. Try your call again.
//
//   * VideoTooLargeException
//   The file size or duration of the supplied media is too large. The maximum
//   file size is 10GB. The maximum duration is 6 hours.
//
//   * ProvisionedThroughputExceededException
//   The number of requests exceeded your throughput limit. If you want to increase
//   this limit, contact Amazon Rekognition.
//
//   * LimitExceededException
//   An Amazon Rekognition service limit was exceeded. For example, if you start
//   too many Amazon Rekognition Video jobs concurrently, calls to start operations
//   (StartLabelDetection, for example) will raise a LimitExceededException exception
//   (HTTP status code: 400) until the number of concurrently running jobs is
//   below the Amazon Rekognition service limit.
//
//   * ThrottlingException
//   Amazon Rekognition is temporarily unable to process the request. Try your
//   call again.
//
func (c *Rekognition) StartSegmentDetection(input *StartSegmentDetectionInput) (*StartSegmentDetectionOutput, error) {
	req, out := c.StartSegmentDetectionRequest(input)
	return out, req.Send()
}

// StartSegmentDetectionWithContext is the same as StartSegmentDetection with the addition of
// the ability to pass a context and additional request options.
//
// See StartSegmentDetection for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Rekognition) StartSegmentDetectionWithContext(ctx aws.Context, input *StartSegmentDetectionInput, opts ...request.Option) (*StartSegmentDetectionOutput, error) {
	req, out := c.StartSegmentDetectionRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
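
// A sketch (not generated code) of requesting both segment types with the
// Filters parameter described above; the confidence values are placeholder
// assumptions.
//
//    start, err := svc.StartSegmentDetection(&rekognition.StartSegmentDetectionInput{
//        Video: video, // *rekognition.Video, as in the sketches above
//        SegmentTypes: []*string{
//            aws.String(rekognition.SegmentTypeShot),
//            aws.String(rekognition.SegmentTypeTechnicalCue),
//        },
//        Filters: &rekognition.StartSegmentDetectionFilters{
//            ShotFilter: &rekognition.StartShotDetectionFilter{
//                MinSegmentConfidence: aws.Float64(80), // assumed
//            },
//            TechnicalCueFilter: &rekognition.StartTechnicalCueDetectionFilter{
//                MinSegmentConfidence: aws.Float64(80), // assumed
//            },
//        },
//    })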

const opStartStreamProcessor = "StartStreamProcessor"

// StartStreamProcessorRequest generates a "aws/request.Request" representing the
// client's request for the StartStreamProcessor operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See StartStreamProcessor for more information on using the StartStreamProcessor
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
//    // Example sending a request using the StartStreamProcessorRequest method.
//    req, resp := client.StartStreamProcessorRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *Rekognition) StartStreamProcessorRequest(input *StartStreamProcessorInput) (req *request.Request, output *StartStreamProcessorOutput) {
	op := &request.Operation{
		Name:       opStartStreamProcessor,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &StartStreamProcessorInput{}
	}

	output = &StartStreamProcessorOutput{}
	req = c.newRequest(op, input, output)
	req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
	return
}

// StartStreamProcessor API operation for Amazon Rekognition.
//
// Starts processing a stream processor. You create a stream processor by calling
// CreateStreamProcessor. To tell StartStreamProcessor which stream processor
// to start, use the value of the Name field specified in the call to CreateStreamProcessor.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Rekognition's
// API operation StartStreamProcessor for usage and error information.
//
// Returned Error Types:
//   * AccessDeniedException
//   You are not authorized to perform the action.
//
//   * InternalServerError
//   Amazon Rekognition experienced a service issue. Try your call again.
//
//   * ThrottlingException
//   Amazon Rekognition is temporarily unable to process the request. Try your
//   call again.
//
//   * InvalidParameterException
//   Input parameter violated a constraint. Validate your parameter before calling
//   the API operation again.
//
//   * ResourceNotFoundException
//   The collection specified in the request cannot be found.
//
//   * ResourceInUseException
//   The specified resource is already being used.
//
//   * ProvisionedThroughputExceededException
//   The number of requests exceeded your throughput limit. If you want to increase
//   this limit, contact Amazon Rekognition.
//
func (c *Rekognition) StartStreamProcessor(input *StartStreamProcessorInput) (*StartStreamProcessorOutput, error) {
	req, out := c.StartStreamProcessorRequest(input)
	return out, req.Send()
}

// StartStreamProcessorWithContext is the same as StartStreamProcessor with the addition of
// the ability to pass a context and additional request options.
//
// See StartStreamProcessor for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Rekognition) StartStreamProcessorWithContext(ctx aws.Context, input *StartStreamProcessorInput, opts ...request.Option) (*StartStreamProcessorOutput, error) {
	req, out := c.StartStreamProcessorRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
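
// A sketch (not generated code): the processor is addressed by the Name chosen
// at CreateStreamProcessor time; "my-processor" is a placeholder. StopStreamProcessor
// takes the same Name to stop it later.
//
//    _, err := svc.StartStreamProcessor(&rekognition.StartStreamProcessorInput{
//        Name: aws.String("my-processor"), // assumed name from CreateStreamProcessor
//    })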

const opStartTextDetection = "StartTextDetection"

// StartTextDetectionRequest generates a "aws/request.Request" representing the
// client's request for the StartTextDetection operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See StartTextDetection for more information on using the StartTextDetection
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
//    // Example sending a request using the StartTextDetectionRequest method.
//    req, resp := client.StartTextDetectionRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *Rekognition) StartTextDetectionRequest(input *StartTextDetectionInput) (req *request.Request, output *StartTextDetectionOutput) {
	op := &request.Operation{
		Name:       opStartTextDetection,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &StartTextDetectionInput{}
	}

	output = &StartTextDetectionOutput{}
	req = c.newRequest(op, input, output)
	return
}

// StartTextDetection API operation for Amazon Rekognition.
//
// Starts asynchronous detection of text in a stored video.
//
// Amazon Rekognition Video can detect text in a video stored in an Amazon S3
// bucket. Use Video to specify the bucket name and the filename of the video.
// StartTextDetection returns a job identifier (JobId) which you use to get
// the results of the operation. When text detection is finished, Amazon Rekognition
// Video publishes a completion status to the Amazon Simple Notification Service
// topic that you specify in NotificationChannel.
//
// To get the results of the text detection operation, first check that the
// status value published to the Amazon SNS topic is SUCCEEDED. If so, call
// GetTextDetection and pass the job identifier (JobId) from the initial call
// to StartTextDetection.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Rekognition's
// API operation StartTextDetection for usage and error information.
//
// Returned Error Types:
//   * AccessDeniedException
//   You are not authorized to perform the action.
//
//   * IdempotentParameterMismatchException
//   A ClientRequestToken input parameter was reused with an operation, but at
//   least one of the other input parameters is different from the previous call
//   to the operation.
//
//   * InvalidParameterException
//   Input parameter violated a constraint. Validate your parameter before calling
//   the API operation again.
//
//   * InvalidS3ObjectException
//   Amazon Rekognition is unable to access the S3 object specified in the request.
//
//   * InternalServerError
//   Amazon Rekognition experienced a service issue. Try your call again.
//
//   * VideoTooLargeException
//   The file size or duration of the supplied media is too large. The maximum
//   file size is 10GB. The maximum duration is 6 hours.
//
//   * ProvisionedThroughputExceededException
//   The number of requests exceeded your throughput limit. If you want to increase
//   this limit, contact Amazon Rekognition.
//
//   * LimitExceededException
//   An Amazon Rekognition service limit was exceeded. For example, if you start
//   too many Amazon Rekognition Video jobs concurrently, calls to start operations
//   (StartLabelDetection, for example) will raise a LimitExceededException exception
//   (HTTP status code: 400) until the number of concurrently running jobs is
//   below the Amazon Rekognition service limit.
//
//   * ThrottlingException
//   Amazon Rekognition is temporarily unable to process the request. Try your
//   call again.
//
func (c *Rekognition) StartTextDetection(input *StartTextDetectionInput) (*StartTextDetectionOutput, error) {
	req, out := c.StartTextDetectionRequest(input)
	return out, req.Send()
}

// StartTextDetectionWithContext is the same as StartTextDetection with the addition of
// the ability to pass a context and additional request options.
//
// See StartTextDetection for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Rekognition) StartTextDetectionWithContext(ctx aws.Context, input *StartTextDetectionInput, opts ...request.Option) (*StartTextDetectionOutput, error) {
	req, out := c.StartTextDetectionRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
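
// A sketch (not generated code) of narrowing detected text with a word filter;
// the minimum confidence and bounding-box sizes are placeholder assumptions.
//
//    start, err := svc.StartTextDetection(&rekognition.StartTextDetectionInput{
//        Video: video, // *rekognition.Video, as in the sketches above
//        Filters: &rekognition.StartTextDetectionFilters{
//            WordFilter: &rekognition.DetectionFilter{
//                MinConfidence:        aws.Float64(80),   // assumed
//                MinBoundingBoxHeight: aws.Float64(0.05), // assumed fraction of frame height
//                MinBoundingBoxWidth:  aws.Float64(0.05), // assumed fraction of frame width
//            },
//        },
//    })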

const opStopProjectVersion = "StopProjectVersion"

// StopProjectVersionRequest generates a "aws/request.Request" representing the
// client's request for the StopProjectVersion operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See StopProjectVersion for more information on using the StopProjectVersion
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
//    // Example sending a request using the StopProjectVersionRequest method.
//    req, resp := client.StopProjectVersionRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *Rekognition) StopProjectVersionRequest(input *StopProjectVersionInput) (req *request.Request, output *StopProjectVersionOutput) {
	op := &request.Operation{
		Name:       opStopProjectVersion,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &StopProjectVersionInput{}
	}

	output = &StopProjectVersionOutput{}
	req = c.newRequest(op, input, output)
	return
}

// StopProjectVersion API operation for Amazon Rekognition.
//
// Stops a running model. The operation might take a while to complete. To check
// the current status, call DescribeProjectVersions.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Rekognition's
// API operation StopProjectVersion for usage and error information.
//
// Returned Error Types:
//   * ResourceNotFoundException
//   The collection specified in the request cannot be found.
//
//   * ResourceInUseException
//   The specified resource is already being used.
//
//   * InvalidParameterException
//   Input parameter violated a constraint. Validate your parameter before calling
//   the API operation again.
//
//   * AccessDeniedException
//   You are not authorized to perform the action.
//
//   * InternalServerError
//   Amazon Rekognition experienced a service issue. Try your call again.
//
//   * ThrottlingException
//   Amazon Rekognition is temporarily unable to process the request. Try your
//   call again.
//
//   * ProvisionedThroughputExceededException
//   The number of requests exceeded your throughput limit. If you want to increase
//   this limit, contact Amazon Rekognition.
//
func (c *Rekognition) StopProjectVersion(input *StopProjectVersionInput) (*StopProjectVersionOutput, error) {
	req, out := c.StopProjectVersionRequest(input)
	return out, req.Send()
}

// StopProjectVersionWithContext is the same as StopProjectVersion with the addition of
// the ability to pass a context and additional request options.
//
// See StopProjectVersion for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Rekognition) StopProjectVersionWithContext(ctx aws.Context, input *StopProjectVersionInput, opts ...request.Option) (*StopProjectVersionOutput, error) {
	req, out := c.StopProjectVersionRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
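
// A sketch (not generated code) of stopping a running model; the ARN is a
// placeholder. Check DescribeProjectVersions afterwards to confirm the model
// reaches a stopped state.
//
//    _, err := svc.StopProjectVersion(&rekognition.StopProjectVersionInput{
//        ProjectVersionArn: aws.String("arn:aws:rekognition:us-east-1:111122223333:project/my-project/version/my-model/1"), // assumed
//    })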

const opStopStreamProcessor = "StopStreamProcessor"

// StopStreamProcessorRequest generates a "aws/request.Request" representing the
// client's request for the StopStreamProcessor operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use the "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See StopStreamProcessor for more information on using the StopStreamProcessor
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
6427//
6428//
6429//    // Example sending a request using the StopStreamProcessorRequest method.
6430//    req, resp := client.StopStreamProcessorRequest(params)
6431//
6432//    err := req.Send()
6433//    if err == nil { // resp is now filled
6434//        fmt.Println(resp)
6435//    }
6436func (c *Rekognition) StopStreamProcessorRequest(input *StopStreamProcessorInput) (req *request.Request, output *StopStreamProcessorOutput) {
6437	op := &request.Operation{
6438		Name:       opStopStreamProcessor,
6439		HTTPMethod: "POST",
6440		HTTPPath:   "/",
6441	}
6442
6443	if input == nil {
6444		input = &StopStreamProcessorInput{}
6445	}
6446
6447	output = &StopStreamProcessorOutput{}
6448	req = c.newRequest(op, input, output)
6449	req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
6450	return
6451}
6452
6453// StopStreamProcessor API operation for Amazon Rekognition.
6454//
6455// Stops a running stream processor that was created by CreateStreamProcessor.
6456//
6457// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
6458// with awserr.Error's Code and Message methods to get detailed information about
6459// the error.
6460//
6461// See the AWS API reference guide for Amazon Rekognition's
6462// API operation StopStreamProcessor for usage and error information.
6463//
6464// Returned Error Types:
6465//   * AccessDeniedException
6466//   You are not authorized to perform the action.
6467//
6468//   * InternalServerError
6469//   Amazon Rekognition experienced a service issue. Try your call again.
6470//
6471//   * ThrottlingException
6472//   Amazon Rekognition is temporarily unable to process the request. Try your
6473//   call again.
6474//
6475//   * InvalidParameterException
6476//   Input parameter violated a constraint. Validate your parameter before calling
6477//   the API operation again.
6478//
6479//   * ResourceNotFoundException
6480//   The collection specified in the request cannot be found.
6481//
6482//   * ResourceInUseException
6483//   The specified resource is already being used.
6484//
6485//   * ProvisionedThroughputExceededException
6486//   The number of requests exceeded your throughput limit. If you want to increase
6487//   this limit, contact Amazon Rekognition.
6488//
6489func (c *Rekognition) StopStreamProcessor(input *StopStreamProcessorInput) (*StopStreamProcessorOutput, error) {
6490	req, out := c.StopStreamProcessorRequest(input)
6491	return out, req.Send()
6492}
6493
6494// StopStreamProcessorWithContext is the same as StopStreamProcessor with the addition of
6495// the ability to pass a context and additional request options.
6496//
6497// See StopStreamProcessor for details on how to use this API operation.
6498//
6499// The context must be non-nil and will be used for request cancellation. If
6500// the context is nil a panic will occur. In the future the SDK may create
6501// sub-contexts for http.Requests. See https://golang.org/pkg/context/
6502// for more information on using Contexts.
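//
// A minimal usage sketch; the timeout, client value, and stream processor name
// are illustrative, not taken from this file:
//
//    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
//    defer cancel()
//
//    out, err := client.StopStreamProcessorWithContext(ctx, &rekognition.StopStreamProcessorInput{
//        Name: aws.String("my-stream-processor"),
//    })
//    if err == nil {
//        fmt.Println(out)
//    }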
6503func (c *Rekognition) StopStreamProcessorWithContext(ctx aws.Context, input *StopStreamProcessorInput, opts ...request.Option) (*StopStreamProcessorOutput, error) {
6504	req, out := c.StopStreamProcessorRequest(input)
6505	req.SetContext(ctx)
6506	req.ApplyOptions(opts...)
6507	return out, req.Send()
6508}
6509
6510// You are not authorized to perform the action.
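//
// A handling sketch, following the awserr pattern described in the operation
// documentation above; the operation call is illustrative:
//
//    _, err := client.StopStreamProcessor(params)
//    if aerr, ok := err.(awserr.Error); ok {
//        switch aerr.Code() {
//        case "AccessDeniedException":
//            fmt.Println(aerr.Message())
//        }
//    }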
6511type AccessDeniedException struct {
6512	_            struct{}                  `type:"structure"`
6513	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
6514
6515	Message_ *string `locationName:"message" type:"string"`
6516}
6517
6518// String returns the string representation
6519func (s AccessDeniedException) String() string {
6520	return awsutil.Prettify(s)
6521}
6522
6523// GoString returns the string representation
6524func (s AccessDeniedException) GoString() string {
6525	return s.String()
6526}
6527
6528func newErrorAccessDeniedException(v protocol.ResponseMetadata) error {
6529	return &AccessDeniedException{
6530		RespMetadata: v,
6531	}
6532}
6533
6534// Code returns the exception type name.
6535func (s *AccessDeniedException) Code() string {
6536	return "AccessDeniedException"
6537}
6538
6539// Message returns the exception's message.
6540func (s *AccessDeniedException) Message() string {
6541	if s.Message_ != nil {
6542		return *s.Message_
6543	}
6544	return ""
6545}
6546
// OrigErr always returns nil; it satisfies the awserr.Error interface.
6548func (s *AccessDeniedException) OrigErr() error {
6549	return nil
6550}
6551
6552func (s *AccessDeniedException) Error() string {
6553	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
6554}
6555
// StatusCode returns the HTTP status code for the request's response error.
6557func (s *AccessDeniedException) StatusCode() int {
6558	return s.RespMetadata.StatusCode
6559}
6560
// RequestID returns the service's response RequestID for the request.
6562func (s *AccessDeniedException) RequestID() string {
6563	return s.RespMetadata.RequestID
6564}
6565
6566// Structure containing the estimated age range, in years, for a face.
6567//
6568// Amazon Rekognition estimates an age range for faces detected in the input
6569// image. Estimated age ranges can overlap. A face of a 5-year-old might have
6570// an estimated range of 4-6, while the face of a 6-year-old might have an estimated
6571// range of 4-8.
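//
// A reading sketch; ageRange is an illustrative *AgeRange value, and
// aws.Int64Value is the SDK's nil-safe dereference helper:
//
//    fmt.Printf("estimated age: %d-%d years\n",
//        aws.Int64Value(ageRange.Low), aws.Int64Value(ageRange.High))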
6572type AgeRange struct {
6573	_ struct{} `type:"structure"`
6574
6575	// The highest estimated age.
6576	High *int64 `type:"integer"`
6577
6578	// The lowest estimated age.
6579	Low *int64 `type:"integer"`
6580}
6581
6582// String returns the string representation
6583func (s AgeRange) String() string {
6584	return awsutil.Prettify(s)
6585}
6586
6587// GoString returns the string representation
6588func (s AgeRange) GoString() string {
6589	return s.String()
6590}
6591
6592// SetHigh sets the High field's value.
6593func (s *AgeRange) SetHigh(v int64) *AgeRange {
6594	s.High = &v
6595	return s
6596}
6597
6598// SetLow sets the Low field's value.
6599func (s *AgeRange) SetLow(v int64) *AgeRange {
6600	s.Low = &v
6601	return s
6602}
6603
6604// Assets are the images that you use to train and evaluate a model version.
6605// Assets can also contain validation information that you use to debug a failed
6606// model training.
6607type Asset struct {
6608	_ struct{} `type:"structure"`
6609
	// The S3 bucket that contains an Amazon SageMaker Ground Truth format manifest
	// file.
6612	GroundTruthManifest *GroundTruthManifest `type:"structure"`
6613}
6614
6615// String returns the string representation
6616func (s Asset) String() string {
6617	return awsutil.Prettify(s)
6618}
6619
6620// GoString returns the string representation
6621func (s Asset) GoString() string {
6622	return s.String()
6623}
6624
6625// Validate inspects the fields of the type to determine if they are valid.
6626func (s *Asset) Validate() error {
6627	invalidParams := request.ErrInvalidParams{Context: "Asset"}
6628	if s.GroundTruthManifest != nil {
6629		if err := s.GroundTruthManifest.Validate(); err != nil {
6630			invalidParams.AddNested("GroundTruthManifest", err.(request.ErrInvalidParams))
6631		}
6632	}
6633
6634	if invalidParams.Len() > 0 {
6635		return invalidParams
6636	}
6637	return nil
6638}
6639
6640// SetGroundTruthManifest sets the GroundTruthManifest field's value.
6641func (s *Asset) SetGroundTruthManifest(v *GroundTruthManifest) *Asset {
6642	s.GroundTruthManifest = v
6643	return s
6644}
6645
6646// Metadata information about an audio stream. An array of AudioMetadata objects
6647// for the audio streams found in a stored video is returned by GetSegmentDetection.
6648type AudioMetadata struct {
6649	_ struct{} `type:"structure"`
6650
6651	// The audio codec used to encode or decode the audio stream.
6652	Codec *string `type:"string"`
6653
6654	// The duration of the audio stream in milliseconds.
6655	DurationMillis *int64 `type:"long"`
6656
6657	// The number of audio channels in the segment.
6658	NumberOfChannels *int64 `type:"long"`
6659
6660	// The sample rate for the audio stream.
6661	SampleRate *int64 `type:"long"`
6662}
6663
6664// String returns the string representation
6665func (s AudioMetadata) String() string {
6666	return awsutil.Prettify(s)
6667}
6668
6669// GoString returns the string representation
6670func (s AudioMetadata) GoString() string {
6671	return s.String()
6672}
6673
6674// SetCodec sets the Codec field's value.
6675func (s *AudioMetadata) SetCodec(v string) *AudioMetadata {
6676	s.Codec = &v
6677	return s
6678}
6679
6680// SetDurationMillis sets the DurationMillis field's value.
6681func (s *AudioMetadata) SetDurationMillis(v int64) *AudioMetadata {
6682	s.DurationMillis = &v
6683	return s
6684}
6685
6686// SetNumberOfChannels sets the NumberOfChannels field's value.
6687func (s *AudioMetadata) SetNumberOfChannels(v int64) *AudioMetadata {
6688	s.NumberOfChannels = &v
6689	return s
6690}
6691
6692// SetSampleRate sets the SampleRate field's value.
6693func (s *AudioMetadata) SetSampleRate(v int64) *AudioMetadata {
6694	s.SampleRate = &v
6695	return s
6696}
6697
6698// Indicates whether or not the face has a beard, and the confidence level in
6699// the determination.
6700type Beard struct {
6701	_ struct{} `type:"structure"`
6702
6703	// Level of confidence in the determination.
6704	Confidence *float64 `type:"float"`
6705
	// Boolean value that indicates whether the face has a beard.
6707	Value *bool `type:"boolean"`
6708}
6709
6710// String returns the string representation
6711func (s Beard) String() string {
6712	return awsutil.Prettify(s)
6713}
6714
6715// GoString returns the string representation
6716func (s Beard) GoString() string {
6717	return s.String()
6718}
6719
6720// SetConfidence sets the Confidence field's value.
6721func (s *Beard) SetConfidence(v float64) *Beard {
6722	s.Confidence = &v
6723	return s
6724}
6725
6726// SetValue sets the Value field's value.
6727func (s *Beard) SetValue(v bool) *Beard {
6728	s.Value = &v
6729	return s
6730}
6731
// Identifies the bounding box around the label, face, text, or personal protective
// equipment. The left (x-coordinate) and top (y-coordinate) values represent the
// left and top sides of the bounding box. Note that the upper-left corner of
// the image is the origin (0,0).
//
// The top and left values returned are ratios of the overall image size. For
// example, if the input image is 700x200 pixels, and the top-left coordinate
// of the bounding box is (350,50) pixels, the API returns a left value of 0.5
// (350/700) and a top value of 0.25 (50/200).
6741//
6742// The width and height values represent the dimensions of the bounding box
6743// as a ratio of the overall image dimension. For example, if the input image
6744// is 700x200 pixels, and the bounding box width is 70 pixels, the width returned
6745// is 0.1.
6746//
6747// The bounding box coordinates can have negative values. For example, if Amazon
6748// Rekognition is able to detect a face that is at the image edge and is only
6749// partially visible, the service can return coordinates that are outside the
6750// image bounds and, depending on the image edge, you might get negative values
6751// or values greater than 1 for the left or top values.
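//
// A conversion sketch; the image dimensions are illustrative. Multiplying each
// ratio by the corresponding image dimension recovers pixel coordinates:
//
//    imageWidth, imageHeight := 700.0, 200.0
//    left := aws.Float64Value(box.Left) * imageWidth     // e.g. 0.5 * 700 = 350
//    top := aws.Float64Value(box.Top) * imageHeight      // e.g. 0.25 * 200 = 50
//    width := aws.Float64Value(box.Width) * imageWidth   // e.g. 0.1 * 700 = 70
//    height := aws.Float64Value(box.Height) * imageHeight
//    fmt.Println(left, top, width, height)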
6752type BoundingBox struct {
6753	_ struct{} `type:"structure"`
6754
6755	// Height of the bounding box as a ratio of the overall image height.
6756	Height *float64 `type:"float"`
6757
6758	// Left coordinate of the bounding box as a ratio of overall image width.
6759	Left *float64 `type:"float"`
6760
6761	// Top coordinate of the bounding box as a ratio of overall image height.
6762	Top *float64 `type:"float"`
6763
6764	// Width of the bounding box as a ratio of the overall image width.
6765	Width *float64 `type:"float"`
6766}
6767
6768// String returns the string representation
6769func (s BoundingBox) String() string {
6770	return awsutil.Prettify(s)
6771}
6772
6773// GoString returns the string representation
6774func (s BoundingBox) GoString() string {
6775	return s.String()
6776}
6777
6778// SetHeight sets the Height field's value.
6779func (s *BoundingBox) SetHeight(v float64) *BoundingBox {
6780	s.Height = &v
6781	return s
6782}
6783
6784// SetLeft sets the Left field's value.
6785func (s *BoundingBox) SetLeft(v float64) *BoundingBox {
6786	s.Left = &v
6787	return s
6788}
6789
6790// SetTop sets the Top field's value.
6791func (s *BoundingBox) SetTop(v float64) *BoundingBox {
6792	s.Top = &v
6793	return s
6794}
6795
6796// SetWidth sets the Width field's value.
6797func (s *BoundingBox) SetWidth(v float64) *BoundingBox {
6798	s.Width = &v
6799	return s
6800}
6801
6802// Provides information about a celebrity recognized by the RecognizeCelebrities
6803// operation.
6804type Celebrity struct {
6805	_ struct{} `type:"structure"`
6806
6807	// Provides information about the celebrity's face, such as its location on
6808	// the image.
6809	Face *ComparedFace `type:"structure"`
6810
6811	// A unique identifier for the celebrity.
6812	Id *string `type:"string"`
6813
6814	// The confidence, in percentage, that Amazon Rekognition has that the recognized
6815	// face is the celebrity.
6816	MatchConfidence *float64 `type:"float"`
6817
6818	// The name of the celebrity.
6819	Name *string `type:"string"`
6820
6821	// An array of URLs pointing to additional information about the celebrity.
6822	// If there is no additional information about the celebrity, this list is empty.
6823	Urls []*string `type:"list"`
6824}
6825
6826// String returns the string representation
6827func (s Celebrity) String() string {
6828	return awsutil.Prettify(s)
6829}
6830
6831// GoString returns the string representation
6832func (s Celebrity) GoString() string {
6833	return s.String()
6834}
6835
6836// SetFace sets the Face field's value.
6837func (s *Celebrity) SetFace(v *ComparedFace) *Celebrity {
6838	s.Face = v
6839	return s
6840}
6841
6842// SetId sets the Id field's value.
6843func (s *Celebrity) SetId(v string) *Celebrity {
6844	s.Id = &v
6845	return s
6846}
6847
6848// SetMatchConfidence sets the MatchConfidence field's value.
6849func (s *Celebrity) SetMatchConfidence(v float64) *Celebrity {
6850	s.MatchConfidence = &v
6851	return s
6852}
6853
6854// SetName sets the Name field's value.
6855func (s *Celebrity) SetName(v string) *Celebrity {
6856	s.Name = &v
6857	return s
6858}
6859
6860// SetUrls sets the Urls field's value.
6861func (s *Celebrity) SetUrls(v []*string) *Celebrity {
6862	s.Urls = v
6863	return s
6864}
6865
6866// Information about a recognized celebrity.
6867type CelebrityDetail struct {
6868	_ struct{} `type:"structure"`
6869
6870	// Bounding box around the body of a celebrity.
6871	BoundingBox *BoundingBox `type:"structure"`
6872
6873	// The confidence, in percentage, that Amazon Rekognition has that the recognized
6874	// face is the celebrity.
6875	Confidence *float64 `type:"float"`
6876
6877	// Face details for the recognized celebrity.
6878	Face *FaceDetail `type:"structure"`
6879
6880	// The unique identifier for the celebrity.
6881	Id *string `type:"string"`
6882
6883	// The name of the celebrity.
6884	Name *string `type:"string"`
6885
6886	// An array of URLs pointing to additional celebrity information.
6887	Urls []*string `type:"list"`
6888}
6889
6890// String returns the string representation
6891func (s CelebrityDetail) String() string {
6892	return awsutil.Prettify(s)
6893}
6894
6895// GoString returns the string representation
6896func (s CelebrityDetail) GoString() string {
6897	return s.String()
6898}
6899
6900// SetBoundingBox sets the BoundingBox field's value.
6901func (s *CelebrityDetail) SetBoundingBox(v *BoundingBox) *CelebrityDetail {
6902	s.BoundingBox = v
6903	return s
6904}
6905
6906// SetConfidence sets the Confidence field's value.
6907func (s *CelebrityDetail) SetConfidence(v float64) *CelebrityDetail {
6908	s.Confidence = &v
6909	return s
6910}
6911
6912// SetFace sets the Face field's value.
6913func (s *CelebrityDetail) SetFace(v *FaceDetail) *CelebrityDetail {
6914	s.Face = v
6915	return s
6916}
6917
6918// SetId sets the Id field's value.
6919func (s *CelebrityDetail) SetId(v string) *CelebrityDetail {
6920	s.Id = &v
6921	return s
6922}
6923
6924// SetName sets the Name field's value.
6925func (s *CelebrityDetail) SetName(v string) *CelebrityDetail {
6926	s.Name = &v
6927	return s
6928}
6929
6930// SetUrls sets the Urls field's value.
6931func (s *CelebrityDetail) SetUrls(v []*string) *CelebrityDetail {
6932	s.Urls = v
6933	return s
6934}
6935
6936// Information about a detected celebrity and the time the celebrity was detected
6937// in a stored video. For more information, see GetCelebrityRecognition in the
6938// Amazon Rekognition Developer Guide.
6939type CelebrityRecognition struct {
6940	_ struct{} `type:"structure"`
6941
6942	// Information about a recognized celebrity.
6943	Celebrity *CelebrityDetail `type:"structure"`
6944
6945	// The time, in milliseconds from the start of the video, that the celebrity
6946	// was recognized.
6947	Timestamp *int64 `type:"long"`
6948}
6949
6950// String returns the string representation
6951func (s CelebrityRecognition) String() string {
6952	return awsutil.Prettify(s)
6953}
6954
6955// GoString returns the string representation
6956func (s CelebrityRecognition) GoString() string {
6957	return s.String()
6958}
6959
6960// SetCelebrity sets the Celebrity field's value.
6961func (s *CelebrityRecognition) SetCelebrity(v *CelebrityDetail) *CelebrityRecognition {
6962	s.Celebrity = v
6963	return s
6964}
6965
6966// SetTimestamp sets the Timestamp field's value.
6967func (s *CelebrityRecognition) SetTimestamp(v int64) *CelebrityRecognition {
6968	s.Timestamp = &v
6969	return s
6970}
6971
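// Input for the CompareFaces operation. A construction sketch; the bucket and
// object names are illustrative:
//
//    input := &rekognition.CompareFacesInput{
//        SourceImage: &rekognition.Image{
//            S3Object: &rekognition.S3Object{
//                Bucket: aws.String("my-bucket"),
//                Name:   aws.String("source.jpg"),
//            },
//        },
//        TargetImage: &rekognition.Image{
//            S3Object: &rekognition.S3Object{
//                Bucket: aws.String("my-bucket"),
//                Name:   aws.String("target.jpg"),
//            },
//        },
//    }
//    input.SetSimilarityThreshold(90)
//    if err := input.Validate(); err != nil {
//        fmt.Println("invalid input:", err) // client-side validation before sending
//    }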
6972type CompareFacesInput struct {
6973	_ struct{} `type:"structure"`
6974
6975	// A filter that specifies a quality bar for how much filtering is done to identify
6976	// faces. Filtered faces aren't compared. If you specify AUTO, Amazon Rekognition
6977	// chooses the quality bar. If you specify LOW, MEDIUM, or HIGH, filtering removes
6978	// all faces that don’t meet the chosen quality bar. The quality bar is based
6979	// on a variety of common use cases. Low-quality detections can occur for a
6980	// number of reasons. Some examples are an object that's misidentified as a
6981	// face, a face that's too blurry, or a face with a pose that's too extreme
6982	// to use. If you specify NONE, no filtering is performed. The default value
6983	// is NONE.
6984	//
6985	// To use quality filtering, the collection you are using must be associated
6986	// with version 3 of the face model or higher.
6987	QualityFilter *string `type:"string" enum:"QualityFilter"`
6988
6989	// The minimum level of confidence in the face matches that a match must meet
6990	// to be included in the FaceMatches array.
6991	SimilarityThreshold *float64 `type:"float"`
6992
6993	// The input image as base64-encoded bytes or an S3 object. If you use the AWS
6994	// CLI to call Amazon Rekognition operations, passing base64-encoded image bytes
6995	// is not supported.
6996	//
6997	// If you are using an AWS SDK to call Amazon Rekognition, you might not need
6998	// to base64-encode image bytes passed using the Bytes field. For more information,
6999	// see Images in the Amazon Rekognition developer guide.
7000	//
7001	// SourceImage is a required field
7002	SourceImage *Image `type:"structure" required:"true"`
7003
7004	// The target image as base64-encoded bytes or an S3 object. If you use the
7005	// AWS CLI to call Amazon Rekognition operations, passing base64-encoded image
7006	// bytes is not supported.
7007	//
7008	// If you are using an AWS SDK to call Amazon Rekognition, you might not need
7009	// to base64-encode image bytes passed using the Bytes field. For more information,
7010	// see Images in the Amazon Rekognition developer guide.
7011	//
7012	// TargetImage is a required field
7013	TargetImage *Image `type:"structure" required:"true"`
7014}
7015
7016// String returns the string representation
7017func (s CompareFacesInput) String() string {
7018	return awsutil.Prettify(s)
7019}
7020
7021// GoString returns the string representation
7022func (s CompareFacesInput) GoString() string {
7023	return s.String()
7024}
7025
7026// Validate inspects the fields of the type to determine if they are valid.
7027func (s *CompareFacesInput) Validate() error {
7028	invalidParams := request.ErrInvalidParams{Context: "CompareFacesInput"}
7029	if s.SourceImage == nil {
7030		invalidParams.Add(request.NewErrParamRequired("SourceImage"))
7031	}
7032	if s.TargetImage == nil {
7033		invalidParams.Add(request.NewErrParamRequired("TargetImage"))
7034	}
7035	if s.SourceImage != nil {
7036		if err := s.SourceImage.Validate(); err != nil {
7037			invalidParams.AddNested("SourceImage", err.(request.ErrInvalidParams))
7038		}
7039	}
7040	if s.TargetImage != nil {
7041		if err := s.TargetImage.Validate(); err != nil {
7042			invalidParams.AddNested("TargetImage", err.(request.ErrInvalidParams))
7043		}
7044	}
7045
7046	if invalidParams.Len() > 0 {
7047		return invalidParams
7048	}
7049	return nil
7050}
7051
7052// SetQualityFilter sets the QualityFilter field's value.
7053func (s *CompareFacesInput) SetQualityFilter(v string) *CompareFacesInput {
7054	s.QualityFilter = &v
7055	return s
7056}
7057
7058// SetSimilarityThreshold sets the SimilarityThreshold field's value.
7059func (s *CompareFacesInput) SetSimilarityThreshold(v float64) *CompareFacesInput {
7060	s.SimilarityThreshold = &v
7061	return s
7062}
7063
7064// SetSourceImage sets the SourceImage field's value.
7065func (s *CompareFacesInput) SetSourceImage(v *Image) *CompareFacesInput {
7066	s.SourceImage = v
7067	return s
7068}
7069
7070// SetTargetImage sets the TargetImage field's value.
7071func (s *CompareFacesInput) SetTargetImage(v *Image) *CompareFacesInput {
7072	s.TargetImage = v
7073	return s
7074}
7075
7076// Provides information about a face in a target image that matches the source
7077// image face analyzed by CompareFaces. The Face property contains the bounding
7078// box of the face in the target image. The Similarity property is the confidence
7079// that the source image face matches the face in the bounding box.
7080type CompareFacesMatch struct {
7081	_ struct{} `type:"structure"`
7082
7083	// Provides face metadata (bounding box and confidence that the bounding box
7084	// actually contains a face).
7085	Face *ComparedFace `type:"structure"`
7086
7087	// Level of confidence that the faces match.
7088	Similarity *float64 `type:"float"`
7089}
7090
7091// String returns the string representation
7092func (s CompareFacesMatch) String() string {
7093	return awsutil.Prettify(s)
7094}
7095
7096// GoString returns the string representation
7097func (s CompareFacesMatch) GoString() string {
7098	return s.String()
7099}
7100
7101// SetFace sets the Face field's value.
7102func (s *CompareFacesMatch) SetFace(v *ComparedFace) *CompareFacesMatch {
7103	s.Face = v
7104	return s
7105}
7106
7107// SetSimilarity sets the Similarity field's value.
7108func (s *CompareFacesMatch) SetSimilarity(v float64) *CompareFacesMatch {
7109	s.Similarity = &v
7110	return s
7111}
7112
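// Output of the CompareFaces operation. An iteration sketch; out stands for
// the value returned by a CompareFaces call:
//
//    for _, match := range out.FaceMatches {
//        fmt.Printf("similarity %.1f%%, box confidence %.1f%%\n",
//            aws.Float64Value(match.Similarity),
//            aws.Float64Value(match.Face.Confidence))
//    }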
7113type CompareFacesOutput struct {
7114	_ struct{} `type:"structure"`
7115
7116	// An array of faces in the target image that match the source image face. Each
7117	// CompareFacesMatch object provides the bounding box, the confidence level
7118	// that the bounding box contains a face, and the similarity score for the face
7119	// in the bounding box and the face in the source image.
7120	FaceMatches []*CompareFacesMatch `type:"list"`
7121
7122	// The face in the source image that was used for comparison.
7123	SourceImageFace *ComparedSourceImageFace `type:"structure"`
7124
7125	// The value of SourceImageOrientationCorrection is always null.
7126	//
7127	// If the input image is in .jpeg format, it might contain exchangeable image
7128	// file format (Exif) metadata that includes the image's orientation. Amazon
7129	// Rekognition uses this orientation information to perform image correction.
7130	// The bounding box coordinates are translated to represent object locations
7131	// after the orientation information in the Exif metadata is used to correct
7132	// the image orientation. Images in .png format don't contain Exif metadata.
7133	//
7134	// Amazon Rekognition doesn’t perform image correction for images in .png
7135	// format and .jpeg images without orientation information in the image Exif
7136	// metadata. The bounding box coordinates aren't translated and represent the
7137	// object locations before the image is rotated.
7138	SourceImageOrientationCorrection *string `type:"string" enum:"OrientationCorrection"`
7139
7140	// The value of TargetImageOrientationCorrection is always null.
7141	//
7142	// If the input image is in .jpeg format, it might contain exchangeable image
7143	// file format (Exif) metadata that includes the image's orientation. Amazon
7144	// Rekognition uses this orientation information to perform image correction.
7145	// The bounding box coordinates are translated to represent object locations
7146	// after the orientation information in the Exif metadata is used to correct
7147	// the image orientation. Images in .png format don't contain Exif metadata.
7148	//
7149	// Amazon Rekognition doesn’t perform image correction for images in .png
7150	// format and .jpeg images without orientation information in the image Exif
7151	// metadata. The bounding box coordinates aren't translated and represent the
7152	// object locations before the image is rotated.
7153	TargetImageOrientationCorrection *string `type:"string" enum:"OrientationCorrection"`
7154
7155	// An array of faces in the target image that did not match the source image
7156	// face.
7157	UnmatchedFaces []*ComparedFace `type:"list"`
7158}
7159
7160// String returns the string representation
7161func (s CompareFacesOutput) String() string {
7162	return awsutil.Prettify(s)
7163}
7164
7165// GoString returns the string representation
7166func (s CompareFacesOutput) GoString() string {
7167	return s.String()
7168}
7169
7170// SetFaceMatches sets the FaceMatches field's value.
7171func (s *CompareFacesOutput) SetFaceMatches(v []*CompareFacesMatch) *CompareFacesOutput {
7172	s.FaceMatches = v
7173	return s
7174}
7175
7176// SetSourceImageFace sets the SourceImageFace field's value.
7177func (s *CompareFacesOutput) SetSourceImageFace(v *ComparedSourceImageFace) *CompareFacesOutput {
7178	s.SourceImageFace = v
7179	return s
7180}
7181
7182// SetSourceImageOrientationCorrection sets the SourceImageOrientationCorrection field's value.
7183func (s *CompareFacesOutput) SetSourceImageOrientationCorrection(v string) *CompareFacesOutput {
7184	s.SourceImageOrientationCorrection = &v
7185	return s
7186}
7187
7188// SetTargetImageOrientationCorrection sets the TargetImageOrientationCorrection field's value.
7189func (s *CompareFacesOutput) SetTargetImageOrientationCorrection(v string) *CompareFacesOutput {
7190	s.TargetImageOrientationCorrection = &v
7191	return s
7192}
7193
7194// SetUnmatchedFaces sets the UnmatchedFaces field's value.
7195func (s *CompareFacesOutput) SetUnmatchedFaces(v []*ComparedFace) *CompareFacesOutput {
7196	s.UnmatchedFaces = v
7197	return s
7198}
7199
7200// Provides face metadata for target image faces that are analyzed by CompareFaces
7201// and RecognizeCelebrities.
7202type ComparedFace struct {
7203	_ struct{} `type:"structure"`
7204
7205	// Bounding box of the face.
7206	BoundingBox *BoundingBox `type:"structure"`
7207
	// Level of confidence that the bounding box contains a face.
7209	Confidence *float64 `type:"float"`
7210
7211	// An array of facial landmarks.
7212	Landmarks []*Landmark `type:"list"`
7213
7214	// Indicates the pose of the face as determined by its pitch, roll, and yaw.
7215	Pose *Pose `type:"structure"`
7216
7217	// Identifies face image brightness and sharpness.
7218	Quality *ImageQuality `type:"structure"`
7219}
7220
7221// String returns the string representation
7222func (s ComparedFace) String() string {
7223	return awsutil.Prettify(s)
7224}
7225
7226// GoString returns the string representation
7227func (s ComparedFace) GoString() string {
7228	return s.String()
7229}
7230
7231// SetBoundingBox sets the BoundingBox field's value.
7232func (s *ComparedFace) SetBoundingBox(v *BoundingBox) *ComparedFace {
7233	s.BoundingBox = v
7234	return s
7235}
7236
7237// SetConfidence sets the Confidence field's value.
7238func (s *ComparedFace) SetConfidence(v float64) *ComparedFace {
7239	s.Confidence = &v
7240	return s
7241}
7242
7243// SetLandmarks sets the Landmarks field's value.
7244func (s *ComparedFace) SetLandmarks(v []*Landmark) *ComparedFace {
7245	s.Landmarks = v
7246	return s
7247}
7248
7249// SetPose sets the Pose field's value.
7250func (s *ComparedFace) SetPose(v *Pose) *ComparedFace {
7251	s.Pose = v
7252	return s
7253}
7254
7255// SetQuality sets the Quality field's value.
7256func (s *ComparedFace) SetQuality(v *ImageQuality) *ComparedFace {
7257	s.Quality = v
7258	return s
7259}
7260
7261// Type that describes the face Amazon Rekognition chose to compare with the
7262// faces in the target. This contains a bounding box for the selected face and
7263// confidence level that the bounding box contains a face. Note that Amazon
7264// Rekognition selects the largest face in the source image for this comparison.
7265type ComparedSourceImageFace struct {
7266	_ struct{} `type:"structure"`
7267
7268	// Bounding box of the face.
7269	BoundingBox *BoundingBox `type:"structure"`
7270
7271	// Confidence level that the selected bounding box contains a face.
7272	Confidence *float64 `type:"float"`
7273}
7274
7275// String returns the string representation
7276func (s ComparedSourceImageFace) String() string {
7277	return awsutil.Prettify(s)
7278}
7279
7280// GoString returns the string representation
7281func (s ComparedSourceImageFace) GoString() string {
7282	return s.String()
7283}
7284
7285// SetBoundingBox sets the BoundingBox field's value.
7286func (s *ComparedSourceImageFace) SetBoundingBox(v *BoundingBox) *ComparedSourceImageFace {
7287	s.BoundingBox = v
7288	return s
7289}
7290
7291// SetConfidence sets the Confidence field's value.
7292func (s *ComparedSourceImageFace) SetConfidence(v float64) *ComparedSourceImageFace {
7293	s.Confidence = &v
7294	return s
7295}
7296
7297// Information about an unsafe content label detection in a stored video.
7298type ContentModerationDetection struct {
7299	_ struct{} `type:"structure"`
7300
	// The unsafe content label detected in the stored video.
7302	ModerationLabel *ModerationLabel `type:"structure"`
7303
7304	// Time, in milliseconds from the beginning of the video, that the unsafe content
7305	// label was detected.
7306	Timestamp *int64 `type:"long"`
7307}
7308
7309// String returns the string representation
7310func (s ContentModerationDetection) String() string {
7311	return awsutil.Prettify(s)
7312}
7313
7314// GoString returns the string representation
7315func (s ContentModerationDetection) GoString() string {
7316	return s.String()
7317}
7318
7319// SetModerationLabel sets the ModerationLabel field's value.
7320func (s *ContentModerationDetection) SetModerationLabel(v *ModerationLabel) *ContentModerationDetection {
7321	s.ModerationLabel = v
7322	return s
7323}
7324
7325// SetTimestamp sets the Timestamp field's value.
7326func (s *ContentModerationDetection) SetTimestamp(v int64) *ContentModerationDetection {
7327	s.Timestamp = &v
7328	return s
7329}
7330
7331// Information about an item of Personal Protective Equipment covering a corresponding
7332// body part. For more information, see DetectProtectiveEquipment.
7333type CoversBodyPart struct {
7334	_ struct{} `type:"structure"`
7335
7336	// The confidence that Amazon Rekognition has in the value of Value.
7337	Confidence *float64 `type:"float"`
7338
7339	// True if the PPE covers the corresponding body part, otherwise false.
7340	Value *bool `type:"boolean"`
7341}
7342
7343// String returns the string representation
7344func (s CoversBodyPart) String() string {
7345	return awsutil.Prettify(s)
7346}
7347
7348// GoString returns the string representation
7349func (s CoversBodyPart) GoString() string {
7350	return s.String()
7351}
7352
7353// SetConfidence sets the Confidence field's value.
7354func (s *CoversBodyPart) SetConfidence(v float64) *CoversBodyPart {
7355	s.Confidence = &v
7356	return s
7357}
7358
7359// SetValue sets the Value field's value.
7360func (s *CoversBodyPart) SetValue(v bool) *CoversBodyPart {
7361	s.Value = &v
7362	return s
7363}
7364
7365type CreateCollectionInput struct {
7366	_ struct{} `type:"structure"`
7367
7368	// ID for the collection that you are creating.
7369	//
7370	// CollectionId is a required field
7371	CollectionId *string `min:"1" type:"string" required:"true"`
7372}
7373
7374// String returns the string representation
7375func (s CreateCollectionInput) String() string {
7376	return awsutil.Prettify(s)
7377}
7378
7379// GoString returns the string representation
7380func (s CreateCollectionInput) GoString() string {
7381	return s.String()
7382}
7383
7384// Validate inspects the fields of the type to determine if they are valid.
7385func (s *CreateCollectionInput) Validate() error {
7386	invalidParams := request.ErrInvalidParams{Context: "CreateCollectionInput"}
7387	if s.CollectionId == nil {
7388		invalidParams.Add(request.NewErrParamRequired("CollectionId"))
7389	}
7390	if s.CollectionId != nil && len(*s.CollectionId) < 1 {
7391		invalidParams.Add(request.NewErrParamMinLen("CollectionId", 1))
7392	}
7393
7394	if invalidParams.Len() > 0 {
7395		return invalidParams
7396	}
7397	return nil
7398}
7399
7400// SetCollectionId sets the CollectionId field's value.
7401func (s *CreateCollectionInput) SetCollectionId(v string) *CreateCollectionInput {
7402	s.CollectionId = &v
7403	return s
7404}
7405
7406type CreateCollectionOutput struct {
7407	_ struct{} `type:"structure"`
7408
7409	// Amazon Resource Name (ARN) of the collection. You can use this to manage
7410	// permissions on your resources.
7411	CollectionArn *string `type:"string"`
7412
7413	// Version number of the face detection model associated with the collection
7414	// you are creating.
7415	FaceModelVersion *string `type:"string"`
7416
7417	// HTTP status code indicating the result of the operation.
7418	StatusCode *int64 `type:"integer"`
7419}
7420
7421// String returns the string representation
7422func (s CreateCollectionOutput) String() string {
7423	return awsutil.Prettify(s)
7424}
7425
7426// GoString returns the string representation
7427func (s CreateCollectionOutput) GoString() string {
7428	return s.String()
7429}
7430
7431// SetCollectionArn sets the CollectionArn field's value.
7432func (s *CreateCollectionOutput) SetCollectionArn(v string) *CreateCollectionOutput {
7433	s.CollectionArn = &v
7434	return s
7435}
7436
7437// SetFaceModelVersion sets the FaceModelVersion field's value.
7438func (s *CreateCollectionOutput) SetFaceModelVersion(v string) *CreateCollectionOutput {
7439	s.FaceModelVersion = &v
7440	return s
7441}
7442
7443// SetStatusCode sets the StatusCode field's value.
7444func (s *CreateCollectionOutput) SetStatusCode(v int64) *CreateCollectionOutput {
7445	s.StatusCode = &v
7446	return s
7447}
7448
7449type CreateProjectInput struct {
7450	_ struct{} `type:"structure"`
7451
7452	// The name of the project to create.
7453	//
7454	// ProjectName is a required field
7455	ProjectName *string `min:"1" type:"string" required:"true"`
7456}
7457
7458// String returns the string representation
7459func (s CreateProjectInput) String() string {
7460	return awsutil.Prettify(s)
7461}
7462
7463// GoString returns the string representation
7464func (s CreateProjectInput) GoString() string {
7465	return s.String()
7466}
7467
7468// Validate inspects the fields of the type to determine if they are valid.
7469func (s *CreateProjectInput) Validate() error {
7470	invalidParams := request.ErrInvalidParams{Context: "CreateProjectInput"}
7471	if s.ProjectName == nil {
7472		invalidParams.Add(request.NewErrParamRequired("ProjectName"))
7473	}
7474	if s.ProjectName != nil && len(*s.ProjectName) < 1 {
7475		invalidParams.Add(request.NewErrParamMinLen("ProjectName", 1))
7476	}
7477
7478	if invalidParams.Len() > 0 {
7479		return invalidParams
7480	}
7481	return nil
7482}
7483
7484// SetProjectName sets the ProjectName field's value.
7485func (s *CreateProjectInput) SetProjectName(v string) *CreateProjectInput {
7486	s.ProjectName = &v
7487	return s
7488}
7489
7490type CreateProjectOutput struct {
7491	_ struct{} `type:"structure"`
7492
7493	// The Amazon Resource Name (ARN) of the new project. You can use the ARN to
7494	// configure IAM access to the project.
7495	ProjectArn *string `min:"20" type:"string"`
7496}
7497
7498// String returns the string representation
7499func (s CreateProjectOutput) String() string {
7500	return awsutil.Prettify(s)
7501}
7502
7503// GoString returns the string representation
7504func (s CreateProjectOutput) GoString() string {
7505	return s.String()
7506}
7507
7508// SetProjectArn sets the ProjectArn field's value.
7509func (s *CreateProjectOutput) SetProjectArn(v string) *CreateProjectOutput {
7510	s.ProjectArn = &v
7511	return s
7512}
7513
7514type CreateProjectVersionInput struct {
7515	_ struct{} `type:"structure"`
7516
7517	// The Amazon S3 location to store the results of training.
7518	//
7519	// OutputConfig is a required field
7520	OutputConfig *OutputConfig `type:"structure" required:"true"`
7521
7522	// The ARN of the Amazon Rekognition Custom Labels project that manages the
7523	// model that you want to train.
7524	//
7525	// ProjectArn is a required field
7526	ProjectArn *string `min:"20" type:"string" required:"true"`
7527
7528	// The dataset to use for testing.
7529	//
7530	// TestingData is a required field
7531	TestingData *TestingData `type:"structure" required:"true"`
7532
7533	// The dataset to use for training.
7534	//
7535	// TrainingData is a required field
7536	TrainingData *TrainingData `type:"structure" required:"true"`
7537
7538	// A name for the version of the model. This value must be unique.
7539	//
7540	// VersionName is a required field
7541	VersionName *string `min:"1" type:"string" required:"true"`
7542}
7543
7544// String returns the string representation
7545func (s CreateProjectVersionInput) String() string {
7546	return awsutil.Prettify(s)
7547}
7548
7549// GoString returns the string representation
7550func (s CreateProjectVersionInput) GoString() string {
7551	return s.String()
7552}
7553
7554// Validate inspects the fields of the type to determine if they are valid.
7555func (s *CreateProjectVersionInput) Validate() error {
7556	invalidParams := request.ErrInvalidParams{Context: "CreateProjectVersionInput"}
7557	if s.OutputConfig == nil {
7558		invalidParams.Add(request.NewErrParamRequired("OutputConfig"))
7559	}
7560	if s.ProjectArn == nil {
7561		invalidParams.Add(request.NewErrParamRequired("ProjectArn"))
7562	}
7563	if s.ProjectArn != nil && len(*s.ProjectArn) < 20 {
7564		invalidParams.Add(request.NewErrParamMinLen("ProjectArn", 20))
7565	}
7566	if s.TestingData == nil {
7567		invalidParams.Add(request.NewErrParamRequired("TestingData"))
7568	}
7569	if s.TrainingData == nil {
7570		invalidParams.Add(request.NewErrParamRequired("TrainingData"))
7571	}
7572	if s.VersionName == nil {
7573		invalidParams.Add(request.NewErrParamRequired("VersionName"))
7574	}
7575	if s.VersionName != nil && len(*s.VersionName) < 1 {
7576		invalidParams.Add(request.NewErrParamMinLen("VersionName", 1))
7577	}
7578	if s.OutputConfig != nil {
7579		if err := s.OutputConfig.Validate(); err != nil {
7580			invalidParams.AddNested("OutputConfig", err.(request.ErrInvalidParams))
7581		}
7582	}
7583	if s.TestingData != nil {
7584		if err := s.TestingData.Validate(); err != nil {
7585			invalidParams.AddNested("TestingData", err.(request.ErrInvalidParams))
7586		}
7587	}
7588	if s.TrainingData != nil {
7589		if err := s.TrainingData.Validate(); err != nil {
7590			invalidParams.AddNested("TrainingData", err.(request.ErrInvalidParams))
7591		}
7592	}
7593
7594	if invalidParams.Len() > 0 {
7595		return invalidParams
7596	}
7597	return nil
7598}
7599
7600// SetOutputConfig sets the OutputConfig field's value.
7601func (s *CreateProjectVersionInput) SetOutputConfig(v *OutputConfig) *CreateProjectVersionInput {
7602	s.OutputConfig = v
7603	return s
7604}
7605
7606// SetProjectArn sets the ProjectArn field's value.
7607func (s *CreateProjectVersionInput) SetProjectArn(v string) *CreateProjectVersionInput {
7608	s.ProjectArn = &v
7609	return s
7610}
7611
7612// SetTestingData sets the TestingData field's value.
7613func (s *CreateProjectVersionInput) SetTestingData(v *TestingData) *CreateProjectVersionInput {
7614	s.TestingData = v
7615	return s
7616}
7617
7618// SetTrainingData sets the TrainingData field's value.
7619func (s *CreateProjectVersionInput) SetTrainingData(v *TrainingData) *CreateProjectVersionInput {
7620	s.TrainingData = v
7621	return s
7622}
7623
7624// SetVersionName sets the VersionName field's value.
7625func (s *CreateProjectVersionInput) SetVersionName(v string) *CreateProjectVersionInput {
7626	s.VersionName = &v
7627	return s
7628}
7629
7630type CreateProjectVersionOutput struct {
7631	_ struct{} `type:"structure"`
7632
7633	// The ARN of the model version that was created. Use DescribeProjectVersion
7634	// to get the current status of the training operation.
7635	ProjectVersionArn *string `min:"20" type:"string"`
7636}
7637
7638// String returns the string representation
7639func (s CreateProjectVersionOutput) String() string {
7640	return awsutil.Prettify(s)
7641}
7642
7643// GoString returns the string representation
7644func (s CreateProjectVersionOutput) GoString() string {
7645	return s.String()
7646}
7647
7648// SetProjectVersionArn sets the ProjectVersionArn field's value.
7649func (s *CreateProjectVersionOutput) SetProjectVersionArn(v string) *CreateProjectVersionOutput {
7650	s.ProjectVersionArn = &v
7651	return s
7652}
7653
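// Input for the CreateStreamProcessor operation. A construction sketch; the
// ARNs, names, and collection ID are illustrative:
//
//    input := &rekognition.CreateStreamProcessorInput{
//        Name:    aws.String("my-stream-processor"),
//        RoleArn: aws.String("arn:aws:iam::123456789012:role/RekognitionRole"),
//        Input: &rekognition.StreamProcessorInput{
//            KinesisVideoStream: &rekognition.KinesisVideoStream{
//                Arn: aws.String("arn:aws:kinesisvideo:us-east-1:123456789012:stream/input-stream"),
//            },
//        },
//        Output: &rekognition.StreamProcessorOutput{
//            KinesisDataStream: &rekognition.KinesisDataStream{
//                Arn: aws.String("arn:aws:kinesis:us-east-1:123456789012:stream/output-stream"),
//            },
//        },
//        Settings: &rekognition.StreamProcessorSettings{
//            FaceSearch: &rekognition.FaceSearchSettings{
//                CollectionId:       aws.String("my-collection"),
//                FaceMatchThreshold: aws.Float64(80),
//            },
//        },
//    }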
7654type CreateStreamProcessorInput struct {
7655	_ struct{} `type:"structure"`
7656
	// Kinesis video stream that provides the source streaming video. If you are
	// using the AWS CLI, the parameter name is StreamProcessorInput.
7659	//
7660	// Input is a required field
7661	Input *StreamProcessorInput `type:"structure" required:"true"`
7662
7663	// An identifier you assign to the stream processor. You can use Name to manage
7664	// the stream processor. For example, you can get the current status of the
7665	// stream processor by calling DescribeStreamProcessor. Name is idempotent.
7666	//
7667	// Name is a required field
7668	Name *string `min:"1" type:"string" required:"true"`
7669
	// Kinesis data stream to which Amazon Rekognition Video puts the analysis
	// results. If you are using the AWS CLI, the parameter name is StreamProcessorOutput.
7672	//
7673	// Output is a required field
7674	Output *StreamProcessorOutput `type:"structure" required:"true"`
7675
7676	// ARN of the IAM role that allows access to the stream processor.
7677	//
7678	// RoleArn is a required field
7679	RoleArn *string `type:"string" required:"true"`
7680
7681	// Face recognition input parameters to be used by the stream processor. Includes
7682	// the collection to use for face recognition and the face attributes to detect.
7683	//
7684	// Settings is a required field
7685	Settings *StreamProcessorSettings `type:"structure" required:"true"`
7686}
7687
7688// String returns the string representation
7689func (s CreateStreamProcessorInput) String() string {
7690	return awsutil.Prettify(s)
7691}
7692
7693// GoString returns the string representation
7694func (s CreateStreamProcessorInput) GoString() string {
7695	return s.String()
7696}
7697
7698// Validate inspects the fields of the type to determine if they are valid.
7699func (s *CreateStreamProcessorInput) Validate() error {
7700	invalidParams := request.ErrInvalidParams{Context: "CreateStreamProcessorInput"}
7701	if s.Input == nil {
7702		invalidParams.Add(request.NewErrParamRequired("Input"))
7703	}
7704	if s.Name == nil {
7705		invalidParams.Add(request.NewErrParamRequired("Name"))
7706	}
7707	if s.Name != nil && len(*s.Name) < 1 {
7708		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
7709	}
7710	if s.Output == nil {
7711		invalidParams.Add(request.NewErrParamRequired("Output"))
7712	}
7713	if s.RoleArn == nil {
7714		invalidParams.Add(request.NewErrParamRequired("RoleArn"))
7715	}
7716	if s.Settings == nil {
7717		invalidParams.Add(request.NewErrParamRequired("Settings"))
7718	}
7719	if s.Settings != nil {
7720		if err := s.Settings.Validate(); err != nil {
7721			invalidParams.AddNested("Settings", err.(request.ErrInvalidParams))
7722		}
7723	}
7724
7725	if invalidParams.Len() > 0 {
7726		return invalidParams
7727	}
7728	return nil
7729}
7730
7731// SetInput sets the Input field's value.
7732func (s *CreateStreamProcessorInput) SetInput(v *StreamProcessorInput) *CreateStreamProcessorInput {
7733	s.Input = v
7734	return s
7735}
7736
7737// SetName sets the Name field's value.
7738func (s *CreateStreamProcessorInput) SetName(v string) *CreateStreamProcessorInput {
7739	s.Name = &v
7740	return s
7741}
7742
7743// SetOutput sets the Output field's value.
7744func (s *CreateStreamProcessorInput) SetOutput(v *StreamProcessorOutput) *CreateStreamProcessorInput {
7745	s.Output = v
7746	return s
7747}
7748
7749// SetRoleArn sets the RoleArn field's value.
7750func (s *CreateStreamProcessorInput) SetRoleArn(v string) *CreateStreamProcessorInput {
7751	s.RoleArn = &v
7752	return s
7753}
7754
7755// SetSettings sets the Settings field's value.
7756func (s *CreateStreamProcessorInput) SetSettings(v *StreamProcessorSettings) *CreateStreamProcessorInput {
7757	s.Settings = v
7758	return s
7759}
7760
7761type CreateStreamProcessorOutput struct {
7762	_ struct{} `type:"structure"`
7763
	// ARN for the newly created stream processor.
7765	StreamProcessorArn *string `type:"string"`
7766}
7767
7768// String returns the string representation
7769func (s CreateStreamProcessorOutput) String() string {
7770	return awsutil.Prettify(s)
7771}
7772
7773// GoString returns the string representation
7774func (s CreateStreamProcessorOutput) GoString() string {
7775	return s.String()
7776}
7777
7778// SetStreamProcessorArn sets the StreamProcessorArn field's value.
7779func (s *CreateStreamProcessorOutput) SetStreamProcessorArn(v string) *CreateStreamProcessorOutput {
7780	s.StreamProcessorArn = &v
7781	return s
7782}
7783
7784// A custom label detected in an image by a call to DetectCustomLabels.
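//
// A filtering sketch; the 90% bar and the DetectCustomLabels output variable
// are illustrative:
//
//    for _, label := range out.CustomLabels {
//        if aws.Float64Value(label.Confidence) >= 90 {
//            fmt.Println(aws.StringValue(label.Name))
//        }
//    }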
7785type CustomLabel struct {
7786	_ struct{} `type:"structure"`
7787
7788	// The confidence that the model has in the detection of the custom label. The
7789	// range is 0-100. A higher value indicates a higher confidence.
7790	Confidence *float64 `type:"float"`
7791
	// The location of the detected object on the image that corresponds to the
	// custom label. Includes an axis-aligned, coarse bounding box surrounding the
	// object and a finer-grained polygon for more accurate spatial information.
7795	Geometry *Geometry `type:"structure"`
7796
7797	// The name of the custom label.
7798	Name *string `type:"string"`
7799}
7800
7801// String returns the string representation
7802func (s CustomLabel) String() string {
7803	return awsutil.Prettify(s)
7804}
7805
7806// GoString returns the string representation
7807func (s CustomLabel) GoString() string {
7808	return s.String()
7809}
7810
7811// SetConfidence sets the Confidence field's value.
7812func (s *CustomLabel) SetConfidence(v float64) *CustomLabel {
7813	s.Confidence = &v
7814	return s
7815}
7816
7817// SetGeometry sets the Geometry field's value.
7818func (s *CustomLabel) SetGeometry(v *Geometry) *CustomLabel {
7819	s.Geometry = v
7820	return s
7821}
7822
7823// SetName sets the Name field's value.
7824func (s *CustomLabel) SetName(v string) *CustomLabel {
7825	s.Name = &v
7826	return s
7827}
7828
7829type DeleteCollectionInput struct {
7830	_ struct{} `type:"structure"`
7831
7832	// ID of the collection to delete.
7833	//
7834	// CollectionId is a required field
7835	CollectionId *string `min:"1" type:"string" required:"true"`
7836}
7837
7838// String returns the string representation
7839func (s DeleteCollectionInput) String() string {
7840	return awsutil.Prettify(s)
7841}
7842
7843// GoString returns the string representation
7844func (s DeleteCollectionInput) GoString() string {
7845	return s.String()
7846}
7847
7848// Validate inspects the fields of the type to determine if they are valid.
7849func (s *DeleteCollectionInput) Validate() error {
7850	invalidParams := request.ErrInvalidParams{Context: "DeleteCollectionInput"}
7851	if s.CollectionId == nil {
7852		invalidParams.Add(request.NewErrParamRequired("CollectionId"))
7853	}
7854	if s.CollectionId != nil && len(*s.CollectionId) < 1 {
7855		invalidParams.Add(request.NewErrParamMinLen("CollectionId", 1))
7856	}
7857
7858	if invalidParams.Len() > 0 {
7859		return invalidParams
7860	}
7861	return nil
7862}
7863
7864// SetCollectionId sets the CollectionId field's value.
7865func (s *DeleteCollectionInput) SetCollectionId(v string) *DeleteCollectionInput {
7866	s.CollectionId = &v
7867	return s
7868}
7869
7870type DeleteCollectionOutput struct {
7871	_ struct{} `type:"structure"`
7872
7873	// HTTP status code that indicates the result of the operation.
7874	StatusCode *int64 `type:"integer"`
7875}
7876
7877// String returns the string representation
7878func (s DeleteCollectionOutput) String() string {
7879	return awsutil.Prettify(s)
7880}
7881
7882// GoString returns the string representation
7883func (s DeleteCollectionOutput) GoString() string {
7884	return s.String()
7885}
7886
7887// SetStatusCode sets the StatusCode field's value.
7888func (s *DeleteCollectionOutput) SetStatusCode(v int64) *DeleteCollectionOutput {
7889	s.StatusCode = &v
7890	return s
7891}
7892
7893type DeleteFacesInput struct {
7894	_ struct{} `type:"structure"`
7895
7896	// Collection from which to remove the specific faces.
7897	//
7898	// CollectionId is a required field
7899	CollectionId *string `min:"1" type:"string" required:"true"`
7900
7901	// An array of face IDs to delete.
7902	//
7903	// FaceIds is a required field
7904	FaceIds []*string `min:"1" type:"list" required:"true"`
7905}
7906
7907// String returns the string representation
7908func (s DeleteFacesInput) String() string {
7909	return awsutil.Prettify(s)
7910}
7911
7912// GoString returns the string representation
7913func (s DeleteFacesInput) GoString() string {
7914	return s.String()
7915}
7916
7917// Validate inspects the fields of the type to determine if they are valid.
7918func (s *DeleteFacesInput) Validate() error {
7919	invalidParams := request.ErrInvalidParams{Context: "DeleteFacesInput"}
7920	if s.CollectionId == nil {
7921		invalidParams.Add(request.NewErrParamRequired("CollectionId"))
7922	}
7923	if s.CollectionId != nil && len(*s.CollectionId) < 1 {
7924		invalidParams.Add(request.NewErrParamMinLen("CollectionId", 1))
7925	}
7926	if s.FaceIds == nil {
7927		invalidParams.Add(request.NewErrParamRequired("FaceIds"))
7928	}
7929	if s.FaceIds != nil && len(s.FaceIds) < 1 {
7930		invalidParams.Add(request.NewErrParamMinLen("FaceIds", 1))
7931	}
7932
7933	if invalidParams.Len() > 0 {
7934		return invalidParams
7935	}
7936	return nil
7937}
7938
7939// SetCollectionId sets the CollectionId field's value.
7940func (s *DeleteFacesInput) SetCollectionId(v string) *DeleteFacesInput {
7941	s.CollectionId = &v
7942	return s
7943}
7944
7945// SetFaceIds sets the FaceIds field's value.
7946func (s *DeleteFacesInput) SetFaceIds(v []*string) *DeleteFacesInput {
7947	s.FaceIds = v
7948	return s
7949}
7950
7951type DeleteFacesOutput struct {
7952	_ struct{} `type:"structure"`
7953
7954	// An array of strings (face IDs) of the faces that were deleted.
7955	DeletedFaces []*string `min:"1" type:"list"`
7956}
7957
7958// String returns the string representation
7959func (s DeleteFacesOutput) String() string {
7960	return awsutil.Prettify(s)
7961}
7962
7963// GoString returns the string representation
7964func (s DeleteFacesOutput) GoString() string {
7965	return s.String()
7966}
7967
7968// SetDeletedFaces sets the DeletedFaces field's value.
7969func (s *DeleteFacesOutput) SetDeletedFaces(v []*string) *DeleteFacesOutput {
7970	s.DeletedFaces = v
7971	return s
7972}
7973
7974type DeleteProjectInput struct {
7975	_ struct{} `type:"structure"`
7976
7977	// The Amazon Resource Name (ARN) of the project that you want to delete.
7978	//
7979	// ProjectArn is a required field
7980	ProjectArn *string `min:"20" type:"string" required:"true"`
7981}
7982
7983// String returns the string representation
7984func (s DeleteProjectInput) String() string {
7985	return awsutil.Prettify(s)
7986}
7987
7988// GoString returns the string representation
7989func (s DeleteProjectInput) GoString() string {
7990	return s.String()
7991}
7992
7993// Validate inspects the fields of the type to determine if they are valid.
7994func (s *DeleteProjectInput) Validate() error {
7995	invalidParams := request.ErrInvalidParams{Context: "DeleteProjectInput"}
7996	if s.ProjectArn == nil {
7997		invalidParams.Add(request.NewErrParamRequired("ProjectArn"))
7998	}
7999	if s.ProjectArn != nil && len(*s.ProjectArn) < 20 {
8000		invalidParams.Add(request.NewErrParamMinLen("ProjectArn", 20))
8001	}
8002
8003	if invalidParams.Len() > 0 {
8004		return invalidParams
8005	}
8006	return nil
8007}
8008
8009// SetProjectArn sets the ProjectArn field's value.
8010func (s *DeleteProjectInput) SetProjectArn(v string) *DeleteProjectInput {
8011	s.ProjectArn = &v
8012	return s
8013}
8014
8015type DeleteProjectOutput struct {
8016	_ struct{} `type:"structure"`
8017
8018	// The current status of the delete project operation.
8019	Status *string `type:"string" enum:"ProjectStatus"`
8020}
8021
8022// String returns the string representation
8023func (s DeleteProjectOutput) String() string {
8024	return awsutil.Prettify(s)
8025}
8026
8027// GoString returns the string representation
8028func (s DeleteProjectOutput) GoString() string {
8029	return s.String()
8030}
8031
8032// SetStatus sets the Status field's value.
8033func (s *DeleteProjectOutput) SetStatus(v string) *DeleteProjectOutput {
8034	s.Status = &v
8035	return s
8036}
8037
8038type DeleteProjectVersionInput struct {
8039	_ struct{} `type:"structure"`
8040
8041	// The Amazon Resource Name (ARN) of the model version that you want to delete.
8042	//
8043	// ProjectVersionArn is a required field
8044	ProjectVersionArn *string `min:"20" type:"string" required:"true"`
8045}
8046
8047// String returns the string representation
8048func (s DeleteProjectVersionInput) String() string {
8049	return awsutil.Prettify(s)
8050}
8051
8052// GoString returns the string representation
8053func (s DeleteProjectVersionInput) GoString() string {
8054	return s.String()
8055}
8056
8057// Validate inspects the fields of the type to determine if they are valid.
8058func (s *DeleteProjectVersionInput) Validate() error {
8059	invalidParams := request.ErrInvalidParams{Context: "DeleteProjectVersionInput"}
8060	if s.ProjectVersionArn == nil {
8061		invalidParams.Add(request.NewErrParamRequired("ProjectVersionArn"))
8062	}
8063	if s.ProjectVersionArn != nil && len(*s.ProjectVersionArn) < 20 {
8064		invalidParams.Add(request.NewErrParamMinLen("ProjectVersionArn", 20))
8065	}
8066
8067	if invalidParams.Len() > 0 {
8068		return invalidParams
8069	}
8070	return nil
8071}
8072
8073// SetProjectVersionArn sets the ProjectVersionArn field's value.
8074func (s *DeleteProjectVersionInput) SetProjectVersionArn(v string) *DeleteProjectVersionInput {
8075	s.ProjectVersionArn = &v
8076	return s
8077}
8078
8079type DeleteProjectVersionOutput struct {
8080	_ struct{} `type:"structure"`
8081
8082	// The status of the deletion operation.
8083	Status *string `type:"string" enum:"ProjectVersionStatus"`
8084}
8085
8086// String returns the string representation
8087func (s DeleteProjectVersionOutput) String() string {
8088	return awsutil.Prettify(s)
8089}
8090
8091// GoString returns the string representation
8092func (s DeleteProjectVersionOutput) GoString() string {
8093	return s.String()
8094}
8095
8096// SetStatus sets the Status field's value.
8097func (s *DeleteProjectVersionOutput) SetStatus(v string) *DeleteProjectVersionOutput {
8098	s.Status = &v
8099	return s
8100}
8101
8102type DeleteStreamProcessorInput struct {
8103	_ struct{} `type:"structure"`
8104
8105	// The name of the stream processor you want to delete.
8106	//
8107	// Name is a required field
8108	Name *string `min:"1" type:"string" required:"true"`
8109}
8110
8111// String returns the string representation
8112func (s DeleteStreamProcessorInput) String() string {
8113	return awsutil.Prettify(s)
8114}
8115
8116// GoString returns the string representation
8117func (s DeleteStreamProcessorInput) GoString() string {
8118	return s.String()
8119}
8120
8121// Validate inspects the fields of the type to determine if they are valid.
8122func (s *DeleteStreamProcessorInput) Validate() error {
8123	invalidParams := request.ErrInvalidParams{Context: "DeleteStreamProcessorInput"}
8124	if s.Name == nil {
8125		invalidParams.Add(request.NewErrParamRequired("Name"))
8126	}
8127	if s.Name != nil && len(*s.Name) < 1 {
8128		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
8129	}
8130
8131	if invalidParams.Len() > 0 {
8132		return invalidParams
8133	}
8134	return nil
8135}
8136
8137// SetName sets the Name field's value.
8138func (s *DeleteStreamProcessorInput) SetName(v string) *DeleteStreamProcessorInput {
8139	s.Name = &v
8140	return s
8141}
8142
8143type DeleteStreamProcessorOutput struct {
8144	_ struct{} `type:"structure"`
8145}
8146
8147// String returns the string representation
8148func (s DeleteStreamProcessorOutput) String() string {
8149	return awsutil.Prettify(s)
8150}
8151
8152// GoString returns the string representation
8153func (s DeleteStreamProcessorOutput) GoString() string {
8154	return s.String()
8155}
8156
8157type DescribeCollectionInput struct {
8158	_ struct{} `type:"structure"`
8159
8160	// The ID of the collection to describe.
8161	//
8162	// CollectionId is a required field
8163	CollectionId *string `min:"1" type:"string" required:"true"`
8164}
8165
8166// String returns the string representation
8167func (s DescribeCollectionInput) String() string {
8168	return awsutil.Prettify(s)
8169}
8170
8171// GoString returns the string representation
8172func (s DescribeCollectionInput) GoString() string {
8173	return s.String()
8174}
8175
8176// Validate inspects the fields of the type to determine if they are valid.
8177func (s *DescribeCollectionInput) Validate() error {
8178	invalidParams := request.ErrInvalidParams{Context: "DescribeCollectionInput"}
8179	if s.CollectionId == nil {
8180		invalidParams.Add(request.NewErrParamRequired("CollectionId"))
8181	}
8182	if s.CollectionId != nil && len(*s.CollectionId) < 1 {
8183		invalidParams.Add(request.NewErrParamMinLen("CollectionId", 1))
8184	}
8185
8186	if invalidParams.Len() > 0 {
8187		return invalidParams
8188	}
8189	return nil
8190}
8191
8192// SetCollectionId sets the CollectionId field's value.
8193func (s *DescribeCollectionInput) SetCollectionId(v string) *DescribeCollectionInput {
8194	s.CollectionId = &v
8195	return s
8196}
8197
8198type DescribeCollectionOutput struct {
8199	_ struct{} `type:"structure"`
8200
8201	// The Amazon Resource Name (ARN) of the collection.
8202	CollectionARN *string `type:"string"`
8203
	// The number of milliseconds since the Unix epoch until the collection was
	// created. The Unix epoch is 00:00:00 Coordinated Universal Time (UTC),
	// Thursday, 1 January 1970.
8207	CreationTimestamp *time.Time `type:"timestamp"`
8208
8209	// The number of faces that are indexed into the collection. To index faces
8210	// into a collection, use IndexFaces.
8211	FaceCount *int64 `type:"long"`
8212
8213	// The version of the face model that's used by the collection for face detection.
8214	//
8215	// For more information, see Model Versioning in the Amazon Rekognition Developer
8216	// Guide.
8217	FaceModelVersion *string `type:"string"`
8218}
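
// A minimal usage sketch (illustrative only): it assumes a configured
// *Rekognition client named svc and an existing collection ID.
//
//    out, err := svc.DescribeCollection(&DescribeCollectionInput{
//        CollectionId: aws.String("my-collection"),
//    })
//    if err == nil {
//        fmt.Printf("faces: %d, model: %s\n",
//            aws.Int64Value(out.FaceCount),
//            aws.StringValue(out.FaceModelVersion))
//    }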
8219
8220// String returns the string representation
8221func (s DescribeCollectionOutput) String() string {
8222	return awsutil.Prettify(s)
8223}
8224
8225// GoString returns the string representation
8226func (s DescribeCollectionOutput) GoString() string {
8227	return s.String()
8228}
8229
8230// SetCollectionARN sets the CollectionARN field's value.
8231func (s *DescribeCollectionOutput) SetCollectionARN(v string) *DescribeCollectionOutput {
8232	s.CollectionARN = &v
8233	return s
8234}
8235
8236// SetCreationTimestamp sets the CreationTimestamp field's value.
8237func (s *DescribeCollectionOutput) SetCreationTimestamp(v time.Time) *DescribeCollectionOutput {
8238	s.CreationTimestamp = &v
8239	return s
8240}
8241
8242// SetFaceCount sets the FaceCount field's value.
8243func (s *DescribeCollectionOutput) SetFaceCount(v int64) *DescribeCollectionOutput {
8244	s.FaceCount = &v
8245	return s
8246}
8247
8248// SetFaceModelVersion sets the FaceModelVersion field's value.
8249func (s *DescribeCollectionOutput) SetFaceModelVersion(v string) *DescribeCollectionOutput {
8250	s.FaceModelVersion = &v
8251	return s
8252}
8253
8254type DescribeProjectVersionsInput struct {
8255	_ struct{} `type:"structure"`
8256
8257	// The maximum number of results to return per paginated call. The largest value
8258	// you can specify is 100. If you specify a value greater than 100, a ValidationException
8259	// error occurs. The default value is 100.
8260	MaxResults *int64 `min:"1" type:"integer"`
8261
	// If the previous response was incomplete (because there are more results to
8263	// retrieve), Amazon Rekognition Custom Labels returns a pagination token in
8264	// the response. You can use this pagination token to retrieve the next set
8265	// of results.
8266	NextToken *string `type:"string"`
8267
8268	// The Amazon Resource Name (ARN) of the project that contains the models you
8269	// want to describe.
8270	//
8271	// ProjectArn is a required field
8272	ProjectArn *string `min:"20" type:"string" required:"true"`
8273
8274	// A list of model version names that you want to describe. You can add up to
8275	// 10 model version names to the list. If you don't specify a value, all model
8276	// descriptions are returned. A version name is part of a model (ProjectVersion)
8277	// ARN. For example, my-model.2020-01-21T09.10.15 is the version name in the
8278	// following ARN. arn:aws:rekognition:us-east-1:123456789012:project/getting-started/version/my-model.2020-01-21T09.10.15/1234567890123.
8279	VersionNames []*string `min:"1" type:"list"`
8280}
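
// A minimal pagination sketch (illustrative only): it assumes a configured
// *Rekognition client named svc and a hypothetical project ARN. The loop
// follows NextToken until the service reports no further results.
//
//    input := &DescribeProjectVersionsInput{
//        ProjectArn: aws.String("arn:aws:rekognition:us-east-1:111122223333:project/my-project/1234567890123"),
//        MaxResults: aws.Int64(10),
//    }
//    for {
//        page, err := svc.DescribeProjectVersions(input)
//        if err != nil {
//            break // handle the error
//        }
//        for _, d := range page.ProjectVersionDescriptions {
//            fmt.Println(aws.StringValue(d.ProjectVersionArn))
//        }
//        if page.NextToken == nil {
//            break
//        }
//        input.NextToken = page.NextToken
//    }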
8281
8282// String returns the string representation
8283func (s DescribeProjectVersionsInput) String() string {
8284	return awsutil.Prettify(s)
8285}
8286
8287// GoString returns the string representation
8288func (s DescribeProjectVersionsInput) GoString() string {
8289	return s.String()
8290}
8291
8292// Validate inspects the fields of the type to determine if they are valid.
8293func (s *DescribeProjectVersionsInput) Validate() error {
8294	invalidParams := request.ErrInvalidParams{Context: "DescribeProjectVersionsInput"}
8295	if s.MaxResults != nil && *s.MaxResults < 1 {
8296		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
8297	}
8298	if s.ProjectArn == nil {
8299		invalidParams.Add(request.NewErrParamRequired("ProjectArn"))
8300	}
8301	if s.ProjectArn != nil && len(*s.ProjectArn) < 20 {
8302		invalidParams.Add(request.NewErrParamMinLen("ProjectArn", 20))
8303	}
8304	if s.VersionNames != nil && len(s.VersionNames) < 1 {
8305		invalidParams.Add(request.NewErrParamMinLen("VersionNames", 1))
8306	}
8307
8308	if invalidParams.Len() > 0 {
8309		return invalidParams
8310	}
8311	return nil
8312}
8313
8314// SetMaxResults sets the MaxResults field's value.
8315func (s *DescribeProjectVersionsInput) SetMaxResults(v int64) *DescribeProjectVersionsInput {
8316	s.MaxResults = &v
8317	return s
8318}
8319
8320// SetNextToken sets the NextToken field's value.
8321func (s *DescribeProjectVersionsInput) SetNextToken(v string) *DescribeProjectVersionsInput {
8322	s.NextToken = &v
8323	return s
8324}
8325
8326// SetProjectArn sets the ProjectArn field's value.
8327func (s *DescribeProjectVersionsInput) SetProjectArn(v string) *DescribeProjectVersionsInput {
8328	s.ProjectArn = &v
8329	return s
8330}
8331
8332// SetVersionNames sets the VersionNames field's value.
8333func (s *DescribeProjectVersionsInput) SetVersionNames(v []*string) *DescribeProjectVersionsInput {
8334	s.VersionNames = v
8335	return s
8336}
8337
8338type DescribeProjectVersionsOutput struct {
8339	_ struct{} `type:"structure"`
8340
	// If the previous response was incomplete (because there are more results to
8342	// retrieve), Amazon Rekognition Custom Labels returns a pagination token in
8343	// the response. You can use this pagination token to retrieve the next set
8344	// of results.
8345	NextToken *string `type:"string"`
8346
8347	// A list of model descriptions. The list is sorted by the creation date and
8348	// time of the model versions, latest to earliest.
8349	ProjectVersionDescriptions []*ProjectVersionDescription `type:"list"`
8350}
8351
8352// String returns the string representation
8353func (s DescribeProjectVersionsOutput) String() string {
8354	return awsutil.Prettify(s)
8355}
8356
8357// GoString returns the string representation
8358func (s DescribeProjectVersionsOutput) GoString() string {
8359	return s.String()
8360}
8361
8362// SetNextToken sets the NextToken field's value.
8363func (s *DescribeProjectVersionsOutput) SetNextToken(v string) *DescribeProjectVersionsOutput {
8364	s.NextToken = &v
8365	return s
8366}
8367
8368// SetProjectVersionDescriptions sets the ProjectVersionDescriptions field's value.
8369func (s *DescribeProjectVersionsOutput) SetProjectVersionDescriptions(v []*ProjectVersionDescription) *DescribeProjectVersionsOutput {
8370	s.ProjectVersionDescriptions = v
8371	return s
8372}
8373
8374type DescribeProjectsInput struct {
8375	_ struct{} `type:"structure"`
8376
8377	// The maximum number of results to return per paginated call. The largest value
8378	// you can specify is 100. If you specify a value greater than 100, a ValidationException
8379	// error occurs. The default value is 100.
8380	MaxResults *int64 `min:"1" type:"integer"`
8381
	// If the previous response was incomplete (because there are more results to
8383	// retrieve), Amazon Rekognition Custom Labels returns a pagination token in
8384	// the response. You can use this pagination token to retrieve the next set
8385	// of results.
8386	NextToken *string `type:"string"`
8387}
8388
8389// String returns the string representation
8390func (s DescribeProjectsInput) String() string {
8391	return awsutil.Prettify(s)
8392}
8393
8394// GoString returns the string representation
8395func (s DescribeProjectsInput) GoString() string {
8396	return s.String()
8397}
8398
8399// Validate inspects the fields of the type to determine if they are valid.
8400func (s *DescribeProjectsInput) Validate() error {
8401	invalidParams := request.ErrInvalidParams{Context: "DescribeProjectsInput"}
8402	if s.MaxResults != nil && *s.MaxResults < 1 {
8403		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
8404	}
8405
8406	if invalidParams.Len() > 0 {
8407		return invalidParams
8408	}
8409	return nil
8410}
8411
8412// SetMaxResults sets the MaxResults field's value.
8413func (s *DescribeProjectsInput) SetMaxResults(v int64) *DescribeProjectsInput {
8414	s.MaxResults = &v
8415	return s
8416}
8417
8418// SetNextToken sets the NextToken field's value.
8419func (s *DescribeProjectsInput) SetNextToken(v string) *DescribeProjectsInput {
8420	s.NextToken = &v
8421	return s
8422}
8423
8424type DescribeProjectsOutput struct {
8425	_ struct{} `type:"structure"`
8426
	// If the previous response was incomplete (because there are more results to
8428	// retrieve), Amazon Rekognition Custom Labels returns a pagination token in
8429	// the response. You can use this pagination token to retrieve the next set
8430	// of results.
8431	NextToken *string `type:"string"`
8432
8433	// A list of project descriptions. The list is sorted by the date and time the
	// projects were created.
8435	ProjectDescriptions []*ProjectDescription `type:"list"`
8436}
8437
8438// String returns the string representation
8439func (s DescribeProjectsOutput) String() string {
8440	return awsutil.Prettify(s)
8441}
8442
8443// GoString returns the string representation
8444func (s DescribeProjectsOutput) GoString() string {
8445	return s.String()
8446}
8447
8448// SetNextToken sets the NextToken field's value.
8449func (s *DescribeProjectsOutput) SetNextToken(v string) *DescribeProjectsOutput {
8450	s.NextToken = &v
8451	return s
8452}
8453
8454// SetProjectDescriptions sets the ProjectDescriptions field's value.
8455func (s *DescribeProjectsOutput) SetProjectDescriptions(v []*ProjectDescription) *DescribeProjectsOutput {
8456	s.ProjectDescriptions = v
8457	return s
8458}
8459
8460type DescribeStreamProcessorInput struct {
8461	_ struct{} `type:"structure"`
8462
8463	// Name of the stream processor for which you want information.
8464	//
8465	// Name is a required field
8466	Name *string `min:"1" type:"string" required:"true"`
8467}
8468
8469// String returns the string representation
8470func (s DescribeStreamProcessorInput) String() string {
8471	return awsutil.Prettify(s)
8472}
8473
8474// GoString returns the string representation
8475func (s DescribeStreamProcessorInput) GoString() string {
8476	return s.String()
8477}
8478
8479// Validate inspects the fields of the type to determine if they are valid.
8480func (s *DescribeStreamProcessorInput) Validate() error {
8481	invalidParams := request.ErrInvalidParams{Context: "DescribeStreamProcessorInput"}
8482	if s.Name == nil {
8483		invalidParams.Add(request.NewErrParamRequired("Name"))
8484	}
8485	if s.Name != nil && len(*s.Name) < 1 {
8486		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
8487	}
8488
8489	if invalidParams.Len() > 0 {
8490		return invalidParams
8491	}
8492	return nil
8493}
8494
8495// SetName sets the Name field's value.
8496func (s *DescribeStreamProcessorInput) SetName(v string) *DescribeStreamProcessorInput {
8497	s.Name = &v
8498	return s
8499}
8500
8501type DescribeStreamProcessorOutput struct {
8502	_ struct{} `type:"structure"`
8503
	// Date and time the stream processor was created.
8505	CreationTimestamp *time.Time `type:"timestamp"`
8506
8507	// Kinesis video stream that provides the source streaming video.
8508	Input *StreamProcessorInput `type:"structure"`
8509
8510	// The time, in Unix format, the stream processor was last updated. For example,
8511	// when the stream processor moves from a running state to a failed state, or
8512	// when the user starts or stops the stream processor.
8513	LastUpdateTimestamp *time.Time `type:"timestamp"`
8514
8515	// Name of the stream processor.
8516	Name *string `min:"1" type:"string"`
8517
8518	// Kinesis data stream to which Amazon Rekognition Video puts the analysis results.
8519	Output *StreamProcessorOutput `type:"structure"`
8520
8521	// ARN of the IAM role that allows access to the stream processor.
8522	RoleArn *string `type:"string"`
8523
8524	// Face recognition input parameters that are being used by the stream processor.
8525	// Includes the collection to use for face recognition and the face attributes
8526	// to detect.
8527	Settings *StreamProcessorSettings `type:"structure"`
8528
8529	// Current status of the stream processor.
8530	Status *string `type:"string" enum:"StreamProcessorStatus"`
8531
8532	// Detailed status message about the stream processor.
8533	StatusMessage *string `type:"string"`
8534
8535	// ARN of the stream processor.
8536	StreamProcessorArn *string `type:"string"`
8537}
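
// A minimal usage sketch (illustrative only): it assumes a configured
// *Rekognition client named svc and an existing stream processor name.
//
//    out, err := svc.DescribeStreamProcessor(&DescribeStreamProcessorInput{
//        Name: aws.String("my-stream-processor"),
//    })
//    if err == nil {
//        fmt.Println(aws.StringValue(out.Status), aws.StringValue(out.StatusMessage))
//    }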
8538
8539// String returns the string representation
8540func (s DescribeStreamProcessorOutput) String() string {
8541	return awsutil.Prettify(s)
8542}
8543
8544// GoString returns the string representation
8545func (s DescribeStreamProcessorOutput) GoString() string {
8546	return s.String()
8547}
8548
8549// SetCreationTimestamp sets the CreationTimestamp field's value.
8550func (s *DescribeStreamProcessorOutput) SetCreationTimestamp(v time.Time) *DescribeStreamProcessorOutput {
8551	s.CreationTimestamp = &v
8552	return s
8553}
8554
8555// SetInput sets the Input field's value.
8556func (s *DescribeStreamProcessorOutput) SetInput(v *StreamProcessorInput) *DescribeStreamProcessorOutput {
8557	s.Input = v
8558	return s
8559}
8560
8561// SetLastUpdateTimestamp sets the LastUpdateTimestamp field's value.
8562func (s *DescribeStreamProcessorOutput) SetLastUpdateTimestamp(v time.Time) *DescribeStreamProcessorOutput {
8563	s.LastUpdateTimestamp = &v
8564	return s
8565}
8566
8567// SetName sets the Name field's value.
8568func (s *DescribeStreamProcessorOutput) SetName(v string) *DescribeStreamProcessorOutput {
8569	s.Name = &v
8570	return s
8571}
8572
8573// SetOutput sets the Output field's value.
8574func (s *DescribeStreamProcessorOutput) SetOutput(v *StreamProcessorOutput) *DescribeStreamProcessorOutput {
8575	s.Output = v
8576	return s
8577}
8578
8579// SetRoleArn sets the RoleArn field's value.
8580func (s *DescribeStreamProcessorOutput) SetRoleArn(v string) *DescribeStreamProcessorOutput {
8581	s.RoleArn = &v
8582	return s
8583}
8584
8585// SetSettings sets the Settings field's value.
8586func (s *DescribeStreamProcessorOutput) SetSettings(v *StreamProcessorSettings) *DescribeStreamProcessorOutput {
8587	s.Settings = v
8588	return s
8589}
8590
8591// SetStatus sets the Status field's value.
8592func (s *DescribeStreamProcessorOutput) SetStatus(v string) *DescribeStreamProcessorOutput {
8593	s.Status = &v
8594	return s
8595}
8596
8597// SetStatusMessage sets the StatusMessage field's value.
8598func (s *DescribeStreamProcessorOutput) SetStatusMessage(v string) *DescribeStreamProcessorOutput {
8599	s.StatusMessage = &v
8600	return s
8601}
8602
8603// SetStreamProcessorArn sets the StreamProcessorArn field's value.
8604func (s *DescribeStreamProcessorOutput) SetStreamProcessorArn(v string) *DescribeStreamProcessorOutput {
8605	s.StreamProcessorArn = &v
8606	return s
8607}
8608
8609type DetectCustomLabelsInput struct {
8610	_ struct{} `type:"structure"`
8611
8612	// Provides the input image either as bytes or an S3 object.
8613	//
8614	// You pass image bytes to an Amazon Rekognition API operation by using the
8615	// Bytes property. For example, you would use the Bytes property to pass an
8616	// image loaded from a local file system. Image bytes passed by using the Bytes
8617	// property must be base64-encoded. Your code may not need to encode image bytes
8618	// if you are using an AWS SDK to call Amazon Rekognition API operations.
8619	//
8620	// For more information, see Analyzing an Image Loaded from a Local File System
8621	// in the Amazon Rekognition Developer Guide.
8622	//
8623	// You pass images stored in an S3 bucket to an Amazon Rekognition API operation
8624	// by using the S3Object property. Images stored in an S3 bucket do not need
8625	// to be base64-encoded.
8626	//
8627	// The region for the S3 bucket containing the S3 object must match the region
8628	// you use for Amazon Rekognition operations.
8629	//
8630	// If you use the AWS CLI to call Amazon Rekognition operations, passing image
8631	// bytes using the Bytes property is not supported. You must first upload the
8632	// image to an Amazon S3 bucket and then call the operation using the S3Object
8633	// property.
8634	//
8635	// For Amazon Rekognition to process an S3 object, the user must have permission
8636	// to access the S3 object. For more information, see Resource Based Policies
8637	// in the Amazon Rekognition Developer Guide.
8638	//
8639	// Image is a required field
8640	Image *Image `type:"structure" required:"true"`
8641
8642	// Maximum number of results you want the service to return in the response.
8643	// The service returns the specified number of highest confidence labels ranked
8644	// from highest confidence to lowest.
8645	MaxResults *int64 `type:"integer"`
8646
8647	// Specifies the minimum confidence level for the labels to return. Amazon Rekognition
8648	// doesn't return any labels with a confidence lower than this specified value.
	// If you specify a value of 0, all labels are returned, regardless of the default
8650	// thresholds that the model version applies.
8651	MinConfidence *float64 `type:"float"`
8652
8653	// The ARN of the model version that you want to use.
8654	//
8655	// ProjectVersionArn is a required field
8656	ProjectVersionArn *string `min:"20" type:"string" required:"true"`
8657}
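
// A minimal usage sketch (illustrative only): it assumes a configured
// *Rekognition client named svc, a hypothetical S3 bucket and key, and a
// hypothetical ARN for a model version that is currently running.
//
//    out, err := svc.DetectCustomLabels(&DetectCustomLabelsInput{
//        Image: &Image{S3Object: &S3Object{
//            Bucket: aws.String("my-bucket"),
//            Name:   aws.String("photos/part.jpg"),
//        }},
//        MinConfidence:     aws.Float64(70),
//        ProjectVersionArn: aws.String("arn:aws:rekognition:us-east-1:111122223333:project/my-project/version/my-model.2020-01-21T09.10.15/1234567890123"),
//    })
//    if err == nil {
//        for _, cl := range out.CustomLabels {
//            fmt.Println(aws.StringValue(cl.Name), aws.Float64Value(cl.Confidence))
//        }
//    }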
8658
8659// String returns the string representation
8660func (s DetectCustomLabelsInput) String() string {
8661	return awsutil.Prettify(s)
8662}
8663
8664// GoString returns the string representation
8665func (s DetectCustomLabelsInput) GoString() string {
8666	return s.String()
8667}
8668
8669// Validate inspects the fields of the type to determine if they are valid.
8670func (s *DetectCustomLabelsInput) Validate() error {
8671	invalidParams := request.ErrInvalidParams{Context: "DetectCustomLabelsInput"}
8672	if s.Image == nil {
8673		invalidParams.Add(request.NewErrParamRequired("Image"))
8674	}
8675	if s.ProjectVersionArn == nil {
8676		invalidParams.Add(request.NewErrParamRequired("ProjectVersionArn"))
8677	}
8678	if s.ProjectVersionArn != nil && len(*s.ProjectVersionArn) < 20 {
8679		invalidParams.Add(request.NewErrParamMinLen("ProjectVersionArn", 20))
8680	}
8681	if s.Image != nil {
8682		if err := s.Image.Validate(); err != nil {
8683			invalidParams.AddNested("Image", err.(request.ErrInvalidParams))
8684		}
8685	}
8686
8687	if invalidParams.Len() > 0 {
8688		return invalidParams
8689	}
8690	return nil
8691}
8692
8693// SetImage sets the Image field's value.
8694func (s *DetectCustomLabelsInput) SetImage(v *Image) *DetectCustomLabelsInput {
8695	s.Image = v
8696	return s
8697}
8698
8699// SetMaxResults sets the MaxResults field's value.
8700func (s *DetectCustomLabelsInput) SetMaxResults(v int64) *DetectCustomLabelsInput {
8701	s.MaxResults = &v
8702	return s
8703}
8704
8705// SetMinConfidence sets the MinConfidence field's value.
8706func (s *DetectCustomLabelsInput) SetMinConfidence(v float64) *DetectCustomLabelsInput {
8707	s.MinConfidence = &v
8708	return s
8709}
8710
8711// SetProjectVersionArn sets the ProjectVersionArn field's value.
8712func (s *DetectCustomLabelsInput) SetProjectVersionArn(v string) *DetectCustomLabelsInput {
8713	s.ProjectVersionArn = &v
8714	return s
8715}
8716
8717type DetectCustomLabelsOutput struct {
8718	_ struct{} `type:"structure"`
8719
8720	// An array of custom labels detected in the input image.
8721	CustomLabels []*CustomLabel `type:"list"`
8722}
8723
8724// String returns the string representation
8725func (s DetectCustomLabelsOutput) String() string {
8726	return awsutil.Prettify(s)
8727}
8728
8729// GoString returns the string representation
8730func (s DetectCustomLabelsOutput) GoString() string {
8731	return s.String()
8732}
8733
8734// SetCustomLabels sets the CustomLabels field's value.
8735func (s *DetectCustomLabelsOutput) SetCustomLabels(v []*CustomLabel) *DetectCustomLabelsOutput {
8736	s.CustomLabels = v
8737	return s
8738}
8739
8740type DetectFacesInput struct {
8741	_ struct{} `type:"structure"`
8742
8743	// An array of facial attributes you want to be returned. This can be the default
8744	// list of attributes or all attributes. If you don't specify a value for Attributes
8745	// or if you specify ["DEFAULT"], the API returns the following subset of facial
8746	// attributes: BoundingBox, Confidence, Pose, Quality, and Landmarks. If you
8747	// provide ["ALL"], all facial attributes are returned, but the operation takes
8748	// longer to complete.
8749	//
	// If you provide both ["ALL", "DEFAULT"], the service uses a logical AND operator
8751	// to determine which attributes to return (in this case, all attributes).
8752	Attributes []*string `type:"list"`
8753
8754	// The input image as base64-encoded bytes or an S3 object. If you use the AWS
8755	// CLI to call Amazon Rekognition operations, passing base64-encoded image bytes
8756	// is not supported.
8757	//
8758	// If you are using an AWS SDK to call Amazon Rekognition, you might not need
8759	// to base64-encode image bytes passed using the Bytes field. For more information,
8760	// see Images in the Amazon Rekognition developer guide.
8761	//
8762	// Image is a required field
8763	Image *Image `type:"structure" required:"true"`
8764}
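
// A minimal usage sketch (illustrative only): it assumes a configured
// *Rekognition client named svc and a hypothetical S3 object. Requesting
// ["ALL"] returns every facial attribute at the cost of a longer-running
// operation.
//
//    out, err := svc.DetectFaces(&DetectFacesInput{
//        Image: &Image{S3Object: &S3Object{
//            Bucket: aws.String("my-bucket"),
//            Name:   aws.String("group-photo.jpg"),
//        }},
//        Attributes: []*string{aws.String("ALL")},
//    })
//    if err == nil {
//        fmt.Println(len(out.FaceDetails), "faces detected")
//    }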
8765
8766// String returns the string representation
8767func (s DetectFacesInput) String() string {
8768	return awsutil.Prettify(s)
8769}
8770
8771// GoString returns the string representation
8772func (s DetectFacesInput) GoString() string {
8773	return s.String()
8774}
8775
8776// Validate inspects the fields of the type to determine if they are valid.
8777func (s *DetectFacesInput) Validate() error {
8778	invalidParams := request.ErrInvalidParams{Context: "DetectFacesInput"}
8779	if s.Image == nil {
8780		invalidParams.Add(request.NewErrParamRequired("Image"))
8781	}
8782	if s.Image != nil {
8783		if err := s.Image.Validate(); err != nil {
8784			invalidParams.AddNested("Image", err.(request.ErrInvalidParams))
8785		}
8786	}
8787
8788	if invalidParams.Len() > 0 {
8789		return invalidParams
8790	}
8791	return nil
8792}
8793
8794// SetAttributes sets the Attributes field's value.
8795func (s *DetectFacesInput) SetAttributes(v []*string) *DetectFacesInput {
8796	s.Attributes = v
8797	return s
8798}
8799
8800// SetImage sets the Image field's value.
8801func (s *DetectFacesInput) SetImage(v *Image) *DetectFacesInput {
8802	s.Image = v
8803	return s
8804}
8805
8806type DetectFacesOutput struct {
8807	_ struct{} `type:"structure"`
8808
8809	// Details of each face found in the image.
8810	FaceDetails []*FaceDetail `type:"list"`
8811
8812	// The value of OrientationCorrection is always null.
8813	//
8814	// If the input image is in .jpeg format, it might contain exchangeable image
8815	// file format (Exif) metadata that includes the image's orientation. Amazon
8816	// Rekognition uses this orientation information to perform image correction.
8817	// The bounding box coordinates are translated to represent object locations
8818	// after the orientation information in the Exif metadata is used to correct
8819	// the image orientation. Images in .png format don't contain Exif metadata.
8820	//
8821	// Amazon Rekognition doesn’t perform image correction for images in .png
8822	// format and .jpeg images without orientation information in the image Exif
8823	// metadata. The bounding box coordinates aren't translated and represent the
8824	// object locations before the image is rotated.
8825	OrientationCorrection *string `type:"string" enum:"OrientationCorrection"`
8826}
8827
8828// String returns the string representation
8829func (s DetectFacesOutput) String() string {
8830	return awsutil.Prettify(s)
8831}
8832
8833// GoString returns the string representation
8834func (s DetectFacesOutput) GoString() string {
8835	return s.String()
8836}
8837
8838// SetFaceDetails sets the FaceDetails field's value.
8839func (s *DetectFacesOutput) SetFaceDetails(v []*FaceDetail) *DetectFacesOutput {
8840	s.FaceDetails = v
8841	return s
8842}
8843
8844// SetOrientationCorrection sets the OrientationCorrection field's value.
8845func (s *DetectFacesOutput) SetOrientationCorrection(v string) *DetectFacesOutput {
8846	s.OrientationCorrection = &v
8847	return s
8848}
8849
8850type DetectLabelsInput struct {
8851	_ struct{} `type:"structure"`
8852
8853	// The input image as base64-encoded bytes or an S3 object. If you use the AWS
8854	// CLI to call Amazon Rekognition operations, passing image bytes is not supported.
8855	// Images stored in an S3 Bucket do not need to be base64-encoded.
8856	//
8857	// If you are using an AWS SDK to call Amazon Rekognition, you might not need
8858	// to base64-encode image bytes passed using the Bytes field. For more information,
8859	// see Images in the Amazon Rekognition developer guide.
8860	//
8861	// Image is a required field
8862	Image *Image `type:"structure" required:"true"`
8863
8864	// Maximum number of labels you want the service to return in the response.
8865	// The service returns the specified number of highest confidence labels.
8866	MaxLabels *int64 `type:"integer"`
8867
8868	// Specifies the minimum confidence level for the labels to return. Amazon Rekognition
8869	// doesn't return any labels with confidence lower than this specified value.
8870	//
	// If MinConfidence is not specified, the operation returns labels with confidence
8872	// values greater than or equal to 55 percent.
8873	MinConfidence *float64 `type:"float"`
8874}
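
// A minimal usage sketch (illustrative only): it assumes a configured
// *Rekognition client named svc and raw image bytes in imgBytes (for
// example, read from a local file); when calling through an AWS SDK the
// bytes generally do not need to be base64-encoded by hand.
//
//    out, err := svc.DetectLabels(&DetectLabelsInput{
//        Image:         &Image{Bytes: imgBytes},
//        MaxLabels:     aws.Int64(10),
//        MinConfidence: aws.Float64(75),
//    })
//    if err == nil {
//        for _, l := range out.Labels {
//            fmt.Println(aws.StringValue(l.Name), aws.Float64Value(l.Confidence))
//        }
//    }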
8875
8876// String returns the string representation
8877func (s DetectLabelsInput) String() string {
8878	return awsutil.Prettify(s)
8879}
8880
8881// GoString returns the string representation
8882func (s DetectLabelsInput) GoString() string {
8883	return s.String()
8884}
8885
8886// Validate inspects the fields of the type to determine if they are valid.
8887func (s *DetectLabelsInput) Validate() error {
8888	invalidParams := request.ErrInvalidParams{Context: "DetectLabelsInput"}
8889	if s.Image == nil {
8890		invalidParams.Add(request.NewErrParamRequired("Image"))
8891	}
8892	if s.Image != nil {
8893		if err := s.Image.Validate(); err != nil {
8894			invalidParams.AddNested("Image", err.(request.ErrInvalidParams))
8895		}
8896	}
8897
8898	if invalidParams.Len() > 0 {
8899		return invalidParams
8900	}
8901	return nil
8902}
8903
8904// SetImage sets the Image field's value.
8905func (s *DetectLabelsInput) SetImage(v *Image) *DetectLabelsInput {
8906	s.Image = v
8907	return s
8908}
8909
8910// SetMaxLabels sets the MaxLabels field's value.
8911func (s *DetectLabelsInput) SetMaxLabels(v int64) *DetectLabelsInput {
8912	s.MaxLabels = &v
8913	return s
8914}
8915
8916// SetMinConfidence sets the MinConfidence field's value.
8917func (s *DetectLabelsInput) SetMinConfidence(v float64) *DetectLabelsInput {
8918	s.MinConfidence = &v
8919	return s
8920}
8921
8922type DetectLabelsOutput struct {
8923	_ struct{} `type:"structure"`
8924
8925	// Version number of the label detection model that was used to detect labels.
8926	LabelModelVersion *string `type:"string"`
8927
8928	// An array of labels for the real-world objects detected.
8929	Labels []*Label `type:"list"`
8930
8931	// The value of OrientationCorrection is always null.
8932	//
8933	// If the input image is in .jpeg format, it might contain exchangeable image
8934	// file format (Exif) metadata that includes the image's orientation. Amazon
8935	// Rekognition uses this orientation information to perform image correction.
8936	// The bounding box coordinates are translated to represent object locations
8937	// after the orientation information in the Exif metadata is used to correct
8938	// the image orientation. Images in .png format don't contain Exif metadata.
8939	//
8940	// Amazon Rekognition doesn’t perform image correction for images in .png
8941	// format and .jpeg images without orientation information in the image Exif
8942	// metadata. The bounding box coordinates aren't translated and represent the
8943	// object locations before the image is rotated.
8944	OrientationCorrection *string `type:"string" enum:"OrientationCorrection"`
8945}
8946
8947// String returns the string representation
8948func (s DetectLabelsOutput) String() string {
8949	return awsutil.Prettify(s)
8950}
8951
8952// GoString returns the string representation
8953func (s DetectLabelsOutput) GoString() string {
8954	return s.String()
8955}
8956
8957// SetLabelModelVersion sets the LabelModelVersion field's value.
8958func (s *DetectLabelsOutput) SetLabelModelVersion(v string) *DetectLabelsOutput {
8959	s.LabelModelVersion = &v
8960	return s
8961}
8962
8963// SetLabels sets the Labels field's value.
8964func (s *DetectLabelsOutput) SetLabels(v []*Label) *DetectLabelsOutput {
8965	s.Labels = v
8966	return s
8967}
8968
8969// SetOrientationCorrection sets the OrientationCorrection field's value.
8970func (s *DetectLabelsOutput) SetOrientationCorrection(v string) *DetectLabelsOutput {
8971	s.OrientationCorrection = &v
8972	return s
8973}
8974
8975type DetectModerationLabelsInput struct {
8976	_ struct{} `type:"structure"`
8977
8978	// Sets up the configuration for human evaluation, including the FlowDefinition
8979	// the image will be sent to.
8980	HumanLoopConfig *HumanLoopConfig `type:"structure"`
8981
8982	// The input image as base64-encoded bytes or an S3 object. If you use the AWS
8983	// CLI to call Amazon Rekognition operations, passing base64-encoded image bytes
8984	// is not supported.
8985	//
8986	// If you are using an AWS SDK to call Amazon Rekognition, you might not need
8987	// to base64-encode image bytes passed using the Bytes field. For more information,
8988	// see Images in the Amazon Rekognition developer guide.
8989	//
8990	// Image is a required field
8991	Image *Image `type:"structure" required:"true"`
8992
8993	// Specifies the minimum confidence level for the labels to return. Amazon Rekognition
8994	// doesn't return any labels with a confidence level lower than this specified
8995	// value.
8996	//
8997	// If you don't specify MinConfidence, the operation returns labels with confidence
8998	// values greater than or equal to 50 percent.
8999	MinConfidence *float64 `type:"float"`
9000}
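
// A minimal usage sketch (illustrative only): it assumes a configured
// *Rekognition client named svc and a hypothetical S3 object. Omitting
// HumanLoopConfig skips human review entirely.
//
//    out, err := svc.DetectModerationLabels(&DetectModerationLabelsInput{
//        Image: &Image{S3Object: &S3Object{
//            Bucket: aws.String("my-bucket"),
//            Name:   aws.String("upload.jpg"),
//        }},
//        MinConfidence: aws.Float64(60),
//    })
//    if err == nil {
//        for _, ml := range out.ModerationLabels {
//            fmt.Println(aws.StringValue(ml.Name), aws.StringValue(ml.ParentName))
//        }
//    }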
9001
9002// String returns the string representation
9003func (s DetectModerationLabelsInput) String() string {
9004	return awsutil.Prettify(s)
9005}
9006
9007// GoString returns the string representation
9008func (s DetectModerationLabelsInput) GoString() string {
9009	return s.String()
9010}
9011
9012// Validate inspects the fields of the type to determine if they are valid.
9013func (s *DetectModerationLabelsInput) Validate() error {
9014	invalidParams := request.ErrInvalidParams{Context: "DetectModerationLabelsInput"}
9015	if s.Image == nil {
9016		invalidParams.Add(request.NewErrParamRequired("Image"))
9017	}
9018	if s.HumanLoopConfig != nil {
9019		if err := s.HumanLoopConfig.Validate(); err != nil {
9020			invalidParams.AddNested("HumanLoopConfig", err.(request.ErrInvalidParams))
9021		}
9022	}
9023	if s.Image != nil {
9024		if err := s.Image.Validate(); err != nil {
9025			invalidParams.AddNested("Image", err.(request.ErrInvalidParams))
9026		}
9027	}
9028
9029	if invalidParams.Len() > 0 {
9030		return invalidParams
9031	}
9032	return nil
9033}
9034
9035// SetHumanLoopConfig sets the HumanLoopConfig field's value.
9036func (s *DetectModerationLabelsInput) SetHumanLoopConfig(v *HumanLoopConfig) *DetectModerationLabelsInput {
9037	s.HumanLoopConfig = v
9038	return s
9039}
9040
9041// SetImage sets the Image field's value.
9042func (s *DetectModerationLabelsInput) SetImage(v *Image) *DetectModerationLabelsInput {
9043	s.Image = v
9044	return s
9045}
9046
9047// SetMinConfidence sets the MinConfidence field's value.
9048func (s *DetectModerationLabelsInput) SetMinConfidence(v float64) *DetectModerationLabelsInput {
9049	s.MinConfidence = &v
9050	return s
9051}
9052
9053type DetectModerationLabelsOutput struct {
9054	_ struct{} `type:"structure"`
9055
9056	// Shows the results of the human in the loop evaluation.
9057	HumanLoopActivationOutput *HumanLoopActivationOutput `type:"structure"`
9058
	// Array of Moderation labels detected in the input image.
9061	ModerationLabels []*ModerationLabel `type:"list"`
9062
9063	// Version number of the moderation detection model that was used to detect
9064	// unsafe content.
9065	ModerationModelVersion *string `type:"string"`
9066}
9067
9068// String returns the string representation
9069func (s DetectModerationLabelsOutput) String() string {
9070	return awsutil.Prettify(s)
9071}
9072
9073// GoString returns the string representation
9074func (s DetectModerationLabelsOutput) GoString() string {
9075	return s.String()
9076}
9077
9078// SetHumanLoopActivationOutput sets the HumanLoopActivationOutput field's value.
9079func (s *DetectModerationLabelsOutput) SetHumanLoopActivationOutput(v *HumanLoopActivationOutput) *DetectModerationLabelsOutput {
9080	s.HumanLoopActivationOutput = v
9081	return s
9082}
9083
9084// SetModerationLabels sets the ModerationLabels field's value.
9085func (s *DetectModerationLabelsOutput) SetModerationLabels(v []*ModerationLabel) *DetectModerationLabelsOutput {
9086	s.ModerationLabels = v
9087	return s
9088}
9089
9090// SetModerationModelVersion sets the ModerationModelVersion field's value.
9091func (s *DetectModerationLabelsOutput) SetModerationModelVersion(v string) *DetectModerationLabelsOutput {
9092	s.ModerationModelVersion = &v
9093	return s
9094}
9095
9096type DetectProtectiveEquipmentInput struct {
9097	_ struct{} `type:"structure"`
9098
9099	// The image in which you want to detect PPE on detected persons. The image
9100	// can be passed as image bytes or you can reference an image stored in an Amazon
9101	// S3 bucket.
9102	//
9103	// Image is a required field
9104	Image *Image `type:"structure" required:"true"`
9105
9106	// An array of PPE types that you want to summarize.
9107	SummarizationAttributes *ProtectiveEquipmentSummarizationAttributes `type:"structure"`
9108}
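
// A minimal usage sketch (illustrative only): it assumes a configured
// *Rekognition client named svc and a hypothetical S3 object. The
// summarization attributes shown request a summary for face covers at a
// minimum confidence of 80 percent.
//
//    out, err := svc.DetectProtectiveEquipment(&DetectProtectiveEquipmentInput{
//        Image: &Image{S3Object: &S3Object{
//            Bucket: aws.String("my-bucket"),
//            Name:   aws.String("worksite.jpg"),
//        }},
//        SummarizationAttributes: &ProtectiveEquipmentSummarizationAttributes{
//            MinConfidence:          aws.Float64(80),
//            RequiredEquipmentTypes: []*string{aws.String("FACE_COVER")},
//        },
//    })
//    if err == nil && out.Summary != nil {
//        fmt.Println(len(out.Summary.PersonsWithRequiredEquipment), "persons with required PPE")
//    }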
9109
9110// String returns the string representation
9111func (s DetectProtectiveEquipmentInput) String() string {
9112	return awsutil.Prettify(s)
9113}
9114
9115// GoString returns the string representation
9116func (s DetectProtectiveEquipmentInput) GoString() string {
9117	return s.String()
9118}
9119
9120// Validate inspects the fields of the type to determine if they are valid.
9121func (s *DetectProtectiveEquipmentInput) Validate() error {
9122	invalidParams := request.ErrInvalidParams{Context: "DetectProtectiveEquipmentInput"}
9123	if s.Image == nil {
9124		invalidParams.Add(request.NewErrParamRequired("Image"))
9125	}
9126	if s.Image != nil {
9127		if err := s.Image.Validate(); err != nil {
9128			invalidParams.AddNested("Image", err.(request.ErrInvalidParams))
9129		}
9130	}
9131	if s.SummarizationAttributes != nil {
9132		if err := s.SummarizationAttributes.Validate(); err != nil {
9133			invalidParams.AddNested("SummarizationAttributes", err.(request.ErrInvalidParams))
9134		}
9135	}
9136
9137	if invalidParams.Len() > 0 {
9138		return invalidParams
9139	}
9140	return nil
9141}
9142
9143// SetImage sets the Image field's value.
9144func (s *DetectProtectiveEquipmentInput) SetImage(v *Image) *DetectProtectiveEquipmentInput {
9145	s.Image = v
9146	return s
9147}
9148
9149// SetSummarizationAttributes sets the SummarizationAttributes field's value.
9150func (s *DetectProtectiveEquipmentInput) SetSummarizationAttributes(v *ProtectiveEquipmentSummarizationAttributes) *DetectProtectiveEquipmentInput {
9151	s.SummarizationAttributes = v
9152	return s
9153}
9154
9155type DetectProtectiveEquipmentOutput struct {
9156	_ struct{} `type:"structure"`
9157
9158	// An array of persons detected in the image (including persons not wearing
9159	// PPE).
9160	Persons []*ProtectiveEquipmentPerson `type:"list"`
9161
9162	// The version number of the PPE detection model used to detect PPE in the image.
9163	ProtectiveEquipmentModelVersion *string `type:"string"`
9164
9165	// Summary information for the types of PPE specified in the SummarizationAttributes
9166	// input parameter.
9167	Summary *ProtectiveEquipmentSummary `type:"structure"`
9168}
9169
9170// String returns the string representation
9171func (s DetectProtectiveEquipmentOutput) String() string {
9172	return awsutil.Prettify(s)
9173}
9174
9175// GoString returns the string representation
9176func (s DetectProtectiveEquipmentOutput) GoString() string {
9177	return s.String()
9178}
9179
9180// SetPersons sets the Persons field's value.
9181func (s *DetectProtectiveEquipmentOutput) SetPersons(v []*ProtectiveEquipmentPerson) *DetectProtectiveEquipmentOutput {
9182	s.Persons = v
9183	return s
9184}
9185
9186// SetProtectiveEquipmentModelVersion sets the ProtectiveEquipmentModelVersion field's value.
9187func (s *DetectProtectiveEquipmentOutput) SetProtectiveEquipmentModelVersion(v string) *DetectProtectiveEquipmentOutput {
9188	s.ProtectiveEquipmentModelVersion = &v
9189	return s
9190}
9191
9192// SetSummary sets the Summary field's value.
9193func (s *DetectProtectiveEquipmentOutput) SetSummary(v *ProtectiveEquipmentSummary) *DetectProtectiveEquipmentOutput {
9194	s.Summary = v
9195	return s
9196}
9197
9198// A set of optional parameters that you can use to set the criteria that the
9199// text must meet to be included in your response. WordFilter looks at a word’s
9200// height, width, and minimum confidence. RegionOfInterest lets you set a specific
9201// region of the image to look for text in.
9202type DetectTextFilters struct {
9203	_ struct{} `type:"structure"`
9204
	// A filter focusing on a certain area of the image. Uses a BoundingBox object
9206	// to set the region of the image.
9207	RegionsOfInterest []*RegionOfInterest `type:"list"`
9208
9209	// A set of parameters that allow you to filter out certain results from your
9210	// returned results.
9211	WordFilter *DetectionFilter `type:"structure"`
9212}
9213
9214// String returns the string representation
9215func (s DetectTextFilters) String() string {
9216	return awsutil.Prettify(s)
9217}
9218
9219// GoString returns the string representation
9220func (s DetectTextFilters) GoString() string {
9221	return s.String()
9222}
9223
9224// SetRegionsOfInterest sets the RegionsOfInterest field's value.
9225func (s *DetectTextFilters) SetRegionsOfInterest(v []*RegionOfInterest) *DetectTextFilters {
9226	s.RegionsOfInterest = v
9227	return s
9228}
9229
9230// SetWordFilter sets the WordFilter field's value.
9231func (s *DetectTextFilters) SetWordFilter(v *DetectionFilter) *DetectTextFilters {
9232	s.WordFilter = v
9233	return s
9234}
9235
9236type DetectTextInput struct {
9237	_ struct{} `type:"structure"`
9238
9239	// Optional parameters that let you set the criteria that the text must meet
9240	// to be included in your response.
9241	Filters *DetectTextFilters `type:"structure"`
9242
9243	// The input image as base64-encoded bytes or an Amazon S3 object. If you use
9244	// the AWS CLI to call Amazon Rekognition operations, you can't pass image bytes.
9245	//
9246	// If you are using an AWS SDK to call Amazon Rekognition, you might not need
9247	// to base64-encode image bytes passed using the Bytes field. For more information,
9248	// see Images in the Amazon Rekognition developer guide.
9249	//
9250	// Image is a required field
9251	Image *Image `type:"structure" required:"true"`
9252}
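
// A minimal usage sketch (illustrative only): it assumes a configured
// *Rekognition client named svc and a hypothetical S3 object. The word
// filter drops low-confidence and very small words from the response.
//
//    out, err := svc.DetectText(&DetectTextInput{
//        Image: &Image{S3Object: &S3Object{
//            Bucket: aws.String("my-bucket"),
//            Name:   aws.String("storefront.jpg"),
//        }},
//        Filters: &DetectTextFilters{
//            WordFilter: &DetectionFilter{
//                MinConfidence:        aws.Float64(80),
//                MinBoundingBoxHeight: aws.Float64(0.05),
//            },
//        },
//    })
//    if err == nil {
//        for _, td := range out.TextDetections {
//            fmt.Println(aws.StringValue(td.Type), aws.StringValue(td.DetectedText))
//        }
//    }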
9253
9254// String returns the string representation
9255func (s DetectTextInput) String() string {
9256	return awsutil.Prettify(s)
9257}
9258
9259// GoString returns the string representation
9260func (s DetectTextInput) GoString() string {
9261	return s.String()
9262}
9263
9264// Validate inspects the fields of the type to determine if they are valid.
9265func (s *DetectTextInput) Validate() error {
9266	invalidParams := request.ErrInvalidParams{Context: "DetectTextInput"}
9267	if s.Image == nil {
9268		invalidParams.Add(request.NewErrParamRequired("Image"))
9269	}
9270	if s.Image != nil {
9271		if err := s.Image.Validate(); err != nil {
9272			invalidParams.AddNested("Image", err.(request.ErrInvalidParams))
9273		}
9274	}
9275
9276	if invalidParams.Len() > 0 {
9277		return invalidParams
9278	}
9279	return nil
9280}
9281
9282// SetFilters sets the Filters field's value.
9283func (s *DetectTextInput) SetFilters(v *DetectTextFilters) *DetectTextInput {
9284	s.Filters = v
9285	return s
9286}
9287
9288// SetImage sets the Image field's value.
9289func (s *DetectTextInput) SetImage(v *Image) *DetectTextInput {
9290	s.Image = v
9291	return s
9292}
9293
9294type DetectTextOutput struct {
9295	_ struct{} `type:"structure"`
9296
9297	// An array of text that was detected in the input image.
9298	TextDetections []*TextDetection `type:"list"`
9299
9300	// The model version used to detect text.
9301	TextModelVersion *string `type:"string"`
9302}
9303
9304// String returns the string representation
9305func (s DetectTextOutput) String() string {
9306	return awsutil.Prettify(s)
9307}
9308
9309// GoString returns the string representation
9310func (s DetectTextOutput) GoString() string {
9311	return s.String()
9312}
9313
9314// SetTextDetections sets the TextDetections field's value.
9315func (s *DetectTextOutput) SetTextDetections(v []*TextDetection) *DetectTextOutput {
9316	s.TextDetections = v
9317	return s
9318}
9319
9320// SetTextModelVersion sets the TextModelVersion field's value.
9321func (s *DetectTextOutput) SetTextModelVersion(v string) *DetectTextOutput {
9322	s.TextModelVersion = &v
9323	return s
9324}
9325
9326// A set of parameters that allow you to filter out certain results from your
9327// returned results.
9328type DetectionFilter struct {
9329	_ struct{} `type:"structure"`
9330
9331	// Sets the minimum height of the word bounding box. Words with bounding box
	// heights less than this value will be excluded from the result. The value is
	// relative to the video frame height.
9334	MinBoundingBoxHeight *float64 `type:"float"`
9335
	// Sets the minimum width of the word bounding box. Words with bounding box
	// widths less than this value will be excluded from the result. The value is
	// relative to the video frame width.
9339	MinBoundingBoxWidth *float64 `type:"float"`
9340
	// Sets the confidence of word detection. Words with detection confidence below
	// this value will be excluded from the result. Values should be between 0.5
	// and 1, as Text in Video will not return any result below 0.5.
9344	MinConfidence *float64 `type:"float"`
9345}
9346
9347// String returns the string representation
9348func (s DetectionFilter) String() string {
9349	return awsutil.Prettify(s)
9350}
9351
9352// GoString returns the string representation
9353func (s DetectionFilter) GoString() string {
9354	return s.String()
9355}
9356
9357// SetMinBoundingBoxHeight sets the MinBoundingBoxHeight field's value.
9358func (s *DetectionFilter) SetMinBoundingBoxHeight(v float64) *DetectionFilter {
9359	s.MinBoundingBoxHeight = &v
9360	return s
9361}
9362
9363// SetMinBoundingBoxWidth sets the MinBoundingBoxWidth field's value.
9364func (s *DetectionFilter) SetMinBoundingBoxWidth(v float64) *DetectionFilter {
9365	s.MinBoundingBoxWidth = &v
9366	return s
9367}
9368
9369// SetMinConfidence sets the MinConfidence field's value.
9370func (s *DetectionFilter) SetMinConfidence(v float64) *DetectionFilter {
9371	s.MinConfidence = &v
9372	return s
9373}
9374
9375// The emotions that appear to be expressed on the face, and the confidence
9376// level in the determination. The API is only making a determination of the
9377// physical appearance of a person's face. It is not a determination of the
9378// person’s internal emotional state and should not be used in such a way.
9379// For example, a person pretending to have a sad face might not be sad emotionally.
9380type Emotion struct {
9381	_ struct{} `type:"structure"`
9382
9383	// Level of confidence in the determination.
9384	Confidence *float64 `type:"float"`
9385
9386	// Type of emotion detected.
9387	Type *string `type:"string" enum:"EmotionName"`
9388}
9389
9390// String returns the string representation
9391func (s Emotion) String() string {
9392	return awsutil.Prettify(s)
9393}
9394
9395// GoString returns the string representation
9396func (s Emotion) GoString() string {
9397	return s.String()
9398}
9399
9400// SetConfidence sets the Confidence field's value.
9401func (s *Emotion) SetConfidence(v float64) *Emotion {
9402	s.Confidence = &v
9403	return s
9404}
9405
9406// SetType sets the Type field's value.
9407func (s *Emotion) SetType(v string) *Emotion {
9408	s.Type = &v
9409	return s
9410}
9411
9412// Information about an item of Personal Protective Equipment (PPE) detected
9413// by DetectProtectiveEquipment. For more information, see DetectProtectiveEquipment.
9414type EquipmentDetection struct {
9415	_ struct{} `type:"structure"`
9416
9417	// A bounding box surrounding the item of detected PPE.
9418	BoundingBox *BoundingBox `type:"structure"`
9419
9420	// The confidence that Amazon Rekognition has that the bounding box (BoundingBox)
9421	// contains an item of PPE.
9422	Confidence *float64 `type:"float"`
9423
9424	// Information about the body part covered by the detected PPE.
9425	CoversBodyPart *CoversBodyPart `type:"structure"`
9426
9427	// The type of detected PPE.
9428	Type *string `type:"string" enum:"ProtectiveEquipmentType"`
9429}
9430
9431// String returns the string representation
9432func (s EquipmentDetection) String() string {
9433	return awsutil.Prettify(s)
9434}
9435
9436// GoString returns the string representation
9437func (s EquipmentDetection) GoString() string {
9438	return s.String()
9439}
9440
9441// SetBoundingBox sets the BoundingBox field's value.
9442func (s *EquipmentDetection) SetBoundingBox(v *BoundingBox) *EquipmentDetection {
9443	s.BoundingBox = v
9444	return s
9445}
9446
9447// SetConfidence sets the Confidence field's value.
9448func (s *EquipmentDetection) SetConfidence(v float64) *EquipmentDetection {
9449	s.Confidence = &v
9450	return s
9451}
9452
9453// SetCoversBodyPart sets the CoversBodyPart field's value.
9454func (s *EquipmentDetection) SetCoversBodyPart(v *CoversBodyPart) *EquipmentDetection {
9455	s.CoversBodyPart = v
9456	return s
9457}
9458
9459// SetType sets the Type field's value.
9460func (s *EquipmentDetection) SetType(v string) *EquipmentDetection {
9461	s.Type = &v
9462	return s
9463}
9464
9465// The evaluation results for the training of a model.
9466type EvaluationResult struct {
9467	_ struct{} `type:"structure"`
9468
9469	// The F1 score for the evaluation of all labels. The F1 score metric evaluates
9470	// the overall precision and recall performance of the model as a single value.
9471	// A higher value indicates better precision and recall performance. A lower
9472	// score indicates that precision, recall, or both are performing poorly.
9473	F1Score *float64 `type:"float"`
9474
9475	// The S3 bucket that contains the training summary.
9476	Summary *Summary `type:"structure"`
9477}
9478
9479// String returns the string representation
9480func (s EvaluationResult) String() string {
9481	return awsutil.Prettify(s)
9482}
9483
9484// GoString returns the string representation
9485func (s EvaluationResult) GoString() string {
9486	return s.String()
9487}
9488
9489// SetF1Score sets the F1Score field's value.
9490func (s *EvaluationResult) SetF1Score(v float64) *EvaluationResult {
9491	s.F1Score = &v
9492	return s
9493}
9494
9495// SetSummary sets the Summary field's value.
9496func (s *EvaluationResult) SetSummary(v *Summary) *EvaluationResult {
9497	s.Summary = v
9498	return s
9499}
9500
9501// Indicates whether or not the eyes on the face are open, and the confidence
9502// level in the determination.
9503type EyeOpen struct {
9504	_ struct{} `type:"structure"`
9505
9506	// Level of confidence in the determination.
9507	Confidence *float64 `type:"float"`
9508
9509	// Boolean value that indicates whether the eyes on the face are open.
9510	Value *bool `type:"boolean"`
9511}
9512
9513// String returns the string representation
9514func (s EyeOpen) String() string {
9515	return awsutil.Prettify(s)
9516}
9517
9518// GoString returns the string representation
9519func (s EyeOpen) GoString() string {
9520	return s.String()
9521}
9522
9523// SetConfidence sets the Confidence field's value.
9524func (s *EyeOpen) SetConfidence(v float64) *EyeOpen {
9525	s.Confidence = &v
9526	return s
9527}
9528
9529// SetValue sets the Value field's value.
9530func (s *EyeOpen) SetValue(v bool) *EyeOpen {
9531	s.Value = &v
9532	return s
9533}
9534
9535// Indicates whether or not the face is wearing eye glasses, and the confidence
9536// level in the determination.
9537type Eyeglasses struct {
9538	_ struct{} `type:"structure"`
9539
9540	// Level of confidence in the determination.
9541	Confidence *float64 `type:"float"`
9542
9543	// Boolean value that indicates whether the face is wearing eye glasses or not.
9544	Value *bool `type:"boolean"`
9545}
9546
9547// String returns the string representation
9548func (s Eyeglasses) String() string {
9549	return awsutil.Prettify(s)
9550}
9551
9552// GoString returns the string representation
9553func (s Eyeglasses) GoString() string {
9554	return s.String()
9555}
9556
9557// SetConfidence sets the Confidence field's value.
9558func (s *Eyeglasses) SetConfidence(v float64) *Eyeglasses {
9559	s.Confidence = &v
9560	return s
9561}
9562
9563// SetValue sets the Value field's value.
9564func (s *Eyeglasses) SetValue(v bool) *Eyeglasses {
9565	s.Value = &v
9566	return s
9567}
9568
9569// Describes the face properties such as the bounding box, face ID, image ID
9570// of the input image, and external image ID that you assigned.
9571type Face struct {
9572	_ struct{} `type:"structure"`
9573
9574	// Bounding box of the face.
9575	BoundingBox *BoundingBox `type:"structure"`
9576
9577	// Confidence level that the bounding box contains a face (and not a different
9578	// object such as a tree).
9579	Confidence *float64 `type:"float"`
9580
9581	// Identifier that you assign to all the faces in the input image.
9582	ExternalImageId *string `min:"1" type:"string"`
9583
9584	// Unique identifier that Amazon Rekognition assigns to the face.
9585	FaceId *string `type:"string"`
9586
9587	// Unique identifier that Amazon Rekognition assigns to the input image.
9588	ImageId *string `type:"string"`
9589}
9590
9591// String returns the string representation
9592func (s Face) String() string {
9593	return awsutil.Prettify(s)
9594}
9595
9596// GoString returns the string representation
9597func (s Face) GoString() string {
9598	return s.String()
9599}
9600
9601// SetBoundingBox sets the BoundingBox field's value.
9602func (s *Face) SetBoundingBox(v *BoundingBox) *Face {
9603	s.BoundingBox = v
9604	return s
9605}
9606
9607// SetConfidence sets the Confidence field's value.
9608func (s *Face) SetConfidence(v float64) *Face {
9609	s.Confidence = &v
9610	return s
9611}
9612
9613// SetExternalImageId sets the ExternalImageId field's value.
9614func (s *Face) SetExternalImageId(v string) *Face {
9615	s.ExternalImageId = &v
9616	return s
9617}
9618
9619// SetFaceId sets the FaceId field's value.
9620func (s *Face) SetFaceId(v string) *Face {
9621	s.FaceId = &v
9622	return s
9623}
9624
9625// SetImageId sets the ImageId field's value.
9626func (s *Face) SetImageId(v string) *Face {
9627	s.ImageId = &v
9628	return s
9629}
9630
9631// Structure containing attributes of the face that the algorithm detected.
9632//
9633// A FaceDetail object contains either the default facial attributes or all
9634// facial attributes. The default attributes are BoundingBox, Confidence, Landmarks,
9635// Pose, and Quality.
9636//
9637// GetFaceDetection is the only Amazon Rekognition Video stored video operation
9638// that can return a FaceDetail object with all attributes. To specify which
9639// attributes to return, use the FaceAttributes input parameter for StartFaceDetection.
9640// The following Amazon Rekognition Video operations return only the default
9641// attributes. The corresponding Start operations don't have a FaceAttributes
9642// input parameter.
9643//
9644//    * GetCelebrityRecognition
9645//
9646//    * GetPersonTracking
9647//
9648//    * GetFaceSearch
9649//
9650// The Amazon Rekognition Image DetectFaces and IndexFaces operations can return
9651// all facial attributes. To specify which attributes to return, use the Attributes
9652// input parameter for DetectFaces. For IndexFaces, use the DetectAttributes
9653// input parameter.
9654type FaceDetail struct {
9655	_ struct{} `type:"structure"`
9656
9657	// The estimated age range, in years, for the face. Low represents the lowest
9658	// estimated age and High represents the highest estimated age.
9659	AgeRange *AgeRange `type:"structure"`
9660
9661	// Indicates whether or not the face has a beard, and the confidence level in
9662	// the determination.
9663	Beard *Beard `type:"structure"`
9664
9665	// Bounding box of the face. Default attribute.
9666	BoundingBox *BoundingBox `type:"structure"`
9667
9668	// Confidence level that the bounding box contains a face (and not a different
9669	// object such as a tree). Default attribute.
9670	Confidence *float64 `type:"float"`
9671
9672	// The emotions that appear to be expressed on the face, and the confidence
9673	// level in the determination. The API is only making a determination of the
9674	// physical appearance of a person's face. It is not a determination of the
9675	// person’s internal emotional state and should not be used in such a way.
9676	// For example, a person pretending to have a sad face might not be sad emotionally.
9677	Emotions []*Emotion `type:"list"`
9678
9679	// Indicates whether or not the face is wearing eye glasses, and the confidence
9680	// level in the determination.
9681	Eyeglasses *Eyeglasses `type:"structure"`
9682
9683	// Indicates whether or not the eyes on the face are open, and the confidence
9684	// level in the determination.
9685	EyesOpen *EyeOpen `type:"structure"`
9686
9687	// The predicted gender of a detected face.
9688	Gender *Gender `type:"structure"`
9689
9690	// Indicates the location of landmarks on the face. Default attribute.
9691	Landmarks []*Landmark `type:"list"`
9692
9693	// Indicates whether or not the mouth on the face is open, and the confidence
9694	// level in the determination.
9695	MouthOpen *MouthOpen `type:"structure"`
9696
9697	// Indicates whether or not the face has a mustache, and the confidence level
9698	// in the determination.
9699	Mustache *Mustache `type:"structure"`
9700
9701	// Indicates the pose of the face as determined by its pitch, roll, and yaw.
9702	// Default attribute.
9703	Pose *Pose `type:"structure"`
9704
9705	// Identifies image brightness and sharpness. Default attribute.
9706	Quality *ImageQuality `type:"structure"`
9707
9708	// Indicates whether or not the face is smiling, and the confidence level in
9709	// the determination.
9710	Smile *Smile `type:"structure"`
9711
9712	// Indicates whether or not the face is wearing sunglasses, and the confidence
9713	// level in the determination.
9714	Sunglasses *Sunglasses `type:"structure"`
9715}
9716
9717// String returns the string representation
9718func (s FaceDetail) String() string {
9719	return awsutil.Prettify(s)
9720}
9721
9722// GoString returns the string representation
9723func (s FaceDetail) GoString() string {
9724	return s.String()
9725}
9726
9727// SetAgeRange sets the AgeRange field's value.
9728func (s *FaceDetail) SetAgeRange(v *AgeRange) *FaceDetail {
9729	s.AgeRange = v
9730	return s
9731}
9732
9733// SetBeard sets the Beard field's value.
9734func (s *FaceDetail) SetBeard(v *Beard) *FaceDetail {
9735	s.Beard = v
9736	return s
9737}
9738
9739// SetBoundingBox sets the BoundingBox field's value.
9740func (s *FaceDetail) SetBoundingBox(v *BoundingBox) *FaceDetail {
9741	s.BoundingBox = v
9742	return s
9743}
9744
9745// SetConfidence sets the Confidence field's value.
9746func (s *FaceDetail) SetConfidence(v float64) *FaceDetail {
9747	s.Confidence = &v
9748	return s
9749}
9750
9751// SetEmotions sets the Emotions field's value.
9752func (s *FaceDetail) SetEmotions(v []*Emotion) *FaceDetail {
9753	s.Emotions = v
9754	return s
9755}
9756
9757// SetEyeglasses sets the Eyeglasses field's value.
9758func (s *FaceDetail) SetEyeglasses(v *Eyeglasses) *FaceDetail {
9759	s.Eyeglasses = v
9760	return s
9761}
9762
9763// SetEyesOpen sets the EyesOpen field's value.
9764func (s *FaceDetail) SetEyesOpen(v *EyeOpen) *FaceDetail {
9765	s.EyesOpen = v
9766	return s
9767}
9768
9769// SetGender sets the Gender field's value.
9770func (s *FaceDetail) SetGender(v *Gender) *FaceDetail {
9771	s.Gender = v
9772	return s
9773}
9774
9775// SetLandmarks sets the Landmarks field's value.
9776func (s *FaceDetail) SetLandmarks(v []*Landmark) *FaceDetail {
9777	s.Landmarks = v
9778	return s
9779}
9780
9781// SetMouthOpen sets the MouthOpen field's value.
9782func (s *FaceDetail) SetMouthOpen(v *MouthOpen) *FaceDetail {
9783	s.MouthOpen = v
9784	return s
9785}
9786
9787// SetMustache sets the Mustache field's value.
9788func (s *FaceDetail) SetMustache(v *Mustache) *FaceDetail {
9789	s.Mustache = v
9790	return s
9791}
9792
9793// SetPose sets the Pose field's value.
9794func (s *FaceDetail) SetPose(v *Pose) *FaceDetail {
9795	s.Pose = v
9796	return s
9797}
9798
9799// SetQuality sets the Quality field's value.
9800func (s *FaceDetail) SetQuality(v *ImageQuality) *FaceDetail {
9801	s.Quality = v
9802	return s
9803}
9804
9805// SetSmile sets the Smile field's value.
9806func (s *FaceDetail) SetSmile(v *Smile) *FaceDetail {
9807	s.Smile = v
9808	return s
9809}
9810
9811// SetSunglasses sets the Sunglasses field's value.
9812func (s *FaceDetail) SetSunglasses(v *Sunglasses) *FaceDetail {
9813	s.Sunglasses = v
9814	return s
9815}
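
// Example (editorial sketch, not generated code): asking DetectFaces for all
// facial attributes and reading a few FaceDetail fields. The bucket and object
// names are placeholders, and "svc" is assumed to be an initialized
// *rekognition.Rekognition client.
//
//    out, err := svc.DetectFaces(&rekognition.DetectFacesInput{
//        Attributes: []*string{aws.String("ALL")},
//        Image: &rekognition.Image{
//            S3Object: &rekognition.S3Object{
//                Bucket: aws.String("my-bucket"),
//                Name:   aws.String("photo.jpg"),
//            },
//        },
//    })
//    if err == nil {
//        for _, fd := range out.FaceDetails {
//            fmt.Printf("age %d-%d, smiling: %v\n",
//                aws.Int64Value(fd.AgeRange.Low),
//                aws.Int64Value(fd.AgeRange.High),
//                aws.BoolValue(fd.Smile.Value))
//        }
//    }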
9816
9817// Information about a face detected in a video analysis request and the time
9818// the face was detected in the video.
9819type FaceDetection struct {
9820	_ struct{} `type:"structure"`
9821
9822	// The face properties for the detected face.
9823	Face *FaceDetail `type:"structure"`
9824
9825	// Time, in milliseconds from the start of the video, that the face was detected.
9826	Timestamp *int64 `type:"long"`
9827}
9828
9829// String returns the string representation
9830func (s FaceDetection) String() string {
9831	return awsutil.Prettify(s)
9832}
9833
9834// GoString returns the string representation
9835func (s FaceDetection) GoString() string {
9836	return s.String()
9837}
9838
9839// SetFace sets the Face field's value.
9840func (s *FaceDetection) SetFace(v *FaceDetail) *FaceDetection {
9841	s.Face = v
9842	return s
9843}
9844
9845// SetTimestamp sets the Timestamp field's value.
9846func (s *FaceDetection) SetTimestamp(v int64) *FaceDetection {
9847	s.Timestamp = &v
9848	return s
9849}
9850
9851// Provides face metadata. It also provides the confidence in the match of
9852// this face with the input face.
9853type FaceMatch struct {
9854	_ struct{} `type:"structure"`
9855
9856	// Describes the face properties such as the bounding box, face ID, image ID
9857	// of the source image, and external image ID that you assigned.
9858	Face *Face `type:"structure"`
9859
9860	// Confidence in the match of this face with the input face.
9861	Similarity *float64 `type:"float"`
9862}
9863
9864// String returns the string representation
9865func (s FaceMatch) String() string {
9866	return awsutil.Prettify(s)
9867}
9868
9869// GoString returns the string representation
9870func (s FaceMatch) GoString() string {
9871	return s.String()
9872}
9873
9874// SetFace sets the Face field's value.
9875func (s *FaceMatch) SetFace(v *Face) *FaceMatch {
9876	s.Face = v
9877	return s
9878}
9879
9880// SetSimilarity sets the Similarity field's value.
9881func (s *FaceMatch) SetSimilarity(v float64) *FaceMatch {
9882	s.Similarity = &v
9883	return s
9884}
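
// Example (editorial sketch): filtering a SearchFaces response down to
// high-similarity matches. "out" is assumed to be a
// *rekognition.SearchFacesOutput; the 90.0 cutoff is illustrative.
//
//    for _, m := range out.FaceMatches {
//        if aws.Float64Value(m.Similarity) >= 90.0 {
//            fmt.Printf("face %s, similarity %.1f\n",
//                aws.StringValue(m.Face.FaceId),
//                aws.Float64Value(m.Similarity))
//        }
//    }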
9885
9886// Object containing both the face metadata (stored in the backend database)
9887// and facial attributes that are detected but aren't stored in the database.
9888type FaceRecord struct {
9889	_ struct{} `type:"structure"`
9890
9891	// Describes the face properties such as the bounding box, face ID, image ID
9892	// of the input image, and external image ID that you assigned.
9893	Face *Face `type:"structure"`
9894
9895	// Structure containing attributes of the face that the algorithm detected.
9896	FaceDetail *FaceDetail `type:"structure"`
9897}
9898
9899// String returns the string representation
9900func (s FaceRecord) String() string {
9901	return awsutil.Prettify(s)
9902}
9903
9904// GoString returns the string representation
9905func (s FaceRecord) GoString() string {
9906	return s.String()
9907}
9908
9909// SetFace sets the Face field's value.
9910func (s *FaceRecord) SetFace(v *Face) *FaceRecord {
9911	s.Face = v
9912	return s
9913}
9914
9915// SetFaceDetail sets the FaceDetail field's value.
9916func (s *FaceRecord) SetFaceDetail(v *FaceDetail) *FaceRecord {
9917	s.FaceDetail = v
9918	return s
9919}
9920
9921// Input face search parameters for an Amazon Rekognition stream processor.
9922// FaceSearchSettings is a request parameter for CreateStreamProcessor.
9923type FaceSearchSettings struct {
9924	_ struct{} `type:"structure"`
9925
9926	// The ID of a collection that contains faces that you want to search for.
9927	CollectionId *string `min:"1" type:"string"`
9928
9929	// Minimum face match confidence score that must be met to return a result for
9930	// a recognized face. Default is 80. 0 is the lowest confidence. 100 is the
9931	// highest confidence.
9932	FaceMatchThreshold *float64 `type:"float"`
9933}
9934
9935// String returns the string representation
9936func (s FaceSearchSettings) String() string {
9937	return awsutil.Prettify(s)
9938}
9939
9940// GoString returns the string representation
9941func (s FaceSearchSettings) GoString() string {
9942	return s.String()
9943}
9944
9945// Validate inspects the fields of the type to determine if they are valid.
9946func (s *FaceSearchSettings) Validate() error {
9947	invalidParams := request.ErrInvalidParams{Context: "FaceSearchSettings"}
9948	if s.CollectionId != nil && len(*s.CollectionId) < 1 {
9949		invalidParams.Add(request.NewErrParamMinLen("CollectionId", 1))
9950	}
9951
9952	if invalidParams.Len() > 0 {
9953		return invalidParams
9954	}
9955	return nil
9956}
9957
9958// SetCollectionId sets the CollectionId field's value.
9959func (s *FaceSearchSettings) SetCollectionId(v string) *FaceSearchSettings {
9960	s.CollectionId = &v
9961	return s
9962}
9963
9964// SetFaceMatchThreshold sets the FaceMatchThreshold field's value.
9965func (s *FaceSearchSettings) SetFaceMatchThreshold(v float64) *FaceSearchSettings {
9966	s.FaceMatchThreshold = &v
9967	return s
9968}
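
// Example (editorial sketch): supplying FaceSearchSettings when creating a
// stream processor. The collection ID and threshold are placeholders; the
// remaining required CreateStreamProcessorInput fields (Input, Name, Output,
// RoleArn) are omitted for brevity.
//
//    settings := &rekognition.StreamProcessorSettings{
//        FaceSearch: &rekognition.FaceSearchSettings{
//            CollectionId:       aws.String("my-collection"),
//            FaceMatchThreshold: aws.Float64(85.0),
//        },
//    }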
9969
9970// The predicted gender of a detected face.
9971//
9972// Amazon Rekognition makes gender binary (male/female) predictions based on
9973// the physical appearance of a face in a particular image. This kind of prediction
9974// is not designed to categorize a person’s gender identity, and you shouldn't
9975// use Amazon Rekognition to make such a determination. For example, a male
9976// actor wearing a long-haired wig and earrings for a role might be predicted
9977// as female.
9978//
9979// Using Amazon Rekognition to make gender binary predictions is best suited
9980// for use cases where aggregate gender distribution statistics need to be analyzed
9981// without identifying specific users. For example, the percentage of female
9982// users compared to male users on a social media platform.
9983//
9984// We don't recommend using gender binary predictions to make decisions that
9985// impact an individual's rights, privacy, or access to services.
9986type Gender struct {
9987	_ struct{} `type:"structure"`
9988
9989	// Level of confidence in the prediction.
9990	Confidence *float64 `type:"float"`
9991
9992	// The predicted gender of the face.
9993	Value *string `type:"string" enum:"GenderType"`
9994}
9995
9996// String returns the string representation
9997func (s Gender) String() string {
9998	return awsutil.Prettify(s)
9999}
10000
10001// GoString returns the string representation
10002func (s Gender) GoString() string {
10003	return s.String()
10004}
10005
10006// SetConfidence sets the Confidence field's value.
10007func (s *Gender) SetConfidence(v float64) *Gender {
10008	s.Confidence = &v
10009	return s
10010}
10011
10012// SetValue sets the Value field's value.
10013func (s *Gender) SetValue(v string) *Gender {
10014	s.Value = &v
10015	return s
10016}
10017
10018// Information about where an object (DetectCustomLabels) or text (DetectText)
10019// is located on an image.
10020type Geometry struct {
10021	_ struct{} `type:"structure"`
10022
10023	// An axis-aligned coarse representation of the detected item's location on
10024	// the image.
10025	BoundingBox *BoundingBox `type:"structure"`
10026
10027	// Within the bounding box, a fine-grained polygon around the detected item.
10028	Polygon []*Point `type:"list"`
10029}
10030
10031// String returns the string representation
10032func (s Geometry) String() string {
10033	return awsutil.Prettify(s)
10034}
10035
10036// GoString returns the string representation
10037func (s Geometry) GoString() string {
10038	return s.String()
10039}
10040
10041// SetBoundingBox sets the BoundingBox field's value.
10042func (s *Geometry) SetBoundingBox(v *BoundingBox) *Geometry {
10043	s.BoundingBox = v
10044	return s
10045}
10046
10047// SetPolygon sets the Polygon field's value.
10048func (s *Geometry) SetPolygon(v []*Point) *Geometry {
10049	s.Polygon = v
10050	return s
10051}
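
// Example (editorial sketch): walking the fine-grained polygon of a Geometry
// value. "geom" is assumed to be a *rekognition.Geometry from a DetectText or
// DetectCustomLabels response; Point coordinates are ratios of the image size.
//
//    for _, pt := range geom.Polygon {
//        fmt.Printf("(%.3f, %.3f) ", aws.Float64Value(pt.X), aws.Float64Value(pt.Y))
//    }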
10052
10053type GetCelebrityInfoInput struct {
10054	_ struct{} `type:"structure"`
10055
10056	// The ID for the celebrity. You get the celebrity ID from a call to the RecognizeCelebrities
10057	// operation, which recognizes celebrities in an image.
10058	//
10059	// Id is a required field
10060	Id *string `type:"string" required:"true"`
10061}
10062
10063// String returns the string representation
10064func (s GetCelebrityInfoInput) String() string {
10065	return awsutil.Prettify(s)
10066}
10067
10068// GoString returns the string representation
10069func (s GetCelebrityInfoInput) GoString() string {
10070	return s.String()
10071}
10072
10073// Validate inspects the fields of the type to determine if they are valid.
10074func (s *GetCelebrityInfoInput) Validate() error {
10075	invalidParams := request.ErrInvalidParams{Context: "GetCelebrityInfoInput"}
10076	if s.Id == nil {
10077		invalidParams.Add(request.NewErrParamRequired("Id"))
10078	}
10079
10080	if invalidParams.Len() > 0 {
10081		return invalidParams
10082	}
10083	return nil
10084}
10085
10086// SetId sets the Id field's value.
10087func (s *GetCelebrityInfoInput) SetId(v string) *GetCelebrityInfoInput {
10088	s.Id = &v
10089	return s
10090}
10091
10092type GetCelebrityInfoOutput struct {
10093	_ struct{} `type:"structure"`
10094
10095	// The name of the celebrity.
10096	Name *string `type:"string"`
10097
10098	// An array of URLs pointing to additional celebrity information.
10099	Urls []*string `type:"list"`
10100}
10101
10102// String returns the string representation
10103func (s GetCelebrityInfoOutput) String() string {
10104	return awsutil.Prettify(s)
10105}
10106
10107// GoString returns the string representation
10108func (s GetCelebrityInfoOutput) GoString() string {
10109	return s.String()
10110}
10111
10112// SetName sets the Name field's value.
10113func (s *GetCelebrityInfoOutput) SetName(v string) *GetCelebrityInfoOutput {
10114	s.Name = &v
10115	return s
10116}
10117
10118// SetUrls sets the Urls field's value.
10119func (s *GetCelebrityInfoOutput) SetUrls(v []*string) *GetCelebrityInfoOutput {
10120	s.Urls = v
10121	return s
10122}
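
// Example (editorial sketch): looking up additional information for a
// celebrity ID returned by RecognizeCelebrities. The ID is a placeholder.
//
//    info, err := svc.GetCelebrityInfo(&rekognition.GetCelebrityInfoInput{
//        Id: aws.String("celebrity-id"),
//    })
//    if err == nil {
//        fmt.Println(aws.StringValue(info.Name), aws.StringValueSlice(info.Urls))
//    }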
10123
10124type GetCelebrityRecognitionInput struct {
10125	_ struct{} `type:"structure"`
10126
10127	// Job identifier for the required celebrity recognition analysis. You can
10128	// get the job identifier from a call to StartCelebrityRecognition.
10129	//
10130	// JobId is a required field
10131	JobId *string `min:"1" type:"string" required:"true"`
10132
10133	// Maximum number of results to return per paginated call. The largest value
10134	// you can specify is 1000. If you specify a value greater than 1000, a maximum
10135	// of 1000 results is returned. The default value is 1000.
10136	MaxResults *int64 `min:"1" type:"integer"`
10137
10138	// If the previous response was incomplete (because there are more recognized
10139	// celebrities to retrieve), Amazon Rekognition Video returns a pagination token
10140	// in the response. You can use this pagination token to retrieve the next set
10141	// of celebrities.
10142	NextToken *string `type:"string"`
10143
10144	// Sort to use for celebrities returned in the Celebrities field. Specify ID
10145	// to sort by the celebrity identifier, or specify TIMESTAMP to sort by the
10146	// time the celebrity was recognized.
10147	SortBy *string `type:"string" enum:"CelebrityRecognitionSortBy"`
10148}
10149
10150// String returns the string representation
10151func (s GetCelebrityRecognitionInput) String() string {
10152	return awsutil.Prettify(s)
10153}
10154
10155// GoString returns the string representation
10156func (s GetCelebrityRecognitionInput) GoString() string {
10157	return s.String()
10158}
10159
10160// Validate inspects the fields of the type to determine if they are valid.
10161func (s *GetCelebrityRecognitionInput) Validate() error {
10162	invalidParams := request.ErrInvalidParams{Context: "GetCelebrityRecognitionInput"}
10163	if s.JobId == nil {
10164		invalidParams.Add(request.NewErrParamRequired("JobId"))
10165	}
10166	if s.JobId != nil && len(*s.JobId) < 1 {
10167		invalidParams.Add(request.NewErrParamMinLen("JobId", 1))
10168	}
10169	if s.MaxResults != nil && *s.MaxResults < 1 {
10170		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
10171	}
10172
10173	if invalidParams.Len() > 0 {
10174		return invalidParams
10175	}
10176	return nil
10177}
10178
10179// SetJobId sets the JobId field's value.
10180func (s *GetCelebrityRecognitionInput) SetJobId(v string) *GetCelebrityRecognitionInput {
10181	s.JobId = &v
10182	return s
10183}
10184
10185// SetMaxResults sets the MaxResults field's value.
10186func (s *GetCelebrityRecognitionInput) SetMaxResults(v int64) *GetCelebrityRecognitionInput {
10187	s.MaxResults = &v
10188	return s
10189}
10190
10191// SetNextToken sets the NextToken field's value.
10192func (s *GetCelebrityRecognitionInput) SetNextToken(v string) *GetCelebrityRecognitionInput {
10193	s.NextToken = &v
10194	return s
10195}
10196
10197// SetSortBy sets the SortBy field's value.
10198func (s *GetCelebrityRecognitionInput) SetSortBy(v string) *GetCelebrityRecognitionInput {
10199	s.SortBy = &v
10200	return s
10201}
10202
10203type GetCelebrityRecognitionOutput struct {
10204	_ struct{} `type:"structure"`
10205
10206	// Array of celebrities recognized in the video.
10207	Celebrities []*CelebrityRecognition `type:"list"`
10208
10209	// The current status of the celebrity recognition job.
10210	JobStatus *string `type:"string" enum:"VideoJobStatus"`
10211
10212	// If the response is truncated, Amazon Rekognition Video returns this token
10213	// that you can use in the subsequent request to retrieve the next set of celebrities.
10214	NextToken *string `type:"string"`
10215
10216	// If the job fails, StatusMessage provides a descriptive error message.
10217	StatusMessage *string `type:"string"`
10218
10219	// Information about a video that Amazon Rekognition Video analyzed. VideoMetadata
10220	// is returned in every page of paginated responses from an Amazon Rekognition
10221	// Video operation.
10222	VideoMetadata *VideoMetadata `type:"structure"`
10223}
10224
10225// String returns the string representation
10226func (s GetCelebrityRecognitionOutput) String() string {
10227	return awsutil.Prettify(s)
10228}
10229
10230// GoString returns the string representation
10231func (s GetCelebrityRecognitionOutput) GoString() string {
10232	return s.String()
10233}
10234
10235// SetCelebrities sets the Celebrities field's value.
10236func (s *GetCelebrityRecognitionOutput) SetCelebrities(v []*CelebrityRecognition) *GetCelebrityRecognitionOutput {
10237	s.Celebrities = v
10238	return s
10239}
10240
10241// SetJobStatus sets the JobStatus field's value.
10242func (s *GetCelebrityRecognitionOutput) SetJobStatus(v string) *GetCelebrityRecognitionOutput {
10243	s.JobStatus = &v
10244	return s
10245}
10246
10247// SetNextToken sets the NextToken field's value.
10248func (s *GetCelebrityRecognitionOutput) SetNextToken(v string) *GetCelebrityRecognitionOutput {
10249	s.NextToken = &v
10250	return s
10251}
10252
10253// SetStatusMessage sets the StatusMessage field's value.
10254func (s *GetCelebrityRecognitionOutput) SetStatusMessage(v string) *GetCelebrityRecognitionOutput {
10255	s.StatusMessage = &v
10256	return s
10257}
10258
10259// SetVideoMetadata sets the VideoMetadata field's value.
10260func (s *GetCelebrityRecognitionOutput) SetVideoMetadata(v *VideoMetadata) *GetCelebrityRecognitionOutput {
10261	s.VideoMetadata = v
10262	return s
10263}
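
// Example (editorial sketch): draining every page of celebrity recognition
// results with a manual NextToken loop. "jobId" is assumed to come from an
// earlier StartCelebrityRecognition call.
//
//    input := &rekognition.GetCelebrityRecognitionInput{
//        JobId:      aws.String(jobId),
//        MaxResults: aws.Int64(1000),
//    }
//    for {
//        page, err := svc.GetCelebrityRecognition(input)
//        if err != nil {
//            break
//        }
//        for _, c := range page.Celebrities {
//            _ = c // process each CelebrityRecognition element
//        }
//        if page.NextToken == nil {
//            break
//        }
//        input.NextToken = page.NextToken
//    }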
10264
10265type GetContentModerationInput struct {
10266	_ struct{} `type:"structure"`
10267
10268	// The identifier for the unsafe content job. You get the JobId from a call
10269	// to StartContentModeration.
10270	//
10271	// JobId is a required field
10272	JobId *string `min:"1" type:"string" required:"true"`
10273
10274	// Maximum number of results to return per paginated call. The largest value
10275	// you can specify is 1000. If you specify a value greater than 1000, a maximum
10276	// of 1000 results is returned. The default value is 1000.
10277	MaxResults *int64 `min:"1" type:"integer"`
10278
10279	// If the previous response was incomplete (because there is more data to retrieve),
10280	// Amazon Rekognition returns a pagination token in the response. You can use
10281	// this pagination token to retrieve the next set of unsafe content labels.
10282	NextToken *string `type:"string"`
10283
10284	// Sort to use for elements in the ModerationLabelDetections array. Use TIMESTAMP
10285	// to sort array elements by the time labels are detected. Use NAME to alphabetically
10286	// group elements for a label together. Within each label group, the array elements
10287	// are sorted by detection confidence. The default sort is by TIMESTAMP.
10288	SortBy *string `type:"string" enum:"ContentModerationSortBy"`
10289}
10290
10291// String returns the string representation
10292func (s GetContentModerationInput) String() string {
10293	return awsutil.Prettify(s)
10294}
10295
10296// GoString returns the string representation
10297func (s GetContentModerationInput) GoString() string {
10298	return s.String()
10299}
10300
10301// Validate inspects the fields of the type to determine if they are valid.
10302func (s *GetContentModerationInput) Validate() error {
10303	invalidParams := request.ErrInvalidParams{Context: "GetContentModerationInput"}
10304	if s.JobId == nil {
10305		invalidParams.Add(request.NewErrParamRequired("JobId"))
10306	}
10307	if s.JobId != nil && len(*s.JobId) < 1 {
10308		invalidParams.Add(request.NewErrParamMinLen("JobId", 1))
10309	}
10310	if s.MaxResults != nil && *s.MaxResults < 1 {
10311		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
10312	}
10313
10314	if invalidParams.Len() > 0 {
10315		return invalidParams
10316	}
10317	return nil
10318}
10319
10320// SetJobId sets the JobId field's value.
10321func (s *GetContentModerationInput) SetJobId(v string) *GetContentModerationInput {
10322	s.JobId = &v
10323	return s
10324}
10325
10326// SetMaxResults sets the MaxResults field's value.
10327func (s *GetContentModerationInput) SetMaxResults(v int64) *GetContentModerationInput {
10328	s.MaxResults = &v
10329	return s
10330}
10331
10332// SetNextToken sets the NextToken field's value.
10333func (s *GetContentModerationInput) SetNextToken(v string) *GetContentModerationInput {
10334	s.NextToken = &v
10335	return s
10336}
10337
10338// SetSortBy sets the SortBy field's value.
10339func (s *GetContentModerationInput) SetSortBy(v string) *GetContentModerationInput {
10340	s.SortBy = &v
10341	return s
10342}
10343
10344type GetContentModerationOutput struct {
10345	_ struct{} `type:"structure"`
10346
10347	// The current status of the unsafe content analysis job.
10348	JobStatus *string `type:"string" enum:"VideoJobStatus"`
10349
10350	// The detected unsafe content labels and the time(s) they were detected.
10351	ModerationLabels []*ContentModerationDetection `type:"list"`
10352
10353	// Version number of the moderation detection model that was used to detect
10354	// unsafe content.
10355	ModerationModelVersion *string `type:"string"`
10356
10357	// If the response is truncated, Amazon Rekognition Video returns this token
10358	// that you can use in the subsequent request to retrieve the next set of unsafe
10359	// content labels.
10360	NextToken *string `type:"string"`
10361
10362	// If the job fails, StatusMessage provides a descriptive error message.
10363	StatusMessage *string `type:"string"`
10364
10365	// Information about a video that Amazon Rekognition analyzed. VideoMetadata
10366	// is returned in every page of paginated responses from GetContentModeration.
10367	VideoMetadata *VideoMetadata `type:"structure"`
10368}
10369
10370// String returns the string representation
10371func (s GetContentModerationOutput) String() string {
10372	return awsutil.Prettify(s)
10373}
10374
10375// GoString returns the string representation
10376func (s GetContentModerationOutput) GoString() string {
10377	return s.String()
10378}
10379
10380// SetJobStatus sets the JobStatus field's value.
10381func (s *GetContentModerationOutput) SetJobStatus(v string) *GetContentModerationOutput {
10382	s.JobStatus = &v
10383	return s
10384}
10385
10386// SetModerationLabels sets the ModerationLabels field's value.
10387func (s *GetContentModerationOutput) SetModerationLabels(v []*ContentModerationDetection) *GetContentModerationOutput {
10388	s.ModerationLabels = v
10389	return s
10390}
10391
10392// SetModerationModelVersion sets the ModerationModelVersion field's value.
10393func (s *GetContentModerationOutput) SetModerationModelVersion(v string) *GetContentModerationOutput {
10394	s.ModerationModelVersion = &v
10395	return s
10396}
10397
10398// SetNextToken sets the NextToken field's value.
10399func (s *GetContentModerationOutput) SetNextToken(v string) *GetContentModerationOutput {
10400	s.NextToken = &v
10401	return s
10402}
10403
10404// SetStatusMessage sets the StatusMessage field's value.
10405func (s *GetContentModerationOutput) SetStatusMessage(v string) *GetContentModerationOutput {
10406	s.StatusMessage = &v
10407	return s
10408}
10409
10410// SetVideoMetadata sets the VideoMetadata field's value.
10411func (s *GetContentModerationOutput) SetVideoMetadata(v *VideoMetadata) *GetContentModerationOutput {
10412	s.VideoMetadata = v
10413	return s
10414}
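
// Example (editorial sketch): grouping moderation results by label name and
// printing each detection with its timestamp. "jobId" is assumed to come from
// an earlier StartContentModeration call; the SortBy value is written as a
// plain string here, though the generated ContentModerationSortBy constants
// can be used instead.
//
//    out, err := svc.GetContentModeration(&rekognition.GetContentModerationInput{
//        JobId:  aws.String(jobId),
//        SortBy: aws.String("NAME"),
//    })
//    if err == nil {
//        for _, d := range out.ModerationLabels {
//            fmt.Printf("%dms %s (%.1f%%)\n", aws.Int64Value(d.Timestamp),
//                aws.StringValue(d.ModerationLabel.Name),
//                aws.Float64Value(d.ModerationLabel.Confidence))
//        }
//    }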
10415
10416type GetFaceDetectionInput struct {
10417	_ struct{} `type:"structure"`
10418
10419	// Unique identifier for the face detection job. The JobId is returned from
10420	// StartFaceDetection.
10421	//
10422	// JobId is a required field
10423	JobId *string `min:"1" type:"string" required:"true"`
10424
10425	// Maximum number of results to return per paginated call. The largest value
10426	// you can specify is 1000. If you specify a value greater than 1000, a maximum
10427	// of 1000 results is returned. The default value is 1000.
10428	MaxResults *int64 `min:"1" type:"integer"`
10429
10430	// If the previous response was incomplete (because there are more faces to
10431	// retrieve), Amazon Rekognition Video returns a pagination token in the response.
10432	// You can use this pagination token to retrieve the next set of faces.
10433	NextToken *string `type:"string"`
10434}
10435
10436// String returns the string representation
10437func (s GetFaceDetectionInput) String() string {
10438	return awsutil.Prettify(s)
10439}
10440
10441// GoString returns the string representation
10442func (s GetFaceDetectionInput) GoString() string {
10443	return s.String()
10444}
10445
10446// Validate inspects the fields of the type to determine if they are valid.
10447func (s *GetFaceDetectionInput) Validate() error {
10448	invalidParams := request.ErrInvalidParams{Context: "GetFaceDetectionInput"}
10449	if s.JobId == nil {
10450		invalidParams.Add(request.NewErrParamRequired("JobId"))
10451	}
10452	if s.JobId != nil && len(*s.JobId) < 1 {
10453		invalidParams.Add(request.NewErrParamMinLen("JobId", 1))
10454	}
10455	if s.MaxResults != nil && *s.MaxResults < 1 {
10456		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
10457	}
10458
10459	if invalidParams.Len() > 0 {
10460		return invalidParams
10461	}
10462	return nil
10463}
10464
10465// SetJobId sets the JobId field's value.
10466func (s *GetFaceDetectionInput) SetJobId(v string) *GetFaceDetectionInput {
10467	s.JobId = &v
10468	return s
10469}
10470
10471// SetMaxResults sets the MaxResults field's value.
10472func (s *GetFaceDetectionInput) SetMaxResults(v int64) *GetFaceDetectionInput {
10473	s.MaxResults = &v
10474	return s
10475}
10476
10477// SetNextToken sets the NextToken field's value.
10478func (s *GetFaceDetectionInput) SetNextToken(v string) *GetFaceDetectionInput {
10479	s.NextToken = &v
10480	return s
10481}
10482
10483type GetFaceDetectionOutput struct {
10484	_ struct{} `type:"structure"`
10485
10486	// An array of faces detected in the video. Each element contains a detected
10487	// face's details and the time, in milliseconds from the start of the video,
10488	// the face was detected.
10489	Faces []*FaceDetection `type:"list"`
10490
10491	// The current status of the face detection job.
10492	JobStatus *string `type:"string" enum:"VideoJobStatus"`
10493
10494	// If the response is truncated, Amazon Rekognition returns this token that
10495	// you can use in the subsequent request to retrieve the next set of faces.
10496	NextToken *string `type:"string"`
10497
10498	// If the job fails, StatusMessage provides a descriptive error message.
10499	StatusMessage *string `type:"string"`
10500
10501	// Information about a video that Amazon Rekognition Video analyzed. VideoMetadata
10502	// is returned in every page of paginated responses from an Amazon Rekognition
10503	// Video operation.
10504	VideoMetadata *VideoMetadata `type:"structure"`
10505}
10506
10507// String returns the string representation
10508func (s GetFaceDetectionOutput) String() string {
10509	return awsutil.Prettify(s)
10510}
10511
10512// GoString returns the string representation
10513func (s GetFaceDetectionOutput) GoString() string {
10514	return s.String()
10515}
10516
10517// SetFaces sets the Faces field's value.
10518func (s *GetFaceDetectionOutput) SetFaces(v []*FaceDetection) *GetFaceDetectionOutput {
10519	s.Faces = v
10520	return s
10521}
10522
10523// SetJobStatus sets the JobStatus field's value.
10524func (s *GetFaceDetectionOutput) SetJobStatus(v string) *GetFaceDetectionOutput {
10525	s.JobStatus = &v
10526	return s
10527}
10528
10529// SetNextToken sets the NextToken field's value.
10530func (s *GetFaceDetectionOutput) SetNextToken(v string) *GetFaceDetectionOutput {
10531	s.NextToken = &v
10532	return s
10533}
10534
10535// SetStatusMessage sets the StatusMessage field's value.
10536func (s *GetFaceDetectionOutput) SetStatusMessage(v string) *GetFaceDetectionOutput {
10537	s.StatusMessage = &v
10538	return s
10539}
10540
10541// SetVideoMetadata sets the VideoMetadata field's value.
10542func (s *GetFaceDetectionOutput) SetVideoMetadata(v *VideoMetadata) *GetFaceDetectionOutput {
10543	s.VideoMetadata = v
10544	return s
10545}
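
// Example (editorial sketch): printing when each face appears in the video.
// "out" is assumed to be a *rekognition.GetFaceDetectionOutput; timestamps
// are milliseconds from the start of the video.
//
//    for _, f := range out.Faces {
//        fmt.Printf("face at %dms, confidence %.1f\n",
//            aws.Int64Value(f.Timestamp),
//            aws.Float64Value(f.Face.Confidence))
//    }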
10546
10547type GetFaceSearchInput struct {
10548	_ struct{} `type:"structure"`
10549
10550	// The job identifier for the search request. You get the job identifier from
10551	// an initial call to StartFaceSearch.
10552	//
10553	// JobId is a required field
10554	JobId *string `min:"1" type:"string" required:"true"`
10555
10556	// Maximum number of results to return per paginated call. The largest value
10557	// you can specify is 1000. If you specify a value greater than 1000, a maximum
10558	// of 1000 results is returned. The default value is 1000.
10559	MaxResults *int64 `min:"1" type:"integer"`
10560
10561	// If the previous response was incomplete (because there are more search results
10562	// to retrieve), Amazon Rekognition Video returns a pagination token in the
10563	// response. You can use this pagination token to retrieve the next set of search
10564	// results.
10565	NextToken *string `type:"string"`
10566
10567	// Sort to use for grouping faces in the response. Use TIMESTAMP to group faces
10568	// by the time that they are recognized. Use INDEX to sort by recognized faces.
10569	SortBy *string `type:"string" enum:"FaceSearchSortBy"`
10570}
10571
10572// String returns the string representation
10573func (s GetFaceSearchInput) String() string {
10574	return awsutil.Prettify(s)
10575}
10576
10577// GoString returns the string representation
10578func (s GetFaceSearchInput) GoString() string {
10579	return s.String()
10580}
10581
10582// Validate inspects the fields of the type to determine if they are valid.
10583func (s *GetFaceSearchInput) Validate() error {
10584	invalidParams := request.ErrInvalidParams{Context: "GetFaceSearchInput"}
10585	if s.JobId == nil {
10586		invalidParams.Add(request.NewErrParamRequired("JobId"))
10587	}
10588	if s.JobId != nil && len(*s.JobId) < 1 {
10589		invalidParams.Add(request.NewErrParamMinLen("JobId", 1))
10590	}
10591	if s.MaxResults != nil && *s.MaxResults < 1 {
10592		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
10593	}
10594
10595	if invalidParams.Len() > 0 {
10596		return invalidParams
10597	}
10598	return nil
10599}
10600
10601// SetJobId sets the JobId field's value.
10602func (s *GetFaceSearchInput) SetJobId(v string) *GetFaceSearchInput {
10603	s.JobId = &v
10604	return s
10605}
10606
10607// SetMaxResults sets the MaxResults field's value.
10608func (s *GetFaceSearchInput) SetMaxResults(v int64) *GetFaceSearchInput {
10609	s.MaxResults = &v
10610	return s
10611}
10612
10613// SetNextToken sets the NextToken field's value.
10614func (s *GetFaceSearchInput) SetNextToken(v string) *GetFaceSearchInput {
10615	s.NextToken = &v
10616	return s
10617}
10618
10619// SetSortBy sets the SortBy field's value.
10620func (s *GetFaceSearchInput) SetSortBy(v string) *GetFaceSearchInput {
10621	s.SortBy = &v
10622	return s
10623}
10624
10625type GetFaceSearchOutput struct {
10626	_ struct{} `type:"structure"`
10627
10628	// The current status of the face search job.
10629	JobStatus *string `type:"string" enum:"VideoJobStatus"`
10630
10631	// If the response is truncated, Amazon Rekognition Video returns this token
10632	// that you can use in the subsequent request to retrieve the next set of search
10633	// results.
10634	NextToken *string `type:"string"`
10635
10636	// An array of persons, PersonMatch, in the video whose face(s) match the face(s)
10637	// in an Amazon Rekognition collection. It also includes time information for
10638	// when persons are matched in the video. You specify the input collection in
10639	// an initial call to StartFaceSearch. Each Persons element includes a time
10640	// the person was matched, face match details (FaceMatches) for matching faces
10641	// in the collection, and person information (Person) for the matched person.
10642	Persons []*PersonMatch `type:"list"`
10643
10644	// If the job fails, StatusMessage provides a descriptive error message.
10645	StatusMessage *string `type:"string"`
10646
10647	// Information about a video that Amazon Rekognition analyzed. VideoMetadata
10648	// is returned in every page of paginated responses from an Amazon Rekognition
10649	// Video operation.
10650	VideoMetadata *VideoMetadata `type:"structure"`
10651}
10652
10653// String returns the string representation
10654func (s GetFaceSearchOutput) String() string {
10655	return awsutil.Prettify(s)
10656}
10657
10658// GoString returns the string representation
10659func (s GetFaceSearchOutput) GoString() string {
10660	return s.String()
10661}
10662
10663// SetJobStatus sets the JobStatus field's value.
10664func (s *GetFaceSearchOutput) SetJobStatus(v string) *GetFaceSearchOutput {
10665	s.JobStatus = &v
10666	return s
10667}
10668
10669// SetNextToken sets the NextToken field's value.
10670func (s *GetFaceSearchOutput) SetNextToken(v string) *GetFaceSearchOutput {
10671	s.NextToken = &v
10672	return s
10673}
10674
10675// SetPersons sets the Persons field's value.
10676func (s *GetFaceSearchOutput) SetPersons(v []*PersonMatch) *GetFaceSearchOutput {
10677	s.Persons = v
10678	return s
10679}
10680
10681// SetStatusMessage sets the StatusMessage field's value.
10682func (s *GetFaceSearchOutput) SetStatusMessage(v string) *GetFaceSearchOutput {
10683	s.StatusMessage = &v
10684	return s
10685}
10686
10687// SetVideoMetadata sets the VideoMetadata field's value.
10688func (s *GetFaceSearchOutput) SetVideoMetadata(v *VideoMetadata) *GetFaceSearchOutput {
10689	s.VideoMetadata = v
10690	return s
10691}
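
// Example (editorial sketch): walking the Persons array of a GetFaceSearch
// response. Each PersonMatch pairs a timestamp with the collection faces that
// matched at that moment; "out" is assumed to be a
// *rekognition.GetFaceSearchOutput.
//
//    for _, p := range out.Persons {
//        for _, m := range p.FaceMatches {
//            fmt.Printf("%dms: face %s, similarity %.1f\n",
//                aws.Int64Value(p.Timestamp),
//                aws.StringValue(m.Face.FaceId),
//                aws.Float64Value(m.Similarity))
//        }
//    }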
10692
10693type GetLabelDetectionInput struct {
10694	_ struct{} `type:"structure"`
10695
10696	// Job identifier for the label detection operation for which you want results
10697	// returned. You get the job identifier from an initial call to StartLabelDetection.
10698	//
10699	// JobId is a required field
10700	JobId *string `min:"1" type:"string" required:"true"`
10701
10702	// Maximum number of results to return per paginated call. The largest value
10703	// you can specify is 1000. If you specify a value greater than 1000, a maximum
10704	// of 1000 results is returned. The default value is 1000.
10705	MaxResults *int64 `min:"1" type:"integer"`
10706
10707	// If the previous response was incomplete (because there are more labels to
10708	// retrieve), Amazon Rekognition Video returns a pagination token in the response.
10709	// You can use this pagination token to retrieve the next set of labels.
10710	NextToken *string `type:"string"`
10711
10712	// Sort to use for elements in the Labels array. Use TIMESTAMP to sort array
10713	// elements by the time labels are detected. Use NAME to alphabetically group
10714	// elements for a label together. Within each label group, the array elements
10715	// are sorted by detection confidence. The default sort is by TIMESTAMP.
10716	SortBy *string `type:"string" enum:"LabelDetectionSortBy"`
10717}
10718
10719// String returns the string representation
10720func (s GetLabelDetectionInput) String() string {
10721	return awsutil.Prettify(s)
10722}
10723
10724// GoString returns the string representation
10725func (s GetLabelDetectionInput) GoString() string {
10726	return s.String()
10727}
10728
10729// Validate inspects the fields of the type to determine if they are valid.
10730func (s *GetLabelDetectionInput) Validate() error {
10731	invalidParams := request.ErrInvalidParams{Context: "GetLabelDetectionInput"}
10732	if s.JobId == nil {
10733		invalidParams.Add(request.NewErrParamRequired("JobId"))
10734	}
10735	if s.JobId != nil && len(*s.JobId) < 1 {
10736		invalidParams.Add(request.NewErrParamMinLen("JobId", 1))
10737	}
10738	if s.MaxResults != nil && *s.MaxResults < 1 {
10739		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
10740	}
10741
10742	if invalidParams.Len() > 0 {
10743		return invalidParams
10744	}
10745	return nil
10746}
10747
10748// SetJobId sets the JobId field's value.
10749func (s *GetLabelDetectionInput) SetJobId(v string) *GetLabelDetectionInput {
10750	s.JobId = &v
10751	return s
10752}
10753
10754// SetMaxResults sets the MaxResults field's value.
10755func (s *GetLabelDetectionInput) SetMaxResults(v int64) *GetLabelDetectionInput {
10756	s.MaxResults = &v
10757	return s
10758}
10759
10760// SetNextToken sets the NextToken field's value.
10761func (s *GetLabelDetectionInput) SetNextToken(v string) *GetLabelDetectionInput {
10762	s.NextToken = &v
10763	return s
10764}
10765
10766// SetSortBy sets the SortBy field's value.
10767func (s *GetLabelDetectionInput) SetSortBy(v string) *GetLabelDetectionInput {
10768	s.SortBy = &v
10769	return s
10770}
10771
10772type GetLabelDetectionOutput struct {
10773	_ struct{} `type:"structure"`
10774
10775	// The current status of the label detection job.
10776	JobStatus *string `type:"string" enum:"VideoJobStatus"`
10777
10778	// Version number of the label detection model that was used to detect labels.
10779	LabelModelVersion *string `type:"string"`
10780
10781	// An array of labels detected in the video. Each element contains the detected
10782	// label and the time, in milliseconds from the start of the video, that the
10783	// label was detected.
10784	Labels []*LabelDetection `type:"list"`
10785
10786	// If the response is truncated, Amazon Rekognition Video returns this token
10787	// that you can use in the subsequent request to retrieve the next set of labels.
10788	NextToken *string `type:"string"`
10789
10790	// If the job fails, StatusMessage provides a descriptive error message.
10791	StatusMessage *string `type:"string"`
10792
10793	// Information about a video that Amazon Rekognition Video analyzed. VideoMetadata
10794	// is returned in every page of paginated responses from an Amazon Rekognition
10795	// Video operation.
10796	VideoMetadata *VideoMetadata `type:"structure"`
10797}
10798
10799// String returns the string representation
10800func (s GetLabelDetectionOutput) String() string {
10801	return awsutil.Prettify(s)
10802}
10803
10804// GoString returns the string representation
10805func (s GetLabelDetectionOutput) GoString() string {
10806	return s.String()
10807}
10808
10809// SetJobStatus sets the JobStatus field's value.
10810func (s *GetLabelDetectionOutput) SetJobStatus(v string) *GetLabelDetectionOutput {
10811	s.JobStatus = &v
10812	return s
10813}
10814
10815// SetLabelModelVersion sets the LabelModelVersion field's value.
10816func (s *GetLabelDetectionOutput) SetLabelModelVersion(v string) *GetLabelDetectionOutput {
10817	s.LabelModelVersion = &v
10818	return s
10819}
10820
10821// SetLabels sets the Labels field's value.
10822func (s *GetLabelDetectionOutput) SetLabels(v []*LabelDetection) *GetLabelDetectionOutput {
10823	s.Labels = v
10824	return s
10825}
10826
10827// SetNextToken sets the NextToken field's value.
10828func (s *GetLabelDetectionOutput) SetNextToken(v string) *GetLabelDetectionOutput {
10829	s.NextToken = &v
10830	return s
10831}
10832
10833// SetStatusMessage sets the StatusMessage field's value.
10834func (s *GetLabelDetectionOutput) SetStatusMessage(v string) *GetLabelDetectionOutput {
10835	s.StatusMessage = &v
10836	return s
10837}
10838
10839// SetVideoMetadata sets the VideoMetadata field's value.
10840func (s *GetLabelDetectionOutput) SetVideoMetadata(v *VideoMetadata) *GetLabelDetectionOutput {
10841	s.VideoMetadata = v
10842	return s
10843}
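
// Example (editorial sketch): tallying how often each label appears in one
// page of GetLabelDetection results. "out" is assumed to be a
// *rekognition.GetLabelDetectionOutput.
//
//    counts := map[string]int{}
//    for _, l := range out.Labels {
//        counts[aws.StringValue(l.Label.Name)]++
//    }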
10844
10845type GetPersonTrackingInput struct {
10846	_ struct{} `type:"structure"`
10847
10848	// The identifier for a job that tracks persons in a video. You get the JobId
10849	// from a call to StartPersonTracking.
10850	//
10851	// JobId is a required field
10852	JobId *string `min:"1" type:"string" required:"true"`
10853
10854	// Maximum number of results to return per paginated call. The largest value
10855	// you can specify is 1000. If you specify a value greater than 1000, a maximum
10856	// of 1000 results is returned. The default value is 1000.
10857	MaxResults *int64 `min:"1" type:"integer"`
10858
10859	// If the previous response was incomplete (because there are more persons to
10860	// retrieve), Amazon Rekognition Video returns a pagination token in the response.
10861	// You can use this pagination token to retrieve the next set of persons.
10862	NextToken *string `type:"string"`
10863
10864	// Sort to use for elements in the Persons array. Use TIMESTAMP to sort array
10865	// elements by the time persons are detected. Use INDEX to sort by the tracked
10866	// persons. If you sort by INDEX, the array elements for each person are sorted
10867	// by detection confidence. The default sort is by TIMESTAMP.
10868	SortBy *string `type:"string" enum:"PersonTrackingSortBy"`
10869}
10870
10871// String returns the string representation
10872func (s GetPersonTrackingInput) String() string {
10873	return awsutil.Prettify(s)
10874}
10875
10876// GoString returns the string representation
10877func (s GetPersonTrackingInput) GoString() string {
10878	return s.String()
10879}
10880
10881// Validate inspects the fields of the type to determine if they are valid.
10882func (s *GetPersonTrackingInput) Validate() error {
10883	invalidParams := request.ErrInvalidParams{Context: "GetPersonTrackingInput"}
10884	if s.JobId == nil {
10885		invalidParams.Add(request.NewErrParamRequired("JobId"))
10886	}
10887	if s.JobId != nil && len(*s.JobId) < 1 {
10888		invalidParams.Add(request.NewErrParamMinLen("JobId", 1))
10889	}
10890	if s.MaxResults != nil && *s.MaxResults < 1 {
10891		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
10892	}
10893
10894	if invalidParams.Len() > 0 {
10895		return invalidParams
10896	}
10897	return nil
10898}
10899
10900// SetJobId sets the JobId field's value.
10901func (s *GetPersonTrackingInput) SetJobId(v string) *GetPersonTrackingInput {
10902	s.JobId = &v
10903	return s
10904}
10905
10906// SetMaxResults sets the MaxResults field's value.
10907func (s *GetPersonTrackingInput) SetMaxResults(v int64) *GetPersonTrackingInput {
10908	s.MaxResults = &v
10909	return s
10910}
10911
10912// SetNextToken sets the NextToken field's value.
10913func (s *GetPersonTrackingInput) SetNextToken(v string) *GetPersonTrackingInput {
10914	s.NextToken = &v
10915	return s
10916}
10917
10918// SetSortBy sets the SortBy field's value.
10919func (s *GetPersonTrackingInput) SetSortBy(v string) *GetPersonTrackingInput {
10920	s.SortBy = &v
10921	return s
10922}
10923
10924type GetPersonTrackingOutput struct {
10925	_ struct{} `type:"structure"`
10926
10927	// The current status of the person tracking job.
10928	JobStatus *string `type:"string" enum:"VideoJobStatus"`
10929
10930	// If the response is truncated, Amazon Rekognition Video returns this token
10931	// that you can use in the subsequent request to retrieve the next set of persons.
10932	NextToken *string `type:"string"`
10933
10934	// An array of the persons detected in the video and the time(s) their path
10935	// was tracked throughout the video. An array element will exist for each time
10936	// a person's path is tracked.
10937	Persons []*PersonDetection `type:"list"`
10938
10939	// If the job fails, StatusMessage provides a descriptive error message.
10940	StatusMessage *string `type:"string"`
10941
10942	// Information about a video that Amazon Rekognition Video analyzed. VideoMetadata
10943	// is returned in every page of paginated responses from an Amazon Rekognition
10944	// Video operation.
10945	VideoMetadata *VideoMetadata `type:"structure"`
10946}
10947
10948// String returns the string representation
10949func (s GetPersonTrackingOutput) String() string {
10950	return awsutil.Prettify(s)
10951}
10952
10953// GoString returns the string representation
10954func (s GetPersonTrackingOutput) GoString() string {
10955	return s.String()
10956}
10957
10958// SetJobStatus sets the JobStatus field's value.
10959func (s *GetPersonTrackingOutput) SetJobStatus(v string) *GetPersonTrackingOutput {
10960	s.JobStatus = &v
10961	return s
10962}
10963
10964// SetNextToken sets the NextToken field's value.
10965func (s *GetPersonTrackingOutput) SetNextToken(v string) *GetPersonTrackingOutput {
10966	s.NextToken = &v
10967	return s
10968}
10969
10970// SetPersons sets the Persons field's value.
10971func (s *GetPersonTrackingOutput) SetPersons(v []*PersonDetection) *GetPersonTrackingOutput {
10972	s.Persons = v
10973	return s
10974}
10975
10976// SetStatusMessage sets the StatusMessage field's value.
10977func (s *GetPersonTrackingOutput) SetStatusMessage(v string) *GetPersonTrackingOutput {
10978	s.StatusMessage = &v
10979	return s
10980}
10981
10982// SetVideoMetadata sets the VideoMetadata field's value.
10983func (s *GetPersonTrackingOutput) SetVideoMetadata(v *VideoMetadata) *GetPersonTrackingOutput {
10984	s.VideoMetadata = v
10985	return s
10986}
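
// Example (editorial sketch): printing each tracked person's index and the
// time their path was tracked. "out" is assumed to be a
// *rekognition.GetPersonTrackingOutput.
//
//    for _, p := range out.Persons {
//        fmt.Printf("person %d at %dms\n",
//            aws.Int64Value(p.Person.Index), aws.Int64Value(p.Timestamp))
//    }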
10987
10988type GetSegmentDetectionInput struct {
10989	_ struct{} `type:"structure"`
10990
10991	// Job identifier for the segment detection operation for which you want results
10992	// returned. You get the job identifier from an initial call to StartSegmentDetection.
10993	//
10994	// JobId is a required field
10995	JobId *string `min:"1" type:"string" required:"true"`
10996
10997	// Maximum number of results to return per paginated call. The largest value
10998	// you can specify is 1000.
10999	MaxResults *int64 `min:"1" type:"integer"`
11000
11001	// If the response is truncated, Amazon Rekognition Video returns this token
11002	// that you can use in the subsequent request to retrieve the next set of segments.
11003	NextToken *string `type:"string"`
11004}
11005
11006// String returns the string representation
11007func (s GetSegmentDetectionInput) String() string {
11008	return awsutil.Prettify(s)
11009}
11010
11011// GoString returns the string representation
11012func (s GetSegmentDetectionInput) GoString() string {
11013	return s.String()
11014}
11015
11016// Validate inspects the fields of the type to determine if they are valid.
11017func (s *GetSegmentDetectionInput) Validate() error {
11018	invalidParams := request.ErrInvalidParams{Context: "GetSegmentDetectionInput"}
11019	if s.JobId == nil {
11020		invalidParams.Add(request.NewErrParamRequired("JobId"))
11021	}
11022	if s.JobId != nil && len(*s.JobId) < 1 {
11023		invalidParams.Add(request.NewErrParamMinLen("JobId", 1))
11024	}
11025	if s.MaxResults != nil && *s.MaxResults < 1 {
11026		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
11027	}
11028
11029	if invalidParams.Len() > 0 {
11030		return invalidParams
11031	}
11032	return nil
11033}
11034
11035// SetJobId sets the JobId field's value.
11036func (s *GetSegmentDetectionInput) SetJobId(v string) *GetSegmentDetectionInput {
11037	s.JobId = &v
11038	return s
11039}
11040
11041// SetMaxResults sets the MaxResults field's value.
11042func (s *GetSegmentDetectionInput) SetMaxResults(v int64) *GetSegmentDetectionInput {
11043	s.MaxResults = &v
11044	return s
11045}
11046
11047// SetNextToken sets the NextToken field's value.
11048func (s *GetSegmentDetectionInput) SetNextToken(v string) *GetSegmentDetectionInput {
11049	s.NextToken = &v
11050	return s
11051}
11052
11053type GetSegmentDetectionOutput struct {
11054	_ struct{} `type:"structure"`
11055
11056	// An array of objects. There can be multiple audio streams. Each AudioMetadata
11057	// object contains metadata for a single audio stream. Audio information in
11058	// an AudioMetadata object includes the audio codec, the number of audio channels,
11059	// the duration of the audio stream, and the sample rate. Audio metadata is
11060	// returned in each page of information returned by GetSegmentDetection.
11061	AudioMetadata []*AudioMetadata `type:"list"`
11062
11063	// Current status of the segment detection job.
11064	JobStatus *string `type:"string" enum:"VideoJobStatus"`
11065
11066	// If the previous response was incomplete (because there are more segments
11067	// to retrieve), Amazon Rekognition Video returns a pagination token in the
11068	// response. You can use this pagination token to retrieve the next set of segments.
11069	NextToken *string `type:"string"`
11070
11071	// An array of segments detected in a video. The array is sorted by the segment
11072	// types (TECHNICAL_CUE or SHOT) specified in the SegmentTypes input parameter
11073	// of StartSegmentDetection. Within each segment type the array is sorted by
11074	// timestamp values.
11075	Segments []*SegmentDetection `type:"list"`
11076
11077	// An array containing the segment types requested in the call to StartSegmentDetection.
11078	SelectedSegmentTypes []*SegmentTypeInfo `type:"list"`
11079
11080	// If the job fails, StatusMessage provides a descriptive error message.
11081	StatusMessage *string `type:"string"`
11082
11083	// Currently, Amazon Rekognition Video returns a single object in the VideoMetadata
11084	// array. The object contains information about the video stream in the input
11085	// file that Amazon Rekognition Video chose to analyze. The VideoMetadata object
11086	// includes the video codec, video format and other information. Video metadata
11087	// is returned in each page of information returned by GetSegmentDetection.
11088	VideoMetadata []*VideoMetadata `type:"list"`
11089}
11090
11091// String returns the string representation
11092func (s GetSegmentDetectionOutput) String() string {
11093	return awsutil.Prettify(s)
11094}
11095
11096// GoString returns the string representation
11097func (s GetSegmentDetectionOutput) GoString() string {
11098	return s.String()
11099}
11100
11101// SetAudioMetadata sets the AudioMetadata field's value.
11102func (s *GetSegmentDetectionOutput) SetAudioMetadata(v []*AudioMetadata) *GetSegmentDetectionOutput {
11103	s.AudioMetadata = v
11104	return s
11105}
11106
11107// SetJobStatus sets the JobStatus field's value.
11108func (s *GetSegmentDetectionOutput) SetJobStatus(v string) *GetSegmentDetectionOutput {
11109	s.JobStatus = &v
11110	return s
11111}
11112
11113// SetNextToken sets the NextToken field's value.
11114func (s *GetSegmentDetectionOutput) SetNextToken(v string) *GetSegmentDetectionOutput {
11115	s.NextToken = &v
11116	return s
11117}
11118
11119// SetSegments sets the Segments field's value.
11120func (s *GetSegmentDetectionOutput) SetSegments(v []*SegmentDetection) *GetSegmentDetectionOutput {
11121	s.Segments = v
11122	return s
11123}
11124
11125// SetSelectedSegmentTypes sets the SelectedSegmentTypes field's value.
11126func (s *GetSegmentDetectionOutput) SetSelectedSegmentTypes(v []*SegmentTypeInfo) *GetSegmentDetectionOutput {
11127	s.SelectedSegmentTypes = v
11128	return s
11129}
11130
11131// SetStatusMessage sets the StatusMessage field's value.
11132func (s *GetSegmentDetectionOutput) SetStatusMessage(v string) *GetSegmentDetectionOutput {
11133	s.StatusMessage = &v
11134	return s
11135}
11136
11137// SetVideoMetadata sets the VideoMetadata field's value.
11138func (s *GetSegmentDetectionOutput) SetVideoMetadata(v []*VideoMetadata) *GetSegmentDetectionOutput {
11139	s.VideoMetadata = v
11140	return s
11141}
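
// Example (editorial sketch): listing detected segments with their type and
// start/end offsets. The SegmentDetection field names used here (Type,
// StartTimestampMillis, EndTimestampMillis) follow the segment detection API;
// "out" is assumed to be a *rekognition.GetSegmentDetectionOutput.
//
//    for _, seg := range out.Segments {
//        fmt.Printf("%s: %dms-%dms\n", aws.StringValue(seg.Type),
//            aws.Int64Value(seg.StartTimestampMillis),
//            aws.Int64Value(seg.EndTimestampMillis))
//    }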
11142
11143type GetTextDetectionInput struct {
11144	_ struct{} `type:"structure"`
11145
11146	// Job identifier for the text detection operation for which you want results
11147	// returned. You get the job identifier from an initial call to StartTextDetection.
11148	//
11149	// JobId is a required field
11150	JobId *string `min:"1" type:"string" required:"true"`
11151
11152	// Maximum number of results to return per paginated call. The largest value
11153	// you can specify is 1000.
11154	MaxResults *int64 `min:"1" type:"integer"`
11155
11156	// If the previous response was incomplete (because there is more text to
11157	// retrieve), Amazon Rekognition Video returns a pagination token in the response.
11158	// You can use this pagination token to retrieve the next set of text.
11159	NextToken *string `type:"string"`
11160}
11161
11162// String returns the string representation
11163func (s GetTextDetectionInput) String() string {
11164	return awsutil.Prettify(s)
11165}
11166
11167// GoString returns the string representation
11168func (s GetTextDetectionInput) GoString() string {
11169	return s.String()
11170}
11171
11172// Validate inspects the fields of the type to determine if they are valid.
11173func (s *GetTextDetectionInput) Validate() error {
11174	invalidParams := request.ErrInvalidParams{Context: "GetTextDetectionInput"}
11175	if s.JobId == nil {
11176		invalidParams.Add(request.NewErrParamRequired("JobId"))
11177	}
11178	if s.JobId != nil && len(*s.JobId) < 1 {
11179		invalidParams.Add(request.NewErrParamMinLen("JobId", 1))
11180	}
11181	if s.MaxResults != nil && *s.MaxResults < 1 {
11182		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
11183	}
11184
11185	if invalidParams.Len() > 0 {
11186		return invalidParams
11187	}
11188	return nil
11189}
11190
11191// SetJobId sets the JobId field's value.
11192func (s *GetTextDetectionInput) SetJobId(v string) *GetTextDetectionInput {
11193	s.JobId = &v
11194	return s
11195}
11196
11197// SetMaxResults sets the MaxResults field's value.
11198func (s *GetTextDetectionInput) SetMaxResults(v int64) *GetTextDetectionInput {
11199	s.MaxResults = &v
11200	return s
11201}
11202
11203// SetNextToken sets the NextToken field's value.
11204func (s *GetTextDetectionInput) SetNextToken(v string) *GetTextDetectionInput {
11205	s.NextToken = &v
11206	return s
11207}
11208
11209type GetTextDetectionOutput struct {
11210	_ struct{} `type:"structure"`
11211
11212	// Current status of the text detection job.
11213	JobStatus *string `type:"string" enum:"VideoJobStatus"`
11214
11215	// If the response is truncated, Amazon Rekognition Video returns this token
11216	// that you can use in the subsequent request to retrieve the next set of text.
11217	NextToken *string `type:"string"`
11218
11219	// If the job fails, StatusMessage provides a descriptive error message.
11220	StatusMessage *string `type:"string"`
11221
11222	// An array of text detected in the video. Each element contains the detected
11223	// text, the time in milliseconds from the start of the video that the text
11224	// was detected, and where it was detected on the screen.
11225	TextDetections []*TextDetectionResult `type:"list"`
11226
11227	// Version number of the text detection model that was used to detect text.
11228	TextModelVersion *string `type:"string"`
11229
11230	// Information about a video that Amazon Rekognition analyzed. VideoMetadata
11231	// is returned in every page of paginated responses from an Amazon Rekognition
11232	// Video operation.
11233	VideoMetadata *VideoMetadata `type:"structure"`
11234}

// String returns the string representation
func (s GetTextDetectionOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetTextDetectionOutput) GoString() string {
	return s.String()
}

// SetJobStatus sets the JobStatus field's value.
func (s *GetTextDetectionOutput) SetJobStatus(v string) *GetTextDetectionOutput {
	s.JobStatus = &v
	return s
}

// SetNextToken sets the NextToken field's value.
func (s *GetTextDetectionOutput) SetNextToken(v string) *GetTextDetectionOutput {
	s.NextToken = &v
	return s
}

// SetStatusMessage sets the StatusMessage field's value.
func (s *GetTextDetectionOutput) SetStatusMessage(v string) *GetTextDetectionOutput {
	s.StatusMessage = &v
	return s
}

// SetTextDetections sets the TextDetections field's value.
func (s *GetTextDetectionOutput) SetTextDetections(v []*TextDetectionResult) *GetTextDetectionOutput {
	s.TextDetections = v
	return s
}

// SetTextModelVersion sets the TextModelVersion field's value.
func (s *GetTextDetectionOutput) SetTextModelVersion(v string) *GetTextDetectionOutput {
	s.TextModelVersion = &v
	return s
}

// SetVideoMetadata sets the VideoMetadata field's value.
func (s *GetTextDetectionOutput) SetVideoMetadata(v *VideoMetadata) *GetTextDetectionOutput {
	s.VideoMetadata = v
	return s
}

// The S3 bucket that contains an Amazon SageMaker Ground Truth format manifest
// file.
type GroundTruthManifest struct {
	_ struct{} `type:"structure"`

	// Provides the S3 bucket name and object name.
	//
	// The region for the S3 bucket containing the S3 object must match the region
	// you use for Amazon Rekognition operations.
	//
	// For Amazon Rekognition to process an S3 object, the user must have permission
	// to access the S3 object. For more information, see Resource-Based Policies
	// in the Amazon Rekognition Developer Guide.
	S3Object *S3Object `type:"structure"`
}
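
// Hedged example (bucket and key names are placeholders): pointing a
// GroundTruthManifest at a manifest file stored in S3 and validating it
// locally before use.
//
//    manifest := &GroundTruthManifest{}
//    manifest.SetS3Object((&S3Object{}).
//        SetBucket("my-training-bucket").
//        SetName("datasets/output.manifest"))
//    if err := manifest.Validate(); err != nil {
//        fmt.Println(err) // surfaces nested S3Object validation errors
//    }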

// String returns the string representation
func (s GroundTruthManifest) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GroundTruthManifest) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *GroundTruthManifest) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "GroundTruthManifest"}
	if s.S3Object != nil {
		if err := s.S3Object.Validate(); err != nil {
			invalidParams.AddNested("S3Object", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetS3Object sets the S3Object field's value.
func (s *GroundTruthManifest) SetS3Object(v *S3Object) *GroundTruthManifest {
	s.S3Object = v
	return s
}

// Shows the results of the human-in-the-loop evaluation. If there is no HumanLoopArn,
// the input did not trigger human review.
type HumanLoopActivationOutput struct {
	_ struct{} `type:"structure"`

	// Shows the result of condition evaluations, including those conditions that
	// activated a human review.
	HumanLoopActivationConditionsEvaluationResults aws.JSONValue `type:"jsonvalue"`

	// Shows if and why human review was needed.
	HumanLoopActivationReasons []*string `min:"1" type:"list"`

	// The Amazon Resource Name (ARN) of the HumanLoop created.
	HumanLoopArn *string `type:"string"`
}
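
// Minimal sketch (assumes out is a *DetectModerationLabelsOutput from a call
// configured with a HumanLoopConfig): a nil HumanLoopArn means the image did
// not trigger human review.
//
//    act := out.HumanLoopActivationOutput
//    if act != nil && act.HumanLoopArn != nil {
//        fmt.Println("human review started:", *act.HumanLoopArn)
//        fmt.Println("reasons:", aws.StringValueSlice(act.HumanLoopActivationReasons))
//    }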

// String returns the string representation
func (s HumanLoopActivationOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s HumanLoopActivationOutput) GoString() string {
	return s.String()
}

// SetHumanLoopActivationConditionsEvaluationResults sets the HumanLoopActivationConditionsEvaluationResults field's value.
func (s *HumanLoopActivationOutput) SetHumanLoopActivationConditionsEvaluationResults(v aws.JSONValue) *HumanLoopActivationOutput {
	s.HumanLoopActivationConditionsEvaluationResults = v
	return s
}

// SetHumanLoopActivationReasons sets the HumanLoopActivationReasons field's value.
func (s *HumanLoopActivationOutput) SetHumanLoopActivationReasons(v []*string) *HumanLoopActivationOutput {
	s.HumanLoopActivationReasons = v
	return s
}

// SetHumanLoopArn sets the HumanLoopArn field's value.
func (s *HumanLoopActivationOutput) SetHumanLoopArn(v string) *HumanLoopActivationOutput {
	s.HumanLoopArn = &v
	return s
}

// Sets up the flow definition that the image will be sent to if one of the
// conditions is met. You can also set certain attributes of the image before
// review.
type HumanLoopConfig struct {
	_ struct{} `type:"structure"`

	// Sets attributes of the input data.
	DataAttributes *HumanLoopDataAttributes `type:"structure"`

	// The Amazon Resource Name (ARN) of the flow definition. You can create a flow
	// definition by using the Amazon SageMaker CreateFlowDefinition (https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateFlowDefinition.html)
	// operation.
	//
	// FlowDefinitionArn is a required field
	FlowDefinitionArn *string `type:"string" required:"true"`

	// The name of the human review used for this image. This should be kept unique
	// within a region.
	//
	// HumanLoopName is a required field
	HumanLoopName *string `min:"1" type:"string" required:"true"`
}
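
// Hedged example (the ARN and loop name are placeholders, not real resources):
// building a HumanLoopConfig and validating it locally before attaching it to
// a request.
//
//    cfg := &HumanLoopConfig{}
//    cfg.SetFlowDefinitionArn("arn:aws:sagemaker:us-east-1:111122223333:flow-definition/my-flow")
//    cfg.SetHumanLoopName("moderation-review-1")
//    if err := cfg.Validate(); err != nil {
//        fmt.Println(err) // missing or too-short fields are reported before any API call
//    }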

// String returns the string representation
func (s HumanLoopConfig) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s HumanLoopConfig) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *HumanLoopConfig) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "HumanLoopConfig"}
	if s.FlowDefinitionArn == nil {
		invalidParams.Add(request.NewErrParamRequired("FlowDefinitionArn"))
	}
	if s.HumanLoopName == nil {
		invalidParams.Add(request.NewErrParamRequired("HumanLoopName"))
	}
	if s.HumanLoopName != nil && len(*s.HumanLoopName) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("HumanLoopName", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetDataAttributes sets the DataAttributes field's value.
func (s *HumanLoopConfig) SetDataAttributes(v *HumanLoopDataAttributes) *HumanLoopConfig {
	s.DataAttributes = v
	return s
}

// SetFlowDefinitionArn sets the FlowDefinitionArn field's value.
func (s *HumanLoopConfig) SetFlowDefinitionArn(v string) *HumanLoopConfig {
	s.FlowDefinitionArn = &v
	return s
}

// SetHumanLoopName sets the HumanLoopName field's value.
func (s *HumanLoopConfig) SetHumanLoopName(v string) *HumanLoopConfig {
	s.HumanLoopName = &v
	return s
}

// Allows you to set attributes of the image. Currently, you can declare an
// image as free of personally identifiable information.
type HumanLoopDataAttributes struct {
	_ struct{} `type:"structure"`

	// Sets whether the input image is free of personally identifiable information.
	ContentClassifiers []*string `type:"list"`
}
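
// Sketch under the assumption that this package's ContentClassifier enum
// values apply here: declaring an image free of PII and adult content. cfg is
// the HumanLoopConfig built in the sketch above.
//
//    attrs := &HumanLoopDataAttributes{}
//    attrs.SetContentClassifiers([]*string{
//        aws.String(ContentClassifierFreeOfPersonallyIdentifiableInformation),
//        aws.String(ContentClassifierFreeOfAdultContent),
//    })
//    cfg.SetDataAttributes(attrs)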

// String returns the string representation
func (s HumanLoopDataAttributes) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s HumanLoopDataAttributes) GoString() string {
	return s.String()
}

// SetContentClassifiers sets the ContentClassifiers field's value.
func (s *HumanLoopDataAttributes) SetContentClassifiers(v []*string) *HumanLoopDataAttributes {
	s.ContentClassifiers = v
	return s
}

// The number of in-progress human reviews has exceeded the number allowed.
type HumanLoopQuotaExceededException struct {
	_            struct{}                  `type:"structure"`
	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

	Message_ *string `locationName:"message" type:"string"`

	// The quota code.
	QuotaCode *string `type:"string"`

	// The resource type.
	ResourceType *string `type:"string"`

	// The service code.
	ServiceCode *string `type:"string"`
}

// String returns the string representation
func (s HumanLoopQuotaExceededException) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s HumanLoopQuotaExceededException) GoString() string {
	return s.String()
}

func newErrorHumanLoopQuotaExceededException(v protocol.ResponseMetadata) error {
	return &HumanLoopQuotaExceededException{
		RespMetadata: v,
	}
}

// Code returns the exception type name.
func (s *HumanLoopQuotaExceededException) Code() string {
	return "HumanLoopQuotaExceededException"
}

// Message returns the exception's message.
func (s *HumanLoopQuotaExceededException) Message() string {
	if s.Message_ != nil {
		return *s.Message_
	}
	return ""
}

// OrigErr always returns nil, satisfying the awserr.Error interface.
func (s *HumanLoopQuotaExceededException) OrigErr() error {
	return nil
}

func (s *HumanLoopQuotaExceededException) Error() string {
	return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
}

// StatusCode returns the HTTP status code for the request's response error.
func (s *HumanLoopQuotaExceededException) StatusCode() int {
	return s.RespMetadata.StatusCode
}

// RequestID returns the service's response RequestID for the request.
func (s *HumanLoopQuotaExceededException) RequestID() string {
	return s.RespMetadata.RequestID
}

// A ClientRequestToken input parameter was reused with an operation, but at
// least one of the other input parameters is different from the previous call
// to the operation.
type IdempotentParameterMismatchException struct {
	_            struct{}                  `type:"structure"`
	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

	Message_ *string `locationName:"message" type:"string"`
}

// String returns the string representation
func (s IdempotentParameterMismatchException) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s IdempotentParameterMismatchException) GoString() string {
	return s.String()
}

func newErrorIdempotentParameterMismatchException(v protocol.ResponseMetadata) error {
	return &IdempotentParameterMismatchException{
		RespMetadata: v,
	}
}

// Code returns the exception type name.
func (s *IdempotentParameterMismatchException) Code() string {
	return "IdempotentParameterMismatchException"
}

// Message returns the exception's message.
func (s *IdempotentParameterMismatchException) Message() string {
	if s.Message_ != nil {
		return *s.Message_
	}
	return ""
}

// OrigErr always returns nil, satisfying the awserr.Error interface.
func (s *IdempotentParameterMismatchException) OrigErr() error {
	return nil
}

func (s *IdempotentParameterMismatchException) Error() string {
	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}

// StatusCode returns the HTTP status code for the request's response error.
func (s *IdempotentParameterMismatchException) StatusCode() int {
	return s.RespMetadata.StatusCode
}

// RequestID returns the service's response RequestID for the request.
func (s *IdempotentParameterMismatchException) RequestID() string {
	return s.RespMetadata.RequestID
}

// Provides the input image either as bytes or an S3 object.
//
// You pass image bytes to an Amazon Rekognition API operation by using the
// Bytes property. For example, you would use the Bytes property to pass an
// image loaded from a local file system. Image bytes passed by using the Bytes
// property must be base64-encoded. Your code may not need to encode image bytes
// if you are using an AWS SDK to call Amazon Rekognition API operations.
//
// For more information, see Analyzing an Image Loaded from a Local File System
// in the Amazon Rekognition Developer Guide.
//
// You pass images stored in an S3 bucket to an Amazon Rekognition API operation
// by using the S3Object property. Images stored in an S3 bucket do not need
// to be base64-encoded.
//
// The region for the S3 bucket containing the S3 object must match the region
// you use for Amazon Rekognition operations.
//
// If you use the AWS CLI to call Amazon Rekognition operations, passing image
// bytes using the Bytes property is not supported. You must first upload the
// image to an Amazon S3 bucket and then call the operation using the S3Object
// property.
//
// For Amazon Rekognition to process an S3 object, the user must have permission
// to access the S3 object. For more information, see Resource-Based Policies
// in the Amazon Rekognition Developer Guide.
type Image struct {
	_ struct{} `type:"structure"`

	// Blob of image bytes up to 5 MB.
	//
	// Bytes is automatically base64 encoded/decoded by the SDK.
	Bytes []byte `min:"1" type:"blob"`

	// Identifies an S3 object as the image source.
	S3Object *S3Object `type:"structure"`
}
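
// Two hedged ways to build an Image, following the doc comment above; the
// file, bucket, and key names are placeholders, and the local-file variant
// assumes an io/ioutil import. With this SDK, raw bytes are base64-encoded
// automatically.
//
//    // From a local file:
//    data, err := ioutil.ReadFile("photo.jpg")
//    if err == nil {
//        img := (&Image{}).SetBytes(data)
//        _ = img
//    }
//
//    // From S3:
//    img := (&Image{}).SetS3Object((&S3Object{}).
//        SetBucket("my-bucket").
//        SetName("photos/photo.jpg"))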

// String returns the string representation
func (s Image) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Image) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *Image) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "Image"}
	if s.Bytes != nil && len(s.Bytes) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Bytes", 1))
	}
	if s.S3Object != nil {
		if err := s.S3Object.Validate(); err != nil {
			invalidParams.AddNested("S3Object", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBytes sets the Bytes field's value.
func (s *Image) SetBytes(v []byte) *Image {
	s.Bytes = v
	return s
}

// SetS3Object sets the S3Object field's value.
func (s *Image) SetS3Object(v *S3Object) *Image {
	s.S3Object = v
	return s
}

// Identifies face image brightness and sharpness.
type ImageQuality struct {
	_ struct{} `type:"structure"`

	// Value representing brightness of the face. The service returns a value between
	// 0 and 100 (inclusive). A higher value indicates a brighter face image.
	Brightness *float64 `type:"float"`

	// Value representing sharpness of the face. The service returns a value between
	// 0 and 100 (inclusive). A higher value indicates a sharper face image.
	Sharpness *float64 `type:"float"`
}

// String returns the string representation
func (s ImageQuality) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ImageQuality) GoString() string {
	return s.String()
}

// SetBrightness sets the Brightness field's value.
func (s *ImageQuality) SetBrightness(v float64) *ImageQuality {
	s.Brightness = &v
	return s
}

// SetSharpness sets the Sharpness field's value.
func (s *ImageQuality) SetSharpness(v float64) *ImageQuality {
	s.Sharpness = &v
	return s
}

// The input image size exceeds the allowed limit. For more information, see
// Limits in Amazon Rekognition in the Amazon Rekognition Developer Guide.
type ImageTooLargeException struct {
	_            struct{}                  `type:"structure"`
	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

	Message_ *string `locationName:"message" type:"string"`
}

// String returns the string representation
func (s ImageTooLargeException) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ImageTooLargeException) GoString() string {
	return s.String()
}

func newErrorImageTooLargeException(v protocol.ResponseMetadata) error {
	return &ImageTooLargeException{
		RespMetadata: v,
	}
}

// Code returns the exception type name.
func (s *ImageTooLargeException) Code() string {
	return "ImageTooLargeException"
}

// Message returns the exception's message.
func (s *ImageTooLargeException) Message() string {
	if s.Message_ != nil {
		return *s.Message_
	}
	return ""
}

// OrigErr always returns nil, satisfying the awserr.Error interface.
func (s *ImageTooLargeException) OrigErr() error {
	return nil
}

func (s *ImageTooLargeException) Error() string {
	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}

// StatusCode returns the HTTP status code for the request's response error.
func (s *ImageTooLargeException) StatusCode() int {
	return s.RespMetadata.StatusCode
}

// RequestID returns the service's response RequestID for the request.
func (s *ImageTooLargeException) RequestID() string {
	return s.RespMetadata.RequestID
}
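
// Hedged error-handling sketch: the modeled exceptions above satisfy
// awserr.Error, so callers can branch on the error code. Assumes the caller
// imports github.com/aws/aws-sdk-go/aws/awserr.
//
//    _, err := client.DetectLabels(input)
//    if aerr, ok := err.(awserr.Error); ok {
//        switch aerr.Code() {
//        case "ImageTooLargeException":
//            // resize the image, or reference it via S3Object instead of Bytes
//        case "InvalidImageFormatException":
//            // convert the image to PNG or JPEG
//        }
//    }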

type IndexFacesInput struct {
	_ struct{} `type:"structure"`

	// The ID of an existing collection to which you want to add the faces that
	// are detected in the input image.
	//
	// CollectionId is a required field
	CollectionId *string `min:"1" type:"string" required:"true"`

	// An array of facial attributes that you want to be returned. This can be the
	// default list of attributes or all attributes. If you don't specify a value
	// for Attributes or if you specify ["DEFAULT"], the API returns the following
	// subset of facial attributes: BoundingBox, Confidence, Pose, Quality, and
	// Landmarks. If you provide ["ALL"], all facial attributes are returned, but
	// the operation takes longer to complete.
	//
	// If you provide both ["ALL", "DEFAULT"], the service uses a logical AND operator
	// to determine which attributes to return (in this case, all attributes).
	DetectionAttributes []*string `type:"list"`

	// The ID you want to assign to all the faces detected in the image.
	ExternalImageId *string `min:"1" type:"string"`

	// The input image as base64-encoded bytes or an S3 object. If you use the AWS
	// CLI to call Amazon Rekognition operations, passing base64-encoded image bytes
	// isn't supported.
	//
	// If you are using an AWS SDK to call Amazon Rekognition, you might not need
	// to base64-encode image bytes passed using the Bytes field. For more information,
	// see Images in the Amazon Rekognition Developer Guide.
	//
	// Image is a required field
	Image *Image `type:"structure" required:"true"`

	// The maximum number of faces to index. The value of MaxFaces must be greater
	// than or equal to 1. IndexFaces returns no more than 100 detected faces in
	// an image, even if you specify a larger value for MaxFaces.
	//
	// If IndexFaces detects more faces than the value of MaxFaces, the faces with
	// the lowest quality are filtered out first. If there are still more faces
	// than the value of MaxFaces, the faces with the smallest bounding boxes are
	// filtered out (up to the number that's needed to satisfy the value of MaxFaces).
	// Information about the unindexed faces is available in the UnindexedFaces
	// array.
	//
	// The faces returned by IndexFaces are sorted from the largest face bounding
	// box to the smallest.
	//
	// MaxFaces can be used with a collection associated with any version of the
	// face model.
	MaxFaces *int64 `min:"1" type:"integer"`

	// A filter that specifies a quality bar for how much filtering is done to identify
	// faces. Filtered faces aren't indexed. If you specify AUTO, Amazon Rekognition
	// chooses the quality bar. If you specify LOW, MEDIUM, or HIGH, filtering removes
	// all faces that don’t meet the chosen quality bar. The default value is
	// AUTO. The quality bar is based on a variety of common use cases. Low-quality
	// detections can occur for a number of reasons. Some examples are an object
	// that's misidentified as a face, a face that's too blurry, or a face with
	// a pose that's too extreme to use. If you specify NONE, no filtering is performed.
	//
	// To use quality filtering, the collection you are using must be associated
	// with version 3 of the face model or higher.
	QualityFilter *string `type:"string" enum:"QualityFilter"`
}
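
// Hedged usage sketch (the collection ID and external image ID are
// placeholders; img is an Image built as shown for the Image type above):
// indexing up to two faces from an image into an existing collection.
//
//    input := &IndexFacesInput{}
//    input.SetCollectionId("my-collection").
//        SetExternalImageId("visitor-042").
//        SetImage(img).
//        SetMaxFaces(2).
//        SetQualityFilter("AUTO")
//    out, err := client.IndexFaces(input)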

// String returns the string representation
func (s IndexFacesInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s IndexFacesInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *IndexFacesInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "IndexFacesInput"}
	if s.CollectionId == nil {
		invalidParams.Add(request.NewErrParamRequired("CollectionId"))
	}
	if s.CollectionId != nil && len(*s.CollectionId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("CollectionId", 1))
	}
	if s.ExternalImageId != nil && len(*s.ExternalImageId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("ExternalImageId", 1))
	}
	if s.Image == nil {
		invalidParams.Add(request.NewErrParamRequired("Image"))
	}
	if s.MaxFaces != nil && *s.MaxFaces < 1 {
		invalidParams.Add(request.NewErrParamMinValue("MaxFaces", 1))
	}
	if s.Image != nil {
		if err := s.Image.Validate(); err != nil {
			invalidParams.AddNested("Image", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetCollectionId sets the CollectionId field's value.
func (s *IndexFacesInput) SetCollectionId(v string) *IndexFacesInput {
	s.CollectionId = &v
	return s
}

// SetDetectionAttributes sets the DetectionAttributes field's value.
func (s *IndexFacesInput) SetDetectionAttributes(v []*string) *IndexFacesInput {
	s.DetectionAttributes = v
	return s
}

// SetExternalImageId sets the ExternalImageId field's value.
func (s *IndexFacesInput) SetExternalImageId(v string) *IndexFacesInput {
	s.ExternalImageId = &v
	return s
}

// SetImage sets the Image field's value.
func (s *IndexFacesInput) SetImage(v *Image) *IndexFacesInput {
	s.Image = v
	return s
}

// SetMaxFaces sets the MaxFaces field's value.
func (s *IndexFacesInput) SetMaxFaces(v int64) *IndexFacesInput {
	s.MaxFaces = &v
	return s
}

// SetQualityFilter sets the QualityFilter field's value.
func (s *IndexFacesInput) SetQualityFilter(v string) *IndexFacesInput {
	s.QualityFilter = &v
	return s
}

type IndexFacesOutput struct {
	_ struct{} `type:"structure"`

	// The version number of the face detection model that's associated with the
	// input collection (CollectionId).
	FaceModelVersion *string `type:"string"`

	// An array of faces detected and added to the collection. For more information,
	// see Searching Faces in a Collection in the Amazon Rekognition Developer Guide.
	FaceRecords []*FaceRecord `type:"list"`

	// If your collection is associated with a face detection model that's later
	// than version 3.0, the value of OrientationCorrection is always null and no
	// orientation information is returned.
	//
	// If your collection is associated with a face detection model that's version
	// 3.0 or earlier, the following applies:
	//
	//    * If the input image is in .jpeg format, it might contain exchangeable
	//    image file format (Exif) metadata that includes the image's orientation.
	//    Amazon Rekognition uses this orientation information to perform image
	//    correction - the bounding box coordinates are translated to represent
	//    object locations after the orientation information in the Exif metadata
	//    is used to correct the image orientation. Images in .png format don't
	//    contain Exif metadata. The value of OrientationCorrection is null.
	//
	//    * If the image doesn't contain orientation information in its Exif metadata,
	//    Amazon Rekognition returns an estimated orientation (ROTATE_0, ROTATE_90,
	//    ROTATE_180, ROTATE_270). Amazon Rekognition doesn’t perform image correction
	//    for these images. The bounding box coordinates aren't translated and represent
	//    the object locations before the image is rotated.
	//
	// Bounding box information is returned in the FaceRecords array. You can get
	// the version of the face detection model by calling DescribeCollection.
	OrientationCorrection *string `type:"string" enum:"OrientationCorrection"`

	// An array of faces that were detected in the image but weren't indexed. They
	// weren't indexed because the quality filter identified them as low quality,
	// or the MaxFaces request parameter filtered them out. To use the quality filter,
	// you specify the QualityFilter request parameter.
	UnindexedFaces []*UnindexedFace `type:"list"`
}
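
// Minimal sketch for reading the result (out is the *IndexFacesOutput from
// the IndexFaces sketch above; field names follow the struct just defined):
//
//    for _, rec := range out.FaceRecords {
//        fmt.Println("indexed face:", aws.StringValue(rec.Face.FaceId))
//    }
//    for _, uf := range out.UnindexedFaces {
//        fmt.Println("skipped, reasons:", aws.StringValueSlice(uf.Reasons))
//    }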

// String returns the string representation
func (s IndexFacesOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s IndexFacesOutput) GoString() string {
	return s.String()
}

// SetFaceModelVersion sets the FaceModelVersion field's value.
func (s *IndexFacesOutput) SetFaceModelVersion(v string) *IndexFacesOutput {
	s.FaceModelVersion = &v
	return s
}

// SetFaceRecords sets the FaceRecords field's value.
func (s *IndexFacesOutput) SetFaceRecords(v []*FaceRecord) *IndexFacesOutput {
	s.FaceRecords = v
	return s
}

// SetOrientationCorrection sets the OrientationCorrection field's value.
func (s *IndexFacesOutput) SetOrientationCorrection(v string) *IndexFacesOutput {
	s.OrientationCorrection = &v
	return s
}

// SetUnindexedFaces sets the UnindexedFaces field's value.
func (s *IndexFacesOutput) SetUnindexedFaces(v []*UnindexedFace) *IndexFacesOutput {
	s.UnindexedFaces = v
	return s
}

// An instance of a label returned by Amazon Rekognition Image (DetectLabels)
// or by Amazon Rekognition Video (GetLabelDetection).
type Instance struct {
	_ struct{} `type:"structure"`

	// The position of the label instance on the image.
	BoundingBox *BoundingBox `type:"structure"`

	// The confidence that Amazon Rekognition has in the accuracy of the bounding
	// box.
	Confidence *float64 `type:"float"`
}

// String returns the string representation
func (s Instance) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Instance) GoString() string {
	return s.String()
}

// SetBoundingBox sets the BoundingBox field's value.
func (s *Instance) SetBoundingBox(v *BoundingBox) *Instance {
	s.BoundingBox = v
	return s
}

// SetConfidence sets the Confidence field's value.
func (s *Instance) SetConfidence(v float64) *Instance {
	s.Confidence = &v
	return s
}

// Amazon Rekognition experienced a service issue. Try your call again.
type InternalServerError struct {
	_            struct{}                  `type:"structure"`
	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

	Message_ *string `locationName:"message" type:"string"`
}

// String returns the string representation
func (s InternalServerError) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s InternalServerError) GoString() string {
	return s.String()
}

func newErrorInternalServerError(v protocol.ResponseMetadata) error {
	return &InternalServerError{
		RespMetadata: v,
	}
}

// Code returns the exception type name.
func (s *InternalServerError) Code() string {
	return "InternalServerError"
}

// Message returns the exception's message.
func (s *InternalServerError) Message() string {
	if s.Message_ != nil {
		return *s.Message_
	}
	return ""
}

// OrigErr always returns nil, satisfying the awserr.Error interface.
func (s *InternalServerError) OrigErr() error {
	return nil
}

func (s *InternalServerError) Error() string {
	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}

// StatusCode returns the HTTP status code for the request's response error.
func (s *InternalServerError) StatusCode() int {
	return s.RespMetadata.StatusCode
}

// RequestID returns the service's response RequestID for the request.
func (s *InternalServerError) RequestID() string {
	return s.RespMetadata.RequestID
}

// The provided image format is not supported.
type InvalidImageFormatException struct {
	_            struct{}                  `type:"structure"`
	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

	Message_ *string `locationName:"message" type:"string"`
}

// String returns the string representation
func (s InvalidImageFormatException) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s InvalidImageFormatException) GoString() string {
	return s.String()
}

func newErrorInvalidImageFormatException(v protocol.ResponseMetadata) error {
	return &InvalidImageFormatException{
		RespMetadata: v,
	}
}

// Code returns the exception type name.
func (s *InvalidImageFormatException) Code() string {
	return "InvalidImageFormatException"
}

// Message returns the exception's message.
func (s *InvalidImageFormatException) Message() string {
	if s.Message_ != nil {
		return *s.Message_
	}
	return ""
}

// OrigErr always returns nil, satisfying the awserr.Error interface.
func (s *InvalidImageFormatException) OrigErr() error {
	return nil
}

func (s *InvalidImageFormatException) Error() string {
	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}

// StatusCode returns the HTTP status code for the request's response error.
func (s *InvalidImageFormatException) StatusCode() int {
	return s.RespMetadata.StatusCode
}

// RequestID returns the service's response RequestID for the request.
func (s *InvalidImageFormatException) RequestID() string {
	return s.RespMetadata.RequestID
}

// Pagination token in the request is not valid.
type InvalidPaginationTokenException struct {
	_            struct{}                  `type:"structure"`
	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

	Message_ *string `locationName:"message" type:"string"`
}

// String returns the string representation
func (s InvalidPaginationTokenException) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s InvalidPaginationTokenException) GoString() string {
	return s.String()
}

func newErrorInvalidPaginationTokenException(v protocol.ResponseMetadata) error {
	return &InvalidPaginationTokenException{
		RespMetadata: v,
	}
}

// Code returns the exception type name.
func (s *InvalidPaginationTokenException) Code() string {
	return "InvalidPaginationTokenException"
}

// Message returns the exception's message.
func (s *InvalidPaginationTokenException) Message() string {
	if s.Message_ != nil {
		return *s.Message_
	}
	return ""
}

// OrigErr always returns nil, satisfying the awserr.Error interface.
func (s *InvalidPaginationTokenException) OrigErr() error {
	return nil
}

func (s *InvalidPaginationTokenException) Error() string {
	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}

// StatusCode returns the HTTP status code for the request's response error.
func (s *InvalidPaginationTokenException) StatusCode() int {
	return s.RespMetadata.StatusCode
}

// RequestID returns the service's response RequestID for the request.
func (s *InvalidPaginationTokenException) RequestID() string {
	return s.RespMetadata.RequestID
}

// Input parameter violated a constraint. Validate your parameter before calling
// the API operation again.
type InvalidParameterException struct {
	_            struct{}                  `type:"structure"`
	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

	Message_ *string `locationName:"message" type:"string"`
}

// String returns the string representation
func (s InvalidParameterException) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s InvalidParameterException) GoString() string {
	return s.String()
}

func newErrorInvalidParameterException(v protocol.ResponseMetadata) error {
	return &InvalidParameterException{
		RespMetadata: v,
	}
}

// Code returns the exception type name.
func (s *InvalidParameterException) Code() string {
	return "InvalidParameterException"
}

// Message returns the exception's message.
func (s *InvalidParameterException) Message() string {
	if s.Message_ != nil {
		return *s.Message_
	}
	return ""
}

// OrigErr always returns nil, satisfying the awserr.Error interface.
func (s *InvalidParameterException) OrigErr() error {
	return nil
}

func (s *InvalidParameterException) Error() string {
	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}

// StatusCode returns the HTTP status code for the request's response error.
func (s *InvalidParameterException) StatusCode() int {
	return s.RespMetadata.StatusCode
}

// RequestID returns the service's response RequestID for the request.
func (s *InvalidParameterException) RequestID() string {
	return s.RespMetadata.RequestID
}

// Amazon Rekognition is unable to access the S3 object specified in the request.
type InvalidS3ObjectException struct {
	_            struct{}                  `type:"structure"`
	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

	Message_ *string `locationName:"message" type:"string"`
}

// String returns the string representation
func (s InvalidS3ObjectException) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s InvalidS3ObjectException) GoString() string {
	return s.String()
}

func newErrorInvalidS3ObjectException(v protocol.ResponseMetadata) error {
	return &InvalidS3ObjectException{
		RespMetadata: v,
	}
}

// Code returns the exception type name.
func (s *InvalidS3ObjectException) Code() string {
	return "InvalidS3ObjectException"
}

// Message returns the exception's message.
func (s *InvalidS3ObjectException) Message() string {
	if s.Message_ != nil {
		return *s.Message_
	}
	return ""
}

// OrigErr always returns nil, satisfying the awserr.Error interface.
func (s *InvalidS3ObjectException) OrigErr() error {
	return nil
}

func (s *InvalidS3ObjectException) Error() string {
	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}

// StatusCode returns the HTTP status code for the request's response error.
func (s *InvalidS3ObjectException) StatusCode() int {
	return s.RespMetadata.StatusCode
}

// RequestID returns the service's response RequestID for the request.
func (s *InvalidS3ObjectException) RequestID() string {
	return s.RespMetadata.RequestID
}

// The Kinesis data stream to which the analysis results of an Amazon Rekognition
// stream processor are streamed. For more information, see CreateStreamProcessor
// in the Amazon Rekognition Developer Guide.
type KinesisDataStream struct {
	_ struct{} `type:"structure"`

	// ARN of the output Amazon Kinesis Data Streams stream.
	Arn *string `type:"string"`
}

// String returns the string representation
func (s KinesisDataStream) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s KinesisDataStream) GoString() string {
	return s.String()
}

// SetArn sets the Arn field's value.
func (s *KinesisDataStream) SetArn(v string) *KinesisDataStream {
	s.Arn = &v
	return s
}

// Kinesis video stream that provides the source streaming video for an Amazon
// Rekognition Video stream processor. For more information, see CreateStreamProcessor
// in the Amazon Rekognition Developer Guide.
type KinesisVideoStream struct {
	_ struct{} `type:"structure"`

	// ARN of the Kinesis video stream that streams the source video.
	Arn *string `type:"string"`
}

// String returns the string representation
func (s KinesisVideoStream) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s KinesisVideoStream) GoString() string {
	return s.String()
}

// SetArn sets the Arn field's value.
func (s *KinesisVideoStream) SetArn(v string) *KinesisVideoStream {
	s.Arn = &v
	return s
}

// Structure containing details about the detected label, including the name,
// detected instances, parent labels, and level of confidence.
type Label struct {
	_ struct{} `type:"structure"`

	// Level of confidence.
	Confidence *float64 `type:"float"`

	// If Label represents an object, Instances contains the bounding boxes for
	// each instance of the detected object. Bounding boxes are returned for common
	// object labels such as people, cars, furniture, apparel or pets.
	Instances []*Instance `type:"list"`

	// The name (label) of the object or scene.
	Name *string `type:"string"`

	// The parent labels for a label. The response includes all ancestor labels.
	Parents []*Parent `type:"list"`
}

// String returns the string representation
func (s Label) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Label) GoString() string {
	return s.String()
}

// SetConfidence sets the Confidence field's value.
func (s *Label) SetConfidence(v float64) *Label {
	s.Confidence = &v
	return s
}

// SetInstances sets the Instances field's value.
func (s *Label) SetInstances(v []*Instance) *Label {
	s.Instances = v
	return s
}

// SetName sets the Name field's value.
func (s *Label) SetName(v string) *Label {
	s.Name = &v
	return s
}

// SetParents sets the Parents field's value.
func (s *Label) SetParents(v []*Parent) *Label {
	s.Parents = v
	return s
}

// Information about a label detected in a video analysis request and the time
// the label was detected in the video.
type LabelDetection struct {
	_ struct{} `type:"structure"`

	// Details about the detected label.
	Label *Label `type:"structure"`

	// Time, in milliseconds from the start of the video, that the label was detected.
	Timestamp *int64 `type:"long"`
}

// String returns the string representation
func (s LabelDetection) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s LabelDetection) GoString() string {
	return s.String()
}

// SetLabel sets the Label field's value.
func (s *LabelDetection) SetLabel(v *Label) *LabelDetection {
	s.Label = v
	return s
}

// SetTimestamp sets the Timestamp field's value.
func (s *LabelDetection) SetTimestamp(v int64) *LabelDetection {
	s.Timestamp = &v
	return s
}

// Indicates the location of the landmark on the face.
type Landmark struct {
	_ struct{} `type:"structure"`

	// Type of landmark.
	Type *string `type:"string" enum:"LandmarkType"`

	// The x-coordinate of the landmark expressed as a ratio of the width of the
	// image. The x-coordinate is measured from the left-side of the image. For
	// example, if the image is 700 pixels wide and the x-coordinate of the landmark
	// is at 350 pixels, this value is 0.5.
	X *float64 `type:"float"`

	// The y-coordinate of the landmark expressed as a ratio of the height of the
	// image. The y-coordinate is measured from the top of the image. For example,
	// if the image height is 200 pixels and the y-coordinate of the landmark is
	// at 50 pixels, this value is 0.25.
	Y *float64 `type:"float"`
}
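
// Worked example matching the ratios described above: converting a landmark
// back to pixel coordinates for a hypothetical 700x200-pixel image.
//
//    px := aws.Float64Value(landmark.X) * 700 // 0.5 -> 350
//    py := aws.Float64Value(landmark.Y) * 200 // 0.25 -> 50
//    fmt.Printf("landmark %s at (%.0f, %.0f)\n",
//        aws.StringValue(landmark.Type), px, py)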

// String returns the string representation
func (s Landmark) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Landmark) GoString() string {
	return s.String()
}

// SetType sets the Type field's value.
func (s *Landmark) SetType(v string) *Landmark {
	s.Type = &v
	return s
}

// SetX sets the X field's value.
func (s *Landmark) SetX(v float64) *Landmark {
	s.X = &v
	return s
}

// SetY sets the Y field's value.
func (s *Landmark) SetY(v float64) *Landmark {
	s.Y = &v
	return s
}

// An Amazon Rekognition service limit was exceeded. For example, if you start
// too many Amazon Rekognition Video jobs concurrently, calls to start operations
// (StartLabelDetection, for example) will raise a LimitExceededException (HTTP
// status code: 400) until the number of concurrently running jobs is below
// the Amazon Rekognition service limit.
type LimitExceededException struct {
	_            struct{}                  `type:"structure"`
	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

	Message_ *string `locationName:"message" type:"string"`
}

// String returns the string representation
func (s LimitExceededException) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s LimitExceededException) GoString() string {
	return s.String()
}

func newErrorLimitExceededException(v protocol.ResponseMetadata) error {
	return &LimitExceededException{
		RespMetadata: v,
	}
}

// Code returns the exception type name.
func (s *LimitExceededException) Code() string {
	return "LimitExceededException"
}

// Message returns the exception's message.
func (s *LimitExceededException) Message() string {
	if s.Message_ != nil {
		return *s.Message_
	}
	return ""
}

// OrigErr always returns nil, satisfying the awserr.Error interface.
func (s *LimitExceededException) OrigErr() error {
	return nil
}

func (s *LimitExceededException) Error() string {
	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}

// StatusCode returns the HTTP status code for the request's response error.
func (s *LimitExceededException) StatusCode() int {
	return s.RespMetadata.StatusCode
}

// RequestID returns the service's response RequestID for the request.
func (s *LimitExceededException) RequestID() string {
	return s.RespMetadata.RequestID
}

type ListCollectionsInput struct {
	_ struct{} `type:"structure"`

	// Maximum number of collection IDs to return.
	MaxResults *int64 `type:"integer"`

	// Pagination token from the previous response.
	NextToken *string `type:"string"`
}

// String returns the string representation
func (s ListCollectionsInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListCollectionsInput) GoString() string {
	return s.String()
}

// SetMaxResults sets the MaxResults field's value.
func (s *ListCollectionsInput) SetMaxResults(v int64) *ListCollectionsInput {
	s.MaxResults = &v
	return s
}

// SetNextToken sets the NextToken field's value.
func (s *ListCollectionsInput) SetNextToken(v string) *ListCollectionsInput {
	s.NextToken = &v
	return s
}

type ListCollectionsOutput struct {
	_ struct{} `type:"structure"`

	// An array of collection IDs.
	CollectionIds []*string `type:"list"`

	// Version numbers of the face detection models associated with the collections
	// in the array CollectionIds. For example, the value of FaceModelVersions[2]
	// is the version number for the face detection model used by the collection
	// in CollectionIds[2].
	FaceModelVersions []*string `type:"list"`

	// If the result is truncated, the response provides a NextToken that you can
	// use in the subsequent request to fetch the next set of collection IDs.
	NextToken *string `type:"string"`
}
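
// Hedged pagination sketch: looping with NextToken until all collection IDs
// are retrieved (the SDK's ListCollectionsPages helper is an alternative).
//
//    input := &ListCollectionsInput{}
//    for {
//        out, err := client.ListCollections(input)
//        if err != nil {
//            break // handle the error in real code
//        }
//        fmt.Println(aws.StringValueSlice(out.CollectionIds))
//        if out.NextToken == nil {
//            break
//        }
//        input.SetNextToken(*out.NextToken)
//    }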

// String returns the string representation
func (s ListCollectionsOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListCollectionsOutput) GoString() string {
	return s.String()
}

// SetCollectionIds sets the CollectionIds field's value.
func (s *ListCollectionsOutput) SetCollectionIds(v []*string) *ListCollectionsOutput {
	s.CollectionIds = v
	return s
}

// SetFaceModelVersions sets the FaceModelVersions field's value.
func (s *ListCollectionsOutput) SetFaceModelVersions(v []*string) *ListCollectionsOutput {
	s.FaceModelVersions = v
	return s
}

// SetNextToken sets the NextToken field's value.
func (s *ListCollectionsOutput) SetNextToken(v string) *ListCollectionsOutput {
	s.NextToken = &v
	return s
}

type ListFacesInput struct {
	_ struct{} `type:"structure"`

	// ID of the collection from which to list the faces.
	//
	// CollectionId is a required field
	CollectionId *string `min:"1" type:"string" required:"true"`

	// Maximum number of faces to return.
	MaxResults *int64 `type:"integer"`

	// If the previous response was incomplete (because there is more data to retrieve),
	// Amazon Rekognition returns a pagination token in the response. You can use
	// this pagination token to retrieve the next set of faces.
	NextToken *string `type:"string"`
}

// String returns the string representation
func (s ListFacesInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListFacesInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *ListFacesInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "ListFacesInput"}
	if s.CollectionId == nil {
		invalidParams.Add(request.NewErrParamRequired("CollectionId"))
	}
	if s.CollectionId != nil && len(*s.CollectionId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("CollectionId", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetCollectionId sets the CollectionId field's value.
func (s *ListFacesInput) SetCollectionId(v string) *ListFacesInput {
	s.CollectionId = &v
	return s
}

// SetMaxResults sets the MaxResults field's value.
func (s *ListFacesInput) SetMaxResults(v int64) *ListFacesInput {
	s.MaxResults = &v
	return s
}

// SetNextToken sets the NextToken field's value.
func (s *ListFacesInput) SetNextToken(v string) *ListFacesInput {
	s.NextToken = &v
	return s
}

type ListFacesOutput struct {
	_ struct{} `type:"structure"`

	// Version number of the face detection model associated with the input collection
	// (CollectionId).
	FaceModelVersion *string `type:"string"`

	// An array of Face objects.
	Faces []*Face `type:"list"`

	// If the response is truncated, Amazon Rekognition returns this token that
	// you can use in the subsequent request to retrieve the next set of faces.
	NextToken *string `type:"string"`
}

// String returns the string representation
func (s ListFacesOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListFacesOutput) GoString() string {
	return s.String()
}

// SetFaceModelVersion sets the FaceModelVersion field's value.
func (s *ListFacesOutput) SetFaceModelVersion(v string) *ListFacesOutput {
	s.FaceModelVersion = &v
	return s
}

// SetFaces sets the Faces field's value.
func (s *ListFacesOutput) SetFaces(v []*Face) *ListFacesOutput {
	s.Faces = v
	return s
}

// SetNextToken sets the NextToken field's value.
func (s *ListFacesOutput) SetNextToken(v string) *ListFacesOutput {
	s.NextToken = &v
	return s
}

type ListStreamProcessorsInput struct {
	_ struct{} `type:"structure"`

	// Maximum number of stream processors you want Amazon Rekognition Video to
	// return in the response. The default is 1000.
	MaxResults *int64 `min:"1" type:"integer"`

	// If the previous response was incomplete (because there are more stream processors
	// to retrieve), Amazon Rekognition Video returns a pagination token in the
	// response. You can use this pagination token to retrieve the next set of stream
	// processors.
	NextToken *string `type:"string"`
}

// String returns the string representation
func (s ListStreamProcessorsInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListStreamProcessorsInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *ListStreamProcessorsInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "ListStreamProcessorsInput"}
	if s.MaxResults != nil && *s.MaxResults < 1 {
		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetMaxResults sets the MaxResults field's value.
func (s *ListStreamProcessorsInput) SetMaxResults(v int64) *ListStreamProcessorsInput {
	s.MaxResults = &v
	return s
}

// SetNextToken sets the NextToken field's value.
func (s *ListStreamProcessorsInput) SetNextToken(v string) *ListStreamProcessorsInput {
	s.NextToken = &v
	return s
}

type ListStreamProcessorsOutput struct {
	_ struct{} `type:"structure"`

	// If the response is truncated, Amazon Rekognition Video returns this token
	// that you can use in the subsequent request to retrieve the next set of stream
	// processors.
	NextToken *string `type:"string"`

	// List of stream processors that you have created.
	StreamProcessors []*StreamProcessor `type:"list"`
}

// String returns the string representation
func (s ListStreamProcessorsOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListStreamProcessorsOutput) GoString() string {
	return s.String()
}

// SetNextToken sets the NextToken field's value.
func (s *ListStreamProcessorsOutput) SetNextToken(v string) *ListStreamProcessorsOutput {
	s.NextToken = &v
	return s
}

// SetStreamProcessors sets the StreamProcessors field's value.
func (s *ListStreamProcessorsOutput) SetStreamProcessors(v []*StreamProcessor) *ListStreamProcessorsOutput {
	s.StreamProcessors = v
	return s
}

// Provides information about a single type of unsafe content found in an image
// or video. Each type of moderated content has a label within a hierarchical
// taxonomy. For more information, see Detecting Unsafe Content in the Amazon
// Rekognition Developer Guide.
type ModerationLabel struct {
	_ struct{} `type:"structure"`

	// Specifies the confidence that Amazon Rekognition has that the label has been
	// correctly identified.
	//
	// If you don't specify the MinConfidence parameter in the call to DetectModerationLabels,
	// the operation returns labels with a confidence value greater than or equal
	// to 50 percent.
	Confidence *float64 `type:"float"`

	// The label name for the type of unsafe content detected in the image.
	Name *string `type:"string"`

	// The name for the parent label. Labels at the top level of the hierarchy have
	// the parent label "".
	ParentName *string `type:"string"`
}
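
// Illustrative sketch (labels is a []*ModerationLabel, for example from a
// DetectModerationLabels response): printing each second-level label under
// its parent category.
//
//    for _, l := range labels {
//        if aws.StringValue(l.ParentName) != "" {
//            fmt.Printf("%s > %s (%.1f%%)\n",
//                aws.StringValue(l.ParentName),
//                aws.StringValue(l.Name),
//                aws.Float64Value(l.Confidence))
//        }
//    }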
12830
12831// String returns the string representation
12832func (s ModerationLabel) String() string {
12833	return awsutil.Prettify(s)
12834}
12835
12836// GoString returns the string representation
12837func (s ModerationLabel) GoString() string {
12838	return s.String()
12839}
12840
12841// SetConfidence sets the Confidence field's value.
12842func (s *ModerationLabel) SetConfidence(v float64) *ModerationLabel {
12843	s.Confidence = &v
12844	return s
12845}
12846
12847// SetName sets the Name field's value.
12848func (s *ModerationLabel) SetName(v string) *ModerationLabel {
12849	s.Name = &v
12850	return s
12851}
12852
12853// SetParentName sets the ParentName field's value.
12854func (s *ModerationLabel) SetParentName(v string) *ModerationLabel {
12855	s.ParentName = &v
12856	return s
12857}
12858
12859// Indicates whether or not the mouth on the face is open, and the confidence
12860// level in the determination.
12861type MouthOpen struct {
12862	_ struct{} `type:"structure"`
12863
12864	// Level of confidence in the determination.
12865	Confidence *float64 `type:"float"`
12866
12867	// Boolean value that indicates whether the mouth on the face is open or not.
12868	Value *bool `type:"boolean"`
12869}
12870
12871// String returns the string representation
12872func (s MouthOpen) String() string {
12873	return awsutil.Prettify(s)
12874}
12875
12876// GoString returns the string representation
12877func (s MouthOpen) GoString() string {
12878	return s.String()
12879}
12880
12881// SetConfidence sets the Confidence field's value.
12882func (s *MouthOpen) SetConfidence(v float64) *MouthOpen {
12883	s.Confidence = &v
12884	return s
12885}
12886
12887// SetValue sets the Value field's value.
12888func (s *MouthOpen) SetValue(v bool) *MouthOpen {
12889	s.Value = &v
12890	return s
12891}
12892
12893// Indicates whether or not the face has a mustache, and the confidence level
12894// in the determination.
12895type Mustache struct {
12896	_ struct{} `type:"structure"`
12897
12898	// Level of confidence in the determination.
12899	Confidence *float64 `type:"float"`
12900
	// Boolean value that indicates whether the face has a mustache or not.
12902	Value *bool `type:"boolean"`
12903}
12904
12905// String returns the string representation
12906func (s Mustache) String() string {
12907	return awsutil.Prettify(s)
12908}
12909
12910// GoString returns the string representation
12911func (s Mustache) GoString() string {
12912	return s.String()
12913}
12914
12915// SetConfidence sets the Confidence field's value.
12916func (s *Mustache) SetConfidence(v float64) *Mustache {
12917	s.Confidence = &v
12918	return s
12919}
12920
12921// SetValue sets the Value field's value.
12922func (s *Mustache) SetValue(v bool) *Mustache {
12923	s.Value = &v
12924	return s
12925}
12926
12927// The Amazon Simple Notification Service topic to which Amazon Rekognition
12928// publishes the completion status of a video analysis operation. For more information,
12929// see api-video.
12930type NotificationChannel struct {
12931	_ struct{} `type:"structure"`
12932
12933	// The ARN of an IAM role that gives Amazon Rekognition publishing permissions
12934	// to the Amazon SNS topic.
12935	//
12936	// RoleArn is a required field
12937	RoleArn *string `type:"string" required:"true"`
12938
	// The Amazon SNS topic to which Amazon Rekognition posts the completion
	// status.
12941	//
12942	// SNSTopicArn is a required field
12943	SNSTopicArn *string `type:"string" required:"true"`
12944}
12945
12946// String returns the string representation
12947func (s NotificationChannel) String() string {
12948	return awsutil.Prettify(s)
12949}
12950
12951// GoString returns the string representation
12952func (s NotificationChannel) GoString() string {
12953	return s.String()
12954}
12955
12956// Validate inspects the fields of the type to determine if they are valid.
12957func (s *NotificationChannel) Validate() error {
12958	invalidParams := request.ErrInvalidParams{Context: "NotificationChannel"}
12959	if s.RoleArn == nil {
12960		invalidParams.Add(request.NewErrParamRequired("RoleArn"))
12961	}
12962	if s.SNSTopicArn == nil {
12963		invalidParams.Add(request.NewErrParamRequired("SNSTopicArn"))
12964	}
12965
12966	if invalidParams.Len() > 0 {
12967		return invalidParams
12968	}
12969	return nil
12970}
12971
12972// SetRoleArn sets the RoleArn field's value.
12973func (s *NotificationChannel) SetRoleArn(v string) *NotificationChannel {
12974	s.RoleArn = &v
12975	return s
12976}
12977
12978// SetSNSTopicArn sets the SNSTopicArn field's value.
12979func (s *NotificationChannel) SetSNSTopicArn(v string) *NotificationChannel {
12980	s.SNSTopicArn = &v
12981	return s
12982}
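
// A sketch of building a channel with the setter chain, using placeholder
// ARNs; Validate reports a missing RoleArn or SNSTopicArn before a request is
// sent.
//
//    channel := (&NotificationChannel{}).
//        SetRoleArn("arn:aws:iam::111122223333:role/RekognitionSNSRole").
//        SetSNSTopicArn("arn:aws:sns:us-east-1:111122223333:RekognitionVideoTopic")
//    if err := channel.Validate(); err != nil {
//        fmt.Println(err) // e.g. a missing required field
//    }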
12983
12984// The S3 bucket and folder location where training output is placed.
12985type OutputConfig struct {
12986	_ struct{} `type:"structure"`
12987
12988	// The S3 bucket where training output is placed.
12989	S3Bucket *string `min:"3" type:"string"`
12990
12991	// The prefix applied to the training output files.
12992	S3KeyPrefix *string `type:"string"`
12993}
12994
12995// String returns the string representation
12996func (s OutputConfig) String() string {
12997	return awsutil.Prettify(s)
12998}
12999
13000// GoString returns the string representation
13001func (s OutputConfig) GoString() string {
13002	return s.String()
13003}
13004
13005// Validate inspects the fields of the type to determine if they are valid.
13006func (s *OutputConfig) Validate() error {
13007	invalidParams := request.ErrInvalidParams{Context: "OutputConfig"}
13008	if s.S3Bucket != nil && len(*s.S3Bucket) < 3 {
13009		invalidParams.Add(request.NewErrParamMinLen("S3Bucket", 3))
13010	}
13011
13012	if invalidParams.Len() > 0 {
13013		return invalidParams
13014	}
13015	return nil
13016}
13017
13018// SetS3Bucket sets the S3Bucket field's value.
13019func (s *OutputConfig) SetS3Bucket(v string) *OutputConfig {
13020	s.S3Bucket = &v
13021	return s
13022}
13023
13024// SetS3KeyPrefix sets the S3KeyPrefix field's value.
13025func (s *OutputConfig) SetS3KeyPrefix(v string) *OutputConfig {
13026	s.S3KeyPrefix = &v
13027	return s
13028}
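
// A sketch of pointing training output at a bucket and prefix, with
// placeholder names; Validate enforces the 3-character minimum on the bucket
// name.
//
//    cfg := (&OutputConfig{}).
//        SetS3Bucket("my-training-bucket").
//        SetS3KeyPrefix("output/")
//    if err := cfg.Validate(); err != nil {
//        fmt.Println(err)
//    }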
13029
13030// A parent label for a label. A label can have 0, 1, or more parents.
13031type Parent struct {
13032	_ struct{} `type:"structure"`
13033
13034	// The name of the parent label.
13035	Name *string `type:"string"`
13036}
13037
13038// String returns the string representation
13039func (s Parent) String() string {
13040	return awsutil.Prettify(s)
13041}
13042
13043// GoString returns the string representation
13044func (s Parent) GoString() string {
13045	return s.String()
13046}
13047
13048// SetName sets the Name field's value.
13049func (s *Parent) SetName(v string) *Parent {
13050	s.Name = &v
13051	return s
13052}
13053
13054// Details about a person detected in a video analysis request.
13055type PersonDetail struct {
13056	_ struct{} `type:"structure"`
13057
13058	// Bounding box around the detected person.
13059	BoundingBox *BoundingBox `type:"structure"`
13060
13061	// Face details for the detected person.
13062	Face *FaceDetail `type:"structure"`
13063
	// Identifier for the person detected within a video. Use this identifier to
	// keep track of the person throughout the video. The identifier is not stored
	// by Amazon Rekognition.
13067	Index *int64 `type:"long"`
13068}
13069
13070// String returns the string representation
13071func (s PersonDetail) String() string {
13072	return awsutil.Prettify(s)
13073}
13074
13075// GoString returns the string representation
13076func (s PersonDetail) GoString() string {
13077	return s.String()
13078}
13079
13080// SetBoundingBox sets the BoundingBox field's value.
13081func (s *PersonDetail) SetBoundingBox(v *BoundingBox) *PersonDetail {
13082	s.BoundingBox = v
13083	return s
13084}
13085
13086// SetFace sets the Face field's value.
13087func (s *PersonDetail) SetFace(v *FaceDetail) *PersonDetail {
13088	s.Face = v
13089	return s
13090}
13091
13092// SetIndex sets the Index field's value.
13093func (s *PersonDetail) SetIndex(v int64) *PersonDetail {
13094	s.Index = &v
13095	return s
13096}
13097
13098// Details and path tracking information for a single time a person's path is
13099// tracked in a video. Amazon Rekognition operations that track people's paths
13100// return an array of PersonDetection objects with elements for each time a
13101// person's path is tracked in a video.
13102//
13103// For more information, see GetPersonTracking in the Amazon Rekognition Developer
13104// Guide.
13105type PersonDetection struct {
13106	_ struct{} `type:"structure"`
13107
13108	// Details about a person whose path was tracked in a video.
13109	Person *PersonDetail `type:"structure"`
13110
13111	// The time, in milliseconds from the start of the video, that the person's
13112	// path was tracked.
13113	Timestamp *int64 `type:"long"`
13114}
13115
13116// String returns the string representation
13117func (s PersonDetection) String() string {
13118	return awsutil.Prettify(s)
13119}
13120
13121// GoString returns the string representation
13122func (s PersonDetection) GoString() string {
13123	return s.String()
13124}
13125
13126// SetPerson sets the Person field's value.
13127func (s *PersonDetection) SetPerson(v *PersonDetail) *PersonDetection {
13128	s.Person = v
13129	return s
13130}
13131
13132// SetTimestamp sets the Timestamp field's value.
13133func (s *PersonDetection) SetTimestamp(v int64) *PersonDetection {
13134	s.Timestamp = &v
13135	return s
13136}
13137
// Information about a person whose face matches one or more faces in an Amazon
// Rekognition collection. Includes information about the faces in the Amazon
// Rekognition collection (FaceMatch), information about the person (PersonDetail),
// and the timestamp for when the person was detected in a video. An array of
// PersonMatch objects is returned by GetFaceSearch.
13143type PersonMatch struct {
13144	_ struct{} `type:"structure"`
13145
13146	// Information about the faces in the input collection that match the face of
13147	// a person in the video.
13148	FaceMatches []*FaceMatch `type:"list"`
13149
13150	// Information about the matched person.
13151	Person *PersonDetail `type:"structure"`
13152
13153	// The time, in milliseconds from the beginning of the video, that the person
13154	// was matched in the video.
13155	Timestamp *int64 `type:"long"`
13156}
13157
13158// String returns the string representation
13159func (s PersonMatch) String() string {
13160	return awsutil.Prettify(s)
13161}
13162
13163// GoString returns the string representation
13164func (s PersonMatch) GoString() string {
13165	return s.String()
13166}
13167
13168// SetFaceMatches sets the FaceMatches field's value.
13169func (s *PersonMatch) SetFaceMatches(v []*FaceMatch) *PersonMatch {
13170	s.FaceMatches = v
13171	return s
13172}
13173
13174// SetPerson sets the Person field's value.
13175func (s *PersonMatch) SetPerson(v *PersonDetail) *PersonMatch {
13176	s.Person = v
13177	return s
13178}
13179
13180// SetTimestamp sets the Timestamp field's value.
13181func (s *PersonMatch) SetTimestamp(v int64) *PersonMatch {
13182	s.Timestamp = &v
13183	return s
13184}
13185
13186// The X and Y coordinates of a point on an image. The X and Y values returned
13187// are ratios of the overall image size. For example, if the input image is
13188// 700x200 and the operation returns X=0.5 and Y=0.25, then the point is at
13189// the (350,50) pixel coordinate on the image.
13190//
13191// An array of Point objects, Polygon, is returned by DetectText and by DetectCustomLabels.
13192// Polygon represents a fine-grained polygon around a detected item. For more
13193// information, see Geometry in the Amazon Rekognition Developer Guide.
13194type Point struct {
13195	_ struct{} `type:"structure"`
13196
13197	// The value of the X coordinate for a point on a Polygon.
13198	X *float64 `type:"float"`
13199
13200	// The value of the Y coordinate for a point on a Polygon.
13201	Y *float64 `type:"float"`
13202}
13203
13204// String returns the string representation
13205func (s Point) String() string {
13206	return awsutil.Prettify(s)
13207}
13208
13209// GoString returns the string representation
13210func (s Point) GoString() string {
13211	return s.String()
13212}
13213
13214// SetX sets the X field's value.
13215func (s *Point) SetX(v float64) *Point {
13216	s.X = &v
13217	return s
13218}
13219
13220// SetY sets the Y field's value.
13221func (s *Point) SetY(v float64) *Point {
13222	s.Y = &v
13223	return s
13224}
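
// A small illustrative helper (not part of this package's API) implementing
// the ratio-to-pixel arithmetic described above.
//
//    // toPixels maps a Point's ratio coordinates onto an image of the given
//    // width and height, e.g. (0.5, 0.25) on a 700x200 image -> (350, 50).
//    func toPixels(p *Point, width, height float64) (x, y float64) {
//        return aws.Float64Value(p.X) * width, aws.Float64Value(p.Y) * height
//    }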
13225
13226// Indicates the pose of the face as determined by its pitch, roll, and yaw.
13227type Pose struct {
13228	_ struct{} `type:"structure"`
13229
13230	// Value representing the face rotation on the pitch axis.
13231	Pitch *float64 `type:"float"`
13232
13233	// Value representing the face rotation on the roll axis.
13234	Roll *float64 `type:"float"`
13235
13236	// Value representing the face rotation on the yaw axis.
13237	Yaw *float64 `type:"float"`
13238}
13239
13240// String returns the string representation
13241func (s Pose) String() string {
13242	return awsutil.Prettify(s)
13243}
13244
13245// GoString returns the string representation
13246func (s Pose) GoString() string {
13247	return s.String()
13248}
13249
13250// SetPitch sets the Pitch field's value.
13251func (s *Pose) SetPitch(v float64) *Pose {
13252	s.Pitch = &v
13253	return s
13254}
13255
13256// SetRoll sets the Roll field's value.
13257func (s *Pose) SetRoll(v float64) *Pose {
13258	s.Roll = &v
13259	return s
13260}
13261
13262// SetYaw sets the Yaw field's value.
13263func (s *Pose) SetYaw(v float64) *Pose {
13264	s.Yaw = &v
13265	return s
13266}
13267
// A description of an Amazon Rekognition Custom Labels project.
13269type ProjectDescription struct {
13270	_ struct{} `type:"structure"`
13271
13272	// The Unix timestamp for the date and time that the project was created.
13273	CreationTimestamp *time.Time `type:"timestamp"`
13274
13275	// The Amazon Resource Name (ARN) of the project.
13276	ProjectArn *string `min:"20" type:"string"`
13277
13278	// The current status of the project.
13279	Status *string `type:"string" enum:"ProjectStatus"`
13280}
13281
13282// String returns the string representation
13283func (s ProjectDescription) String() string {
13284	return awsutil.Prettify(s)
13285}
13286
13287// GoString returns the string representation
13288func (s ProjectDescription) GoString() string {
13289	return s.String()
13290}
13291
13292// SetCreationTimestamp sets the CreationTimestamp field's value.
13293func (s *ProjectDescription) SetCreationTimestamp(v time.Time) *ProjectDescription {
13294	s.CreationTimestamp = &v
13295	return s
13296}
13297
13298// SetProjectArn sets the ProjectArn field's value.
13299func (s *ProjectDescription) SetProjectArn(v string) *ProjectDescription {
13300	s.ProjectArn = &v
13301	return s
13302}
13303
13304// SetStatus sets the Status field's value.
13305func (s *ProjectDescription) SetStatus(v string) *ProjectDescription {
13306	s.Status = &v
13307	return s
13308}
13309
13310// The description of a version of a model.
13311type ProjectVersionDescription struct {
13312	_ struct{} `type:"structure"`
13313
13314	// The duration, in seconds, that the model version has been billed for training.
13315	// This value is only returned if the model version has been successfully trained.
13316	BillableTrainingTimeInSeconds *int64 `type:"long"`
13317
	// The Unix timestamp for the date and time that training started.
13319	CreationTimestamp *time.Time `type:"timestamp"`
13320
13321	// The training results. EvaluationResult is only returned if training is successful.
13322	EvaluationResult *EvaluationResult `type:"structure"`
13323
13324	// The location of the summary manifest. The summary manifest provides aggregate
13325	// data validation results for the training and test datasets.
13326	ManifestSummary *GroundTruthManifest `type:"structure"`
13327
13328	// The minimum number of inference units used by the model. For more information,
13329	// see StartProjectVersion.
13330	MinInferenceUnits *int64 `min:"1" type:"integer"`
13331
13332	// The location where training results are saved.
13333	OutputConfig *OutputConfig `type:"structure"`
13334
13335	// The Amazon Resource Name (ARN) of the model version.
13336	ProjectVersionArn *string `min:"20" type:"string"`
13337
13338	// The current status of the model version.
13339	Status *string `type:"string" enum:"ProjectVersionStatus"`
13340
13341	// A descriptive message for an error or warning that occurred.
13342	StatusMessage *string `type:"string"`
13343
13344	// Contains information about the testing results.
13345	TestingDataResult *TestingDataResult `type:"structure"`
13346
13347	// Contains information about the training results.
13348	TrainingDataResult *TrainingDataResult `type:"structure"`
13349
13350	// The Unix date and time that training of the model ended.
13351	TrainingEndTimestamp *time.Time `type:"timestamp"`
13352}
13353
13354// String returns the string representation
13355func (s ProjectVersionDescription) String() string {
13356	return awsutil.Prettify(s)
13357}
13358
13359// GoString returns the string representation
13360func (s ProjectVersionDescription) GoString() string {
13361	return s.String()
13362}
13363
13364// SetBillableTrainingTimeInSeconds sets the BillableTrainingTimeInSeconds field's value.
13365func (s *ProjectVersionDescription) SetBillableTrainingTimeInSeconds(v int64) *ProjectVersionDescription {
13366	s.BillableTrainingTimeInSeconds = &v
13367	return s
13368}
13369
13370// SetCreationTimestamp sets the CreationTimestamp field's value.
13371func (s *ProjectVersionDescription) SetCreationTimestamp(v time.Time) *ProjectVersionDescription {
13372	s.CreationTimestamp = &v
13373	return s
13374}
13375
13376// SetEvaluationResult sets the EvaluationResult field's value.
13377func (s *ProjectVersionDescription) SetEvaluationResult(v *EvaluationResult) *ProjectVersionDescription {
13378	s.EvaluationResult = v
13379	return s
13380}
13381
13382// SetManifestSummary sets the ManifestSummary field's value.
13383func (s *ProjectVersionDescription) SetManifestSummary(v *GroundTruthManifest) *ProjectVersionDescription {
13384	s.ManifestSummary = v
13385	return s
13386}
13387
13388// SetMinInferenceUnits sets the MinInferenceUnits field's value.
13389func (s *ProjectVersionDescription) SetMinInferenceUnits(v int64) *ProjectVersionDescription {
13390	s.MinInferenceUnits = &v
13391	return s
13392}
13393
13394// SetOutputConfig sets the OutputConfig field's value.
13395func (s *ProjectVersionDescription) SetOutputConfig(v *OutputConfig) *ProjectVersionDescription {
13396	s.OutputConfig = v
13397	return s
13398}
13399
13400// SetProjectVersionArn sets the ProjectVersionArn field's value.
13401func (s *ProjectVersionDescription) SetProjectVersionArn(v string) *ProjectVersionDescription {
13402	s.ProjectVersionArn = &v
13403	return s
13404}
13405
13406// SetStatus sets the Status field's value.
13407func (s *ProjectVersionDescription) SetStatus(v string) *ProjectVersionDescription {
13408	s.Status = &v
13409	return s
13410}
13411
13412// SetStatusMessage sets the StatusMessage field's value.
13413func (s *ProjectVersionDescription) SetStatusMessage(v string) *ProjectVersionDescription {
13414	s.StatusMessage = &v
13415	return s
13416}
13417
13418// SetTestingDataResult sets the TestingDataResult field's value.
13419func (s *ProjectVersionDescription) SetTestingDataResult(v *TestingDataResult) *ProjectVersionDescription {
13420	s.TestingDataResult = v
13421	return s
13422}
13423
13424// SetTrainingDataResult sets the TrainingDataResult field's value.
13425func (s *ProjectVersionDescription) SetTrainingDataResult(v *TrainingDataResult) *ProjectVersionDescription {
13426	s.TrainingDataResult = v
13427	return s
13428}
13429
13430// SetTrainingEndTimestamp sets the TrainingEndTimestamp field's value.
13431func (s *ProjectVersionDescription) SetTrainingEndTimestamp(v time.Time) *ProjectVersionDescription {
13432	s.TrainingEndTimestamp = &v
13433	return s
13434}
13435
13436// Information about a body part detected by DetectProtectiveEquipment that
13437// contains PPE. An array of ProtectiveEquipmentBodyPart objects is returned
13438// for each person detected by DetectProtectiveEquipment.
13439type ProtectiveEquipmentBodyPart struct {
13440	_ struct{} `type:"structure"`
13441
13442	// The confidence that Amazon Rekognition has in the detection accuracy of the
13443	// detected body part.
13444	Confidence *float64 `type:"float"`
13445
13446	// An array of Personal Protective Equipment items detected around a body part.
13447	EquipmentDetections []*EquipmentDetection `type:"list"`
13448
13449	// The detected body part.
13450	Name *string `type:"string" enum:"BodyPart"`
13451}
13452
13453// String returns the string representation
13454func (s ProtectiveEquipmentBodyPart) String() string {
13455	return awsutil.Prettify(s)
13456}
13457
13458// GoString returns the string representation
13459func (s ProtectiveEquipmentBodyPart) GoString() string {
13460	return s.String()
13461}
13462
13463// SetConfidence sets the Confidence field's value.
13464func (s *ProtectiveEquipmentBodyPart) SetConfidence(v float64) *ProtectiveEquipmentBodyPart {
13465	s.Confidence = &v
13466	return s
13467}
13468
13469// SetEquipmentDetections sets the EquipmentDetections field's value.
13470func (s *ProtectiveEquipmentBodyPart) SetEquipmentDetections(v []*EquipmentDetection) *ProtectiveEquipmentBodyPart {
13471	s.EquipmentDetections = v
13472	return s
13473}
13474
13475// SetName sets the Name field's value.
13476func (s *ProtectiveEquipmentBodyPart) SetName(v string) *ProtectiveEquipmentBodyPart {
13477	s.Name = &v
13478	return s
13479}
13480
13481// A person detected by a call to DetectProtectiveEquipment. The API returns
13482// all persons detected in the input image in an array of ProtectiveEquipmentPerson
13483// objects.
13484type ProtectiveEquipmentPerson struct {
13485	_ struct{} `type:"structure"`
13486
13487	// An array of body parts detected on a person's body (including body parts
13488	// without PPE).
13489	BodyParts []*ProtectiveEquipmentBodyPart `type:"list"`
13490
13491	// A bounding box around the detected person.
13492	BoundingBox *BoundingBox `type:"structure"`
13493
13494	// The confidence that Amazon Rekognition has that the bounding box contains
13495	// a person.
13496	Confidence *float64 `type:"float"`
13497
13498	// The identifier for the detected person. The identifier is only unique for
13499	// a single call to DetectProtectiveEquipment.
13500	Id *int64 `type:"integer"`
13501}
13502
13503// String returns the string representation
13504func (s ProtectiveEquipmentPerson) String() string {
13505	return awsutil.Prettify(s)
13506}
13507
13508// GoString returns the string representation
13509func (s ProtectiveEquipmentPerson) GoString() string {
13510	return s.String()
13511}
13512
13513// SetBodyParts sets the BodyParts field's value.
13514func (s *ProtectiveEquipmentPerson) SetBodyParts(v []*ProtectiveEquipmentBodyPart) *ProtectiveEquipmentPerson {
13515	s.BodyParts = v
13516	return s
13517}
13518
13519// SetBoundingBox sets the BoundingBox field's value.
13520func (s *ProtectiveEquipmentPerson) SetBoundingBox(v *BoundingBox) *ProtectiveEquipmentPerson {
13521	s.BoundingBox = v
13522	return s
13523}
13524
13525// SetConfidence sets the Confidence field's value.
13526func (s *ProtectiveEquipmentPerson) SetConfidence(v float64) *ProtectiveEquipmentPerson {
13527	s.Confidence = &v
13528	return s
13529}
13530
13531// SetId sets the Id field's value.
13532func (s *ProtectiveEquipmentPerson) SetId(v int64) *ProtectiveEquipmentPerson {
13533	s.Id = &v
13534	return s
13535}
13536
13537// Specifies summary attributes to return from a call to DetectProtectiveEquipment.
13538// You can specify which types of PPE to summarize. You can also specify a minimum
13539// confidence value for detections. Summary information is returned in the Summary
13540// (ProtectiveEquipmentSummary) field of the response from DetectProtectiveEquipment.
13541// The summary includes which persons in an image were detected wearing the
// requested types of personal protective equipment (PPE), which persons were
// detected as not wearing PPE, and the persons for whom a determination could
// not be made. For more information, see ProtectiveEquipmentSummary.
13545type ProtectiveEquipmentSummarizationAttributes struct {
13546	_ struct{} `type:"structure"`
13547
13548	// The minimum confidence level for which you want summary information. The
13549	// confidence level applies to person detection, body part detection, equipment
	// detection, and body part coverage. Amazon Rekognition doesn't return summary
	// information with a confidence value that's lower than this specified value.
	// There isn't a default value.
	//
	// Specify a MinConfidence value that is between 50-100% as DetectProtectiveEquipment
	// returns predictions only where the detection confidence is between 50% and
	// 100%. If you specify a value that is less than 50%, the results are the same
	// as specifying a value of 50%.
13558	//
13559	// MinConfidence is a required field
13560	MinConfidence *float64 `type:"float" required:"true"`
13561
13562	// An array of personal protective equipment types for which you want summary
	// information. If a person is detected wearing a required equipment type,
13564	// the person's ID is added to the PersonsWithRequiredEquipment array field
13565	// returned in ProtectiveEquipmentSummary by DetectProtectiveEquipment.
13566	//
13567	// RequiredEquipmentTypes is a required field
13568	RequiredEquipmentTypes []*string `type:"list" required:"true"`
13569}
13570
13571// String returns the string representation
13572func (s ProtectiveEquipmentSummarizationAttributes) String() string {
13573	return awsutil.Prettify(s)
13574}
13575
13576// GoString returns the string representation
13577func (s ProtectiveEquipmentSummarizationAttributes) GoString() string {
13578	return s.String()
13579}
13580
13581// Validate inspects the fields of the type to determine if they are valid.
13582func (s *ProtectiveEquipmentSummarizationAttributes) Validate() error {
13583	invalidParams := request.ErrInvalidParams{Context: "ProtectiveEquipmentSummarizationAttributes"}
13584	if s.MinConfidence == nil {
13585		invalidParams.Add(request.NewErrParamRequired("MinConfidence"))
13586	}
13587	if s.RequiredEquipmentTypes == nil {
13588		invalidParams.Add(request.NewErrParamRequired("RequiredEquipmentTypes"))
13589	}
13590
13591	if invalidParams.Len() > 0 {
13592		return invalidParams
13593	}
13594	return nil
13595}
13596
13597// SetMinConfidence sets the MinConfidence field's value.
13598func (s *ProtectiveEquipmentSummarizationAttributes) SetMinConfidence(v float64) *ProtectiveEquipmentSummarizationAttributes {
13599	s.MinConfidence = &v
13600	return s
13601}
13602
13603// SetRequiredEquipmentTypes sets the RequiredEquipmentTypes field's value.
13604func (s *ProtectiveEquipmentSummarizationAttributes) SetRequiredEquipmentTypes(v []*string) *ProtectiveEquipmentSummarizationAttributes {
13605	s.RequiredEquipmentTypes = v
13606	return s
13607}
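
// A minimal sketch of requesting summary information, assuming a configured
// *Rekognition client named svc and an already-populated *Image named img;
// FACE_COVER and HAND_COVER are example ProtectiveEquipmentType values.
//
//    out, err := svc.DetectProtectiveEquipment(&DetectProtectiveEquipmentInput{
//        Image: img,
//        SummarizationAttributes: (&ProtectiveEquipmentSummarizationAttributes{}).
//            SetMinConfidence(80).
//            SetRequiredEquipmentTypes([]*string{
//                aws.String("FACE_COVER"),
//                aws.String("HAND_COVER"),
//            }),
//    })
//    if err == nil {
//        fmt.Println(out.Summary)
//    }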
13608
13609// Summary information for required items of personal protective equipment (PPE)
13610// detected on persons by a call to DetectProtectiveEquipment. You specify the
13611// required type of PPE in the SummarizationAttributes (ProtectiveEquipmentSummarizationAttributes)
13612// input parameter. The summary includes which persons were detected wearing
13613// the required personal protective equipment (PersonsWithRequiredEquipment),
13614// which persons were detected as not wearing the required PPE (PersonsWithoutRequiredEquipment),
// and the persons for whom a determination could not be made (PersonsIndeterminate).
13616//
13617// To get a total for each category, use the size of the field array. For example,
13618// to find out how many people were detected as wearing the specified PPE, use
13619// the size of the PersonsWithRequiredEquipment array. If you want to find out
13620// more about a person, such as the location (BoundingBox) of the person on
13621// the image, use the person ID in each array element. Each person ID matches
13622// the ID field of a ProtectiveEquipmentPerson object returned in the Persons
13623// array by DetectProtectiveEquipment.
13624type ProtectiveEquipmentSummary struct {
13625	_ struct{} `type:"structure"`
13626
13627	// An array of IDs for persons where it was not possible to determine if they
13628	// are wearing personal protective equipment.
13629	PersonsIndeterminate []*int64 `type:"list"`
13630
13631	// An array of IDs for persons who are wearing detected personal protective
13632	// equipment.
13633	PersonsWithRequiredEquipment []*int64 `type:"list"`
13634
13635	// An array of IDs for persons who are not wearing all of the types of PPE specified
13636	// in the RequiredEquipmentTypes field of the detected personal protective equipment.
13637	PersonsWithoutRequiredEquipment []*int64 `type:"list"`
13638}
13639
13640// String returns the string representation
13641func (s ProtectiveEquipmentSummary) String() string {
13642	return awsutil.Prettify(s)
13643}
13644
13645// GoString returns the string representation
13646func (s ProtectiveEquipmentSummary) GoString() string {
13647	return s.String()
13648}
13649
13650// SetPersonsIndeterminate sets the PersonsIndeterminate field's value.
13651func (s *ProtectiveEquipmentSummary) SetPersonsIndeterminate(v []*int64) *ProtectiveEquipmentSummary {
13652	s.PersonsIndeterminate = v
13653	return s
13654}
13655
13656// SetPersonsWithRequiredEquipment sets the PersonsWithRequiredEquipment field's value.
13657func (s *ProtectiveEquipmentSummary) SetPersonsWithRequiredEquipment(v []*int64) *ProtectiveEquipmentSummary {
13658	s.PersonsWithRequiredEquipment = v
13659	return s
13660}
13661
13662// SetPersonsWithoutRequiredEquipment sets the PersonsWithoutRequiredEquipment field's value.
13663func (s *ProtectiveEquipmentSummary) SetPersonsWithoutRequiredEquipment(v []*int64) *ProtectiveEquipmentSummary {
13664	s.PersonsWithoutRequiredEquipment = v
13665	return s
13666}
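
// A sketch of totalling each category with len, as described above, assuming a
// *ProtectiveEquipmentSummary named summary taken from a response.
//
//    fmt.Printf("with required PPE: %d, without: %d, indeterminate: %d\n",
//        len(summary.PersonsWithRequiredEquipment),
//        len(summary.PersonsWithoutRequiredEquipment),
//        len(summary.PersonsIndeterminate))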
13667
13668// The number of requests exceeded your throughput limit. If you want to increase
13669// this limit, contact Amazon Rekognition.
13670type ProvisionedThroughputExceededException struct {
13671	_            struct{}                  `type:"structure"`
13672	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
13673
13674	Message_ *string `locationName:"message" type:"string"`
13675}
13676
13677// String returns the string representation
13678func (s ProvisionedThroughputExceededException) String() string {
13679	return awsutil.Prettify(s)
13680}
13681
13682// GoString returns the string representation
13683func (s ProvisionedThroughputExceededException) GoString() string {
13684	return s.String()
13685}
13686
13687func newErrorProvisionedThroughputExceededException(v protocol.ResponseMetadata) error {
13688	return &ProvisionedThroughputExceededException{
13689		RespMetadata: v,
13690	}
13691}
13692
13693// Code returns the exception type name.
13694func (s *ProvisionedThroughputExceededException) Code() string {
13695	return "ProvisionedThroughputExceededException"
13696}
13697
13698// Message returns the exception's message.
13699func (s *ProvisionedThroughputExceededException) Message() string {
13700	if s.Message_ != nil {
13701		return *s.Message_
13702	}
13703	return ""
13704}
13705
13706// OrigErr always returns nil, satisfies awserr.Error interface.
13707func (s *ProvisionedThroughputExceededException) OrigErr() error {
13708	return nil
13709}
13710
13711func (s *ProvisionedThroughputExceededException) Error() string {
13712	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
13713}
13714
// StatusCode returns the HTTP status code for the request's response error.
13716func (s *ProvisionedThroughputExceededException) StatusCode() int {
13717	return s.RespMetadata.StatusCode
13718}
13719
// RequestID returns the service's response RequestID for the request.
13721func (s *ProvisionedThroughputExceededException) RequestID() string {
13722	return s.RespMetadata.RequestID
13723}
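
// A minimal sketch of detecting this exception after a call, using the
// aws/awserr package and the generated ErrCode constant; any operation could
// stand in for the ListCollections call shown.
//
//    _, err := svc.ListCollections(&ListCollectionsInput{})
//    if aerr, ok := err.(awserr.Error); ok &&
//        aerr.Code() == ErrCodeProvisionedThroughputExceededException {
//        // back off and retry, or ask Amazon Rekognition to raise the limit
//    }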
13724
13725type RecognizeCelebritiesInput struct {
13726	_ struct{} `type:"structure"`
13727
13728	// The input image as base64-encoded bytes or an S3 object. If you use the AWS
13729	// CLI to call Amazon Rekognition operations, passing base64-encoded image bytes
13730	// is not supported.
13731	//
13732	// If you are using an AWS SDK to call Amazon Rekognition, you might not need
13733	// to base64-encode image bytes passed using the Bytes field. For more information,
13734	// see Images in the Amazon Rekognition developer guide.
13735	//
13736	// Image is a required field
13737	Image *Image `type:"structure" required:"true"`
13738}
13739
13740// String returns the string representation
13741func (s RecognizeCelebritiesInput) String() string {
13742	return awsutil.Prettify(s)
13743}
13744
13745// GoString returns the string representation
13746func (s RecognizeCelebritiesInput) GoString() string {
13747	return s.String()
13748}
13749
13750// Validate inspects the fields of the type to determine if they are valid.
13751func (s *RecognizeCelebritiesInput) Validate() error {
13752	invalidParams := request.ErrInvalidParams{Context: "RecognizeCelebritiesInput"}
13753	if s.Image == nil {
13754		invalidParams.Add(request.NewErrParamRequired("Image"))
13755	}
13756	if s.Image != nil {
13757		if err := s.Image.Validate(); err != nil {
13758			invalidParams.AddNested("Image", err.(request.ErrInvalidParams))
13759		}
13760	}
13761
13762	if invalidParams.Len() > 0 {
13763		return invalidParams
13764	}
13765	return nil
13766}
13767
13768// SetImage sets the Image field's value.
13769func (s *RecognizeCelebritiesInput) SetImage(v *Image) *RecognizeCelebritiesInput {
13770	s.Image = v
13771	return s
13772}
13773
13774type RecognizeCelebritiesOutput struct {
13775	_ struct{} `type:"structure"`
13776
13777	// Details about each celebrity found in the image. Amazon Rekognition can detect
13778	// a maximum of 64 celebrities in an image.
13779	CelebrityFaces []*Celebrity `type:"list"`
13780
13781	// The orientation of the input image (counterclockwise direction). If your
13782	// application displays the image, you can use this value to correct the orientation.
13783	// The bounding box coordinates returned in CelebrityFaces and UnrecognizedFaces
13784	// represent face locations before the image orientation is corrected.
13785	//
13786	// If the input image is in .jpeg format, it might contain exchangeable image
13787	// (Exif) metadata that includes the image's orientation. If so, and the Exif
13788	// metadata for the input image populates the orientation field, the value of
13789	// OrientationCorrection is null. The CelebrityFaces and UnrecognizedFaces bounding
13790	// box coordinates represent face locations after Exif metadata is used to correct
13791	// the image orientation. Images in .png format don't contain Exif metadata.
13792	OrientationCorrection *string `type:"string" enum:"OrientationCorrection"`
13793
13794	// Details about each unrecognized face in the image.
13795	UnrecognizedFaces []*ComparedFace `type:"list"`
13796}
13797
13798// String returns the string representation
13799func (s RecognizeCelebritiesOutput) String() string {
13800	return awsutil.Prettify(s)
13801}
13802
13803// GoString returns the string representation
13804func (s RecognizeCelebritiesOutput) GoString() string {
13805	return s.String()
13806}
13807
13808// SetCelebrityFaces sets the CelebrityFaces field's value.
13809func (s *RecognizeCelebritiesOutput) SetCelebrityFaces(v []*Celebrity) *RecognizeCelebritiesOutput {
13810	s.CelebrityFaces = v
13811	return s
13812}
13813
13814// SetOrientationCorrection sets the OrientationCorrection field's value.
13815func (s *RecognizeCelebritiesOutput) SetOrientationCorrection(v string) *RecognizeCelebritiesOutput {
13816	s.OrientationCorrection = &v
13817	return s
13818}
13819
13820// SetUnrecognizedFaces sets the UnrecognizedFaces field's value.
13821func (s *RecognizeCelebritiesOutput) SetUnrecognizedFaces(v []*ComparedFace) *RecognizeCelebritiesOutput {
13822	s.UnrecognizedFaces = v
13823	return s
13824}
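
// A minimal sketch of recognizing celebrities in an S3-hosted image, assuming
// a configured *Rekognition client named svc; the bucket and key are
// placeholders.
//
//    out, err := svc.RecognizeCelebrities(&RecognizeCelebritiesInput{
//        Image: &Image{S3Object: &S3Object{
//            Bucket: aws.String("my-bucket"),
//            Name:   aws.String("photo.jpg"),
//        }},
//    })
//    if err == nil {
//        for _, c := range out.CelebrityFaces {
//            fmt.Println(aws.StringValue(c.Name))
//        }
//    }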
13825
13826// Specifies a location within the frame that Rekognition checks for text. Uses
13827// a BoundingBox object to set a region of the screen.
13828//
// A word is included in a region if more than half of the word falls within
// that region. If there is more than one region, the word is compared against
// every region of the screen; any word that is more than half inside a region
// is kept in the results.
13832type RegionOfInterest struct {
13833	_ struct{} `type:"structure"`
13834
13835	// The box representing a region of interest on screen.
13836	BoundingBox *BoundingBox `type:"structure"`
13837}
13838
13839// String returns the string representation
13840func (s RegionOfInterest) String() string {
13841	return awsutil.Prettify(s)
13842}
13843
13844// GoString returns the string representation
13845func (s RegionOfInterest) GoString() string {
13846	return s.String()
13847}
13848
13849// SetBoundingBox sets the BoundingBox field's value.
13850func (s *RegionOfInterest) SetBoundingBox(v *BoundingBox) *RegionOfInterest {
13851	s.BoundingBox = v
13852	return s
13853}
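
// A sketch of marking the top half of the frame as a region of interest,
// built with the ratio-based BoundingBox setters defined elsewhere in this
// package.
//
//    region := (&RegionOfInterest{}).SetBoundingBox(
//        (&BoundingBox{}).SetLeft(0).SetTop(0).SetWidth(1).SetHeight(0.5))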
13854
13855// A collection with the specified ID already exists.
13856type ResourceAlreadyExistsException struct {
13857	_            struct{}                  `type:"structure"`
13858	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
13859
13860	Message_ *string `locationName:"message" type:"string"`
13861}
13862
13863// String returns the string representation
13864func (s ResourceAlreadyExistsException) String() string {
13865	return awsutil.Prettify(s)
13866}
13867
13868// GoString returns the string representation
13869func (s ResourceAlreadyExistsException) GoString() string {
13870	return s.String()
13871}
13872
13873func newErrorResourceAlreadyExistsException(v protocol.ResponseMetadata) error {
13874	return &ResourceAlreadyExistsException{
13875		RespMetadata: v,
13876	}
13877}
13878
13879// Code returns the exception type name.
13880func (s *ResourceAlreadyExistsException) Code() string {
13881	return "ResourceAlreadyExistsException"
13882}
13883
13884// Message returns the exception's message.
13885func (s *ResourceAlreadyExistsException) Message() string {
13886	if s.Message_ != nil {
13887		return *s.Message_
13888	}
13889	return ""
13890}
13891
13892// OrigErr always returns nil, satisfies awserr.Error interface.
13893func (s *ResourceAlreadyExistsException) OrigErr() error {
13894	return nil
13895}
13896
13897func (s *ResourceAlreadyExistsException) Error() string {
13898	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
13899}
13900
// StatusCode returns the HTTP status code for the request's response error.
13902func (s *ResourceAlreadyExistsException) StatusCode() int {
13903	return s.RespMetadata.StatusCode
13904}
13905
// RequestID returns the service's response RequestID for the request.
13907func (s *ResourceAlreadyExistsException) RequestID() string {
13908	return s.RespMetadata.RequestID
13909}
13910
13911// The specified resource is already being used.
13912type ResourceInUseException struct {
13913	_            struct{}                  `type:"structure"`
13914	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
13915
13916	Message_ *string `locationName:"message" type:"string"`
13917}
13918
13919// String returns the string representation
13920func (s ResourceInUseException) String() string {
13921	return awsutil.Prettify(s)
13922}
13923
13924// GoString returns the string representation
13925func (s ResourceInUseException) GoString() string {
13926	return s.String()
13927}
13928
13929func newErrorResourceInUseException(v protocol.ResponseMetadata) error {
13930	return &ResourceInUseException{
13931		RespMetadata: v,
13932	}
13933}
13934
13935// Code returns the exception type name.
13936func (s *ResourceInUseException) Code() string {
13937	return "ResourceInUseException"
13938}
13939
13940// Message returns the exception's message.
13941func (s *ResourceInUseException) Message() string {
13942	if s.Message_ != nil {
13943		return *s.Message_
13944	}
13945	return ""
13946}
13947
13948// OrigErr always returns nil, satisfies awserr.Error interface.
13949func (s *ResourceInUseException) OrigErr() error {
13950	return nil
13951}
13952
13953func (s *ResourceInUseException) Error() string {
13954	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
13955}
13956
// StatusCode returns the HTTP status code for the request's response error.
13958func (s *ResourceInUseException) StatusCode() int {
13959	return s.RespMetadata.StatusCode
13960}
13961
// RequestID returns the service's response RequestID for the request.
13963func (s *ResourceInUseException) RequestID() string {
13964	return s.RespMetadata.RequestID
13965}
13966
13967// The collection specified in the request cannot be found.
13968type ResourceNotFoundException struct {
13969	_            struct{}                  `type:"structure"`
13970	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
13971
13972	Message_ *string `locationName:"message" type:"string"`
13973}
13974
13975// String returns the string representation
13976func (s ResourceNotFoundException) String() string {
13977	return awsutil.Prettify(s)
13978}
13979
13980// GoString returns the string representation
13981func (s ResourceNotFoundException) GoString() string {
13982	return s.String()
13983}
13984
13985func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error {
13986	return &ResourceNotFoundException{
13987		RespMetadata: v,
13988	}
13989}
13990
13991// Code returns the exception type name.
13992func (s *ResourceNotFoundException) Code() string {
13993	return "ResourceNotFoundException"
13994}
13995
13996// Message returns the exception's message.
13997func (s *ResourceNotFoundException) Message() string {
13998	if s.Message_ != nil {
13999		return *s.Message_
14000	}
14001	return ""
14002}
14003
14004// OrigErr always returns nil, satisfies awserr.Error interface.
14005func (s *ResourceNotFoundException) OrigErr() error {
14006	return nil
14007}
14008
14009func (s *ResourceNotFoundException) Error() string {
14010	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
14011}
14012
// StatusCode returns the HTTP status code for the request's response error.
14014func (s *ResourceNotFoundException) StatusCode() int {
14015	return s.RespMetadata.StatusCode
14016}
14017
// RequestID returns the service's response RequestID for the request.
14019func (s *ResourceNotFoundException) RequestID() string {
14020	return s.RespMetadata.RequestID
14021}
14022
14023// The requested resource isn't ready. For example, this exception occurs when
14024// you call DetectCustomLabels with a model version that isn't deployed.
14025type ResourceNotReadyException struct {
14026	_            struct{}                  `type:"structure"`
14027	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
14028
14029	Message_ *string `locationName:"message" type:"string"`
14030}
14031
14032// String returns the string representation
14033func (s ResourceNotReadyException) String() string {
14034	return awsutil.Prettify(s)
14035}
14036
14037// GoString returns the string representation
14038func (s ResourceNotReadyException) GoString() string {
14039	return s.String()
14040}
14041
14042func newErrorResourceNotReadyException(v protocol.ResponseMetadata) error {
14043	return &ResourceNotReadyException{
14044		RespMetadata: v,
14045	}
14046}
14047
14048// Code returns the exception type name.
14049func (s *ResourceNotReadyException) Code() string {
14050	return "ResourceNotReadyException"
14051}
14052
14053// Message returns the exception's message.
14054func (s *ResourceNotReadyException) Message() string {
14055	if s.Message_ != nil {
14056		return *s.Message_
14057	}
14058	return ""
14059}
14060
14061// OrigErr always returns nil, satisfies awserr.Error interface.
14062func (s *ResourceNotReadyException) OrigErr() error {
14063	return nil
14064}
14065
14066func (s *ResourceNotReadyException) Error() string {
14067	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
14068}
14069
// StatusCode returns the HTTP status code for the request's response error.
14071func (s *ResourceNotReadyException) StatusCode() int {
14072	return s.RespMetadata.StatusCode
14073}
14074
// RequestID returns the service's response RequestID for the request.
14076func (s *ResourceNotReadyException) RequestID() string {
14077	return s.RespMetadata.RequestID
14078}
14079
14080// Provides the S3 bucket name and object name.
14081//
14082// The region for the S3 bucket containing the S3 object must match the region
14083// you use for Amazon Rekognition operations.
14084//
14085// For Amazon Rekognition to process an S3 object, the user must have permission
14086// to access the S3 object. For more information, see Resource-Based Policies
14087// in the Amazon Rekognition Developer Guide.
14088type S3Object struct {
14089	_ struct{} `type:"structure"`
14090
14091	// Name of the S3 bucket.
14092	Bucket *string `min:"3" type:"string"`
14093
14094	// S3 object key name.
14095	Name *string `min:"1" type:"string"`
14096
	// If the bucket has versioning enabled, you can specify the object version.
14098	Version *string `min:"1" type:"string"`
14099}
14100
14101// String returns the string representation
14102func (s S3Object) String() string {
14103	return awsutil.Prettify(s)
14104}
14105
14106// GoString returns the string representation
14107func (s S3Object) GoString() string {
14108	return s.String()
14109}
14110
14111// Validate inspects the fields of the type to determine if they are valid.
14112func (s *S3Object) Validate() error {
14113	invalidParams := request.ErrInvalidParams{Context: "S3Object"}
14114	if s.Bucket != nil && len(*s.Bucket) < 3 {
14115		invalidParams.Add(request.NewErrParamMinLen("Bucket", 3))
14116	}
14117	if s.Name != nil && len(*s.Name) < 1 {
14118		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
14119	}
14120	if s.Version != nil && len(*s.Version) < 1 {
14121		invalidParams.Add(request.NewErrParamMinLen("Version", 1))
14122	}
14123
14124	if invalidParams.Len() > 0 {
14125		return invalidParams
14126	}
14127	return nil
14128}
14129
14130// SetBucket sets the Bucket field's value.
14131func (s *S3Object) SetBucket(v string) *S3Object {
14132	s.Bucket = &v
14133	return s
14134}
14135
14136// SetName sets the Name field's value.
14137func (s *S3Object) SetName(v string) *S3Object {
14138	s.Name = &v
14139	return s
14140}
14141
14142// SetVersion sets the Version field's value.
14143func (s *S3Object) SetVersion(v string) *S3Object {
14144	s.Version = &v
14145	return s
14146}
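
// A sketch of referencing a versioned object, with placeholder bucket, key,
// and version values; Validate enforces the minimum lengths noted on the
// fields.
//
//    obj := (&S3Object{}).
//        SetBucket("my-bucket").
//        SetName("images/face.png").
//        SetVersion("example-version-id")
//    if err := obj.Validate(); err != nil {
//        fmt.Println(err)
//    }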
14147
14148type SearchFacesByImageInput struct {
14149	_ struct{} `type:"structure"`
14150
14151	// ID of the collection to search.
14152	//
14153	// CollectionId is a required field
14154	CollectionId *string `min:"1" type:"string" required:"true"`
14155
14156	// (Optional) Specifies the minimum confidence in the face match to return.
14157	// For example, don't return any matches where confidence in matches is less
14158	// than 70%. The default value is 80%.
14159	FaceMatchThreshold *float64 `type:"float"`
14160
14161	// The input image as base64-encoded bytes or an S3 object. If you use the AWS
14162	// CLI to call Amazon Rekognition operations, passing base64-encoded image bytes
14163	// is not supported.
14164	//
14165	// If you are using an AWS SDK to call Amazon Rekognition, you might not need
14166	// to base64-encode image bytes passed using the Bytes field. For more information,
14167	// see Images in the Amazon Rekognition developer guide.
14168	//
14169	// Image is a required field
14170	Image *Image `type:"structure" required:"true"`
14171
14172	// Maximum number of faces to return. The operation returns the maximum number
14173	// of faces with the highest confidence in the match.
14174	MaxFaces *int64 `min:"1" type:"integer"`
14175
14176	// A filter that specifies a quality bar for how much filtering is done to identify
14177	// faces. Filtered faces aren't searched for in the collection. If you specify
14178	// AUTO, Amazon Rekognition chooses the quality bar. If you specify LOW, MEDIUM,
14179	// or HIGH, filtering removes all faces that don’t meet the chosen quality
14180	// bar. The quality bar is based on a variety of common use cases. Low-quality
14181	// detections can occur for a number of reasons. Some examples are an object
14182	// that's misidentified as a face, a face that's too blurry, or a face with
14183	// a pose that's too extreme to use. If you specify NONE, no filtering is performed.
14184	// The default value is NONE.
14185	//
14186	// To use quality filtering, the collection you are using must be associated
14187	// with version 3 of the face model or higher.
14188	QualityFilter *string `type:"string" enum:"QualityFilter"`
14189}
14190
14191// String returns the string representation
14192func (s SearchFacesByImageInput) String() string {
14193	return awsutil.Prettify(s)
14194}
14195
14196// GoString returns the string representation
14197func (s SearchFacesByImageInput) GoString() string {
14198	return s.String()
14199}
14200
14201// Validate inspects the fields of the type to determine if they are valid.
14202func (s *SearchFacesByImageInput) Validate() error {
14203	invalidParams := request.ErrInvalidParams{Context: "SearchFacesByImageInput"}
14204	if s.CollectionId == nil {
14205		invalidParams.Add(request.NewErrParamRequired("CollectionId"))
14206	}
14207	if s.CollectionId != nil && len(*s.CollectionId) < 1 {
14208		invalidParams.Add(request.NewErrParamMinLen("CollectionId", 1))
14209	}
14210	if s.Image == nil {
14211		invalidParams.Add(request.NewErrParamRequired("Image"))
14212	}
14213	if s.MaxFaces != nil && *s.MaxFaces < 1 {
14214		invalidParams.Add(request.NewErrParamMinValue("MaxFaces", 1))
14215	}
14216	if s.Image != nil {
14217		if err := s.Image.Validate(); err != nil {
14218			invalidParams.AddNested("Image", err.(request.ErrInvalidParams))
14219		}
14220	}
14221
14222	if invalidParams.Len() > 0 {
14223		return invalidParams
14224	}
14225	return nil
14226}
14227
14228// SetCollectionId sets the CollectionId field's value.
14229func (s *SearchFacesByImageInput) SetCollectionId(v string) *SearchFacesByImageInput {
14230	s.CollectionId = &v
14231	return s
14232}
14233
14234// SetFaceMatchThreshold sets the FaceMatchThreshold field's value.
14235func (s *SearchFacesByImageInput) SetFaceMatchThreshold(v float64) *SearchFacesByImageInput {
14236	s.FaceMatchThreshold = &v
14237	return s
14238}
14239
14240// SetImage sets the Image field's value.
14241func (s *SearchFacesByImageInput) SetImage(v *Image) *SearchFacesByImageInput {
14242	s.Image = v
14243	return s
14244}
14245
14246// SetMaxFaces sets the MaxFaces field's value.
14247func (s *SearchFacesByImageInput) SetMaxFaces(v int64) *SearchFacesByImageInput {
14248	s.MaxFaces = &v
14249	return s
14250}
14251
14252// SetQualityFilter sets the QualityFilter field's value.
14253func (s *SearchFacesByImageInput) SetQualityFilter(v string) *SearchFacesByImageInput {
14254	s.QualityFilter = &v
14255	return s
14256}
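
// A minimal sketch of searching a collection by image, assuming a configured
// *Rekognition client named svc; the collection ID and S3 location are
// placeholders.
//
//    out, err := svc.SearchFacesByImage(&SearchFacesByImageInput{
//        CollectionId:       aws.String("my-collection"),
//        FaceMatchThreshold: aws.Float64(90),
//        Image: &Image{S3Object: &S3Object{
//            Bucket: aws.String("my-bucket"),
//            Name:   aws.String("query.jpg"),
//        }},
//        MaxFaces: aws.Int64(5),
//    })
//    if err == nil {
//        fmt.Printf("%d face matches\n", len(out.FaceMatches))
//    }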
14257
14258type SearchFacesByImageOutput struct {
14259	_ struct{} `type:"structure"`
14260
14261	// An array of faces that match the input face, along with the confidence in
14262	// the match.
14263	FaceMatches []*FaceMatch `type:"list"`
14264
14265	// Version number of the face detection model associated with the input collection
14266	// (CollectionId).
14267	FaceModelVersion *string `type:"string"`
14268
14269	// The bounding box around the face in the input image that Amazon Rekognition
14270	// used for the search.
14271	SearchedFaceBoundingBox *BoundingBox `type:"structure"`
14272
	// The level of confidence that the searchedFaceBoundingBox contains a face.
14274	SearchedFaceConfidence *float64 `type:"float"`
14275}
14276
14277// String returns the string representation
14278func (s SearchFacesByImageOutput) String() string {
14279	return awsutil.Prettify(s)
14280}
14281
14282// GoString returns the string representation
14283func (s SearchFacesByImageOutput) GoString() string {
14284	return s.String()
14285}
14286
14287// SetFaceMatches sets the FaceMatches field's value.
14288func (s *SearchFacesByImageOutput) SetFaceMatches(v []*FaceMatch) *SearchFacesByImageOutput {
14289	s.FaceMatches = v
14290	return s
14291}
14292
14293// SetFaceModelVersion sets the FaceModelVersion field's value.
14294func (s *SearchFacesByImageOutput) SetFaceModelVersion(v string) *SearchFacesByImageOutput {
14295	s.FaceModelVersion = &v
14296	return s
14297}
14298
14299// SetSearchedFaceBoundingBox sets the SearchedFaceBoundingBox field's value.
14300func (s *SearchFacesByImageOutput) SetSearchedFaceBoundingBox(v *BoundingBox) *SearchFacesByImageOutput {
14301	s.SearchedFaceBoundingBox = v
14302	return s
14303}
14304
14305// SetSearchedFaceConfidence sets the SearchedFaceConfidence field's value.
14306func (s *SearchFacesByImageOutput) SetSearchedFaceConfidence(v float64) *SearchFacesByImageOutput {
14307	s.SearchedFaceConfidence = &v
14308	return s
14309}
14310
14311type SearchFacesInput struct {
14312	_ struct{} `type:"structure"`
14313
14314	// ID of the collection the face belongs to.
14315	//
14316	// CollectionId is a required field
14317	CollectionId *string `min:"1" type:"string" required:"true"`
14318
14319	// ID of a face to find matches for in the collection.
14320	//
14321	// FaceId is a required field
14322	FaceId *string `type:"string" required:"true"`
14323
14324	// Optional value specifying the minimum confidence in the face match to return.
14325	// For example, don't return any matches where confidence in matches is less
14326	// than 70%. The default value is 80%.
14327	FaceMatchThreshold *float64 `type:"float"`
14328
14329	// Maximum number of faces to return. The operation returns the maximum number
14330	// of faces with the highest confidence in the match.
14331	MaxFaces *int64 `min:"1" type:"integer"`
14332}
14333
14334// String returns the string representation
14335func (s SearchFacesInput) String() string {
14336	return awsutil.Prettify(s)
14337}
14338
14339// GoString returns the string representation
14340func (s SearchFacesInput) GoString() string {
14341	return s.String()
14342}
14343
14344// Validate inspects the fields of the type to determine if they are valid.
14345func (s *SearchFacesInput) Validate() error {
14346	invalidParams := request.ErrInvalidParams{Context: "SearchFacesInput"}
14347	if s.CollectionId == nil {
14348		invalidParams.Add(request.NewErrParamRequired("CollectionId"))
14349	}
14350	if s.CollectionId != nil && len(*s.CollectionId) < 1 {
14351		invalidParams.Add(request.NewErrParamMinLen("CollectionId", 1))
14352	}
14353	if s.FaceId == nil {
14354		invalidParams.Add(request.NewErrParamRequired("FaceId"))
14355	}
14356	if s.MaxFaces != nil && *s.MaxFaces < 1 {
14357		invalidParams.Add(request.NewErrParamMinValue("MaxFaces", 1))
14358	}
14359
14360	if invalidParams.Len() > 0 {
14361		return invalidParams
14362	}
14363	return nil
14364}
14365
14366// SetCollectionId sets the CollectionId field's value.
14367func (s *SearchFacesInput) SetCollectionId(v string) *SearchFacesInput {
14368	s.CollectionId = &v
14369	return s
14370}
14371
14372// SetFaceId sets the FaceId field's value.
14373func (s *SearchFacesInput) SetFaceId(v string) *SearchFacesInput {
14374	s.FaceId = &v
14375	return s
14376}
14377
14378// SetFaceMatchThreshold sets the FaceMatchThreshold field's value.
14379func (s *SearchFacesInput) SetFaceMatchThreshold(v float64) *SearchFacesInput {
14380	s.FaceMatchThreshold = &v
14381	return s
14382}
14383
14384// SetMaxFaces sets the MaxFaces field's value.
14385func (s *SearchFacesInput) SetMaxFaces(v int64) *SearchFacesInput {
14386	s.MaxFaces = &v
14387	return s
14388}
14389
14390type SearchFacesOutput struct {
14391	_ struct{} `type:"structure"`
14392
14393	// An array of faces that matched the input face, along with the confidence
14394	// in the match.
14395	FaceMatches []*FaceMatch `type:"list"`
14396
14397	// Version number of the face detection model associated with the input collection
14398	// (CollectionId).
14399	FaceModelVersion *string `type:"string"`
14400
14401	// ID of the face that was searched for matches in a collection.
14402	SearchedFaceId *string `type:"string"`
14403}
14404
14405// String returns the string representation
14406func (s SearchFacesOutput) String() string {
14407	return awsutil.Prettify(s)
14408}
14409
14410// GoString returns the string representation
14411func (s SearchFacesOutput) GoString() string {
14412	return s.String()
14413}
14414
14415// SetFaceMatches sets the FaceMatches field's value.
14416func (s *SearchFacesOutput) SetFaceMatches(v []*FaceMatch) *SearchFacesOutput {
14417	s.FaceMatches = v
14418	return s
14419}
14420
14421// SetFaceModelVersion sets the FaceModelVersion field's value.
14422func (s *SearchFacesOutput) SetFaceModelVersion(v string) *SearchFacesOutput {
14423	s.FaceModelVersion = &v
14424	return s
14425}
14426
14427// SetSearchedFaceId sets the SearchedFaceId field's value.
14428func (s *SearchFacesOutput) SetSearchedFaceId(v string) *SearchFacesOutput {
14429	s.SearchedFaceId = &v
14430	return s
14431}
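
// A sketch of searching by a previously indexed face ID, with placeholder
// IDs, assuming a configured *Rekognition client named svc.
//
//    out, err := svc.SearchFaces(&SearchFacesInput{
//        CollectionId: aws.String("my-collection"),
//        FaceId:       aws.String("11111111-2222-3333-4444-555555555555"),
//        MaxFaces:     aws.Int64(10),
//    })
//    if err == nil {
//        for _, m := range out.FaceMatches {
//            fmt.Printf("%s %.1f%%\n",
//                aws.StringValue(m.Face.FaceId), aws.Float64Value(m.Similarity))
//        }
//    }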
14432
14433// A technical cue or shot detection segment detected in a video. An array of
14434// SegmentDetection objects containing all segments detected in a stored video
14435// is returned by GetSegmentDetection.
14436type SegmentDetection struct {
14437	_ struct{} `type:"structure"`
14438
14439	// The duration of the detected segment in milliseconds.
14440	DurationMillis *int64 `type:"long"`
14441
14442	// The duration of the timecode for the detected segment in SMPTE format.
14443	DurationSMPTE *string `type:"string"`
14444
14445	// The frame-accurate SMPTE timecode, from the start of a video, for the end
	// of a detected segment. EndTimecodeSMPTE is in HH:MM:SS:fr format (and ;fr
	// for drop frame rates).
14448	EndTimecodeSMPTE *string `type:"string"`
14449
14450	// The end time of the detected segment, in milliseconds, from the start of
14451	// the video. This value is rounded down.
14452	EndTimestampMillis *int64 `type:"long"`
14453
14454	// If the segment is a shot detection, contains information about the shot detection.
14455	ShotSegment *ShotSegment `type:"structure"`
14456
14457	// The frame-accurate SMPTE timecode, from the start of a video, for the start
	// of a detected segment. StartTimecodeSMPTE is in HH:MM:SS:fr format (and
	// ;fr for drop frame rates).
14460	StartTimecodeSMPTE *string `type:"string"`
14461
14462	// The start time of the detected segment in milliseconds from the start of
14463	// the video. This value is rounded down. For example, if the actual timestamp
	// is 100.6667 milliseconds, Amazon Rekognition Video returns a value of 100
	// milliseconds.
14466	StartTimestampMillis *int64 `type:"long"`
14467
14468	// If the segment is a technical cue, contains information about the technical
14469	// cue.
14470	TechnicalCueSegment *TechnicalCueSegment `type:"structure"`
14471
14472	// The type of the segment. Valid values are TECHNICAL_CUE and SHOT.
14473	Type *string `type:"string" enum:"SegmentType"`
14474}
14475
14476// String returns the string representation
14477func (s SegmentDetection) String() string {
14478	return awsutil.Prettify(s)
14479}
14480
14481// GoString returns the string representation
14482func (s SegmentDetection) GoString() string {
14483	return s.String()
14484}
14485
14486// SetDurationMillis sets the DurationMillis field's value.
14487func (s *SegmentDetection) SetDurationMillis(v int64) *SegmentDetection {
14488	s.DurationMillis = &v
14489	return s
14490}
14491
14492// SetDurationSMPTE sets the DurationSMPTE field's value.
14493func (s *SegmentDetection) SetDurationSMPTE(v string) *SegmentDetection {
14494	s.DurationSMPTE = &v
14495	return s
14496}
14497
14498// SetEndTimecodeSMPTE sets the EndTimecodeSMPTE field's value.
14499func (s *SegmentDetection) SetEndTimecodeSMPTE(v string) *SegmentDetection {
14500	s.EndTimecodeSMPTE = &v
14501	return s
14502}
14503
14504// SetEndTimestampMillis sets the EndTimestampMillis field's value.
14505func (s *SegmentDetection) SetEndTimestampMillis(v int64) *SegmentDetection {
14506	s.EndTimestampMillis = &v
14507	return s
14508}
14509
14510// SetShotSegment sets the ShotSegment field's value.
14511func (s *SegmentDetection) SetShotSegment(v *ShotSegment) *SegmentDetection {
14512	s.ShotSegment = v
14513	return s
14514}
14515
14516// SetStartTimecodeSMPTE sets the StartTimecodeSMPTE field's value.
14517func (s *SegmentDetection) SetStartTimecodeSMPTE(v string) *SegmentDetection {
14518	s.StartTimecodeSMPTE = &v
14519	return s
14520}
14521
14522// SetStartTimestampMillis sets the StartTimestampMillis field's value.
14523func (s *SegmentDetection) SetStartTimestampMillis(v int64) *SegmentDetection {
14524	s.StartTimestampMillis = &v
14525	return s
14526}
14527
14528// SetTechnicalCueSegment sets the TechnicalCueSegment field's value.
14529func (s *SegmentDetection) SetTechnicalCueSegment(v *TechnicalCueSegment) *SegmentDetection {
14530	s.TechnicalCueSegment = v
14531	return s
14532}
14533
14534// SetType sets the Type field's value.
14535func (s *SegmentDetection) SetType(v string) *SegmentDetection {
14536	s.Type = &v
14537	return s
14538}
14539
14540// Information about the type of a segment requested in a call to StartSegmentDetection.
14541// An array of SegmentTypeInfo objects is returned by the response from GetSegmentDetection.
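//
// A minimal sketch of listing the segment types a job was started with,
// assuming a GetSegmentDetection response named out whose SelectedSegmentTypes
// field holds these objects (the variable name is a hypothetical placeholder):
//
//    for _, info := range out.SelectedSegmentTypes {
//        fmt.Println(*info.Type, *info.ModelVersion)
//    }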
14542type SegmentTypeInfo struct {
14543	_ struct{} `type:"structure"`
14544
14545	// The version of the model used to detect segments.
14546	ModelVersion *string `type:"string"`
14547
14548	// The type of a segment (technical cue or shot detection).
14549	Type *string `type:"string" enum:"SegmentType"`
14550}
14551
14552// String returns the string representation
14553func (s SegmentTypeInfo) String() string {
14554	return awsutil.Prettify(s)
14555}
14556
14557// GoString returns the string representation
14558func (s SegmentTypeInfo) GoString() string {
14559	return s.String()
14560}
14561
14562// SetModelVersion sets the ModelVersion field's value.
14563func (s *SegmentTypeInfo) SetModelVersion(v string) *SegmentTypeInfo {
14564	s.ModelVersion = &v
14565	return s
14566}
14567
14568// SetType sets the Type field's value.
14569func (s *SegmentTypeInfo) SetType(v string) *SegmentTypeInfo {
14570	s.Type = &v
14571	return s
14572}
14573
14574// The size of the collection exceeds the allowed limit. For more information,
14575// see Limits in Amazon Rekognition in the Amazon Rekognition Developer Guide.
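//
// A minimal sketch of checking for this error after a call, assuming a
// configured client svc, the awserr package from
// github.com/aws/aws-sdk-go/aws/awserr, and a prepared input value (a
// hypothetical placeholder):
//
//    _, err := svc.IndexFaces(input)
//    if aerr, ok := err.(awserr.Error); ok &&
//        aerr.Code() == ErrCodeServiceQuotaExceededException {
//        // The collection is full; remove faces or use another collection.
//    }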
14576type ServiceQuotaExceededException struct {
14577	_            struct{}                  `type:"structure"`
14578	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
14579
14580	Message_ *string `locationName:"message" type:"string"`
14581}
14582
14583// String returns the string representation
14584func (s ServiceQuotaExceededException) String() string {
14585	return awsutil.Prettify(s)
14586}
14587
14588// GoString returns the string representation
14589func (s ServiceQuotaExceededException) GoString() string {
14590	return s.String()
14591}
14592
14593func newErrorServiceQuotaExceededException(v protocol.ResponseMetadata) error {
14594	return &ServiceQuotaExceededException{
14595		RespMetadata: v,
14596	}
14597}
14598
14599// Code returns the exception type name.
14600func (s *ServiceQuotaExceededException) Code() string {
14601	return "ServiceQuotaExceededException"
14602}
14603
14604// Message returns the exception's message.
14605func (s *ServiceQuotaExceededException) Message() string {
14606	if s.Message_ != nil {
14607		return *s.Message_
14608	}
14609	return ""
14610}
14611
// OrigErr always returns nil, satisfying the awserr.Error interface.
14613func (s *ServiceQuotaExceededException) OrigErr() error {
14614	return nil
14615}
14616
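// Error satisfies the error interface.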
14617func (s *ServiceQuotaExceededException) Error() string {
14618	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
14619}
14620
// StatusCode returns the HTTP status code for the request's response error.
14622func (s *ServiceQuotaExceededException) StatusCode() int {
14623	return s.RespMetadata.StatusCode
14624}
14625
// RequestID returns the service's response RequestID for the request.
14627func (s *ServiceQuotaExceededException) RequestID() string {
14628	return s.RespMetadata.RequestID
14629}
14630
14631// Information about a shot detection segment detected in a video. For more
14632// information, see SegmentDetection.
14633type ShotSegment struct {
14634	_ struct{} `type:"structure"`
14635
14636	// The confidence that Amazon Rekognition Video has in the accuracy of the detected
14637	// segment.
14638	Confidence *float64 `min:"50" type:"float"`
14639
	// An identifier for a shot detection segment detected in a video.
14641	Index *int64 `type:"long"`
14642}
14643
14644// String returns the string representation
14645func (s ShotSegment) String() string {
14646	return awsutil.Prettify(s)
14647}
14648
14649// GoString returns the string representation
14650func (s ShotSegment) GoString() string {
14651	return s.String()
14652}
14653
14654// SetConfidence sets the Confidence field's value.
14655func (s *ShotSegment) SetConfidence(v float64) *ShotSegment {
14656	s.Confidence = &v
14657	return s
14658}
14659
14660// SetIndex sets the Index field's value.
14661func (s *ShotSegment) SetIndex(v int64) *ShotSegment {
14662	s.Index = &v
14663	return s
14664}
14665
14666// Indicates whether or not the face is smiling, and the confidence level in
14667// the determination.
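//
// A minimal sketch of reading Smile from DetectFaces results, assuming a
// configured client svc and an input with Attributes set to ALL (the input
// value is a hypothetical placeholder):
//
//    out, err := svc.DetectFaces(input)
//    if err == nil {
//        for _, fd := range out.FaceDetails {
//            if fd.Smile != nil && *fd.Smile.Value {
//                fmt.Printf("smiling (%.1f%% confidence)\n", *fd.Smile.Confidence)
//            }
//        }
//    }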
14668type Smile struct {
14669	_ struct{} `type:"structure"`
14670
14671	// Level of confidence in the determination.
14672	Confidence *float64 `type:"float"`
14673
14674	// Boolean value that indicates whether the face is smiling or not.
14675	Value *bool `type:"boolean"`
14676}
14677
14678// String returns the string representation
14679func (s Smile) String() string {
14680	return awsutil.Prettify(s)
14681}
14682
14683// GoString returns the string representation
14684func (s Smile) GoString() string {
14685	return s.String()
14686}
14687
14688// SetConfidence sets the Confidence field's value.
14689func (s *Smile) SetConfidence(v float64) *Smile {
14690	s.Confidence = &v
14691	return s
14692}
14693
14694// SetValue sets the Value field's value.
14695func (s *Smile) SetValue(v bool) *Smile {
14696	s.Value = &v
14697	return s
14698}
14699
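// A minimal sketch of starting celebrity recognition for a stored video,
// assuming a configured client svc; the bucket and object names are
// hypothetical placeholders:
//
//    out, err := svc.StartCelebrityRecognition(&StartCelebrityRecognitionInput{
//        Video: &Video{
//            S3Object: &S3Object{
//                Bucket: aws.String("my-bucket"),
//                Name:   aws.String("my-video.mp4"),
//            },
//        },
//    })
//    if err == nil {
//        fmt.Println(*out.JobId) // pass to GetCelebrityRecognition
//    }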
14700type StartCelebrityRecognitionInput struct {
14701	_ struct{} `type:"structure"`
14702
14703	// Idempotent token used to identify the start request. If you use the same
14704	// token with multiple StartCelebrityRecognition requests, the same JobId is
	// returned. Use ClientRequestToken to prevent the same job from being accidentally
14706	// started more than once.
14707	ClientRequestToken *string `min:"1" type:"string"`
14708
14709	// An identifier you specify that's returned in the completion notification
14710	// that's published to your Amazon Simple Notification Service topic. For example,
14711	// you can use JobTag to group related jobs and identify them in the completion
14712	// notification.
14713	JobTag *string `min:"1" type:"string"`
14714
14715	// The Amazon SNS topic ARN that you want Amazon Rekognition Video to publish
14716	// the completion status of the celebrity recognition analysis to.
14717	NotificationChannel *NotificationChannel `type:"structure"`
14718
14719	// The video in which you want to recognize celebrities. The video must be stored
14720	// in an Amazon S3 bucket.
14721	//
14722	// Video is a required field
14723	Video *Video `type:"structure" required:"true"`
14724}
14725
14726// String returns the string representation
14727func (s StartCelebrityRecognitionInput) String() string {
14728	return awsutil.Prettify(s)
14729}
14730
14731// GoString returns the string representation
14732func (s StartCelebrityRecognitionInput) GoString() string {
14733	return s.String()
14734}
14735
14736// Validate inspects the fields of the type to determine if they are valid.
14737func (s *StartCelebrityRecognitionInput) Validate() error {
14738	invalidParams := request.ErrInvalidParams{Context: "StartCelebrityRecognitionInput"}
14739	if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 {
14740		invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1))
14741	}
14742	if s.JobTag != nil && len(*s.JobTag) < 1 {
14743		invalidParams.Add(request.NewErrParamMinLen("JobTag", 1))
14744	}
14745	if s.Video == nil {
14746		invalidParams.Add(request.NewErrParamRequired("Video"))
14747	}
14748	if s.NotificationChannel != nil {
14749		if err := s.NotificationChannel.Validate(); err != nil {
14750			invalidParams.AddNested("NotificationChannel", err.(request.ErrInvalidParams))
14751		}
14752	}
14753	if s.Video != nil {
14754		if err := s.Video.Validate(); err != nil {
14755			invalidParams.AddNested("Video", err.(request.ErrInvalidParams))
14756		}
14757	}
14758
14759	if invalidParams.Len() > 0 {
14760		return invalidParams
14761	}
14762	return nil
14763}
14764
14765// SetClientRequestToken sets the ClientRequestToken field's value.
14766func (s *StartCelebrityRecognitionInput) SetClientRequestToken(v string) *StartCelebrityRecognitionInput {
14767	s.ClientRequestToken = &v
14768	return s
14769}
14770
14771// SetJobTag sets the JobTag field's value.
14772func (s *StartCelebrityRecognitionInput) SetJobTag(v string) *StartCelebrityRecognitionInput {
14773	s.JobTag = &v
14774	return s
14775}
14776
14777// SetNotificationChannel sets the NotificationChannel field's value.
14778func (s *StartCelebrityRecognitionInput) SetNotificationChannel(v *NotificationChannel) *StartCelebrityRecognitionInput {
14779	s.NotificationChannel = v
14780	return s
14781}
14782
14783// SetVideo sets the Video field's value.
14784func (s *StartCelebrityRecognitionInput) SetVideo(v *Video) *StartCelebrityRecognitionInput {
14785	s.Video = v
14786	return s
14787}
14788
14789type StartCelebrityRecognitionOutput struct {
14790	_ struct{} `type:"structure"`
14791
14792	// The identifier for the celebrity recognition analysis job. Use JobId to identify
14793	// the job in a subsequent call to GetCelebrityRecognition.
14794	JobId *string `min:"1" type:"string"`
14795}
14796
14797// String returns the string representation
14798func (s StartCelebrityRecognitionOutput) String() string {
14799	return awsutil.Prettify(s)
14800}
14801
14802// GoString returns the string representation
14803func (s StartCelebrityRecognitionOutput) GoString() string {
14804	return s.String()
14805}
14806
14807// SetJobId sets the JobId field's value.
14808func (s *StartCelebrityRecognitionOutput) SetJobId(v string) *StartCelebrityRecognitionOutput {
14809	s.JobId = &v
14810	return s
14811}
14812
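// A minimal sketch of starting unsafe content analysis for a stored video,
// assuming a configured client svc; the bucket and object names are
// hypothetical placeholders:
//
//    out, err := svc.StartContentModeration(&StartContentModerationInput{
//        MinConfidence: aws.Float64(60),
//        Video: &Video{
//            S3Object: &S3Object{
//                Bucket: aws.String("my-bucket"),
//                Name:   aws.String("my-video.mp4"),
//            },
//        },
//    })
//    if err == nil {
//        fmt.Println(*out.JobId) // pass to GetContentModeration
//    }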
14813type StartContentModerationInput struct {
14814	_ struct{} `type:"structure"`
14815
14816	// Idempotent token used to identify the start request. If you use the same
14817	// token with multiple StartContentModeration requests, the same JobId is returned.
	// Use ClientRequestToken to prevent the same job from being accidentally started
14819	// more than once.
14820	ClientRequestToken *string `min:"1" type:"string"`
14821
14822	// An identifier you specify that's returned in the completion notification
14823	// that's published to your Amazon Simple Notification Service topic. For example,
14824	// you can use JobTag to group related jobs and identify them in the completion
14825	// notification.
14826	JobTag *string `min:"1" type:"string"`
14827
14828	// Specifies the minimum confidence that Amazon Rekognition must have in order
14829	// to return a moderated content label. Confidence represents how certain Amazon
14830	// Rekognition is that the moderated content is correctly identified. 0 is the
14831	// lowest confidence. 100 is the highest confidence. Amazon Rekognition doesn't
14832	// return any moderated content labels with a confidence level lower than this
14833	// specified value. If you don't specify MinConfidence, GetContentModeration
14834	// returns labels with confidence values greater than or equal to 50 percent.
14835	MinConfidence *float64 `type:"float"`
14836
14837	// The Amazon SNS topic ARN that you want Amazon Rekognition Video to publish
14838	// the completion status of the unsafe content analysis to.
14839	NotificationChannel *NotificationChannel `type:"structure"`
14840
14841	// The video in which you want to detect unsafe content. The video must be stored
14842	// in an Amazon S3 bucket.
14843	//
14844	// Video is a required field
14845	Video *Video `type:"structure" required:"true"`
14846}
14847
14848// String returns the string representation
14849func (s StartContentModerationInput) String() string {
14850	return awsutil.Prettify(s)
14851}
14852
14853// GoString returns the string representation
14854func (s StartContentModerationInput) GoString() string {
14855	return s.String()
14856}
14857
14858// Validate inspects the fields of the type to determine if they are valid.
14859func (s *StartContentModerationInput) Validate() error {
14860	invalidParams := request.ErrInvalidParams{Context: "StartContentModerationInput"}
14861	if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 {
14862		invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1))
14863	}
14864	if s.JobTag != nil && len(*s.JobTag) < 1 {
14865		invalidParams.Add(request.NewErrParamMinLen("JobTag", 1))
14866	}
14867	if s.Video == nil {
14868		invalidParams.Add(request.NewErrParamRequired("Video"))
14869	}
14870	if s.NotificationChannel != nil {
14871		if err := s.NotificationChannel.Validate(); err != nil {
14872			invalidParams.AddNested("NotificationChannel", err.(request.ErrInvalidParams))
14873		}
14874	}
14875	if s.Video != nil {
14876		if err := s.Video.Validate(); err != nil {
14877			invalidParams.AddNested("Video", err.(request.ErrInvalidParams))
14878		}
14879	}
14880
14881	if invalidParams.Len() > 0 {
14882		return invalidParams
14883	}
14884	return nil
14885}
14886
14887// SetClientRequestToken sets the ClientRequestToken field's value.
14888func (s *StartContentModerationInput) SetClientRequestToken(v string) *StartContentModerationInput {
14889	s.ClientRequestToken = &v
14890	return s
14891}
14892
14893// SetJobTag sets the JobTag field's value.
14894func (s *StartContentModerationInput) SetJobTag(v string) *StartContentModerationInput {
14895	s.JobTag = &v
14896	return s
14897}
14898
14899// SetMinConfidence sets the MinConfidence field's value.
14900func (s *StartContentModerationInput) SetMinConfidence(v float64) *StartContentModerationInput {
14901	s.MinConfidence = &v
14902	return s
14903}
14904
14905// SetNotificationChannel sets the NotificationChannel field's value.
14906func (s *StartContentModerationInput) SetNotificationChannel(v *NotificationChannel) *StartContentModerationInput {
14907	s.NotificationChannel = v
14908	return s
14909}
14910
14911// SetVideo sets the Video field's value.
14912func (s *StartContentModerationInput) SetVideo(v *Video) *StartContentModerationInput {
14913	s.Video = v
14914	return s
14915}
14916
14917type StartContentModerationOutput struct {
14918	_ struct{} `type:"structure"`
14919
14920	// The identifier for the unsafe content analysis job. Use JobId to identify
14921	// the job in a subsequent call to GetContentModeration.
14922	JobId *string `min:"1" type:"string"`
14923}
14924
14925// String returns the string representation
14926func (s StartContentModerationOutput) String() string {
14927	return awsutil.Prettify(s)
14928}
14929
14930// GoString returns the string representation
14931func (s StartContentModerationOutput) GoString() string {
14932	return s.String()
14933}
14934
14935// SetJobId sets the JobId field's value.
14936func (s *StartContentModerationOutput) SetJobId(v string) *StartContentModerationOutput {
14937	s.JobId = &v
14938	return s
14939}
14940
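// A minimal sketch of starting face detection for a stored video, assuming a
// configured client svc; the bucket and object names are hypothetical
// placeholders:
//
//    out, err := svc.StartFaceDetection(&StartFaceDetectionInput{
//        FaceAttributes: aws.String(FaceAttributesAll),
//        Video: &Video{
//            S3Object: &S3Object{
//                Bucket: aws.String("my-bucket"),
//                Name:   aws.String("my-video.mp4"),
//            },
//        },
//    })
//    if err == nil {
//        fmt.Println(*out.JobId) // pass to GetFaceDetection
//    }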
14941type StartFaceDetectionInput struct {
14942	_ struct{} `type:"structure"`
14943
14944	// Idempotent token used to identify the start request. If you use the same
14945	// token with multiple StartFaceDetection requests, the same JobId is returned.
	// Use ClientRequestToken to prevent the same job from being accidentally started
14947	// more than once.
14948	ClientRequestToken *string `min:"1" type:"string"`
14949
14950	// The face attributes you want returned.
14951	//
	// DEFAULT - The following subset of facial attributes is returned: BoundingBox,
	// Confidence, Pose, Quality, and Landmarks.
14954	//
14955	// ALL - All facial attributes are returned.
14956	FaceAttributes *string `type:"string" enum:"FaceAttributes"`
14957
14958	// An identifier you specify that's returned in the completion notification
14959	// that's published to your Amazon Simple Notification Service topic. For example,
14960	// you can use JobTag to group related jobs and identify them in the completion
14961	// notification.
14962	JobTag *string `min:"1" type:"string"`
14963
14964	// The ARN of the Amazon SNS topic to which you want Amazon Rekognition Video
14965	// to publish the completion status of the face detection operation.
14966	NotificationChannel *NotificationChannel `type:"structure"`
14967
14968	// The video in which you want to detect faces. The video must be stored in
14969	// an Amazon S3 bucket.
14970	//
14971	// Video is a required field
14972	Video *Video `type:"structure" required:"true"`
14973}
14974
14975// String returns the string representation
14976func (s StartFaceDetectionInput) String() string {
14977	return awsutil.Prettify(s)
14978}
14979
14980// GoString returns the string representation
14981func (s StartFaceDetectionInput) GoString() string {
14982	return s.String()
14983}
14984
14985// Validate inspects the fields of the type to determine if they are valid.
14986func (s *StartFaceDetectionInput) Validate() error {
14987	invalidParams := request.ErrInvalidParams{Context: "StartFaceDetectionInput"}
14988	if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 {
14989		invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1))
14990	}
14991	if s.JobTag != nil && len(*s.JobTag) < 1 {
14992		invalidParams.Add(request.NewErrParamMinLen("JobTag", 1))
14993	}
14994	if s.Video == nil {
14995		invalidParams.Add(request.NewErrParamRequired("Video"))
14996	}
14997	if s.NotificationChannel != nil {
14998		if err := s.NotificationChannel.Validate(); err != nil {
14999			invalidParams.AddNested("NotificationChannel", err.(request.ErrInvalidParams))
15000		}
15001	}
15002	if s.Video != nil {
15003		if err := s.Video.Validate(); err != nil {
15004			invalidParams.AddNested("Video", err.(request.ErrInvalidParams))
15005		}
15006	}
15007
15008	if invalidParams.Len() > 0 {
15009		return invalidParams
15010	}
15011	return nil
15012}
15013
15014// SetClientRequestToken sets the ClientRequestToken field's value.
15015func (s *StartFaceDetectionInput) SetClientRequestToken(v string) *StartFaceDetectionInput {
15016	s.ClientRequestToken = &v
15017	return s
15018}
15019
15020// SetFaceAttributes sets the FaceAttributes field's value.
15021func (s *StartFaceDetectionInput) SetFaceAttributes(v string) *StartFaceDetectionInput {
15022	s.FaceAttributes = &v
15023	return s
15024}
15025
15026// SetJobTag sets the JobTag field's value.
15027func (s *StartFaceDetectionInput) SetJobTag(v string) *StartFaceDetectionInput {
15028	s.JobTag = &v
15029	return s
15030}
15031
15032// SetNotificationChannel sets the NotificationChannel field's value.
15033func (s *StartFaceDetectionInput) SetNotificationChannel(v *NotificationChannel) *StartFaceDetectionInput {
15034	s.NotificationChannel = v
15035	return s
15036}
15037
15038// SetVideo sets the Video field's value.
15039func (s *StartFaceDetectionInput) SetVideo(v *Video) *StartFaceDetectionInput {
15040	s.Video = v
15041	return s
15042}
15043
15044type StartFaceDetectionOutput struct {
15045	_ struct{} `type:"structure"`
15046
15047	// The identifier for the face detection job. Use JobId to identify the job
15048	// in a subsequent call to GetFaceDetection.
15049	JobId *string `min:"1" type:"string"`
15050}
15051
15052// String returns the string representation
15053func (s StartFaceDetectionOutput) String() string {
15054	return awsutil.Prettify(s)
15055}
15056
15057// GoString returns the string representation
15058func (s StartFaceDetectionOutput) GoString() string {
15059	return s.String()
15060}
15061
15062// SetJobId sets the JobId field's value.
15063func (s *StartFaceDetectionOutput) SetJobId(v string) *StartFaceDetectionOutput {
15064	s.JobId = &v
15065	return s
15066}
15067
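// A minimal sketch of starting a face search against an existing collection,
// assuming a configured client svc; the collection, bucket, and object names
// are hypothetical placeholders:
//
//    out, err := svc.StartFaceSearch(&StartFaceSearchInput{
//        CollectionId:       aws.String("my-collection"),
//        FaceMatchThreshold: aws.Float64(80),
//        Video: &Video{
//            S3Object: &S3Object{
//                Bucket: aws.String("my-bucket"),
//                Name:   aws.String("my-video.mp4"),
//            },
//        },
//    })
//    if err == nil {
//        fmt.Println(*out.JobId) // pass to GetFaceSearch
//    }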
15068type StartFaceSearchInput struct {
15069	_ struct{} `type:"structure"`
15070
15071	// Idempotent token used to identify the start request. If you use the same
15072	// token with multiple StartFaceSearch requests, the same JobId is returned.
	// Use ClientRequestToken to prevent the same job from being accidentally started
15074	// more than once.
15075	ClientRequestToken *string `min:"1" type:"string"`
15076
15077	// ID of the collection that contains the faces you want to search for.
15078	//
15079	// CollectionId is a required field
15080	CollectionId *string `min:"1" type:"string" required:"true"`
15081
	// The minimum confidence in the person match to return. For example, don't
	// return any matches with a confidence lower than 70%. The default value
	// is 80%.
15085	FaceMatchThreshold *float64 `type:"float"`
15086
15087	// An identifier you specify that's returned in the completion notification
15088	// that's published to your Amazon Simple Notification Service topic. For example,
15089	// you can use JobTag to group related jobs and identify them in the completion
15090	// notification.
15091	JobTag *string `min:"1" type:"string"`
15092
15093	// The ARN of the Amazon SNS topic to which you want Amazon Rekognition Video
15094	// to publish the completion status of the search.
15095	NotificationChannel *NotificationChannel `type:"structure"`
15096
15097	// The video you want to search. The video must be stored in an Amazon S3 bucket.
15098	//
15099	// Video is a required field
15100	Video *Video `type:"structure" required:"true"`
15101}
15102
15103// String returns the string representation
15104func (s StartFaceSearchInput) String() string {
15105	return awsutil.Prettify(s)
15106}
15107
15108// GoString returns the string representation
15109func (s StartFaceSearchInput) GoString() string {
15110	return s.String()
15111}
15112
15113// Validate inspects the fields of the type to determine if they are valid.
15114func (s *StartFaceSearchInput) Validate() error {
15115	invalidParams := request.ErrInvalidParams{Context: "StartFaceSearchInput"}
15116	if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 {
15117		invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1))
15118	}
15119	if s.CollectionId == nil {
15120		invalidParams.Add(request.NewErrParamRequired("CollectionId"))
15121	}
15122	if s.CollectionId != nil && len(*s.CollectionId) < 1 {
15123		invalidParams.Add(request.NewErrParamMinLen("CollectionId", 1))
15124	}
15125	if s.JobTag != nil && len(*s.JobTag) < 1 {
15126		invalidParams.Add(request.NewErrParamMinLen("JobTag", 1))
15127	}
15128	if s.Video == nil {
15129		invalidParams.Add(request.NewErrParamRequired("Video"))
15130	}
15131	if s.NotificationChannel != nil {
15132		if err := s.NotificationChannel.Validate(); err != nil {
15133			invalidParams.AddNested("NotificationChannel", err.(request.ErrInvalidParams))
15134		}
15135	}
15136	if s.Video != nil {
15137		if err := s.Video.Validate(); err != nil {
15138			invalidParams.AddNested("Video", err.(request.ErrInvalidParams))
15139		}
15140	}
15141
15142	if invalidParams.Len() > 0 {
15143		return invalidParams
15144	}
15145	return nil
15146}
15147
15148// SetClientRequestToken sets the ClientRequestToken field's value.
15149func (s *StartFaceSearchInput) SetClientRequestToken(v string) *StartFaceSearchInput {
15150	s.ClientRequestToken = &v
15151	return s
15152}
15153
15154// SetCollectionId sets the CollectionId field's value.
15155func (s *StartFaceSearchInput) SetCollectionId(v string) *StartFaceSearchInput {
15156	s.CollectionId = &v
15157	return s
15158}
15159
15160// SetFaceMatchThreshold sets the FaceMatchThreshold field's value.
15161func (s *StartFaceSearchInput) SetFaceMatchThreshold(v float64) *StartFaceSearchInput {
15162	s.FaceMatchThreshold = &v
15163	return s
15164}
15165
15166// SetJobTag sets the JobTag field's value.
15167func (s *StartFaceSearchInput) SetJobTag(v string) *StartFaceSearchInput {
15168	s.JobTag = &v
15169	return s
15170}
15171
15172// SetNotificationChannel sets the NotificationChannel field's value.
15173func (s *StartFaceSearchInput) SetNotificationChannel(v *NotificationChannel) *StartFaceSearchInput {
15174	s.NotificationChannel = v
15175	return s
15176}
15177
15178// SetVideo sets the Video field's value.
15179func (s *StartFaceSearchInput) SetVideo(v *Video) *StartFaceSearchInput {
15180	s.Video = v
15181	return s
15182}
15183
15184type StartFaceSearchOutput struct {
15185	_ struct{} `type:"structure"`
15186
15187	// The identifier for the search job. Use JobId to identify the job in a subsequent
15188	// call to GetFaceSearch.
15189	JobId *string `min:"1" type:"string"`
15190}
15191
15192// String returns the string representation
15193func (s StartFaceSearchOutput) String() string {
15194	return awsutil.Prettify(s)
15195}
15196
15197// GoString returns the string representation
15198func (s StartFaceSearchOutput) GoString() string {
15199	return s.String()
15200}
15201
15202// SetJobId sets the JobId field's value.
15203func (s *StartFaceSearchOutput) SetJobId(v string) *StartFaceSearchOutput {
15204	s.JobId = &v
15205	return s
15206}
15207
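// A minimal sketch of starting label detection for a stored video, assuming a
// configured client svc; the bucket and object names are hypothetical
// placeholders:
//
//    out, err := svc.StartLabelDetection(&StartLabelDetectionInput{
//        MinConfidence: aws.Float64(75),
//        Video: &Video{
//            S3Object: &S3Object{
//                Bucket: aws.String("my-bucket"),
//                Name:   aws.String("my-video.mp4"),
//            },
//        },
//    })
//    if err == nil {
//        fmt.Println(*out.JobId) // pass to GetLabelDetection
//    }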
15208type StartLabelDetectionInput struct {
15209	_ struct{} `type:"structure"`
15210
15211	// Idempotent token used to identify the start request. If you use the same
15212	// token with multiple StartLabelDetection requests, the same JobId is returned.
	// Use ClientRequestToken to prevent the same job from being accidentally started
15214	// more than once.
15215	ClientRequestToken *string `min:"1" type:"string"`
15216
15217	// An identifier you specify that's returned in the completion notification
15218	// that's published to your Amazon Simple Notification Service topic. For example,
15219	// you can use JobTag to group related jobs and identify them in the completion
15220	// notification.
15221	JobTag *string `min:"1" type:"string"`
15222
15223	// Specifies the minimum confidence that Amazon Rekognition Video must have
15224	// in order to return a detected label. Confidence represents how certain Amazon
	// Rekognition is that a label is correctly identified. 0 is the lowest confidence.
15226	// 100 is the highest confidence. Amazon Rekognition Video doesn't return any
15227	// labels with a confidence level lower than this specified value.
15228	//
15229	// If you don't specify MinConfidence, the operation returns labels with confidence
15230	// values greater than or equal to 50 percent.
15231	MinConfidence *float64 `type:"float"`
15232
15233	// The Amazon SNS topic ARN you want Amazon Rekognition Video to publish the
15234	// completion status of the label detection operation to.
15235	NotificationChannel *NotificationChannel `type:"structure"`
15236
15237	// The video in which you want to detect labels. The video must be stored in
15238	// an Amazon S3 bucket.
15239	//
15240	// Video is a required field
15241	Video *Video `type:"structure" required:"true"`
15242}
15243
15244// String returns the string representation
15245func (s StartLabelDetectionInput) String() string {
15246	return awsutil.Prettify(s)
15247}
15248
15249// GoString returns the string representation
15250func (s StartLabelDetectionInput) GoString() string {
15251	return s.String()
15252}
15253
15254// Validate inspects the fields of the type to determine if they are valid.
15255func (s *StartLabelDetectionInput) Validate() error {
15256	invalidParams := request.ErrInvalidParams{Context: "StartLabelDetectionInput"}
15257	if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 {
15258		invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1))
15259	}
15260	if s.JobTag != nil && len(*s.JobTag) < 1 {
15261		invalidParams.Add(request.NewErrParamMinLen("JobTag", 1))
15262	}
15263	if s.Video == nil {
15264		invalidParams.Add(request.NewErrParamRequired("Video"))
15265	}
15266	if s.NotificationChannel != nil {
15267		if err := s.NotificationChannel.Validate(); err != nil {
15268			invalidParams.AddNested("NotificationChannel", err.(request.ErrInvalidParams))
15269		}
15270	}
15271	if s.Video != nil {
15272		if err := s.Video.Validate(); err != nil {
15273			invalidParams.AddNested("Video", err.(request.ErrInvalidParams))
15274		}
15275	}
15276
15277	if invalidParams.Len() > 0 {
15278		return invalidParams
15279	}
15280	return nil
15281}
15282
15283// SetClientRequestToken sets the ClientRequestToken field's value.
15284func (s *StartLabelDetectionInput) SetClientRequestToken(v string) *StartLabelDetectionInput {
15285	s.ClientRequestToken = &v
15286	return s
15287}
15288
15289// SetJobTag sets the JobTag field's value.
15290func (s *StartLabelDetectionInput) SetJobTag(v string) *StartLabelDetectionInput {
15291	s.JobTag = &v
15292	return s
15293}
15294
15295// SetMinConfidence sets the MinConfidence field's value.
15296func (s *StartLabelDetectionInput) SetMinConfidence(v float64) *StartLabelDetectionInput {
15297	s.MinConfidence = &v
15298	return s
15299}
15300
15301// SetNotificationChannel sets the NotificationChannel field's value.
15302func (s *StartLabelDetectionInput) SetNotificationChannel(v *NotificationChannel) *StartLabelDetectionInput {
15303	s.NotificationChannel = v
15304	return s
15305}
15306
15307// SetVideo sets the Video field's value.
15308func (s *StartLabelDetectionInput) SetVideo(v *Video) *StartLabelDetectionInput {
15309	s.Video = v
15310	return s
15311}
15312
15313type StartLabelDetectionOutput struct {
15314	_ struct{} `type:"structure"`
15315
15316	// The identifier for the label detection job. Use JobId to identify the job
15317	// in a subsequent call to GetLabelDetection.
15318	JobId *string `min:"1" type:"string"`
15319}
15320
15321// String returns the string representation
15322func (s StartLabelDetectionOutput) String() string {
15323	return awsutil.Prettify(s)
15324}
15325
15326// GoString returns the string representation
15327func (s StartLabelDetectionOutput) GoString() string {
15328	return s.String()
15329}
15330
15331// SetJobId sets the JobId field's value.
15332func (s *StartLabelDetectionOutput) SetJobId(v string) *StartLabelDetectionOutput {
15333	s.JobId = &v
15334	return s
15335}
15336
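// A minimal sketch of starting person tracking for a stored video, assuming a
// configured client svc; the bucket and object names are hypothetical
// placeholders:
//
//    out, err := svc.StartPersonTracking(&StartPersonTrackingInput{
//        Video: &Video{
//            S3Object: &S3Object{
//                Bucket: aws.String("my-bucket"),
//                Name:   aws.String("my-video.mp4"),
//            },
//        },
//    })
//    if err == nil {
//        fmt.Println(*out.JobId) // pass to GetPersonTracking
//    }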
15337type StartPersonTrackingInput struct {
15338	_ struct{} `type:"structure"`
15339
15340	// Idempotent token used to identify the start request. If you use the same
15341	// token with multiple StartPersonTracking requests, the same JobId is returned.
	// Use ClientRequestToken to prevent the same job from being accidentally started
15343	// more than once.
15344	ClientRequestToken *string `min:"1" type:"string"`
15345
15346	// An identifier you specify that's returned in the completion notification
15347	// that's published to your Amazon Simple Notification Service topic. For example,
15348	// you can use JobTag to group related jobs and identify them in the completion
15349	// notification.
15350	JobTag *string `min:"1" type:"string"`
15351
15352	// The Amazon SNS topic ARN you want Amazon Rekognition Video to publish the
15353	// completion status of the people detection operation to.
15354	NotificationChannel *NotificationChannel `type:"structure"`
15355
15356	// The video in which you want to detect people. The video must be stored in
15357	// an Amazon S3 bucket.
15358	//
15359	// Video is a required field
15360	Video *Video `type:"structure" required:"true"`
15361}
15362
15363// String returns the string representation
15364func (s StartPersonTrackingInput) String() string {
15365	return awsutil.Prettify(s)
15366}
15367
15368// GoString returns the string representation
15369func (s StartPersonTrackingInput) GoString() string {
15370	return s.String()
15371}
15372
15373// Validate inspects the fields of the type to determine if they are valid.
15374func (s *StartPersonTrackingInput) Validate() error {
15375	invalidParams := request.ErrInvalidParams{Context: "StartPersonTrackingInput"}
15376	if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 {
15377		invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1))
15378	}
15379	if s.JobTag != nil && len(*s.JobTag) < 1 {
15380		invalidParams.Add(request.NewErrParamMinLen("JobTag", 1))
15381	}
15382	if s.Video == nil {
15383		invalidParams.Add(request.NewErrParamRequired("Video"))
15384	}
15385	if s.NotificationChannel != nil {
15386		if err := s.NotificationChannel.Validate(); err != nil {
15387			invalidParams.AddNested("NotificationChannel", err.(request.ErrInvalidParams))
15388		}
15389	}
15390	if s.Video != nil {
15391		if err := s.Video.Validate(); err != nil {
15392			invalidParams.AddNested("Video", err.(request.ErrInvalidParams))
15393		}
15394	}
15395
15396	if invalidParams.Len() > 0 {
15397		return invalidParams
15398	}
15399	return nil
15400}
15401
15402// SetClientRequestToken sets the ClientRequestToken field's value.
15403func (s *StartPersonTrackingInput) SetClientRequestToken(v string) *StartPersonTrackingInput {
15404	s.ClientRequestToken = &v
15405	return s
15406}
15407
15408// SetJobTag sets the JobTag field's value.
15409func (s *StartPersonTrackingInput) SetJobTag(v string) *StartPersonTrackingInput {
15410	s.JobTag = &v
15411	return s
15412}
15413
15414// SetNotificationChannel sets the NotificationChannel field's value.
15415func (s *StartPersonTrackingInput) SetNotificationChannel(v *NotificationChannel) *StartPersonTrackingInput {
15416	s.NotificationChannel = v
15417	return s
15418}
15419
15420// SetVideo sets the Video field's value.
15421func (s *StartPersonTrackingInput) SetVideo(v *Video) *StartPersonTrackingInput {
15422	s.Video = v
15423	return s
15424}
15425
15426type StartPersonTrackingOutput struct {
15427	_ struct{} `type:"structure"`
15428
15429	// The identifier for the person detection job. Use JobId to identify the job
15430	// in a subsequent call to GetPersonTracking.
15431	JobId *string `min:"1" type:"string"`
15432}
15433
15434// String returns the string representation
15435func (s StartPersonTrackingOutput) String() string {
15436	return awsutil.Prettify(s)
15437}
15438
15439// GoString returns the string representation
15440func (s StartPersonTrackingOutput) GoString() string {
15441	return s.String()
15442}
15443
15444// SetJobId sets the JobId field's value.
15445func (s *StartPersonTrackingOutput) SetJobId(v string) *StartPersonTrackingOutput {
15446	s.JobId = &v
15447	return s
15448}
15449
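// A minimal sketch of starting an Amazon Rekognition Custom Labels model
// version, assuming a configured client svc; the ARN is a hypothetical
// placeholder:
//
//    out, err := svc.StartProjectVersion(&StartProjectVersionInput{
//        MinInferenceUnits: aws.Int64(1),
//        ProjectVersionArn: aws.String("arn:aws:rekognition:us-east-1:123456789012:project/my-project/version/my-version/1234567890123"),
//    })
//    if err == nil {
//        fmt.Println(*out.Status)
//    }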
15450type StartProjectVersionInput struct {
15451	_ struct{} `type:"structure"`
15452
15453	// The minimum number of inference units to use. A single inference unit represents
	// 1 hour of processing and can support up to 5 transactions per second (TPS).
15455	// Use a higher number to increase the TPS throughput of your model. You are
15456	// charged for the number of inference units that you use.
15457	//
15458	// MinInferenceUnits is a required field
15459	MinInferenceUnits *int64 `min:"1" type:"integer" required:"true"`
15460
	// The Amazon Resource Name (ARN) of the model version that you want to start.
15462	//
15463	// ProjectVersionArn is a required field
15464	ProjectVersionArn *string `min:"20" type:"string" required:"true"`
15465}
15466
15467// String returns the string representation
15468func (s StartProjectVersionInput) String() string {
15469	return awsutil.Prettify(s)
15470}
15471
15472// GoString returns the string representation
15473func (s StartProjectVersionInput) GoString() string {
15474	return s.String()
15475}
15476
15477// Validate inspects the fields of the type to determine if they are valid.
15478func (s *StartProjectVersionInput) Validate() error {
15479	invalidParams := request.ErrInvalidParams{Context: "StartProjectVersionInput"}
15480	if s.MinInferenceUnits == nil {
15481		invalidParams.Add(request.NewErrParamRequired("MinInferenceUnits"))
15482	}
15483	if s.MinInferenceUnits != nil && *s.MinInferenceUnits < 1 {
15484		invalidParams.Add(request.NewErrParamMinValue("MinInferenceUnits", 1))
15485	}
15486	if s.ProjectVersionArn == nil {
15487		invalidParams.Add(request.NewErrParamRequired("ProjectVersionArn"))
15488	}
15489	if s.ProjectVersionArn != nil && len(*s.ProjectVersionArn) < 20 {
15490		invalidParams.Add(request.NewErrParamMinLen("ProjectVersionArn", 20))
15491	}
15492
15493	if invalidParams.Len() > 0 {
15494		return invalidParams
15495	}
15496	return nil
15497}
15498
15499// SetMinInferenceUnits sets the MinInferenceUnits field's value.
15500func (s *StartProjectVersionInput) SetMinInferenceUnits(v int64) *StartProjectVersionInput {
15501	s.MinInferenceUnits = &v
15502	return s
15503}
15504
15505// SetProjectVersionArn sets the ProjectVersionArn field's value.
15506func (s *StartProjectVersionInput) SetProjectVersionArn(v string) *StartProjectVersionInput {
15507	s.ProjectVersionArn = &v
15508	return s
15509}
15510
15511type StartProjectVersionOutput struct {
15512	_ struct{} `type:"structure"`
15513
15514	// The current running status of the model.
15515	Status *string `type:"string" enum:"ProjectVersionStatus"`
15516}
15517
15518// String returns the string representation
15519func (s StartProjectVersionOutput) String() string {
15520	return awsutil.Prettify(s)
15521}
15522
15523// GoString returns the string representation
15524func (s StartProjectVersionOutput) GoString() string {
15525	return s.String()
15526}
15527
15528// SetStatus sets the Status field's value.
15529func (s *StartProjectVersionOutput) SetStatus(v string) *StartProjectVersionOutput {
15530	s.Status = &v
15531	return s
15532}
15533
15534// Filters applied to the technical cue or shot detection segments. For more
15535// information, see StartSegmentDetection.
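//
// A minimal sketch of building filters that raise the minimum confidence for
// both segment types (the threshold values are illustrative):
//
//    filters := &StartSegmentDetectionFilters{
//        ShotFilter: &StartShotDetectionFilter{
//            MinSegmentConfidence: aws.Float64(80),
//        },
//        TechnicalCueFilter: &StartTechnicalCueDetectionFilter{
//            MinSegmentConfidence: aws.Float64(80),
//        },
//    }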
15536type StartSegmentDetectionFilters struct {
15537	_ struct{} `type:"structure"`
15538
15539	// Filters that are specific to shot detections.
15540	ShotFilter *StartShotDetectionFilter `type:"structure"`
15541
15542	// Filters that are specific to technical cues.
15543	TechnicalCueFilter *StartTechnicalCueDetectionFilter `type:"structure"`
15544}
15545
15546// String returns the string representation
15547func (s StartSegmentDetectionFilters) String() string {
15548	return awsutil.Prettify(s)
15549}
15550
15551// GoString returns the string representation
15552func (s StartSegmentDetectionFilters) GoString() string {
15553	return s.String()
15554}
15555
15556// Validate inspects the fields of the type to determine if they are valid.
15557func (s *StartSegmentDetectionFilters) Validate() error {
15558	invalidParams := request.ErrInvalidParams{Context: "StartSegmentDetectionFilters"}
15559	if s.ShotFilter != nil {
15560		if err := s.ShotFilter.Validate(); err != nil {
15561			invalidParams.AddNested("ShotFilter", err.(request.ErrInvalidParams))
15562		}
15563	}
15564	if s.TechnicalCueFilter != nil {
15565		if err := s.TechnicalCueFilter.Validate(); err != nil {
15566			invalidParams.AddNested("TechnicalCueFilter", err.(request.ErrInvalidParams))
15567		}
15568	}
15569
15570	if invalidParams.Len() > 0 {
15571		return invalidParams
15572	}
15573	return nil
15574}
15575
15576// SetShotFilter sets the ShotFilter field's value.
15577func (s *StartSegmentDetectionFilters) SetShotFilter(v *StartShotDetectionFilter) *StartSegmentDetectionFilters {
15578	s.ShotFilter = v
15579	return s
15580}
15581
15582// SetTechnicalCueFilter sets the TechnicalCueFilter field's value.
15583func (s *StartSegmentDetectionFilters) SetTechnicalCueFilter(v *StartTechnicalCueDetectionFilter) *StartSegmentDetectionFilters {
15584	s.TechnicalCueFilter = v
15585	return s
15586}
15587
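// A minimal sketch of starting segment detection for both segment types,
// assuming a configured client svc and a filters value like the one sketched
// above; the bucket and object names are hypothetical placeholders:
//
//    out, err := svc.StartSegmentDetection(&StartSegmentDetectionInput{
//        SegmentTypes: []*string{
//            aws.String(SegmentTypeTechnicalCue),
//            aws.String(SegmentTypeShot),
//        },
//        Filters: filters,
//        Video: &Video{
//            S3Object: &S3Object{
//                Bucket: aws.String("my-bucket"),
//                Name:   aws.String("my-video.mp4"),
//            },
//        },
//    })
//    if err == nil {
//        fmt.Println(*out.JobId) // pass to GetSegmentDetection
//    }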
15588type StartSegmentDetectionInput struct {
15589	_ struct{} `type:"structure"`
15590
15591	// Idempotent token used to identify the start request. If you use the same
15592	// token with multiple StartSegmentDetection requests, the same JobId is returned.
	// Use ClientRequestToken to prevent the same job from being accidentally started
15594	// more than once.
15595	ClientRequestToken *string `min:"1" type:"string"`
15596
15597	// Filters for technical cue or shot detection.
15598	Filters *StartSegmentDetectionFilters `type:"structure"`
15599
15600	// An identifier you specify that's returned in the completion notification
15601	// that's published to your Amazon Simple Notification Service topic. For example,
15602	// you can use JobTag to group related jobs and identify them in the completion
15603	// notification.
15604	JobTag *string `min:"1" type:"string"`
15605
15606	// The ARN of the Amazon SNS topic to which you want Amazon Rekognition Video
15607	// to publish the completion status of the segment detection operation.
15608	NotificationChannel *NotificationChannel `type:"structure"`
15609
15610	// An array of segment types to detect in the video. Valid values are TECHNICAL_CUE
15611	// and SHOT.
15612	//
15613	// SegmentTypes is a required field
15614	SegmentTypes []*string `min:"1" type:"list" required:"true"`
15615
	// Video file stored in an Amazon S3 bucket. Amazon Rekognition Video start
15617	// operations such as StartLabelDetection use Video to specify a video for analysis.
15618	// The supported file formats are .mp4, .mov and .avi.
15619	//
15620	// Video is a required field
15621	Video *Video `type:"structure" required:"true"`
15622}
15623
15624// String returns the string representation
15625func (s StartSegmentDetectionInput) String() string {
15626	return awsutil.Prettify(s)
15627}
15628
15629// GoString returns the string representation
15630func (s StartSegmentDetectionInput) GoString() string {
15631	return s.String()
15632}
15633
15634// Validate inspects the fields of the type to determine if they are valid.
15635func (s *StartSegmentDetectionInput) Validate() error {
15636	invalidParams := request.ErrInvalidParams{Context: "StartSegmentDetectionInput"}
15637	if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 {
15638		invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1))
15639	}
15640	if s.JobTag != nil && len(*s.JobTag) < 1 {
15641		invalidParams.Add(request.NewErrParamMinLen("JobTag", 1))
15642	}
15643	if s.SegmentTypes == nil {
15644		invalidParams.Add(request.NewErrParamRequired("SegmentTypes"))
15645	}
15646	if s.SegmentTypes != nil && len(s.SegmentTypes) < 1 {
15647		invalidParams.Add(request.NewErrParamMinLen("SegmentTypes", 1))
15648	}
15649	if s.Video == nil {
15650		invalidParams.Add(request.NewErrParamRequired("Video"))
15651	}
15652	if s.Filters != nil {
15653		if err := s.Filters.Validate(); err != nil {
15654			invalidParams.AddNested("Filters", err.(request.ErrInvalidParams))
15655		}
15656	}
15657	if s.NotificationChannel != nil {
15658		if err := s.NotificationChannel.Validate(); err != nil {
15659			invalidParams.AddNested("NotificationChannel", err.(request.ErrInvalidParams))
15660		}
15661	}
15662	if s.Video != nil {
15663		if err := s.Video.Validate(); err != nil {
15664			invalidParams.AddNested("Video", err.(request.ErrInvalidParams))
15665		}
15666	}
15667
15668	if invalidParams.Len() > 0 {
15669		return invalidParams
15670	}
15671	return nil
15672}
15673
15674// SetClientRequestToken sets the ClientRequestToken field's value.
15675func (s *StartSegmentDetectionInput) SetClientRequestToken(v string) *StartSegmentDetectionInput {
15676	s.ClientRequestToken = &v
15677	return s
15678}
15679
15680// SetFilters sets the Filters field's value.
15681func (s *StartSegmentDetectionInput) SetFilters(v *StartSegmentDetectionFilters) *StartSegmentDetectionInput {
15682	s.Filters = v
15683	return s
15684}
15685
15686// SetJobTag sets the JobTag field's value.
15687func (s *StartSegmentDetectionInput) SetJobTag(v string) *StartSegmentDetectionInput {
15688	s.JobTag = &v
15689	return s
15690}
15691
15692// SetNotificationChannel sets the NotificationChannel field's value.
15693func (s *StartSegmentDetectionInput) SetNotificationChannel(v *NotificationChannel) *StartSegmentDetectionInput {
15694	s.NotificationChannel = v
15695	return s
15696}
15697
15698// SetSegmentTypes sets the SegmentTypes field's value.
15699func (s *StartSegmentDetectionInput) SetSegmentTypes(v []*string) *StartSegmentDetectionInput {
15700	s.SegmentTypes = v
15701	return s
15702}
15703
15704// SetVideo sets the Video field's value.
15705func (s *StartSegmentDetectionInput) SetVideo(v *Video) *StartSegmentDetectionInput {
15706	s.Video = v
15707	return s
15708}
15709
15710type StartSegmentDetectionOutput struct {
15711	_ struct{} `type:"structure"`
15712
15713	// Unique identifier for the segment detection job. The JobId is returned from
15714	// StartSegmentDetection.
15715	JobId *string `min:"1" type:"string"`
15716}
15717
15718// String returns the string representation
15719func (s StartSegmentDetectionOutput) String() string {
15720	return awsutil.Prettify(s)
15721}
15722
15723// GoString returns the string representation
15724func (s StartSegmentDetectionOutput) GoString() string {
15725	return s.String()
15726}
15727
15728// SetJobId sets the JobId field's value.
15729func (s *StartSegmentDetectionOutput) SetJobId(v string) *StartSegmentDetectionOutput {
15730	s.JobId = &v
15731	return s
15732}
15733
15734// Filters for the shot detection segments returned by GetSegmentDetection.
15735// For more information, see StartSegmentDetectionFilters.
15736type StartShotDetectionFilter struct {
15737	_ struct{} `type:"structure"`
15738
15739	// Specifies the minimum confidence that Amazon Rekognition Video must have
15740	// in order to return a detected segment. Confidence represents how certain
15741	// Amazon Rekognition is that a segment is correctly identified. 0 is the lowest
15742	// confidence. 100 is the highest confidence. Amazon Rekognition Video doesn't
15743	// return any segments with a confidence level lower than this specified value.
15744	//
	// If you don't specify MinSegmentConfidence, GetSegmentDetection returns
15746	// segments with confidence values greater than or equal to 50 percent.
15747	MinSegmentConfidence *float64 `min:"50" type:"float"`
15748}
15749
15750// String returns the string representation
15751func (s StartShotDetectionFilter) String() string {
15752	return awsutil.Prettify(s)
15753}
15754
15755// GoString returns the string representation
15756func (s StartShotDetectionFilter) GoString() string {
15757	return s.String()
15758}
15759
15760// Validate inspects the fields of the type to determine if they are valid.
15761func (s *StartShotDetectionFilter) Validate() error {
15762	invalidParams := request.ErrInvalidParams{Context: "StartShotDetectionFilter"}
15763	if s.MinSegmentConfidence != nil && *s.MinSegmentConfidence < 50 {
15764		invalidParams.Add(request.NewErrParamMinValue("MinSegmentConfidence", 50))
15765	}
15766
15767	if invalidParams.Len() > 0 {
15768		return invalidParams
15769	}
15770	return nil
15771}
15772
15773// SetMinSegmentConfidence sets the MinSegmentConfidence field's value.
15774func (s *StartShotDetectionFilter) SetMinSegmentConfidence(v float64) *StartShotDetectionFilter {
15775	s.MinSegmentConfidence = &v
15776	return s
15777}
15778
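// A minimal sketch of starting a previously created stream processor, assuming
// a configured client svc; the processor name is a hypothetical placeholder:
//
//    _, err := svc.StartStreamProcessor(&StartStreamProcessorInput{
//        Name: aws.String("my-stream-processor"),
//    })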
15779type StartStreamProcessorInput struct {
15780	_ struct{} `type:"structure"`
15781
15782	// The name of the stream processor to start processing.
15783	//
15784	// Name is a required field
15785	Name *string `min:"1" type:"string" required:"true"`
15786}
15787
15788// String returns the string representation
15789func (s StartStreamProcessorInput) String() string {
15790	return awsutil.Prettify(s)
15791}
15792
15793// GoString returns the string representation
15794func (s StartStreamProcessorInput) GoString() string {
15795	return s.String()
15796}
15797
15798// Validate inspects the fields of the type to determine if they are valid.
15799func (s *StartStreamProcessorInput) Validate() error {
15800	invalidParams := request.ErrInvalidParams{Context: "StartStreamProcessorInput"}
15801	if s.Name == nil {
15802		invalidParams.Add(request.NewErrParamRequired("Name"))
15803	}
15804	if s.Name != nil && len(*s.Name) < 1 {
15805		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
15806	}
15807
15808	if invalidParams.Len() > 0 {
15809		return invalidParams
15810	}
15811	return nil
15812}
15813
15814// SetName sets the Name field's value.
15815func (s *StartStreamProcessorInput) SetName(v string) *StartStreamProcessorInput {
15816	s.Name = &v
15817	return s
15818}
15819
15820type StartStreamProcessorOutput struct {
15821	_ struct{} `type:"structure"`
15822}
15823
15824// String returns the string representation
15825func (s StartStreamProcessorOutput) String() string {
15826	return awsutil.Prettify(s)
15827}
15828
15829// GoString returns the string representation
15830func (s StartStreamProcessorOutput) GoString() string {
15831	return s.String()
15832}
15833
// Filters for the technical cue segments returned by GetSegmentDetection. For more
15835// information, see StartSegmentDetectionFilters.
15836type StartTechnicalCueDetectionFilter struct {
15837	_ struct{} `type:"structure"`
15838
15839	// Specifies the minimum confidence that Amazon Rekognition Video must have
15840	// in order to return a detected segment. Confidence represents how certain
15841	// Amazon Rekognition is that a segment is correctly identified. 0 is the lowest
15842	// confidence. 100 is the highest confidence. Amazon Rekognition Video doesn't
15843	// return any segments with a confidence level lower than this specified value.
15844	//
15845	// If you don't specify MinSegmentConfidence, GetSegmentDetection returns segments
15846	// with confidence values greater than or equal to 50 percent.
15847	MinSegmentConfidence *float64 `min:"50" type:"float"`
15848}
15849
15850// String returns the string representation
15851func (s StartTechnicalCueDetectionFilter) String() string {
15852	return awsutil.Prettify(s)
15853}
15854
15855// GoString returns the string representation
15856func (s StartTechnicalCueDetectionFilter) GoString() string {
15857	return s.String()
15858}
15859
15860// Validate inspects the fields of the type to determine if they are valid.
15861func (s *StartTechnicalCueDetectionFilter) Validate() error {
15862	invalidParams := request.ErrInvalidParams{Context: "StartTechnicalCueDetectionFilter"}
15863	if s.MinSegmentConfidence != nil && *s.MinSegmentConfidence < 50 {
15864		invalidParams.Add(request.NewErrParamMinValue("MinSegmentConfidence", 50))
15865	}
15866
15867	if invalidParams.Len() > 0 {
15868		return invalidParams
15869	}
15870	return nil
15871}
15872
15873// SetMinSegmentConfidence sets the MinSegmentConfidence field's value.
15874func (s *StartTechnicalCueDetectionFilter) SetMinSegmentConfidence(v float64) *StartTechnicalCueDetectionFilter {
15875	s.MinSegmentConfidence = &v
15876	return s
15877}
15878
// Set of optional parameters that let you set the criteria that text must meet
// to be included in your response. WordFilter looks at a word's height, width,
// and minimum confidence. RegionOfInterest lets you set a specific region of
// the screen to look for text in.
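//
// A minimal sketch of a word filter that drops small or low-confidence words
// (the threshold values are illustrative):
//
//    filters := &StartTextDetectionFilters{
//        WordFilter: &DetectionFilter{
//            MinConfidence:        aws.Float64(80),
//            MinBoundingBoxHeight: aws.Float64(0.05),
//            MinBoundingBoxWidth:  aws.Float64(0.02),
//        },
//    }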
15883type StartTextDetectionFilters struct {
15884	_ struct{} `type:"structure"`
15885
15886	// Filter focusing on a certain area of the frame. Uses a BoundingBox object
15887	// to set the region of the screen.
15888	RegionsOfInterest []*RegionOfInterest `type:"list"`
15889
15890	// Filters focusing on qualities of the text, such as confidence or size.
15891	WordFilter *DetectionFilter `type:"structure"`
15892}
15893
15894// String returns the string representation
15895func (s StartTextDetectionFilters) String() string {
15896	return awsutil.Prettify(s)
15897}
15898
15899// GoString returns the string representation
15900func (s StartTextDetectionFilters) GoString() string {
15901	return s.String()
15902}
15903
15904// SetRegionsOfInterest sets the RegionsOfInterest field's value.
15905func (s *StartTextDetectionFilters) SetRegionsOfInterest(v []*RegionOfInterest) *StartTextDetectionFilters {
15906	s.RegionsOfInterest = v
15907	return s
15908}
15909
15910// SetWordFilter sets the WordFilter field's value.
15911func (s *StartTextDetectionFilters) SetWordFilter(v *DetectionFilter) *StartTextDetectionFilters {
15912	s.WordFilter = v
15913	return s
15914}
15915
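// A minimal sketch of starting text detection for a stored video, assuming a
// configured client svc and a filters value like the one sketched above; the
// bucket and object names are hypothetical placeholders:
//
//    out, err := svc.StartTextDetection(&StartTextDetectionInput{
//        Filters: filters,
//        Video: &Video{
//            S3Object: &S3Object{
//                Bucket: aws.String("my-bucket"),
//                Name:   aws.String("my-video.mp4"),
//            },
//        },
//    })
//    if err == nil {
//        fmt.Println(*out.JobId) // pass to GetTextDetection
//    }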
15916type StartTextDetectionInput struct {
15917	_ struct{} `type:"structure"`
15918
15919	// Idempotent token used to identify the start request. If you use the same
15920	// token with multiple StartTextDetection requests, the same JobId is returned.
	// Use ClientRequestToken to prevent the same job from being accidentally started
15922	// more than once.
15923	ClientRequestToken *string `min:"1" type:"string"`
15924
15925	// Optional parameters that let you set criteria the text must meet to be included
15926	// in your response.
15927	Filters *StartTextDetectionFilters `type:"structure"`
15928
	// An identifier returned in the completion status published to your Amazon
15930	// Simple Notification Service topic. For example, you can use JobTag to group
15931	// related jobs and identify them in the completion notification.
15932	JobTag *string `min:"1" type:"string"`
15933
15934	// The Amazon Simple Notification Service topic to which Amazon Rekognition
15935	// publishes the completion status of a video analysis operation. For more information,
15936	// see api-video.
15937	NotificationChannel *NotificationChannel `type:"structure"`
15938
	// Video file stored in an Amazon S3 bucket. Amazon Rekognition Video start
15940	// operations such as StartLabelDetection use Video to specify a video for analysis.
15941	// The supported file formats are .mp4, .mov and .avi.
15942	//
15943	// Video is a required field
15944	Video *Video `type:"structure" required:"true"`
15945}
15946
15947// String returns the string representation
15948func (s StartTextDetectionInput) String() string {
15949	return awsutil.Prettify(s)
15950}
15951
15952// GoString returns the string representation
15953func (s StartTextDetectionInput) GoString() string {
15954	return s.String()
15955}
15956
15957// Validate inspects the fields of the type to determine if they are valid.
15958func (s *StartTextDetectionInput) Validate() error {
15959	invalidParams := request.ErrInvalidParams{Context: "StartTextDetectionInput"}
15960	if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 {
15961		invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1))
15962	}
15963	if s.JobTag != nil && len(*s.JobTag) < 1 {
15964		invalidParams.Add(request.NewErrParamMinLen("JobTag", 1))
15965	}
15966	if s.Video == nil {
15967		invalidParams.Add(request.NewErrParamRequired("Video"))
15968	}
15969	if s.NotificationChannel != nil {
15970		if err := s.NotificationChannel.Validate(); err != nil {
15971			invalidParams.AddNested("NotificationChannel", err.(request.ErrInvalidParams))
15972		}
15973	}
15974	if s.Video != nil {
15975		if err := s.Video.Validate(); err != nil {
15976			invalidParams.AddNested("Video", err.(request.ErrInvalidParams))
15977		}
15978	}
15979
15980	if invalidParams.Len() > 0 {
15981		return invalidParams
15982	}
15983	return nil
15984}
15985
15986// SetClientRequestToken sets the ClientRequestToken field's value.
15987func (s *StartTextDetectionInput) SetClientRequestToken(v string) *StartTextDetectionInput {
15988	s.ClientRequestToken = &v
15989	return s
15990}
15991
15992// SetFilters sets the Filters field's value.
15993func (s *StartTextDetectionInput) SetFilters(v *StartTextDetectionFilters) *StartTextDetectionInput {
15994	s.Filters = v
15995	return s
15996}
15997
15998// SetJobTag sets the JobTag field's value.
15999func (s *StartTextDetectionInput) SetJobTag(v string) *StartTextDetectionInput {
16000	s.JobTag = &v
16001	return s
16002}
16003
16004// SetNotificationChannel sets the NotificationChannel field's value.
16005func (s *StartTextDetectionInput) SetNotificationChannel(v *NotificationChannel) *StartTextDetectionInput {
16006	s.NotificationChannel = v
16007	return s
16008}
16009
16010// SetVideo sets the Video field's value.
16011func (s *StartTextDetectionInput) SetVideo(v *Video) *StartTextDetectionInput {
16012	s.Video = v
16013	return s
16014}
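
// Example (illustrative): building a StartTextDetectionInput for a video stored
// in Amazon S3. The bucket and object names are hypothetical placeholders.
//
//    input := &StartTextDetectionInput{
//        ClientRequestToken: aws.String("start-text-detection-1"),
//        Video: &Video{
//            S3Object: &S3Object{
//                Bucket: aws.String("my-bucket"),
//                Name:   aws.String("videos/sample.mp4"),
//            },
//        },
//        Filters: filters, // see the StartTextDetectionFilters example above
//    }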
16015
16016type StartTextDetectionOutput struct {
16017	_ struct{} `type:"structure"`
16018
16019	// Identifier for the text detection job. Use JobId to identify the job in a
16020	// subsequent call to GetTextDetection.
16021	JobId *string `min:"1" type:"string"`
16022}
16023
16024// String returns the string representation
16025func (s StartTextDetectionOutput) String() string {
16026	return awsutil.Prettify(s)
16027}
16028
16029// GoString returns the string representation
16030func (s StartTextDetectionOutput) GoString() string {
16031	return s.String()
16032}
16033
16034// SetJobId sets the JobId field's value.
16035func (s *StartTextDetectionOutput) SetJobId(v string) *StartTextDetectionOutput {
16036	s.JobId = &v
16037	return s
16038}
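
// Example (illustrative): starting the job and keeping the JobId for a later
// call to GetTextDetection. The svc variable is assumed to be an existing
// *Rekognition client.
//
//    out, err := svc.StartTextDetection(input)
//    if err == nil {
//        jobId := aws.StringValue(out.JobId) // pass to GetTextDetection
//        fmt.Println("started text detection job:", jobId)
//    }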
16039
16040type StopProjectVersionInput struct {
16041	_ struct{} `type:"structure"`
16042
	// The Amazon Resource Name (ARN) of the model version that you want to stop.
16044	//
16045	// This operation requires permissions to perform the rekognition:StopProjectVersion
16046	// action.
16047	//
16048	// ProjectVersionArn is a required field
16049	ProjectVersionArn *string `min:"20" type:"string" required:"true"`
16050}
16051
16052// String returns the string representation
16053func (s StopProjectVersionInput) String() string {
16054	return awsutil.Prettify(s)
16055}
16056
16057// GoString returns the string representation
16058func (s StopProjectVersionInput) GoString() string {
16059	return s.String()
16060}
16061
16062// Validate inspects the fields of the type to determine if they are valid.
16063func (s *StopProjectVersionInput) Validate() error {
16064	invalidParams := request.ErrInvalidParams{Context: "StopProjectVersionInput"}
16065	if s.ProjectVersionArn == nil {
16066		invalidParams.Add(request.NewErrParamRequired("ProjectVersionArn"))
16067	}
16068	if s.ProjectVersionArn != nil && len(*s.ProjectVersionArn) < 20 {
16069		invalidParams.Add(request.NewErrParamMinLen("ProjectVersionArn", 20))
16070	}
16071
16072	if invalidParams.Len() > 0 {
16073		return invalidParams
16074	}
16075	return nil
16076}
16077
16078// SetProjectVersionArn sets the ProjectVersionArn field's value.
16079func (s *StopProjectVersionInput) SetProjectVersionArn(v string) *StopProjectVersionInput {
16080	s.ProjectVersionArn = &v
16081	return s
16082}
16083
16084type StopProjectVersionOutput struct {
16085	_ struct{} `type:"structure"`
16086
16087	// The current status of the stop operation.
16088	Status *string `type:"string" enum:"ProjectVersionStatus"`
16089}
16090
16091// String returns the string representation
16092func (s StopProjectVersionOutput) String() string {
16093	return awsutil.Prettify(s)
16094}
16095
16096// GoString returns the string representation
16097func (s StopProjectVersionOutput) GoString() string {
16098	return s.String()
16099}
16100
16101// SetStatus sets the Status field's value.
16102func (s *StopProjectVersionOutput) SetStatus(v string) *StopProjectVersionOutput {
16103	s.Status = &v
16104	return s
16105}
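
// Example (illustrative): stopping a running model version and inspecting the
// returned status. The ARN is a hypothetical placeholder, and svc is assumed
// to be an existing *Rekognition client.
//
//    out, err := svc.StopProjectVersion(&StopProjectVersionInput{
//        ProjectVersionArn: aws.String("arn:aws:rekognition:us-east-1:111122223333:project/my-project/version/my-version/1"),
//    })
//    if err == nil {
//        fmt.Println("stop status:", aws.StringValue(out.Status)) // e.g. STOPPING
//    }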
16106
16107type StopStreamProcessorInput struct {
16108	_ struct{} `type:"structure"`
16109
16110	// The name of a stream processor created by CreateStreamProcessor.
16111	//
16112	// Name is a required field
16113	Name *string `min:"1" type:"string" required:"true"`
16114}
16115
16116// String returns the string representation
16117func (s StopStreamProcessorInput) String() string {
16118	return awsutil.Prettify(s)
16119}
16120
16121// GoString returns the string representation
16122func (s StopStreamProcessorInput) GoString() string {
16123	return s.String()
16124}
16125
16126// Validate inspects the fields of the type to determine if they are valid.
16127func (s *StopStreamProcessorInput) Validate() error {
16128	invalidParams := request.ErrInvalidParams{Context: "StopStreamProcessorInput"}
16129	if s.Name == nil {
16130		invalidParams.Add(request.NewErrParamRequired("Name"))
16131	}
16132	if s.Name != nil && len(*s.Name) < 1 {
16133		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
16134	}
16135
16136	if invalidParams.Len() > 0 {
16137		return invalidParams
16138	}
16139	return nil
16140}
16141
16142// SetName sets the Name field's value.
16143func (s *StopStreamProcessorInput) SetName(v string) *StopStreamProcessorInput {
16144	s.Name = &v
16145	return s
16146}
16147
16148type StopStreamProcessorOutput struct {
16149	_ struct{} `type:"structure"`
16150}
16151
16152// String returns the string representation
16153func (s StopStreamProcessorOutput) String() string {
16154	return awsutil.Prettify(s)
16155}
16156
16157// GoString returns the string representation
16158func (s StopStreamProcessorOutput) GoString() string {
16159	return s.String()
16160}
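
// Example (illustrative): stopping a stream processor by the name it was given
// in CreateStreamProcessor. The name is a hypothetical placeholder.
//
//    _, err := svc.StopStreamProcessor(&StopStreamProcessorInput{
//        Name: aws.String("my-stream-processor"),
//    })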
16161
16162// An object that recognizes faces in a streaming video. An Amazon Rekognition
16163// stream processor is created by a call to CreateStreamProcessor. The request
16164// parameters for CreateStreamProcessor describe the Kinesis video stream source
16165// for the streaming video, face recognition parameters, and where to stream
// the analysis results.
16167type StreamProcessor struct {
16168	_ struct{} `type:"structure"`
16169
16170	// Name of the Amazon Rekognition stream processor.
16171	Name *string `min:"1" type:"string"`
16172
16173	// Current status of the Amazon Rekognition stream processor.
16174	Status *string `type:"string" enum:"StreamProcessorStatus"`
16175}
16176
16177// String returns the string representation
16178func (s StreamProcessor) String() string {
16179	return awsutil.Prettify(s)
16180}
16181
16182// GoString returns the string representation
16183func (s StreamProcessor) GoString() string {
16184	return s.String()
16185}
16186
16187// SetName sets the Name field's value.
16188func (s *StreamProcessor) SetName(v string) *StreamProcessor {
16189	s.Name = &v
16190	return s
16191}
16192
16193// SetStatus sets the Status field's value.
16194func (s *StreamProcessor) SetStatus(v string) *StreamProcessor {
16195	s.Status = &v
16196	return s
16197}
16198
16199// Information about the source streaming video.
16200type StreamProcessorInput struct {
16201	_ struct{} `type:"structure"`
16202
16203	// The Kinesis video stream input stream for the source streaming video.
16204	KinesisVideoStream *KinesisVideoStream `type:"structure"`
16205}
16206
16207// String returns the string representation
16208func (s StreamProcessorInput) String() string {
16209	return awsutil.Prettify(s)
16210}
16211
16212// GoString returns the string representation
16213func (s StreamProcessorInput) GoString() string {
16214	return s.String()
16215}
16216
16217// SetKinesisVideoStream sets the KinesisVideoStream field's value.
16218func (s *StreamProcessorInput) SetKinesisVideoStream(v *KinesisVideoStream) *StreamProcessorInput {
16219	s.KinesisVideoStream = v
16220	return s
16221}
16222
// Information about the Amazon Kinesis Data Streams stream to which an Amazon
16224// Rekognition Video stream processor streams the results of a video analysis.
16225// For more information, see CreateStreamProcessor in the Amazon Rekognition
16226// Developer Guide.
16227type StreamProcessorOutput struct {
16228	_ struct{} `type:"structure"`
16229
16230	// The Amazon Kinesis Data Streams stream to which the Amazon Rekognition stream
16231	// processor streams the analysis results.
16232	KinesisDataStream *KinesisDataStream `type:"structure"`
16233}
16234
16235// String returns the string representation
16236func (s StreamProcessorOutput) String() string {
16237	return awsutil.Prettify(s)
16238}
16239
16240// GoString returns the string representation
16241func (s StreamProcessorOutput) GoString() string {
16242	return s.String()
16243}
16244
16245// SetKinesisDataStream sets the KinesisDataStream field's value.
16246func (s *StreamProcessorOutput) SetKinesisDataStream(v *KinesisDataStream) *StreamProcessorOutput {
16247	s.KinesisDataStream = v
16248	return s
16249}
16250
16251// Input parameters used to recognize faces in a streaming video analyzed by
// an Amazon Rekognition stream processor.
16253type StreamProcessorSettings struct {
16254	_ struct{} `type:"structure"`
16255
16256	// Face search settings to use on a streaming video.
16257	FaceSearch *FaceSearchSettings `type:"structure"`
16258}
16259
16260// String returns the string representation
16261func (s StreamProcessorSettings) String() string {
16262	return awsutil.Prettify(s)
16263}
16264
16265// GoString returns the string representation
16266func (s StreamProcessorSettings) GoString() string {
16267	return s.String()
16268}
16269
16270// Validate inspects the fields of the type to determine if they are valid.
16271func (s *StreamProcessorSettings) Validate() error {
16272	invalidParams := request.ErrInvalidParams{Context: "StreamProcessorSettings"}
16273	if s.FaceSearch != nil {
16274		if err := s.FaceSearch.Validate(); err != nil {
16275			invalidParams.AddNested("FaceSearch", err.(request.ErrInvalidParams))
16276		}
16277	}
16278
16279	if invalidParams.Len() > 0 {
16280		return invalidParams
16281	}
16282	return nil
16283}
16284
16285// SetFaceSearch sets the FaceSearch field's value.
16286func (s *StreamProcessorSettings) SetFaceSearch(v *FaceSearchSettings) *StreamProcessorSettings {
16287	s.FaceSearch = v
16288	return s
16289}
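
// Example (illustrative): face search settings for a stream processor. The
// collection ID is a hypothetical placeholder; FaceMatchThreshold is a sample
// value.
//
//    settings := &StreamProcessorSettings{
//        FaceSearch: &FaceSearchSettings{
//            CollectionId:       aws.String("my-face-collection"),
//            FaceMatchThreshold: aws.Float64(85.0),
//        },
//    }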
16290
16291// The S3 bucket that contains the training summary. The training summary includes
16292// aggregated evaluation metrics for the entire testing dataset and metrics
16293// for each individual label.
16294//
16295// You get the training summary S3 bucket location by calling DescribeProjectVersions.
16296type Summary struct {
16297	_ struct{} `type:"structure"`
16298
16299	// Provides the S3 bucket name and object name.
16300	//
16301	// The region for the S3 bucket containing the S3 object must match the region
16302	// you use for Amazon Rekognition operations.
16303	//
16304	// For Amazon Rekognition to process an S3 object, the user must have permission
16305	// to access the S3 object. For more information, see Resource-Based Policies
16306	// in the Amazon Rekognition Developer Guide.
16307	S3Object *S3Object `type:"structure"`
16308}
16309
16310// String returns the string representation
16311func (s Summary) String() string {
16312	return awsutil.Prettify(s)
16313}
16314
16315// GoString returns the string representation
16316func (s Summary) GoString() string {
16317	return s.String()
16318}
16319
16320// SetS3Object sets the S3Object field's value.
16321func (s *Summary) SetS3Object(v *S3Object) *Summary {
16322	s.S3Object = v
16323	return s
16324}
16325
16326// Indicates whether or not the face is wearing sunglasses, and the confidence
16327// level in the determination.
16328type Sunglasses struct {
16329	_ struct{} `type:"structure"`
16330
16331	// Level of confidence in the determination.
16332	Confidence *float64 `type:"float"`
16333
16334	// Boolean value that indicates whether the face is wearing sunglasses or not.
16335	Value *bool `type:"boolean"`
16336}
16337
16338// String returns the string representation
16339func (s Sunglasses) String() string {
16340	return awsutil.Prettify(s)
16341}
16342
16343// GoString returns the string representation
16344func (s Sunglasses) GoString() string {
16345	return s.String()
16346}
16347
16348// SetConfidence sets the Confidence field's value.
16349func (s *Sunglasses) SetConfidence(v float64) *Sunglasses {
16350	s.Confidence = &v
16351	return s
16352}
16353
16354// SetValue sets the Value field's value.
16355func (s *Sunglasses) SetValue(v bool) *Sunglasses {
16356	s.Value = &v
16357	return s
16358}
16359
16360// Information about a technical cue segment. For more information, see SegmentDetection.
16361type TechnicalCueSegment struct {
16362	_ struct{} `type:"structure"`
16363
16364	// The confidence that Amazon Rekognition Video has in the accuracy of the detected
16365	// segment.
16366	Confidence *float64 `min:"50" type:"float"`
16367
16368	// The type of the technical cue.
16369	Type *string `type:"string" enum:"TechnicalCueType"`
16370}
16371
16372// String returns the string representation
16373func (s TechnicalCueSegment) String() string {
16374	return awsutil.Prettify(s)
16375}
16376
16377// GoString returns the string representation
16378func (s TechnicalCueSegment) GoString() string {
16379	return s.String()
16380}
16381
16382// SetConfidence sets the Confidence field's value.
16383func (s *TechnicalCueSegment) SetConfidence(v float64) *TechnicalCueSegment {
16384	s.Confidence = &v
16385	return s
16386}
16387
16388// SetType sets the Type field's value.
16389func (s *TechnicalCueSegment) SetType(v string) *TechnicalCueSegment {
16390	s.Type = &v
16391	return s
16392}
16393
16394// The dataset used for testing. Optionally, if AutoCreate is set, Amazon Rekognition
16395// Custom Labels creates a testing dataset using an 80/20 split of the training
16396// dataset.
16397type TestingData struct {
16398	_ struct{} `type:"structure"`
16399
16400	// The assets used for testing.
16401	Assets []*Asset `type:"list"`
16402
16403	// If specified, Amazon Rekognition Custom Labels creates a testing dataset
16404	// with an 80/20 split of the training dataset.
16405	AutoCreate *bool `type:"boolean"`
16406}
16407
16408// String returns the string representation
16409func (s TestingData) String() string {
16410	return awsutil.Prettify(s)
16411}
16412
16413// GoString returns the string representation
16414func (s TestingData) GoString() string {
16415	return s.String()
16416}
16417
16418// Validate inspects the fields of the type to determine if they are valid.
16419func (s *TestingData) Validate() error {
16420	invalidParams := request.ErrInvalidParams{Context: "TestingData"}
16421	if s.Assets != nil {
16422		for i, v := range s.Assets {
16423			if v == nil {
16424				continue
16425			}
16426			if err := v.Validate(); err != nil {
16427				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Assets", i), err.(request.ErrInvalidParams))
16428			}
16429		}
16430	}
16431
16432	if invalidParams.Len() > 0 {
16433		return invalidParams
16434	}
16435	return nil
16436}
16437
16438// SetAssets sets the Assets field's value.
16439func (s *TestingData) SetAssets(v []*Asset) *TestingData {
16440	s.Assets = v
16441	return s
16442}
16443
16444// SetAutoCreate sets the AutoCreate field's value.
16445func (s *TestingData) SetAutoCreate(v bool) *TestingData {
16446	s.AutoCreate = &v
16447	return s
16448}
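
// Example (illustrative): asking Amazon Rekognition Custom Labels to create
// the testing dataset automatically from an 80/20 split of the training data.
//
//    testing := &TestingData{AutoCreate: aws.Bool(true)}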
16449
// SageMaker Ground Truth format manifest files for the input, output, and validation
// datasets that are used and created during testing.
16452type TestingDataResult struct {
16453	_ struct{} `type:"structure"`
16454
16455	// The testing dataset that was supplied for training.
16456	Input *TestingData `type:"structure"`
16457
16458	// The subset of the dataset that was actually tested. Some images (assets)
16459	// might not be tested due to file formatting and other issues.
16460	Output *TestingData `type:"structure"`
16461
16462	// The location of the data validation manifest. The data validation manifest
16463	// is created for the test dataset during model training.
16464	Validation *ValidationData `type:"structure"`
16465}
16466
16467// String returns the string representation
16468func (s TestingDataResult) String() string {
16469	return awsutil.Prettify(s)
16470}
16471
16472// GoString returns the string representation
16473func (s TestingDataResult) GoString() string {
16474	return s.String()
16475}
16476
16477// SetInput sets the Input field's value.
16478func (s *TestingDataResult) SetInput(v *TestingData) *TestingDataResult {
16479	s.Input = v
16480	return s
16481}
16482
16483// SetOutput sets the Output field's value.
16484func (s *TestingDataResult) SetOutput(v *TestingData) *TestingDataResult {
16485	s.Output = v
16486	return s
16487}
16488
16489// SetValidation sets the Validation field's value.
16490func (s *TestingDataResult) SetValidation(v *ValidationData) *TestingDataResult {
16491	s.Validation = v
16492	return s
16493}
16494
16495// Information about a word or line of text detected by DetectText.
16496//
16497// The DetectedText field contains the text that Amazon Rekognition detected
16498// in the image.
16499//
16500// Every word and line has an identifier (Id). Each word belongs to a line and
16501// has a parent identifier (ParentId) that identifies the line of text in which
16502// the word appears. The word Id is also an index for the word within a line
16503// of words.
16504//
16505// For more information, see Detecting Text in the Amazon Rekognition Developer
16506// Guide.
16507type TextDetection struct {
16508	_ struct{} `type:"structure"`
16509
16510	// The confidence that Amazon Rekognition has in the accuracy of the detected
16511	// text and the accuracy of the geometry points around the detected text.
16512	Confidence *float64 `type:"float"`
16513
16514	// The word or line of text recognized by Amazon Rekognition.
16515	DetectedText *string `type:"string"`
16516
16517	// The location of the detected text on the image. Includes an axis aligned
16518	// coarse bounding box surrounding the text and a finer grain polygon for more
16519	// accurate spatial information.
16520	Geometry *Geometry `type:"structure"`
16521
16522	// The identifier for the detected text. The identifier is only unique for a
16523	// single call to DetectText.
16524	Id *int64 `type:"integer"`
16525
	// The parent identifier for the detected text, identified by the value of Id.
	// If the type of detected text is LINE, the value of ParentId is Null.
16528	ParentId *int64 `type:"integer"`
16529
16530	// The type of text that was detected.
16531	Type *string `type:"string" enum:"TextTypes"`
16532}
16533
16534// String returns the string representation
16535func (s TextDetection) String() string {
16536	return awsutil.Prettify(s)
16537}
16538
16539// GoString returns the string representation
16540func (s TextDetection) GoString() string {
16541	return s.String()
16542}
16543
16544// SetConfidence sets the Confidence field's value.
16545func (s *TextDetection) SetConfidence(v float64) *TextDetection {
16546	s.Confidence = &v
16547	return s
16548}
16549
16550// SetDetectedText sets the DetectedText field's value.
16551func (s *TextDetection) SetDetectedText(v string) *TextDetection {
16552	s.DetectedText = &v
16553	return s
16554}
16555
16556// SetGeometry sets the Geometry field's value.
16557func (s *TextDetection) SetGeometry(v *Geometry) *TextDetection {
16558	s.Geometry = v
16559	return s
16560}
16561
16562// SetId sets the Id field's value.
16563func (s *TextDetection) SetId(v int64) *TextDetection {
16564	s.Id = &v
16565	return s
16566}
16567
16568// SetParentId sets the ParentId field's value.
16569func (s *TextDetection) SetParentId(v int64) *TextDetection {
16570	s.ParentId = &v
16571	return s
16572}
16573
16574// SetType sets the Type field's value.
16575func (s *TextDetection) SetType(v string) *TextDetection {
16576	s.Type = &v
16577	return s
16578}
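
// Example (illustrative): printing only the detected lines from a DetectText
// response, skipping the individual WORD entries. The image variable is assumed
// to be a previously built *Image.
//
//    out, err := svc.DetectText(&DetectTextInput{Image: image})
//    if err == nil {
//        for _, td := range out.TextDetections {
//            if aws.StringValue(td.Type) == TextTypesLine {
//                fmt.Println(aws.StringValue(td.DetectedText))
//            }
//        }
//    }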
16579
// Information about text detected in a video. Includes the detected text, the
16581// time in milliseconds from the start of the video that the text was detected,
16582// and where it was detected on the screen.
16583type TextDetectionResult struct {
16584	_ struct{} `type:"structure"`
16585
16586	// Details about text detected in a video.
16587	TextDetection *TextDetection `type:"structure"`
16588
16589	// The time, in milliseconds from the start of the video, that the text was
16590	// detected.
16591	Timestamp *int64 `type:"long"`
16592}
16593
16594// String returns the string representation
16595func (s TextDetectionResult) String() string {
16596	return awsutil.Prettify(s)
16597}
16598
16599// GoString returns the string representation
16600func (s TextDetectionResult) GoString() string {
16601	return s.String()
16602}
16603
16604// SetTextDetection sets the TextDetection field's value.
16605func (s *TextDetectionResult) SetTextDetection(v *TextDetection) *TextDetectionResult {
16606	s.TextDetection = v
16607	return s
16608}
16609
16610// SetTimestamp sets the Timestamp field's value.
16611func (s *TextDetectionResult) SetTimestamp(v int64) *TextDetectionResult {
16612	s.Timestamp = &v
16613	return s
16614}
16615
16616// Amazon Rekognition is temporarily unable to process the request. Try your
16617// call again.
16618type ThrottlingException struct {
16619	_            struct{}                  `type:"structure"`
16620	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
16621
16622	Message_ *string `locationName:"message" type:"string"`
16623}
16624
16625// String returns the string representation
16626func (s ThrottlingException) String() string {
16627	return awsutil.Prettify(s)
16628}
16629
16630// GoString returns the string representation
16631func (s ThrottlingException) GoString() string {
16632	return s.String()
16633}
16634
16635func newErrorThrottlingException(v protocol.ResponseMetadata) error {
16636	return &ThrottlingException{
16637		RespMetadata: v,
16638	}
16639}
16640
16641// Code returns the exception type name.
16642func (s *ThrottlingException) Code() string {
16643	return "ThrottlingException"
16644}
16645
16646// Message returns the exception's message.
16647func (s *ThrottlingException) Message() string {
16648	if s.Message_ != nil {
16649		return *s.Message_
16650	}
16651	return ""
16652}
16653
16654// OrigErr always returns nil, satisfies awserr.Error interface.
16655func (s *ThrottlingException) OrigErr() error {
16656	return nil
16657}
16658
16659func (s *ThrottlingException) Error() string {
16660	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
16661}
16662
// StatusCode returns the HTTP status code for the request's response error.
16664func (s *ThrottlingException) StatusCode() int {
16665	return s.RespMetadata.StatusCode
16666}
16667
// RequestID returns the service's response RequestID for the request.
16669func (s *ThrottlingException) RequestID() string {
16670	return s.RespMetadata.RequestID
16671}
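
// Example (illustrative): detecting a throttling error after the SDK's built-in
// retries are exhausted, by type-asserting the returned error.
//
//    if _, err := svc.DetectText(input); err != nil {
//        if te, ok := err.(*ThrottlingException); ok {
//            fmt.Println("throttled:", te.Message()) // back off and retry later
//        }
//    }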
16672
16673// The dataset used for training.
16674type TrainingData struct {
16675	_ struct{} `type:"structure"`
16676
	// A SageMaker Ground Truth manifest file that contains the training images (assets).
16678	Assets []*Asset `type:"list"`
16679}
16680
16681// String returns the string representation
16682func (s TrainingData) String() string {
16683	return awsutil.Prettify(s)
16684}
16685
16686// GoString returns the string representation
16687func (s TrainingData) GoString() string {
16688	return s.String()
16689}
16690
16691// Validate inspects the fields of the type to determine if they are valid.
16692func (s *TrainingData) Validate() error {
16693	invalidParams := request.ErrInvalidParams{Context: "TrainingData"}
16694	if s.Assets != nil {
16695		for i, v := range s.Assets {
16696			if v == nil {
16697				continue
16698			}
16699			if err := v.Validate(); err != nil {
16700				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Assets", i), err.(request.ErrInvalidParams))
16701			}
16702		}
16703	}
16704
16705	if invalidParams.Len() > 0 {
16706		return invalidParams
16707	}
16708	return nil
16709}
16710
16711// SetAssets sets the Assets field's value.
16712func (s *TrainingData) SetAssets(v []*Asset) *TrainingData {
16713	s.Assets = v
16714	return s
16715}
16716
// SageMaker Ground Truth format manifest files for the input, output, and validation
// datasets that are used and created during training.
16719type TrainingDataResult struct {
16720	_ struct{} `type:"structure"`
16721
16722	// The training assets that you supplied for training.
16723	Input *TrainingData `type:"structure"`
16724
16725	// The images (assets) that were actually trained by Amazon Rekognition Custom
16726	// Labels.
16727	Output *TrainingData `type:"structure"`
16728
16729	// The location of the data validation manifest. The data validation manifest
16730	// is created for the training dataset during model training.
16731	Validation *ValidationData `type:"structure"`
16732}
16733
16734// String returns the string representation
16735func (s TrainingDataResult) String() string {
16736	return awsutil.Prettify(s)
16737}
16738
16739// GoString returns the string representation
16740func (s TrainingDataResult) GoString() string {
16741	return s.String()
16742}
16743
16744// SetInput sets the Input field's value.
16745func (s *TrainingDataResult) SetInput(v *TrainingData) *TrainingDataResult {
16746	s.Input = v
16747	return s
16748}
16749
16750// SetOutput sets the Output field's value.
16751func (s *TrainingDataResult) SetOutput(v *TrainingData) *TrainingDataResult {
16752	s.Output = v
16753	return s
16754}
16755
16756// SetValidation sets the Validation field's value.
16757func (s *TrainingDataResult) SetValidation(v *ValidationData) *TrainingDataResult {
16758	s.Validation = v
16759	return s
16760}
16761
16762// A face that IndexFaces detected, but didn't index. Use the Reasons response
16763// attribute to determine why a face wasn't indexed.
16764type UnindexedFace struct {
16765	_ struct{} `type:"structure"`
16766
	// The structure that contains attributes of a face that IndexFaces detected,
	// but didn't index.
16769	FaceDetail *FaceDetail `type:"structure"`
16770
16771	// An array of reasons that specify why a face wasn't indexed.
16772	//
16773	//    * EXTREME_POSE - The face is at a pose that can't be detected. For example,
16774	//    the head is turned too far away from the camera.
16775	//
16776	//    * EXCEEDS_MAX_FACES - The number of faces detected is already higher than
16777	//    that specified by the MaxFaces input parameter for IndexFaces.
16778	//
16779	//    * LOW_BRIGHTNESS - The image is too dark.
16780	//
16781	//    * LOW_SHARPNESS - The image is too blurry.
16782	//
16783	//    * LOW_CONFIDENCE - The face was detected with a low confidence.
16784	//
16785	//    * SMALL_BOUNDING_BOX - The bounding box around the face is too small.
16786	Reasons []*string `type:"list"`
16787}
16788
16789// String returns the string representation
16790func (s UnindexedFace) String() string {
16791	return awsutil.Prettify(s)
16792}
16793
16794// GoString returns the string representation
16795func (s UnindexedFace) GoString() string {
16796	return s.String()
16797}
16798
16799// SetFaceDetail sets the FaceDetail field's value.
16800func (s *UnindexedFace) SetFaceDetail(v *FaceDetail) *UnindexedFace {
16801	s.FaceDetail = v
16802	return s
16803}
16804
16805// SetReasons sets the Reasons field's value.
16806func (s *UnindexedFace) SetReasons(v []*string) *UnindexedFace {
16807	s.Reasons = v
16808	return s
16809}
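
// Example (illustrative): reporting why faces were skipped by IndexFaces. The
// indexOut variable is assumed to be an *IndexFacesOutput from an earlier call.
//
//    for _, uf := range indexOut.UnindexedFaces {
//        for _, reason := range uf.Reasons {
//            fmt.Println("face not indexed:", aws.StringValue(reason))
//        }
//    }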
16810
16811// Contains the Amazon S3 bucket location of the validation data for a model
16812// training job.
16813//
16814// The validation data includes error information for individual JSON lines
16815// in the dataset. For more information, see Debugging a Failed Model Training
16816// in the Amazon Rekognition Custom Labels Developer Guide.
16817//
16818// You get the ValidationData object for the training dataset (TrainingDataResult)
16819// and the test dataset (TestingDataResult) by calling DescribeProjectVersions.
16820//
16821// The assets array contains a single Asset object. The GroundTruthManifest
16822// field of the Asset object contains the S3 bucket location of the validation
16823// data.
16824type ValidationData struct {
16825	_ struct{} `type:"structure"`
16826
16827	// The assets that comprise the validation data.
16828	Assets []*Asset `type:"list"`
16829}
16830
16831// String returns the string representation
16832func (s ValidationData) String() string {
16833	return awsutil.Prettify(s)
16834}
16835
16836// GoString returns the string representation
16837func (s ValidationData) GoString() string {
16838	return s.String()
16839}
16840
16841// SetAssets sets the Assets field's value.
16842func (s *ValidationData) SetAssets(v []*Asset) *ValidationData {
16843	s.Assets = v
16844	return s
16845}
16846
// Video file stored in an Amazon S3 bucket. Amazon Rekognition Video start
// operations such as StartLabelDetection use Video to specify a video for analysis.
// The supported file formats are .mp4, .mov, and .avi.
16850type Video struct {
16851	_ struct{} `type:"structure"`
16852
16853	// The Amazon S3 bucket name and file name for the video.
16854	S3Object *S3Object `type:"structure"`
16855}
16856
16857// String returns the string representation
16858func (s Video) String() string {
16859	return awsutil.Prettify(s)
16860}
16861
16862// GoString returns the string representation
16863func (s Video) GoString() string {
16864	return s.String()
16865}
16866
16867// Validate inspects the fields of the type to determine if they are valid.
16868func (s *Video) Validate() error {
16869	invalidParams := request.ErrInvalidParams{Context: "Video"}
16870	if s.S3Object != nil {
16871		if err := s.S3Object.Validate(); err != nil {
16872			invalidParams.AddNested("S3Object", err.(request.ErrInvalidParams))
16873		}
16874	}
16875
16876	if invalidParams.Len() > 0 {
16877		return invalidParams
16878	}
16879	return nil
16880}
16881
16882// SetS3Object sets the S3Object field's value.
16883func (s *Video) SetS3Object(v *S3Object) *Video {
16884	s.S3Object = v
16885	return s
16886}
16887
// Information about a video that Amazon Rekognition analyzed. VideoMetadata
// is returned in every page of paginated responses from an Amazon Rekognition
// video operation.
16891type VideoMetadata struct {
16892	_ struct{} `type:"structure"`
16893
16894	// Type of compression used in the analyzed video.
16895	Codec *string `type:"string"`
16896
16897	// Length of the video in milliseconds.
16898	DurationMillis *int64 `type:"long"`
16899
	// Format of the analyzed video. Possible values are MP4, MOV, and AVI.
16901	Format *string `type:"string"`
16902
16903	// Vertical pixel dimension of the video.
16904	FrameHeight *int64 `type:"long"`
16905
16906	// Number of frames per second in the video.
16907	FrameRate *float64 `type:"float"`
16908
16909	// Horizontal pixel dimension of the video.
16910	FrameWidth *int64 `type:"long"`
16911}
16912
16913// String returns the string representation
16914func (s VideoMetadata) String() string {
16915	return awsutil.Prettify(s)
16916}
16917
16918// GoString returns the string representation
16919func (s VideoMetadata) GoString() string {
16920	return s.String()
16921}
16922
16923// SetCodec sets the Codec field's value.
16924func (s *VideoMetadata) SetCodec(v string) *VideoMetadata {
16925	s.Codec = &v
16926	return s
16927}
16928
16929// SetDurationMillis sets the DurationMillis field's value.
16930func (s *VideoMetadata) SetDurationMillis(v int64) *VideoMetadata {
16931	s.DurationMillis = &v
16932	return s
16933}
16934
16935// SetFormat sets the Format field's value.
16936func (s *VideoMetadata) SetFormat(v string) *VideoMetadata {
16937	s.Format = &v
16938	return s
16939}
16940
16941// SetFrameHeight sets the FrameHeight field's value.
16942func (s *VideoMetadata) SetFrameHeight(v int64) *VideoMetadata {
16943	s.FrameHeight = &v
16944	return s
16945}
16946
16947// SetFrameRate sets the FrameRate field's value.
16948func (s *VideoMetadata) SetFrameRate(v float64) *VideoMetadata {
16949	s.FrameRate = &v
16950	return s
16951}
16952
16953// SetFrameWidth sets the FrameWidth field's value.
16954func (s *VideoMetadata) SetFrameWidth(v int64) *VideoMetadata {
16955	s.FrameWidth = &v
16956	return s
16957}
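
// Example (illustrative): converting the reported duration in milliseconds to
// a time.Duration. The meta variable is assumed to be a *VideoMetadata value
// from a Get* video operation response.
//
//    d := time.Duration(aws.Int64Value(meta.DurationMillis)) * time.Millisecond
//    fmt.Println("video length:", d)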
16958
// The file size or duration of the supplied media is too large. The maximum
// file size is 10 GB. The maximum duration is 6 hours.
16961type VideoTooLargeException struct {
16962	_            struct{}                  `type:"structure"`
16963	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
16964
16965	Message_ *string `locationName:"message" type:"string"`
16966}
16967
16968// String returns the string representation
16969func (s VideoTooLargeException) String() string {
16970	return awsutil.Prettify(s)
16971}
16972
16973// GoString returns the string representation
16974func (s VideoTooLargeException) GoString() string {
16975	return s.String()
16976}
16977
16978func newErrorVideoTooLargeException(v protocol.ResponseMetadata) error {
16979	return &VideoTooLargeException{
16980		RespMetadata: v,
16981	}
16982}
16983
16984// Code returns the exception type name.
16985func (s *VideoTooLargeException) Code() string {
16986	return "VideoTooLargeException"
16987}
16988
16989// Message returns the exception's message.
16990func (s *VideoTooLargeException) Message() string {
16991	if s.Message_ != nil {
16992		return *s.Message_
16993	}
16994	return ""
16995}
16996
16997// OrigErr always returns nil, satisfies awserr.Error interface.
16998func (s *VideoTooLargeException) OrigErr() error {
16999	return nil
17000}
17001
17002func (s *VideoTooLargeException) Error() string {
17003	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
17004}
17005
// StatusCode returns the HTTP status code for the request's response error.
17007func (s *VideoTooLargeException) StatusCode() int {
17008	return s.RespMetadata.StatusCode
17009}
17010
// RequestID returns the service's response RequestID for the request.
17012func (s *VideoTooLargeException) RequestID() string {
17013	return s.RespMetadata.RequestID
17014}
17015
17016const (
17017	// AttributeDefault is a Attribute enum value
17018	AttributeDefault = "DEFAULT"
17019
17020	// AttributeAll is a Attribute enum value
17021	AttributeAll = "ALL"
17022)
17023
17024// Attribute_Values returns all elements of the Attribute enum
17025func Attribute_Values() []string {
17026	return []string{
17027		AttributeDefault,
17028		AttributeAll,
17029	}
17030}
17031
17032const (
17033	// BodyPartFace is a BodyPart enum value
17034	BodyPartFace = "FACE"
17035
17036	// BodyPartHead is a BodyPart enum value
17037	BodyPartHead = "HEAD"
17038
17039	// BodyPartLeftHand is a BodyPart enum value
17040	BodyPartLeftHand = "LEFT_HAND"
17041
17042	// BodyPartRightHand is a BodyPart enum value
17043	BodyPartRightHand = "RIGHT_HAND"
17044)
17045
17046// BodyPart_Values returns all elements of the BodyPart enum
17047func BodyPart_Values() []string {
17048	return []string{
17049		BodyPartFace,
17050		BodyPartHead,
17051		BodyPartLeftHand,
17052		BodyPartRightHand,
17053	}
17054}
17055
17056const (
17057	// CelebrityRecognitionSortById is a CelebrityRecognitionSortBy enum value
17058	CelebrityRecognitionSortById = "ID"
17059
17060	// CelebrityRecognitionSortByTimestamp is a CelebrityRecognitionSortBy enum value
17061	CelebrityRecognitionSortByTimestamp = "TIMESTAMP"
17062)
17063
17064// CelebrityRecognitionSortBy_Values returns all elements of the CelebrityRecognitionSortBy enum
17065func CelebrityRecognitionSortBy_Values() []string {
17066	return []string{
17067		CelebrityRecognitionSortById,
17068		CelebrityRecognitionSortByTimestamp,
17069	}
17070}
17071
17072const (
17073	// ContentClassifierFreeOfPersonallyIdentifiableInformation is a ContentClassifier enum value
17074	ContentClassifierFreeOfPersonallyIdentifiableInformation = "FreeOfPersonallyIdentifiableInformation"
17075
17076	// ContentClassifierFreeOfAdultContent is a ContentClassifier enum value
17077	ContentClassifierFreeOfAdultContent = "FreeOfAdultContent"
17078)
17079
17080// ContentClassifier_Values returns all elements of the ContentClassifier enum
17081func ContentClassifier_Values() []string {
17082	return []string{
17083		ContentClassifierFreeOfPersonallyIdentifiableInformation,
17084		ContentClassifierFreeOfAdultContent,
17085	}
17086}
17087
17088const (
17089	// ContentModerationSortByName is a ContentModerationSortBy enum value
17090	ContentModerationSortByName = "NAME"
17091
17092	// ContentModerationSortByTimestamp is a ContentModerationSortBy enum value
17093	ContentModerationSortByTimestamp = "TIMESTAMP"
17094)
17095
17096// ContentModerationSortBy_Values returns all elements of the ContentModerationSortBy enum
17097func ContentModerationSortBy_Values() []string {
17098	return []string{
17099		ContentModerationSortByName,
17100		ContentModerationSortByTimestamp,
17101	}
17102}
17103
17104const (
17105	// EmotionNameHappy is a EmotionName enum value
17106	EmotionNameHappy = "HAPPY"
17107
17108	// EmotionNameSad is a EmotionName enum value
17109	EmotionNameSad = "SAD"
17110
17111	// EmotionNameAngry is a EmotionName enum value
17112	EmotionNameAngry = "ANGRY"
17113
17114	// EmotionNameConfused is a EmotionName enum value
17115	EmotionNameConfused = "CONFUSED"
17116
17117	// EmotionNameDisgusted is a EmotionName enum value
17118	EmotionNameDisgusted = "DISGUSTED"
17119
17120	// EmotionNameSurprised is a EmotionName enum value
17121	EmotionNameSurprised = "SURPRISED"
17122
17123	// EmotionNameCalm is a EmotionName enum value
17124	EmotionNameCalm = "CALM"
17125
17126	// EmotionNameUnknown is a EmotionName enum value
17127	EmotionNameUnknown = "UNKNOWN"
17128
17129	// EmotionNameFear is a EmotionName enum value
17130	EmotionNameFear = "FEAR"
17131)
17132
17133// EmotionName_Values returns all elements of the EmotionName enum
17134func EmotionName_Values() []string {
17135	return []string{
17136		EmotionNameHappy,
17137		EmotionNameSad,
17138		EmotionNameAngry,
17139		EmotionNameConfused,
17140		EmotionNameDisgusted,
17141		EmotionNameSurprised,
17142		EmotionNameCalm,
17143		EmotionNameUnknown,
17144		EmotionNameFear,
17145	}
17146}
17147
17148const (
17149	// FaceAttributesDefault is a FaceAttributes enum value
17150	FaceAttributesDefault = "DEFAULT"
17151
17152	// FaceAttributesAll is a FaceAttributes enum value
17153	FaceAttributesAll = "ALL"
17154)
17155
17156// FaceAttributes_Values returns all elements of the FaceAttributes enum
17157func FaceAttributes_Values() []string {
17158	return []string{
17159		FaceAttributesDefault,
17160		FaceAttributesAll,
17161	}
17162}
17163
17164const (
17165	// FaceSearchSortByIndex is a FaceSearchSortBy enum value
17166	FaceSearchSortByIndex = "INDEX"
17167
17168	// FaceSearchSortByTimestamp is a FaceSearchSortBy enum value
17169	FaceSearchSortByTimestamp = "TIMESTAMP"
17170)
17171
17172// FaceSearchSortBy_Values returns all elements of the FaceSearchSortBy enum
17173func FaceSearchSortBy_Values() []string {
17174	return []string{
17175		FaceSearchSortByIndex,
17176		FaceSearchSortByTimestamp,
17177	}
17178}
17179
17180const (
17181	// GenderTypeMale is a GenderType enum value
17182	GenderTypeMale = "Male"
17183
17184	// GenderTypeFemale is a GenderType enum value
17185	GenderTypeFemale = "Female"
17186)
17187
17188// GenderType_Values returns all elements of the GenderType enum
17189func GenderType_Values() []string {
17190	return []string{
17191		GenderTypeMale,
17192		GenderTypeFemale,
17193	}
17194}
17195
17196const (
17197	// LabelDetectionSortByName is a LabelDetectionSortBy enum value
17198	LabelDetectionSortByName = "NAME"
17199
17200	// LabelDetectionSortByTimestamp is a LabelDetectionSortBy enum value
17201	LabelDetectionSortByTimestamp = "TIMESTAMP"
17202)
17203
17204// LabelDetectionSortBy_Values returns all elements of the LabelDetectionSortBy enum
17205func LabelDetectionSortBy_Values() []string {
17206	return []string{
17207		LabelDetectionSortByName,
17208		LabelDetectionSortByTimestamp,
17209	}
17210}
17211
17212const (
17213	// LandmarkTypeEyeLeft is a LandmarkType enum value
17214	LandmarkTypeEyeLeft = "eyeLeft"
17215
17216	// LandmarkTypeEyeRight is a LandmarkType enum value
17217	LandmarkTypeEyeRight = "eyeRight"
17218
17219	// LandmarkTypeNose is a LandmarkType enum value
17220	LandmarkTypeNose = "nose"
17221
17222	// LandmarkTypeMouthLeft is a LandmarkType enum value
17223	LandmarkTypeMouthLeft = "mouthLeft"
17224
17225	// LandmarkTypeMouthRight is a LandmarkType enum value
17226	LandmarkTypeMouthRight = "mouthRight"
17227
17228	// LandmarkTypeLeftEyeBrowLeft is a LandmarkType enum value
17229	LandmarkTypeLeftEyeBrowLeft = "leftEyeBrowLeft"
17230
17231	// LandmarkTypeLeftEyeBrowRight is a LandmarkType enum value
17232	LandmarkTypeLeftEyeBrowRight = "leftEyeBrowRight"
17233
17234	// LandmarkTypeLeftEyeBrowUp is a LandmarkType enum value
17235	LandmarkTypeLeftEyeBrowUp = "leftEyeBrowUp"
17236
17237	// LandmarkTypeRightEyeBrowLeft is a LandmarkType enum value
17238	LandmarkTypeRightEyeBrowLeft = "rightEyeBrowLeft"
17239
17240	// LandmarkTypeRightEyeBrowRight is a LandmarkType enum value
17241	LandmarkTypeRightEyeBrowRight = "rightEyeBrowRight"
17242
17243	// LandmarkTypeRightEyeBrowUp is a LandmarkType enum value
17244	LandmarkTypeRightEyeBrowUp = "rightEyeBrowUp"
17245
17246	// LandmarkTypeLeftEyeLeft is a LandmarkType enum value
17247	LandmarkTypeLeftEyeLeft = "leftEyeLeft"
17248
17249	// LandmarkTypeLeftEyeRight is a LandmarkType enum value
17250	LandmarkTypeLeftEyeRight = "leftEyeRight"
17251
17252	// LandmarkTypeLeftEyeUp is a LandmarkType enum value
17253	LandmarkTypeLeftEyeUp = "leftEyeUp"
17254
17255	// LandmarkTypeLeftEyeDown is a LandmarkType enum value
17256	LandmarkTypeLeftEyeDown = "leftEyeDown"
17257
17258	// LandmarkTypeRightEyeLeft is a LandmarkType enum value
17259	LandmarkTypeRightEyeLeft = "rightEyeLeft"
17260
17261	// LandmarkTypeRightEyeRight is a LandmarkType enum value
17262	LandmarkTypeRightEyeRight = "rightEyeRight"
17263
17264	// LandmarkTypeRightEyeUp is a LandmarkType enum value
17265	LandmarkTypeRightEyeUp = "rightEyeUp"
17266
17267	// LandmarkTypeRightEyeDown is a LandmarkType enum value
17268	LandmarkTypeRightEyeDown = "rightEyeDown"
17269
17270	// LandmarkTypeNoseLeft is a LandmarkType enum value
17271	LandmarkTypeNoseLeft = "noseLeft"
17272
17273	// LandmarkTypeNoseRight is a LandmarkType enum value
17274	LandmarkTypeNoseRight = "noseRight"
17275
17276	// LandmarkTypeMouthUp is a LandmarkType enum value
17277	LandmarkTypeMouthUp = "mouthUp"
17278
17279	// LandmarkTypeMouthDown is a LandmarkType enum value
17280	LandmarkTypeMouthDown = "mouthDown"
17281
17282	// LandmarkTypeLeftPupil is a LandmarkType enum value
17283	LandmarkTypeLeftPupil = "leftPupil"
17284
17285	// LandmarkTypeRightPupil is a LandmarkType enum value
17286	LandmarkTypeRightPupil = "rightPupil"
17287
17288	// LandmarkTypeUpperJawlineLeft is a LandmarkType enum value
17289	LandmarkTypeUpperJawlineLeft = "upperJawlineLeft"
17290
17291	// LandmarkTypeMidJawlineLeft is a LandmarkType enum value
17292	LandmarkTypeMidJawlineLeft = "midJawlineLeft"
17293
17294	// LandmarkTypeChinBottom is a LandmarkType enum value
17295	LandmarkTypeChinBottom = "chinBottom"
17296
17297	// LandmarkTypeMidJawlineRight is a LandmarkType enum value
17298	LandmarkTypeMidJawlineRight = "midJawlineRight"
17299
17300	// LandmarkTypeUpperJawlineRight is a LandmarkType enum value
17301	LandmarkTypeUpperJawlineRight = "upperJawlineRight"
17302)
17303
17304// LandmarkType_Values returns all elements of the LandmarkType enum
17305func LandmarkType_Values() []string {
17306	return []string{
17307		LandmarkTypeEyeLeft,
17308		LandmarkTypeEyeRight,
17309		LandmarkTypeNose,
17310		LandmarkTypeMouthLeft,
17311		LandmarkTypeMouthRight,
17312		LandmarkTypeLeftEyeBrowLeft,
17313		LandmarkTypeLeftEyeBrowRight,
17314		LandmarkTypeLeftEyeBrowUp,
17315		LandmarkTypeRightEyeBrowLeft,
17316		LandmarkTypeRightEyeBrowRight,
17317		LandmarkTypeRightEyeBrowUp,
17318		LandmarkTypeLeftEyeLeft,
17319		LandmarkTypeLeftEyeRight,
17320		LandmarkTypeLeftEyeUp,
17321		LandmarkTypeLeftEyeDown,
17322		LandmarkTypeRightEyeLeft,
17323		LandmarkTypeRightEyeRight,
17324		LandmarkTypeRightEyeUp,
17325		LandmarkTypeRightEyeDown,
17326		LandmarkTypeNoseLeft,
17327		LandmarkTypeNoseRight,
17328		LandmarkTypeMouthUp,
17329		LandmarkTypeMouthDown,
17330		LandmarkTypeLeftPupil,
17331		LandmarkTypeRightPupil,
17332		LandmarkTypeUpperJawlineLeft,
17333		LandmarkTypeMidJawlineLeft,
17334		LandmarkTypeChinBottom,
17335		LandmarkTypeMidJawlineRight,
17336		LandmarkTypeUpperJawlineRight,
17337	}
17338}
17339
17340const (
17341	// OrientationCorrectionRotate0 is a OrientationCorrection enum value
17342	OrientationCorrectionRotate0 = "ROTATE_0"
17343
17344	// OrientationCorrectionRotate90 is a OrientationCorrection enum value
17345	OrientationCorrectionRotate90 = "ROTATE_90"
17346
17347	// OrientationCorrectionRotate180 is a OrientationCorrection enum value
17348	OrientationCorrectionRotate180 = "ROTATE_180"
17349
17350	// OrientationCorrectionRotate270 is a OrientationCorrection enum value
17351	OrientationCorrectionRotate270 = "ROTATE_270"
17352)
17353
17354// OrientationCorrection_Values returns all elements of the OrientationCorrection enum
17355func OrientationCorrection_Values() []string {
17356	return []string{
17357		OrientationCorrectionRotate0,
17358		OrientationCorrectionRotate90,
17359		OrientationCorrectionRotate180,
17360		OrientationCorrectionRotate270,
17361	}
17362}
17363
17364const (
17365	// PersonTrackingSortByIndex is a PersonTrackingSortBy enum value
17366	PersonTrackingSortByIndex = "INDEX"
17367
17368	// PersonTrackingSortByTimestamp is a PersonTrackingSortBy enum value
17369	PersonTrackingSortByTimestamp = "TIMESTAMP"
17370)
17371
17372// PersonTrackingSortBy_Values returns all elements of the PersonTrackingSortBy enum
17373func PersonTrackingSortBy_Values() []string {
17374	return []string{
17375		PersonTrackingSortByIndex,
17376		PersonTrackingSortByTimestamp,
17377	}
17378}
17379
17380const (
17381	// ProjectStatusCreating is a ProjectStatus enum value
17382	ProjectStatusCreating = "CREATING"
17383
17384	// ProjectStatusCreated is a ProjectStatus enum value
17385	ProjectStatusCreated = "CREATED"
17386
17387	// ProjectStatusDeleting is a ProjectStatus enum value
17388	ProjectStatusDeleting = "DELETING"
17389)
17390
17391// ProjectStatus_Values returns all elements of the ProjectStatus enum
17392func ProjectStatus_Values() []string {
17393	return []string{
17394		ProjectStatusCreating,
17395		ProjectStatusCreated,
17396		ProjectStatusDeleting,
17397	}
17398}
17399
17400const (
17401	// ProjectVersionStatusTrainingInProgress is a ProjectVersionStatus enum value
17402	ProjectVersionStatusTrainingInProgress = "TRAINING_IN_PROGRESS"
17403
17404	// ProjectVersionStatusTrainingCompleted is a ProjectVersionStatus enum value
17405	ProjectVersionStatusTrainingCompleted = "TRAINING_COMPLETED"
17406
17407	// ProjectVersionStatusTrainingFailed is a ProjectVersionStatus enum value
17408	ProjectVersionStatusTrainingFailed = "TRAINING_FAILED"
17409
17410	// ProjectVersionStatusStarting is a ProjectVersionStatus enum value
17411	ProjectVersionStatusStarting = "STARTING"
17412
17413	// ProjectVersionStatusRunning is a ProjectVersionStatus enum value
17414	ProjectVersionStatusRunning = "RUNNING"
17415
17416	// ProjectVersionStatusFailed is a ProjectVersionStatus enum value
17417	ProjectVersionStatusFailed = "FAILED"
17418
17419	// ProjectVersionStatusStopping is a ProjectVersionStatus enum value
17420	ProjectVersionStatusStopping = "STOPPING"
17421
17422	// ProjectVersionStatusStopped is a ProjectVersionStatus enum value
17423	ProjectVersionStatusStopped = "STOPPED"
17424
17425	// ProjectVersionStatusDeleting is a ProjectVersionStatus enum value
17426	ProjectVersionStatusDeleting = "DELETING"
17427)
17428
17429// ProjectVersionStatus_Values returns all elements of the ProjectVersionStatus enum
17430func ProjectVersionStatus_Values() []string {
17431	return []string{
17432		ProjectVersionStatusTrainingInProgress,
17433		ProjectVersionStatusTrainingCompleted,
17434		ProjectVersionStatusTrainingFailed,
17435		ProjectVersionStatusStarting,
17436		ProjectVersionStatusRunning,
17437		ProjectVersionStatusFailed,
17438		ProjectVersionStatusStopping,
17439		ProjectVersionStatusStopped,
17440		ProjectVersionStatusDeleting,
17441	}
17442}
17443
17444const (
17445	// ProtectiveEquipmentTypeFaceCover is a ProtectiveEquipmentType enum value
17446	ProtectiveEquipmentTypeFaceCover = "FACE_COVER"
17447
17448	// ProtectiveEquipmentTypeHandCover is a ProtectiveEquipmentType enum value
17449	ProtectiveEquipmentTypeHandCover = "HAND_COVER"
17450
17451	// ProtectiveEquipmentTypeHeadCover is a ProtectiveEquipmentType enum value
17452	ProtectiveEquipmentTypeHeadCover = "HEAD_COVER"
17453)
17454
17455// ProtectiveEquipmentType_Values returns all elements of the ProtectiveEquipmentType enum
17456func ProtectiveEquipmentType_Values() []string {
17457	return []string{
17458		ProtectiveEquipmentTypeFaceCover,
17459		ProtectiveEquipmentTypeHandCover,
17460		ProtectiveEquipmentTypeHeadCover,
17461	}
17462}
17463
17464const (
17465	// QualityFilterNone is a QualityFilter enum value
17466	QualityFilterNone = "NONE"
17467
17468	// QualityFilterAuto is a QualityFilter enum value
17469	QualityFilterAuto = "AUTO"
17470
17471	// QualityFilterLow is a QualityFilter enum value
17472	QualityFilterLow = "LOW"
17473
17474	// QualityFilterMedium is a QualityFilter enum value
17475	QualityFilterMedium = "MEDIUM"
17476
17477	// QualityFilterHigh is a QualityFilter enum value
17478	QualityFilterHigh = "HIGH"
17479)
17480
17481// QualityFilter_Values returns all elements of the QualityFilter enum
17482func QualityFilter_Values() []string {
17483	return []string{
17484		QualityFilterNone,
17485		QualityFilterAuto,
17486		QualityFilterLow,
17487		QualityFilterMedium,
17488		QualityFilterHigh,
17489	}
17490}
17491
17492const (
17493	// ReasonExceedsMaxFaces is a Reason enum value
17494	ReasonExceedsMaxFaces = "EXCEEDS_MAX_FACES"
17495
17496	// ReasonExtremePose is a Reason enum value
17497	ReasonExtremePose = "EXTREME_POSE"
17498
17499	// ReasonLowBrightness is a Reason enum value
17500	ReasonLowBrightness = "LOW_BRIGHTNESS"
17501
17502	// ReasonLowSharpness is a Reason enum value
17503	ReasonLowSharpness = "LOW_SHARPNESS"
17504
17505	// ReasonLowConfidence is a Reason enum value
17506	ReasonLowConfidence = "LOW_CONFIDENCE"
17507
17508	// ReasonSmallBoundingBox is a Reason enum value
17509	ReasonSmallBoundingBox = "SMALL_BOUNDING_BOX"
17510
17511	// ReasonLowFaceQuality is a Reason enum value
17512	ReasonLowFaceQuality = "LOW_FACE_QUALITY"
17513)
17514
17515// Reason_Values returns all elements of the Reason enum
17516func Reason_Values() []string {
17517	return []string{
17518		ReasonExceedsMaxFaces,
17519		ReasonExtremePose,
17520		ReasonLowBrightness,
17521		ReasonLowSharpness,
17522		ReasonLowConfidence,
17523		ReasonSmallBoundingBox,
17524		ReasonLowFaceQuality,
17525	}
17526}
17527
17528const (
17529	// SegmentTypeTechnicalCue is a SegmentType enum value
17530	SegmentTypeTechnicalCue = "TECHNICAL_CUE"
17531
17532	// SegmentTypeShot is a SegmentType enum value
17533	SegmentTypeShot = "SHOT"
17534)
17535
17536// SegmentType_Values returns all elements of the SegmentType enum
17537func SegmentType_Values() []string {
17538	return []string{
17539		SegmentTypeTechnicalCue,
17540		SegmentTypeShot,
17541	}
17542}
17543
17544const (
17545	// StreamProcessorStatusStopped is a StreamProcessorStatus enum value
17546	StreamProcessorStatusStopped = "STOPPED"
17547
17548	// StreamProcessorStatusStarting is a StreamProcessorStatus enum value
17549	StreamProcessorStatusStarting = "STARTING"
17550
17551	// StreamProcessorStatusRunning is a StreamProcessorStatus enum value
17552	StreamProcessorStatusRunning = "RUNNING"
17553
17554	// StreamProcessorStatusFailed is a StreamProcessorStatus enum value
17555	StreamProcessorStatusFailed = "FAILED"
17556
17557	// StreamProcessorStatusStopping is a StreamProcessorStatus enum value
17558	StreamProcessorStatusStopping = "STOPPING"
17559)
17560
17561// StreamProcessorStatus_Values returns all elements of the StreamProcessorStatus enum
17562func StreamProcessorStatus_Values() []string {
17563	return []string{
17564		StreamProcessorStatusStopped,
17565		StreamProcessorStatusStarting,
17566		StreamProcessorStatusRunning,
17567		StreamProcessorStatusFailed,
17568		StreamProcessorStatusStopping,
17569	}
17570}
17571
17572const (
17573	// TechnicalCueTypeColorBars is a TechnicalCueType enum value
17574	TechnicalCueTypeColorBars = "ColorBars"
17575
17576	// TechnicalCueTypeEndCredits is a TechnicalCueType enum value
17577	TechnicalCueTypeEndCredits = "EndCredits"
17578
17579	// TechnicalCueTypeBlackFrames is a TechnicalCueType enum value
17580	TechnicalCueTypeBlackFrames = "BlackFrames"
17581)
17582
17583// TechnicalCueType_Values returns all elements of the TechnicalCueType enum
17584func TechnicalCueType_Values() []string {
17585	return []string{
17586		TechnicalCueTypeColorBars,
17587		TechnicalCueTypeEndCredits,
17588		TechnicalCueTypeBlackFrames,
17589	}
17590}
17591
17592const (
17593	// TextTypesLine is a TextTypes enum value
17594	TextTypesLine = "LINE"
17595
17596	// TextTypesWord is a TextTypes enum value
17597	TextTypesWord = "WORD"
17598)
17599
17600// TextTypes_Values returns all elements of the TextTypes enum
17601func TextTypes_Values() []string {
17602	return []string{
17603		TextTypesLine,
17604		TextTypesWord,
17605	}
17606}
17607
17608const (
17609	// VideoJobStatusInProgress is a VideoJobStatus enum value
17610	VideoJobStatusInProgress = "IN_PROGRESS"
17611
17612	// VideoJobStatusSucceeded is a VideoJobStatus enum value
17613	VideoJobStatusSucceeded = "SUCCEEDED"
17614
17615	// VideoJobStatusFailed is a VideoJobStatus enum value
17616	VideoJobStatusFailed = "FAILED"
17617)
17618
17619// VideoJobStatus_Values returns all elements of the VideoJobStatus enum
17620func VideoJobStatus_Values() []string {
17621	return []string{
17622		VideoJobStatusInProgress,
17623		VideoJobStatusSucceeded,
17624		VideoJobStatusFailed,
17625	}
17626}
17627