1// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
2
3package rekognition
4
5import (
6	"fmt"
7	"time"
8
9	"github.com/aws/aws-sdk-go/aws"
10	"github.com/aws/aws-sdk-go/aws/awsutil"
11	"github.com/aws/aws-sdk-go/aws/request"
12	"github.com/aws/aws-sdk-go/private/protocol"
13	"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
14)
15
16const opCompareFaces = "CompareFaces"
17
18// CompareFacesRequest generates a "aws/request.Request" representing the
19// client's request for the CompareFaces operation. The "output" return
20// value will be populated with the request's response once the request completes
21// successfully.
22//
23// Use "Send" method on the returned Request to send the API call to the service.
24// the "output" return value is not valid until after Send returns without error.
25//
26// See CompareFaces for more information on using the CompareFaces
27// API call, and error handling.
28//
29// This method is useful when you want to inject custom logic or configuration
30// into the SDK's request lifecycle. Such as custom headers, or retry logic.
31//
32//
33//    // Example sending a request using the CompareFacesRequest method.
34//    req, resp := client.CompareFacesRequest(params)
35//
36//    err := req.Send()
37//    if err == nil { // resp is now filled
38//        fmt.Println(resp)
39//    }
40func (c *Rekognition) CompareFacesRequest(input *CompareFacesInput) (req *request.Request, output *CompareFacesOutput) {
41	op := &request.Operation{
42		Name:       opCompareFaces,
43		HTTPMethod: "POST",
44		HTTPPath:   "/",
45	}
46
47	if input == nil {
48		input = &CompareFacesInput{}
49	}
50
51	output = &CompareFacesOutput{}
52	req = c.newRequest(op, input, output)
53	return
54}
55
56// CompareFaces API operation for Amazon Rekognition.
57//
58// Compares a face in the source input image with each of the 100 largest faces
59// detected in the target input image.
60//
61// If the source image contains multiple faces, the service detects the largest
62// face and compares it with each face detected in the target image.
63//
64// CompareFaces uses machine learning algorithms, which are probabilistic. A
65// false negative is an incorrect prediction that a face in the target image
66// has a low similarity confidence score when compared to the face in the source
67// image. To reduce the probability of false negatives, we recommend that you
68// compare the target image against multiple source images. If you plan to use
69// CompareFaces to make a decision that impacts an individual's rights, privacy,
70// or access to services, we recommend that you pass the result to a human for
71// review and further validation before taking action.
72//
73// You pass the input and target images either as base64-encoded image bytes
74// or as references to images in an Amazon S3 bucket. If you use the AWS CLI
75// to call Amazon Rekognition operations, passing image bytes isn't supported.
76// The image must be formatted as a PNG or JPEG file.
77//
78// In response, the operation returns an array of face matches ordered by similarity
79// score in descending order. For each face match, the response provides a bounding
80// box of the face, facial landmarks, pose details (pitch, role, and yaw), quality
81// (brightness and sharpness), and confidence value (indicating the level of
82// confidence that the bounding box contains a face). The response also provides
83// a similarity score, which indicates how closely the faces match.
84//
85// By default, only faces with a similarity score of greater than or equal to
86// 80% are returned in the response. You can change this value by specifying
87// the SimilarityThreshold parameter.
88//
89// CompareFaces also returns an array of faces that don't match the source image.
90// For each face, it returns a bounding box, confidence value, landmarks, pose
91// details, and quality. The response also returns information about the face
92// in the source image, including the bounding box of the face and confidence
93// value.
94//
95// The QualityFilter input parameter allows you to filter out detected faces
96// that don’t meet a required quality bar. The quality bar is based on a variety
97// of common use cases. Use QualityFilter to set the quality bar by specifying
98// LOW, MEDIUM, or HIGH. If you do not want to filter detected faces, specify
99// NONE. The default value is NONE.
100//
101// If the image doesn't contain Exif metadata, CompareFaces returns orientation
102// information for the source and target images. Use these values to display
103// the images with the correct image orientation.
104//
105// If no faces are detected in the source or target images, CompareFaces returns
106// an InvalidParameterException error.
107//
108// This is a stateless API operation. That is, data returned by this operation
109// doesn't persist.
110//
111// For an example, see Comparing Faces in Images in the Amazon Rekognition Developer
112// Guide.
113//
114// This operation requires permissions to perform the rekognition:CompareFaces
115// action.
116//
117// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
118// with awserr.Error's Code and Message methods to get detailed information about
119// the error.
120//
121// See the AWS API reference guide for Amazon Rekognition's
122// API operation CompareFaces for usage and error information.
123//
124// Returned Error Types:
125//   * InvalidParameterException
126//   Input parameter violated a constraint. Validate your parameter before calling
127//   the API operation again.
128//
129//   * InvalidS3ObjectException
130//   Amazon Rekognition is unable to access the S3 object specified in the request.
131//
132//   * ImageTooLargeException
133//   The input image size exceeds the allowed limit. If you are calling DetectProtectiveEquipment,
134//   the image size or resolution exceeds the allowed limit. For more information,
135//   see Limits in Amazon Rekognition in the Amazon Rekognition Developer Guide.
136//
137//   * AccessDeniedException
138//   You are not authorized to perform the action.
139//
140//   * InternalServerError
141//   Amazon Rekognition experienced a service issue. Try your call again.
142//
143//   * ThrottlingException
144//   Amazon Rekognition is temporarily unable to process the request. Try your
145//   call again.
146//
147//   * ProvisionedThroughputExceededException
148//   The number of requests exceeded your throughput limit. If you want to increase
149//   this limit, contact Amazon Rekognition.
150//
151//   * InvalidImageFormatException
152//   The provided image format is not supported.
153//
154func (c *Rekognition) CompareFaces(input *CompareFacesInput) (*CompareFacesOutput, error) {
155	req, out := c.CompareFacesRequest(input)
156	return out, req.Send()
157}
158
159// CompareFacesWithContext is the same as CompareFaces with the addition of
160// the ability to pass a context and additional request options.
161//
162// See CompareFaces for details on how to use this API operation.
163//
164// The context must be non-nil and will be used for request cancellation. If
165// the context is nil a panic will occur. In the future the SDK may create
166// sub-contexts for http.Requests. See https://golang.org/pkg/context/
167// for more information on using Contexts.
168func (c *Rekognition) CompareFacesWithContext(ctx aws.Context, input *CompareFacesInput, opts ...request.Option) (*CompareFacesOutput, error) {
169	req, out := c.CompareFacesRequest(input)
170	req.SetContext(ctx)
171	req.ApplyOptions(opts...)
172	return out, req.Send()
173}
174
175const opCreateCollection = "CreateCollection"
176
177// CreateCollectionRequest generates a "aws/request.Request" representing the
178// client's request for the CreateCollection operation. The "output" return
179// value will be populated with the request's response once the request completes
180// successfully.
181//
182// Use "Send" method on the returned Request to send the API call to the service.
183// the "output" return value is not valid until after Send returns without error.
184//
185// See CreateCollection for more information on using the CreateCollection
186// API call, and error handling.
187//
188// This method is useful when you want to inject custom logic or configuration
189// into the SDK's request lifecycle. Such as custom headers, or retry logic.
190//
191//
192//    // Example sending a request using the CreateCollectionRequest method.
193//    req, resp := client.CreateCollectionRequest(params)
194//
195//    err := req.Send()
196//    if err == nil { // resp is now filled
197//        fmt.Println(resp)
198//    }
199func (c *Rekognition) CreateCollectionRequest(input *CreateCollectionInput) (req *request.Request, output *CreateCollectionOutput) {
200	op := &request.Operation{
201		Name:       opCreateCollection,
202		HTTPMethod: "POST",
203		HTTPPath:   "/",
204	}
205
206	if input == nil {
207		input = &CreateCollectionInput{}
208	}
209
210	output = &CreateCollectionOutput{}
211	req = c.newRequest(op, input, output)
212	return
213}
214
215// CreateCollection API operation for Amazon Rekognition.
216//
217// Creates a collection in an AWS Region. You can add faces to the collection
218// using the IndexFaces operation.
219//
220// For example, you might create collections, one for each of your application
221// users. A user can then index faces using the IndexFaces operation and persist
222// results in a specific collection. Then, a user can search the collection
223// for faces in the user-specific container.
224//
225// When you create a collection, it is associated with the latest version of
226// the face model version.
227//
228// Collection names are case-sensitive.
229//
230// This operation requires permissions to perform the rekognition:CreateCollection
231// action.
232//
233// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
234// with awserr.Error's Code and Message methods to get detailed information about
235// the error.
236//
237// See the AWS API reference guide for Amazon Rekognition's
238// API operation CreateCollection for usage and error information.
239//
240// Returned Error Types:
241//   * InvalidParameterException
242//   Input parameter violated a constraint. Validate your parameter before calling
243//   the API operation again.
244//
245//   * AccessDeniedException
246//   You are not authorized to perform the action.
247//
248//   * InternalServerError
249//   Amazon Rekognition experienced a service issue. Try your call again.
250//
251//   * ThrottlingException
252//   Amazon Rekognition is temporarily unable to process the request. Try your
253//   call again.
254//
255//   * ProvisionedThroughputExceededException
256//   The number of requests exceeded your throughput limit. If you want to increase
257//   this limit, contact Amazon Rekognition.
258//
259//   * ResourceAlreadyExistsException
260//   A collection with the specified ID already exists.
261//
262//   * ServiceQuotaExceededException
263//   The size of the collection or tag list exceeds the allowed limit. For more
264//   information, see Limits in Amazon Rekognition in the Amazon Rekognition Developer
265//   Guide.
266//
267func (c *Rekognition) CreateCollection(input *CreateCollectionInput) (*CreateCollectionOutput, error) {
268	req, out := c.CreateCollectionRequest(input)
269	return out, req.Send()
270}
271
272// CreateCollectionWithContext is the same as CreateCollection with the addition of
273// the ability to pass a context and additional request options.
274//
275// See CreateCollection for details on how to use this API operation.
276//
277// The context must be non-nil and will be used for request cancellation. If
278// the context is nil a panic will occur. In the future the SDK may create
279// sub-contexts for http.Requests. See https://golang.org/pkg/context/
280// for more information on using Contexts.
281func (c *Rekognition) CreateCollectionWithContext(ctx aws.Context, input *CreateCollectionInput, opts ...request.Option) (*CreateCollectionOutput, error) {
282	req, out := c.CreateCollectionRequest(input)
283	req.SetContext(ctx)
284	req.ApplyOptions(opts...)
285	return out, req.Send()
286}
287
288const opCreateProject = "CreateProject"
289
290// CreateProjectRequest generates a "aws/request.Request" representing the
291// client's request for the CreateProject operation. The "output" return
292// value will be populated with the request's response once the request completes
293// successfully.
294//
295// Use "Send" method on the returned Request to send the API call to the service.
296// the "output" return value is not valid until after Send returns without error.
297//
298// See CreateProject for more information on using the CreateProject
299// API call, and error handling.
300//
301// This method is useful when you want to inject custom logic or configuration
302// into the SDK's request lifecycle. Such as custom headers, or retry logic.
303//
304//
305//    // Example sending a request using the CreateProjectRequest method.
306//    req, resp := client.CreateProjectRequest(params)
307//
308//    err := req.Send()
309//    if err == nil { // resp is now filled
310//        fmt.Println(resp)
311//    }
312func (c *Rekognition) CreateProjectRequest(input *CreateProjectInput) (req *request.Request, output *CreateProjectOutput) {
313	op := &request.Operation{
314		Name:       opCreateProject,
315		HTTPMethod: "POST",
316		HTTPPath:   "/",
317	}
318
319	if input == nil {
320		input = &CreateProjectInput{}
321	}
322
323	output = &CreateProjectOutput{}
324	req = c.newRequest(op, input, output)
325	return
326}
327
328// CreateProject API operation for Amazon Rekognition.
329//
330// Creates a new Amazon Rekognition Custom Labels project. A project is a logical
331// grouping of resources (images, Labels, models) and operations (training,
332// evaluation and detection).
333//
334// This operation requires permissions to perform the rekognition:CreateProject
335// action.
336//
337// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
338// with awserr.Error's Code and Message methods to get detailed information about
339// the error.
340//
341// See the AWS API reference guide for Amazon Rekognition's
342// API operation CreateProject for usage and error information.
343//
344// Returned Error Types:
345//   * ResourceInUseException
346//   The specified resource is already being used.
347//
348//   * LimitExceededException
349//   An Amazon Rekognition service limit was exceeded. For example, if you start
350//   too many Amazon Rekognition Video jobs concurrently, calls to start operations
351//   (StartLabelDetection, for example) will raise a LimitExceededException exception
352//   (HTTP status code: 400) until the number of concurrently running jobs is
353//   below the Amazon Rekognition service limit.
354//
355//   * InvalidParameterException
356//   Input parameter violated a constraint. Validate your parameter before calling
357//   the API operation again.
358//
359//   * AccessDeniedException
360//   You are not authorized to perform the action.
361//
362//   * InternalServerError
363//   Amazon Rekognition experienced a service issue. Try your call again.
364//
365//   * ThrottlingException
366//   Amazon Rekognition is temporarily unable to process the request. Try your
367//   call again.
368//
369//   * ProvisionedThroughputExceededException
370//   The number of requests exceeded your throughput limit. If you want to increase
371//   this limit, contact Amazon Rekognition.
372//
373func (c *Rekognition) CreateProject(input *CreateProjectInput) (*CreateProjectOutput, error) {
374	req, out := c.CreateProjectRequest(input)
375	return out, req.Send()
376}
377
378// CreateProjectWithContext is the same as CreateProject with the addition of
379// the ability to pass a context and additional request options.
380//
381// See CreateProject for details on how to use this API operation.
382//
383// The context must be non-nil and will be used for request cancellation. If
384// the context is nil a panic will occur. In the future the SDK may create
385// sub-contexts for http.Requests. See https://golang.org/pkg/context/
386// for more information on using Contexts.
387func (c *Rekognition) CreateProjectWithContext(ctx aws.Context, input *CreateProjectInput, opts ...request.Option) (*CreateProjectOutput, error) {
388	req, out := c.CreateProjectRequest(input)
389	req.SetContext(ctx)
390	req.ApplyOptions(opts...)
391	return out, req.Send()
392}
393
394const opCreateProjectVersion = "CreateProjectVersion"
395
396// CreateProjectVersionRequest generates a "aws/request.Request" representing the
397// client's request for the CreateProjectVersion operation. The "output" return
398// value will be populated with the request's response once the request completes
399// successfully.
400//
401// Use "Send" method on the returned Request to send the API call to the service.
402// the "output" return value is not valid until after Send returns without error.
403//
404// See CreateProjectVersion for more information on using the CreateProjectVersion
405// API call, and error handling.
406//
407// This method is useful when you want to inject custom logic or configuration
408// into the SDK's request lifecycle. Such as custom headers, or retry logic.
409//
410//
411//    // Example sending a request using the CreateProjectVersionRequest method.
412//    req, resp := client.CreateProjectVersionRequest(params)
413//
414//    err := req.Send()
415//    if err == nil { // resp is now filled
416//        fmt.Println(resp)
417//    }
418func (c *Rekognition) CreateProjectVersionRequest(input *CreateProjectVersionInput) (req *request.Request, output *CreateProjectVersionOutput) {
419	op := &request.Operation{
420		Name:       opCreateProjectVersion,
421		HTTPMethod: "POST",
422		HTTPPath:   "/",
423	}
424
425	if input == nil {
426		input = &CreateProjectVersionInput{}
427	}
428
429	output = &CreateProjectVersionOutput{}
430	req = c.newRequest(op, input, output)
431	return
432}
433
434// CreateProjectVersion API operation for Amazon Rekognition.
435//
436// Creates a new version of a model and begins training. Models are managed
437// as part of an Amazon Rekognition Custom Labels project. You can specify one
438// training dataset and one testing dataset. The response from CreateProjectVersion
439// is an Amazon Resource Name (ARN) for the version of the model.
440//
441// Training takes a while to complete. You can get the current status by calling
442// DescribeProjectVersions.
443//
444// Once training has successfully completed, call DescribeProjectVersions to
445// get the training results and evaluate the model.
446//
447// After evaluating the model, you start the model by calling StartProjectVersion.
448//
449// This operation requires permissions to perform the rekognition:CreateProjectVersion
450// action.
451//
452// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
453// with awserr.Error's Code and Message methods to get detailed information about
454// the error.
455//
456// See the AWS API reference guide for Amazon Rekognition's
457// API operation CreateProjectVersion for usage and error information.
458//
459// Returned Error Types:
460//   * ResourceInUseException
461//   The specified resource is already being used.
462//
463//   * ResourceNotFoundException
464//   The collection specified in the request cannot be found.
465//
466//   * LimitExceededException
467//   An Amazon Rekognition service limit was exceeded. For example, if you start
468//   too many Amazon Rekognition Video jobs concurrently, calls to start operations
469//   (StartLabelDetection, for example) will raise a LimitExceededException exception
470//   (HTTP status code: 400) until the number of concurrently running jobs is
471//   below the Amazon Rekognition service limit.
472//
473//   * InvalidParameterException
474//   Input parameter violated a constraint. Validate your parameter before calling
475//   the API operation again.
476//
477//   * AccessDeniedException
478//   You are not authorized to perform the action.
479//
480//   * InternalServerError
481//   Amazon Rekognition experienced a service issue. Try your call again.
482//
483//   * ThrottlingException
484//   Amazon Rekognition is temporarily unable to process the request. Try your
485//   call again.
486//
487//   * ProvisionedThroughputExceededException
488//   The number of requests exceeded your throughput limit. If you want to increase
489//   this limit, contact Amazon Rekognition.
490//
491//   * ServiceQuotaExceededException
492//   The size of the collection or tag list exceeds the allowed limit. For more
493//   information, see Limits in Amazon Rekognition in the Amazon Rekognition Developer
494//   Guide.
495//
496func (c *Rekognition) CreateProjectVersion(input *CreateProjectVersionInput) (*CreateProjectVersionOutput, error) {
497	req, out := c.CreateProjectVersionRequest(input)
498	return out, req.Send()
499}
500
501// CreateProjectVersionWithContext is the same as CreateProjectVersion with the addition of
502// the ability to pass a context and additional request options.
503//
504// See CreateProjectVersion for details on how to use this API operation.
505//
506// The context must be non-nil and will be used for request cancellation. If
507// the context is nil a panic will occur. In the future the SDK may create
508// sub-contexts for http.Requests. See https://golang.org/pkg/context/
509// for more information on using Contexts.
510func (c *Rekognition) CreateProjectVersionWithContext(ctx aws.Context, input *CreateProjectVersionInput, opts ...request.Option) (*CreateProjectVersionOutput, error) {
511	req, out := c.CreateProjectVersionRequest(input)
512	req.SetContext(ctx)
513	req.ApplyOptions(opts...)
514	return out, req.Send()
515}
516
517const opCreateStreamProcessor = "CreateStreamProcessor"
518
519// CreateStreamProcessorRequest generates a "aws/request.Request" representing the
520// client's request for the CreateStreamProcessor operation. The "output" return
521// value will be populated with the request's response once the request completes
522// successfully.
523//
524// Use "Send" method on the returned Request to send the API call to the service.
525// the "output" return value is not valid until after Send returns without error.
526//
527// See CreateStreamProcessor for more information on using the CreateStreamProcessor
528// API call, and error handling.
529//
530// This method is useful when you want to inject custom logic or configuration
531// into the SDK's request lifecycle. Such as custom headers, or retry logic.
532//
533//
534//    // Example sending a request using the CreateStreamProcessorRequest method.
535//    req, resp := client.CreateStreamProcessorRequest(params)
536//
537//    err := req.Send()
538//    if err == nil { // resp is now filled
539//        fmt.Println(resp)
540//    }
541func (c *Rekognition) CreateStreamProcessorRequest(input *CreateStreamProcessorInput) (req *request.Request, output *CreateStreamProcessorOutput) {
542	op := &request.Operation{
543		Name:       opCreateStreamProcessor,
544		HTTPMethod: "POST",
545		HTTPPath:   "/",
546	}
547
548	if input == nil {
549		input = &CreateStreamProcessorInput{}
550	}
551
552	output = &CreateStreamProcessorOutput{}
553	req = c.newRequest(op, input, output)
554	return
555}
556
557// CreateStreamProcessor API operation for Amazon Rekognition.
558//
559// Creates an Amazon Rekognition stream processor that you can use to detect
560// and recognize faces in a streaming video.
561//
562// Amazon Rekognition Video is a consumer of live video from Amazon Kinesis
563// Video Streams. Amazon Rekognition Video sends analysis results to Amazon
564// Kinesis Data Streams.
565//
566// You provide as input a Kinesis video stream (Input) and a Kinesis data stream
567// (Output) stream. You also specify the face recognition criteria in Settings.
568// For example, the collection containing faces that you want to recognize.
569// Use Name to assign an identifier for the stream processor. You use Name to
570// manage the stream processor. For example, you can start processing the source
571// video by calling StartStreamProcessor with the Name field.
572//
573// After you have finished analyzing a streaming video, use StopStreamProcessor
574// to stop processing. You can delete the stream processor by calling DeleteStreamProcessor.
575//
576// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
577// with awserr.Error's Code and Message methods to get detailed information about
578// the error.
579//
580// See the AWS API reference guide for Amazon Rekognition's
581// API operation CreateStreamProcessor for usage and error information.
582//
583// Returned Error Types:
584//   * AccessDeniedException
585//   You are not authorized to perform the action.
586//
587//   * InternalServerError
588//   Amazon Rekognition experienced a service issue. Try your call again.
589//
590//   * ThrottlingException
591//   Amazon Rekognition is temporarily unable to process the request. Try your
592//   call again.
593//
594//   * InvalidParameterException
595//   Input parameter violated a constraint. Validate your parameter before calling
596//   the API operation again.
597//
598//   * LimitExceededException
599//   An Amazon Rekognition service limit was exceeded. For example, if you start
600//   too many Amazon Rekognition Video jobs concurrently, calls to start operations
601//   (StartLabelDetection, for example) will raise a LimitExceededException exception
602//   (HTTP status code: 400) until the number of concurrently running jobs is
603//   below the Amazon Rekognition service limit.
604//
605//   * ResourceInUseException
606//   The specified resource is already being used.
607//
608//   * ProvisionedThroughputExceededException
609//   The number of requests exceeded your throughput limit. If you want to increase
610//   this limit, contact Amazon Rekognition.
611//
612//   * ServiceQuotaExceededException
613//   The size of the collection or tag list exceeds the allowed limit. For more
614//   information, see Limits in Amazon Rekognition in the Amazon Rekognition Developer
615//   Guide.
616//
617func (c *Rekognition) CreateStreamProcessor(input *CreateStreamProcessorInput) (*CreateStreamProcessorOutput, error) {
618	req, out := c.CreateStreamProcessorRequest(input)
619	return out, req.Send()
620}
621
622// CreateStreamProcessorWithContext is the same as CreateStreamProcessor with the addition of
623// the ability to pass a context and additional request options.
624//
625// See CreateStreamProcessor for details on how to use this API operation.
626//
627// The context must be non-nil and will be used for request cancellation. If
628// the context is nil a panic will occur. In the future the SDK may create
629// sub-contexts for http.Requests. See https://golang.org/pkg/context/
630// for more information on using Contexts.
631func (c *Rekognition) CreateStreamProcessorWithContext(ctx aws.Context, input *CreateStreamProcessorInput, opts ...request.Option) (*CreateStreamProcessorOutput, error) {
632	req, out := c.CreateStreamProcessorRequest(input)
633	req.SetContext(ctx)
634	req.ApplyOptions(opts...)
635	return out, req.Send()
636}
637
638const opDeleteCollection = "DeleteCollection"
639
640// DeleteCollectionRequest generates a "aws/request.Request" representing the
641// client's request for the DeleteCollection operation. The "output" return
642// value will be populated with the request's response once the request completes
643// successfully.
644//
645// Use "Send" method on the returned Request to send the API call to the service.
646// the "output" return value is not valid until after Send returns without error.
647//
648// See DeleteCollection for more information on using the DeleteCollection
649// API call, and error handling.
650//
651// This method is useful when you want to inject custom logic or configuration
652// into the SDK's request lifecycle. Such as custom headers, or retry logic.
653//
654//
655//    // Example sending a request using the DeleteCollectionRequest method.
656//    req, resp := client.DeleteCollectionRequest(params)
657//
658//    err := req.Send()
659//    if err == nil { // resp is now filled
660//        fmt.Println(resp)
661//    }
662func (c *Rekognition) DeleteCollectionRequest(input *DeleteCollectionInput) (req *request.Request, output *DeleteCollectionOutput) {
663	op := &request.Operation{
664		Name:       opDeleteCollection,
665		HTTPMethod: "POST",
666		HTTPPath:   "/",
667	}
668
669	if input == nil {
670		input = &DeleteCollectionInput{}
671	}
672
673	output = &DeleteCollectionOutput{}
674	req = c.newRequest(op, input, output)
675	return
676}
677
678// DeleteCollection API operation for Amazon Rekognition.
679//
680// Deletes the specified collection. Note that this operation removes all faces
681// in the collection. For an example, see delete-collection-procedure.
682//
683// This operation requires permissions to perform the rekognition:DeleteCollection
684// action.
685//
686// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
687// with awserr.Error's Code and Message methods to get detailed information about
688// the error.
689//
690// See the AWS API reference guide for Amazon Rekognition's
691// API operation DeleteCollection for usage and error information.
692//
693// Returned Error Types:
694//   * InvalidParameterException
695//   Input parameter violated a constraint. Validate your parameter before calling
696//   the API operation again.
697//
698//   * AccessDeniedException
699//   You are not authorized to perform the action.
700//
701//   * InternalServerError
702//   Amazon Rekognition experienced a service issue. Try your call again.
703//
704//   * ThrottlingException
705//   Amazon Rekognition is temporarily unable to process the request. Try your
706//   call again.
707//
708//   * ProvisionedThroughputExceededException
709//   The number of requests exceeded your throughput limit. If you want to increase
710//   this limit, contact Amazon Rekognition.
711//
712//   * ResourceNotFoundException
713//   The collection specified in the request cannot be found.
714//
715func (c *Rekognition) DeleteCollection(input *DeleteCollectionInput) (*DeleteCollectionOutput, error) {
716	req, out := c.DeleteCollectionRequest(input)
717	return out, req.Send()
718}
719
720// DeleteCollectionWithContext is the same as DeleteCollection with the addition of
721// the ability to pass a context and additional request options.
722//
723// See DeleteCollection for details on how to use this API operation.
724//
725// The context must be non-nil and will be used for request cancellation. If
726// the context is nil a panic will occur. In the future the SDK may create
727// sub-contexts for http.Requests. See https://golang.org/pkg/context/
728// for more information on using Contexts.
729func (c *Rekognition) DeleteCollectionWithContext(ctx aws.Context, input *DeleteCollectionInput, opts ...request.Option) (*DeleteCollectionOutput, error) {
730	req, out := c.DeleteCollectionRequest(input)
731	req.SetContext(ctx)
732	req.ApplyOptions(opts...)
733	return out, req.Send()
734}
735
736const opDeleteFaces = "DeleteFaces"
737
738// DeleteFacesRequest generates a "aws/request.Request" representing the
739// client's request for the DeleteFaces operation. The "output" return
740// value will be populated with the request's response once the request completes
741// successfully.
742//
743// Use "Send" method on the returned Request to send the API call to the service.
744// the "output" return value is not valid until after Send returns without error.
745//
746// See DeleteFaces for more information on using the DeleteFaces
747// API call, and error handling.
748//
749// This method is useful when you want to inject custom logic or configuration
750// into the SDK's request lifecycle. Such as custom headers, or retry logic.
751//
752//
753//    // Example sending a request using the DeleteFacesRequest method.
754//    req, resp := client.DeleteFacesRequest(params)
755//
756//    err := req.Send()
757//    if err == nil { // resp is now filled
758//        fmt.Println(resp)
759//    }
760func (c *Rekognition) DeleteFacesRequest(input *DeleteFacesInput) (req *request.Request, output *DeleteFacesOutput) {
761	op := &request.Operation{
762		Name:       opDeleteFaces,
763		HTTPMethod: "POST",
764		HTTPPath:   "/",
765	}
766
767	if input == nil {
768		input = &DeleteFacesInput{}
769	}
770
771	output = &DeleteFacesOutput{}
772	req = c.newRequest(op, input, output)
773	return
774}
775
776// DeleteFaces API operation for Amazon Rekognition.
777//
778// Deletes faces from a collection. You specify a collection ID and an array
779// of face IDs to remove from the collection.
780//
781// This operation requires permissions to perform the rekognition:DeleteFaces
782// action.
783//
784// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
785// with awserr.Error's Code and Message methods to get detailed information about
786// the error.
787//
788// See the AWS API reference guide for Amazon Rekognition's
789// API operation DeleteFaces for usage and error information.
790//
791// Returned Error Types:
792//   * InvalidParameterException
793//   Input parameter violated a constraint. Validate your parameter before calling
794//   the API operation again.
795//
796//   * AccessDeniedException
797//   You are not authorized to perform the action.
798//
799//   * InternalServerError
800//   Amazon Rekognition experienced a service issue. Try your call again.
801//
802//   * ThrottlingException
803//   Amazon Rekognition is temporarily unable to process the request. Try your
804//   call again.
805//
806//   * ProvisionedThroughputExceededException
807//   The number of requests exceeded your throughput limit. If you want to increase
808//   this limit, contact Amazon Rekognition.
809//
810//   * ResourceNotFoundException
811//   The collection specified in the request cannot be found.
812//
813func (c *Rekognition) DeleteFaces(input *DeleteFacesInput) (*DeleteFacesOutput, error) {
814	req, out := c.DeleteFacesRequest(input)
815	return out, req.Send()
816}
817
818// DeleteFacesWithContext is the same as DeleteFaces with the addition of
819// the ability to pass a context and additional request options.
820//
821// See DeleteFaces for details on how to use this API operation.
822//
823// The context must be non-nil and will be used for request cancellation. If
824// the context is nil a panic will occur. In the future the SDK may create
825// sub-contexts for http.Requests. See https://golang.org/pkg/context/
826// for more information on using Contexts.
827func (c *Rekognition) DeleteFacesWithContext(ctx aws.Context, input *DeleteFacesInput, opts ...request.Option) (*DeleteFacesOutput, error) {
828	req, out := c.DeleteFacesRequest(input)
829	req.SetContext(ctx)
830	req.ApplyOptions(opts...)
831	return out, req.Send()
832}
833
834const opDeleteProject = "DeleteProject"
835
836// DeleteProjectRequest generates a "aws/request.Request" representing the
837// client's request for the DeleteProject operation. The "output" return
838// value will be populated with the request's response once the request completes
839// successfully.
840//
841// Use "Send" method on the returned Request to send the API call to the service.
842// the "output" return value is not valid until after Send returns without error.
843//
844// See DeleteProject for more information on using the DeleteProject
845// API call, and error handling.
846//
847// This method is useful when you want to inject custom logic or configuration
848// into the SDK's request lifecycle. Such as custom headers, or retry logic.
849//
850//
851//    // Example sending a request using the DeleteProjectRequest method.
852//    req, resp := client.DeleteProjectRequest(params)
853//
854//    err := req.Send()
855//    if err == nil { // resp is now filled
856//        fmt.Println(resp)
857//    }
858func (c *Rekognition) DeleteProjectRequest(input *DeleteProjectInput) (req *request.Request, output *DeleteProjectOutput) {
859	op := &request.Operation{
860		Name:       opDeleteProject,
861		HTTPMethod: "POST",
862		HTTPPath:   "/",
863	}
864
865	if input == nil {
866		input = &DeleteProjectInput{}
867	}
868
869	output = &DeleteProjectOutput{}
870	req = c.newRequest(op, input, output)
871	return
872}
873
874// DeleteProject API operation for Amazon Rekognition.
875//
876// Deletes an Amazon Rekognition Custom Labels project. To delete a project
877// you must first delete all models associated with the project. To delete a
878// model, see DeleteProjectVersion.
879//
880// This operation requires permissions to perform the rekognition:DeleteProject
881// action.
882//
883// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
884// with awserr.Error's Code and Message methods to get detailed information about
885// the error.
886//
887// See the AWS API reference guide for Amazon Rekognition's
888// API operation DeleteProject for usage and error information.
889//
890// Returned Error Types:
891//   * ResourceInUseException
892//   The specified resource is already being used.
893//
894//   * ResourceNotFoundException
895//   The collection specified in the request cannot be found.
896//
897//   * InvalidParameterException
898//   Input parameter violated a constraint. Validate your parameter before calling
899//   the API operation again.
900//
901//   * AccessDeniedException
902//   You are not authorized to perform the action.
903//
904//   * InternalServerError
905//   Amazon Rekognition experienced a service issue. Try your call again.
906//
907//   * ThrottlingException
908//   Amazon Rekognition is temporarily unable to process the request. Try your
909//   call again.
910//
911//   * ProvisionedThroughputExceededException
912//   The number of requests exceeded your throughput limit. If you want to increase
913//   this limit, contact Amazon Rekognition.
914//
915func (c *Rekognition) DeleteProject(input *DeleteProjectInput) (*DeleteProjectOutput, error) {
916	req, out := c.DeleteProjectRequest(input)
917	return out, req.Send()
918}
919
920// DeleteProjectWithContext is the same as DeleteProject with the addition of
921// the ability to pass a context and additional request options.
922//
923// See DeleteProject for details on how to use this API operation.
924//
925// The context must be non-nil and will be used for request cancellation. If
926// the context is nil a panic will occur. In the future the SDK may create
927// sub-contexts for http.Requests. See https://golang.org/pkg/context/
928// for more information on using Contexts.
929func (c *Rekognition) DeleteProjectWithContext(ctx aws.Context, input *DeleteProjectInput, opts ...request.Option) (*DeleteProjectOutput, error) {
930	req, out := c.DeleteProjectRequest(input)
931	req.SetContext(ctx)
932	req.ApplyOptions(opts...)
933	return out, req.Send()
934}
935
936const opDeleteProjectVersion = "DeleteProjectVersion"
937
938// DeleteProjectVersionRequest generates a "aws/request.Request" representing the
939// client's request for the DeleteProjectVersion operation. The "output" return
940// value will be populated with the request's response once the request completes
941// successfully.
942//
943// Use "Send" method on the returned Request to send the API call to the service.
944// the "output" return value is not valid until after Send returns without error.
945//
946// See DeleteProjectVersion for more information on using the DeleteProjectVersion
947// API call, and error handling.
948//
949// This method is useful when you want to inject custom logic or configuration
950// into the SDK's request lifecycle. Such as custom headers, or retry logic.
951//
952//
953//    // Example sending a request using the DeleteProjectVersionRequest method.
954//    req, resp := client.DeleteProjectVersionRequest(params)
955//
956//    err := req.Send()
957//    if err == nil { // resp is now filled
958//        fmt.Println(resp)
959//    }
960func (c *Rekognition) DeleteProjectVersionRequest(input *DeleteProjectVersionInput) (req *request.Request, output *DeleteProjectVersionOutput) {
961	op := &request.Operation{
962		Name:       opDeleteProjectVersion,
963		HTTPMethod: "POST",
964		HTTPPath:   "/",
965	}
966
967	if input == nil {
968		input = &DeleteProjectVersionInput{}
969	}
970
971	output = &DeleteProjectVersionOutput{}
972	req = c.newRequest(op, input, output)
973	return
974}
975
976// DeleteProjectVersion API operation for Amazon Rekognition.
977//
978// Deletes an Amazon Rekognition Custom Labels model.
979//
980// You can't delete a model if it is running or if it is training. To check
981// the status of a model, use the Status field returned from DescribeProjectVersions.
982// To stop a running model call StopProjectVersion. If the model is training,
983// wait until it finishes.
984//
985// This operation requires permissions to perform the rekognition:DeleteProjectVersion
986// action.
987//
988// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
989// with awserr.Error's Code and Message methods to get detailed information about
990// the error.
991//
992// See the AWS API reference guide for Amazon Rekognition's
993// API operation DeleteProjectVersion for usage and error information.
994//
995// Returned Error Types:
996//   * ResourceNotFoundException
997//   The collection specified in the request cannot be found.
998//
999//   * ResourceInUseException
1000//   The specified resource is already being used.
1001//
1002//   * InvalidParameterException
1003//   Input parameter violated a constraint. Validate your parameter before calling
1004//   the API operation again.
1005//
1006//   * AccessDeniedException
1007//   You are not authorized to perform the action.
1008//
1009//   * InternalServerError
1010//   Amazon Rekognition experienced a service issue. Try your call again.
1011//
1012//   * ThrottlingException
1013//   Amazon Rekognition is temporarily unable to process the request. Try your
1014//   call again.
1015//
1016//   * ProvisionedThroughputExceededException
1017//   The number of requests exceeded your throughput limit. If you want to increase
1018//   this limit, contact Amazon Rekognition.
1019//
1020func (c *Rekognition) DeleteProjectVersion(input *DeleteProjectVersionInput) (*DeleteProjectVersionOutput, error) {
1021	req, out := c.DeleteProjectVersionRequest(input)
1022	return out, req.Send()
1023}
1024
1025// DeleteProjectVersionWithContext is the same as DeleteProjectVersion with the addition of
1026// the ability to pass a context and additional request options.
1027//
1028// See DeleteProjectVersion for details on how to use this API operation.
1029//
1030// The context must be non-nil and will be used for request cancellation. If
1031// the context is nil a panic will occur. In the future the SDK may create
1032// sub-contexts for http.Requests. See https://golang.org/pkg/context/
1033// for more information on using Contexts.
1034func (c *Rekognition) DeleteProjectVersionWithContext(ctx aws.Context, input *DeleteProjectVersionInput, opts ...request.Option) (*DeleteProjectVersionOutput, error) {
1035	req, out := c.DeleteProjectVersionRequest(input)
1036	req.SetContext(ctx)
1037	req.ApplyOptions(opts...)
1038	return out, req.Send()
1039}
1040
1041const opDeleteStreamProcessor = "DeleteStreamProcessor"
1042
1043// DeleteStreamProcessorRequest generates a "aws/request.Request" representing the
1044// client's request for the DeleteStreamProcessor operation. The "output" return
1045// value will be populated with the request's response once the request completes
1046// successfully.
1047//
1048// Use "Send" method on the returned Request to send the API call to the service.
1049// the "output" return value is not valid until after Send returns without error.
1050//
1051// See DeleteStreamProcessor for more information on using the DeleteStreamProcessor
1052// API call, and error handling.
1053//
1054// This method is useful when you want to inject custom logic or configuration
1055// into the SDK's request lifecycle. Such as custom headers, or retry logic.
1056//
1057//
1058//    // Example sending a request using the DeleteStreamProcessorRequest method.
1059//    req, resp := client.DeleteStreamProcessorRequest(params)
1060//
1061//    err := req.Send()
1062//    if err == nil { // resp is now filled
1063//        fmt.Println(resp)
1064//    }
1065func (c *Rekognition) DeleteStreamProcessorRequest(input *DeleteStreamProcessorInput) (req *request.Request, output *DeleteStreamProcessorOutput) {
1066	op := &request.Operation{
1067		Name:       opDeleteStreamProcessor,
1068		HTTPMethod: "POST",
1069		HTTPPath:   "/",
1070	}
1071
1072	if input == nil {
1073		input = &DeleteStreamProcessorInput{}
1074	}
1075
1076	output = &DeleteStreamProcessorOutput{}
1077	req = c.newRequest(op, input, output)
1078	req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
1079	return
1080}
1081
1082// DeleteStreamProcessor API operation for Amazon Rekognition.
1083//
1084// Deletes the stream processor identified by Name. You assign the value for
1085// Name when you create the stream processor with CreateStreamProcessor. You
1086// might not be able to use the same name for a stream processor for a few seconds
1087// after calling DeleteStreamProcessor.
1088//
1089// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
1090// with awserr.Error's Code and Message methods to get detailed information about
1091// the error.
1092//
1093// See the AWS API reference guide for Amazon Rekognition's
1094// API operation DeleteStreamProcessor for usage and error information.
1095//
1096// Returned Error Types:
1097//   * AccessDeniedException
1098//   You are not authorized to perform the action.
1099//
1100//   * InternalServerError
1101//   Amazon Rekognition experienced a service issue. Try your call again.
1102//
1103//   * ThrottlingException
1104//   Amazon Rekognition is temporarily unable to process the request. Try your
1105//   call again.
1106//
1107//   * InvalidParameterException
1108//   Input parameter violated a constraint. Validate your parameter before calling
1109//   the API operation again.
1110//
1111//   * ResourceNotFoundException
1112//   The collection specified in the request cannot be found.
1113//
1114//   * ResourceInUseException
1115//   The specified resource is already being used.
1116//
1117//   * ProvisionedThroughputExceededException
1118//   The number of requests exceeded your throughput limit. If you want to increase
1119//   this limit, contact Amazon Rekognition.
1120//
1121func (c *Rekognition) DeleteStreamProcessor(input *DeleteStreamProcessorInput) (*DeleteStreamProcessorOutput, error) {
1122	req, out := c.DeleteStreamProcessorRequest(input)
1123	return out, req.Send()
1124}
1125
1126// DeleteStreamProcessorWithContext is the same as DeleteStreamProcessor with the addition of
1127// the ability to pass a context and additional request options.
1128//
1129// See DeleteStreamProcessor for details on how to use this API operation.
1130//
1131// The context must be non-nil and will be used for request cancellation. If
1132// the context is nil a panic will occur. In the future the SDK may create
1133// sub-contexts for http.Requests. See https://golang.org/pkg/context/
1134// for more information on using Contexts.
1135func (c *Rekognition) DeleteStreamProcessorWithContext(ctx aws.Context, input *DeleteStreamProcessorInput, opts ...request.Option) (*DeleteStreamProcessorOutput, error) {
1136	req, out := c.DeleteStreamProcessorRequest(input)
1137	req.SetContext(ctx)
1138	req.ApplyOptions(opts...)
1139	return out, req.Send()
1140}
1141
1142const opDescribeCollection = "DescribeCollection"
1143
1144// DescribeCollectionRequest generates a "aws/request.Request" representing the
1145// client's request for the DescribeCollection operation. The "output" return
1146// value will be populated with the request's response once the request completes
1147// successfully.
1148//
1149// Use "Send" method on the returned Request to send the API call to the service.
1150// the "output" return value is not valid until after Send returns without error.
1151//
1152// See DescribeCollection for more information on using the DescribeCollection
1153// API call, and error handling.
1154//
1155// This method is useful when you want to inject custom logic or configuration
1156// into the SDK's request lifecycle. Such as custom headers, or retry logic.
1157//
1158//
1159//    // Example sending a request using the DescribeCollectionRequest method.
1160//    req, resp := client.DescribeCollectionRequest(params)
1161//
1162//    err := req.Send()
1163//    if err == nil { // resp is now filled
1164//        fmt.Println(resp)
1165//    }
1166func (c *Rekognition) DescribeCollectionRequest(input *DescribeCollectionInput) (req *request.Request, output *DescribeCollectionOutput) {
1167	op := &request.Operation{
1168		Name:       opDescribeCollection,
1169		HTTPMethod: "POST",
1170		HTTPPath:   "/",
1171	}
1172
1173	if input == nil {
1174		input = &DescribeCollectionInput{}
1175	}
1176
1177	output = &DescribeCollectionOutput{}
1178	req = c.newRequest(op, input, output)
1179	return
1180}
1181
1182// DescribeCollection API operation for Amazon Rekognition.
1183//
1184// Describes the specified collection. You can use DescribeCollection to get
1185// information, such as the number of faces indexed into a collection and the
1186// version of the model used by the collection for face detection.
1187//
1188// For more information, see Describing a Collection in the Amazon Rekognition
1189// Developer Guide.
1190//
1191// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
1192// with awserr.Error's Code and Message methods to get detailed information about
1193// the error.
1194//
1195// See the AWS API reference guide for Amazon Rekognition's
1196// API operation DescribeCollection for usage and error information.
1197//
1198// Returned Error Types:
1199//   * InvalidParameterException
1200//   Input parameter violated a constraint. Validate your parameter before calling
1201//   the API operation again.
1202//
1203//   * AccessDeniedException
1204//   You are not authorized to perform the action.
1205//
1206//   * InternalServerError
1207//   Amazon Rekognition experienced a service issue. Try your call again.
1208//
1209//   * ThrottlingException
1210//   Amazon Rekognition is temporarily unable to process the request. Try your
1211//   call again.
1212//
1213//   * ProvisionedThroughputExceededException
1214//   The number of requests exceeded your throughput limit. If you want to increase
1215//   this limit, contact Amazon Rekognition.
1216//
1217//   * ResourceNotFoundException
1218//   The collection specified in the request cannot be found.
1219//
1220func (c *Rekognition) DescribeCollection(input *DescribeCollectionInput) (*DescribeCollectionOutput, error) {
1221	req, out := c.DescribeCollectionRequest(input)
1222	return out, req.Send()
1223}
1224
1225// DescribeCollectionWithContext is the same as DescribeCollection with the addition of
1226// the ability to pass a context and additional request options.
1227//
1228// See DescribeCollection for details on how to use this API operation.
1229//
1230// The context must be non-nil and will be used for request cancellation. If
1231// the context is nil a panic will occur. In the future the SDK may create
1232// sub-contexts for http.Requests. See https://golang.org/pkg/context/
1233// for more information on using Contexts.
1234func (c *Rekognition) DescribeCollectionWithContext(ctx aws.Context, input *DescribeCollectionInput, opts ...request.Option) (*DescribeCollectionOutput, error) {
1235	req, out := c.DescribeCollectionRequest(input)
1236	req.SetContext(ctx)
1237	req.ApplyOptions(opts...)
1238	return out, req.Send()
1239}
1240
1241const opDescribeProjectVersions = "DescribeProjectVersions"
1242
1243// DescribeProjectVersionsRequest generates a "aws/request.Request" representing the
1244// client's request for the DescribeProjectVersions operation. The "output" return
1245// value will be populated with the request's response once the request completes
1246// successfully.
1247//
1248// Use "Send" method on the returned Request to send the API call to the service.
1249// the "output" return value is not valid until after Send returns without error.
1250//
1251// See DescribeProjectVersions for more information on using the DescribeProjectVersions
1252// API call, and error handling.
1253//
1254// This method is useful when you want to inject custom logic or configuration
1255// into the SDK's request lifecycle. Such as custom headers, or retry logic.
1256//
1257//
1258//    // Example sending a request using the DescribeProjectVersionsRequest method.
1259//    req, resp := client.DescribeProjectVersionsRequest(params)
1260//
1261//    err := req.Send()
1262//    if err == nil { // resp is now filled
1263//        fmt.Println(resp)
1264//    }
1265func (c *Rekognition) DescribeProjectVersionsRequest(input *DescribeProjectVersionsInput) (req *request.Request, output *DescribeProjectVersionsOutput) {
1266	op := &request.Operation{
1267		Name:       opDescribeProjectVersions,
1268		HTTPMethod: "POST",
1269		HTTPPath:   "/",
1270		Paginator: &request.Paginator{
1271			InputTokens:     []string{"NextToken"},
1272			OutputTokens:    []string{"NextToken"},
1273			LimitToken:      "MaxResults",
1274			TruncationToken: "",
1275		},
1276	}
1277
1278	if input == nil {
1279		input = &DescribeProjectVersionsInput{}
1280	}
1281
1282	output = &DescribeProjectVersionsOutput{}
1283	req = c.newRequest(op, input, output)
1284	return
1285}
1286
1287// DescribeProjectVersions API operation for Amazon Rekognition.
1288//
1289// Lists and describes the models in an Amazon Rekognition Custom Labels project.
1290// You can specify up to 10 model versions in ProjectVersionArns. If you don't
1291// specify a value, descriptions for all models are returned.
1292//
1293// This operation requires permissions to perform the rekognition:DescribeProjectVersions
1294// action.
1295//
1296// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
1297// with awserr.Error's Code and Message methods to get detailed information about
1298// the error.
1299//
1300// See the AWS API reference guide for Amazon Rekognition's
1301// API operation DescribeProjectVersions for usage and error information.
1302//
1303// Returned Error Types:
1304//   * ResourceNotFoundException
1305//   The collection specified in the request cannot be found.
1306//
1307//   * InvalidPaginationTokenException
1308//   Pagination token in the request is not valid.
1309//
1310//   * InvalidParameterException
1311//   Input parameter violated a constraint. Validate your parameter before calling
1312//   the API operation again.
1313//
1314//   * AccessDeniedException
1315//   You are not authorized to perform the action.
1316//
1317//   * InternalServerError
1318//   Amazon Rekognition experienced a service issue. Try your call again.
1319//
1320//   * ThrottlingException
1321//   Amazon Rekognition is temporarily unable to process the request. Try your
1322//   call again.
1323//
1324//   * ProvisionedThroughputExceededException
1325//   The number of requests exceeded your throughput limit. If you want to increase
1326//   this limit, contact Amazon Rekognition.
1327//
1328func (c *Rekognition) DescribeProjectVersions(input *DescribeProjectVersionsInput) (*DescribeProjectVersionsOutput, error) {
1329	req, out := c.DescribeProjectVersionsRequest(input)
1330	return out, req.Send()
1331}
1332
1333// DescribeProjectVersionsWithContext is the same as DescribeProjectVersions with the addition of
1334// the ability to pass a context and additional request options.
1335//
1336// See DescribeProjectVersions for details on how to use this API operation.
1337//
1338// The context must be non-nil and will be used for request cancellation. If
1339// the context is nil a panic will occur. In the future the SDK may create
1340// sub-contexts for http.Requests. See https://golang.org/pkg/context/
1341// for more information on using Contexts.
1342func (c *Rekognition) DescribeProjectVersionsWithContext(ctx aws.Context, input *DescribeProjectVersionsInput, opts ...request.Option) (*DescribeProjectVersionsOutput, error) {
1343	req, out := c.DescribeProjectVersionsRequest(input)
1344	req.SetContext(ctx)
1345	req.ApplyOptions(opts...)
1346	return out, req.Send()
1347}
1348
1349// DescribeProjectVersionsPages iterates over the pages of a DescribeProjectVersions operation,
1350// calling the "fn" function with the response data for each page. To stop
1351// iterating, return false from the fn function.
1352//
1353// See DescribeProjectVersions method for more information on how to use this operation.
1354//
1355// Note: This operation can generate multiple requests to a service.
1356//
1357//    // Example iterating over at most 3 pages of a DescribeProjectVersions operation.
1358//    pageNum := 0
1359//    err := client.DescribeProjectVersionsPages(params,
1360//        func(page *rekognition.DescribeProjectVersionsOutput, lastPage bool) bool {
1361//            pageNum++
1362//            fmt.Println(page)
1363//            return pageNum <= 3
1364//        })
1365//
1366func (c *Rekognition) DescribeProjectVersionsPages(input *DescribeProjectVersionsInput, fn func(*DescribeProjectVersionsOutput, bool) bool) error {
1367	return c.DescribeProjectVersionsPagesWithContext(aws.BackgroundContext(), input, fn)
1368}
1369
1370// DescribeProjectVersionsPagesWithContext same as DescribeProjectVersionsPages except
1371// it takes a Context and allows setting request options on the pages.
1372//
1373// The context must be non-nil and will be used for request cancellation. If
1374// the context is nil a panic will occur. In the future the SDK may create
1375// sub-contexts for http.Requests. See https://golang.org/pkg/context/
1376// for more information on using Contexts.
1377func (c *Rekognition) DescribeProjectVersionsPagesWithContext(ctx aws.Context, input *DescribeProjectVersionsInput, fn func(*DescribeProjectVersionsOutput, bool) bool, opts ...request.Option) error {
1378	p := request.Pagination{
1379		NewRequest: func() (*request.Request, error) {
1380			var inCpy *DescribeProjectVersionsInput
1381			if input != nil {
1382				tmp := *input
1383				inCpy = &tmp
1384			}
1385			req, _ := c.DescribeProjectVersionsRequest(inCpy)
1386			req.SetContext(ctx)
1387			req.ApplyOptions(opts...)
1388			return req, nil
1389		},
1390	}
1391
1392	for p.Next() {
1393		if !fn(p.Page().(*DescribeProjectVersionsOutput), !p.HasNextPage()) {
1394			break
1395		}
1396	}
1397
1398	return p.Err()
1399}
1400
1401const opDescribeProjects = "DescribeProjects"
1402
1403// DescribeProjectsRequest generates a "aws/request.Request" representing the
1404// client's request for the DescribeProjects operation. The "output" return
1405// value will be populated with the request's response once the request completes
1406// successfully.
1407//
1408// Use "Send" method on the returned Request to send the API call to the service.
1409// the "output" return value is not valid until after Send returns without error.
1410//
1411// See DescribeProjects for more information on using the DescribeProjects
1412// API call, and error handling.
1413//
1414// This method is useful when you want to inject custom logic or configuration
1415// into the SDK's request lifecycle. Such as custom headers, or retry logic.
1416//
1417//
1418//    // Example sending a request using the DescribeProjectsRequest method.
1419//    req, resp := client.DescribeProjectsRequest(params)
1420//
1421//    err := req.Send()
1422//    if err == nil { // resp is now filled
1423//        fmt.Println(resp)
1424//    }
1425func (c *Rekognition) DescribeProjectsRequest(input *DescribeProjectsInput) (req *request.Request, output *DescribeProjectsOutput) {
1426	op := &request.Operation{
1427		Name:       opDescribeProjects,
1428		HTTPMethod: "POST",
1429		HTTPPath:   "/",
1430		Paginator: &request.Paginator{
1431			InputTokens:     []string{"NextToken"},
1432			OutputTokens:    []string{"NextToken"},
1433			LimitToken:      "MaxResults",
1434			TruncationToken: "",
1435		},
1436	}
1437
1438	if input == nil {
1439		input = &DescribeProjectsInput{}
1440	}
1441
1442	output = &DescribeProjectsOutput{}
1443	req = c.newRequest(op, input, output)
1444	return
1445}
1446
1447// DescribeProjects API operation for Amazon Rekognition.
1448//
1449// Lists and gets information about your Amazon Rekognition Custom Labels projects.
1450//
1451// This operation requires permissions to perform the rekognition:DescribeProjects
1452// action.
1453//
1454// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
1455// with awserr.Error's Code and Message methods to get detailed information about
1456// the error.
1457//
1458// See the AWS API reference guide for Amazon Rekognition's
1459// API operation DescribeProjects for usage and error information.
1460//
1461// Returned Error Types:
1462//   * InvalidPaginationTokenException
1463//   Pagination token in the request is not valid.
1464//
1465//   * InvalidParameterException
1466//   Input parameter violated a constraint. Validate your parameter before calling
1467//   the API operation again.
1468//
1469//   * AccessDeniedException
1470//   You are not authorized to perform the action.
1471//
1472//   * InternalServerError
1473//   Amazon Rekognition experienced a service issue. Try your call again.
1474//
1475//   * ThrottlingException
1476//   Amazon Rekognition is temporarily unable to process the request. Try your
1477//   call again.
1478//
1479//   * ProvisionedThroughputExceededException
1480//   The number of requests exceeded your throughput limit. If you want to increase
1481//   this limit, contact Amazon Rekognition.
1482//
1483func (c *Rekognition) DescribeProjects(input *DescribeProjectsInput) (*DescribeProjectsOutput, error) {
1484	req, out := c.DescribeProjectsRequest(input)
1485	return out, req.Send()
1486}
1487
1488// DescribeProjectsWithContext is the same as DescribeProjects with the addition of
1489// the ability to pass a context and additional request options.
1490//
1491// See DescribeProjects for details on how to use this API operation.
1492//
1493// The context must be non-nil and will be used for request cancellation. If
1494// the context is nil a panic will occur. In the future the SDK may create
1495// sub-contexts for http.Requests. See https://golang.org/pkg/context/
1496// for more information on using Contexts.
1497func (c *Rekognition) DescribeProjectsWithContext(ctx aws.Context, input *DescribeProjectsInput, opts ...request.Option) (*DescribeProjectsOutput, error) {
1498	req, out := c.DescribeProjectsRequest(input)
1499	req.SetContext(ctx)
1500	req.ApplyOptions(opts...)
1501	return out, req.Send()
1502}
1503
1504// DescribeProjectsPages iterates over the pages of a DescribeProjects operation,
1505// calling the "fn" function with the response data for each page. To stop
1506// iterating, return false from the fn function.
1507//
1508// See DescribeProjects method for more information on how to use this operation.
1509//
1510// Note: This operation can generate multiple requests to a service.
1511//
1512//    // Example iterating over at most 3 pages of a DescribeProjects operation.
1513//    pageNum := 0
1514//    err := client.DescribeProjectsPages(params,
1515//        func(page *rekognition.DescribeProjectsOutput, lastPage bool) bool {
1516//            pageNum++
1517//            fmt.Println(page)
1518//            return pageNum <= 3
1519//        })
1520//
1521func (c *Rekognition) DescribeProjectsPages(input *DescribeProjectsInput, fn func(*DescribeProjectsOutput, bool) bool) error {
1522	return c.DescribeProjectsPagesWithContext(aws.BackgroundContext(), input, fn)
1523}
1524
1525// DescribeProjectsPagesWithContext same as DescribeProjectsPages except
1526// it takes a Context and allows setting request options on the pages.
1527//
1528// The context must be non-nil and will be used for request cancellation. If
1529// the context is nil a panic will occur. In the future the SDK may create
1530// sub-contexts for http.Requests. See https://golang.org/pkg/context/
1531// for more information on using Contexts.
1532func (c *Rekognition) DescribeProjectsPagesWithContext(ctx aws.Context, input *DescribeProjectsInput, fn func(*DescribeProjectsOutput, bool) bool, opts ...request.Option) error {
1533	p := request.Pagination{
1534		NewRequest: func() (*request.Request, error) {
1535			var inCpy *DescribeProjectsInput
1536			if input != nil {
1537				tmp := *input
1538				inCpy = &tmp
1539			}
1540			req, _ := c.DescribeProjectsRequest(inCpy)
1541			req.SetContext(ctx)
1542			req.ApplyOptions(opts...)
1543			return req, nil
1544		},
1545	}
1546
1547	for p.Next() {
1548		if !fn(p.Page().(*DescribeProjectsOutput), !p.HasNextPage()) {
1549			break
1550		}
1551	}
1552
1553	return p.Err()
1554}
1555
1556const opDescribeStreamProcessor = "DescribeStreamProcessor"
1557
1558// DescribeStreamProcessorRequest generates a "aws/request.Request" representing the
1559// client's request for the DescribeStreamProcessor operation. The "output" return
1560// value will be populated with the request's response once the request completes
1561// successfully.
1562//
1563// Use "Send" method on the returned Request to send the API call to the service.
1564// the "output" return value is not valid until after Send returns without error.
1565//
1566// See DescribeStreamProcessor for more information on using the DescribeStreamProcessor
1567// API call, and error handling.
1568//
1569// This method is useful when you want to inject custom logic or configuration
1570// into the SDK's request lifecycle. Such as custom headers, or retry logic.
1571//
1572//
1573//    // Example sending a request using the DescribeStreamProcessorRequest method.
1574//    req, resp := client.DescribeStreamProcessorRequest(params)
1575//
1576//    err := req.Send()
1577//    if err == nil { // resp is now filled
1578//        fmt.Println(resp)
1579//    }
1580func (c *Rekognition) DescribeStreamProcessorRequest(input *DescribeStreamProcessorInput) (req *request.Request, output *DescribeStreamProcessorOutput) {
1581	op := &request.Operation{
1582		Name:       opDescribeStreamProcessor,
1583		HTTPMethod: "POST",
1584		HTTPPath:   "/",
1585	}
1586
1587	if input == nil {
1588		input = &DescribeStreamProcessorInput{}
1589	}
1590
1591	output = &DescribeStreamProcessorOutput{}
1592	req = c.newRequest(op, input, output)
1593	return
1594}
1595
1596// DescribeStreamProcessor API operation for Amazon Rekognition.
1597//
1598// Provides information about a stream processor created by CreateStreamProcessor.
1599// You can get information about the input and output streams, the input parameters
1600// for the face recognition being performed, and the current status of the stream
1601// processor.
1602//
1603// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
1604// with awserr.Error's Code and Message methods to get detailed information about
1605// the error.
1606//
1607// See the AWS API reference guide for Amazon Rekognition's
1608// API operation DescribeStreamProcessor for usage and error information.
1609//
1610// Returned Error Types:
1611//   * AccessDeniedException
1612//   You are not authorized to perform the action.
1613//
1614//   * InternalServerError
1615//   Amazon Rekognition experienced a service issue. Try your call again.
1616//
1617//   * ThrottlingException
1618//   Amazon Rekognition is temporarily unable to process the request. Try your
1619//   call again.
1620//
1621//   * InvalidParameterException
1622//   Input parameter violated a constraint. Validate your parameter before calling
1623//   the API operation again.
1624//
1625//   * ResourceNotFoundException
1626//   The collection specified in the request cannot be found.
1627//
1628//   * ProvisionedThroughputExceededException
1629//   The number of requests exceeded your throughput limit. If you want to increase
1630//   this limit, contact Amazon Rekognition.
1631//
1632func (c *Rekognition) DescribeStreamProcessor(input *DescribeStreamProcessorInput) (*DescribeStreamProcessorOutput, error) {
1633	req, out := c.DescribeStreamProcessorRequest(input)
1634	return out, req.Send()
1635}
1636
1637// DescribeStreamProcessorWithContext is the same as DescribeStreamProcessor with the addition of
1638// the ability to pass a context and additional request options.
1639//
1640// See DescribeStreamProcessor for details on how to use this API operation.
1641//
1642// The context must be non-nil and will be used for request cancellation. If
1643// the context is nil a panic will occur. In the future the SDK may create
1644// sub-contexts for http.Requests. See https://golang.org/pkg/context/
1645// for more information on using Contexts.
1646func (c *Rekognition) DescribeStreamProcessorWithContext(ctx aws.Context, input *DescribeStreamProcessorInput, opts ...request.Option) (*DescribeStreamProcessorOutput, error) {
1647	req, out := c.DescribeStreamProcessorRequest(input)
1648	req.SetContext(ctx)
1649	req.ApplyOptions(opts...)
1650	return out, req.Send()
1651}
1652
1653const opDetectCustomLabels = "DetectCustomLabels"
1654
1655// DetectCustomLabelsRequest generates a "aws/request.Request" representing the
1656// client's request for the DetectCustomLabels operation. The "output" return
1657// value will be populated with the request's response once the request completes
1658// successfully.
1659//
1660// Use "Send" method on the returned Request to send the API call to the service.
1661// the "output" return value is not valid until after Send returns without error.
1662//
1663// See DetectCustomLabels for more information on using the DetectCustomLabels
1664// API call, and error handling.
1665//
1666// This method is useful when you want to inject custom logic or configuration
1667// into the SDK's request lifecycle. Such as custom headers, or retry logic.
1668//
1669//
1670//    // Example sending a request using the DetectCustomLabelsRequest method.
1671//    req, resp := client.DetectCustomLabelsRequest(params)
1672//
1673//    err := req.Send()
1674//    if err == nil { // resp is now filled
1675//        fmt.Println(resp)
1676//    }
1677func (c *Rekognition) DetectCustomLabelsRequest(input *DetectCustomLabelsInput) (req *request.Request, output *DetectCustomLabelsOutput) {
1678	op := &request.Operation{
1679		Name:       opDetectCustomLabels,
1680		HTTPMethod: "POST",
1681		HTTPPath:   "/",
1682	}
1683
1684	if input == nil {
1685		input = &DetectCustomLabelsInput{}
1686	}
1687
1688	output = &DetectCustomLabelsOutput{}
1689	req = c.newRequest(op, input, output)
1690	return
1691}
1692
1693// DetectCustomLabels API operation for Amazon Rekognition.
1694//
1695// Detects custom labels in a supplied image by using an Amazon Rekognition
1696// Custom Labels model.
1697//
1698// You specify which version of a model version to use by using the ProjectVersionArn
1699// input parameter.
1700//
1701// You pass the input image as base64-encoded image bytes or as a reference
1702// to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon
1703// Rekognition operations, passing image bytes is not supported. The image must
1704// be either a PNG or JPEG formatted file.
1705//
1706// For each object that the model version detects on an image, the API returns
1707// a (CustomLabel) object in an array (CustomLabels). Each CustomLabel object
1708// provides the label name (Name), the level of confidence that the image contains
1709// the object (Confidence), and object location information, if it exists, for
1710// the label on the image (Geometry).
1711//
1712// During training model calculates a threshold value that determines if a prediction
1713// for a label is true. By default, DetectCustomLabels doesn't return labels
1714// whose confidence value is below the model's calculated threshold value. To
1715// filter labels that are returned, specify a value for MinConfidence that is
1716// higher than the model's calculated threshold. You can get the model's calculated
1717// threshold from the model's training results shown in the Amazon Rekognition
1718// Custom Labels console. To get all labels, regardless of confidence, specify
1719// a MinConfidence value of 0.
1720//
1721// You can also add the MaxResults parameter to limit the number of labels returned.
1722//
1723// This is a stateless API operation. That is, the operation does not persist
1724// any data.
1725//
1726// This operation requires permissions to perform the rekognition:DetectCustomLabels
1727// action.
1728//
1729// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
1730// with awserr.Error's Code and Message methods to get detailed information about
1731// the error.
1732//
1733// See the AWS API reference guide for Amazon Rekognition's
1734// API operation DetectCustomLabels for usage and error information.
1735//
1736// Returned Error Types:
1737//   * ResourceNotFoundException
1738//   The collection specified in the request cannot be found.
1739//
1740//   * ResourceNotReadyException
1741//   The requested resource isn't ready. For example, this exception occurs when
1742//   you call DetectCustomLabels with a model version that isn't deployed.
1743//
1744//   * InvalidS3ObjectException
1745//   Amazon Rekognition is unable to access the S3 object specified in the request.
1746//
1747//   * InvalidParameterException
1748//   Input parameter violated a constraint. Validate your parameter before calling
1749//   the API operation again.
1750//
1751//   * ImageTooLargeException
1752//   The input image size exceeds the allowed limit. If you are calling DetectProtectiveEquipment,
1753//   the image size or resolution exceeds the allowed limit. For more information,
1754//   see Limits in Amazon Rekognition in the Amazon Rekognition Developer Guide.
1755//
1756//   * LimitExceededException
1757//   An Amazon Rekognition service limit was exceeded. For example, if you start
1758//   too many Amazon Rekognition Video jobs concurrently, calls to start operations
1759//   (StartLabelDetection, for example) will raise a LimitExceededException exception
1760//   (HTTP status code: 400) until the number of concurrently running jobs is
1761//   below the Amazon Rekognition service limit.
1762//
1763//   * AccessDeniedException
1764//   You are not authorized to perform the action.
1765//
1766//   * InternalServerError
1767//   Amazon Rekognition experienced a service issue. Try your call again.
1768//
1769//   * ThrottlingException
1770//   Amazon Rekognition is temporarily unable to process the request. Try your
1771//   call again.
1772//
1773//   * ProvisionedThroughputExceededException
1774//   The number of requests exceeded your throughput limit. If you want to increase
1775//   this limit, contact Amazon Rekognition.
1776//
1777//   * InvalidImageFormatException
1778//   The provided image format is not supported.
1779//
1780func (c *Rekognition) DetectCustomLabels(input *DetectCustomLabelsInput) (*DetectCustomLabelsOutput, error) {
1781	req, out := c.DetectCustomLabelsRequest(input)
1782	return out, req.Send()
1783}
1784
1785// DetectCustomLabelsWithContext is the same as DetectCustomLabels with the addition of
1786// the ability to pass a context and additional request options.
1787//
1788// See DetectCustomLabels for details on how to use this API operation.
1789//
1790// The context must be non-nil and will be used for request cancellation. If
1791// the context is nil a panic will occur. In the future the SDK may create
1792// sub-contexts for http.Requests. See https://golang.org/pkg/context/
1793// for more information on using Contexts.
1794func (c *Rekognition) DetectCustomLabelsWithContext(ctx aws.Context, input *DetectCustomLabelsInput, opts ...request.Option) (*DetectCustomLabelsOutput, error) {
1795	req, out := c.DetectCustomLabelsRequest(input)
1796	req.SetContext(ctx)
1797	req.ApplyOptions(opts...)
1798	return out, req.Send()
1799}
1800
1801const opDetectFaces = "DetectFaces"
1802
1803// DetectFacesRequest generates a "aws/request.Request" representing the
1804// client's request for the DetectFaces operation. The "output" return
1805// value will be populated with the request's response once the request completes
1806// successfully.
1807//
1808// Use "Send" method on the returned Request to send the API call to the service.
1809// the "output" return value is not valid until after Send returns without error.
1810//
1811// See DetectFaces for more information on using the DetectFaces
1812// API call, and error handling.
1813//
1814// This method is useful when you want to inject custom logic or configuration
1815// into the SDK's request lifecycle. Such as custom headers, or retry logic.
1816//
1817//
1818//    // Example sending a request using the DetectFacesRequest method.
1819//    req, resp := client.DetectFacesRequest(params)
1820//
1821//    err := req.Send()
1822//    if err == nil { // resp is now filled
1823//        fmt.Println(resp)
1824//    }
1825func (c *Rekognition) DetectFacesRequest(input *DetectFacesInput) (req *request.Request, output *DetectFacesOutput) {
1826	op := &request.Operation{
1827		Name:       opDetectFaces,
1828		HTTPMethod: "POST",
1829		HTTPPath:   "/",
1830	}
1831
1832	if input == nil {
1833		input = &DetectFacesInput{}
1834	}
1835
1836	output = &DetectFacesOutput{}
1837	req = c.newRequest(op, input, output)
1838	return
1839}
1840
1841// DetectFaces API operation for Amazon Rekognition.
1842//
1843// Detects faces within an image that is provided as input.
1844//
1845// DetectFaces detects the 100 largest faces in the image. For each face detected,
1846// the operation returns face details. These details include a bounding box
1847// of the face, a confidence value (that the bounding box contains a face),
1848// and a fixed set of attributes such as facial landmarks (for example, coordinates
1849// of eye and mouth), presence of beard, sunglasses, and so on.
1850//
1851// The face-detection algorithm is most effective on frontal faces. For non-frontal
1852// or obscured faces, the algorithm might not detect the faces or might detect
1853// faces with lower confidence.
1854//
1855// You pass the input image either as base64-encoded image bytes or as a reference
1856// to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon
1857// Rekognition operations, passing image bytes is not supported. The image must
1858// be either a PNG or JPEG formatted file.
1859//
1860// This is a stateless API operation. That is, the operation does not persist
1861// any data.
1862//
1863// This operation requires permissions to perform the rekognition:DetectFaces
1864// action.
1865//
1866// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
1867// with awserr.Error's Code and Message methods to get detailed information about
1868// the error.
1869//
1870// See the AWS API reference guide for Amazon Rekognition's
1871// API operation DetectFaces for usage and error information.
1872//
1873// Returned Error Types:
1874//   * InvalidS3ObjectException
1875//   Amazon Rekognition is unable to access the S3 object specified in the request.
1876//
1877//   * InvalidParameterException
1878//   Input parameter violated a constraint. Validate your parameter before calling
1879//   the API operation again.
1880//
1881//   * ImageTooLargeException
1882//   The input image size exceeds the allowed limit. If you are calling DetectProtectiveEquipment,
1883//   the image size or resolution exceeds the allowed limit. For more information,
1884//   see Limits in Amazon Rekognition in the Amazon Rekognition Developer Guide.
1885//
1886//   * AccessDeniedException
1887//   You are not authorized to perform the action.
1888//
1889//   * InternalServerError
1890//   Amazon Rekognition experienced a service issue. Try your call again.
1891//
1892//   * ThrottlingException
1893//   Amazon Rekognition is temporarily unable to process the request. Try your
1894//   call again.
1895//
1896//   * ProvisionedThroughputExceededException
1897//   The number of requests exceeded your throughput limit. If you want to increase
1898//   this limit, contact Amazon Rekognition.
1899//
1900//   * InvalidImageFormatException
1901//   The provided image format is not supported.
1902//
1903func (c *Rekognition) DetectFaces(input *DetectFacesInput) (*DetectFacesOutput, error) {
1904	req, out := c.DetectFacesRequest(input)
1905	return out, req.Send()
1906}
1907
1908// DetectFacesWithContext is the same as DetectFaces with the addition of
1909// the ability to pass a context and additional request options.
1910//
1911// See DetectFaces for details on how to use this API operation.
1912//
1913// The context must be non-nil and will be used for request cancellation. If
1914// the context is nil a panic will occur. In the future the SDK may create
1915// sub-contexts for http.Requests. See https://golang.org/pkg/context/
1916// for more information on using Contexts.
1917func (c *Rekognition) DetectFacesWithContext(ctx aws.Context, input *DetectFacesInput, opts ...request.Option) (*DetectFacesOutput, error) {
1918	req, out := c.DetectFacesRequest(input)
1919	req.SetContext(ctx)
1920	req.ApplyOptions(opts...)
1921	return out, req.Send()
1922}
1923
1924const opDetectLabels = "DetectLabels"
1925
1926// DetectLabelsRequest generates a "aws/request.Request" representing the
1927// client's request for the DetectLabels operation. The "output" return
1928// value will be populated with the request's response once the request completes
1929// successfully.
1930//
1931// Use "Send" method on the returned Request to send the API call to the service.
1932// the "output" return value is not valid until after Send returns without error.
1933//
1934// See DetectLabels for more information on using the DetectLabels
1935// API call, and error handling.
1936//
1937// This method is useful when you want to inject custom logic or configuration
1938// into the SDK's request lifecycle. Such as custom headers, or retry logic.
1939//
1940//
1941//    // Example sending a request using the DetectLabelsRequest method.
1942//    req, resp := client.DetectLabelsRequest(params)
1943//
1944//    err := req.Send()
1945//    if err == nil { // resp is now filled
1946//        fmt.Println(resp)
1947//    }
1948func (c *Rekognition) DetectLabelsRequest(input *DetectLabelsInput) (req *request.Request, output *DetectLabelsOutput) {
1949	op := &request.Operation{
1950		Name:       opDetectLabels,
1951		HTTPMethod: "POST",
1952		HTTPPath:   "/",
1953	}
1954
1955	if input == nil {
1956		input = &DetectLabelsInput{}
1957	}
1958
1959	output = &DetectLabelsOutput{}
1960	req = c.newRequest(op, input, output)
1961	return
1962}
1963
1964// DetectLabels API operation for Amazon Rekognition.
1965//
1966// Detects instances of real-world entities within an image (JPEG or PNG) provided
1967// as input. This includes objects like flower, tree, and table; events like
1968// wedding, graduation, and birthday party; and concepts like landscape, evening,
1969// and nature.
1970//
1971// For an example, see Analyzing Images Stored in an Amazon S3 Bucket in the
1972// Amazon Rekognition Developer Guide.
1973//
1974// DetectLabels does not support the detection of activities. However, activity
1975// detection is supported for label detection in videos. For more information,
1976// see StartLabelDetection in the Amazon Rekognition Developer Guide.
1977//
1978// You pass the input image as base64-encoded image bytes or as a reference
1979// to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon
1980// Rekognition operations, passing image bytes is not supported. The image must
1981// be either a PNG or JPEG formatted file.
1982//
1983// For each object, scene, and concept the API returns one or more labels. Each
1984// label provides the object name, and the level of confidence that the image
1985// contains the object. For example, suppose the input image has a lighthouse,
1986// the sea, and a rock. The response includes all three labels, one for each
1987// object.
1988//
1989// {Name: lighthouse, Confidence: 98.4629}
1990//
1991// {Name: rock,Confidence: 79.2097}
1992//
1993// {Name: sea,Confidence: 75.061}
1994//
1995// In the preceding example, the operation returns one label for each of the
1996// three objects. The operation can also return multiple labels for the same
1997// object in the image. For example, if the input image shows a flower (for
1998// example, a tulip), the operation might return the following three labels.
1999//
2000// {Name: flower,Confidence: 99.0562}
2001//
2002// {Name: plant,Confidence: 99.0562}
2003//
2004// {Name: tulip,Confidence: 99.0562}
2005//
2006// In this example, the detection algorithm more precisely identifies the flower
2007// as a tulip.
2008//
2009// In response, the API returns an array of labels. In addition, the response
2010// also includes the orientation correction. Optionally, you can specify MinConfidence
2011// to control the confidence threshold for the labels returned. The default
2012// is 55%. You can also add the MaxLabels parameter to limit the number of labels
2013// returned.
2014//
2015// If the object detected is a person, the operation doesn't provide the same
2016// facial details that the DetectFaces operation provides.
2017//
2018// DetectLabels returns bounding boxes for instances of common object labels
2019// in an array of Instance objects. An Instance object contains a BoundingBox
2020// object, for the location of the label on the image. It also includes the
2021// confidence by which the bounding box was detected.
2022//
2023// DetectLabels also returns a hierarchical taxonomy of detected labels. For
2024// example, a detected car might be assigned the label car. The label car has
2025// two parent labels: Vehicle (its parent) and Transportation (its grandparent).
2026// The response returns the entire list of ancestors for a label. Each ancestor
2027// is a unique label in the response. In the previous example, Car, Vehicle,
2028// and Transportation are returned as unique labels in the response.
2029//
2030// This is a stateless API operation. That is, the operation does not persist
2031// any data.
2032//
2033// This operation requires permissions to perform the rekognition:DetectLabels
2034// action.
2035//
2036// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
2037// with awserr.Error's Code and Message methods to get detailed information about
2038// the error.
2039//
2040// See the AWS API reference guide for Amazon Rekognition's
2041// API operation DetectLabels for usage and error information.
2042//
2043// Returned Error Types:
2044//   * InvalidS3ObjectException
2045//   Amazon Rekognition is unable to access the S3 object specified in the request.
2046//
2047//   * InvalidParameterException
2048//   Input parameter violated a constraint. Validate your parameter before calling
2049//   the API operation again.
2050//
2051//   * ImageTooLargeException
2052//   The input image size exceeds the allowed limit. If you are calling DetectProtectiveEquipment,
2053//   the image size or resolution exceeds the allowed limit. For more information,
2054//   see Limits in Amazon Rekognition in the Amazon Rekognition Developer Guide.
2055//
2056//   * AccessDeniedException
2057//   You are not authorized to perform the action.
2058//
2059//   * InternalServerError
2060//   Amazon Rekognition experienced a service issue. Try your call again.
2061//
2062//   * ThrottlingException
2063//   Amazon Rekognition is temporarily unable to process the request. Try your
2064//   call again.
2065//
2066//   * ProvisionedThroughputExceededException
2067//   The number of requests exceeded your throughput limit. If you want to increase
2068//   this limit, contact Amazon Rekognition.
2069//
2070//   * InvalidImageFormatException
2071//   The provided image format is not supported.
2072//
2073func (c *Rekognition) DetectLabels(input *DetectLabelsInput) (*DetectLabelsOutput, error) {
2074	req, out := c.DetectLabelsRequest(input)
2075	return out, req.Send()
2076}
2077
2078// DetectLabelsWithContext is the same as DetectLabels with the addition of
2079// the ability to pass a context and additional request options.
2080//
2081// See DetectLabels for details on how to use this API operation.
2082//
2083// The context must be non-nil and will be used for request cancellation. If
2084// the context is nil a panic will occur. In the future the SDK may create
2085// sub-contexts for http.Requests. See https://golang.org/pkg/context/
2086// for more information on using Contexts.
2087func (c *Rekognition) DetectLabelsWithContext(ctx aws.Context, input *DetectLabelsInput, opts ...request.Option) (*DetectLabelsOutput, error) {
2088	req, out := c.DetectLabelsRequest(input)
2089	req.SetContext(ctx)
2090	req.ApplyOptions(opts...)
2091	return out, req.Send()
2092}
2093
2094const opDetectModerationLabels = "DetectModerationLabels"
2095
2096// DetectModerationLabelsRequest generates a "aws/request.Request" representing the
2097// client's request for the DetectModerationLabels operation. The "output" return
2098// value will be populated with the request's response once the request completes
2099// successfully.
2100//
2101// Use "Send" method on the returned Request to send the API call to the service.
2102// the "output" return value is not valid until after Send returns without error.
2103//
2104// See DetectModerationLabels for more information on using the DetectModerationLabels
2105// API call, and error handling.
2106//
2107// This method is useful when you want to inject custom logic or configuration
2108// into the SDK's request lifecycle. Such as custom headers, or retry logic.
2109//
2110//
2111//    // Example sending a request using the DetectModerationLabelsRequest method.
2112//    req, resp := client.DetectModerationLabelsRequest(params)
2113//
2114//    err := req.Send()
2115//    if err == nil { // resp is now filled
2116//        fmt.Println(resp)
2117//    }
2118func (c *Rekognition) DetectModerationLabelsRequest(input *DetectModerationLabelsInput) (req *request.Request, output *DetectModerationLabelsOutput) {
2119	op := &request.Operation{
2120		Name:       opDetectModerationLabels,
2121		HTTPMethod: "POST",
2122		HTTPPath:   "/",
2123	}
2124
2125	if input == nil {
2126		input = &DetectModerationLabelsInput{}
2127	}
2128
2129	output = &DetectModerationLabelsOutput{}
2130	req = c.newRequest(op, input, output)
2131	return
2132}
2133
2134// DetectModerationLabels API operation for Amazon Rekognition.
2135//
2136// Detects unsafe content in a specified JPEG or PNG format image. Use DetectModerationLabels
2137// to moderate images depending on your requirements. For example, you might
2138// want to filter images that contain nudity, but not images containing suggestive
2139// content.
2140//
2141// To filter images, use the labels returned by DetectModerationLabels to determine
2142// which types of content are appropriate.
2143//
2144// For information about moderation labels, see Detecting Unsafe Content in
2145// the Amazon Rekognition Developer Guide.
2146//
2147// You pass the input image either as base64-encoded image bytes or as a reference
2148// to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon
2149// Rekognition operations, passing image bytes is not supported. The image must
2150// be either a PNG or JPEG formatted file.
2151//
2152// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
2153// with awserr.Error's Code and Message methods to get detailed information about
2154// the error.
2155//
2156// See the AWS API reference guide for Amazon Rekognition's
2157// API operation DetectModerationLabels for usage and error information.
2158//
2159// Returned Error Types:
2160//   * InvalidS3ObjectException
2161//   Amazon Rekognition is unable to access the S3 object specified in the request.
2162//
2163//   * InvalidParameterException
2164//   Input parameter violated a constraint. Validate your parameter before calling
2165//   the API operation again.
2166//
2167//   * ImageTooLargeException
2168//   The input image size exceeds the allowed limit. If you are calling DetectProtectiveEquipment,
2169//   the image size or resolution exceeds the allowed limit. For more information,
2170//   see Limits in Amazon Rekognition in the Amazon Rekognition Developer Guide.
2171//
2172//   * AccessDeniedException
2173//   You are not authorized to perform the action.
2174//
2175//   * InternalServerError
2176//   Amazon Rekognition experienced a service issue. Try your call again.
2177//
2178//   * ThrottlingException
2179//   Amazon Rekognition is temporarily unable to process the request. Try your
2180//   call again.
2181//
2182//   * ProvisionedThroughputExceededException
2183//   The number of requests exceeded your throughput limit. If you want to increase
2184//   this limit, contact Amazon Rekognition.
2185//
2186//   * InvalidImageFormatException
2187//   The provided image format is not supported.
2188//
2189//   * HumanLoopQuotaExceededException
2190//   The number of in-progress human reviews you have has exceeded the number
2191//   allowed.
2192//
2193func (c *Rekognition) DetectModerationLabels(input *DetectModerationLabelsInput) (*DetectModerationLabelsOutput, error) {
2194	req, out := c.DetectModerationLabelsRequest(input)
2195	return out, req.Send()
2196}
2197
2198// DetectModerationLabelsWithContext is the same as DetectModerationLabels with the addition of
2199// the ability to pass a context and additional request options.
2200//
2201// See DetectModerationLabels for details on how to use this API operation.
2202//
2203// The context must be non-nil and will be used for request cancellation. If
2204// the context is nil a panic will occur. In the future the SDK may create
2205// sub-contexts for http.Requests. See https://golang.org/pkg/context/
2206// for more information on using Contexts.
2207func (c *Rekognition) DetectModerationLabelsWithContext(ctx aws.Context, input *DetectModerationLabelsInput, opts ...request.Option) (*DetectModerationLabelsOutput, error) {
2208	req, out := c.DetectModerationLabelsRequest(input)
2209	req.SetContext(ctx)
2210	req.ApplyOptions(opts...)
2211	return out, req.Send()
2212}
2213
2214const opDetectProtectiveEquipment = "DetectProtectiveEquipment"
2215
2216// DetectProtectiveEquipmentRequest generates a "aws/request.Request" representing the
2217// client's request for the DetectProtectiveEquipment operation. The "output" return
2218// value will be populated with the request's response once the request completes
2219// successfully.
2220//
2221// Use "Send" method on the returned Request to send the API call to the service.
2222// the "output" return value is not valid until after Send returns without error.
2223//
2224// See DetectProtectiveEquipment for more information on using the DetectProtectiveEquipment
2225// API call, and error handling.
2226//
2227// This method is useful when you want to inject custom logic or configuration
2228// into the SDK's request lifecycle. Such as custom headers, or retry logic.
2229//
2230//
2231//    // Example sending a request using the DetectProtectiveEquipmentRequest method.
2232//    req, resp := client.DetectProtectiveEquipmentRequest(params)
2233//
2234//    err := req.Send()
2235//    if err == nil { // resp is now filled
2236//        fmt.Println(resp)
2237//    }
2238func (c *Rekognition) DetectProtectiveEquipmentRequest(input *DetectProtectiveEquipmentInput) (req *request.Request, output *DetectProtectiveEquipmentOutput) {
2239	op := &request.Operation{
2240		Name:       opDetectProtectiveEquipment,
2241		HTTPMethod: "POST",
2242		HTTPPath:   "/",
2243	}
2244
2245	if input == nil {
2246		input = &DetectProtectiveEquipmentInput{}
2247	}
2248
2249	output = &DetectProtectiveEquipmentOutput{}
2250	req = c.newRequest(op, input, output)
2251	return
2252}
2253
2254// DetectProtectiveEquipment API operation for Amazon Rekognition.
2255//
2256// Detects Personal Protective Equipment (PPE) worn by people detected in an
2257// image. Amazon Rekognition can detect the following types of PPE.
2258//
2259//    * Face cover
2260//
2261//    * Hand cover
2262//
2263//    * Head cover
2264//
2265// You pass the input image as base64-encoded image bytes or as a reference
2266// to an image in an Amazon S3 bucket. The image must be either a PNG or JPG
2267// formatted file.
2268//
2269// DetectProtectiveEquipment detects PPE worn by up to 15 persons detected in
2270// an image.
2271//
2272// For each person detected in the image the API returns an array of body parts
2273// (face, head, left-hand, right-hand). For each body part, an array of detected
2274// items of PPE is returned, including an indicator of whether or not the PPE
2275// covers the body part. The API returns the confidence it has in each detection
2276// (person, PPE, body part and body part coverage). It also returns a bounding
2277// box (BoundingBox) for each detected person and each detected item of PPE.
2278//
2279// You can optionally request a summary of detected PPE items with the SummarizationAttributes
2280// input parameter. The summary provides the following information.
2281//
2282//    * The persons detected as wearing all of the types of PPE that you specify.
2283//
2284//    * The persons detected as not wearing all of the types PPE that you specify.
2285//
2286//    * The persons detected where PPE adornment could not be determined.
2287//
2288// This is a stateless API operation. That is, the operation does not persist
2289// any data.
2290//
2291// This operation requires permissions to perform the rekognition:DetectProtectiveEquipment
2292// action.
2293//
2294// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
2295// with awserr.Error's Code and Message methods to get detailed information about
2296// the error.
2297//
2298// See the AWS API reference guide for Amazon Rekognition's
2299// API operation DetectProtectiveEquipment for usage and error information.
2300//
2301// Returned Error Types:
2302//   * InvalidS3ObjectException
2303//   Amazon Rekognition is unable to access the S3 object specified in the request.
2304//
2305//   * InvalidParameterException
2306//   Input parameter violated a constraint. Validate your parameter before calling
2307//   the API operation again.
2308//
2309//   * ImageTooLargeException
2310//   The input image size exceeds the allowed limit. If you are calling DetectProtectiveEquipment,
2311//   the image size or resolution exceeds the allowed limit. For more information,
2312//   see Limits in Amazon Rekognition in the Amazon Rekognition Developer Guide.
2313//
2314//   * AccessDeniedException
2315//   You are not authorized to perform the action.
2316//
2317//   * InternalServerError
2318//   Amazon Rekognition experienced a service issue. Try your call again.
2319//
2320//   * ThrottlingException
2321//   Amazon Rekognition is temporarily unable to process the request. Try your
2322//   call again.
2323//
2324//   * ProvisionedThroughputExceededException
2325//   The number of requests exceeded your throughput limit. If you want to increase
2326//   this limit, contact Amazon Rekognition.
2327//
2328//   * InvalidImageFormatException
2329//   The provided image format is not supported.
2330//
2331func (c *Rekognition) DetectProtectiveEquipment(input *DetectProtectiveEquipmentInput) (*DetectProtectiveEquipmentOutput, error) {
2332	req, out := c.DetectProtectiveEquipmentRequest(input)
2333	return out, req.Send()
2334}
2335
2336// DetectProtectiveEquipmentWithContext is the same as DetectProtectiveEquipment with the addition of
2337// the ability to pass a context and additional request options.
2338//
2339// See DetectProtectiveEquipment for details on how to use this API operation.
2340//
2341// The context must be non-nil and will be used for request cancellation. If
2342// the context is nil a panic will occur. In the future the SDK may create
2343// sub-contexts for http.Requests. See https://golang.org/pkg/context/
2344// for more information on using Contexts.
2345func (c *Rekognition) DetectProtectiveEquipmentWithContext(ctx aws.Context, input *DetectProtectiveEquipmentInput, opts ...request.Option) (*DetectProtectiveEquipmentOutput, error) {
2346	req, out := c.DetectProtectiveEquipmentRequest(input)
2347	req.SetContext(ctx)
2348	req.ApplyOptions(opts...)
2349	return out, req.Send()
2350}
2351
2352const opDetectText = "DetectText"
2353
2354// DetectTextRequest generates a "aws/request.Request" representing the
2355// client's request for the DetectText operation. The "output" return
2356// value will be populated with the request's response once the request completes
2357// successfully.
2358//
2359// Use "Send" method on the returned Request to send the API call to the service.
2360// the "output" return value is not valid until after Send returns without error.
2361//
2362// See DetectText for more information on using the DetectText
2363// API call, and error handling.
2364//
2365// This method is useful when you want to inject custom logic or configuration
2366// into the SDK's request lifecycle. Such as custom headers, or retry logic.
2367//
2368//
2369//    // Example sending a request using the DetectTextRequest method.
2370//    req, resp := client.DetectTextRequest(params)
2371//
2372//    err := req.Send()
2373//    if err == nil { // resp is now filled
2374//        fmt.Println(resp)
2375//    }
2376func (c *Rekognition) DetectTextRequest(input *DetectTextInput) (req *request.Request, output *DetectTextOutput) {
2377	op := &request.Operation{
2378		Name:       opDetectText,
2379		HTTPMethod: "POST",
2380		HTTPPath:   "/",
2381	}
2382
2383	if input == nil {
2384		input = &DetectTextInput{}
2385	}
2386
2387	output = &DetectTextOutput{}
2388	req = c.newRequest(op, input, output)
2389	return
2390}
2391
2392// DetectText API operation for Amazon Rekognition.
2393//
2394// Detects text in the input image and converts it into machine-readable text.
2395//
2396// Pass the input image as base64-encoded image bytes or as a reference to an
2397// image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition
2398// operations, you must pass it as a reference to an image in an Amazon S3 bucket.
2399// For the AWS CLI, passing image bytes is not supported. The image must be
2400// either a .png or .jpeg formatted file.
2401//
2402// The DetectText operation returns text in an array of TextDetection elements,
2403// TextDetections. Each TextDetection element provides information about a single
2404// word or line of text that was detected in the image.
2405//
2406// A word is one or more ISO basic latin script characters that are not separated
2407// by spaces. DetectText can detect up to 50 words in an image.
2408//
2409// A line is a string of equally spaced words. A line isn't necessarily a complete
2410// sentence. For example, a driver's license number is detected as a line. A
2411// line ends when there is no aligned text after it. Also, a line ends when
2412// there is a large gap between words, relative to the length of the words.
2413// This means, depending on the gap between words, Amazon Rekognition may detect
2414// multiple lines in text aligned in the same direction. Periods don't represent
2415// the end of a line. If a sentence spans multiple lines, the DetectText operation
2416// returns multiple lines.
2417//
2418// To determine whether a TextDetection element is a line of text or a word,
2419// use the TextDetection object Type field.
2420//
2421// To be detected, text must be within +/- 90 degrees orientation of the horizontal
2422// axis.
2423//
2424// For more information, see DetectText in the Amazon Rekognition Developer
2425// Guide.
2426//
2427// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
2428// with awserr.Error's Code and Message methods to get detailed information about
2429// the error.
2430//
2431// See the AWS API reference guide for Amazon Rekognition's
2432// API operation DetectText for usage and error information.
2433//
2434// Returned Error Types:
2435//   * InvalidS3ObjectException
2436//   Amazon Rekognition is unable to access the S3 object specified in the request.
2437//
2438//   * InvalidParameterException
2439//   Input parameter violated a constraint. Validate your parameter before calling
2440//   the API operation again.
2441//
2442//   * ImageTooLargeException
2443//   The input image size exceeds the allowed limit. If you are calling DetectProtectiveEquipment,
2444//   the image size or resolution exceeds the allowed limit. For more information,
2445//   see Limits in Amazon Rekognition in the Amazon Rekognition Developer Guide.
2446//
2447//   * AccessDeniedException
2448//   You are not authorized to perform the action.
2449//
2450//   * InternalServerError
2451//   Amazon Rekognition experienced a service issue. Try your call again.
2452//
2453//   * ThrottlingException
2454//   Amazon Rekognition is temporarily unable to process the request. Try your
2455//   call again.
2456//
2457//   * ProvisionedThroughputExceededException
2458//   The number of requests exceeded your throughput limit. If you want to increase
2459//   this limit, contact Amazon Rekognition.
2460//
2461//   * InvalidImageFormatException
2462//   The provided image format is not supported.
2463//
2464func (c *Rekognition) DetectText(input *DetectTextInput) (*DetectTextOutput, error) {
2465	req, out := c.DetectTextRequest(input)
2466	return out, req.Send()
2467}
2468
2469// DetectTextWithContext is the same as DetectText with the addition of
2470// the ability to pass a context and additional request options.
2471//
2472// See DetectText for details on how to use this API operation.
2473//
2474// The context must be non-nil and will be used for request cancellation. If
2475// the context is nil a panic will occur. In the future the SDK may create
2476// sub-contexts for http.Requests. See https://golang.org/pkg/context/
2477// for more information on using Contexts.
2478func (c *Rekognition) DetectTextWithContext(ctx aws.Context, input *DetectTextInput, opts ...request.Option) (*DetectTextOutput, error) {
2479	req, out := c.DetectTextRequest(input)
2480	req.SetContext(ctx)
2481	req.ApplyOptions(opts...)
2482	return out, req.Send()
2483}
2484
2485const opGetCelebrityInfo = "GetCelebrityInfo"
2486
2487// GetCelebrityInfoRequest generates a "aws/request.Request" representing the
2488// client's request for the GetCelebrityInfo operation. The "output" return
2489// value will be populated with the request's response once the request completes
2490// successfully.
2491//
2492// Use "Send" method on the returned Request to send the API call to the service.
2493// the "output" return value is not valid until after Send returns without error.
2494//
2495// See GetCelebrityInfo for more information on using the GetCelebrityInfo
2496// API call, and error handling.
2497//
2498// This method is useful when you want to inject custom logic or configuration
2499// into the SDK's request lifecycle. Such as custom headers, or retry logic.
2500//
2501//
2502//    // Example sending a request using the GetCelebrityInfoRequest method.
2503//    req, resp := client.GetCelebrityInfoRequest(params)
2504//
2505//    err := req.Send()
2506//    if err == nil { // resp is now filled
2507//        fmt.Println(resp)
2508//    }
2509func (c *Rekognition) GetCelebrityInfoRequest(input *GetCelebrityInfoInput) (req *request.Request, output *GetCelebrityInfoOutput) {
2510	op := &request.Operation{
2511		Name:       opGetCelebrityInfo,
2512		HTTPMethod: "POST",
2513		HTTPPath:   "/",
2514	}
2515
2516	if input == nil {
2517		input = &GetCelebrityInfoInput{}
2518	}
2519
2520	output = &GetCelebrityInfoOutput{}
2521	req = c.newRequest(op, input, output)
2522	return
2523}
2524
2525// GetCelebrityInfo API operation for Amazon Rekognition.
2526//
2527// Gets the name and additional information about a celebrity based on his or
2528// her Amazon Rekognition ID. The additional information is returned as an array
2529// of URLs. If there is no additional information about the celebrity, this
2530// list is empty.
2531//
2532// For more information, see Recognizing Celebrities in an Image in the Amazon
2533// Rekognition Developer Guide.
2534//
2535// This operation requires permissions to perform the rekognition:GetCelebrityInfo
2536// action.
2537//
2538// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
2539// with awserr.Error's Code and Message methods to get detailed information about
2540// the error.
2541//
2542// See the AWS API reference guide for Amazon Rekognition's
2543// API operation GetCelebrityInfo for usage and error information.
2544//
2545// Returned Error Types:
2546//   * InvalidParameterException
2547//   Input parameter violated a constraint. Validate your parameter before calling
2548//   the API operation again.
2549//
2550//   * AccessDeniedException
2551//   You are not authorized to perform the action.
2552//
2553//   * InternalServerError
2554//   Amazon Rekognition experienced a service issue. Try your call again.
2555//
2556//   * ThrottlingException
2557//   Amazon Rekognition is temporarily unable to process the request. Try your
2558//   call again.
2559//
2560//   * ProvisionedThroughputExceededException
2561//   The number of requests exceeded your throughput limit. If you want to increase
2562//   this limit, contact Amazon Rekognition.
2563//
2564//   * ResourceNotFoundException
2565//   The collection specified in the request cannot be found.
2566//
2567func (c *Rekognition) GetCelebrityInfo(input *GetCelebrityInfoInput) (*GetCelebrityInfoOutput, error) {
2568	req, out := c.GetCelebrityInfoRequest(input)
2569	return out, req.Send()
2570}
2571
2572// GetCelebrityInfoWithContext is the same as GetCelebrityInfo with the addition of
2573// the ability to pass a context and additional request options.
2574//
2575// See GetCelebrityInfo for details on how to use this API operation.
2576//
2577// The context must be non-nil and will be used for request cancellation. If
2578// the context is nil a panic will occur. In the future the SDK may create
2579// sub-contexts for http.Requests. See https://golang.org/pkg/context/
2580// for more information on using Contexts.
2581func (c *Rekognition) GetCelebrityInfoWithContext(ctx aws.Context, input *GetCelebrityInfoInput, opts ...request.Option) (*GetCelebrityInfoOutput, error) {
2582	req, out := c.GetCelebrityInfoRequest(input)
2583	req.SetContext(ctx)
2584	req.ApplyOptions(opts...)
2585	return out, req.Send()
2586}
2587
2588const opGetCelebrityRecognition = "GetCelebrityRecognition"
2589
2590// GetCelebrityRecognitionRequest generates a "aws/request.Request" representing the
2591// client's request for the GetCelebrityRecognition operation. The "output" return
2592// value will be populated with the request's response once the request completes
2593// successfully.
2594//
2595// Use "Send" method on the returned Request to send the API call to the service.
2596// the "output" return value is not valid until after Send returns without error.
2597//
2598// See GetCelebrityRecognition for more information on using the GetCelebrityRecognition
2599// API call, and error handling.
2600//
2601// This method is useful when you want to inject custom logic or configuration
2602// into the SDK's request lifecycle. Such as custom headers, or retry logic.
2603//
2604//
2605//    // Example sending a request using the GetCelebrityRecognitionRequest method.
2606//    req, resp := client.GetCelebrityRecognitionRequest(params)
2607//
2608//    err := req.Send()
2609//    if err == nil { // resp is now filled
2610//        fmt.Println(resp)
2611//    }
2612func (c *Rekognition) GetCelebrityRecognitionRequest(input *GetCelebrityRecognitionInput) (req *request.Request, output *GetCelebrityRecognitionOutput) {
2613	op := &request.Operation{
2614		Name:       opGetCelebrityRecognition,
2615		HTTPMethod: "POST",
2616		HTTPPath:   "/",
2617		Paginator: &request.Paginator{
2618			InputTokens:     []string{"NextToken"},
2619			OutputTokens:    []string{"NextToken"},
2620			LimitToken:      "MaxResults",
2621			TruncationToken: "",
2622		},
2623	}
2624
2625	if input == nil {
2626		input = &GetCelebrityRecognitionInput{}
2627	}
2628
2629	output = &GetCelebrityRecognitionOutput{}
2630	req = c.newRequest(op, input, output)
2631	return
2632}
2633
2634// GetCelebrityRecognition API operation for Amazon Rekognition.
2635//
2636// Gets the celebrity recognition results for a Amazon Rekognition Video analysis
2637// started by StartCelebrityRecognition.
2638//
2639// Celebrity recognition in a video is an asynchronous operation. Analysis is
2640// started by a call to StartCelebrityRecognition which returns a job identifier
2641// (JobId). When the celebrity recognition operation finishes, Amazon Rekognition
2642// Video publishes a completion status to the Amazon Simple Notification Service
2643// topic registered in the initial call to StartCelebrityRecognition. To get
2644// the results of the celebrity recognition analysis, first check that the status
2645// value published to the Amazon SNS topic is SUCCEEDED. If so, call GetCelebrityDetection
2646// and pass the job identifier (JobId) from the initial call to StartCelebrityDetection.
2647//
2648// For more information, see Working With Stored Videos in the Amazon Rekognition
2649// Developer Guide.
2650//
2651// GetCelebrityRecognition returns detected celebrities and the time(s) they
2652// are detected in an array (Celebrities) of CelebrityRecognition objects. Each
2653// CelebrityRecognition contains information about the celebrity in a CelebrityDetail
2654// object and the time, Timestamp, the celebrity was detected.
2655//
2656// GetCelebrityRecognition only returns the default facial attributes (BoundingBox,
2657// Confidence, Landmarks, Pose, and Quality). The other facial attributes listed
2658// in the Face object of the following response syntax are not returned. For
2659// more information, see FaceDetail in the Amazon Rekognition Developer Guide.
2660//
2661// By default, the Celebrities array is sorted by time (milliseconds from the
2662// start of the video). You can also sort the array by celebrity by specifying
2663// the value ID in the SortBy input parameter.
2664//
2665// The CelebrityDetail object includes the celebrity identifer and additional
2666// information urls. If you don't store the additional information urls, you
2667// can get them later by calling GetCelebrityInfo with the celebrity identifer.
2668//
2669// No information is returned for faces not recognized as celebrities.
2670//
2671// Use MaxResults parameter to limit the number of labels returned. If there
2672// are more results than specified in MaxResults, the value of NextToken in
2673// the operation response contains a pagination token for getting the next set
2674// of results. To get the next page of results, call GetCelebrityDetection and
2675// populate the NextToken request parameter with the token value returned from
2676// the previous call to GetCelebrityRecognition.
2677//
2678// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
2679// with awserr.Error's Code and Message methods to get detailed information about
2680// the error.
2681//
2682// See the AWS API reference guide for Amazon Rekognition's
2683// API operation GetCelebrityRecognition for usage and error information.
2684//
2685// Returned Error Types:
2686//   * AccessDeniedException
2687//   You are not authorized to perform the action.
2688//
2689//   * InternalServerError
2690//   Amazon Rekognition experienced a service issue. Try your call again.
2691//
2692//   * InvalidParameterException
2693//   Input parameter violated a constraint. Validate your parameter before calling
2694//   the API operation again.
2695//
2696//   * InvalidPaginationTokenException
2697//   Pagination token in the request is not valid.
2698//
2699//   * ProvisionedThroughputExceededException
2700//   The number of requests exceeded your throughput limit. If you want to increase
2701//   this limit, contact Amazon Rekognition.
2702//
2703//   * ResourceNotFoundException
2704//   The collection specified in the request cannot be found.
2705//
2706//   * ThrottlingException
2707//   Amazon Rekognition is temporarily unable to process the request. Try your
2708//   call again.
2709//
2710func (c *Rekognition) GetCelebrityRecognition(input *GetCelebrityRecognitionInput) (*GetCelebrityRecognitionOutput, error) {
2711	req, out := c.GetCelebrityRecognitionRequest(input)
2712	return out, req.Send()
2713}
2714
2715// GetCelebrityRecognitionWithContext is the same as GetCelebrityRecognition with the addition of
2716// the ability to pass a context and additional request options.
2717//
2718// See GetCelebrityRecognition for details on how to use this API operation.
2719//
2720// The context must be non-nil and will be used for request cancellation. If
2721// the context is nil a panic will occur. In the future the SDK may create
2722// sub-contexts for http.Requests. See https://golang.org/pkg/context/
2723// for more information on using Contexts.
2724func (c *Rekognition) GetCelebrityRecognitionWithContext(ctx aws.Context, input *GetCelebrityRecognitionInput, opts ...request.Option) (*GetCelebrityRecognitionOutput, error) {
2725	req, out := c.GetCelebrityRecognitionRequest(input)
2726	req.SetContext(ctx)
2727	req.ApplyOptions(opts...)
2728	return out, req.Send()
2729}
2730
2731// GetCelebrityRecognitionPages iterates over the pages of a GetCelebrityRecognition operation,
2732// calling the "fn" function with the response data for each page. To stop
2733// iterating, return false from the fn function.
2734//
2735// See GetCelebrityRecognition method for more information on how to use this operation.
2736//
2737// Note: This operation can generate multiple requests to a service.
2738//
2739//    // Example iterating over at most 3 pages of a GetCelebrityRecognition operation.
2740//    pageNum := 0
2741//    err := client.GetCelebrityRecognitionPages(params,
2742//        func(page *rekognition.GetCelebrityRecognitionOutput, lastPage bool) bool {
2743//            pageNum++
2744//            fmt.Println(page)
2745//            return pageNum <= 3
2746//        })
2747//
2748func (c *Rekognition) GetCelebrityRecognitionPages(input *GetCelebrityRecognitionInput, fn func(*GetCelebrityRecognitionOutput, bool) bool) error {
2749	return c.GetCelebrityRecognitionPagesWithContext(aws.BackgroundContext(), input, fn)
2750}
2751
2752// GetCelebrityRecognitionPagesWithContext same as GetCelebrityRecognitionPages except
2753// it takes a Context and allows setting request options on the pages.
2754//
2755// The context must be non-nil and will be used for request cancellation. If
2756// the context is nil a panic will occur. In the future the SDK may create
2757// sub-contexts for http.Requests. See https://golang.org/pkg/context/
2758// for more information on using Contexts.
2759func (c *Rekognition) GetCelebrityRecognitionPagesWithContext(ctx aws.Context, input *GetCelebrityRecognitionInput, fn func(*GetCelebrityRecognitionOutput, bool) bool, opts ...request.Option) error {
2760	p := request.Pagination{
2761		NewRequest: func() (*request.Request, error) {
2762			var inCpy *GetCelebrityRecognitionInput
2763			if input != nil {
2764				tmp := *input
2765				inCpy = &tmp
2766			}
2767			req, _ := c.GetCelebrityRecognitionRequest(inCpy)
2768			req.SetContext(ctx)
2769			req.ApplyOptions(opts...)
2770			return req, nil
2771		},
2772	}
2773
2774	for p.Next() {
2775		if !fn(p.Page().(*GetCelebrityRecognitionOutput), !p.HasNextPage()) {
2776			break
2777		}
2778	}
2779
2780	return p.Err()
2781}
2782
2783const opGetContentModeration = "GetContentModeration"
2784
2785// GetContentModerationRequest generates a "aws/request.Request" representing the
2786// client's request for the GetContentModeration operation. The "output" return
2787// value will be populated with the request's response once the request completes
2788// successfully.
2789//
2790// Use "Send" method on the returned Request to send the API call to the service.
2791// the "output" return value is not valid until after Send returns without error.
2792//
2793// See GetContentModeration for more information on using the GetContentModeration
2794// API call, and error handling.
2795//
2796// This method is useful when you want to inject custom logic or configuration
2797// into the SDK's request lifecycle. Such as custom headers, or retry logic.
2798//
2799//
2800//    // Example sending a request using the GetContentModerationRequest method.
2801//    req, resp := client.GetContentModerationRequest(params)
2802//
2803//    err := req.Send()
2804//    if err == nil { // resp is now filled
2805//        fmt.Println(resp)
2806//    }
2807func (c *Rekognition) GetContentModerationRequest(input *GetContentModerationInput) (req *request.Request, output *GetContentModerationOutput) {
2808	op := &request.Operation{
2809		Name:       opGetContentModeration,
2810		HTTPMethod: "POST",
2811		HTTPPath:   "/",
2812		Paginator: &request.Paginator{
2813			InputTokens:     []string{"NextToken"},
2814			OutputTokens:    []string{"NextToken"},
2815			LimitToken:      "MaxResults",
2816			TruncationToken: "",
2817		},
2818	}
2819
2820	if input == nil {
2821		input = &GetContentModerationInput{}
2822	}
2823
2824	output = &GetContentModerationOutput{}
2825	req = c.newRequest(op, input, output)
2826	return
2827}
2828
2829// GetContentModeration API operation for Amazon Rekognition.
2830//
2831// Gets the unsafe content analysis results for a Amazon Rekognition Video analysis
2832// started by StartContentModeration.
2833//
2834// Unsafe content analysis of a video is an asynchronous operation. You start
2835// analysis by calling StartContentModeration which returns a job identifier
2836// (JobId). When analysis finishes, Amazon Rekognition Video publishes a completion
2837// status to the Amazon Simple Notification Service topic registered in the
2838// initial call to StartContentModeration. To get the results of the unsafe
2839// content analysis, first check that the status value published to the Amazon
2840// SNS topic is SUCCEEDED. If so, call GetContentModeration and pass the job
2841// identifier (JobId) from the initial call to StartContentModeration.
2842//
2843// For more information, see Working with Stored Videos in the Amazon Rekognition
2844// Devlopers Guide.
2845//
2846// GetContentModeration returns detected unsafe content labels, and the time
2847// they are detected, in an array, ModerationLabels, of ContentModerationDetection
2848// objects.
2849//
2850// By default, the moderated labels are returned sorted by time, in milliseconds
2851// from the start of the video. You can also sort them by moderated label by
2852// specifying NAME for the SortBy input parameter.
2853//
2854// Since video analysis can return a large number of results, use the MaxResults
2855// parameter to limit the number of labels returned in a single call to GetContentModeration.
2856// If there are more results than specified in MaxResults, the value of NextToken
2857// in the operation response contains a pagination token for getting the next
2858// set of results. To get the next page of results, call GetContentModeration
2859// and populate the NextToken request parameter with the value of NextToken
2860// returned from the previous call to GetContentModeration.
2861//
2862// For more information, see Detecting Unsafe Content in the Amazon Rekognition
2863// Developer Guide.
2864//
2865// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
2866// with awserr.Error's Code and Message methods to get detailed information about
2867// the error.
2868//
2869// See the AWS API reference guide for Amazon Rekognition's
2870// API operation GetContentModeration for usage and error information.
2871//
2872// Returned Error Types:
2873//   * AccessDeniedException
2874//   You are not authorized to perform the action.
2875//
2876//   * InternalServerError
2877//   Amazon Rekognition experienced a service issue. Try your call again.
2878//
2879//   * InvalidParameterException
2880//   Input parameter violated a constraint. Validate your parameter before calling
2881//   the API operation again.
2882//
2883//   * InvalidPaginationTokenException
2884//   Pagination token in the request is not valid.
2885//
2886//   * ProvisionedThroughputExceededException
2887//   The number of requests exceeded your throughput limit. If you want to increase
2888//   this limit, contact Amazon Rekognition.
2889//
2890//   * ResourceNotFoundException
2891//   The collection specified in the request cannot be found.
2892//
2893//   * ThrottlingException
2894//   Amazon Rekognition is temporarily unable to process the request. Try your
2895//   call again.
2896//
2897func (c *Rekognition) GetContentModeration(input *GetContentModerationInput) (*GetContentModerationOutput, error) {
2898	req, out := c.GetContentModerationRequest(input)
2899	return out, req.Send()
2900}
2901
2902// GetContentModerationWithContext is the same as GetContentModeration with the addition of
2903// the ability to pass a context and additional request options.
2904//
2905// See GetContentModeration for details on how to use this API operation.
2906//
2907// The context must be non-nil and will be used for request cancellation. If
2908// the context is nil a panic will occur. In the future the SDK may create
2909// sub-contexts for http.Requests. See https://golang.org/pkg/context/
2910// for more information on using Contexts.
2911func (c *Rekognition) GetContentModerationWithContext(ctx aws.Context, input *GetContentModerationInput, opts ...request.Option) (*GetContentModerationOutput, error) {
2912	req, out := c.GetContentModerationRequest(input)
2913	req.SetContext(ctx)
2914	req.ApplyOptions(opts...)
2915	return out, req.Send()
2916}
2917
2918// GetContentModerationPages iterates over the pages of a GetContentModeration operation,
2919// calling the "fn" function with the response data for each page. To stop
2920// iterating, return false from the fn function.
2921//
2922// See GetContentModeration method for more information on how to use this operation.
2923//
2924// Note: This operation can generate multiple requests to a service.
2925//
2926//    // Example iterating over at most 3 pages of a GetContentModeration operation.
2927//    pageNum := 0
2928//    err := client.GetContentModerationPages(params,
2929//        func(page *rekognition.GetContentModerationOutput, lastPage bool) bool {
2930//            pageNum++
2931//            fmt.Println(page)
2932//            return pageNum <= 3
2933//        })
2934//
2935func (c *Rekognition) GetContentModerationPages(input *GetContentModerationInput, fn func(*GetContentModerationOutput, bool) bool) error {
2936	return c.GetContentModerationPagesWithContext(aws.BackgroundContext(), input, fn)
2937}
2938
2939// GetContentModerationPagesWithContext same as GetContentModerationPages except
2940// it takes a Context and allows setting request options on the pages.
2941//
2942// The context must be non-nil and will be used for request cancellation. If
2943// the context is nil a panic will occur. In the future the SDK may create
2944// sub-contexts for http.Requests. See https://golang.org/pkg/context/
2945// for more information on using Contexts.
2946func (c *Rekognition) GetContentModerationPagesWithContext(ctx aws.Context, input *GetContentModerationInput, fn func(*GetContentModerationOutput, bool) bool, opts ...request.Option) error {
2947	p := request.Pagination{
2948		NewRequest: func() (*request.Request, error) {
2949			var inCpy *GetContentModerationInput
2950			if input != nil {
2951				tmp := *input
2952				inCpy = &tmp
2953			}
2954			req, _ := c.GetContentModerationRequest(inCpy)
2955			req.SetContext(ctx)
2956			req.ApplyOptions(opts...)
2957			return req, nil
2958		},
2959	}
2960
2961	for p.Next() {
2962		if !fn(p.Page().(*GetContentModerationOutput), !p.HasNextPage()) {
2963			break
2964		}
2965	}
2966
2967	return p.Err()
2968}
2969
2970const opGetFaceDetection = "GetFaceDetection"
2971
2972// GetFaceDetectionRequest generates a "aws/request.Request" representing the
2973// client's request for the GetFaceDetection operation. The "output" return
2974// value will be populated with the request's response once the request completes
2975// successfully.
2976//
2977// Use "Send" method on the returned Request to send the API call to the service.
2978// the "output" return value is not valid until after Send returns without error.
2979//
2980// See GetFaceDetection for more information on using the GetFaceDetection
2981// API call, and error handling.
2982//
2983// This method is useful when you want to inject custom logic or configuration
2984// into the SDK's request lifecycle. Such as custom headers, or retry logic.
2985//
2986//
2987//    // Example sending a request using the GetFaceDetectionRequest method.
2988//    req, resp := client.GetFaceDetectionRequest(params)
2989//
2990//    err := req.Send()
2991//    if err == nil { // resp is now filled
2992//        fmt.Println(resp)
2993//    }
2994func (c *Rekognition) GetFaceDetectionRequest(input *GetFaceDetectionInput) (req *request.Request, output *GetFaceDetectionOutput) {
2995	op := &request.Operation{
2996		Name:       opGetFaceDetection,
2997		HTTPMethod: "POST",
2998		HTTPPath:   "/",
2999		Paginator: &request.Paginator{
3000			InputTokens:     []string{"NextToken"},
3001			OutputTokens:    []string{"NextToken"},
3002			LimitToken:      "MaxResults",
3003			TruncationToken: "",
3004		},
3005	}
3006
3007	if input == nil {
3008		input = &GetFaceDetectionInput{}
3009	}
3010
3011	output = &GetFaceDetectionOutput{}
3012	req = c.newRequest(op, input, output)
3013	return
3014}
3015
3016// GetFaceDetection API operation for Amazon Rekognition.
3017//
3018// Gets face detection results for a Amazon Rekognition Video analysis started
3019// by StartFaceDetection.
3020//
3021// Face detection with Amazon Rekognition Video is an asynchronous operation.
3022// You start face detection by calling StartFaceDetection which returns a job
3023// identifier (JobId). When the face detection operation finishes, Amazon Rekognition
3024// Video publishes a completion status to the Amazon Simple Notification Service
3025// topic registered in the initial call to StartFaceDetection. To get the results
3026// of the face detection operation, first check that the status value published
3027// to the Amazon SNS topic is SUCCEEDED. If so, call GetFaceDetection and pass
3028// the job identifier (JobId) from the initial call to StartFaceDetection.
3029//
3030// GetFaceDetection returns an array of detected faces (Faces) sorted by the
3031// time the faces were detected.
3032//
3033// Use MaxResults parameter to limit the number of labels returned. If there
3034// are more results than specified in MaxResults, the value of NextToken in
3035// the operation response contains a pagination token for getting the next set
3036// of results. To get the next page of results, call GetFaceDetection and populate
3037// the NextToken request parameter with the token value returned from the previous
3038// call to GetFaceDetection.
3039//
3040// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
3041// with awserr.Error's Code and Message methods to get detailed information about
3042// the error.
3043//
3044// See the AWS API reference guide for Amazon Rekognition's
3045// API operation GetFaceDetection for usage and error information.
3046//
3047// Returned Error Types:
3048//   * AccessDeniedException
3049//   You are not authorized to perform the action.
3050//
3051//   * InternalServerError
3052//   Amazon Rekognition experienced a service issue. Try your call again.
3053//
3054//   * InvalidParameterException
3055//   Input parameter violated a constraint. Validate your parameter before calling
3056//   the API operation again.
3057//
3058//   * InvalidPaginationTokenException
3059//   Pagination token in the request is not valid.
3060//
3061//   * ProvisionedThroughputExceededException
3062//   The number of requests exceeded your throughput limit. If you want to increase
3063//   this limit, contact Amazon Rekognition.
3064//
3065//   * ResourceNotFoundException
3066//   The collection specified in the request cannot be found.
3067//
3068//   * ThrottlingException
3069//   Amazon Rekognition is temporarily unable to process the request. Try your
3070//   call again.
3071//
3072func (c *Rekognition) GetFaceDetection(input *GetFaceDetectionInput) (*GetFaceDetectionOutput, error) {
3073	req, out := c.GetFaceDetectionRequest(input)
3074	return out, req.Send()
3075}
3076
3077// GetFaceDetectionWithContext is the same as GetFaceDetection with the addition of
3078// the ability to pass a context and additional request options.
3079//
3080// See GetFaceDetection for details on how to use this API operation.
3081//
3082// The context must be non-nil and will be used for request cancellation. If
3083// the context is nil a panic will occur. In the future the SDK may create
3084// sub-contexts for http.Requests. See https://golang.org/pkg/context/
3085// for more information on using Contexts.
3086func (c *Rekognition) GetFaceDetectionWithContext(ctx aws.Context, input *GetFaceDetectionInput, opts ...request.Option) (*GetFaceDetectionOutput, error) {
3087	req, out := c.GetFaceDetectionRequest(input)
3088	req.SetContext(ctx)
3089	req.ApplyOptions(opts...)
3090	return out, req.Send()
3091}
3092
3093// GetFaceDetectionPages iterates over the pages of a GetFaceDetection operation,
3094// calling the "fn" function with the response data for each page. To stop
3095// iterating, return false from the fn function.
3096//
3097// See GetFaceDetection method for more information on how to use this operation.
3098//
3099// Note: This operation can generate multiple requests to a service.
3100//
3101//    // Example iterating over at most 3 pages of a GetFaceDetection operation.
3102//    pageNum := 0
3103//    err := client.GetFaceDetectionPages(params,
3104//        func(page *rekognition.GetFaceDetectionOutput, lastPage bool) bool {
3105//            pageNum++
3106//            fmt.Println(page)
3107//            return pageNum <= 3
3108//        })
3109//
3110func (c *Rekognition) GetFaceDetectionPages(input *GetFaceDetectionInput, fn func(*GetFaceDetectionOutput, bool) bool) error {
3111	return c.GetFaceDetectionPagesWithContext(aws.BackgroundContext(), input, fn)
3112}
3113
3114// GetFaceDetectionPagesWithContext same as GetFaceDetectionPages except
3115// it takes a Context and allows setting request options on the pages.
3116//
3117// The context must be non-nil and will be used for request cancellation. If
3118// the context is nil a panic will occur. In the future the SDK may create
3119// sub-contexts for http.Requests. See https://golang.org/pkg/context/
3120// for more information on using Contexts.
3121func (c *Rekognition) GetFaceDetectionPagesWithContext(ctx aws.Context, input *GetFaceDetectionInput, fn func(*GetFaceDetectionOutput, bool) bool, opts ...request.Option) error {
3122	p := request.Pagination{
3123		NewRequest: func() (*request.Request, error) {
3124			var inCpy *GetFaceDetectionInput
3125			if input != nil {
3126				tmp := *input
3127				inCpy = &tmp
3128			}
3129			req, _ := c.GetFaceDetectionRequest(inCpy)
3130			req.SetContext(ctx)
3131			req.ApplyOptions(opts...)
3132			return req, nil
3133		},
3134	}
3135
3136	for p.Next() {
3137		if !fn(p.Page().(*GetFaceDetectionOutput), !p.HasNextPage()) {
3138			break
3139		}
3140	}
3141
3142	return p.Err()
3143}
3144
3145const opGetFaceSearch = "GetFaceSearch"
3146
3147// GetFaceSearchRequest generates a "aws/request.Request" representing the
3148// client's request for the GetFaceSearch operation. The "output" return
3149// value will be populated with the request's response once the request completes
3150// successfully.
3151//
3152// Use "Send" method on the returned Request to send the API call to the service.
3153// the "output" return value is not valid until after Send returns without error.
3154//
3155// See GetFaceSearch for more information on using the GetFaceSearch
3156// API call, and error handling.
3157//
3158// This method is useful when you want to inject custom logic or configuration
3159// into the SDK's request lifecycle. Such as custom headers, or retry logic.
3160//
3161//
3162//    // Example sending a request using the GetFaceSearchRequest method.
3163//    req, resp := client.GetFaceSearchRequest(params)
3164//
3165//    err := req.Send()
3166//    if err == nil { // resp is now filled
3167//        fmt.Println(resp)
3168//    }
3169func (c *Rekognition) GetFaceSearchRequest(input *GetFaceSearchInput) (req *request.Request, output *GetFaceSearchOutput) {
3170	op := &request.Operation{
3171		Name:       opGetFaceSearch,
3172		HTTPMethod: "POST",
3173		HTTPPath:   "/",
3174		Paginator: &request.Paginator{
3175			InputTokens:     []string{"NextToken"},
3176			OutputTokens:    []string{"NextToken"},
3177			LimitToken:      "MaxResults",
3178			TruncationToken: "",
3179		},
3180	}
3181
3182	if input == nil {
3183		input = &GetFaceSearchInput{}
3184	}
3185
3186	output = &GetFaceSearchOutput{}
3187	req = c.newRequest(op, input, output)
3188	return
3189}
3190
3191// GetFaceSearch API operation for Amazon Rekognition.
3192//
3193// Gets the face search results for Amazon Rekognition Video face search started
3194// by StartFaceSearch. The search returns faces in a collection that match the
3195// faces of persons detected in a video. It also includes the time(s) that faces
3196// are matched in the video.
3197//
3198// Face search in a video is an asynchronous operation. You start face search
3199// by calling to StartFaceSearch which returns a job identifier (JobId). When
3200// the search operation finishes, Amazon Rekognition Video publishes a completion
3201// status to the Amazon Simple Notification Service topic registered in the
3202// initial call to StartFaceSearch. To get the search results, first check that
3203// the status value published to the Amazon SNS topic is SUCCEEDED. If so, call
3204// GetFaceSearch and pass the job identifier (JobId) from the initial call to
3205// StartFaceSearch.
3206//
3207// For more information, see Searching Faces in a Collection in the Amazon Rekognition
3208// Developer Guide.
3209//
3210// The search results are retured in an array, Persons, of PersonMatch objects.
3211// EachPersonMatch element contains details about the matching faces in the
3212// input collection, person information (facial attributes, bounding boxes,
3213// and person identifer) for the matched person, and the time the person was
3214// matched in the video.
3215//
3216// GetFaceSearch only returns the default facial attributes (BoundingBox, Confidence,
3217// Landmarks, Pose, and Quality). The other facial attributes listed in the
3218// Face object of the following response syntax are not returned. For more information,
3219// see FaceDetail in the Amazon Rekognition Developer Guide.
3220//
3221// By default, the Persons array is sorted by the time, in milliseconds from
3222// the start of the video, persons are matched. You can also sort by persons
3223// by specifying INDEX for the SORTBY input parameter.
3224//
3225// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
3226// with awserr.Error's Code and Message methods to get detailed information about
3227// the error.
3228//
3229// See the AWS API reference guide for Amazon Rekognition's
3230// API operation GetFaceSearch for usage and error information.
3231//
3232// Returned Error Types:
3233//   * AccessDeniedException
3234//   You are not authorized to perform the action.
3235//
3236//   * InternalServerError
3237//   Amazon Rekognition experienced a service issue. Try your call again.
3238//
3239//   * InvalidParameterException
3240//   Input parameter violated a constraint. Validate your parameter before calling
3241//   the API operation again.
3242//
3243//   * InvalidPaginationTokenException
3244//   Pagination token in the request is not valid.
3245//
3246//   * ProvisionedThroughputExceededException
3247//   The number of requests exceeded your throughput limit. If you want to increase
3248//   this limit, contact Amazon Rekognition.
3249//
3250//   * ResourceNotFoundException
3251//   The collection specified in the request cannot be found.
3252//
3253//   * ThrottlingException
3254//   Amazon Rekognition is temporarily unable to process the request. Try your
3255//   call again.
3256//
3257func (c *Rekognition) GetFaceSearch(input *GetFaceSearchInput) (*GetFaceSearchOutput, error) {
3258	req, out := c.GetFaceSearchRequest(input)
3259	return out, req.Send()
3260}
3261
3262// GetFaceSearchWithContext is the same as GetFaceSearch with the addition of
3263// the ability to pass a context and additional request options.
3264//
3265// See GetFaceSearch for details on how to use this API operation.
3266//
3267// The context must be non-nil and will be used for request cancellation. If
3268// the context is nil a panic will occur. In the future the SDK may create
3269// sub-contexts for http.Requests. See https://golang.org/pkg/context/
3270// for more information on using Contexts.
3271func (c *Rekognition) GetFaceSearchWithContext(ctx aws.Context, input *GetFaceSearchInput, opts ...request.Option) (*GetFaceSearchOutput, error) {
3272	req, out := c.GetFaceSearchRequest(input)
3273	req.SetContext(ctx)
3274	req.ApplyOptions(opts...)
3275	return out, req.Send()
3276}
3277
3278// GetFaceSearchPages iterates over the pages of a GetFaceSearch operation,
3279// calling the "fn" function with the response data for each page. To stop
3280// iterating, return false from the fn function.
3281//
3282// See GetFaceSearch method for more information on how to use this operation.
3283//
3284// Note: This operation can generate multiple requests to a service.
3285//
3286//    // Example iterating over at most 3 pages of a GetFaceSearch operation.
3287//    pageNum := 0
3288//    err := client.GetFaceSearchPages(params,
3289//        func(page *rekognition.GetFaceSearchOutput, lastPage bool) bool {
3290//            pageNum++
3291//            fmt.Println(page)
3292//            return pageNum <= 3
3293//        })
3294//
3295func (c *Rekognition) GetFaceSearchPages(input *GetFaceSearchInput, fn func(*GetFaceSearchOutput, bool) bool) error {
3296	return c.GetFaceSearchPagesWithContext(aws.BackgroundContext(), input, fn)
3297}
3298
3299// GetFaceSearchPagesWithContext same as GetFaceSearchPages except
3300// it takes a Context and allows setting request options on the pages.
3301//
3302// The context must be non-nil and will be used for request cancellation. If
3303// the context is nil a panic will occur. In the future the SDK may create
3304// sub-contexts for http.Requests. See https://golang.org/pkg/context/
3305// for more information on using Contexts.
3306func (c *Rekognition) GetFaceSearchPagesWithContext(ctx aws.Context, input *GetFaceSearchInput, fn func(*GetFaceSearchOutput, bool) bool, opts ...request.Option) error {
3307	p := request.Pagination{
3308		NewRequest: func() (*request.Request, error) {
3309			var inCpy *GetFaceSearchInput
3310			if input != nil {
3311				tmp := *input
3312				inCpy = &tmp
3313			}
3314			req, _ := c.GetFaceSearchRequest(inCpy)
3315			req.SetContext(ctx)
3316			req.ApplyOptions(opts...)
3317			return req, nil
3318		},
3319	}
3320
3321	for p.Next() {
3322		if !fn(p.Page().(*GetFaceSearchOutput), !p.HasNextPage()) {
3323			break
3324		}
3325	}
3326
3327	return p.Err()
3328}
3329
3330const opGetLabelDetection = "GetLabelDetection"
3331
3332// GetLabelDetectionRequest generates a "aws/request.Request" representing the
3333// client's request for the GetLabelDetection operation. The "output" return
3334// value will be populated with the request's response once the request completes
3335// successfully.
3336//
3337// Use "Send" method on the returned Request to send the API call to the service.
3338// the "output" return value is not valid until after Send returns without error.
3339//
3340// See GetLabelDetection for more information on using the GetLabelDetection
3341// API call, and error handling.
3342//
3343// This method is useful when you want to inject custom logic or configuration
3344// into the SDK's request lifecycle. Such as custom headers, or retry logic.
3345//
3346//
3347//    // Example sending a request using the GetLabelDetectionRequest method.
3348//    req, resp := client.GetLabelDetectionRequest(params)
3349//
3350//    err := req.Send()
3351//    if err == nil { // resp is now filled
3352//        fmt.Println(resp)
3353//    }
3354func (c *Rekognition) GetLabelDetectionRequest(input *GetLabelDetectionInput) (req *request.Request, output *GetLabelDetectionOutput) {
3355	op := &request.Operation{
3356		Name:       opGetLabelDetection,
3357		HTTPMethod: "POST",
3358		HTTPPath:   "/",
3359		Paginator: &request.Paginator{
3360			InputTokens:     []string{"NextToken"},
3361			OutputTokens:    []string{"NextToken"},
3362			LimitToken:      "MaxResults",
3363			TruncationToken: "",
3364		},
3365	}
3366
3367	if input == nil {
3368		input = &GetLabelDetectionInput{}
3369	}
3370
3371	output = &GetLabelDetectionOutput{}
3372	req = c.newRequest(op, input, output)
3373	return
3374}
3375
3376// GetLabelDetection API operation for Amazon Rekognition.
3377//
3378// Gets the label detection results of a Amazon Rekognition Video analysis started
3379// by StartLabelDetection.
3380//
3381// The label detection operation is started by a call to StartLabelDetection
3382// which returns a job identifier (JobId). When the label detection operation
3383// finishes, Amazon Rekognition publishes a completion status to the Amazon
3384// Simple Notification Service topic registered in the initial call to StartlabelDetection.
3385// To get the results of the label detection operation, first check that the
3386// status value published to the Amazon SNS topic is SUCCEEDED. If so, call
3387// GetLabelDetection and pass the job identifier (JobId) from the initial call
3388// to StartLabelDetection.
3389//
3390// GetLabelDetection returns an array of detected labels (Labels) sorted by
3391// the time the labels were detected. You can also sort by the label name by
3392// specifying NAME for the SortBy input parameter.
3393//
3394// The labels returned include the label name, the percentage confidence in
3395// the accuracy of the detected label, and the time the label was detected in
3396// the video.
3397//
3398// The returned labels also include bounding box information for common objects,
3399// a hierarchical taxonomy of detected labels, and the version of the label
3400// model used for detection.
3401//
3402// Use MaxResults parameter to limit the number of labels returned. If there
3403// are more results than specified in MaxResults, the value of NextToken in
3404// the operation response contains a pagination token for getting the next set
3405// of results. To get the next page of results, call GetlabelDetection and populate
3406// the NextToken request parameter with the token value returned from the previous
3407// call to GetLabelDetection.
3408//
3409// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
3410// with awserr.Error's Code and Message methods to get detailed information about
3411// the error.
3412//
3413// See the AWS API reference guide for Amazon Rekognition's
3414// API operation GetLabelDetection for usage and error information.
3415//
3416// Returned Error Types:
3417//   * AccessDeniedException
3418//   You are not authorized to perform the action.
3419//
3420//   * InternalServerError
3421//   Amazon Rekognition experienced a service issue. Try your call again.
3422//
3423//   * InvalidParameterException
3424//   Input parameter violated a constraint. Validate your parameter before calling
3425//   the API operation again.
3426//
3427//   * InvalidPaginationTokenException
3428//   Pagination token in the request is not valid.
3429//
3430//   * ProvisionedThroughputExceededException
3431//   The number of requests exceeded your throughput limit. If you want to increase
3432//   this limit, contact Amazon Rekognition.
3433//
3434//   * ResourceNotFoundException
3435//   The collection specified in the request cannot be found.
3436//
3437//   * ThrottlingException
3438//   Amazon Rekognition is temporarily unable to process the request. Try your
3439//   call again.
3440//
3441func (c *Rekognition) GetLabelDetection(input *GetLabelDetectionInput) (*GetLabelDetectionOutput, error) {
3442	req, out := c.GetLabelDetectionRequest(input)
3443	return out, req.Send()
3444}
3445
3446// GetLabelDetectionWithContext is the same as GetLabelDetection with the addition of
3447// the ability to pass a context and additional request options.
3448//
3449// See GetLabelDetection for details on how to use this API operation.
3450//
3451// The context must be non-nil and will be used for request cancellation. If
3452// the context is nil a panic will occur. In the future the SDK may create
3453// sub-contexts for http.Requests. See https://golang.org/pkg/context/
3454// for more information on using Contexts.
3455func (c *Rekognition) GetLabelDetectionWithContext(ctx aws.Context, input *GetLabelDetectionInput, opts ...request.Option) (*GetLabelDetectionOutput, error) {
3456	req, out := c.GetLabelDetectionRequest(input)
3457	req.SetContext(ctx)
3458	req.ApplyOptions(opts...)
3459	return out, req.Send()
3460}
3461
3462// GetLabelDetectionPages iterates over the pages of a GetLabelDetection operation,
3463// calling the "fn" function with the response data for each page. To stop
3464// iterating, return false from the fn function.
3465//
3466// See GetLabelDetection method for more information on how to use this operation.
3467//
3468// Note: This operation can generate multiple requests to a service.
3469//
3470//    // Example iterating over at most 3 pages of a GetLabelDetection operation.
3471//    pageNum := 0
3472//    err := client.GetLabelDetectionPages(params,
3473//        func(page *rekognition.GetLabelDetectionOutput, lastPage bool) bool {
3474//            pageNum++
3475//            fmt.Println(page)
3476//            return pageNum <= 3
3477//        })
3478//
3479func (c *Rekognition) GetLabelDetectionPages(input *GetLabelDetectionInput, fn func(*GetLabelDetectionOutput, bool) bool) error {
3480	return c.GetLabelDetectionPagesWithContext(aws.BackgroundContext(), input, fn)
3481}
3482
3483// GetLabelDetectionPagesWithContext same as GetLabelDetectionPages except
3484// it takes a Context and allows setting request options on the pages.
3485//
3486// The context must be non-nil and will be used for request cancellation. If
3487// the context is nil a panic will occur. In the future the SDK may create
3488// sub-contexts for http.Requests. See https://golang.org/pkg/context/
3489// for more information on using Contexts.
3490func (c *Rekognition) GetLabelDetectionPagesWithContext(ctx aws.Context, input *GetLabelDetectionInput, fn func(*GetLabelDetectionOutput, bool) bool, opts ...request.Option) error {
3491	p := request.Pagination{
3492		NewRequest: func() (*request.Request, error) {
3493			var inCpy *GetLabelDetectionInput
3494			if input != nil {
3495				tmp := *input
3496				inCpy = &tmp
3497			}
3498			req, _ := c.GetLabelDetectionRequest(inCpy)
3499			req.SetContext(ctx)
3500			req.ApplyOptions(opts...)
3501			return req, nil
3502		},
3503	}
3504
3505	for p.Next() {
3506		if !fn(p.Page().(*GetLabelDetectionOutput), !p.HasNextPage()) {
3507			break
3508		}
3509	}
3510
3511	return p.Err()
3512}
3513
3514const opGetPersonTracking = "GetPersonTracking"
3515
3516// GetPersonTrackingRequest generates a "aws/request.Request" representing the
3517// client's request for the GetPersonTracking operation. The "output" return
3518// value will be populated with the request's response once the request completes
3519// successfully.
3520//
3521// Use "Send" method on the returned Request to send the API call to the service.
3522// the "output" return value is not valid until after Send returns without error.
3523//
3524// See GetPersonTracking for more information on using the GetPersonTracking
3525// API call, and error handling.
3526//
3527// This method is useful when you want to inject custom logic or configuration
3528// into the SDK's request lifecycle. Such as custom headers, or retry logic.
3529//
3530//
3531//    // Example sending a request using the GetPersonTrackingRequest method.
3532//    req, resp := client.GetPersonTrackingRequest(params)
3533//
3534//    err := req.Send()
3535//    if err == nil { // resp is now filled
3536//        fmt.Println(resp)
3537//    }
3538func (c *Rekognition) GetPersonTrackingRequest(input *GetPersonTrackingInput) (req *request.Request, output *GetPersonTrackingOutput) {
3539	op := &request.Operation{
3540		Name:       opGetPersonTracking,
3541		HTTPMethod: "POST",
3542		HTTPPath:   "/",
3543		Paginator: &request.Paginator{
3544			InputTokens:     []string{"NextToken"},
3545			OutputTokens:    []string{"NextToken"},
3546			LimitToken:      "MaxResults",
3547			TruncationToken: "",
3548		},
3549	}
3550
3551	if input == nil {
3552		input = &GetPersonTrackingInput{}
3553	}
3554
3555	output = &GetPersonTrackingOutput{}
3556	req = c.newRequest(op, input, output)
3557	return
3558}
3559
3560// GetPersonTracking API operation for Amazon Rekognition.
3561//
3562// Gets the path tracking results of a Amazon Rekognition Video analysis started
3563// by StartPersonTracking.
3564//
3565// The person path tracking operation is started by a call to StartPersonTracking
3566// which returns a job identifier (JobId). When the operation finishes, Amazon
3567// Rekognition Video publishes a completion status to the Amazon Simple Notification
3568// Service topic registered in the initial call to StartPersonTracking.
3569//
3570// To get the results of the person path tracking operation, first check that
3571// the status value published to the Amazon SNS topic is SUCCEEDED. If so, call
3572// GetPersonTracking and pass the job identifier (JobId) from the initial call
3573// to StartPersonTracking.
3574//
3575// GetPersonTracking returns an array, Persons, of tracked persons and the time(s)
3576// their paths were tracked in the video.
3577//
3578// GetPersonTracking only returns the default facial attributes (BoundingBox,
3579// Confidence, Landmarks, Pose, and Quality). The other facial attributes listed
3580// in the Face object of the following response syntax are not returned.
3581//
3582// For more information, see FaceDetail in the Amazon Rekognition Developer
3583// Guide.
3584//
3585// By default, the array is sorted by the time(s) a person's path is tracked
3586// in the video. You can sort by tracked persons by specifying INDEX for the
3587// SortBy input parameter.
3588//
3589// Use the MaxResults parameter to limit the number of items returned. If there
3590// are more results than specified in MaxResults, the value of NextToken in
3591// the operation response contains a pagination token for getting the next set
3592// of results. To get the next page of results, call GetPersonTracking and populate
3593// the NextToken request parameter with the token value returned from the previous
3594// call to GetPersonTracking.
3595//
3596// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
3597// with awserr.Error's Code and Message methods to get detailed information about
3598// the error.
3599//
3600// See the AWS API reference guide for Amazon Rekognition's
3601// API operation GetPersonTracking for usage and error information.
3602//
3603// Returned Error Types:
3604//   * AccessDeniedException
3605//   You are not authorized to perform the action.
3606//
3607//   * InternalServerError
3608//   Amazon Rekognition experienced a service issue. Try your call again.
3609//
3610//   * InvalidParameterException
3611//   Input parameter violated a constraint. Validate your parameter before calling
3612//   the API operation again.
3613//
3614//   * InvalidPaginationTokenException
3615//   Pagination token in the request is not valid.
3616//
3617//   * ProvisionedThroughputExceededException
3618//   The number of requests exceeded your throughput limit. If you want to increase
3619//   this limit, contact Amazon Rekognition.
3620//
3621//   * ResourceNotFoundException
3622//   The collection specified in the request cannot be found.
3623//
3624//   * ThrottlingException
3625//   Amazon Rekognition is temporarily unable to process the request. Try your
3626//   call again.
3627//
3628func (c *Rekognition) GetPersonTracking(input *GetPersonTrackingInput) (*GetPersonTrackingOutput, error) {
3629	req, out := c.GetPersonTrackingRequest(input)
3630	return out, req.Send()
3631}
3632
3633// GetPersonTrackingWithContext is the same as GetPersonTracking with the addition of
3634// the ability to pass a context and additional request options.
3635//
3636// See GetPersonTracking for details on how to use this API operation.
3637//
3638// The context must be non-nil and will be used for request cancellation. If
3639// the context is nil a panic will occur. In the future the SDK may create
3640// sub-contexts for http.Requests. See https://golang.org/pkg/context/
3641// for more information on using Contexts.
3642func (c *Rekognition) GetPersonTrackingWithContext(ctx aws.Context, input *GetPersonTrackingInput, opts ...request.Option) (*GetPersonTrackingOutput, error) {
3643	req, out := c.GetPersonTrackingRequest(input)
3644	req.SetContext(ctx)
3645	req.ApplyOptions(opts...)
3646	return out, req.Send()
3647}
3648
3649// GetPersonTrackingPages iterates over the pages of a GetPersonTracking operation,
3650// calling the "fn" function with the response data for each page. To stop
3651// iterating, return false from the fn function.
3652//
3653// See GetPersonTracking method for more information on how to use this operation.
3654//
3655// Note: This operation can generate multiple requests to a service.
3656//
3657//    // Example iterating over at most 3 pages of a GetPersonTracking operation.
3658//    pageNum := 0
3659//    err := client.GetPersonTrackingPages(params,
3660//        func(page *rekognition.GetPersonTrackingOutput, lastPage bool) bool {
3661//            pageNum++
3662//            fmt.Println(page)
3663//            return pageNum <= 3
3664//        })
3665//
3666func (c *Rekognition) GetPersonTrackingPages(input *GetPersonTrackingInput, fn func(*GetPersonTrackingOutput, bool) bool) error {
3667	return c.GetPersonTrackingPagesWithContext(aws.BackgroundContext(), input, fn)
3668}
3669
3670// GetPersonTrackingPagesWithContext same as GetPersonTrackingPages except
3671// it takes a Context and allows setting request options on the pages.
3672//
3673// The context must be non-nil and will be used for request cancellation. If
3674// the context is nil a panic will occur. In the future the SDK may create
3675// sub-contexts for http.Requests. See https://golang.org/pkg/context/
3676// for more information on using Contexts.
3677func (c *Rekognition) GetPersonTrackingPagesWithContext(ctx aws.Context, input *GetPersonTrackingInput, fn func(*GetPersonTrackingOutput, bool) bool, opts ...request.Option) error {
3678	p := request.Pagination{
3679		NewRequest: func() (*request.Request, error) {
3680			var inCpy *GetPersonTrackingInput
3681			if input != nil {
3682				tmp := *input
3683				inCpy = &tmp
3684			}
3685			req, _ := c.GetPersonTrackingRequest(inCpy)
3686			req.SetContext(ctx)
3687			req.ApplyOptions(opts...)
3688			return req, nil
3689		},
3690	}
3691
3692	for p.Next() {
3693		if !fn(p.Page().(*GetPersonTrackingOutput), !p.HasNextPage()) {
3694			break
3695		}
3696	}
3697
3698	return p.Err()
3699}
3700
3701const opGetSegmentDetection = "GetSegmentDetection"
3702
3703// GetSegmentDetectionRequest generates a "aws/request.Request" representing the
3704// client's request for the GetSegmentDetection operation. The "output" return
3705// value will be populated with the request's response once the request completes
3706// successfully.
3707//
3708// Use "Send" method on the returned Request to send the API call to the service.
3709// the "output" return value is not valid until after Send returns without error.
3710//
3711// See GetSegmentDetection for more information on using the GetSegmentDetection
3712// API call, and error handling.
3713//
3714// This method is useful when you want to inject custom logic or configuration
3715// into the SDK's request lifecycle. Such as custom headers, or retry logic.
3716//
3717//
3718//    // Example sending a request using the GetSegmentDetectionRequest method.
3719//    req, resp := client.GetSegmentDetectionRequest(params)
3720//
3721//    err := req.Send()
3722//    if err == nil { // resp is now filled
3723//        fmt.Println(resp)
3724//    }
3725func (c *Rekognition) GetSegmentDetectionRequest(input *GetSegmentDetectionInput) (req *request.Request, output *GetSegmentDetectionOutput) {
3726	op := &request.Operation{
3727		Name:       opGetSegmentDetection,
3728		HTTPMethod: "POST",
3729		HTTPPath:   "/",
3730		Paginator: &request.Paginator{
3731			InputTokens:     []string{"NextToken"},
3732			OutputTokens:    []string{"NextToken"},
3733			LimitToken:      "MaxResults",
3734			TruncationToken: "",
3735		},
3736	}
3737
3738	if input == nil {
3739		input = &GetSegmentDetectionInput{}
3740	}
3741
3742	output = &GetSegmentDetectionOutput{}
3743	req = c.newRequest(op, input, output)
3744	return
3745}
3746
3747// GetSegmentDetection API operation for Amazon Rekognition.
3748//
3749// Gets the segment detection results of a Amazon Rekognition Video analysis
3750// started by StartSegmentDetection.
3751//
3752// Segment detection with Amazon Rekognition Video is an asynchronous operation.
3753// You start segment detection by calling StartSegmentDetection which returns
3754// a job identifier (JobId). When the segment detection operation finishes,
3755// Amazon Rekognition publishes a completion status to the Amazon Simple Notification
3756// Service topic registered in the initial call to StartSegmentDetection. To
3757// get the results of the segment detection operation, first check that the
3758// status value published to the Amazon SNS topic is SUCCEEDED. if so, call
3759// GetSegmentDetection and pass the job identifier (JobId) from the initial
3760// call of StartSegmentDetection.
3761//
3762// GetSegmentDetection returns detected segments in an array (Segments) of SegmentDetection
3763// objects. Segments is sorted by the segment types specified in the SegmentTypes
3764// input parameter of StartSegmentDetection. Each element of the array includes
3765// the detected segment, the precentage confidence in the acuracy of the detected
3766// segment, the type of the segment, and the frame in which the segment was
3767// detected.
3768//
3769// Use SelectedSegmentTypes to find out the type of segment detection requested
3770// in the call to StartSegmentDetection.
3771//
3772// Use the MaxResults parameter to limit the number of segment detections returned.
3773// If there are more results than specified in MaxResults, the value of NextToken
3774// in the operation response contains a pagination token for getting the next
3775// set of results. To get the next page of results, call GetSegmentDetection
3776// and populate the NextToken request parameter with the token value returned
3777// from the previous call to GetSegmentDetection.
3778//
3779// For more information, see Detecting Video Segments in Stored Video in the
3780// Amazon Rekognition Developer Guide.
3781//
3782// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
3783// with awserr.Error's Code and Message methods to get detailed information about
3784// the error.
3785//
3786// See the AWS API reference guide for Amazon Rekognition's
3787// API operation GetSegmentDetection for usage and error information.
3788//
3789// Returned Error Types:
3790//   * AccessDeniedException
3791//   You are not authorized to perform the action.
3792//
3793//   * InternalServerError
3794//   Amazon Rekognition experienced a service issue. Try your call again.
3795//
3796//   * InvalidParameterException
3797//   Input parameter violated a constraint. Validate your parameter before calling
3798//   the API operation again.
3799//
3800//   * InvalidPaginationTokenException
3801//   Pagination token in the request is not valid.
3802//
3803//   * ProvisionedThroughputExceededException
3804//   The number of requests exceeded your throughput limit. If you want to increase
3805//   this limit, contact Amazon Rekognition.
3806//
3807//   * ResourceNotFoundException
3808//   The collection specified in the request cannot be found.
3809//
3810//   * ThrottlingException
3811//   Amazon Rekognition is temporarily unable to process the request. Try your
3812//   call again.
3813//
3814func (c *Rekognition) GetSegmentDetection(input *GetSegmentDetectionInput) (*GetSegmentDetectionOutput, error) {
3815	req, out := c.GetSegmentDetectionRequest(input)
3816	return out, req.Send()
3817}
3818
3819// GetSegmentDetectionWithContext is the same as GetSegmentDetection with the addition of
3820// the ability to pass a context and additional request options.
3821//
3822// See GetSegmentDetection for details on how to use this API operation.
3823//
3824// The context must be non-nil and will be used for request cancellation. If
3825// the context is nil a panic will occur. In the future the SDK may create
3826// sub-contexts for http.Requests. See https://golang.org/pkg/context/
3827// for more information on using Contexts.
3828func (c *Rekognition) GetSegmentDetectionWithContext(ctx aws.Context, input *GetSegmentDetectionInput, opts ...request.Option) (*GetSegmentDetectionOutput, error) {
3829	req, out := c.GetSegmentDetectionRequest(input)
3830	req.SetContext(ctx)
3831	req.ApplyOptions(opts...)
3832	return out, req.Send()
3833}
3834
3835// GetSegmentDetectionPages iterates over the pages of a GetSegmentDetection operation,
3836// calling the "fn" function with the response data for each page. To stop
3837// iterating, return false from the fn function.
3838//
3839// See GetSegmentDetection method for more information on how to use this operation.
3840//
3841// Note: This operation can generate multiple requests to a service.
3842//
3843//    // Example iterating over at most 3 pages of a GetSegmentDetection operation.
3844//    pageNum := 0
3845//    err := client.GetSegmentDetectionPages(params,
3846//        func(page *rekognition.GetSegmentDetectionOutput, lastPage bool) bool {
3847//            pageNum++
3848//            fmt.Println(page)
3849//            return pageNum <= 3
3850//        })
3851//
3852func (c *Rekognition) GetSegmentDetectionPages(input *GetSegmentDetectionInput, fn func(*GetSegmentDetectionOutput, bool) bool) error {
3853	return c.GetSegmentDetectionPagesWithContext(aws.BackgroundContext(), input, fn)
3854}
3855
3856// GetSegmentDetectionPagesWithContext same as GetSegmentDetectionPages except
3857// it takes a Context and allows setting request options on the pages.
3858//
3859// The context must be non-nil and will be used for request cancellation. If
3860// the context is nil a panic will occur. In the future the SDK may create
3861// sub-contexts for http.Requests. See https://golang.org/pkg/context/
3862// for more information on using Contexts.
3863func (c *Rekognition) GetSegmentDetectionPagesWithContext(ctx aws.Context, input *GetSegmentDetectionInput, fn func(*GetSegmentDetectionOutput, bool) bool, opts ...request.Option) error {
3864	p := request.Pagination{
3865		NewRequest: func() (*request.Request, error) {
3866			var inCpy *GetSegmentDetectionInput
3867			if input != nil {
3868				tmp := *input
3869				inCpy = &tmp
3870			}
3871			req, _ := c.GetSegmentDetectionRequest(inCpy)
3872			req.SetContext(ctx)
3873			req.ApplyOptions(opts...)
3874			return req, nil
3875		},
3876	}
3877
3878	for p.Next() {
3879		if !fn(p.Page().(*GetSegmentDetectionOutput), !p.HasNextPage()) {
3880			break
3881		}
3882	}
3883
3884	return p.Err()
3885}
3886
3887const opGetTextDetection = "GetTextDetection"
3888
3889// GetTextDetectionRequest generates a "aws/request.Request" representing the
3890// client's request for the GetTextDetection operation. The "output" return
3891// value will be populated with the request's response once the request completes
3892// successfully.
3893//
3894// Use "Send" method on the returned Request to send the API call to the service.
3895// the "output" return value is not valid until after Send returns without error.
3896//
3897// See GetTextDetection for more information on using the GetTextDetection
3898// API call, and error handling.
3899//
3900// This method is useful when you want to inject custom logic or configuration
3901// into the SDK's request lifecycle. Such as custom headers, or retry logic.
3902//
3903//
3904//    // Example sending a request using the GetTextDetectionRequest method.
3905//    req, resp := client.GetTextDetectionRequest(params)
3906//
3907//    err := req.Send()
3908//    if err == nil { // resp is now filled
3909//        fmt.Println(resp)
3910//    }
3911func (c *Rekognition) GetTextDetectionRequest(input *GetTextDetectionInput) (req *request.Request, output *GetTextDetectionOutput) {
3912	op := &request.Operation{
3913		Name:       opGetTextDetection,
3914		HTTPMethod: "POST",
3915		HTTPPath:   "/",
3916		Paginator: &request.Paginator{
3917			InputTokens:     []string{"NextToken"},
3918			OutputTokens:    []string{"NextToken"},
3919			LimitToken:      "MaxResults",
3920			TruncationToken: "",
3921		},
3922	}
3923
3924	if input == nil {
3925		input = &GetTextDetectionInput{}
3926	}
3927
3928	output = &GetTextDetectionOutput{}
3929	req = c.newRequest(op, input, output)
3930	return
3931}
3932
3933// GetTextDetection API operation for Amazon Rekognition.
3934//
3935// Gets the text detection results of a Amazon Rekognition Video analysis started
3936// by StartTextDetection.
3937//
3938// Text detection with Amazon Rekognition Video is an asynchronous operation.
3939// You start text detection by calling StartTextDetection which returns a job
3940// identifier (JobId) When the text detection operation finishes, Amazon Rekognition
3941// publishes a completion status to the Amazon Simple Notification Service topic
3942// registered in the initial call to StartTextDetection. To get the results
3943// of the text detection operation, first check that the status value published
3944// to the Amazon SNS topic is SUCCEEDED. if so, call GetTextDetection and pass
3945// the job identifier (JobId) from the initial call of StartLabelDetection.
3946//
3947// GetTextDetection returns an array of detected text (TextDetections) sorted
3948// by the time the text was detected, up to 50 words per frame of video.
3949//
3950// Each element of the array includes the detected text, the precentage confidence
3951// in the acuracy of the detected text, the time the text was detected, bounding
3952// box information for where the text was located, and unique identifiers for
3953// words and their lines.
3954//
3955// Use MaxResults parameter to limit the number of text detections returned.
3956// If there are more results than specified in MaxResults, the value of NextToken
3957// in the operation response contains a pagination token for getting the next
3958// set of results. To get the next page of results, call GetTextDetection and
3959// populate the NextToken request parameter with the token value returned from
3960// the previous call to GetTextDetection.
3961//
3962// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
3963// with awserr.Error's Code and Message methods to get detailed information about
3964// the error.
3965//
3966// See the AWS API reference guide for Amazon Rekognition's
3967// API operation GetTextDetection for usage and error information.
3968//
3969// Returned Error Types:
3970//   * AccessDeniedException
3971//   You are not authorized to perform the action.
3972//
3973//   * InternalServerError
3974//   Amazon Rekognition experienced a service issue. Try your call again.
3975//
3976//   * InvalidParameterException
3977//   Input parameter violated a constraint. Validate your parameter before calling
3978//   the API operation again.
3979//
3980//   * InvalidPaginationTokenException
3981//   Pagination token in the request is not valid.
3982//
3983//   * ProvisionedThroughputExceededException
3984//   The number of requests exceeded your throughput limit. If you want to increase
3985//   this limit, contact Amazon Rekognition.
3986//
3987//   * ResourceNotFoundException
3988//   The collection specified in the request cannot be found.
3989//
3990//   * ThrottlingException
3991//   Amazon Rekognition is temporarily unable to process the request. Try your
3992//   call again.
3993//
3994func (c *Rekognition) GetTextDetection(input *GetTextDetectionInput) (*GetTextDetectionOutput, error) {
3995	req, out := c.GetTextDetectionRequest(input)
3996	return out, req.Send()
3997}
3998
3999// GetTextDetectionWithContext is the same as GetTextDetection with the addition of
4000// the ability to pass a context and additional request options.
4001//
4002// See GetTextDetection for details on how to use this API operation.
4003//
4004// The context must be non-nil and will be used for request cancellation. If
4005// the context is nil a panic will occur. In the future the SDK may create
4006// sub-contexts for http.Requests. See https://golang.org/pkg/context/
4007// for more information on using Contexts.
4008func (c *Rekognition) GetTextDetectionWithContext(ctx aws.Context, input *GetTextDetectionInput, opts ...request.Option) (*GetTextDetectionOutput, error) {
4009	req, out := c.GetTextDetectionRequest(input)
4010	req.SetContext(ctx)
4011	req.ApplyOptions(opts...)
4012	return out, req.Send()
4013}
4014
4015// GetTextDetectionPages iterates over the pages of a GetTextDetection operation,
4016// calling the "fn" function with the response data for each page. To stop
4017// iterating, return false from the fn function.
4018//
4019// See GetTextDetection method for more information on how to use this operation.
4020//
4021// Note: This operation can generate multiple requests to a service.
4022//
4023//    // Example iterating over at most 3 pages of a GetTextDetection operation.
4024//    pageNum := 0
4025//    err := client.GetTextDetectionPages(params,
4026//        func(page *rekognition.GetTextDetectionOutput, lastPage bool) bool {
4027//            pageNum++
4028//            fmt.Println(page)
4029//            return pageNum <= 3
4030//        })
4031//
4032func (c *Rekognition) GetTextDetectionPages(input *GetTextDetectionInput, fn func(*GetTextDetectionOutput, bool) bool) error {
4033	return c.GetTextDetectionPagesWithContext(aws.BackgroundContext(), input, fn)
4034}
4035
4036// GetTextDetectionPagesWithContext same as GetTextDetectionPages except
4037// it takes a Context and allows setting request options on the pages.
4038//
4039// The context must be non-nil and will be used for request cancellation. If
4040// the context is nil a panic will occur. In the future the SDK may create
4041// sub-contexts for http.Requests. See https://golang.org/pkg/context/
4042// for more information on using Contexts.
4043func (c *Rekognition) GetTextDetectionPagesWithContext(ctx aws.Context, input *GetTextDetectionInput, fn func(*GetTextDetectionOutput, bool) bool, opts ...request.Option) error {
4044	p := request.Pagination{
4045		NewRequest: func() (*request.Request, error) {
4046			var inCpy *GetTextDetectionInput
4047			if input != nil {
4048				tmp := *input
4049				inCpy = &tmp
4050			}
4051			req, _ := c.GetTextDetectionRequest(inCpy)
4052			req.SetContext(ctx)
4053			req.ApplyOptions(opts...)
4054			return req, nil
4055		},
4056	}
4057
4058	for p.Next() {
4059		if !fn(p.Page().(*GetTextDetectionOutput), !p.HasNextPage()) {
4060			break
4061		}
4062	}
4063
4064	return p.Err()
4065}
4066
4067const opIndexFaces = "IndexFaces"
4068
4069// IndexFacesRequest generates a "aws/request.Request" representing the
4070// client's request for the IndexFaces operation. The "output" return
4071// value will be populated with the request's response once the request completes
4072// successfully.
4073//
4074// Use "Send" method on the returned Request to send the API call to the service.
4075// the "output" return value is not valid until after Send returns without error.
4076//
4077// See IndexFaces for more information on using the IndexFaces
4078// API call, and error handling.
4079//
4080// This method is useful when you want to inject custom logic or configuration
4081// into the SDK's request lifecycle. Such as custom headers, or retry logic.
4082//
4083//
4084//    // Example sending a request using the IndexFacesRequest method.
4085//    req, resp := client.IndexFacesRequest(params)
4086//
4087//    err := req.Send()
4088//    if err == nil { // resp is now filled
4089//        fmt.Println(resp)
4090//    }
4091func (c *Rekognition) IndexFacesRequest(input *IndexFacesInput) (req *request.Request, output *IndexFacesOutput) {
4092	op := &request.Operation{
4093		Name:       opIndexFaces,
4094		HTTPMethod: "POST",
4095		HTTPPath:   "/",
4096	}
4097
4098	if input == nil {
4099		input = &IndexFacesInput{}
4100	}
4101
4102	output = &IndexFacesOutput{}
4103	req = c.newRequest(op, input, output)
4104	return
4105}
4106
4107// IndexFaces API operation for Amazon Rekognition.
4108//
4109// Detects faces in the input image and adds them to the specified collection.
4110//
4111// Amazon Rekognition doesn't save the actual faces that are detected. Instead,
4112// the underlying detection algorithm first detects the faces in the input image.
4113// For each face, the algorithm extracts facial features into a feature vector,
4114// and stores it in the backend database. Amazon Rekognition uses feature vectors
4115// when it performs face match and search operations using the SearchFaces and
4116// SearchFacesByImage operations.
4117//
4118// For more information, see Adding Faces to a Collection in the Amazon Rekognition
4119// Developer Guide.
4120//
4121// To get the number of faces in a collection, call DescribeCollection.
4122//
4123// If you're using version 1.0 of the face detection model, IndexFaces indexes
4124// the 15 largest faces in the input image. Later versions of the face detection
4125// model index the 100 largest faces in the input image.
4126//
4127// If you're using version 4 or later of the face model, image orientation information
4128// is not returned in the OrientationCorrection field.
4129//
4130// To determine which version of the model you're using, call DescribeCollection
4131// and supply the collection ID. You can also get the model version from the
4132// value of FaceModelVersion in the response from IndexFaces
4133//
4134// For more information, see Model Versioning in the Amazon Rekognition Developer
4135// Guide.
4136//
4137// If you provide the optional ExternalImageId for the input image you provided,
4138// Amazon Rekognition associates this ID with all faces that it detects. When
4139// you call the ListFaces operation, the response returns the external ID. You
4140// can use this external image ID to create a client-side index to associate
4141// the faces with each image. You can then use the index to find all faces in
4142// an image.
4143//
4144// You can specify the maximum number of faces to index with the MaxFaces input
4145// parameter. This is useful when you want to index the largest faces in an
4146// image and don't want to index smaller faces, such as those belonging to people
4147// standing in the background.
4148//
4149// The QualityFilter input parameter allows you to filter out detected faces
4150// that don’t meet a required quality bar. The quality bar is based on a variety
4151// of common use cases. By default, IndexFaces chooses the quality bar that's
4152// used to filter faces. You can also explicitly choose the quality bar. Use
4153// QualityFilter, to set the quality bar by specifying LOW, MEDIUM, or HIGH.
4154// If you do not want to filter detected faces, specify NONE.
4155//
4156// To use quality filtering, you need a collection associated with version 3
4157// of the face model or higher. To get the version of the face model associated
4158// with a collection, call DescribeCollection.
4159//
4160// Information about faces detected in an image, but not indexed, is returned
4161// in an array of UnindexedFace objects, UnindexedFaces. Faces aren't indexed
4162// for reasons such as:
4163//
4164//    * The number of faces detected exceeds the value of the MaxFaces request
4165//    parameter.
4166//
4167//    * The face is too small compared to the image dimensions.
4168//
4169//    * The face is too blurry.
4170//
4171//    * The image is too dark.
4172//
4173//    * The face has an extreme pose.
4174//
4175//    * The face doesn’t have enough detail to be suitable for face search.
4176//
4177// In response, the IndexFaces operation returns an array of metadata for all
4178// detected faces, FaceRecords. This includes:
4179//
4180//    * The bounding box, BoundingBox, of the detected face.
4181//
4182//    * A confidence value, Confidence, which indicates the confidence that
4183//    the bounding box contains a face.
4184//
4185//    * A face ID, FaceId, assigned by the service for each face that's detected
4186//    and stored.
4187//
4188//    * An image ID, ImageId, assigned by the service for the input image.
4189//
4190// If you request all facial attributes (by using the detectionAttributes parameter),
4191// Amazon Rekognition returns detailed facial attributes, such as facial landmarks
4192// (for example, location of eye and mouth) and other facial attributes. If
4193// you provide the same image, specify the same collection, and use the same
4194// external ID in the IndexFaces operation, Amazon Rekognition doesn't save
4195// duplicate face metadata.
4196//
4197// The input image is passed either as base64-encoded image bytes, or as a reference
4198// to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon
4199// Rekognition operations, passing image bytes isn't supported. The image must
4200// be formatted as a PNG or JPEG file.
4201//
4202// This operation requires permissions to perform the rekognition:IndexFaces
4203// action.
4204//
4205// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
4206// with awserr.Error's Code and Message methods to get detailed information about
4207// the error.
4208//
4209// See the AWS API reference guide for Amazon Rekognition's
4210// API operation IndexFaces for usage and error information.
4211//
4212// Returned Error Types:
4213//   * InvalidS3ObjectException
4214//   Amazon Rekognition is unable to access the S3 object specified in the request.
4215//
4216//   * InvalidParameterException
4217//   Input parameter violated a constraint. Validate your parameter before calling
4218//   the API operation again.
4219//
4220//   * ImageTooLargeException
4221//   The input image size exceeds the allowed limit. If you are calling DetectProtectiveEquipment,
4222//   the image size or resolution exceeds the allowed limit. For more information,
4223//   see Limits in Amazon Rekognition in the Amazon Rekognition Developer Guide.
4224//
4225//   * AccessDeniedException
4226//   You are not authorized to perform the action.
4227//
4228//   * InternalServerError
4229//   Amazon Rekognition experienced a service issue. Try your call again.
4230//
4231//   * ThrottlingException
4232//   Amazon Rekognition is temporarily unable to process the request. Try your
4233//   call again.
4234//
4235//   * ProvisionedThroughputExceededException
4236//   The number of requests exceeded your throughput limit. If you want to increase
4237//   this limit, contact Amazon Rekognition.
4238//
4239//   * ResourceNotFoundException
4240//   The collection specified in the request cannot be found.
4241//
4242//   * InvalidImageFormatException
4243//   The provided image format is not supported.
4244//
4245//   * ServiceQuotaExceededException
4246//   The size of the collection or tag list exceeds the allowed limit. For more
4247//   information, see Limits in Amazon Rekognition in the Amazon Rekognition Developer
4248//   Guide.
4249//
4250func (c *Rekognition) IndexFaces(input *IndexFacesInput) (*IndexFacesOutput, error) {
4251	req, out := c.IndexFacesRequest(input)
4252	return out, req.Send()
4253}
4254
4255// IndexFacesWithContext is the same as IndexFaces with the addition of
4256// the ability to pass a context and additional request options.
4257//
4258// See IndexFaces for details on how to use this API operation.
4259//
4260// The context must be non-nil and will be used for request cancellation. If
4261// the context is nil a panic will occur. In the future the SDK may create
4262// sub-contexts for http.Requests. See https://golang.org/pkg/context/
4263// for more information on using Contexts.
4264func (c *Rekognition) IndexFacesWithContext(ctx aws.Context, input *IndexFacesInput, opts ...request.Option) (*IndexFacesOutput, error) {
4265	req, out := c.IndexFacesRequest(input)
4266	req.SetContext(ctx)
4267	req.ApplyOptions(opts...)
4268	return out, req.Send()
4269}
4270
4271const opListCollections = "ListCollections"
4272
4273// ListCollectionsRequest generates a "aws/request.Request" representing the
4274// client's request for the ListCollections operation. The "output" return
4275// value will be populated with the request's response once the request completes
4276// successfully.
4277//
4278// Use "Send" method on the returned Request to send the API call to the service.
4279// the "output" return value is not valid until after Send returns without error.
4280//
4281// See ListCollections for more information on using the ListCollections
4282// API call, and error handling.
4283//
4284// This method is useful when you want to inject custom logic or configuration
4285// into the SDK's request lifecycle. Such as custom headers, or retry logic.
4286//
4287//
4288//    // Example sending a request using the ListCollectionsRequest method.
4289//    req, resp := client.ListCollectionsRequest(params)
4290//
4291//    err := req.Send()
4292//    if err == nil { // resp is now filled
4293//        fmt.Println(resp)
4294//    }
4295func (c *Rekognition) ListCollectionsRequest(input *ListCollectionsInput) (req *request.Request, output *ListCollectionsOutput) {
4296	op := &request.Operation{
4297		Name:       opListCollections,
4298		HTTPMethod: "POST",
4299		HTTPPath:   "/",
4300		Paginator: &request.Paginator{
4301			InputTokens:     []string{"NextToken"},
4302			OutputTokens:    []string{"NextToken"},
4303			LimitToken:      "MaxResults",
4304			TruncationToken: "",
4305		},
4306	}
4307
4308	if input == nil {
4309		input = &ListCollectionsInput{}
4310	}
4311
4312	output = &ListCollectionsOutput{}
4313	req = c.newRequest(op, input, output)
4314	return
4315}
4316
4317// ListCollections API operation for Amazon Rekognition.
4318//
4319// Returns list of collection IDs in your account. If the result is truncated,
4320// the response also provides a NextToken that you can use in the subsequent
4321// request to fetch the next set of collection IDs.
4322//
4323// For an example, see Listing Collections in the Amazon Rekognition Developer
4324// Guide.
4325//
4326// This operation requires permissions to perform the rekognition:ListCollections
4327// action.
4328//
4329// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
4330// with awserr.Error's Code and Message methods to get detailed information about
4331// the error.
4332//
4333// See the AWS API reference guide for Amazon Rekognition's
4334// API operation ListCollections for usage and error information.
4335//
4336// Returned Error Types:
4337//   * InvalidParameterException
4338//   Input parameter violated a constraint. Validate your parameter before calling
4339//   the API operation again.
4340//
4341//   * AccessDeniedException
4342//   You are not authorized to perform the action.
4343//
4344//   * InternalServerError
4345//   Amazon Rekognition experienced a service issue. Try your call again.
4346//
4347//   * ThrottlingException
4348//   Amazon Rekognition is temporarily unable to process the request. Try your
4349//   call again.
4350//
4351//   * ProvisionedThroughputExceededException
4352//   The number of requests exceeded your throughput limit. If you want to increase
4353//   this limit, contact Amazon Rekognition.
4354//
4355//   * InvalidPaginationTokenException
4356//   Pagination token in the request is not valid.
4357//
4358//   * ResourceNotFoundException
4359//   The collection specified in the request cannot be found.
4360//
4361func (c *Rekognition) ListCollections(input *ListCollectionsInput) (*ListCollectionsOutput, error) {
4362	req, out := c.ListCollectionsRequest(input)
4363	return out, req.Send()
4364}
4365
4366// ListCollectionsWithContext is the same as ListCollections with the addition of
4367// the ability to pass a context and additional request options.
4368//
4369// See ListCollections for details on how to use this API operation.
4370//
4371// The context must be non-nil and will be used for request cancellation. If
4372// the context is nil a panic will occur. In the future the SDK may create
4373// sub-contexts for http.Requests. See https://golang.org/pkg/context/
4374// for more information on using Contexts.
4375func (c *Rekognition) ListCollectionsWithContext(ctx aws.Context, input *ListCollectionsInput, opts ...request.Option) (*ListCollectionsOutput, error) {
4376	req, out := c.ListCollectionsRequest(input)
4377	req.SetContext(ctx)
4378	req.ApplyOptions(opts...)
4379	return out, req.Send()
4380}
4381
4382// ListCollectionsPages iterates over the pages of a ListCollections operation,
4383// calling the "fn" function with the response data for each page. To stop
4384// iterating, return false from the fn function.
4385//
4386// See ListCollections method for more information on how to use this operation.
4387//
4388// Note: This operation can generate multiple requests to a service.
4389//
4390//    // Example iterating over at most 3 pages of a ListCollections operation.
4391//    pageNum := 0
4392//    err := client.ListCollectionsPages(params,
4393//        func(page *rekognition.ListCollectionsOutput, lastPage bool) bool {
4394//            pageNum++
4395//            fmt.Println(page)
4396//            return pageNum <= 3
4397//        })
4398//
4399func (c *Rekognition) ListCollectionsPages(input *ListCollectionsInput, fn func(*ListCollectionsOutput, bool) bool) error {
4400	return c.ListCollectionsPagesWithContext(aws.BackgroundContext(), input, fn)
4401}
4402
4403// ListCollectionsPagesWithContext same as ListCollectionsPages except
4404// it takes a Context and allows setting request options on the pages.
4405//
4406// The context must be non-nil and will be used for request cancellation. If
4407// the context is nil a panic will occur. In the future the SDK may create
4408// sub-contexts for http.Requests. See https://golang.org/pkg/context/
4409// for more information on using Contexts.
4410func (c *Rekognition) ListCollectionsPagesWithContext(ctx aws.Context, input *ListCollectionsInput, fn func(*ListCollectionsOutput, bool) bool, opts ...request.Option) error {
4411	p := request.Pagination{
4412		NewRequest: func() (*request.Request, error) {
4413			var inCpy *ListCollectionsInput
4414			if input != nil {
4415				tmp := *input
4416				inCpy = &tmp
4417			}
4418			req, _ := c.ListCollectionsRequest(inCpy)
4419			req.SetContext(ctx)
4420			req.ApplyOptions(opts...)
4421			return req, nil
4422		},
4423	}
4424
4425	for p.Next() {
4426		if !fn(p.Page().(*ListCollectionsOutput), !p.HasNextPage()) {
4427			break
4428		}
4429	}
4430
4431	return p.Err()
4432}
4433
4434const opListFaces = "ListFaces"
4435
4436// ListFacesRequest generates a "aws/request.Request" representing the
4437// client's request for the ListFaces operation. The "output" return
4438// value will be populated with the request's response once the request completes
4439// successfully.
4440//
4441// Use "Send" method on the returned Request to send the API call to the service.
4442// the "output" return value is not valid until after Send returns without error.
4443//
4444// See ListFaces for more information on using the ListFaces
4445// API call, and error handling.
4446//
4447// This method is useful when you want to inject custom logic or configuration
4448// into the SDK's request lifecycle. Such as custom headers, or retry logic.
4449//
4450//
4451//    // Example sending a request using the ListFacesRequest method.
4452//    req, resp := client.ListFacesRequest(params)
4453//
4454//    err := req.Send()
4455//    if err == nil { // resp is now filled
4456//        fmt.Println(resp)
4457//    }
4458func (c *Rekognition) ListFacesRequest(input *ListFacesInput) (req *request.Request, output *ListFacesOutput) {
4459	op := &request.Operation{
4460		Name:       opListFaces,
4461		HTTPMethod: "POST",
4462		HTTPPath:   "/",
4463		Paginator: &request.Paginator{
4464			InputTokens:     []string{"NextToken"},
4465			OutputTokens:    []string{"NextToken"},
4466			LimitToken:      "MaxResults",
4467			TruncationToken: "",
4468		},
4469	}
4470
4471	if input == nil {
4472		input = &ListFacesInput{}
4473	}
4474
4475	output = &ListFacesOutput{}
4476	req = c.newRequest(op, input, output)
4477	return
4478}
4479
4480// ListFaces API operation for Amazon Rekognition.
4481//
4482// Returns metadata for faces in the specified collection. This metadata includes
4483// information such as the bounding box coordinates, the confidence (that the
4484// bounding box contains a face), and face ID. For an example, see Listing Faces
4485// in a Collection in the Amazon Rekognition Developer Guide.
4486//
4487// This operation requires permissions to perform the rekognition:ListFaces
4488// action.
4489//
4490// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
4491// with awserr.Error's Code and Message methods to get detailed information about
4492// the error.
4493//
4494// See the AWS API reference guide for Amazon Rekognition's
4495// API operation ListFaces for usage and error information.
4496//
4497// Returned Error Types:
4498//   * InvalidParameterException
4499//   Input parameter violated a constraint. Validate your parameter before calling
4500//   the API operation again.
4501//
4502//   * AccessDeniedException
4503//   You are not authorized to perform the action.
4504//
4505//   * InternalServerError
4506//   Amazon Rekognition experienced a service issue. Try your call again.
4507//
4508//   * ThrottlingException
4509//   Amazon Rekognition is temporarily unable to process the request. Try your
4510//   call again.
4511//
4512//   * ProvisionedThroughputExceededException
4513//   The number of requests exceeded your throughput limit. If you want to increase
4514//   this limit, contact Amazon Rekognition.
4515//
4516//   * InvalidPaginationTokenException
4517//   Pagination token in the request is not valid.
4518//
4519//   * ResourceNotFoundException
4520//   The collection specified in the request cannot be found.
4521//
4522func (c *Rekognition) ListFaces(input *ListFacesInput) (*ListFacesOutput, error) {
4523	req, out := c.ListFacesRequest(input)
4524	return out, req.Send()
4525}
4526
4527// ListFacesWithContext is the same as ListFaces with the addition of
4528// the ability to pass a context and additional request options.
4529//
4530// See ListFaces for details on how to use this API operation.
4531//
4532// The context must be non-nil and will be used for request cancellation. If
4533// the context is nil a panic will occur. In the future the SDK may create
4534// sub-contexts for http.Requests. See https://golang.org/pkg/context/
4535// for more information on using Contexts.
4536func (c *Rekognition) ListFacesWithContext(ctx aws.Context, input *ListFacesInput, opts ...request.Option) (*ListFacesOutput, error) {
4537	req, out := c.ListFacesRequest(input)
4538	req.SetContext(ctx)
4539	req.ApplyOptions(opts...)
4540	return out, req.Send()
4541}
4542
4543// ListFacesPages iterates over the pages of a ListFaces operation,
4544// calling the "fn" function with the response data for each page. To stop
4545// iterating, return false from the fn function.
4546//
4547// See ListFaces method for more information on how to use this operation.
4548//
4549// Note: This operation can generate multiple requests to a service.
4550//
4551//    // Example iterating over at most 3 pages of a ListFaces operation.
4552//    pageNum := 0
4553//    err := client.ListFacesPages(params,
4554//        func(page *rekognition.ListFacesOutput, lastPage bool) bool {
4555//            pageNum++
4556//            fmt.Println(page)
4557//            return pageNum <= 3
4558//        })
4559//
4560func (c *Rekognition) ListFacesPages(input *ListFacesInput, fn func(*ListFacesOutput, bool) bool) error {
4561	return c.ListFacesPagesWithContext(aws.BackgroundContext(), input, fn)
4562}
4563
4564// ListFacesPagesWithContext same as ListFacesPages except
4565// it takes a Context and allows setting request options on the pages.
4566//
4567// The context must be non-nil and will be used for request cancellation. If
4568// the context is nil a panic will occur. In the future the SDK may create
4569// sub-contexts for http.Requests. See https://golang.org/pkg/context/
4570// for more information on using Contexts.
4571func (c *Rekognition) ListFacesPagesWithContext(ctx aws.Context, input *ListFacesInput, fn func(*ListFacesOutput, bool) bool, opts ...request.Option) error {
4572	p := request.Pagination{
4573		NewRequest: func() (*request.Request, error) {
4574			var inCpy *ListFacesInput
4575			if input != nil {
4576				tmp := *input
4577				inCpy = &tmp
4578			}
4579			req, _ := c.ListFacesRequest(inCpy)
4580			req.SetContext(ctx)
4581			req.ApplyOptions(opts...)
4582			return req, nil
4583		},
4584	}
4585
4586	for p.Next() {
4587		if !fn(p.Page().(*ListFacesOutput), !p.HasNextPage()) {
4588			break
4589		}
4590	}
4591
4592	return p.Err()
4593}
4594
4595const opListStreamProcessors = "ListStreamProcessors"
4596
4597// ListStreamProcessorsRequest generates a "aws/request.Request" representing the
4598// client's request for the ListStreamProcessors operation. The "output" return
4599// value will be populated with the request's response once the request completes
4600// successfully.
4601//
4602// Use "Send" method on the returned Request to send the API call to the service.
4603// the "output" return value is not valid until after Send returns without error.
4604//
4605// See ListStreamProcessors for more information on using the ListStreamProcessors
4606// API call, and error handling.
4607//
4608// This method is useful when you want to inject custom logic or configuration
4609// into the SDK's request lifecycle. Such as custom headers, or retry logic.
4610//
4611//
4612//    // Example sending a request using the ListStreamProcessorsRequest method.
4613//    req, resp := client.ListStreamProcessorsRequest(params)
4614//
4615//    err := req.Send()
4616//    if err == nil { // resp is now filled
4617//        fmt.Println(resp)
4618//    }
4619func (c *Rekognition) ListStreamProcessorsRequest(input *ListStreamProcessorsInput) (req *request.Request, output *ListStreamProcessorsOutput) {
4620	op := &request.Operation{
4621		Name:       opListStreamProcessors,
4622		HTTPMethod: "POST",
4623		HTTPPath:   "/",
4624		Paginator: &request.Paginator{
4625			InputTokens:     []string{"NextToken"},
4626			OutputTokens:    []string{"NextToken"},
4627			LimitToken:      "MaxResults",
4628			TruncationToken: "",
4629		},
4630	}
4631
4632	if input == nil {
4633		input = &ListStreamProcessorsInput{}
4634	}
4635
4636	output = &ListStreamProcessorsOutput{}
4637	req = c.newRequest(op, input, output)
4638	return
4639}
4640
4641// ListStreamProcessors API operation for Amazon Rekognition.
4642//
4643// Gets a list of stream processors that you have created with CreateStreamProcessor.
4644//
4645// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
4646// with awserr.Error's Code and Message methods to get detailed information about
4647// the error.
4648//
4649// See the AWS API reference guide for Amazon Rekognition's
4650// API operation ListStreamProcessors for usage and error information.
4651//
4652// Returned Error Types:
4653//   * AccessDeniedException
4654//   You are not authorized to perform the action.
4655//
4656//   * InternalServerError
4657//   Amazon Rekognition experienced a service issue. Try your call again.
4658//
4659//   * ThrottlingException
4660//   Amazon Rekognition is temporarily unable to process the request. Try your
4661//   call again.
4662//
4663//   * InvalidParameterException
4664//   Input parameter violated a constraint. Validate your parameter before calling
4665//   the API operation again.
4666//
4667//   * InvalidPaginationTokenException
4668//   Pagination token in the request is not valid.
4669//
4670//   * ProvisionedThroughputExceededException
4671//   The number of requests exceeded your throughput limit. If you want to increase
4672//   this limit, contact Amazon Rekognition.
4673//
4674func (c *Rekognition) ListStreamProcessors(input *ListStreamProcessorsInput) (*ListStreamProcessorsOutput, error) {
4675	req, out := c.ListStreamProcessorsRequest(input)
4676	return out, req.Send()
4677}
4678
4679// ListStreamProcessorsWithContext is the same as ListStreamProcessors with the addition of
4680// the ability to pass a context and additional request options.
4681//
4682// See ListStreamProcessors for details on how to use this API operation.
4683//
4684// The context must be non-nil and will be used for request cancellation. If
4685// the context is nil a panic will occur. In the future the SDK may create
4686// sub-contexts for http.Requests. See https://golang.org/pkg/context/
4687// for more information on using Contexts.
4688func (c *Rekognition) ListStreamProcessorsWithContext(ctx aws.Context, input *ListStreamProcessorsInput, opts ...request.Option) (*ListStreamProcessorsOutput, error) {
4689	req, out := c.ListStreamProcessorsRequest(input)
4690	req.SetContext(ctx)
4691	req.ApplyOptions(opts...)
4692	return out, req.Send()
4693}
4694
4695// ListStreamProcessorsPages iterates over the pages of a ListStreamProcessors operation,
4696// calling the "fn" function with the response data for each page. To stop
4697// iterating, return false from the fn function.
4698//
4699// See ListStreamProcessors method for more information on how to use this operation.
4700//
4701// Note: This operation can generate multiple requests to a service.
4702//
4703//    // Example iterating over at most 3 pages of a ListStreamProcessors operation.
4704//    pageNum := 0
4705//    err := client.ListStreamProcessorsPages(params,
4706//        func(page *rekognition.ListStreamProcessorsOutput, lastPage bool) bool {
4707//            pageNum++
4708//            fmt.Println(page)
4709//            return pageNum <= 3
4710//        })
4711//
4712func (c *Rekognition) ListStreamProcessorsPages(input *ListStreamProcessorsInput, fn func(*ListStreamProcessorsOutput, bool) bool) error {
4713	return c.ListStreamProcessorsPagesWithContext(aws.BackgroundContext(), input, fn)
4714}
4715
4716// ListStreamProcessorsPagesWithContext same as ListStreamProcessorsPages except
4717// it takes a Context and allows setting request options on the pages.
4718//
4719// The context must be non-nil and will be used for request cancellation. If
4720// the context is nil a panic will occur. In the future the SDK may create
4721// sub-contexts for http.Requests. See https://golang.org/pkg/context/
4722// for more information on using Contexts.
4723func (c *Rekognition) ListStreamProcessorsPagesWithContext(ctx aws.Context, input *ListStreamProcessorsInput, fn func(*ListStreamProcessorsOutput, bool) bool, opts ...request.Option) error {
4724	p := request.Pagination{
4725		NewRequest: func() (*request.Request, error) {
4726			var inCpy *ListStreamProcessorsInput
4727			if input != nil {
4728				tmp := *input
4729				inCpy = &tmp
4730			}
4731			req, _ := c.ListStreamProcessorsRequest(inCpy)
4732			req.SetContext(ctx)
4733			req.ApplyOptions(opts...)
4734			return req, nil
4735		},
4736	}
4737
4738	for p.Next() {
4739		if !fn(p.Page().(*ListStreamProcessorsOutput), !p.HasNextPage()) {
4740			break
4741		}
4742	}
4743
4744	return p.Err()
4745}
4746
4747const opListTagsForResource = "ListTagsForResource"
4748
4749// ListTagsForResourceRequest generates a "aws/request.Request" representing the
4750// client's request for the ListTagsForResource operation. The "output" return
4751// value will be populated with the request's response once the request completes
4752// successfully.
4753//
4754// Use "Send" method on the returned Request to send the API call to the service.
4755// the "output" return value is not valid until after Send returns without error.
4756//
4757// See ListTagsForResource for more information on using the ListTagsForResource
4758// API call, and error handling.
4759//
4760// This method is useful when you want to inject custom logic or configuration
4761// into the SDK's request lifecycle. Such as custom headers, or retry logic.
4762//
4763//
4764//    // Example sending a request using the ListTagsForResourceRequest method.
4765//    req, resp := client.ListTagsForResourceRequest(params)
4766//
4767//    err := req.Send()
4768//    if err == nil { // resp is now filled
4769//        fmt.Println(resp)
4770//    }
4771func (c *Rekognition) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) {
4772	op := &request.Operation{
4773		Name:       opListTagsForResource,
4774		HTTPMethod: "POST",
4775		HTTPPath:   "/",
4776	}
4777
4778	if input == nil {
4779		input = &ListTagsForResourceInput{}
4780	}
4781
4782	output = &ListTagsForResourceOutput{}
4783	req = c.newRequest(op, input, output)
4784	return
4785}
4786
4787// ListTagsForResource API operation for Amazon Rekognition.
4788//
4789// Returns a list of tags in an Amazon Rekognition collection, stream processor,
4790// or Custom Labels model.
4791//
4792// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
4793// with awserr.Error's Code and Message methods to get detailed information about
4794// the error.
4795//
4796// See the AWS API reference guide for Amazon Rekognition's
4797// API operation ListTagsForResource for usage and error information.
4798//
4799// Returned Error Types:
4800//   * ResourceNotFoundException
4801//   The collection specified in the request cannot be found.
4802//
4803//   * InvalidParameterException
4804//   Input parameter violated a constraint. Validate your parameter before calling
4805//   the API operation again.
4806//
4807//   * AccessDeniedException
4808//   You are not authorized to perform the action.
4809//
4810//   * InternalServerError
4811//   Amazon Rekognition experienced a service issue. Try your call again.
4812//
4813//   * ThrottlingException
4814//   Amazon Rekognition is temporarily unable to process the request. Try your
4815//   call again.
4816//
4817//   * ProvisionedThroughputExceededException
4818//   The number of requests exceeded your throughput limit. If you want to increase
4819//   this limit, contact Amazon Rekognition.
4820//
4821func (c *Rekognition) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) {
4822	req, out := c.ListTagsForResourceRequest(input)
4823	return out, req.Send()
4824}
4825
4826// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of
4827// the ability to pass a context and additional request options.
4828//
4829// See ListTagsForResource for details on how to use this API operation.
4830//
4831// The context must be non-nil and will be used for request cancellation. If
4832// the context is nil a panic will occur. In the future the SDK may create
4833// sub-contexts for http.Requests. See https://golang.org/pkg/context/
4834// for more information on using Contexts.
4835func (c *Rekognition) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) {
4836	req, out := c.ListTagsForResourceRequest(input)
4837	req.SetContext(ctx)
4838	req.ApplyOptions(opts...)
4839	return out, req.Send()
4840}
4841
4842const opRecognizeCelebrities = "RecognizeCelebrities"
4843
4844// RecognizeCelebritiesRequest generates a "aws/request.Request" representing the
4845// client's request for the RecognizeCelebrities operation. The "output" return
4846// value will be populated with the request's response once the request completes
4847// successfully.
4848//
4849// Use "Send" method on the returned Request to send the API call to the service.
4850// the "output" return value is not valid until after Send returns without error.
4851//
4852// See RecognizeCelebrities for more information on using the RecognizeCelebrities
4853// API call, and error handling.
4854//
4855// This method is useful when you want to inject custom logic or configuration
4856// into the SDK's request lifecycle. Such as custom headers, or retry logic.
4857//
4858//
4859//    // Example sending a request using the RecognizeCelebritiesRequest method.
4860//    req, resp := client.RecognizeCelebritiesRequest(params)
4861//
4862//    err := req.Send()
4863//    if err == nil { // resp is now filled
4864//        fmt.Println(resp)
4865//    }
4866func (c *Rekognition) RecognizeCelebritiesRequest(input *RecognizeCelebritiesInput) (req *request.Request, output *RecognizeCelebritiesOutput) {
4867	op := &request.Operation{
4868		Name:       opRecognizeCelebrities,
4869		HTTPMethod: "POST",
4870		HTTPPath:   "/",
4871	}
4872
4873	if input == nil {
4874		input = &RecognizeCelebritiesInput{}
4875	}
4876
4877	output = &RecognizeCelebritiesOutput{}
4878	req = c.newRequest(op, input, output)
4879	return
4880}
4881
4882// RecognizeCelebrities API operation for Amazon Rekognition.
4883//
4884// Returns an array of celebrities recognized in the input image. For more information,
4885// see Recognizing Celebrities in the Amazon Rekognition Developer Guide.
4886//
4887// RecognizeCelebrities returns the 64 largest faces in the image. It lists
4888// recognized celebrities in the CelebrityFaces array and unrecognized faces
4889// in the UnrecognizedFaces array. RecognizeCelebrities doesn't return celebrities
4890// whose faces aren't among the largest 64 faces in the image.
4891//
4892// For each celebrity recognized, RecognizeCelebrities returns a Celebrity object.
4893// The Celebrity object contains the celebrity name, ID, URL links to additional
4894// information, match confidence, and a ComparedFace object that you can use
4895// to locate the celebrity's face on the image.
4896//
4897// Amazon Rekognition doesn't retain information about which images a celebrity
4898// has been recognized in. Your application must store this information and
4899// use the Celebrity ID property as a unique identifier for the celebrity. If
4900// you don't store the celebrity name or additional information URLs returned
4901// by RecognizeCelebrities, you will need the ID to identify the celebrity in
4902// a call to the GetCelebrityInfo operation.
4903//
4904// You pass the input image either as base64-encoded image bytes or as a reference
4905// to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon
4906// Rekognition operations, passing image bytes is not supported. The image must
4907// be either a PNG or JPEG formatted file.
4908//
4909// For an example, see Recognizing Celebrities in an Image in the Amazon Rekognition
4910// Developer Guide.
4911//
4912// This operation requires permissions to perform the rekognition:RecognizeCelebrities
4913// operation.
4914//
4915// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
4916// with awserr.Error's Code and Message methods to get detailed information about
4917// the error.
4918//
4919// See the AWS API reference guide for Amazon Rekognition's
4920// API operation RecognizeCelebrities for usage and error information.
4921//
4922// Returned Error Types:
4923//   * InvalidS3ObjectException
4924//   Amazon Rekognition is unable to access the S3 object specified in the request.
4925//
4926//   * InvalidParameterException
4927//   Input parameter violated a constraint. Validate your parameter before calling
4928//   the API operation again.
4929//
4930//   * InvalidImageFormatException
4931//   The provided image format is not supported.
4932//
4933//   * ImageTooLargeException
4934//   The input image size exceeds the allowed limit. If you are calling DetectProtectiveEquipment,
4935//   the image size or resolution exceeds the allowed limit. For more information,
4936//   see Limits in Amazon Rekognition in the Amazon Rekognition Developer Guide.
4937//
4938//   * AccessDeniedException
4939//   You are not authorized to perform the action.
4940//
4941//   * InternalServerError
4942//   Amazon Rekognition experienced a service issue. Try your call again.
4943//
4944//   * ThrottlingException
4945//   Amazon Rekognition is temporarily unable to process the request. Try your
4946//   call again.
4947//
4948//   * ProvisionedThroughputExceededException
4949//   The number of requests exceeded your throughput limit. If you want to increase
4950//   this limit, contact Amazon Rekognition.
4951//
4952//   * InvalidImageFormatException
4953//   The provided image format is not supported.
4954//
4955func (c *Rekognition) RecognizeCelebrities(input *RecognizeCelebritiesInput) (*RecognizeCelebritiesOutput, error) {
4956	req, out := c.RecognizeCelebritiesRequest(input)
4957	return out, req.Send()
4958}
4959
4960// RecognizeCelebritiesWithContext is the same as RecognizeCelebrities with the addition of
4961// the ability to pass a context and additional request options.
4962//
4963// See RecognizeCelebrities for details on how to use this API operation.
4964//
4965// The context must be non-nil and will be used for request cancellation. If
4966// the context is nil a panic will occur. In the future the SDK may create
4967// sub-contexts for http.Requests. See https://golang.org/pkg/context/
4968// for more information on using Contexts.
4969func (c *Rekognition) RecognizeCelebritiesWithContext(ctx aws.Context, input *RecognizeCelebritiesInput, opts ...request.Option) (*RecognizeCelebritiesOutput, error) {
4970	req, out := c.RecognizeCelebritiesRequest(input)
4971	req.SetContext(ctx)
4972	req.ApplyOptions(opts...)
4973	return out, req.Send()
4974}
4975
4976const opSearchFaces = "SearchFaces"
4977
4978// SearchFacesRequest generates a "aws/request.Request" representing the
4979// client's request for the SearchFaces operation. The "output" return
4980// value will be populated with the request's response once the request completes
4981// successfully.
4982//
4983// Use "Send" method on the returned Request to send the API call to the service.
4984// the "output" return value is not valid until after Send returns without error.
4985//
4986// See SearchFaces for more information on using the SearchFaces
4987// API call, and error handling.
4988//
4989// This method is useful when you want to inject custom logic or configuration
4990// into the SDK's request lifecycle. Such as custom headers, or retry logic.
4991//
4992//
4993//    // Example sending a request using the SearchFacesRequest method.
4994//    req, resp := client.SearchFacesRequest(params)
4995//
4996//    err := req.Send()
4997//    if err == nil { // resp is now filled
4998//        fmt.Println(resp)
4999//    }
5000func (c *Rekognition) SearchFacesRequest(input *SearchFacesInput) (req *request.Request, output *SearchFacesOutput) {
5001	op := &request.Operation{
5002		Name:       opSearchFaces,
5003		HTTPMethod: "POST",
5004		HTTPPath:   "/",
5005	}
5006
5007	if input == nil {
5008		input = &SearchFacesInput{}
5009	}
5010
5011	output = &SearchFacesOutput{}
5012	req = c.newRequest(op, input, output)
5013	return
5014}
5015
5016// SearchFaces API operation for Amazon Rekognition.
5017//
5018// For a given input face ID, searches for matching faces in the collection
5019// the face belongs to. You get a face ID when you add a face to the collection
5020// using the IndexFaces operation. The operation compares the features of the
5021// input face with faces in the specified collection.
5022//
5023// You can also search faces without indexing faces by using the SearchFacesByImage
5024// operation.
5025//
5026// The operation response returns an array of faces that match, ordered by similarity
5027// score with the highest similarity first. More specifically, it is an array
5028// of metadata for each face match that is found. Along with the metadata, the
5029// response also includes a confidence value for each face match, indicating
5030// the confidence that the specific face matches the input face.
5031//
5032// For an example, see Searching for a Face Using Its Face ID in the Amazon
5033// Rekognition Developer Guide.
5034//
5035// This operation requires permissions to perform the rekognition:SearchFaces
5036// action.
5037//
5038// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
5039// with awserr.Error's Code and Message methods to get detailed information about
5040// the error.
5041//
5042// See the AWS API reference guide for Amazon Rekognition's
5043// API operation SearchFaces for usage and error information.
5044//
5045// Returned Error Types:
5046//   * InvalidParameterException
5047//   Input parameter violated a constraint. Validate your parameter before calling
5048//   the API operation again.
5049//
5050//   * AccessDeniedException
5051//   You are not authorized to perform the action.
5052//
5053//   * InternalServerError
5054//   Amazon Rekognition experienced a service issue. Try your call again.
5055//
5056//   * ThrottlingException
5057//   Amazon Rekognition is temporarily unable to process the request. Try your
5058//   call again.
5059//
5060//   * ProvisionedThroughputExceededException
5061//   The number of requests exceeded your throughput limit. If you want to increase
5062//   this limit, contact Amazon Rekognition.
5063//
5064//   * ResourceNotFoundException
5065//   The collection specified in the request cannot be found.
5066//
5067func (c *Rekognition) SearchFaces(input *SearchFacesInput) (*SearchFacesOutput, error) {
5068	req, out := c.SearchFacesRequest(input)
5069	return out, req.Send()
5070}
5071
5072// SearchFacesWithContext is the same as SearchFaces with the addition of
5073// the ability to pass a context and additional request options.
5074//
5075// See SearchFaces for details on how to use this API operation.
5076//
5077// The context must be non-nil and will be used for request cancellation. If
5078// the context is nil a panic will occur. In the future the SDK may create
5079// sub-contexts for http.Requests. See https://golang.org/pkg/context/
5080// for more information on using Contexts.
5081func (c *Rekognition) SearchFacesWithContext(ctx aws.Context, input *SearchFacesInput, opts ...request.Option) (*SearchFacesOutput, error) {
5082	req, out := c.SearchFacesRequest(input)
5083	req.SetContext(ctx)
5084	req.ApplyOptions(opts...)
5085	return out, req.Send()
5086}
5087
5088const opSearchFacesByImage = "SearchFacesByImage"
5089
5090// SearchFacesByImageRequest generates a "aws/request.Request" representing the
5091// client's request for the SearchFacesByImage operation. The "output" return
5092// value will be populated with the request's response once the request completes
5093// successfully.
5094//
5095// Use "Send" method on the returned Request to send the API call to the service.
5096// the "output" return value is not valid until after Send returns without error.
5097//
5098// See SearchFacesByImage for more information on using the SearchFacesByImage
5099// API call, and error handling.
5100//
5101// This method is useful when you want to inject custom logic or configuration
5102// into the SDK's request lifecycle. Such as custom headers, or retry logic.
5103//
5104//
5105//    // Example sending a request using the SearchFacesByImageRequest method.
5106//    req, resp := client.SearchFacesByImageRequest(params)
5107//
5108//    err := req.Send()
5109//    if err == nil { // resp is now filled
5110//        fmt.Println(resp)
5111//    }
5112func (c *Rekognition) SearchFacesByImageRequest(input *SearchFacesByImageInput) (req *request.Request, output *SearchFacesByImageOutput) {
5113	op := &request.Operation{
5114		Name:       opSearchFacesByImage,
5115		HTTPMethod: "POST",
5116		HTTPPath:   "/",
5117	}
5118
5119	if input == nil {
5120		input = &SearchFacesByImageInput{}
5121	}
5122
5123	output = &SearchFacesByImageOutput{}
5124	req = c.newRequest(op, input, output)
5125	return
5126}
5127
5128// SearchFacesByImage API operation for Amazon Rekognition.
5129//
5130// For a given input image, first detects the largest face in the image, and
5131// then searches the specified collection for matching faces. The operation
5132// compares the features of the input face with faces in the specified collection.
5133//
5134// To search for all faces in an input image, you might first call the IndexFaces
5135// operation, and then use the face IDs returned in subsequent calls to the
5136// SearchFaces operation.
5137//
5138// You can also call the DetectFaces operation and use the bounding boxes in
5139// the response to make face crops, which then you can pass in to the SearchFacesByImage
5140// operation.
5141//
5142// You pass the input image either as base64-encoded image bytes or as a reference
5143// to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon
5144// Rekognition operations, passing image bytes is not supported. The image must
5145// be either a PNG or JPEG formatted file.
5146//
5147// The response returns an array of faces that match, ordered by similarity
5148// score with the highest similarity first. More specifically, it is an array
5149// of metadata for each face match found. Along with the metadata, the response
5150// also includes a similarity indicating how similar the face is to the input
5151// face. In the response, the operation also returns the bounding box (and a
5152// confidence level that the bounding box contains a face) of the face that
5153// Amazon Rekognition used for the input image.
5154//
5155// For an example, Searching for a Face Using an Image in the Amazon Rekognition
5156// Developer Guide.
5157//
5158// The QualityFilter input parameter allows you to filter out detected faces
5159// that don’t meet a required quality bar. The quality bar is based on a variety
5160// of common use cases. Use QualityFilter to set the quality bar for filtering
5161// by specifying LOW, MEDIUM, or HIGH. If you do not want to filter detected
5162// faces, specify NONE. The default value is NONE.
5163//
5164// To use quality filtering, you need a collection associated with version 3
5165// of the face model or higher. To get the version of the face model associated
5166// with a collection, call DescribeCollection.
5167//
5168// This operation requires permissions to perform the rekognition:SearchFacesByImage
5169// action.
5170//
5171// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
5172// with awserr.Error's Code and Message methods to get detailed information about
5173// the error.
5174//
5175// See the AWS API reference guide for Amazon Rekognition's
5176// API operation SearchFacesByImage for usage and error information.
5177//
5178// Returned Error Types:
5179//   * InvalidS3ObjectException
5180//   Amazon Rekognition is unable to access the S3 object specified in the request.
5181//
5182//   * InvalidParameterException
5183//   Input parameter violated a constraint. Validate your parameter before calling
5184//   the API operation again.
5185//
5186//   * ImageTooLargeException
5187//   The input image size exceeds the allowed limit. If you are calling DetectProtectiveEquipment,
5188//   the image size or resolution exceeds the allowed limit. For more information,
5189//   see Limits in Amazon Rekognition in the Amazon Rekognition Developer Guide.
5190//
5191//   * AccessDeniedException
5192//   You are not authorized to perform the action.
5193//
5194//   * InternalServerError
5195//   Amazon Rekognition experienced a service issue. Try your call again.
5196//
5197//   * ThrottlingException
5198//   Amazon Rekognition is temporarily unable to process the request. Try your
5199//   call again.
5200//
5201//   * ProvisionedThroughputExceededException
5202//   The number of requests exceeded your throughput limit. If you want to increase
5203//   this limit, contact Amazon Rekognition.
5204//
5205//   * ResourceNotFoundException
5206//   The collection specified in the request cannot be found.
5207//
5208//   * InvalidImageFormatException
5209//   The provided image format is not supported.
5210//
5211func (c *Rekognition) SearchFacesByImage(input *SearchFacesByImageInput) (*SearchFacesByImageOutput, error) {
5212	req, out := c.SearchFacesByImageRequest(input)
5213	return out, req.Send()
5214}
5215
5216// SearchFacesByImageWithContext is the same as SearchFacesByImage with the addition of
5217// the ability to pass a context and additional request options.
5218//
5219// See SearchFacesByImage for details on how to use this API operation.
5220//
5221// The context must be non-nil and will be used for request cancellation. If
5222// the context is nil a panic will occur. In the future the SDK may create
5223// sub-contexts for http.Requests. See https://golang.org/pkg/context/
5224// for more information on using Contexts.
5225func (c *Rekognition) SearchFacesByImageWithContext(ctx aws.Context, input *SearchFacesByImageInput, opts ...request.Option) (*SearchFacesByImageOutput, error) {
5226	req, out := c.SearchFacesByImageRequest(input)
5227	req.SetContext(ctx)
5228	req.ApplyOptions(opts...)
5229	return out, req.Send()
5230}
5231
5232const opStartCelebrityRecognition = "StartCelebrityRecognition"
5233
5234// StartCelebrityRecognitionRequest generates a "aws/request.Request" representing the
5235// client's request for the StartCelebrityRecognition operation. The "output" return
5236// value will be populated with the request's response once the request completes
5237// successfully.
5238//
5239// Use "Send" method on the returned Request to send the API call to the service.
5240// the "output" return value is not valid until after Send returns without error.
5241//
5242// See StartCelebrityRecognition for more information on using the StartCelebrityRecognition
5243// API call, and error handling.
5244//
5245// This method is useful when you want to inject custom logic or configuration
5246// into the SDK's request lifecycle. Such as custom headers, or retry logic.
5247//
5248//
5249//    // Example sending a request using the StartCelebrityRecognitionRequest method.
5250//    req, resp := client.StartCelebrityRecognitionRequest(params)
5251//
5252//    err := req.Send()
5253//    if err == nil { // resp is now filled
5254//        fmt.Println(resp)
5255//    }
5256func (c *Rekognition) StartCelebrityRecognitionRequest(input *StartCelebrityRecognitionInput) (req *request.Request, output *StartCelebrityRecognitionOutput) {
5257	op := &request.Operation{
5258		Name:       opStartCelebrityRecognition,
5259		HTTPMethod: "POST",
5260		HTTPPath:   "/",
5261	}
5262
5263	if input == nil {
5264		input = &StartCelebrityRecognitionInput{}
5265	}
5266
5267	output = &StartCelebrityRecognitionOutput{}
5268	req = c.newRequest(op, input, output)
5269	return
5270}
5271
5272// StartCelebrityRecognition API operation for Amazon Rekognition.
5273//
5274// Starts asynchronous recognition of celebrities in a stored video.
5275//
5276// Amazon Rekognition Video can detect celebrities in a video must be stored
5277// in an Amazon S3 bucket. Use Video to specify the bucket name and the filename
5278// of the video. StartCelebrityRecognition returns a job identifier (JobId)
5279// which you use to get the results of the analysis. When celebrity recognition
5280// analysis is finished, Amazon Rekognition Video publishes a completion status
5281// to the Amazon Simple Notification Service topic that you specify in NotificationChannel.
5282// To get the results of the celebrity recognition analysis, first check that
5283// the status value published to the Amazon SNS topic is SUCCEEDED. If so, call
5284// GetCelebrityRecognition and pass the job identifier (JobId) from the initial
5285// call to StartCelebrityRecognition.
5286//
5287// For more information, see Recognizing Celebrities in the Amazon Rekognition
5288// Developer Guide.
5289//
5290// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
5291// with awserr.Error's Code and Message methods to get detailed information about
5292// the error.
5293//
5294// See the AWS API reference guide for Amazon Rekognition's
5295// API operation StartCelebrityRecognition for usage and error information.
5296//
5297// Returned Error Types:
5298//   * AccessDeniedException
5299//   You are not authorized to perform the action.
5300//
5301//   * IdempotentParameterMismatchException
5302//   A ClientRequestToken input parameter was reused with an operation, but at
5303//   least one of the other input parameters is different from the previous call
5304//   to the operation.
5305//
5306//   * InvalidParameterException
5307//   Input parameter violated a constraint. Validate your parameter before calling
5308//   the API operation again.
5309//
5310//   * InvalidS3ObjectException
5311//   Amazon Rekognition is unable to access the S3 object specified in the request.
5312//
5313//   * InternalServerError
5314//   Amazon Rekognition experienced a service issue. Try your call again.
5315//
5316//   * VideoTooLargeException
5317//   The file size or duration of the supplied media is too large. The maximum
5318//   file size is 10GB. The maximum duration is 6 hours.
5319//
5320//   * ProvisionedThroughputExceededException
5321//   The number of requests exceeded your throughput limit. If you want to increase
5322//   this limit, contact Amazon Rekognition.
5323//
5324//   * LimitExceededException
5325//   An Amazon Rekognition service limit was exceeded. For example, if you start
5326//   too many Amazon Rekognition Video jobs concurrently, calls to start operations
5327//   (StartLabelDetection, for example) will raise a LimitExceededException exception
5328//   (HTTP status code: 400) until the number of concurrently running jobs is
5329//   below the Amazon Rekognition service limit.
5330//
5331//   * ThrottlingException
5332//   Amazon Rekognition is temporarily unable to process the request. Try your
5333//   call again.
5334//
5335func (c *Rekognition) StartCelebrityRecognition(input *StartCelebrityRecognitionInput) (*StartCelebrityRecognitionOutput, error) {
5336	req, out := c.StartCelebrityRecognitionRequest(input)
5337	return out, req.Send()
5338}
5339
5340// StartCelebrityRecognitionWithContext is the same as StartCelebrityRecognition with the addition of
5341// the ability to pass a context and additional request options.
5342//
5343// See StartCelebrityRecognition for details on how to use this API operation.
5344//
5345// The context must be non-nil and will be used for request cancellation. If
5346// the context is nil a panic will occur. In the future the SDK may create
5347// sub-contexts for http.Requests. See https://golang.org/pkg/context/
5348// for more information on using Contexts.
5349func (c *Rekognition) StartCelebrityRecognitionWithContext(ctx aws.Context, input *StartCelebrityRecognitionInput, opts ...request.Option) (*StartCelebrityRecognitionOutput, error) {
5350	req, out := c.StartCelebrityRecognitionRequest(input)
5351	req.SetContext(ctx)
5352	req.ApplyOptions(opts...)
5353	return out, req.Send()
5354}
5355
5356const opStartContentModeration = "StartContentModeration"
5357
5358// StartContentModerationRequest generates a "aws/request.Request" representing the
5359// client's request for the StartContentModeration operation. The "output" return
5360// value will be populated with the request's response once the request completes
5361// successfully.
5362//
5363// Use "Send" method on the returned Request to send the API call to the service.
5364// the "output" return value is not valid until after Send returns without error.
5365//
5366// See StartContentModeration for more information on using the StartContentModeration
5367// API call, and error handling.
5368//
5369// This method is useful when you want to inject custom logic or configuration
5370// into the SDK's request lifecycle. Such as custom headers, or retry logic.
5371//
5372//
5373//    // Example sending a request using the StartContentModerationRequest method.
5374//    req, resp := client.StartContentModerationRequest(params)
5375//
5376//    err := req.Send()
5377//    if err == nil { // resp is now filled
5378//        fmt.Println(resp)
5379//    }
5380func (c *Rekognition) StartContentModerationRequest(input *StartContentModerationInput) (req *request.Request, output *StartContentModerationOutput) {
5381	op := &request.Operation{
5382		Name:       opStartContentModeration,
5383		HTTPMethod: "POST",
5384		HTTPPath:   "/",
5385	}
5386
5387	if input == nil {
5388		input = &StartContentModerationInput{}
5389	}
5390
5391	output = &StartContentModerationOutput{}
5392	req = c.newRequest(op, input, output)
5393	return
5394}
5395
5396// StartContentModeration API operation for Amazon Rekognition.
5397//
5398// Starts asynchronous detection of unsafe content in a stored video.
5399//
5400// Amazon Rekognition Video can moderate content in a video stored in an Amazon
5401// S3 bucket. Use Video to specify the bucket name and the filename of the video.
5402// StartContentModeration returns a job identifier (JobId) which you use to
5403// get the results of the analysis. When unsafe content analysis is finished,
5404// Amazon Rekognition Video publishes a completion status to the Amazon Simple
5405// Notification Service topic that you specify in NotificationChannel.
5406//
5407// To get the results of the unsafe content analysis, first check that the status
5408// value published to the Amazon SNS topic is SUCCEEDED. If so, call GetContentModeration
5409// and pass the job identifier (JobId) from the initial call to StartContentModeration.
5410//
5411// For more information, see Detecting Unsafe Content in the Amazon Rekognition
5412// Developer Guide.
5413//
5414// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
5415// with awserr.Error's Code and Message methods to get detailed information about
5416// the error.
5417//
5418// See the AWS API reference guide for Amazon Rekognition's
5419// API operation StartContentModeration for usage and error information.
5420//
5421// Returned Error Types:
5422//   * AccessDeniedException
5423//   You are not authorized to perform the action.
5424//
5425//   * IdempotentParameterMismatchException
5426//   A ClientRequestToken input parameter was reused with an operation, but at
5427//   least one of the other input parameters is different from the previous call
5428//   to the operation.
5429//
5430//   * InvalidParameterException
5431//   Input parameter violated a constraint. Validate your parameter before calling
5432//   the API operation again.
5433//
5434//   * InvalidS3ObjectException
5435//   Amazon Rekognition is unable to access the S3 object specified in the request.
5436//
5437//   * InternalServerError
5438//   Amazon Rekognition experienced a service issue. Try your call again.
5439//
5440//   * VideoTooLargeException
5441//   The file size or duration of the supplied media is too large. The maximum
5442//   file size is 10GB. The maximum duration is 6 hours.
5443//
5444//   * ProvisionedThroughputExceededException
5445//   The number of requests exceeded your throughput limit. If you want to increase
5446//   this limit, contact Amazon Rekognition.
5447//
5448//   * LimitExceededException
5449//   An Amazon Rekognition service limit was exceeded. For example, if you start
5450//   too many Amazon Rekognition Video jobs concurrently, calls to start operations
5451//   (StartLabelDetection, for example) will raise a LimitExceededException exception
5452//   (HTTP status code: 400) until the number of concurrently running jobs is
5453//   below the Amazon Rekognition service limit.
5454//
5455//   * ThrottlingException
5456//   Amazon Rekognition is temporarily unable to process the request. Try your
5457//   call again.
5458//
5459func (c *Rekognition) StartContentModeration(input *StartContentModerationInput) (*StartContentModerationOutput, error) {
5460	req, out := c.StartContentModerationRequest(input)
5461	return out, req.Send()
5462}
5463
5464// StartContentModerationWithContext is the same as StartContentModeration with the addition of
5465// the ability to pass a context and additional request options.
5466//
5467// See StartContentModeration for details on how to use this API operation.
5468//
5469// The context must be non-nil and will be used for request cancellation. If
5470// the context is nil a panic will occur. In the future the SDK may create
5471// sub-contexts for http.Requests. See https://golang.org/pkg/context/
5472// for more information on using Contexts.
5473func (c *Rekognition) StartContentModerationWithContext(ctx aws.Context, input *StartContentModerationInput, opts ...request.Option) (*StartContentModerationOutput, error) {
5474	req, out := c.StartContentModerationRequest(input)
5475	req.SetContext(ctx)
5476	req.ApplyOptions(opts...)
5477	return out, req.Send()
5478}
5479
5480const opStartFaceDetection = "StartFaceDetection"
5481
5482// StartFaceDetectionRequest generates a "aws/request.Request" representing the
5483// client's request for the StartFaceDetection operation. The "output" return
5484// value will be populated with the request's response once the request completes
5485// successfully.
5486//
5487// Use "Send" method on the returned Request to send the API call to the service.
5488// the "output" return value is not valid until after Send returns without error.
5489//
5490// See StartFaceDetection for more information on using the StartFaceDetection
5491// API call, and error handling.
5492//
5493// This method is useful when you want to inject custom logic or configuration
5494// into the SDK's request lifecycle. Such as custom headers, or retry logic.
5495//
5496//
5497//    // Example sending a request using the StartFaceDetectionRequest method.
5498//    req, resp := client.StartFaceDetectionRequest(params)
5499//
5500//    err := req.Send()
5501//    if err == nil { // resp is now filled
5502//        fmt.Println(resp)
5503//    }
5504func (c *Rekognition) StartFaceDetectionRequest(input *StartFaceDetectionInput) (req *request.Request, output *StartFaceDetectionOutput) {
5505	op := &request.Operation{
5506		Name:       opStartFaceDetection,
5507		HTTPMethod: "POST",
5508		HTTPPath:   "/",
5509	}
5510
5511	if input == nil {
5512		input = &StartFaceDetectionInput{}
5513	}
5514
5515	output = &StartFaceDetectionOutput{}
5516	req = c.newRequest(op, input, output)
5517	return
5518}
5519
5520// StartFaceDetection API operation for Amazon Rekognition.
5521//
5522// Starts asynchronous detection of faces in a stored video.
5523//
5524// Amazon Rekognition Video can detect faces in a video stored in an Amazon
5525// S3 bucket. Use Video to specify the bucket name and the filename of the video.
5526// StartFaceDetection returns a job identifier (JobId) that you use to get the
5527// results of the operation. When face detection is finished, Amazon Rekognition
5528// Video publishes a completion status to the Amazon Simple Notification Service
5529// topic that you specify in NotificationChannel. To get the results of the
5530// face detection operation, first check that the status value published to
5531// the Amazon SNS topic is SUCCEEDED. If so, call GetFaceDetection and pass
5532// the job identifier (JobId) from the initial call to StartFaceDetection.
5533//
5534// For more information, see Detecting Faces in a Stored Video in the Amazon
5535// Rekognition Developer Guide.
5536//
5537// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
5538// with awserr.Error's Code and Message methods to get detailed information about
5539// the error.
5540//
5541// See the AWS API reference guide for Amazon Rekognition's
5542// API operation StartFaceDetection for usage and error information.
5543//
5544// Returned Error Types:
5545//   * AccessDeniedException
5546//   You are not authorized to perform the action.
5547//
5548//   * IdempotentParameterMismatchException
5549//   A ClientRequestToken input parameter was reused with an operation, but at
5550//   least one of the other input parameters is different from the previous call
5551//   to the operation.
5552//
5553//   * InvalidParameterException
5554//   Input parameter violated a constraint. Validate your parameter before calling
5555//   the API operation again.
5556//
5557//   * InvalidS3ObjectException
5558//   Amazon Rekognition is unable to access the S3 object specified in the request.
5559//
5560//   * InternalServerError
5561//   Amazon Rekognition experienced a service issue. Try your call again.
5562//
5563//   * VideoTooLargeException
5564//   The file size or duration of the supplied media is too large. The maximum
5565//   file size is 10GB. The maximum duration is 6 hours.
5566//
5567//   * ProvisionedThroughputExceededException
5568//   The number of requests exceeded your throughput limit. If you want to increase
5569//   this limit, contact Amazon Rekognition.
5570//
5571//   * LimitExceededException
5572//   An Amazon Rekognition service limit was exceeded. For example, if you start
5573//   too many Amazon Rekognition Video jobs concurrently, calls to start operations
5574//   (StartLabelDetection, for example) will raise a LimitExceededException exception
5575//   (HTTP status code: 400) until the number of concurrently running jobs is
5576//   below the Amazon Rekognition service limit.
5577//
5578//   * ThrottlingException
5579//   Amazon Rekognition is temporarily unable to process the request. Try your
5580//   call again.
5581//
5582func (c *Rekognition) StartFaceDetection(input *StartFaceDetectionInput) (*StartFaceDetectionOutput, error) {
5583	req, out := c.StartFaceDetectionRequest(input)
5584	return out, req.Send()
5585}
5586
5587// StartFaceDetectionWithContext is the same as StartFaceDetection with the addition of
5588// the ability to pass a context and additional request options.
5589//
5590// See StartFaceDetection for details on how to use this API operation.
5591//
5592// The context must be non-nil and will be used for request cancellation. If
5593// the context is nil a panic will occur. In the future the SDK may create
5594// sub-contexts for http.Requests. See https://golang.org/pkg/context/
5595// for more information on using Contexts.
5596func (c *Rekognition) StartFaceDetectionWithContext(ctx aws.Context, input *StartFaceDetectionInput, opts ...request.Option) (*StartFaceDetectionOutput, error) {
5597	req, out := c.StartFaceDetectionRequest(input)
5598	req.SetContext(ctx)
5599	req.ApplyOptions(opts...)
5600	return out, req.Send()
5601}
5602
5603const opStartFaceSearch = "StartFaceSearch"
5604
5605// StartFaceSearchRequest generates a "aws/request.Request" representing the
5606// client's request for the StartFaceSearch operation. The "output" return
5607// value will be populated with the request's response once the request completes
5608// successfully.
5609//
5610// Use "Send" method on the returned Request to send the API call to the service.
5611// the "output" return value is not valid until after Send returns without error.
5612//
5613// See StartFaceSearch for more information on using the StartFaceSearch
5614// API call, and error handling.
5615//
5616// This method is useful when you want to inject custom logic or configuration
5617// into the SDK's request lifecycle. Such as custom headers, or retry logic.
5618//
5619//
5620//    // Example sending a request using the StartFaceSearchRequest method.
5621//    req, resp := client.StartFaceSearchRequest(params)
5622//
5623//    err := req.Send()
5624//    if err == nil { // resp is now filled
5625//        fmt.Println(resp)
5626//    }
5627func (c *Rekognition) StartFaceSearchRequest(input *StartFaceSearchInput) (req *request.Request, output *StartFaceSearchOutput) {
5628	op := &request.Operation{
5629		Name:       opStartFaceSearch,
5630		HTTPMethod: "POST",
5631		HTTPPath:   "/",
5632	}
5633
5634	if input == nil {
5635		input = &StartFaceSearchInput{}
5636	}
5637
5638	output = &StartFaceSearchOutput{}
5639	req = c.newRequest(op, input, output)
5640	return
5641}
5642
5643// StartFaceSearch API operation for Amazon Rekognition.
5644//
5645// Starts the asynchronous search for faces in a collection that match the faces
5646// of persons detected in a stored video.
5647//
5648// The video must be stored in an Amazon S3 bucket. Use Video to specify the
5649// bucket name and the filename of the video. StartFaceSearch returns a job
5650// identifier (JobId) which you use to get the search results once the search
5651// has completed. When searching is finished, Amazon Rekognition Video publishes
5652// a completion status to the Amazon Simple Notification Service topic that
5653// you specify in NotificationChannel. To get the search results, first check
5654// that the status value published to the Amazon SNS topic is SUCCEEDED. If
5655// so, call GetFaceSearch and pass the job identifier (JobId) from the initial
5656// call to StartFaceSearch. For more information, see procedure-person-search-videos.
5657//
5658// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
5659// with awserr.Error's Code and Message methods to get detailed information about
5660// the error.
5661//
5662// See the AWS API reference guide for Amazon Rekognition's
5663// API operation StartFaceSearch for usage and error information.
5664//
5665// Returned Error Types:
5666//   * AccessDeniedException
5667//   You are not authorized to perform the action.
5668//
5669//   * IdempotentParameterMismatchException
5670//   A ClientRequestToken input parameter was reused with an operation, but at
5671//   least one of the other input parameters is different from the previous call
5672//   to the operation.
5673//
5674//   * InvalidParameterException
5675//   Input parameter violated a constraint. Validate your parameter before calling
5676//   the API operation again.
5677//
5678//   * InvalidS3ObjectException
5679//   Amazon Rekognition is unable to access the S3 object specified in the request.
5680//
5681//   * InternalServerError
5682//   Amazon Rekognition experienced a service issue. Try your call again.
5683//
5684//   * VideoTooLargeException
5685//   The file size or duration of the supplied media is too large. The maximum
5686//   file size is 10GB. The maximum duration is 6 hours.
5687//
5688//   * ProvisionedThroughputExceededException
5689//   The number of requests exceeded your throughput limit. If you want to increase
5690//   this limit, contact Amazon Rekognition.
5691//
5692//   * LimitExceededException
5693//   An Amazon Rekognition service limit was exceeded. For example, if you start
5694//   too many Amazon Rekognition Video jobs concurrently, calls to start operations
5695//   (StartLabelDetection, for example) will raise a LimitExceededException exception
5696//   (HTTP status code: 400) until the number of concurrently running jobs is
5697//   below the Amazon Rekognition service limit.
5698//
5699//   * ResourceNotFoundException
5700//   The collection specified in the request cannot be found.
5701//
5702//   * ThrottlingException
5703//   Amazon Rekognition is temporarily unable to process the request. Try your
5704//   call again.
5705//
5706func (c *Rekognition) StartFaceSearch(input *StartFaceSearchInput) (*StartFaceSearchOutput, error) {
5707	req, out := c.StartFaceSearchRequest(input)
5708	return out, req.Send()
5709}
5710
5711// StartFaceSearchWithContext is the same as StartFaceSearch with the addition of
5712// the ability to pass a context and additional request options.
5713//
5714// See StartFaceSearch for details on how to use this API operation.
5715//
5716// The context must be non-nil and will be used for request cancellation. If
5717// the context is nil a panic will occur. In the future the SDK may create
5718// sub-contexts for http.Requests. See https://golang.org/pkg/context/
5719// for more information on using Contexts.
5720func (c *Rekognition) StartFaceSearchWithContext(ctx aws.Context, input *StartFaceSearchInput, opts ...request.Option) (*StartFaceSearchOutput, error) {
5721	req, out := c.StartFaceSearchRequest(input)
5722	req.SetContext(ctx)
5723	req.ApplyOptions(opts...)
5724	return out, req.Send()
5725}
5726
5727const opStartLabelDetection = "StartLabelDetection"
5728
5729// StartLabelDetectionRequest generates a "aws/request.Request" representing the
5730// client's request for the StartLabelDetection operation. The "output" return
5731// value will be populated with the request's response once the request completes
5732// successfully.
5733//
5734// Use "Send" method on the returned Request to send the API call to the service.
5735// the "output" return value is not valid until after Send returns without error.
5736//
5737// See StartLabelDetection for more information on using the StartLabelDetection
5738// API call, and error handling.
5739//
5740// This method is useful when you want to inject custom logic or configuration
5741// into the SDK's request lifecycle. Such as custom headers, or retry logic.
5742//
5743//
5744//    // Example sending a request using the StartLabelDetectionRequest method.
5745//    req, resp := client.StartLabelDetectionRequest(params)
5746//
5747//    err := req.Send()
5748//    if err == nil { // resp is now filled
5749//        fmt.Println(resp)
5750//    }
5751func (c *Rekognition) StartLabelDetectionRequest(input *StartLabelDetectionInput) (req *request.Request, output *StartLabelDetectionOutput) {
5752	op := &request.Operation{
5753		Name:       opStartLabelDetection,
5754		HTTPMethod: "POST",
5755		HTTPPath:   "/",
5756	}
5757
5758	if input == nil {
5759		input = &StartLabelDetectionInput{}
5760	}
5761
5762	output = &StartLabelDetectionOutput{}
5763	req = c.newRequest(op, input, output)
5764	return
5765}
5766
5767// StartLabelDetection API operation for Amazon Rekognition.
5768//
5769// Starts asynchronous detection of labels in a stored video.
5770//
5771// Amazon Rekognition Video can detect labels in a video. Labels are instances
5772// of real-world entities. This includes objects like flower, tree, and table;
5773// events like wedding, graduation, and birthday party; concepts like landscape,
5774// evening, and nature; and activities like a person getting out of a car or
5775// a person skiing.
5776//
5777// The video must be stored in an Amazon S3 bucket. Use Video to specify the
5778// bucket name and the filename of the video. StartLabelDetection returns a
5779// job identifier (JobId) which you use to get the results of the operation.
5780// When label detection is finished, Amazon Rekognition Video publishes a completion
5781// status to the Amazon Simple Notification Service topic that you specify in
5782// NotificationChannel.
5783//
5784// To get the results of the label detection operation, first check that the
5785// status value published to the Amazon SNS topic is SUCCEEDED. If so, call
5786// GetLabelDetection and pass the job identifier (JobId) from the initial call
5787// to StartLabelDetection.
5788//
5789// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
5790// with awserr.Error's Code and Message methods to get detailed information about
5791// the error.
5792//
5793// See the AWS API reference guide for Amazon Rekognition's
5794// API operation StartLabelDetection for usage and error information.
5795//
5796// Returned Error Types:
5797//   * AccessDeniedException
5798//   You are not authorized to perform the action.
5799//
5800//   * IdempotentParameterMismatchException
5801//   A ClientRequestToken input parameter was reused with an operation, but at
5802//   least one of the other input parameters is different from the previous call
5803//   to the operation.
5804//
5805//   * InvalidParameterException
5806//   Input parameter violated a constraint. Validate your parameter before calling
5807//   the API operation again.
5808//
5809//   * InvalidS3ObjectException
5810//   Amazon Rekognition is unable to access the S3 object specified in the request.
5811//
5812//   * InternalServerError
5813//   Amazon Rekognition experienced a service issue. Try your call again.
5814//
5815//   * VideoTooLargeException
5816//   The file size or duration of the supplied media is too large. The maximum
5817//   file size is 10GB. The maximum duration is 6 hours.
5818//
5819//   * ProvisionedThroughputExceededException
5820//   The number of requests exceeded your throughput limit. If you want to increase
5821//   this limit, contact Amazon Rekognition.
5822//
5823//   * LimitExceededException
5824//   An Amazon Rekognition service limit was exceeded. For example, if you start
5825//   too many Amazon Rekognition Video jobs concurrently, calls to start operations
5826//   (StartLabelDetection, for example) will raise a LimitExceededException exception
5827//   (HTTP status code: 400) until the number of concurrently running jobs is
5828//   below the Amazon Rekognition service limit.
5829//
5830//   * ThrottlingException
5831//   Amazon Rekognition is temporarily unable to process the request. Try your
5832//   call again.
5833//
5834func (c *Rekognition) StartLabelDetection(input *StartLabelDetectionInput) (*StartLabelDetectionOutput, error) {
5835	req, out := c.StartLabelDetectionRequest(input)
5836	return out, req.Send()
5837}
5838
5839// StartLabelDetectionWithContext is the same as StartLabelDetection with the addition of
5840// the ability to pass a context and additional request options.
5841//
5842// See StartLabelDetection for details on how to use this API operation.
5843//
5844// The context must be non-nil and will be used for request cancellation. If
5845// the context is nil a panic will occur. In the future the SDK may create
5846// sub-contexts for http.Requests. See https://golang.org/pkg/context/
5847// for more information on using Contexts.
5848func (c *Rekognition) StartLabelDetectionWithContext(ctx aws.Context, input *StartLabelDetectionInput, opts ...request.Option) (*StartLabelDetectionOutput, error) {
5849	req, out := c.StartLabelDetectionRequest(input)
5850	req.SetContext(ctx)
5851	req.ApplyOptions(opts...)
5852	return out, req.Send()
5853}
5854
5855const opStartPersonTracking = "StartPersonTracking"
5856
5857// StartPersonTrackingRequest generates a "aws/request.Request" representing the
5858// client's request for the StartPersonTracking operation. The "output" return
5859// value will be populated with the request's response once the request completes
5860// successfully.
5861//
5862// Use "Send" method on the returned Request to send the API call to the service.
5863// the "output" return value is not valid until after Send returns without error.
5864//
5865// See StartPersonTracking for more information on using the StartPersonTracking
5866// API call, and error handling.
5867//
5868// This method is useful when you want to inject custom logic or configuration
5869// into the SDK's request lifecycle. Such as custom headers, or retry logic.
5870//
5871//
5872//    // Example sending a request using the StartPersonTrackingRequest method.
5873//    req, resp := client.StartPersonTrackingRequest(params)
5874//
5875//    err := req.Send()
5876//    if err == nil { // resp is now filled
5877//        fmt.Println(resp)
5878//    }
5879func (c *Rekognition) StartPersonTrackingRequest(input *StartPersonTrackingInput) (req *request.Request, output *StartPersonTrackingOutput) {
5880	op := &request.Operation{
5881		Name:       opStartPersonTracking,
5882		HTTPMethod: "POST",
5883		HTTPPath:   "/",
5884	}
5885
5886	if input == nil {
5887		input = &StartPersonTrackingInput{}
5888	}
5889
5890	output = &StartPersonTrackingOutput{}
5891	req = c.newRequest(op, input, output)
5892	return
5893}
5894
5895// StartPersonTracking API operation for Amazon Rekognition.
5896//
5897// Starts the asynchronous tracking of a person's path in a stored video.
5898//
5899// Amazon Rekognition Video can track the path of people in a video stored in
5900// an Amazon S3 bucket. Use Video to specify the bucket name and the filename
5901// of the video. StartPersonTracking returns a job identifier (JobId) which
5902// you use to get the results of the operation. When label detection is finished,
5903// Amazon Rekognition publishes a completion status to the Amazon Simple Notification
5904// Service topic that you specify in NotificationChannel.
5905//
5906// To get the results of the person detection operation, first check that the
5907// status value published to the Amazon SNS topic is SUCCEEDED. If so, call
5908// GetPersonTracking and pass the job identifier (JobId) from the initial call
5909// to StartPersonTracking.
5910//
5911// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
5912// with awserr.Error's Code and Message methods to get detailed information about
5913// the error.
5914//
5915// See the AWS API reference guide for Amazon Rekognition's
5916// API operation StartPersonTracking for usage and error information.
5917//
5918// Returned Error Types:
5919//   * AccessDeniedException
5920//   You are not authorized to perform the action.
5921//
5922//   * IdempotentParameterMismatchException
5923//   A ClientRequestToken input parameter was reused with an operation, but at
5924//   least one of the other input parameters is different from the previous call
5925//   to the operation.
5926//
5927//   * InvalidParameterException
5928//   Input parameter violated a constraint. Validate your parameter before calling
5929//   the API operation again.
5930//
5931//   * InvalidS3ObjectException
5932//   Amazon Rekognition is unable to access the S3 object specified in the request.
5933//
5934//   * InternalServerError
5935//   Amazon Rekognition experienced a service issue. Try your call again.
5936//
5937//   * VideoTooLargeException
5938//   The file size or duration of the supplied media is too large. The maximum
5939//   file size is 10GB. The maximum duration is 6 hours.
5940//
5941//   * ProvisionedThroughputExceededException
5942//   The number of requests exceeded your throughput limit. If you want to increase
5943//   this limit, contact Amazon Rekognition.
5944//
5945//   * LimitExceededException
5946//   An Amazon Rekognition service limit was exceeded. For example, if you start
5947//   too many Amazon Rekognition Video jobs concurrently, calls to start operations
5948//   (StartLabelDetection, for example) will raise a LimitExceededException exception
5949//   (HTTP status code: 400) until the number of concurrently running jobs is
5950//   below the Amazon Rekognition service limit.
5951//
5952//   * ThrottlingException
5953//   Amazon Rekognition is temporarily unable to process the request. Try your
5954//   call again.
5955//
5956func (c *Rekognition) StartPersonTracking(input *StartPersonTrackingInput) (*StartPersonTrackingOutput, error) {
5957	req, out := c.StartPersonTrackingRequest(input)
5958	return out, req.Send()
5959}
5960
5961// StartPersonTrackingWithContext is the same as StartPersonTracking with the addition of
5962// the ability to pass a context and additional request options.
5963//
5964// See StartPersonTracking for details on how to use this API operation.
5965//
5966// The context must be non-nil and will be used for request cancellation. If
5967// the context is nil a panic will occur. In the future the SDK may create
5968// sub-contexts for http.Requests. See https://golang.org/pkg/context/
5969// for more information on using Contexts.
5970func (c *Rekognition) StartPersonTrackingWithContext(ctx aws.Context, input *StartPersonTrackingInput, opts ...request.Option) (*StartPersonTrackingOutput, error) {
5971	req, out := c.StartPersonTrackingRequest(input)
5972	req.SetContext(ctx)
5973	req.ApplyOptions(opts...)
5974	return out, req.Send()
5975}
5976
5977const opStartProjectVersion = "StartProjectVersion"
5978
5979// StartProjectVersionRequest generates a "aws/request.Request" representing the
5980// client's request for the StartProjectVersion operation. The "output" return
5981// value will be populated with the request's response once the request completes
5982// successfully.
5983//
5984// Use "Send" method on the returned Request to send the API call to the service.
5985// the "output" return value is not valid until after Send returns without error.
5986//
5987// See StartProjectVersion for more information on using the StartProjectVersion
5988// API call, and error handling.
5989//
5990// This method is useful when you want to inject custom logic or configuration
5991// into the SDK's request lifecycle. Such as custom headers, or retry logic.
5992//
5993//
5994//    // Example sending a request using the StartProjectVersionRequest method.
5995//    req, resp := client.StartProjectVersionRequest(params)
5996//
5997//    err := req.Send()
5998//    if err == nil { // resp is now filled
5999//        fmt.Println(resp)
6000//    }
6001func (c *Rekognition) StartProjectVersionRequest(input *StartProjectVersionInput) (req *request.Request, output *StartProjectVersionOutput) {
6002	op := &request.Operation{
6003		Name:       opStartProjectVersion,
6004		HTTPMethod: "POST",
6005		HTTPPath:   "/",
6006	}
6007
6008	if input == nil {
6009		input = &StartProjectVersionInput{}
6010	}
6011
6012	output = &StartProjectVersionOutput{}
6013	req = c.newRequest(op, input, output)
6014	return
6015}
6016
6017// StartProjectVersion API operation for Amazon Rekognition.
6018//
6019// Starts the running of the version of a model. Starting a model takes a while
6020// to complete. To check the current state of the model, use DescribeProjectVersions.
6021//
6022// Once the model is running, you can detect custom labels in new images by
6023// calling DetectCustomLabels.
6024//
6025// You are charged for the amount of time that the model is running. To stop
6026// a running model, call StopProjectVersion.
6027//
6028// This operation requires permissions to perform the rekognition:StartProjectVersion
6029// action.
6030//
6031// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
6032// with awserr.Error's Code and Message methods to get detailed information about
6033// the error.
6034//
6035// See the AWS API reference guide for Amazon Rekognition's
6036// API operation StartProjectVersion for usage and error information.
6037//
6038// Returned Error Types:
6039//   * ResourceNotFoundException
6040//   The collection specified in the request cannot be found.
6041//
6042//   * ResourceInUseException
6043//   The specified resource is already being used.
6044//
6045//   * LimitExceededException
6046//   An Amazon Rekognition service limit was exceeded. For example, if you start
6047//   too many Amazon Rekognition Video jobs concurrently, calls to start operations
6048//   (StartLabelDetection, for example) will raise a LimitExceededException exception
6049//   (HTTP status code: 400) until the number of concurrently running jobs is
6050//   below the Amazon Rekognition service limit.
6051//
6052//   * InvalidParameterException
6053//   Input parameter violated a constraint. Validate your parameter before calling
6054//   the API operation again.
6055//
6056//   * AccessDeniedException
6057//   You are not authorized to perform the action.
6058//
6059//   * InternalServerError
6060//   Amazon Rekognition experienced a service issue. Try your call again.
6061//
6062//   * ThrottlingException
6063//   Amazon Rekognition is temporarily unable to process the request. Try your
6064//   call again.
6065//
6066//   * ProvisionedThroughputExceededException
6067//   The number of requests exceeded your throughput limit. If you want to increase
6068//   this limit, contact Amazon Rekognition.
6069//
6070func (c *Rekognition) StartProjectVersion(input *StartProjectVersionInput) (*StartProjectVersionOutput, error) {
6071	req, out := c.StartProjectVersionRequest(input)
6072	return out, req.Send()
6073}
6074
6075// StartProjectVersionWithContext is the same as StartProjectVersion with the addition of
6076// the ability to pass a context and additional request options.
6077//
6078// See StartProjectVersion for details on how to use this API operation.
6079//
6080// The context must be non-nil and will be used for request cancellation. If
6081// the context is nil a panic will occur. In the future the SDK may create
6082// sub-contexts for http.Requests. See https://golang.org/pkg/context/
6083// for more information on using Contexts.
6084func (c *Rekognition) StartProjectVersionWithContext(ctx aws.Context, input *StartProjectVersionInput, opts ...request.Option) (*StartProjectVersionOutput, error) {
6085	req, out := c.StartProjectVersionRequest(input)
6086	req.SetContext(ctx)
6087	req.ApplyOptions(opts...)
6088	return out, req.Send()
6089}
6090
6091const opStartSegmentDetection = "StartSegmentDetection"
6092
6093// StartSegmentDetectionRequest generates a "aws/request.Request" representing the
6094// client's request for the StartSegmentDetection operation. The "output" return
6095// value will be populated with the request's response once the request completes
6096// successfully.
6097//
6098// Use "Send" method on the returned Request to send the API call to the service.
6099// the "output" return value is not valid until after Send returns without error.
6100//
6101// See StartSegmentDetection for more information on using the StartSegmentDetection
6102// API call, and error handling.
6103//
6104// This method is useful when you want to inject custom logic or configuration
6105// into the SDK's request lifecycle. Such as custom headers, or retry logic.
6106//
6107//
6108//    // Example sending a request using the StartSegmentDetectionRequest method.
6109//    req, resp := client.StartSegmentDetectionRequest(params)
6110//
6111//    err := req.Send()
6112//    if err == nil { // resp is now filled
6113//        fmt.Println(resp)
6114//    }
6115func (c *Rekognition) StartSegmentDetectionRequest(input *StartSegmentDetectionInput) (req *request.Request, output *StartSegmentDetectionOutput) {
6116	op := &request.Operation{
6117		Name:       opStartSegmentDetection,
6118		HTTPMethod: "POST",
6119		HTTPPath:   "/",
6120	}
6121
6122	if input == nil {
6123		input = &StartSegmentDetectionInput{}
6124	}
6125
6126	output = &StartSegmentDetectionOutput{}
6127	req = c.newRequest(op, input, output)
6128	return
6129}
6130
6131// StartSegmentDetection API operation for Amazon Rekognition.
6132//
6133// Starts asynchronous detection of segment detection in a stored video.
6134//
6135// Amazon Rekognition Video can detect segments in a video stored in an Amazon
6136// S3 bucket. Use Video to specify the bucket name and the filename of the video.
6137// StartSegmentDetection returns a job identifier (JobId) which you use to get
6138// the results of the operation. When segment detection is finished, Amazon
6139// Rekognition Video publishes a completion status to the Amazon Simple Notification
6140// Service topic that you specify in NotificationChannel.
6141//
6142// You can use the Filters (StartSegmentDetectionFilters) input parameter to
6143// specify the minimum detection confidence returned in the response. Within
6144// Filters, use ShotFilter (StartShotDetectionFilter) to filter detected shots.
6145// Use TechnicalCueFilter (StartTechnicalCueDetectionFilter) to filter technical
6146// cues.
6147//
6148// To get the results of the segment detection operation, first check that the
6149// status value published to the Amazon SNS topic is SUCCEEDED. if so, call
6150// GetSegmentDetection and pass the job identifier (JobId) from the initial
6151// call to StartSegmentDetection.
6152//
6153// For more information, see Detecting Video Segments in Stored Video in the
6154// Amazon Rekognition Developer Guide.
6155//
6156// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
6157// with awserr.Error's Code and Message methods to get detailed information about
6158// the error.
6159//
6160// See the AWS API reference guide for Amazon Rekognition's
6161// API operation StartSegmentDetection for usage and error information.
6162//
6163// Returned Error Types:
6164//   * AccessDeniedException
6165//   You are not authorized to perform the action.
6166//
6167//   * IdempotentParameterMismatchException
6168//   A ClientRequestToken input parameter was reused with an operation, but at
6169//   least one of the other input parameters is different from the previous call
6170//   to the operation.
6171//
6172//   * InvalidParameterException
6173//   Input parameter violated a constraint. Validate your parameter before calling
6174//   the API operation again.
6175//
6176//   * InvalidS3ObjectException
6177//   Amazon Rekognition is unable to access the S3 object specified in the request.
6178//
6179//   * InternalServerError
6180//   Amazon Rekognition experienced a service issue. Try your call again.
6181//
6182//   * VideoTooLargeException
6183//   The file size or duration of the supplied media is too large. The maximum
6184//   file size is 10GB. The maximum duration is 6 hours.
6185//
6186//   * ProvisionedThroughputExceededException
6187//   The number of requests exceeded your throughput limit. If you want to increase
6188//   this limit, contact Amazon Rekognition.
6189//
6190//   * LimitExceededException
6191//   An Amazon Rekognition service limit was exceeded. For example, if you start
6192//   too many Amazon Rekognition Video jobs concurrently, calls to start operations
6193//   (StartLabelDetection, for example) will raise a LimitExceededException exception
6194//   (HTTP status code: 400) until the number of concurrently running jobs is
6195//   below the Amazon Rekognition service limit.
6196//
6197//   * ThrottlingException
6198//   Amazon Rekognition is temporarily unable to process the request. Try your
6199//   call again.
6200//
6201func (c *Rekognition) StartSegmentDetection(input *StartSegmentDetectionInput) (*StartSegmentDetectionOutput, error) {
6202	req, out := c.StartSegmentDetectionRequest(input)
6203	return out, req.Send()
6204}
6205
6206// StartSegmentDetectionWithContext is the same as StartSegmentDetection with the addition of
6207// the ability to pass a context and additional request options.
6208//
6209// See StartSegmentDetection for details on how to use this API operation.
6210//
6211// The context must be non-nil and will be used for request cancellation. If
6212// the context is nil a panic will occur. In the future the SDK may create
6213// sub-contexts for http.Requests. See https://golang.org/pkg/context/
6214// for more information on using Contexts.
6215func (c *Rekognition) StartSegmentDetectionWithContext(ctx aws.Context, input *StartSegmentDetectionInput, opts ...request.Option) (*StartSegmentDetectionOutput, error) {
6216	req, out := c.StartSegmentDetectionRequest(input)
6217	req.SetContext(ctx)
6218	req.ApplyOptions(opts...)
6219	return out, req.Send()
6220}
6221
6222const opStartStreamProcessor = "StartStreamProcessor"
6223
6224// StartStreamProcessorRequest generates a "aws/request.Request" representing the
6225// client's request for the StartStreamProcessor operation. The "output" return
6226// value will be populated with the request's response once the request completes
6227// successfully.
6228//
6229// Use "Send" method on the returned Request to send the API call to the service.
6230// the "output" return value is not valid until after Send returns without error.
6231//
6232// See StartStreamProcessor for more information on using the StartStreamProcessor
6233// API call, and error handling.
6234//
6235// This method is useful when you want to inject custom logic or configuration
6236// into the SDK's request lifecycle. Such as custom headers, or retry logic.
6237//
6238//
6239//    // Example sending a request using the StartStreamProcessorRequest method.
6240//    req, resp := client.StartStreamProcessorRequest(params)
6241//
6242//    err := req.Send()
6243//    if err == nil { // resp is now filled
6244//        fmt.Println(resp)
6245//    }
6246func (c *Rekognition) StartStreamProcessorRequest(input *StartStreamProcessorInput) (req *request.Request, output *StartStreamProcessorOutput) {
6247	op := &request.Operation{
6248		Name:       opStartStreamProcessor,
6249		HTTPMethod: "POST",
6250		HTTPPath:   "/",
6251	}
6252
6253	if input == nil {
6254		input = &StartStreamProcessorInput{}
6255	}
6256
6257	output = &StartStreamProcessorOutput{}
6258	req = c.newRequest(op, input, output)
6259	req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
6260	return
6261}
6262
6263// StartStreamProcessor API operation for Amazon Rekognition.
6264//
6265// Starts processing a stream processor. You create a stream processor by calling
6266// CreateStreamProcessor. To tell StartStreamProcessor which stream processor
6267// to start, use the value of the Name field specified in the call to CreateStreamProcessor.
6268//
6269// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
6270// with awserr.Error's Code and Message methods to get detailed information about
6271// the error.
6272//
6273// See the AWS API reference guide for Amazon Rekognition's
6274// API operation StartStreamProcessor for usage and error information.
6275//
6276// Returned Error Types:
6277//   * AccessDeniedException
6278//   You are not authorized to perform the action.
6279//
6280//   * InternalServerError
6281//   Amazon Rekognition experienced a service issue. Try your call again.
6282//
6283//   * ThrottlingException
6284//   Amazon Rekognition is temporarily unable to process the request. Try your
6285//   call again.
6286//
6287//   * InvalidParameterException
6288//   Input parameter violated a constraint. Validate your parameter before calling
6289//   the API operation again.
6290//
6291//   * ResourceNotFoundException
6292//   The collection specified in the request cannot be found.
6293//
6294//   * ResourceInUseException
6295//   The specified resource is already being used.
6296//
6297//   * ProvisionedThroughputExceededException
6298//   The number of requests exceeded your throughput limit. If you want to increase
6299//   this limit, contact Amazon Rekognition.
6300//
6301func (c *Rekognition) StartStreamProcessor(input *StartStreamProcessorInput) (*StartStreamProcessorOutput, error) {
6302	req, out := c.StartStreamProcessorRequest(input)
6303	return out, req.Send()
6304}
6305
6306// StartStreamProcessorWithContext is the same as StartStreamProcessor with the addition of
6307// the ability to pass a context and additional request options.
6308//
6309// See StartStreamProcessor for details on how to use this API operation.
6310//
6311// The context must be non-nil and will be used for request cancellation. If
6312// the context is nil a panic will occur. In the future the SDK may create
6313// sub-contexts for http.Requests. See https://golang.org/pkg/context/
6314// for more information on using Contexts.
6315func (c *Rekognition) StartStreamProcessorWithContext(ctx aws.Context, input *StartStreamProcessorInput, opts ...request.Option) (*StartStreamProcessorOutput, error) {
6316	req, out := c.StartStreamProcessorRequest(input)
6317	req.SetContext(ctx)
6318	req.ApplyOptions(opts...)
6319	return out, req.Send()
6320}
6321
6322const opStartTextDetection = "StartTextDetection"
6323
6324// StartTextDetectionRequest generates a "aws/request.Request" representing the
6325// client's request for the StartTextDetection operation. The "output" return
6326// value will be populated with the request's response once the request completes
6327// successfully.
6328//
6329// Use "Send" method on the returned Request to send the API call to the service.
6330// the "output" return value is not valid until after Send returns without error.
6331//
6332// See StartTextDetection for more information on using the StartTextDetection
6333// API call, and error handling.
6334//
6335// This method is useful when you want to inject custom logic or configuration
6336// into the SDK's request lifecycle. Such as custom headers, or retry logic.
6337//
6338//
6339//    // Example sending a request using the StartTextDetectionRequest method.
6340//    req, resp := client.StartTextDetectionRequest(params)
6341//
6342//    err := req.Send()
6343//    if err == nil { // resp is now filled
6344//        fmt.Println(resp)
6345//    }
6346func (c *Rekognition) StartTextDetectionRequest(input *StartTextDetectionInput) (req *request.Request, output *StartTextDetectionOutput) {
6347	op := &request.Operation{
6348		Name:       opStartTextDetection,
6349		HTTPMethod: "POST",
6350		HTTPPath:   "/",
6351	}
6352
6353	if input == nil {
6354		input = &StartTextDetectionInput{}
6355	}
6356
6357	output = &StartTextDetectionOutput{}
6358	req = c.newRequest(op, input, output)
6359	return
6360}
6361
6362// StartTextDetection API operation for Amazon Rekognition.
6363//
6364// Starts asynchronous detection of text in a stored video.
6365//
6366// Amazon Rekognition Video can detect text in a video stored in an Amazon S3
6367// bucket. Use Video to specify the bucket name and the filename of the video.
6368// StartTextDetection returns a job identifier (JobId) which you use to get
6369// the results of the operation. When text detection is finished, Amazon Rekognition
6370// Video publishes a completion status to the Amazon Simple Notification Service
6371// topic that you specify in NotificationChannel.
6372//
6373// To get the results of the text detection operation, first check that the
6374// status value published to the Amazon SNS topic is SUCCEEDED. if so, call
6375// GetTextDetection and pass the job identifier (JobId) from the initial call
6376// to StartTextDetection.
6377//
6378// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
6379// with awserr.Error's Code and Message methods to get detailed information about
6380// the error.
6381//
6382// See the AWS API reference guide for Amazon Rekognition's
6383// API operation StartTextDetection for usage and error information.
6384//
6385// Returned Error Types:
6386//   * AccessDeniedException
6387//   You are not authorized to perform the action.
6388//
6389//   * IdempotentParameterMismatchException
6390//   A ClientRequestToken input parameter was reused with an operation, but at
6391//   least one of the other input parameters is different from the previous call
6392//   to the operation.
6393//
6394//   * InvalidParameterException
6395//   Input parameter violated a constraint. Validate your parameter before calling
6396//   the API operation again.
6397//
6398//   * InvalidS3ObjectException
6399//   Amazon Rekognition is unable to access the S3 object specified in the request.
6400//
6401//   * InternalServerError
6402//   Amazon Rekognition experienced a service issue. Try your call again.
6403//
6404//   * VideoTooLargeException
6405//   The file size or duration of the supplied media is too large. The maximum
6406//   file size is 10GB. The maximum duration is 6 hours.
6407//
6408//   * ProvisionedThroughputExceededException
6409//   The number of requests exceeded your throughput limit. If you want to increase
6410//   this limit, contact Amazon Rekognition.
6411//
6412//   * LimitExceededException
6413//   An Amazon Rekognition service limit was exceeded. For example, if you start
6414//   too many Amazon Rekognition Video jobs concurrently, calls to start operations
6415//   (StartLabelDetection, for example) will raise a LimitExceededException exception
6416//   (HTTP status code: 400) until the number of concurrently running jobs is
6417//   below the Amazon Rekognition service limit.
6418//
6419//   * ThrottlingException
6420//   Amazon Rekognition is temporarily unable to process the request. Try your
6421//   call again.
6422//
6423func (c *Rekognition) StartTextDetection(input *StartTextDetectionInput) (*StartTextDetectionOutput, error) {
6424	req, out := c.StartTextDetectionRequest(input)
6425	return out, req.Send()
6426}
6427
6428// StartTextDetectionWithContext is the same as StartTextDetection with the addition of
6429// the ability to pass a context and additional request options.
6430//
6431// See StartTextDetection for details on how to use this API operation.
6432//
6433// The context must be non-nil and will be used for request cancellation. If
6434// the context is nil a panic will occur. In the future the SDK may create
6435// sub-contexts for http.Requests. See https://golang.org/pkg/context/
6436// for more information on using Contexts.
6437func (c *Rekognition) StartTextDetectionWithContext(ctx aws.Context, input *StartTextDetectionInput, opts ...request.Option) (*StartTextDetectionOutput, error) {
6438	req, out := c.StartTextDetectionRequest(input)
6439	req.SetContext(ctx)
6440	req.ApplyOptions(opts...)
6441	return out, req.Send()
6442}
6443
6444const opStopProjectVersion = "StopProjectVersion"
6445
6446// StopProjectVersionRequest generates a "aws/request.Request" representing the
6447// client's request for the StopProjectVersion operation. The "output" return
6448// value will be populated with the request's response once the request completes
6449// successfully.
6450//
6451// Use "Send" method on the returned Request to send the API call to the service.
6452// the "output" return value is not valid until after Send returns without error.
6453//
6454// See StopProjectVersion for more information on using the StopProjectVersion
6455// API call, and error handling.
6456//
6457// This method is useful when you want to inject custom logic or configuration
6458// into the SDK's request lifecycle. Such as custom headers, or retry logic.
6459//
6460//
6461//    // Example sending a request using the StopProjectVersionRequest method.
6462//    req, resp := client.StopProjectVersionRequest(params)
6463//
6464//    err := req.Send()
6465//    if err == nil { // resp is now filled
6466//        fmt.Println(resp)
6467//    }
6468func (c *Rekognition) StopProjectVersionRequest(input *StopProjectVersionInput) (req *request.Request, output *StopProjectVersionOutput) {
6469	op := &request.Operation{
6470		Name:       opStopProjectVersion,
6471		HTTPMethod: "POST",
6472		HTTPPath:   "/",
6473	}
6474
6475	if input == nil {
6476		input = &StopProjectVersionInput{}
6477	}
6478
6479	output = &StopProjectVersionOutput{}
6480	req = c.newRequest(op, input, output)
6481	return
6482}
6483
6484// StopProjectVersion API operation for Amazon Rekognition.
6485//
6486// Stops a running model. The operation might take a while to complete. To check
6487// the current status, call DescribeProjectVersions.
6488//
6489// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
6490// with awserr.Error's Code and Message methods to get detailed information about
6491// the error.
6492//
6493// See the AWS API reference guide for Amazon Rekognition's
6494// API operation StopProjectVersion for usage and error information.
6495//
6496// Returned Error Types:
6497//   * ResourceNotFoundException
6498//   The collection specified in the request cannot be found.
6499//
6500//   * ResourceInUseException
6501//   The specified resource is already being used.
6502//
6503//   * InvalidParameterException
6504//   Input parameter violated a constraint. Validate your parameter before calling
6505//   the API operation again.
6506//
6507//   * AccessDeniedException
6508//   You are not authorized to perform the action.
6509//
6510//   * InternalServerError
6511//   Amazon Rekognition experienced a service issue. Try your call again.
6512//
6513//   * ThrottlingException
6514//   Amazon Rekognition is temporarily unable to process the request. Try your
6515//   call again.
6516//
6517//   * ProvisionedThroughputExceededException
6518//   The number of requests exceeded your throughput limit. If you want to increase
6519//   this limit, contact Amazon Rekognition.
6520//
6521func (c *Rekognition) StopProjectVersion(input *StopProjectVersionInput) (*StopProjectVersionOutput, error) {
6522	req, out := c.StopProjectVersionRequest(input)
6523	return out, req.Send()
6524}
6525
6526// StopProjectVersionWithContext is the same as StopProjectVersion with the addition of
6527// the ability to pass a context and additional request options.
6528//
6529// See StopProjectVersion for details on how to use this API operation.
6530//
6531// The context must be non-nil and will be used for request cancellation. If
6532// the context is nil a panic will occur. In the future the SDK may create
6533// sub-contexts for http.Requests. See https://golang.org/pkg/context/
6534// for more information on using Contexts.
6535func (c *Rekognition) StopProjectVersionWithContext(ctx aws.Context, input *StopProjectVersionInput, opts ...request.Option) (*StopProjectVersionOutput, error) {
6536	req, out := c.StopProjectVersionRequest(input)
6537	req.SetContext(ctx)
6538	req.ApplyOptions(opts...)
6539	return out, req.Send()
6540}
6541
6542const opStopStreamProcessor = "StopStreamProcessor"
6543
6544// StopStreamProcessorRequest generates a "aws/request.Request" representing the
6545// client's request for the StopStreamProcessor operation. The "output" return
6546// value will be populated with the request's response once the request completes
6547// successfully.
6548//
6549// Use "Send" method on the returned Request to send the API call to the service.
6550// the "output" return value is not valid until after Send returns without error.
6551//
6552// See StopStreamProcessor for more information on using the StopStreamProcessor
6553// API call, and error handling.
6554//
6555// This method is useful when you want to inject custom logic or configuration
6556// into the SDK's request lifecycle. Such as custom headers, or retry logic.
6557//
6558//
6559//    // Example sending a request using the StopStreamProcessorRequest method.
6560//    req, resp := client.StopStreamProcessorRequest(params)
6561//
6562//    err := req.Send()
6563//    if err == nil { // resp is now filled
6564//        fmt.Println(resp)
6565//    }
6566func (c *Rekognition) StopStreamProcessorRequest(input *StopStreamProcessorInput) (req *request.Request, output *StopStreamProcessorOutput) {
6567	op := &request.Operation{
6568		Name:       opStopStreamProcessor,
6569		HTTPMethod: "POST",
6570		HTTPPath:   "/",
6571	}
6572
6573	if input == nil {
6574		input = &StopStreamProcessorInput{}
6575	}
6576
6577	output = &StopStreamProcessorOutput{}
6578	req = c.newRequest(op, input, output)
6579	req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
6580	return
6581}
6582
6583// StopStreamProcessor API operation for Amazon Rekognition.
6584//
6585// Stops a running stream processor that was created by CreateStreamProcessor.
6586//
6587// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
6588// with awserr.Error's Code and Message methods to get detailed information about
6589// the error.
6590//
6591// See the AWS API reference guide for Amazon Rekognition's
6592// API operation StopStreamProcessor for usage and error information.
6593//
6594// Returned Error Types:
6595//   * AccessDeniedException
6596//   You are not authorized to perform the action.
6597//
6598//   * InternalServerError
6599//   Amazon Rekognition experienced a service issue. Try your call again.
6600//
6601//   * ThrottlingException
6602//   Amazon Rekognition is temporarily unable to process the request. Try your
6603//   call again.
6604//
6605//   * InvalidParameterException
6606//   Input parameter violated a constraint. Validate your parameter before calling
6607//   the API operation again.
6608//
6609//   * ResourceNotFoundException
6610//   The collection specified in the request cannot be found.
6611//
6612//   * ResourceInUseException
6613//   The specified resource is already being used.
6614//
6615//   * ProvisionedThroughputExceededException
6616//   The number of requests exceeded your throughput limit. If you want to increase
6617//   this limit, contact Amazon Rekognition.
6618//
6619func (c *Rekognition) StopStreamProcessor(input *StopStreamProcessorInput) (*StopStreamProcessorOutput, error) {
6620	req, out := c.StopStreamProcessorRequest(input)
6621	return out, req.Send()
6622}
6623
6624// StopStreamProcessorWithContext is the same as StopStreamProcessor with the addition of
6625// the ability to pass a context and additional request options.
6626//
6627// See StopStreamProcessor for details on how to use this API operation.
6628//
6629// The context must be non-nil and will be used for request cancellation. If
6630// the context is nil a panic will occur. In the future the SDK may create
6631// sub-contexts for http.Requests. See https://golang.org/pkg/context/
6632// for more information on using Contexts.
6633func (c *Rekognition) StopStreamProcessorWithContext(ctx aws.Context, input *StopStreamProcessorInput, opts ...request.Option) (*StopStreamProcessorOutput, error) {
6634	req, out := c.StopStreamProcessorRequest(input)
6635	req.SetContext(ctx)
6636	req.ApplyOptions(opts...)
6637	return out, req.Send()
6638}
6639
6640const opTagResource = "TagResource"
6641
6642// TagResourceRequest generates a "aws/request.Request" representing the
6643// client's request for the TagResource operation. The "output" return
6644// value will be populated with the request's response once the request completes
6645// successfully.
6646//
6647// Use "Send" method on the returned Request to send the API call to the service.
6648// the "output" return value is not valid until after Send returns without error.
6649//
6650// See TagResource for more information on using the TagResource
6651// API call, and error handling.
6652//
6653// This method is useful when you want to inject custom logic or configuration
6654// into the SDK's request lifecycle. Such as custom headers, or retry logic.
6655//
6656//
6657//    // Example sending a request using the TagResourceRequest method.
6658//    req, resp := client.TagResourceRequest(params)
6659//
6660//    err := req.Send()
6661//    if err == nil { // resp is now filled
6662//        fmt.Println(resp)
6663//    }
6664func (c *Rekognition) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) {
6665	op := &request.Operation{
6666		Name:       opTagResource,
6667		HTTPMethod: "POST",
6668		HTTPPath:   "/",
6669	}
6670
6671	if input == nil {
6672		input = &TagResourceInput{}
6673	}
6674
6675	output = &TagResourceOutput{}
6676	req = c.newRequest(op, input, output)
6677	req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
6678	return
6679}
6680
6681// TagResource API operation for Amazon Rekognition.
6682//
6683// Adds one or more key-value tags to an Amazon Rekognition collection, stream
6684// processor, or Custom Labels model. For more information, see Tagging AWS
6685// Resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html).
6686//
6687// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
6688// with awserr.Error's Code and Message methods to get detailed information about
6689// the error.
6690//
6691// See the AWS API reference guide for Amazon Rekognition's
6692// API operation TagResource for usage and error information.
6693//
6694// Returned Error Types:
6695//   * ResourceNotFoundException
6696//   The collection specified in the request cannot be found.
6697//
6698//   * InvalidParameterException
6699//   Input parameter violated a constraint. Validate your parameter before calling
6700//   the API operation again.
6701//
6702//   * ServiceQuotaExceededException
6703//   The size of the collection or tag list exceeds the allowed limit. For more
6704//   information, see Limits in Amazon Rekognition in the Amazon Rekognition Developer
6705//   Guide.
6706//
6707//   * AccessDeniedException
6708//   You are not authorized to perform the action.
6709//
6710//   * InternalServerError
6711//   Amazon Rekognition experienced a service issue. Try your call again.
6712//
6713//   * ThrottlingException
6714//   Amazon Rekognition is temporarily unable to process the request. Try your
6715//   call again.
6716//
6717//   * ProvisionedThroughputExceededException
6718//   The number of requests exceeded your throughput limit. If you want to increase
6719//   this limit, contact Amazon Rekognition.
6720//
6721func (c *Rekognition) TagResource(input *TagResourceInput) (*TagResourceOutput, error) {
6722	req, out := c.TagResourceRequest(input)
6723	return out, req.Send()
6724}
6725
6726// TagResourceWithContext is the same as TagResource with the addition of
6727// the ability to pass a context and additional request options.
6728//
6729// See TagResource for details on how to use this API operation.
6730//
6731// The context must be non-nil and will be used for request cancellation. If
6732// the context is nil a panic will occur. In the future the SDK may create
6733// sub-contexts for http.Requests. See https://golang.org/pkg/context/
6734// for more information on using Contexts.
6735func (c *Rekognition) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) {
6736	req, out := c.TagResourceRequest(input)
6737	req.SetContext(ctx)
6738	req.ApplyOptions(opts...)
6739	return out, req.Send()
6740}
6741
6742const opUntagResource = "UntagResource"
6743
6744// UntagResourceRequest generates a "aws/request.Request" representing the
6745// client's request for the UntagResource operation. The "output" return
6746// value will be populated with the request's response once the request completes
6747// successfully.
6748//
6749// Use "Send" method on the returned Request to send the API call to the service.
6750// the "output" return value is not valid until after Send returns without error.
6751//
6752// See UntagResource for more information on using the UntagResource
6753// API call, and error handling.
6754//
6755// This method is useful when you want to inject custom logic or configuration
6756// into the SDK's request lifecycle. Such as custom headers, or retry logic.
6757//
6758//
6759//    // Example sending a request using the UntagResourceRequest method.
6760//    req, resp := client.UntagResourceRequest(params)
6761//
6762//    err := req.Send()
6763//    if err == nil { // resp is now filled
6764//        fmt.Println(resp)
6765//    }
6766func (c *Rekognition) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) {
6767	op := &request.Operation{
6768		Name:       opUntagResource,
6769		HTTPMethod: "POST",
6770		HTTPPath:   "/",
6771	}
6772
6773	if input == nil {
6774		input = &UntagResourceInput{}
6775	}
6776
6777	output = &UntagResourceOutput{}
6778	req = c.newRequest(op, input, output)
6779	req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
6780	return
6781}
6782
6783// UntagResource API operation for Amazon Rekognition.
6784//
6785// Removes one or more tags from an Amazon Rekognition collection, stream processor,
6786// or Custom Labels model.
6787//
6788// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
6789// with awserr.Error's Code and Message methods to get detailed information about
6790// the error.
6791//
6792// See the AWS API reference guide for Amazon Rekognition's
6793// API operation UntagResource for usage and error information.
6794//
6795// Returned Error Types:
6796//   * ResourceNotFoundException
6797//   The collection specified in the request cannot be found.
6798//
6799//   * InvalidParameterException
6800//   Input parameter violated a constraint. Validate your parameter before calling
6801//   the API operation again.
6802//
6803//   * AccessDeniedException
6804//   You are not authorized to perform the action.
6805//
6806//   * InternalServerError
6807//   Amazon Rekognition experienced a service issue. Try your call again.
6808//
6809//   * ThrottlingException
6810//   Amazon Rekognition is temporarily unable to process the request. Try your
6811//   call again.
6812//
6813//   * ProvisionedThroughputExceededException
6814//   The number of requests exceeded your throughput limit. If you want to increase
6815//   this limit, contact Amazon Rekognition.
6816//
6817func (c *Rekognition) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) {
6818	req, out := c.UntagResourceRequest(input)
6819	return out, req.Send()
6820}
6821
6822// UntagResourceWithContext is the same as UntagResource with the addition of
6823// the ability to pass a context and additional request options.
6824//
6825// See UntagResource for details on how to use this API operation.
6826//
6827// The context must be non-nil and will be used for request cancellation. If
6828// the context is nil a panic will occur. In the future the SDK may create
6829// sub-contexts for http.Requests. See https://golang.org/pkg/context/
6830// for more information on using Contexts.
6831func (c *Rekognition) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) {
6832	req, out := c.UntagResourceRequest(input)
6833	req.SetContext(ctx)
6834	req.ApplyOptions(opts...)
6835	return out, req.Send()
6836}
6837
6838// You are not authorized to perform the action.
6839type AccessDeniedException struct {
6840	_            struct{}                  `type:"structure"`
6841	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
6842
6843	Message_ *string `locationName:"message" type:"string"`
6844}
6845
6846// String returns the string representation
6847func (s AccessDeniedException) String() string {
6848	return awsutil.Prettify(s)
6849}
6850
6851// GoString returns the string representation
6852func (s AccessDeniedException) GoString() string {
6853	return s.String()
6854}
6855
6856func newErrorAccessDeniedException(v protocol.ResponseMetadata) error {
6857	return &AccessDeniedException{
6858		RespMetadata: v,
6859	}
6860}
6861
6862// Code returns the exception type name.
6863func (s *AccessDeniedException) Code() string {
6864	return "AccessDeniedException"
6865}
6866
6867// Message returns the exception's message.
6868func (s *AccessDeniedException) Message() string {
6869	if s.Message_ != nil {
6870		return *s.Message_
6871	}
6872	return ""
6873}
6874
6875// OrigErr always returns nil, satisfies awserr.Error interface.
6876func (s *AccessDeniedException) OrigErr() error {
6877	return nil
6878}
6879
6880func (s *AccessDeniedException) Error() string {
6881	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
6882}
6883
6884// Status code returns the HTTP status code for the request's response error.
6885func (s *AccessDeniedException) StatusCode() int {
6886	return s.RespMetadata.StatusCode
6887}
6888
6889// RequestID returns the service's response RequestID for request.
6890func (s *AccessDeniedException) RequestID() string {
6891	return s.RespMetadata.RequestID
6892}
6893
6894// Structure containing the estimated age range, in years, for a face.
6895//
6896// Amazon Rekognition estimates an age range for faces detected in the input
6897// image. Estimated age ranges can overlap. A face of a 5-year-old might have
6898// an estimated range of 4-6, while the face of a 6-year-old might have an estimated
6899// range of 4-8.
6900type AgeRange struct {
6901	_ struct{} `type:"structure"`
6902
6903	// The highest estimated age.
6904	High *int64 `type:"integer"`
6905
6906	// The lowest estimated age.
6907	Low *int64 `type:"integer"`
6908}
6909
6910// String returns the string representation
6911func (s AgeRange) String() string {
6912	return awsutil.Prettify(s)
6913}
6914
6915// GoString returns the string representation
6916func (s AgeRange) GoString() string {
6917	return s.String()
6918}
6919
6920// SetHigh sets the High field's value.
6921func (s *AgeRange) SetHigh(v int64) *AgeRange {
6922	s.High = &v
6923	return s
6924}
6925
6926// SetLow sets the Low field's value.
6927func (s *AgeRange) SetLow(v int64) *AgeRange {
6928	s.Low = &v
6929	return s
6930}
6931
6932// Assets are the images that you use to train and evaluate a model version.
6933// Assets can also contain validation information that you use to debug a failed
6934// model training.
6935type Asset struct {
6936	_ struct{} `type:"structure"`
6937
6938	// The S3 bucket that contains an Amazon Sagemaker Ground Truth format manifest
6939	// file.
6940	GroundTruthManifest *GroundTruthManifest `type:"structure"`
6941}
6942
6943// String returns the string representation
6944func (s Asset) String() string {
6945	return awsutil.Prettify(s)
6946}
6947
6948// GoString returns the string representation
6949func (s Asset) GoString() string {
6950	return s.String()
6951}
6952
6953// Validate inspects the fields of the type to determine if they are valid.
6954func (s *Asset) Validate() error {
6955	invalidParams := request.ErrInvalidParams{Context: "Asset"}
6956	if s.GroundTruthManifest != nil {
6957		if err := s.GroundTruthManifest.Validate(); err != nil {
6958			invalidParams.AddNested("GroundTruthManifest", err.(request.ErrInvalidParams))
6959		}
6960	}
6961
6962	if invalidParams.Len() > 0 {
6963		return invalidParams
6964	}
6965	return nil
6966}
6967
6968// SetGroundTruthManifest sets the GroundTruthManifest field's value.
6969func (s *Asset) SetGroundTruthManifest(v *GroundTruthManifest) *Asset {
6970	s.GroundTruthManifest = v
6971	return s
6972}
6973
6974// Metadata information about an audio stream. An array of AudioMetadata objects
6975// for the audio streams found in a stored video is returned by GetSegmentDetection.
6976type AudioMetadata struct {
6977	_ struct{} `type:"structure"`
6978
6979	// The audio codec used to encode or decode the audio stream.
6980	Codec *string `type:"string"`
6981
6982	// The duration of the audio stream in milliseconds.
6983	DurationMillis *int64 `type:"long"`
6984
6985	// The number of audio channels in the segment.
6986	NumberOfChannels *int64 `type:"long"`
6987
6988	// The sample rate for the audio stream.
6989	SampleRate *int64 `type:"long"`
6990}
6991
6992// String returns the string representation
6993func (s AudioMetadata) String() string {
6994	return awsutil.Prettify(s)
6995}
6996
6997// GoString returns the string representation
6998func (s AudioMetadata) GoString() string {
6999	return s.String()
7000}
7001
7002// SetCodec sets the Codec field's value.
7003func (s *AudioMetadata) SetCodec(v string) *AudioMetadata {
7004	s.Codec = &v
7005	return s
7006}
7007
7008// SetDurationMillis sets the DurationMillis field's value.
7009func (s *AudioMetadata) SetDurationMillis(v int64) *AudioMetadata {
7010	s.DurationMillis = &v
7011	return s
7012}
7013
7014// SetNumberOfChannels sets the NumberOfChannels field's value.
7015func (s *AudioMetadata) SetNumberOfChannels(v int64) *AudioMetadata {
7016	s.NumberOfChannels = &v
7017	return s
7018}
7019
7020// SetSampleRate sets the SampleRate field's value.
7021func (s *AudioMetadata) SetSampleRate(v int64) *AudioMetadata {
7022	s.SampleRate = &v
7023	return s
7024}
7025
7026// Indicates whether or not the face has a beard, and the confidence level in
7027// the determination.
7028type Beard struct {
7029	_ struct{} `type:"structure"`
7030
7031	// Level of confidence in the determination.
7032	Confidence *float64 `type:"float"`
7033
7034	// Boolean value that indicates whether the face has beard or not.
7035	Value *bool `type:"boolean"`
7036}
7037
7038// String returns the string representation
7039func (s Beard) String() string {
7040	return awsutil.Prettify(s)
7041}
7042
7043// GoString returns the string representation
7044func (s Beard) GoString() string {
7045	return s.String()
7046}
7047
7048// SetConfidence sets the Confidence field's value.
7049func (s *Beard) SetConfidence(v float64) *Beard {
7050	s.Confidence = &v
7051	return s
7052}
7053
7054// SetValue sets the Value field's value.
7055func (s *Beard) SetValue(v bool) *Beard {
7056	s.Value = &v
7057	return s
7058}
7059
7060// Identifies the bounding box around the label, face, text or personal protective
7061// equipment. The left (x-coordinate) and top (y-coordinate) are coordinates
7062// representing the top and left sides of the bounding box. Note that the upper-left
7063// corner of the image is the origin (0,0).
7064//
7065// The top and left values returned are ratios of the overall image size. For
7066// example, if the input image is 700x200 pixels, and the top-left coordinate
7067// of the bounding box is 350x50 pixels, the API returns a left value of 0.5
7068// (350/700) and a top value of 0.25 (50/200).
7069//
7070// The width and height values represent the dimensions of the bounding box
7071// as a ratio of the overall image dimension. For example, if the input image
7072// is 700x200 pixels, and the bounding box width is 70 pixels, the width returned
7073// is 0.1.
7074//
7075// The bounding box coordinates can have negative values. For example, if Amazon
7076// Rekognition is able to detect a face that is at the image edge and is only
7077// partially visible, the service can return coordinates that are outside the
7078// image bounds and, depending on the image edge, you might get negative values
7079// or values greater than 1 for the left or top values.
7080type BoundingBox struct {
7081	_ struct{} `type:"structure"`
7082
7083	// Height of the bounding box as a ratio of the overall image height.
7084	Height *float64 `type:"float"`
7085
7086	// Left coordinate of the bounding box as a ratio of overall image width.
7087	Left *float64 `type:"float"`
7088
7089	// Top coordinate of the bounding box as a ratio of overall image height.
7090	Top *float64 `type:"float"`
7091
7092	// Width of the bounding box as a ratio of the overall image width.
7093	Width *float64 `type:"float"`
7094}
7095
7096// String returns the string representation
7097func (s BoundingBox) String() string {
7098	return awsutil.Prettify(s)
7099}
7100
7101// GoString returns the string representation
7102func (s BoundingBox) GoString() string {
7103	return s.String()
7104}
7105
7106// SetHeight sets the Height field's value.
7107func (s *BoundingBox) SetHeight(v float64) *BoundingBox {
7108	s.Height = &v
7109	return s
7110}
7111
7112// SetLeft sets the Left field's value.
7113func (s *BoundingBox) SetLeft(v float64) *BoundingBox {
7114	s.Left = &v
7115	return s
7116}
7117
7118// SetTop sets the Top field's value.
7119func (s *BoundingBox) SetTop(v float64) *BoundingBox {
7120	s.Top = &v
7121	return s
7122}
7123
7124// SetWidth sets the Width field's value.
7125func (s *BoundingBox) SetWidth(v float64) *BoundingBox {
7126	s.Width = &v
7127	return s
7128}
7129
7130// Provides information about a celebrity recognized by the RecognizeCelebrities
7131// operation.
7132type Celebrity struct {
7133	_ struct{} `type:"structure"`
7134
7135	// Provides information about the celebrity's face, such as its location on
7136	// the image.
7137	Face *ComparedFace `type:"structure"`
7138
7139	// A unique identifier for the celebrity.
7140	Id *string `type:"string"`
7141
7142	// The confidence, in percentage, that Amazon Rekognition has that the recognized
7143	// face is the celebrity.
7144	MatchConfidence *float64 `type:"float"`
7145
7146	// The name of the celebrity.
7147	Name *string `type:"string"`
7148
7149	// An array of URLs pointing to additional information about the celebrity.
7150	// If there is no additional information about the celebrity, this list is empty.
7151	Urls []*string `type:"list"`
7152}
7153
7154// String returns the string representation
7155func (s Celebrity) String() string {
7156	return awsutil.Prettify(s)
7157}
7158
7159// GoString returns the string representation
7160func (s Celebrity) GoString() string {
7161	return s.String()
7162}
7163
7164// SetFace sets the Face field's value.
7165func (s *Celebrity) SetFace(v *ComparedFace) *Celebrity {
7166	s.Face = v
7167	return s
7168}
7169
7170// SetId sets the Id field's value.
7171func (s *Celebrity) SetId(v string) *Celebrity {
7172	s.Id = &v
7173	return s
7174}
7175
7176// SetMatchConfidence sets the MatchConfidence field's value.
7177func (s *Celebrity) SetMatchConfidence(v float64) *Celebrity {
7178	s.MatchConfidence = &v
7179	return s
7180}
7181
7182// SetName sets the Name field's value.
7183func (s *Celebrity) SetName(v string) *Celebrity {
7184	s.Name = &v
7185	return s
7186}
7187
7188// SetUrls sets the Urls field's value.
7189func (s *Celebrity) SetUrls(v []*string) *Celebrity {
7190	s.Urls = v
7191	return s
7192}
7193
7194// Information about a recognized celebrity.
7195type CelebrityDetail struct {
7196	_ struct{} `type:"structure"`
7197
7198	// Bounding box around the body of a celebrity.
7199	BoundingBox *BoundingBox `type:"structure"`
7200
7201	// The confidence, in percentage, that Amazon Rekognition has that the recognized
7202	// face is the celebrity.
7203	Confidence *float64 `type:"float"`
7204
7205	// Face details for the recognized celebrity.
7206	Face *FaceDetail `type:"structure"`
7207
7208	// The unique identifier for the celebrity.
7209	Id *string `type:"string"`
7210
7211	// The name of the celebrity.
7212	Name *string `type:"string"`
7213
7214	// An array of URLs pointing to additional celebrity information.
7215	Urls []*string `type:"list"`
7216}
7217
7218// String returns the string representation
7219func (s CelebrityDetail) String() string {
7220	return awsutil.Prettify(s)
7221}
7222
7223// GoString returns the string representation
7224func (s CelebrityDetail) GoString() string {
7225	return s.String()
7226}
7227
7228// SetBoundingBox sets the BoundingBox field's value.
7229func (s *CelebrityDetail) SetBoundingBox(v *BoundingBox) *CelebrityDetail {
7230	s.BoundingBox = v
7231	return s
7232}
7233
7234// SetConfidence sets the Confidence field's value.
7235func (s *CelebrityDetail) SetConfidence(v float64) *CelebrityDetail {
7236	s.Confidence = &v
7237	return s
7238}
7239
7240// SetFace sets the Face field's value.
7241func (s *CelebrityDetail) SetFace(v *FaceDetail) *CelebrityDetail {
7242	s.Face = v
7243	return s
7244}
7245
7246// SetId sets the Id field's value.
7247func (s *CelebrityDetail) SetId(v string) *CelebrityDetail {
7248	s.Id = &v
7249	return s
7250}
7251
7252// SetName sets the Name field's value.
7253func (s *CelebrityDetail) SetName(v string) *CelebrityDetail {
7254	s.Name = &v
7255	return s
7256}
7257
7258// SetUrls sets the Urls field's value.
7259func (s *CelebrityDetail) SetUrls(v []*string) *CelebrityDetail {
7260	s.Urls = v
7261	return s
7262}
7263
7264// Information about a detected celebrity and the time the celebrity was detected
7265// in a stored video. For more information, see GetCelebrityRecognition in the
7266// Amazon Rekognition Developer Guide.
7267type CelebrityRecognition struct {
7268	_ struct{} `type:"structure"`
7269
7270	// Information about a recognized celebrity.
7271	Celebrity *CelebrityDetail `type:"structure"`
7272
7273	// The time, in milliseconds from the start of the video, that the celebrity
7274	// was recognized.
7275	Timestamp *int64 `type:"long"`
7276}
7277
7278// String returns the string representation
7279func (s CelebrityRecognition) String() string {
7280	return awsutil.Prettify(s)
7281}
7282
7283// GoString returns the string representation
7284func (s CelebrityRecognition) GoString() string {
7285	return s.String()
7286}
7287
7288// SetCelebrity sets the Celebrity field's value.
7289func (s *CelebrityRecognition) SetCelebrity(v *CelebrityDetail) *CelebrityRecognition {
7290	s.Celebrity = v
7291	return s
7292}
7293
7294// SetTimestamp sets the Timestamp field's value.
7295func (s *CelebrityRecognition) SetTimestamp(v int64) *CelebrityRecognition {
7296	s.Timestamp = &v
7297	return s
7298}
7299
7300type CompareFacesInput struct {
7301	_ struct{} `type:"structure"`
7302
7303	// A filter that specifies a quality bar for how much filtering is done to identify
7304	// faces. Filtered faces aren't compared. If you specify AUTO, Amazon Rekognition
7305	// chooses the quality bar. If you specify LOW, MEDIUM, or HIGH, filtering removes
7306	// all faces that don’t meet the chosen quality bar. The quality bar is based
7307	// on a variety of common use cases. Low-quality detections can occur for a
7308	// number of reasons. Some examples are an object that's misidentified as a
7309	// face, a face that's too blurry, or a face with a pose that's too extreme
7310	// to use. If you specify NONE, no filtering is performed. The default value
7311	// is NONE.
7312	//
7313	// To use quality filtering, the collection you are using must be associated
7314	// with version 3 of the face model or higher.
7315	QualityFilter *string `type:"string" enum:"QualityFilter"`
7316
7317	// The minimum level of confidence in the face matches that a match must meet
7318	// to be included in the FaceMatches array.
7319	SimilarityThreshold *float64 `type:"float"`
7320
7321	// The input image as base64-encoded bytes or an S3 object. If you use the AWS
7322	// CLI to call Amazon Rekognition operations, passing base64-encoded image bytes
7323	// is not supported.
7324	//
7325	// If you are using an AWS SDK to call Amazon Rekognition, you might not need
7326	// to base64-encode image bytes passed using the Bytes field. For more information,
7327	// see Images in the Amazon Rekognition developer guide.
7328	//
7329	// SourceImage is a required field
7330	SourceImage *Image `type:"structure" required:"true"`
7331
7332	// The target image as base64-encoded bytes or an S3 object. If you use the
7333	// AWS CLI to call Amazon Rekognition operations, passing base64-encoded image
7334	// bytes is not supported.
7335	//
7336	// If you are using an AWS SDK to call Amazon Rekognition, you might not need
7337	// to base64-encode image bytes passed using the Bytes field. For more information,
7338	// see Images in the Amazon Rekognition developer guide.
7339	//
7340	// TargetImage is a required field
7341	TargetImage *Image `type:"structure" required:"true"`
7342}
7343
7344// String returns the string representation
7345func (s CompareFacesInput) String() string {
7346	return awsutil.Prettify(s)
7347}
7348
7349// GoString returns the string representation
7350func (s CompareFacesInput) GoString() string {
7351	return s.String()
7352}
7353
7354// Validate inspects the fields of the type to determine if they are valid.
7355func (s *CompareFacesInput) Validate() error {
7356	invalidParams := request.ErrInvalidParams{Context: "CompareFacesInput"}
7357	if s.SourceImage == nil {
7358		invalidParams.Add(request.NewErrParamRequired("SourceImage"))
7359	}
7360	if s.TargetImage == nil {
7361		invalidParams.Add(request.NewErrParamRequired("TargetImage"))
7362	}
7363	if s.SourceImage != nil {
7364		if err := s.SourceImage.Validate(); err != nil {
7365			invalidParams.AddNested("SourceImage", err.(request.ErrInvalidParams))
7366		}
7367	}
7368	if s.TargetImage != nil {
7369		if err := s.TargetImage.Validate(); err != nil {
7370			invalidParams.AddNested("TargetImage", err.(request.ErrInvalidParams))
7371		}
7372	}
7373
7374	if invalidParams.Len() > 0 {
7375		return invalidParams
7376	}
7377	return nil
7378}
7379
7380// SetQualityFilter sets the QualityFilter field's value.
7381func (s *CompareFacesInput) SetQualityFilter(v string) *CompareFacesInput {
7382	s.QualityFilter = &v
7383	return s
7384}
7385
7386// SetSimilarityThreshold sets the SimilarityThreshold field's value.
7387func (s *CompareFacesInput) SetSimilarityThreshold(v float64) *CompareFacesInput {
7388	s.SimilarityThreshold = &v
7389	return s
7390}
7391
7392// SetSourceImage sets the SourceImage field's value.
7393func (s *CompareFacesInput) SetSourceImage(v *Image) *CompareFacesInput {
7394	s.SourceImage = v
7395	return s
7396}
7397
7398// SetTargetImage sets the TargetImage field's value.
7399func (s *CompareFacesInput) SetTargetImage(v *Image) *CompareFacesInput {
7400	s.TargetImage = v
7401	return s
7402}
7403
7404// Provides information about a face in a target image that matches the source
7405// image face analyzed by CompareFaces. The Face property contains the bounding
7406// box of the face in the target image. The Similarity property is the confidence
7407// that the source image face matches the face in the bounding box.
7408type CompareFacesMatch struct {
7409	_ struct{} `type:"structure"`
7410
7411	// Provides face metadata (bounding box and confidence that the bounding box
7412	// actually contains a face).
7413	Face *ComparedFace `type:"structure"`
7414
7415	// Level of confidence that the faces match.
7416	Similarity *float64 `type:"float"`
7417}
7418
7419// String returns the string representation
7420func (s CompareFacesMatch) String() string {
7421	return awsutil.Prettify(s)
7422}
7423
7424// GoString returns the string representation
7425func (s CompareFacesMatch) GoString() string {
7426	return s.String()
7427}
7428
7429// SetFace sets the Face field's value.
7430func (s *CompareFacesMatch) SetFace(v *ComparedFace) *CompareFacesMatch {
7431	s.Face = v
7432	return s
7433}
7434
7435// SetSimilarity sets the Similarity field's value.
7436func (s *CompareFacesMatch) SetSimilarity(v float64) *CompareFacesMatch {
7437	s.Similarity = &v
7438	return s
7439}
7440
7441type CompareFacesOutput struct {
7442	_ struct{} `type:"structure"`
7443
7444	// An array of faces in the target image that match the source image face. Each
7445	// CompareFacesMatch object provides the bounding box, the confidence level
7446	// that the bounding box contains a face, and the similarity score for the face
7447	// in the bounding box and the face in the source image.
7448	FaceMatches []*CompareFacesMatch `type:"list"`
7449
7450	// The face in the source image that was used for comparison.
7451	SourceImageFace *ComparedSourceImageFace `type:"structure"`
7452
7453	// The value of SourceImageOrientationCorrection is always null.
7454	//
7455	// If the input image is in .jpeg format, it might contain exchangeable image
7456	// file format (Exif) metadata that includes the image's orientation. Amazon
7457	// Rekognition uses this orientation information to perform image correction.
7458	// The bounding box coordinates are translated to represent object locations
7459	// after the orientation information in the Exif metadata is used to correct
7460	// the image orientation. Images in .png format don't contain Exif metadata.
7461	//
7462	// Amazon Rekognition doesn’t perform image correction for images in .png
7463	// format and .jpeg images without orientation information in the image Exif
7464	// metadata. The bounding box coordinates aren't translated and represent the
7465	// object locations before the image is rotated.
7466	SourceImageOrientationCorrection *string `type:"string" enum:"OrientationCorrection"`
7467
7468	// The value of TargetImageOrientationCorrection is always null.
7469	//
7470	// If the input image is in .jpeg format, it might contain exchangeable image
7471	// file format (Exif) metadata that includes the image's orientation. Amazon
7472	// Rekognition uses this orientation information to perform image correction.
7473	// The bounding box coordinates are translated to represent object locations
7474	// after the orientation information in the Exif metadata is used to correct
7475	// the image orientation. Images in .png format don't contain Exif metadata.
7476	//
7477	// Amazon Rekognition doesn’t perform image correction for images in .png
7478	// format and .jpeg images without orientation information in the image Exif
7479	// metadata. The bounding box coordinates aren't translated and represent the
7480	// object locations before the image is rotated.
7481	TargetImageOrientationCorrection *string `type:"string" enum:"OrientationCorrection"`
7482
7483	// An array of faces in the target image that did not match the source image
7484	// face.
7485	UnmatchedFaces []*ComparedFace `type:"list"`
7486}
7487
7488// String returns the string representation
7489func (s CompareFacesOutput) String() string {
7490	return awsutil.Prettify(s)
7491}
7492
7493// GoString returns the string representation
7494func (s CompareFacesOutput) GoString() string {
7495	return s.String()
7496}
7497
7498// SetFaceMatches sets the FaceMatches field's value.
7499func (s *CompareFacesOutput) SetFaceMatches(v []*CompareFacesMatch) *CompareFacesOutput {
7500	s.FaceMatches = v
7501	return s
7502}
7503
7504// SetSourceImageFace sets the SourceImageFace field's value.
7505func (s *CompareFacesOutput) SetSourceImageFace(v *ComparedSourceImageFace) *CompareFacesOutput {
7506	s.SourceImageFace = v
7507	return s
7508}
7509
7510// SetSourceImageOrientationCorrection sets the SourceImageOrientationCorrection field's value.
7511func (s *CompareFacesOutput) SetSourceImageOrientationCorrection(v string) *CompareFacesOutput {
7512	s.SourceImageOrientationCorrection = &v
7513	return s
7514}
7515
7516// SetTargetImageOrientationCorrection sets the TargetImageOrientationCorrection field's value.
7517func (s *CompareFacesOutput) SetTargetImageOrientationCorrection(v string) *CompareFacesOutput {
7518	s.TargetImageOrientationCorrection = &v
7519	return s
7520}
7521
7522// SetUnmatchedFaces sets the UnmatchedFaces field's value.
7523func (s *CompareFacesOutput) SetUnmatchedFaces(v []*ComparedFace) *CompareFacesOutput {
7524	s.UnmatchedFaces = v
7525	return s
7526}
7527
7528// Provides face metadata for target image faces that are analyzed by CompareFaces
7529// and RecognizeCelebrities.
7530type ComparedFace struct {
7531	_ struct{} `type:"structure"`
7532
7533	// Bounding box of the face.
7534	BoundingBox *BoundingBox `type:"structure"`
7535
7536	// Level of confidence that what the bounding box contains is a face.
7537	Confidence *float64 `type:"float"`
7538
7539	// An array of facial landmarks.
7540	Landmarks []*Landmark `type:"list"`
7541
7542	// Indicates the pose of the face as determined by its pitch, roll, and yaw.
7543	Pose *Pose `type:"structure"`
7544
7545	// Identifies face image brightness and sharpness.
7546	Quality *ImageQuality `type:"structure"`
7547}
7548
7549// String returns the string representation
7550func (s ComparedFace) String() string {
7551	return awsutil.Prettify(s)
7552}
7553
7554// GoString returns the string representation
7555func (s ComparedFace) GoString() string {
7556	return s.String()
7557}
7558
7559// SetBoundingBox sets the BoundingBox field's value.
7560func (s *ComparedFace) SetBoundingBox(v *BoundingBox) *ComparedFace {
7561	s.BoundingBox = v
7562	return s
7563}
7564
7565// SetConfidence sets the Confidence field's value.
7566func (s *ComparedFace) SetConfidence(v float64) *ComparedFace {
7567	s.Confidence = &v
7568	return s
7569}
7570
7571// SetLandmarks sets the Landmarks field's value.
7572func (s *ComparedFace) SetLandmarks(v []*Landmark) *ComparedFace {
7573	s.Landmarks = v
7574	return s
7575}
7576
7577// SetPose sets the Pose field's value.
7578func (s *ComparedFace) SetPose(v *Pose) *ComparedFace {
7579	s.Pose = v
7580	return s
7581}
7582
7583// SetQuality sets the Quality field's value.
7584func (s *ComparedFace) SetQuality(v *ImageQuality) *ComparedFace {
7585	s.Quality = v
7586	return s
7587}
7588
7589// Type that describes the face Amazon Rekognition chose to compare with the
7590// faces in the target. This contains a bounding box for the selected face and
7591// confidence level that the bounding box contains a face. Note that Amazon
7592// Rekognition selects the largest face in the source image for this comparison.
7593type ComparedSourceImageFace struct {
7594	_ struct{} `type:"structure"`
7595
7596	// Bounding box of the face.
7597	BoundingBox *BoundingBox `type:"structure"`
7598
7599	// Confidence level that the selected bounding box contains a face.
7600	Confidence *float64 `type:"float"`
7601}
7602
7603// String returns the string representation
7604func (s ComparedSourceImageFace) String() string {
7605	return awsutil.Prettify(s)
7606}
7607
7608// GoString returns the string representation
7609func (s ComparedSourceImageFace) GoString() string {
7610	return s.String()
7611}
7612
7613// SetBoundingBox sets the BoundingBox field's value.
7614func (s *ComparedSourceImageFace) SetBoundingBox(v *BoundingBox) *ComparedSourceImageFace {
7615	s.BoundingBox = v
7616	return s
7617}
7618
7619// SetConfidence sets the Confidence field's value.
7620func (s *ComparedSourceImageFace) SetConfidence(v float64) *ComparedSourceImageFace {
7621	s.Confidence = &v
7622	return s
7623}
7624
7625// Information about an unsafe content label detection in a stored video.
7626type ContentModerationDetection struct {
7627	_ struct{} `type:"structure"`
7628
7629	// The unsafe content label detected by in the stored video.
7630	ModerationLabel *ModerationLabel `type:"structure"`
7631
7632	// Time, in milliseconds from the beginning of the video, that the unsafe content
7633	// label was detected.
7634	Timestamp *int64 `type:"long"`
7635}
7636
7637// String returns the string representation
7638func (s ContentModerationDetection) String() string {
7639	return awsutil.Prettify(s)
7640}
7641
7642// GoString returns the string representation
7643func (s ContentModerationDetection) GoString() string {
7644	return s.String()
7645}
7646
7647// SetModerationLabel sets the ModerationLabel field's value.
7648func (s *ContentModerationDetection) SetModerationLabel(v *ModerationLabel) *ContentModerationDetection {
7649	s.ModerationLabel = v
7650	return s
7651}
7652
7653// SetTimestamp sets the Timestamp field's value.
7654func (s *ContentModerationDetection) SetTimestamp(v int64) *ContentModerationDetection {
7655	s.Timestamp = &v
7656	return s
7657}
7658
7659// Information about an item of Personal Protective Equipment covering a corresponding
7660// body part. For more information, see DetectProtectiveEquipment.
7661type CoversBodyPart struct {
7662	_ struct{} `type:"structure"`
7663
7664	// The confidence that Amazon Rekognition has in the value of Value.
7665	Confidence *float64 `type:"float"`
7666
7667	// True if the PPE covers the corresponding body part, otherwise false.
7668	Value *bool `type:"boolean"`
7669}
7670
7671// String returns the string representation
7672func (s CoversBodyPart) String() string {
7673	return awsutil.Prettify(s)
7674}
7675
7676// GoString returns the string representation
7677func (s CoversBodyPart) GoString() string {
7678	return s.String()
7679}
7680
7681// SetConfidence sets the Confidence field's value.
7682func (s *CoversBodyPart) SetConfidence(v float64) *CoversBodyPart {
7683	s.Confidence = &v
7684	return s
7685}
7686
7687// SetValue sets the Value field's value.
7688func (s *CoversBodyPart) SetValue(v bool) *CoversBodyPart {
7689	s.Value = &v
7690	return s
7691}
7692
7693type CreateCollectionInput struct {
7694	_ struct{} `type:"structure"`
7695
7696	// ID for the collection that you are creating.
7697	//
7698	// CollectionId is a required field
7699	CollectionId *string `min:"1" type:"string" required:"true"`
7700
7701	// A set of tags (key-value pairs) that you want to attach to the collection.
7702	Tags map[string]*string `type:"map"`
7703}
7704
7705// String returns the string representation
7706func (s CreateCollectionInput) String() string {
7707	return awsutil.Prettify(s)
7708}
7709
7710// GoString returns the string representation
7711func (s CreateCollectionInput) GoString() string {
7712	return s.String()
7713}
7714
7715// Validate inspects the fields of the type to determine if they are valid.
7716func (s *CreateCollectionInput) Validate() error {
7717	invalidParams := request.ErrInvalidParams{Context: "CreateCollectionInput"}
7718	if s.CollectionId == nil {
7719		invalidParams.Add(request.NewErrParamRequired("CollectionId"))
7720	}
7721	if s.CollectionId != nil && len(*s.CollectionId) < 1 {
7722		invalidParams.Add(request.NewErrParamMinLen("CollectionId", 1))
7723	}
7724
7725	if invalidParams.Len() > 0 {
7726		return invalidParams
7727	}
7728	return nil
7729}
7730
7731// SetCollectionId sets the CollectionId field's value.
7732func (s *CreateCollectionInput) SetCollectionId(v string) *CreateCollectionInput {
7733	s.CollectionId = &v
7734	return s
7735}
7736
7737// SetTags sets the Tags field's value.
7738func (s *CreateCollectionInput) SetTags(v map[string]*string) *CreateCollectionInput {
7739	s.Tags = v
7740	return s
7741}
7742
7743type CreateCollectionOutput struct {
7744	_ struct{} `type:"structure"`
7745
7746	// Amazon Resource Name (ARN) of the collection. You can use this to manage
7747	// permissions on your resources.
7748	CollectionArn *string `type:"string"`
7749
7750	// Version number of the face detection model associated with the collection
7751	// you are creating.
7752	FaceModelVersion *string `type:"string"`
7753
7754	// HTTP status code indicating the result of the operation.
7755	StatusCode *int64 `type:"integer"`
7756}
7757
7758// String returns the string representation
7759func (s CreateCollectionOutput) String() string {
7760	return awsutil.Prettify(s)
7761}
7762
7763// GoString returns the string representation
7764func (s CreateCollectionOutput) GoString() string {
7765	return s.String()
7766}
7767
7768// SetCollectionArn sets the CollectionArn field's value.
7769func (s *CreateCollectionOutput) SetCollectionArn(v string) *CreateCollectionOutput {
7770	s.CollectionArn = &v
7771	return s
7772}
7773
7774// SetFaceModelVersion sets the FaceModelVersion field's value.
7775func (s *CreateCollectionOutput) SetFaceModelVersion(v string) *CreateCollectionOutput {
7776	s.FaceModelVersion = &v
7777	return s
7778}
7779
7780// SetStatusCode sets the StatusCode field's value.
7781func (s *CreateCollectionOutput) SetStatusCode(v int64) *CreateCollectionOutput {
7782	s.StatusCode = &v
7783	return s
7784}
7785
7786type CreateProjectInput struct {
7787	_ struct{} `type:"structure"`
7788
7789	// The name of the project to create.
7790	//
7791	// ProjectName is a required field
7792	ProjectName *string `min:"1" type:"string" required:"true"`
7793}
7794
7795// String returns the string representation
7796func (s CreateProjectInput) String() string {
7797	return awsutil.Prettify(s)
7798}
7799
7800// GoString returns the string representation
7801func (s CreateProjectInput) GoString() string {
7802	return s.String()
7803}
7804
7805// Validate inspects the fields of the type to determine if they are valid.
7806func (s *CreateProjectInput) Validate() error {
7807	invalidParams := request.ErrInvalidParams{Context: "CreateProjectInput"}
7808	if s.ProjectName == nil {
7809		invalidParams.Add(request.NewErrParamRequired("ProjectName"))
7810	}
7811	if s.ProjectName != nil && len(*s.ProjectName) < 1 {
7812		invalidParams.Add(request.NewErrParamMinLen("ProjectName", 1))
7813	}
7814
7815	if invalidParams.Len() > 0 {
7816		return invalidParams
7817	}
7818	return nil
7819}
7820
7821// SetProjectName sets the ProjectName field's value.
7822func (s *CreateProjectInput) SetProjectName(v string) *CreateProjectInput {
7823	s.ProjectName = &v
7824	return s
7825}
7826
7827type CreateProjectOutput struct {
7828	_ struct{} `type:"structure"`
7829
7830	// The Amazon Resource Name (ARN) of the new project. You can use the ARN to
7831	// configure IAM access to the project.
7832	ProjectArn *string `min:"20" type:"string"`
7833}
7834
7835// String returns the string representation
7836func (s CreateProjectOutput) String() string {
7837	return awsutil.Prettify(s)
7838}
7839
7840// GoString returns the string representation
7841func (s CreateProjectOutput) GoString() string {
7842	return s.String()
7843}
7844
7845// SetProjectArn sets the ProjectArn field's value.
7846func (s *CreateProjectOutput) SetProjectArn(v string) *CreateProjectOutput {
7847	s.ProjectArn = &v
7848	return s
7849}
7850
7851type CreateProjectVersionInput struct {
7852	_ struct{} `type:"structure"`
7853
7854	// The Amazon S3 location to store the results of training.
7855	//
7856	// OutputConfig is a required field
7857	OutputConfig *OutputConfig `type:"structure" required:"true"`
7858
7859	// The ARN of the Amazon Rekognition Custom Labels project that manages the
7860	// model that you want to train.
7861	//
7862	// ProjectArn is a required field
7863	ProjectArn *string `min:"20" type:"string" required:"true"`
7864
7865	// A set of tags (key-value pairs) that you want to attach to the model.
7866	Tags map[string]*string `type:"map"`
7867
7868	// The dataset to use for testing.
7869	//
7870	// TestingData is a required field
7871	TestingData *TestingData `type:"structure" required:"true"`
7872
7873	// The dataset to use for training.
7874	//
7875	// TrainingData is a required field
7876	TrainingData *TrainingData `type:"structure" required:"true"`
7877
7878	// A name for the version of the model. This value must be unique.
7879	//
7880	// VersionName is a required field
7881	VersionName *string `min:"1" type:"string" required:"true"`
7882}
7883
7884// String returns the string representation
7885func (s CreateProjectVersionInput) String() string {
7886	return awsutil.Prettify(s)
7887}
7888
7889// GoString returns the string representation
7890func (s CreateProjectVersionInput) GoString() string {
7891	return s.String()
7892}
7893
7894// Validate inspects the fields of the type to determine if they are valid.
7895func (s *CreateProjectVersionInput) Validate() error {
7896	invalidParams := request.ErrInvalidParams{Context: "CreateProjectVersionInput"}
7897	if s.OutputConfig == nil {
7898		invalidParams.Add(request.NewErrParamRequired("OutputConfig"))
7899	}
7900	if s.ProjectArn == nil {
7901		invalidParams.Add(request.NewErrParamRequired("ProjectArn"))
7902	}
7903	if s.ProjectArn != nil && len(*s.ProjectArn) < 20 {
7904		invalidParams.Add(request.NewErrParamMinLen("ProjectArn", 20))
7905	}
7906	if s.TestingData == nil {
7907		invalidParams.Add(request.NewErrParamRequired("TestingData"))
7908	}
7909	if s.TrainingData == nil {
7910		invalidParams.Add(request.NewErrParamRequired("TrainingData"))
7911	}
7912	if s.VersionName == nil {
7913		invalidParams.Add(request.NewErrParamRequired("VersionName"))
7914	}
7915	if s.VersionName != nil && len(*s.VersionName) < 1 {
7916		invalidParams.Add(request.NewErrParamMinLen("VersionName", 1))
7917	}
7918	if s.OutputConfig != nil {
7919		if err := s.OutputConfig.Validate(); err != nil {
7920			invalidParams.AddNested("OutputConfig", err.(request.ErrInvalidParams))
7921		}
7922	}
7923	if s.TestingData != nil {
7924		if err := s.TestingData.Validate(); err != nil {
7925			invalidParams.AddNested("TestingData", err.(request.ErrInvalidParams))
7926		}
7927	}
7928	if s.TrainingData != nil {
7929		if err := s.TrainingData.Validate(); err != nil {
7930			invalidParams.AddNested("TrainingData", err.(request.ErrInvalidParams))
7931		}
7932	}
7933
7934	if invalidParams.Len() > 0 {
7935		return invalidParams
7936	}
7937	return nil
7938}
7939
7940// SetOutputConfig sets the OutputConfig field's value.
7941func (s *CreateProjectVersionInput) SetOutputConfig(v *OutputConfig) *CreateProjectVersionInput {
7942	s.OutputConfig = v
7943	return s
7944}
7945
7946// SetProjectArn sets the ProjectArn field's value.
7947func (s *CreateProjectVersionInput) SetProjectArn(v string) *CreateProjectVersionInput {
7948	s.ProjectArn = &v
7949	return s
7950}
7951
7952// SetTags sets the Tags field's value.
7953func (s *CreateProjectVersionInput) SetTags(v map[string]*string) *CreateProjectVersionInput {
7954	s.Tags = v
7955	return s
7956}
7957
7958// SetTestingData sets the TestingData field's value.
7959func (s *CreateProjectVersionInput) SetTestingData(v *TestingData) *CreateProjectVersionInput {
7960	s.TestingData = v
7961	return s
7962}
7963
7964// SetTrainingData sets the TrainingData field's value.
7965func (s *CreateProjectVersionInput) SetTrainingData(v *TrainingData) *CreateProjectVersionInput {
7966	s.TrainingData = v
7967	return s
7968}
7969
7970// SetVersionName sets the VersionName field's value.
7971func (s *CreateProjectVersionInput) SetVersionName(v string) *CreateProjectVersionInput {
7972	s.VersionName = &v
7973	return s
7974}
7975
7976type CreateProjectVersionOutput struct {
7977	_ struct{} `type:"structure"`
7978
7979	// The ARN of the model version that was created. Use DescribeProjectVersion
7980	// to get the current status of the training operation.
7981	ProjectVersionArn *string `min:"20" type:"string"`
7982}
7983
7984// String returns the string representation
7985func (s CreateProjectVersionOutput) String() string {
7986	return awsutil.Prettify(s)
7987}
7988
7989// GoString returns the string representation
7990func (s CreateProjectVersionOutput) GoString() string {
7991	return s.String()
7992}
7993
7994// SetProjectVersionArn sets the ProjectVersionArn field's value.
7995func (s *CreateProjectVersionOutput) SetProjectVersionArn(v string) *CreateProjectVersionOutput {
7996	s.ProjectVersionArn = &v
7997	return s
7998}
7999
8000type CreateStreamProcessorInput struct {
8001	_ struct{} `type:"structure"`
8002
8003	// Kinesis video stream stream that provides the source streaming video. If
8004	// you are using the AWS CLI, the parameter name is StreamProcessorInput.
8005	//
8006	// Input is a required field
8007	Input *StreamProcessorInput `type:"structure" required:"true"`
8008
8009	// An identifier you assign to the stream processor. You can use Name to manage
8010	// the stream processor. For example, you can get the current status of the
8011	// stream processor by calling DescribeStreamProcessor. Name is idempotent.
8012	//
8013	// Name is a required field
8014	Name *string `min:"1" type:"string" required:"true"`
8015
8016	// Kinesis data stream stream to which Amazon Rekognition Video puts the analysis
8017	// results. If you are using the AWS CLI, the parameter name is StreamProcessorOutput.
8018	//
8019	// Output is a required field
8020	Output *StreamProcessorOutput `type:"structure" required:"true"`
8021
8022	// ARN of the IAM role that allows access to the stream processor.
8023	//
8024	// RoleArn is a required field
8025	RoleArn *string `type:"string" required:"true"`
8026
8027	// Face recognition input parameters to be used by the stream processor. Includes
8028	// the collection to use for face recognition and the face attributes to detect.
8029	//
8030	// Settings is a required field
8031	Settings *StreamProcessorSettings `type:"structure" required:"true"`
8032
8033	// A set of tags (key-value pairs) that you want to attach to the stream processor.
8034	Tags map[string]*string `type:"map"`
8035}
8036
8037// String returns the string representation
8038func (s CreateStreamProcessorInput) String() string {
8039	return awsutil.Prettify(s)
8040}
8041
8042// GoString returns the string representation
8043func (s CreateStreamProcessorInput) GoString() string {
8044	return s.String()
8045}
8046
8047// Validate inspects the fields of the type to determine if they are valid.
8048func (s *CreateStreamProcessorInput) Validate() error {
8049	invalidParams := request.ErrInvalidParams{Context: "CreateStreamProcessorInput"}
8050	if s.Input == nil {
8051		invalidParams.Add(request.NewErrParamRequired("Input"))
8052	}
8053	if s.Name == nil {
8054		invalidParams.Add(request.NewErrParamRequired("Name"))
8055	}
8056	if s.Name != nil && len(*s.Name) < 1 {
8057		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
8058	}
8059	if s.Output == nil {
8060		invalidParams.Add(request.NewErrParamRequired("Output"))
8061	}
8062	if s.RoleArn == nil {
8063		invalidParams.Add(request.NewErrParamRequired("RoleArn"))
8064	}
8065	if s.Settings == nil {
8066		invalidParams.Add(request.NewErrParamRequired("Settings"))
8067	}
8068	if s.Settings != nil {
8069		if err := s.Settings.Validate(); err != nil {
8070			invalidParams.AddNested("Settings", err.(request.ErrInvalidParams))
8071		}
8072	}
8073
8074	if invalidParams.Len() > 0 {
8075		return invalidParams
8076	}
8077	return nil
8078}
8079
8080// SetInput sets the Input field's value.
8081func (s *CreateStreamProcessorInput) SetInput(v *StreamProcessorInput) *CreateStreamProcessorInput {
8082	s.Input = v
8083	return s
8084}
8085
8086// SetName sets the Name field's value.
8087func (s *CreateStreamProcessorInput) SetName(v string) *CreateStreamProcessorInput {
8088	s.Name = &v
8089	return s
8090}
8091
8092// SetOutput sets the Output field's value.
8093func (s *CreateStreamProcessorInput) SetOutput(v *StreamProcessorOutput) *CreateStreamProcessorInput {
8094	s.Output = v
8095	return s
8096}
8097
8098// SetRoleArn sets the RoleArn field's value.
8099func (s *CreateStreamProcessorInput) SetRoleArn(v string) *CreateStreamProcessorInput {
8100	s.RoleArn = &v
8101	return s
8102}
8103
8104// SetSettings sets the Settings field's value.
8105func (s *CreateStreamProcessorInput) SetSettings(v *StreamProcessorSettings) *CreateStreamProcessorInput {
8106	s.Settings = v
8107	return s
8108}
8109
8110// SetTags sets the Tags field's value.
8111func (s *CreateStreamProcessorInput) SetTags(v map[string]*string) *CreateStreamProcessorInput {
8112	s.Tags = v
8113	return s
8114}
8115
8116type CreateStreamProcessorOutput struct {
8117	_ struct{} `type:"structure"`
8118
8119	// ARN for the newly create stream processor.
8120	StreamProcessorArn *string `type:"string"`
8121}
8122
8123// String returns the string representation
8124func (s CreateStreamProcessorOutput) String() string {
8125	return awsutil.Prettify(s)
8126}
8127
8128// GoString returns the string representation
8129func (s CreateStreamProcessorOutput) GoString() string {
8130	return s.String()
8131}
8132
8133// SetStreamProcessorArn sets the StreamProcessorArn field's value.
8134func (s *CreateStreamProcessorOutput) SetStreamProcessorArn(v string) *CreateStreamProcessorOutput {
8135	s.StreamProcessorArn = &v
8136	return s
8137}
8138
8139// A custom label detected in an image by a call to DetectCustomLabels.
8140type CustomLabel struct {
8141	_ struct{} `type:"structure"`
8142
8143	// The confidence that the model has in the detection of the custom label. The
8144	// range is 0-100. A higher value indicates a higher confidence.
8145	Confidence *float64 `type:"float"`
8146
8147	// The location of the detected object on the image that corresponds to the
8148	// custom label. Includes an axis aligned coarse bounding box surrounding the
8149	// object and a finer grain polygon for more accurate spatial information.
8150	Geometry *Geometry `type:"structure"`
8151
8152	// The name of the custom label.
8153	Name *string `type:"string"`
8154}
8155
8156// String returns the string representation
8157func (s CustomLabel) String() string {
8158	return awsutil.Prettify(s)
8159}
8160
8161// GoString returns the string representation
8162func (s CustomLabel) GoString() string {
8163	return s.String()
8164}
8165
8166// SetConfidence sets the Confidence field's value.
8167func (s *CustomLabel) SetConfidence(v float64) *CustomLabel {
8168	s.Confidence = &v
8169	return s
8170}
8171
8172// SetGeometry sets the Geometry field's value.
8173func (s *CustomLabel) SetGeometry(v *Geometry) *CustomLabel {
8174	s.Geometry = v
8175	return s
8176}
8177
8178// SetName sets the Name field's value.
8179func (s *CustomLabel) SetName(v string) *CustomLabel {
8180	s.Name = &v
8181	return s
8182}
8183
8184type DeleteCollectionInput struct {
8185	_ struct{} `type:"structure"`
8186
8187	// ID of the collection to delete.
8188	//
8189	// CollectionId is a required field
8190	CollectionId *string `min:"1" type:"string" required:"true"`
8191}
8192
8193// String returns the string representation
8194func (s DeleteCollectionInput) String() string {
8195	return awsutil.Prettify(s)
8196}
8197
8198// GoString returns the string representation
8199func (s DeleteCollectionInput) GoString() string {
8200	return s.String()
8201}
8202
8203// Validate inspects the fields of the type to determine if they are valid.
8204func (s *DeleteCollectionInput) Validate() error {
8205	invalidParams := request.ErrInvalidParams{Context: "DeleteCollectionInput"}
8206	if s.CollectionId == nil {
8207		invalidParams.Add(request.NewErrParamRequired("CollectionId"))
8208	}
8209	if s.CollectionId != nil && len(*s.CollectionId) < 1 {
8210		invalidParams.Add(request.NewErrParamMinLen("CollectionId", 1))
8211	}
8212
8213	if invalidParams.Len() > 0 {
8214		return invalidParams
8215	}
8216	return nil
8217}
8218
8219// SetCollectionId sets the CollectionId field's value.
8220func (s *DeleteCollectionInput) SetCollectionId(v string) *DeleteCollectionInput {
8221	s.CollectionId = &v
8222	return s
8223}
8224
8225type DeleteCollectionOutput struct {
8226	_ struct{} `type:"structure"`
8227
8228	// HTTP status code that indicates the result of the operation.
8229	StatusCode *int64 `type:"integer"`
8230}
8231
8232// String returns the string representation
8233func (s DeleteCollectionOutput) String() string {
8234	return awsutil.Prettify(s)
8235}
8236
8237// GoString returns the string representation
8238func (s DeleteCollectionOutput) GoString() string {
8239	return s.String()
8240}
8241
8242// SetStatusCode sets the StatusCode field's value.
8243func (s *DeleteCollectionOutput) SetStatusCode(v int64) *DeleteCollectionOutput {
8244	s.StatusCode = &v
8245	return s
8246}
8247
8248type DeleteFacesInput struct {
8249	_ struct{} `type:"structure"`
8250
8251	// Collection from which to remove the specific faces.
8252	//
8253	// CollectionId is a required field
8254	CollectionId *string `min:"1" type:"string" required:"true"`
8255
8256	// An array of face IDs to delete.
8257	//
8258	// FaceIds is a required field
8259	FaceIds []*string `min:"1" type:"list" required:"true"`
8260}
8261
8262// String returns the string representation
8263func (s DeleteFacesInput) String() string {
8264	return awsutil.Prettify(s)
8265}
8266
8267// GoString returns the string representation
8268func (s DeleteFacesInput) GoString() string {
8269	return s.String()
8270}
8271
8272// Validate inspects the fields of the type to determine if they are valid.
8273func (s *DeleteFacesInput) Validate() error {
8274	invalidParams := request.ErrInvalidParams{Context: "DeleteFacesInput"}
8275	if s.CollectionId == nil {
8276		invalidParams.Add(request.NewErrParamRequired("CollectionId"))
8277	}
8278	if s.CollectionId != nil && len(*s.CollectionId) < 1 {
8279		invalidParams.Add(request.NewErrParamMinLen("CollectionId", 1))
8280	}
8281	if s.FaceIds == nil {
8282		invalidParams.Add(request.NewErrParamRequired("FaceIds"))
8283	}
8284	if s.FaceIds != nil && len(s.FaceIds) < 1 {
8285		invalidParams.Add(request.NewErrParamMinLen("FaceIds", 1))
8286	}
8287
8288	if invalidParams.Len() > 0 {
8289		return invalidParams
8290	}
8291	return nil
8292}
8293
8294// SetCollectionId sets the CollectionId field's value.
8295func (s *DeleteFacesInput) SetCollectionId(v string) *DeleteFacesInput {
8296	s.CollectionId = &v
8297	return s
8298}
8299
8300// SetFaceIds sets the FaceIds field's value.
8301func (s *DeleteFacesInput) SetFaceIds(v []*string) *DeleteFacesInput {
8302	s.FaceIds = v
8303	return s
8304}
8305
8306type DeleteFacesOutput struct {
8307	_ struct{} `type:"structure"`
8308
8309	// An array of strings (face IDs) of the faces that were deleted.
8310	DeletedFaces []*string `min:"1" type:"list"`
8311}
8312
8313// String returns the string representation
8314func (s DeleteFacesOutput) String() string {
8315	return awsutil.Prettify(s)
8316}
8317
8318// GoString returns the string representation
8319func (s DeleteFacesOutput) GoString() string {
8320	return s.String()
8321}
8322
8323// SetDeletedFaces sets the DeletedFaces field's value.
8324func (s *DeleteFacesOutput) SetDeletedFaces(v []*string) *DeleteFacesOutput {
8325	s.DeletedFaces = v
8326	return s
8327}
8328
8329type DeleteProjectInput struct {
8330	_ struct{} `type:"structure"`
8331
8332	// The Amazon Resource Name (ARN) of the project that you want to delete.
8333	//
8334	// ProjectArn is a required field
8335	ProjectArn *string `min:"20" type:"string" required:"true"`
8336}
8337
8338// String returns the string representation
8339func (s DeleteProjectInput) String() string {
8340	return awsutil.Prettify(s)
8341}
8342
8343// GoString returns the string representation
8344func (s DeleteProjectInput) GoString() string {
8345	return s.String()
8346}
8347
8348// Validate inspects the fields of the type to determine if they are valid.
8349func (s *DeleteProjectInput) Validate() error {
8350	invalidParams := request.ErrInvalidParams{Context: "DeleteProjectInput"}
8351	if s.ProjectArn == nil {
8352		invalidParams.Add(request.NewErrParamRequired("ProjectArn"))
8353	}
8354	if s.ProjectArn != nil && len(*s.ProjectArn) < 20 {
8355		invalidParams.Add(request.NewErrParamMinLen("ProjectArn", 20))
8356	}
8357
8358	if invalidParams.Len() > 0 {
8359		return invalidParams
8360	}
8361	return nil
8362}
8363
8364// SetProjectArn sets the ProjectArn field's value.
8365func (s *DeleteProjectInput) SetProjectArn(v string) *DeleteProjectInput {
8366	s.ProjectArn = &v
8367	return s
8368}
8369
8370type DeleteProjectOutput struct {
8371	_ struct{} `type:"structure"`
8372
8373	// The current status of the delete project operation.
8374	Status *string `type:"string" enum:"ProjectStatus"`
8375}
8376
8377// String returns the string representation
8378func (s DeleteProjectOutput) String() string {
8379	return awsutil.Prettify(s)
8380}
8381
8382// GoString returns the string representation
8383func (s DeleteProjectOutput) GoString() string {
8384	return s.String()
8385}
8386
8387// SetStatus sets the Status field's value.
8388func (s *DeleteProjectOutput) SetStatus(v string) *DeleteProjectOutput {
8389	s.Status = &v
8390	return s
8391}
8392
8393type DeleteProjectVersionInput struct {
8394	_ struct{} `type:"structure"`
8395
8396	// The Amazon Resource Name (ARN) of the model version that you want to delete.
8397	//
8398	// ProjectVersionArn is a required field
8399	ProjectVersionArn *string `min:"20" type:"string" required:"true"`
8400}
8401
8402// String returns the string representation
8403func (s DeleteProjectVersionInput) String() string {
8404	return awsutil.Prettify(s)
8405}
8406
8407// GoString returns the string representation
8408func (s DeleteProjectVersionInput) GoString() string {
8409	return s.String()
8410}
8411
8412// Validate inspects the fields of the type to determine if they are valid.
8413func (s *DeleteProjectVersionInput) Validate() error {
8414	invalidParams := request.ErrInvalidParams{Context: "DeleteProjectVersionInput"}
8415	if s.ProjectVersionArn == nil {
8416		invalidParams.Add(request.NewErrParamRequired("ProjectVersionArn"))
8417	}
8418	if s.ProjectVersionArn != nil && len(*s.ProjectVersionArn) < 20 {
8419		invalidParams.Add(request.NewErrParamMinLen("ProjectVersionArn", 20))
8420	}
8421
8422	if invalidParams.Len() > 0 {
8423		return invalidParams
8424	}
8425	return nil
8426}
8427
8428// SetProjectVersionArn sets the ProjectVersionArn field's value.
8429func (s *DeleteProjectVersionInput) SetProjectVersionArn(v string) *DeleteProjectVersionInput {
8430	s.ProjectVersionArn = &v
8431	return s
8432}
8433
8434type DeleteProjectVersionOutput struct {
8435	_ struct{} `type:"structure"`
8436
8437	// The status of the deletion operation.
8438	Status *string `type:"string" enum:"ProjectVersionStatus"`
8439}
8440
8441// String returns the string representation
8442func (s DeleteProjectVersionOutput) String() string {
8443	return awsutil.Prettify(s)
8444}
8445
8446// GoString returns the string representation
8447func (s DeleteProjectVersionOutput) GoString() string {
8448	return s.String()
8449}
8450
8451// SetStatus sets the Status field's value.
8452func (s *DeleteProjectVersionOutput) SetStatus(v string) *DeleteProjectVersionOutput {
8453	s.Status = &v
8454	return s
8455}
8456
8457type DeleteStreamProcessorInput struct {
8458	_ struct{} `type:"structure"`
8459
8460	// The name of the stream processor you want to delete.
8461	//
8462	// Name is a required field
8463	Name *string `min:"1" type:"string" required:"true"`
8464}
8465
8466// String returns the string representation
8467func (s DeleteStreamProcessorInput) String() string {
8468	return awsutil.Prettify(s)
8469}
8470
8471// GoString returns the string representation
8472func (s DeleteStreamProcessorInput) GoString() string {
8473	return s.String()
8474}
8475
8476// Validate inspects the fields of the type to determine if they are valid.
8477func (s *DeleteStreamProcessorInput) Validate() error {
8478	invalidParams := request.ErrInvalidParams{Context: "DeleteStreamProcessorInput"}
8479	if s.Name == nil {
8480		invalidParams.Add(request.NewErrParamRequired("Name"))
8481	}
8482	if s.Name != nil && len(*s.Name) < 1 {
8483		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
8484	}
8485
8486	if invalidParams.Len() > 0 {
8487		return invalidParams
8488	}
8489	return nil
8490}
8491
8492// SetName sets the Name field's value.
8493func (s *DeleteStreamProcessorInput) SetName(v string) *DeleteStreamProcessorInput {
8494	s.Name = &v
8495	return s
8496}
8497
8498type DeleteStreamProcessorOutput struct {
8499	_ struct{} `type:"structure"`
8500}
8501
8502// String returns the string representation
8503func (s DeleteStreamProcessorOutput) String() string {
8504	return awsutil.Prettify(s)
8505}
8506
8507// GoString returns the string representation
8508func (s DeleteStreamProcessorOutput) GoString() string {
8509	return s.String()
8510}
8511
8512type DescribeCollectionInput struct {
8513	_ struct{} `type:"structure"`
8514
8515	// The ID of the collection to describe.
8516	//
8517	// CollectionId is a required field
8518	CollectionId *string `min:"1" type:"string" required:"true"`
8519}
8520
8521// String returns the string representation
8522func (s DescribeCollectionInput) String() string {
8523	return awsutil.Prettify(s)
8524}
8525
8526// GoString returns the string representation
8527func (s DescribeCollectionInput) GoString() string {
8528	return s.String()
8529}
8530
8531// Validate inspects the fields of the type to determine if they are valid.
8532func (s *DescribeCollectionInput) Validate() error {
8533	invalidParams := request.ErrInvalidParams{Context: "DescribeCollectionInput"}
8534	if s.CollectionId == nil {
8535		invalidParams.Add(request.NewErrParamRequired("CollectionId"))
8536	}
8537	if s.CollectionId != nil && len(*s.CollectionId) < 1 {
8538		invalidParams.Add(request.NewErrParamMinLen("CollectionId", 1))
8539	}
8540
8541	if invalidParams.Len() > 0 {
8542		return invalidParams
8543	}
8544	return nil
8545}
8546
8547// SetCollectionId sets the CollectionId field's value.
8548func (s *DescribeCollectionInput) SetCollectionId(v string) *DescribeCollectionInput {
8549	s.CollectionId = &v
8550	return s
8551}
8552
8553type DescribeCollectionOutput struct {
8554	_ struct{} `type:"structure"`
8555
8556	// The Amazon Resource Name (ARN) of the collection.
8557	CollectionARN *string `type:"string"`
8558
8559	// The number of milliseconds since the Unix epoch time until the creation of
8560	// the collection. The Unix epoch time is 00:00:00 Coordinated Universal Time
8561	// (UTC), Thursday, 1 January 1970.
8562	CreationTimestamp *time.Time `type:"timestamp"`
8563
8564	// The number of faces that are indexed into the collection. To index faces
8565	// into a collection, use IndexFaces.
8566	FaceCount *int64 `type:"long"`
8567
8568	// The version of the face model that's used by the collection for face detection.
8569	//
8570	// For more information, see Model Versioning in the Amazon Rekognition Developer
8571	// Guide.
8572	FaceModelVersion *string `type:"string"`
8573}
8574
8575// String returns the string representation
8576func (s DescribeCollectionOutput) String() string {
8577	return awsutil.Prettify(s)
8578}
8579
8580// GoString returns the string representation
8581func (s DescribeCollectionOutput) GoString() string {
8582	return s.String()
8583}
8584
8585// SetCollectionARN sets the CollectionARN field's value.
8586func (s *DescribeCollectionOutput) SetCollectionARN(v string) *DescribeCollectionOutput {
8587	s.CollectionARN = &v
8588	return s
8589}
8590
8591// SetCreationTimestamp sets the CreationTimestamp field's value.
8592func (s *DescribeCollectionOutput) SetCreationTimestamp(v time.Time) *DescribeCollectionOutput {
8593	s.CreationTimestamp = &v
8594	return s
8595}
8596
8597// SetFaceCount sets the FaceCount field's value.
8598func (s *DescribeCollectionOutput) SetFaceCount(v int64) *DescribeCollectionOutput {
8599	s.FaceCount = &v
8600	return s
8601}
8602
8603// SetFaceModelVersion sets the FaceModelVersion field's value.
8604func (s *DescribeCollectionOutput) SetFaceModelVersion(v string) *DescribeCollectionOutput {
8605	s.FaceModelVersion = &v
8606	return s
8607}
8608
8609type DescribeProjectVersionsInput struct {
8610	_ struct{} `type:"structure"`
8611
8612	// The maximum number of results to return per paginated call. The largest value
8613	// you can specify is 100. If you specify a value greater than 100, a ValidationException
8614	// error occurs. The default value is 100.
8615	MaxResults *int64 `min:"1" type:"integer"`
8616
8617	// If the previous response was incomplete (because there is more results to
8618	// retrieve), Amazon Rekognition Custom Labels returns a pagination token in
8619	// the response. You can use this pagination token to retrieve the next set
8620	// of results.
8621	NextToken *string `type:"string"`
8622
8623	// The Amazon Resource Name (ARN) of the project that contains the models you
8624	// want to describe.
8625	//
8626	// ProjectArn is a required field
8627	ProjectArn *string `min:"20" type:"string" required:"true"`
8628
8629	// A list of model version names that you want to describe. You can add up to
8630	// 10 model version names to the list. If you don't specify a value, all model
8631	// descriptions are returned. A version name is part of a model (ProjectVersion)
8632	// ARN. For example, my-model.2020-01-21T09.10.15 is the version name in the
8633	// following ARN. arn:aws:rekognition:us-east-1:123456789012:project/getting-started/version/my-model.2020-01-21T09.10.15/1234567890123.
8634	VersionNames []*string `min:"1" type:"list"`
8635}
8636
8637// String returns the string representation
8638func (s DescribeProjectVersionsInput) String() string {
8639	return awsutil.Prettify(s)
8640}
8641
8642// GoString returns the string representation
8643func (s DescribeProjectVersionsInput) GoString() string {
8644	return s.String()
8645}
8646
8647// Validate inspects the fields of the type to determine if they are valid.
8648func (s *DescribeProjectVersionsInput) Validate() error {
8649	invalidParams := request.ErrInvalidParams{Context: "DescribeProjectVersionsInput"}
8650	if s.MaxResults != nil && *s.MaxResults < 1 {
8651		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
8652	}
8653	if s.ProjectArn == nil {
8654		invalidParams.Add(request.NewErrParamRequired("ProjectArn"))
8655	}
8656	if s.ProjectArn != nil && len(*s.ProjectArn) < 20 {
8657		invalidParams.Add(request.NewErrParamMinLen("ProjectArn", 20))
8658	}
8659	if s.VersionNames != nil && len(s.VersionNames) < 1 {
8660		invalidParams.Add(request.NewErrParamMinLen("VersionNames", 1))
8661	}
8662
8663	if invalidParams.Len() > 0 {
8664		return invalidParams
8665	}
8666	return nil
8667}
8668
8669// SetMaxResults sets the MaxResults field's value.
8670func (s *DescribeProjectVersionsInput) SetMaxResults(v int64) *DescribeProjectVersionsInput {
8671	s.MaxResults = &v
8672	return s
8673}
8674
8675// SetNextToken sets the NextToken field's value.
8676func (s *DescribeProjectVersionsInput) SetNextToken(v string) *DescribeProjectVersionsInput {
8677	s.NextToken = &v
8678	return s
8679}
8680
8681// SetProjectArn sets the ProjectArn field's value.
8682func (s *DescribeProjectVersionsInput) SetProjectArn(v string) *DescribeProjectVersionsInput {
8683	s.ProjectArn = &v
8684	return s
8685}
8686
8687// SetVersionNames sets the VersionNames field's value.
8688func (s *DescribeProjectVersionsInput) SetVersionNames(v []*string) *DescribeProjectVersionsInput {
8689	s.VersionNames = v
8690	return s
8691}
8692
8693type DescribeProjectVersionsOutput struct {
8694	_ struct{} `type:"structure"`
8695
8696	// If the previous response was incomplete (because there is more results to
8697	// retrieve), Amazon Rekognition Custom Labels returns a pagination token in
8698	// the response. You can use this pagination token to retrieve the next set
8699	// of results.
8700	NextToken *string `type:"string"`
8701
8702	// A list of model descriptions. The list is sorted by the creation date and
8703	// time of the model versions, latest to earliest.
8704	ProjectVersionDescriptions []*ProjectVersionDescription `type:"list"`
8705}
8706
8707// String returns the string representation
8708func (s DescribeProjectVersionsOutput) String() string {
8709	return awsutil.Prettify(s)
8710}
8711
8712// GoString returns the string representation
8713func (s DescribeProjectVersionsOutput) GoString() string {
8714	return s.String()
8715}
8716
8717// SetNextToken sets the NextToken field's value.
8718func (s *DescribeProjectVersionsOutput) SetNextToken(v string) *DescribeProjectVersionsOutput {
8719	s.NextToken = &v
8720	return s
8721}
8722
8723// SetProjectVersionDescriptions sets the ProjectVersionDescriptions field's value.
8724func (s *DescribeProjectVersionsOutput) SetProjectVersionDescriptions(v []*ProjectVersionDescription) *DescribeProjectVersionsOutput {
8725	s.ProjectVersionDescriptions = v
8726	return s
8727}
8728
8729type DescribeProjectsInput struct {
8730	_ struct{} `type:"structure"`
8731
8732	// The maximum number of results to return per paginated call. The largest value
8733	// you can specify is 100. If you specify a value greater than 100, a ValidationException
8734	// error occurs. The default value is 100.
8735	MaxResults *int64 `min:"1" type:"integer"`
8736
8737	// If the previous response was incomplete (because there is more results to
8738	// retrieve), Amazon Rekognition Custom Labels returns a pagination token in
8739	// the response. You can use this pagination token to retrieve the next set
8740	// of results.
8741	NextToken *string `type:"string"`
8742}
8743
8744// String returns the string representation
8745func (s DescribeProjectsInput) String() string {
8746	return awsutil.Prettify(s)
8747}
8748
8749// GoString returns the string representation
8750func (s DescribeProjectsInput) GoString() string {
8751	return s.String()
8752}
8753
8754// Validate inspects the fields of the type to determine if they are valid.
8755func (s *DescribeProjectsInput) Validate() error {
8756	invalidParams := request.ErrInvalidParams{Context: "DescribeProjectsInput"}
8757	if s.MaxResults != nil && *s.MaxResults < 1 {
8758		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
8759	}
8760
8761	if invalidParams.Len() > 0 {
8762		return invalidParams
8763	}
8764	return nil
8765}
8766
8767// SetMaxResults sets the MaxResults field's value.
8768func (s *DescribeProjectsInput) SetMaxResults(v int64) *DescribeProjectsInput {
8769	s.MaxResults = &v
8770	return s
8771}
8772
8773// SetNextToken sets the NextToken field's value.
8774func (s *DescribeProjectsInput) SetNextToken(v string) *DescribeProjectsInput {
8775	s.NextToken = &v
8776	return s
8777}
8778
8779type DescribeProjectsOutput struct {
8780	_ struct{} `type:"structure"`
8781
8782	// If the previous response was incomplete (because there is more results to
8783	// retrieve), Amazon Rekognition Custom Labels returns a pagination token in
8784	// the response. You can use this pagination token to retrieve the next set
8785	// of results.
8786	NextToken *string `type:"string"`
8787
8788	// A list of project descriptions. The list is sorted by the date and time the
8789	// projects are created.
8790	ProjectDescriptions []*ProjectDescription `type:"list"`
8791}
8792
8793// String returns the string representation
8794func (s DescribeProjectsOutput) String() string {
8795	return awsutil.Prettify(s)
8796}
8797
8798// GoString returns the string representation
8799func (s DescribeProjectsOutput) GoString() string {
8800	return s.String()
8801}
8802
8803// SetNextToken sets the NextToken field's value.
8804func (s *DescribeProjectsOutput) SetNextToken(v string) *DescribeProjectsOutput {
8805	s.NextToken = &v
8806	return s
8807}
8808
8809// SetProjectDescriptions sets the ProjectDescriptions field's value.
8810func (s *DescribeProjectsOutput) SetProjectDescriptions(v []*ProjectDescription) *DescribeProjectsOutput {
8811	s.ProjectDescriptions = v
8812	return s
8813}
8814
8815type DescribeStreamProcessorInput struct {
8816	_ struct{} `type:"structure"`
8817
8818	// Name of the stream processor for which you want information.
8819	//
8820	// Name is a required field
8821	Name *string `min:"1" type:"string" required:"true"`
8822}
8823
8824// String returns the string representation
8825func (s DescribeStreamProcessorInput) String() string {
8826	return awsutil.Prettify(s)
8827}
8828
8829// GoString returns the string representation
8830func (s DescribeStreamProcessorInput) GoString() string {
8831	return s.String()
8832}
8833
8834// Validate inspects the fields of the type to determine if they are valid.
8835func (s *DescribeStreamProcessorInput) Validate() error {
8836	invalidParams := request.ErrInvalidParams{Context: "DescribeStreamProcessorInput"}
8837	if s.Name == nil {
8838		invalidParams.Add(request.NewErrParamRequired("Name"))
8839	}
8840	if s.Name != nil && len(*s.Name) < 1 {
8841		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
8842	}
8843
8844	if invalidParams.Len() > 0 {
8845		return invalidParams
8846	}
8847	return nil
8848}
8849
8850// SetName sets the Name field's value.
8851func (s *DescribeStreamProcessorInput) SetName(v string) *DescribeStreamProcessorInput {
8852	s.Name = &v
8853	return s
8854}
8855
8856type DescribeStreamProcessorOutput struct {
8857	_ struct{} `type:"structure"`
8858
8859	// Date and time the stream processor was created
8860	CreationTimestamp *time.Time `type:"timestamp"`
8861
8862	// Kinesis video stream that provides the source streaming video.
8863	Input *StreamProcessorInput `type:"structure"`
8864
8865	// The time, in Unix format, the stream processor was last updated. For example,
8866	// when the stream processor moves from a running state to a failed state, or
8867	// when the user starts or stops the stream processor.
8868	LastUpdateTimestamp *time.Time `type:"timestamp"`
8869
8870	// Name of the stream processor.
8871	Name *string `min:"1" type:"string"`
8872
8873	// Kinesis data stream to which Amazon Rekognition Video puts the analysis results.
8874	Output *StreamProcessorOutput `type:"structure"`
8875
8876	// ARN of the IAM role that allows access to the stream processor.
8877	RoleArn *string `type:"string"`
8878
8879	// Face recognition input parameters that are being used by the stream processor.
8880	// Includes the collection to use for face recognition and the face attributes
8881	// to detect.
8882	Settings *StreamProcessorSettings `type:"structure"`
8883
8884	// Current status of the stream processor.
8885	Status *string `type:"string" enum:"StreamProcessorStatus"`
8886
8887	// Detailed status message about the stream processor.
8888	StatusMessage *string `type:"string"`
8889
8890	// ARN of the stream processor.
8891	StreamProcessorArn *string `type:"string"`
8892}
8893
8894// String returns the string representation
8895func (s DescribeStreamProcessorOutput) String() string {
8896	return awsutil.Prettify(s)
8897}
8898
8899// GoString returns the string representation
8900func (s DescribeStreamProcessorOutput) GoString() string {
8901	return s.String()
8902}
8903
8904// SetCreationTimestamp sets the CreationTimestamp field's value.
8905func (s *DescribeStreamProcessorOutput) SetCreationTimestamp(v time.Time) *DescribeStreamProcessorOutput {
8906	s.CreationTimestamp = &v
8907	return s
8908}
8909
8910// SetInput sets the Input field's value.
8911func (s *DescribeStreamProcessorOutput) SetInput(v *StreamProcessorInput) *DescribeStreamProcessorOutput {
8912	s.Input = v
8913	return s
8914}
8915
8916// SetLastUpdateTimestamp sets the LastUpdateTimestamp field's value.
8917func (s *DescribeStreamProcessorOutput) SetLastUpdateTimestamp(v time.Time) *DescribeStreamProcessorOutput {
8918	s.LastUpdateTimestamp = &v
8919	return s
8920}
8921
8922// SetName sets the Name field's value.
8923func (s *DescribeStreamProcessorOutput) SetName(v string) *DescribeStreamProcessorOutput {
8924	s.Name = &v
8925	return s
8926}
8927
8928// SetOutput sets the Output field's value.
8929func (s *DescribeStreamProcessorOutput) SetOutput(v *StreamProcessorOutput) *DescribeStreamProcessorOutput {
8930	s.Output = v
8931	return s
8932}
8933
8934// SetRoleArn sets the RoleArn field's value.
8935func (s *DescribeStreamProcessorOutput) SetRoleArn(v string) *DescribeStreamProcessorOutput {
8936	s.RoleArn = &v
8937	return s
8938}
8939
8940// SetSettings sets the Settings field's value.
8941func (s *DescribeStreamProcessorOutput) SetSettings(v *StreamProcessorSettings) *DescribeStreamProcessorOutput {
8942	s.Settings = v
8943	return s
8944}
8945
8946// SetStatus sets the Status field's value.
8947func (s *DescribeStreamProcessorOutput) SetStatus(v string) *DescribeStreamProcessorOutput {
8948	s.Status = &v
8949	return s
8950}
8951
8952// SetStatusMessage sets the StatusMessage field's value.
8953func (s *DescribeStreamProcessorOutput) SetStatusMessage(v string) *DescribeStreamProcessorOutput {
8954	s.StatusMessage = &v
8955	return s
8956}
8957
8958// SetStreamProcessorArn sets the StreamProcessorArn field's value.
8959func (s *DescribeStreamProcessorOutput) SetStreamProcessorArn(v string) *DescribeStreamProcessorOutput {
8960	s.StreamProcessorArn = &v
8961	return s
8962}
8963
8964type DetectCustomLabelsInput struct {
8965	_ struct{} `type:"structure"`
8966
8967	// Provides the input image either as bytes or an S3 object.
8968	//
8969	// You pass image bytes to an Amazon Rekognition API operation by using the
8970	// Bytes property. For example, you would use the Bytes property to pass an
8971	// image loaded from a local file system. Image bytes passed by using the Bytes
8972	// property must be base64-encoded. Your code may not need to encode image bytes
8973	// if you are using an AWS SDK to call Amazon Rekognition API operations.
8974	//
8975	// For more information, see Analyzing an Image Loaded from a Local File System
8976	// in the Amazon Rekognition Developer Guide.
8977	//
8978	// You pass images stored in an S3 bucket to an Amazon Rekognition API operation
8979	// by using the S3Object property. Images stored in an S3 bucket do not need
8980	// to be base64-encoded.
8981	//
8982	// The region for the S3 bucket containing the S3 object must match the region
8983	// you use for Amazon Rekognition operations.
8984	//
8985	// If you use the AWS CLI to call Amazon Rekognition operations, passing image
8986	// bytes using the Bytes property is not supported. You must first upload the
8987	// image to an Amazon S3 bucket and then call the operation using the S3Object
8988	// property.
8989	//
8990	// For Amazon Rekognition to process an S3 object, the user must have permission
8991	// to access the S3 object. For more information, see Resource Based Policies
8992	// in the Amazon Rekognition Developer Guide.
8993	//
8994	// Image is a required field
8995	Image *Image `type:"structure" required:"true"`
8996
8997	// Maximum number of results you want the service to return in the response.
8998	// The service returns the specified number of highest confidence labels ranked
8999	// from highest confidence to lowest.
9000	MaxResults *int64 `type:"integer"`
9001
9002	// Specifies the minimum confidence level for the labels to return. Amazon Rekognition
9003	// doesn't return any labels with a confidence lower than this specified value.
9004	// If you specify a value of 0, all labels are return, regardless of the default
9005	// thresholds that the model version applies.
9006	MinConfidence *float64 `type:"float"`
9007
9008	// The ARN of the model version that you want to use.
9009	//
9010	// ProjectVersionArn is a required field
9011	ProjectVersionArn *string `min:"20" type:"string" required:"true"`
9012}
9013
9014// String returns the string representation
9015func (s DetectCustomLabelsInput) String() string {
9016	return awsutil.Prettify(s)
9017}
9018
9019// GoString returns the string representation
9020func (s DetectCustomLabelsInput) GoString() string {
9021	return s.String()
9022}
9023
9024// Validate inspects the fields of the type to determine if they are valid.
9025func (s *DetectCustomLabelsInput) Validate() error {
9026	invalidParams := request.ErrInvalidParams{Context: "DetectCustomLabelsInput"}
9027	if s.Image == nil {
9028		invalidParams.Add(request.NewErrParamRequired("Image"))
9029	}
9030	if s.ProjectVersionArn == nil {
9031		invalidParams.Add(request.NewErrParamRequired("ProjectVersionArn"))
9032	}
9033	if s.ProjectVersionArn != nil && len(*s.ProjectVersionArn) < 20 {
9034		invalidParams.Add(request.NewErrParamMinLen("ProjectVersionArn", 20))
9035	}
9036	if s.Image != nil {
9037		if err := s.Image.Validate(); err != nil {
9038			invalidParams.AddNested("Image", err.(request.ErrInvalidParams))
9039		}
9040	}
9041
9042	if invalidParams.Len() > 0 {
9043		return invalidParams
9044	}
9045	return nil
9046}
9047
9048// SetImage sets the Image field's value.
9049func (s *DetectCustomLabelsInput) SetImage(v *Image) *DetectCustomLabelsInput {
9050	s.Image = v
9051	return s
9052}
9053
9054// SetMaxResults sets the MaxResults field's value.
9055func (s *DetectCustomLabelsInput) SetMaxResults(v int64) *DetectCustomLabelsInput {
9056	s.MaxResults = &v
9057	return s
9058}
9059
9060// SetMinConfidence sets the MinConfidence field's value.
9061func (s *DetectCustomLabelsInput) SetMinConfidence(v float64) *DetectCustomLabelsInput {
9062	s.MinConfidence = &v
9063	return s
9064}
9065
9066// SetProjectVersionArn sets the ProjectVersionArn field's value.
9067func (s *DetectCustomLabelsInput) SetProjectVersionArn(v string) *DetectCustomLabelsInput {
9068	s.ProjectVersionArn = &v
9069	return s
9070}
9071
9072type DetectCustomLabelsOutput struct {
9073	_ struct{} `type:"structure"`
9074
9075	// An array of custom labels detected in the input image.
9076	CustomLabels []*CustomLabel `type:"list"`
9077}
9078
9079// String returns the string representation
9080func (s DetectCustomLabelsOutput) String() string {
9081	return awsutil.Prettify(s)
9082}
9083
9084// GoString returns the string representation
9085func (s DetectCustomLabelsOutput) GoString() string {
9086	return s.String()
9087}
9088
9089// SetCustomLabels sets the CustomLabels field's value.
9090func (s *DetectCustomLabelsOutput) SetCustomLabels(v []*CustomLabel) *DetectCustomLabelsOutput {
9091	s.CustomLabels = v
9092	return s
9093}
9094
9095type DetectFacesInput struct {
9096	_ struct{} `type:"structure"`
9097
9098	// An array of facial attributes you want to be returned. This can be the default
9099	// list of attributes or all attributes. If you don't specify a value for Attributes
9100	// or if you specify ["DEFAULT"], the API returns the following subset of facial
9101	// attributes: BoundingBox, Confidence, Pose, Quality, and Landmarks. If you
9102	// provide ["ALL"], all facial attributes are returned, but the operation takes
9103	// longer to complete.
9104	//
9105	// If you provide both, ["ALL", "DEFAULT"], the service uses a logical AND operator
9106	// to determine which attributes to return (in this case, all attributes).
9107	Attributes []*string `type:"list"`
9108
9109	// The input image as base64-encoded bytes or an S3 object. If you use the AWS
9110	// CLI to call Amazon Rekognition operations, passing base64-encoded image bytes
9111	// is not supported.
9112	//
9113	// If you are using an AWS SDK to call Amazon Rekognition, you might not need
9114	// to base64-encode image bytes passed using the Bytes field. For more information,
9115	// see Images in the Amazon Rekognition developer guide.
9116	//
9117	// Image is a required field
9118	Image *Image `type:"structure" required:"true"`
9119}
9120
9121// String returns the string representation
9122func (s DetectFacesInput) String() string {
9123	return awsutil.Prettify(s)
9124}
9125
9126// GoString returns the string representation
9127func (s DetectFacesInput) GoString() string {
9128	return s.String()
9129}
9130
9131// Validate inspects the fields of the type to determine if they are valid.
9132func (s *DetectFacesInput) Validate() error {
9133	invalidParams := request.ErrInvalidParams{Context: "DetectFacesInput"}
9134	if s.Image == nil {
9135		invalidParams.Add(request.NewErrParamRequired("Image"))
9136	}
9137	if s.Image != nil {
9138		if err := s.Image.Validate(); err != nil {
9139			invalidParams.AddNested("Image", err.(request.ErrInvalidParams))
9140		}
9141	}
9142
9143	if invalidParams.Len() > 0 {
9144		return invalidParams
9145	}
9146	return nil
9147}
9148
9149// SetAttributes sets the Attributes field's value.
9150func (s *DetectFacesInput) SetAttributes(v []*string) *DetectFacesInput {
9151	s.Attributes = v
9152	return s
9153}
9154
9155// SetImage sets the Image field's value.
9156func (s *DetectFacesInput) SetImage(v *Image) *DetectFacesInput {
9157	s.Image = v
9158	return s
9159}
9160
9161type DetectFacesOutput struct {
9162	_ struct{} `type:"structure"`
9163
9164	// Details of each face found in the image.
9165	FaceDetails []*FaceDetail `type:"list"`
9166
9167	// The value of OrientationCorrection is always null.
9168	//
9169	// If the input image is in .jpeg format, it might contain exchangeable image
9170	// file format (Exif) metadata that includes the image's orientation. Amazon
9171	// Rekognition uses this orientation information to perform image correction.
9172	// The bounding box coordinates are translated to represent object locations
9173	// after the orientation information in the Exif metadata is used to correct
9174	// the image orientation. Images in .png format don't contain Exif metadata.
9175	//
9176	// Amazon Rekognition doesn’t perform image correction for images in .png
9177	// format and .jpeg images without orientation information in the image Exif
9178	// metadata. The bounding box coordinates aren't translated and represent the
9179	// object locations before the image is rotated.
9180	OrientationCorrection *string `type:"string" enum:"OrientationCorrection"`
9181}
9182
9183// String returns the string representation
9184func (s DetectFacesOutput) String() string {
9185	return awsutil.Prettify(s)
9186}
9187
9188// GoString returns the string representation
9189func (s DetectFacesOutput) GoString() string {
9190	return s.String()
9191}
9192
9193// SetFaceDetails sets the FaceDetails field's value.
9194func (s *DetectFacesOutput) SetFaceDetails(v []*FaceDetail) *DetectFacesOutput {
9195	s.FaceDetails = v
9196	return s
9197}
9198
9199// SetOrientationCorrection sets the OrientationCorrection field's value.
9200func (s *DetectFacesOutput) SetOrientationCorrection(v string) *DetectFacesOutput {
9201	s.OrientationCorrection = &v
9202	return s
9203}
9204
9205type DetectLabelsInput struct {
9206	_ struct{} `type:"structure"`
9207
9208	// The input image as base64-encoded bytes or an S3 object. If you use the AWS
9209	// CLI to call Amazon Rekognition operations, passing image bytes is not supported.
9210	// Images stored in an S3 Bucket do not need to be base64-encoded.
9211	//
9212	// If you are using an AWS SDK to call Amazon Rekognition, you might not need
9213	// to base64-encode image bytes passed using the Bytes field. For more information,
9214	// see Images in the Amazon Rekognition developer guide.
9215	//
9216	// Image is a required field
9217	Image *Image `type:"structure" required:"true"`
9218
9219	// Maximum number of labels you want the service to return in the response.
9220	// The service returns the specified number of highest confidence labels.
9221	MaxLabels *int64 `type:"integer"`
9222
9223	// Specifies the minimum confidence level for the labels to return. Amazon Rekognition
9224	// doesn't return any labels with confidence lower than this specified value.
9225	//
9226	// If MinConfidence is not specified, the operation returns labels with a confidence
9227	// values greater than or equal to 55 percent.
9228	MinConfidence *float64 `type:"float"`
9229}
9230
9231// String returns the string representation
9232func (s DetectLabelsInput) String() string {
9233	return awsutil.Prettify(s)
9234}
9235
9236// GoString returns the string representation
9237func (s DetectLabelsInput) GoString() string {
9238	return s.String()
9239}
9240
9241// Validate inspects the fields of the type to determine if they are valid.
9242func (s *DetectLabelsInput) Validate() error {
9243	invalidParams := request.ErrInvalidParams{Context: "DetectLabelsInput"}
9244	if s.Image == nil {
9245		invalidParams.Add(request.NewErrParamRequired("Image"))
9246	}
9247	if s.Image != nil {
9248		if err := s.Image.Validate(); err != nil {
9249			invalidParams.AddNested("Image", err.(request.ErrInvalidParams))
9250		}
9251	}
9252
9253	if invalidParams.Len() > 0 {
9254		return invalidParams
9255	}
9256	return nil
9257}
9258
9259// SetImage sets the Image field's value.
9260func (s *DetectLabelsInput) SetImage(v *Image) *DetectLabelsInput {
9261	s.Image = v
9262	return s
9263}
9264
9265// SetMaxLabels sets the MaxLabels field's value.
9266func (s *DetectLabelsInput) SetMaxLabels(v int64) *DetectLabelsInput {
9267	s.MaxLabels = &v
9268	return s
9269}
9270
9271// SetMinConfidence sets the MinConfidence field's value.
9272func (s *DetectLabelsInput) SetMinConfidence(v float64) *DetectLabelsInput {
9273	s.MinConfidence = &v
9274	return s
9275}
9276
9277type DetectLabelsOutput struct {
9278	_ struct{} `type:"structure"`
9279
9280	// Version number of the label detection model that was used to detect labels.
9281	LabelModelVersion *string `type:"string"`
9282
9283	// An array of labels for the real-world objects detected.
9284	Labels []*Label `type:"list"`
9285
9286	// The value of OrientationCorrection is always null.
9287	//
9288	// If the input image is in .jpeg format, it might contain exchangeable image
9289	// file format (Exif) metadata that includes the image's orientation. Amazon
9290	// Rekognition uses this orientation information to perform image correction.
9291	// The bounding box coordinates are translated to represent object locations
9292	// after the orientation information in the Exif metadata is used to correct
9293	// the image orientation. Images in .png format don't contain Exif metadata.
9294	//
9295	// Amazon Rekognition doesn’t perform image correction for images in .png
9296	// format and .jpeg images without orientation information in the image Exif
9297	// metadata. The bounding box coordinates aren't translated and represent the
9298	// object locations before the image is rotated.
9299	OrientationCorrection *string `type:"string" enum:"OrientationCorrection"`
9300}
9301
9302// String returns the string representation
9303func (s DetectLabelsOutput) String() string {
9304	return awsutil.Prettify(s)
9305}
9306
9307// GoString returns the string representation
9308func (s DetectLabelsOutput) GoString() string {
9309	return s.String()
9310}
9311
9312// SetLabelModelVersion sets the LabelModelVersion field's value.
9313func (s *DetectLabelsOutput) SetLabelModelVersion(v string) *DetectLabelsOutput {
9314	s.LabelModelVersion = &v
9315	return s
9316}
9317
9318// SetLabels sets the Labels field's value.
9319func (s *DetectLabelsOutput) SetLabels(v []*Label) *DetectLabelsOutput {
9320	s.Labels = v
9321	return s
9322}
9323
9324// SetOrientationCorrection sets the OrientationCorrection field's value.
9325func (s *DetectLabelsOutput) SetOrientationCorrection(v string) *DetectLabelsOutput {
9326	s.OrientationCorrection = &v
9327	return s
9328}
9329
9330type DetectModerationLabelsInput struct {
9331	_ struct{} `type:"structure"`
9332
9333	// Sets up the configuration for human evaluation, including the FlowDefinition
9334	// the image will be sent to.
9335	HumanLoopConfig *HumanLoopConfig `type:"structure"`
9336
9337	// The input image as base64-encoded bytes or an S3 object. If you use the AWS
9338	// CLI to call Amazon Rekognition operations, passing base64-encoded image bytes
9339	// is not supported.
9340	//
9341	// If you are using an AWS SDK to call Amazon Rekognition, you might not need
9342	// to base64-encode image bytes passed using the Bytes field. For more information,
9343	// see Images in the Amazon Rekognition developer guide.
9344	//
9345	// Image is a required field
9346	Image *Image `type:"structure" required:"true"`
9347
9348	// Specifies the minimum confidence level for the labels to return. Amazon Rekognition
9349	// doesn't return any labels with a confidence level lower than this specified
9350	// value.
9351	//
9352	// If you don't specify MinConfidence, the operation returns labels with confidence
9353	// values greater than or equal to 50 percent.
9354	MinConfidence *float64 `type:"float"`
9355}
9356
9357// String returns the string representation
9358func (s DetectModerationLabelsInput) String() string {
9359	return awsutil.Prettify(s)
9360}
9361
9362// GoString returns the string representation
9363func (s DetectModerationLabelsInput) GoString() string {
9364	return s.String()
9365}
9366
9367// Validate inspects the fields of the type to determine if they are valid.
9368func (s *DetectModerationLabelsInput) Validate() error {
9369	invalidParams := request.ErrInvalidParams{Context: "DetectModerationLabelsInput"}
9370	if s.Image == nil {
9371		invalidParams.Add(request.NewErrParamRequired("Image"))
9372	}
9373	if s.HumanLoopConfig != nil {
9374		if err := s.HumanLoopConfig.Validate(); err != nil {
9375			invalidParams.AddNested("HumanLoopConfig", err.(request.ErrInvalidParams))
9376		}
9377	}
9378	if s.Image != nil {
9379		if err := s.Image.Validate(); err != nil {
9380			invalidParams.AddNested("Image", err.(request.ErrInvalidParams))
9381		}
9382	}
9383
9384	if invalidParams.Len() > 0 {
9385		return invalidParams
9386	}
9387	return nil
9388}
9389
9390// SetHumanLoopConfig sets the HumanLoopConfig field's value.
9391func (s *DetectModerationLabelsInput) SetHumanLoopConfig(v *HumanLoopConfig) *DetectModerationLabelsInput {
9392	s.HumanLoopConfig = v
9393	return s
9394}
9395
9396// SetImage sets the Image field's value.
9397func (s *DetectModerationLabelsInput) SetImage(v *Image) *DetectModerationLabelsInput {
9398	s.Image = v
9399	return s
9400}
9401
9402// SetMinConfidence sets the MinConfidence field's value.
9403func (s *DetectModerationLabelsInput) SetMinConfidence(v float64) *DetectModerationLabelsInput {
9404	s.MinConfidence = &v
9405	return s
9406}
9407
9408type DetectModerationLabelsOutput struct {
9409	_ struct{} `type:"structure"`
9410
9411	// Shows the results of the human in the loop evaluation.
9412	HumanLoopActivationOutput *HumanLoopActivationOutput `type:"structure"`
9413
9414	// Array of detected Moderation labels and the time, in milliseconds from the
9415	// start of the video, they were detected.
9416	ModerationLabels []*ModerationLabel `type:"list"`
9417
9418	// Version number of the moderation detection model that was used to detect
9419	// unsafe content.
9420	ModerationModelVersion *string `type:"string"`
9421}
9422
9423// String returns the string representation
9424func (s DetectModerationLabelsOutput) String() string {
9425	return awsutil.Prettify(s)
9426}
9427
9428// GoString returns the string representation
9429func (s DetectModerationLabelsOutput) GoString() string {
9430	return s.String()
9431}
9432
9433// SetHumanLoopActivationOutput sets the HumanLoopActivationOutput field's value.
9434func (s *DetectModerationLabelsOutput) SetHumanLoopActivationOutput(v *HumanLoopActivationOutput) *DetectModerationLabelsOutput {
9435	s.HumanLoopActivationOutput = v
9436	return s
9437}
9438
9439// SetModerationLabels sets the ModerationLabels field's value.
9440func (s *DetectModerationLabelsOutput) SetModerationLabels(v []*ModerationLabel) *DetectModerationLabelsOutput {
9441	s.ModerationLabels = v
9442	return s
9443}
9444
9445// SetModerationModelVersion sets the ModerationModelVersion field's value.
9446func (s *DetectModerationLabelsOutput) SetModerationModelVersion(v string) *DetectModerationLabelsOutput {
9447	s.ModerationModelVersion = &v
9448	return s
9449}
9450
9451type DetectProtectiveEquipmentInput struct {
9452	_ struct{} `type:"structure"`
9453
9454	// The image in which you want to detect PPE on detected persons. The image
9455	// can be passed as image bytes or you can reference an image stored in an Amazon
9456	// S3 bucket.
9457	//
9458	// Image is a required field
9459	Image *Image `type:"structure" required:"true"`
9460
9461	// An array of PPE types that you want to summarize.
9462	SummarizationAttributes *ProtectiveEquipmentSummarizationAttributes `type:"structure"`
9463}
9464
9465// String returns the string representation
9466func (s DetectProtectiveEquipmentInput) String() string {
9467	return awsutil.Prettify(s)
9468}
9469
9470// GoString returns the string representation
9471func (s DetectProtectiveEquipmentInput) GoString() string {
9472	return s.String()
9473}
9474
9475// Validate inspects the fields of the type to determine if they are valid.
9476func (s *DetectProtectiveEquipmentInput) Validate() error {
9477	invalidParams := request.ErrInvalidParams{Context: "DetectProtectiveEquipmentInput"}
9478	if s.Image == nil {
9479		invalidParams.Add(request.NewErrParamRequired("Image"))
9480	}
9481	if s.Image != nil {
9482		if err := s.Image.Validate(); err != nil {
9483			invalidParams.AddNested("Image", err.(request.ErrInvalidParams))
9484		}
9485	}
9486	if s.SummarizationAttributes != nil {
9487		if err := s.SummarizationAttributes.Validate(); err != nil {
9488			invalidParams.AddNested("SummarizationAttributes", err.(request.ErrInvalidParams))
9489		}
9490	}
9491
9492	if invalidParams.Len() > 0 {
9493		return invalidParams
9494	}
9495	return nil
9496}
9497
9498// SetImage sets the Image field's value.
9499func (s *DetectProtectiveEquipmentInput) SetImage(v *Image) *DetectProtectiveEquipmentInput {
9500	s.Image = v
9501	return s
9502}
9503
9504// SetSummarizationAttributes sets the SummarizationAttributes field's value.
9505func (s *DetectProtectiveEquipmentInput) SetSummarizationAttributes(v *ProtectiveEquipmentSummarizationAttributes) *DetectProtectiveEquipmentInput {
9506	s.SummarizationAttributes = v
9507	return s
9508}
9509
9510type DetectProtectiveEquipmentOutput struct {
9511	_ struct{} `type:"structure"`
9512
9513	// An array of persons detected in the image (including persons not wearing
9514	// PPE).
9515	Persons []*ProtectiveEquipmentPerson `type:"list"`
9516
9517	// The version number of the PPE detection model used to detect PPE in the image.
9518	ProtectiveEquipmentModelVersion *string `type:"string"`
9519
9520	// Summary information for the types of PPE specified in the SummarizationAttributes
9521	// input parameter.
9522	Summary *ProtectiveEquipmentSummary `type:"structure"`
9523}
9524
9525// String returns the string representation
9526func (s DetectProtectiveEquipmentOutput) String() string {
9527	return awsutil.Prettify(s)
9528}
9529
9530// GoString returns the string representation
9531func (s DetectProtectiveEquipmentOutput) GoString() string {
9532	return s.String()
9533}
9534
9535// SetPersons sets the Persons field's value.
9536func (s *DetectProtectiveEquipmentOutput) SetPersons(v []*ProtectiveEquipmentPerson) *DetectProtectiveEquipmentOutput {
9537	s.Persons = v
9538	return s
9539}
9540
9541// SetProtectiveEquipmentModelVersion sets the ProtectiveEquipmentModelVersion field's value.
9542func (s *DetectProtectiveEquipmentOutput) SetProtectiveEquipmentModelVersion(v string) *DetectProtectiveEquipmentOutput {
9543	s.ProtectiveEquipmentModelVersion = &v
9544	return s
9545}
9546
9547// SetSummary sets the Summary field's value.
9548func (s *DetectProtectiveEquipmentOutput) SetSummary(v *ProtectiveEquipmentSummary) *DetectProtectiveEquipmentOutput {
9549	s.Summary = v
9550	return s
9551}
9552
9553// A set of optional parameters that you can use to set the criteria that the
9554// text must meet to be included in your response. WordFilter looks at a word’s
9555// height, width, and minimum confidence. RegionOfInterest lets you set a specific
9556// region of the image to look for text in.
9557type DetectTextFilters struct {
9558	_ struct{} `type:"structure"`
9559
9560	// A Filter focusing on a certain area of the image. Uses a BoundingBox object
9561	// to set the region of the image.
9562	RegionsOfInterest []*RegionOfInterest `type:"list"`
9563
9564	// A set of parameters that allow you to filter out certain results from your
9565	// returned results.
9566	WordFilter *DetectionFilter `type:"structure"`
9567}
9568
9569// String returns the string representation
9570func (s DetectTextFilters) String() string {
9571	return awsutil.Prettify(s)
9572}
9573
9574// GoString returns the string representation
9575func (s DetectTextFilters) GoString() string {
9576	return s.String()
9577}
9578
9579// SetRegionsOfInterest sets the RegionsOfInterest field's value.
9580func (s *DetectTextFilters) SetRegionsOfInterest(v []*RegionOfInterest) *DetectTextFilters {
9581	s.RegionsOfInterest = v
9582	return s
9583}
9584
9585// SetWordFilter sets the WordFilter field's value.
9586func (s *DetectTextFilters) SetWordFilter(v *DetectionFilter) *DetectTextFilters {
9587	s.WordFilter = v
9588	return s
9589}
9590
9591type DetectTextInput struct {
9592	_ struct{} `type:"structure"`
9593
9594	// Optional parameters that let you set the criteria that the text must meet
9595	// to be included in your response.
9596	Filters *DetectTextFilters `type:"structure"`
9597
9598	// The input image as base64-encoded bytes or an Amazon S3 object. If you use
9599	// the AWS CLI to call Amazon Rekognition operations, you can't pass image bytes.
9600	//
9601	// If you are using an AWS SDK to call Amazon Rekognition, you might not need
9602	// to base64-encode image bytes passed using the Bytes field. For more information,
9603	// see Images in the Amazon Rekognition developer guide.
9604	//
9605	// Image is a required field
9606	Image *Image `type:"structure" required:"true"`
9607}
9608
9609// String returns the string representation
9610func (s DetectTextInput) String() string {
9611	return awsutil.Prettify(s)
9612}
9613
9614// GoString returns the string representation
9615func (s DetectTextInput) GoString() string {
9616	return s.String()
9617}
9618
9619// Validate inspects the fields of the type to determine if they are valid.
9620func (s *DetectTextInput) Validate() error {
9621	invalidParams := request.ErrInvalidParams{Context: "DetectTextInput"}
9622	if s.Image == nil {
9623		invalidParams.Add(request.NewErrParamRequired("Image"))
9624	}
9625	if s.Image != nil {
9626		if err := s.Image.Validate(); err != nil {
9627			invalidParams.AddNested("Image", err.(request.ErrInvalidParams))
9628		}
9629	}
9630
9631	if invalidParams.Len() > 0 {
9632		return invalidParams
9633	}
9634	return nil
9635}
9636
9637// SetFilters sets the Filters field's value.
9638func (s *DetectTextInput) SetFilters(v *DetectTextFilters) *DetectTextInput {
9639	s.Filters = v
9640	return s
9641}
9642
9643// SetImage sets the Image field's value.
9644func (s *DetectTextInput) SetImage(v *Image) *DetectTextInput {
9645	s.Image = v
9646	return s
9647}
9648
9649type DetectTextOutput struct {
9650	_ struct{} `type:"structure"`
9651
9652	// An array of text that was detected in the input image.
9653	TextDetections []*TextDetection `type:"list"`
9654
9655	// The model version used to detect text.
9656	TextModelVersion *string `type:"string"`
9657}
9658
9659// String returns the string representation
9660func (s DetectTextOutput) String() string {
9661	return awsutil.Prettify(s)
9662}
9663
9664// GoString returns the string representation
9665func (s DetectTextOutput) GoString() string {
9666	return s.String()
9667}
9668
9669// SetTextDetections sets the TextDetections field's value.
9670func (s *DetectTextOutput) SetTextDetections(v []*TextDetection) *DetectTextOutput {
9671	s.TextDetections = v
9672	return s
9673}
9674
9675// SetTextModelVersion sets the TextModelVersion field's value.
9676func (s *DetectTextOutput) SetTextModelVersion(v string) *DetectTextOutput {
9677	s.TextModelVersion = &v
9678	return s
9679}
9680
9681// A set of parameters that allow you to filter out certain results from your
9682// returned results.
9683type DetectionFilter struct {
9684	_ struct{} `type:"structure"`
9685
9686	// Sets the minimum height of the word bounding box. Words with bounding box
9687	// heights lesser than this value will be excluded from the result. Value is
9688	// relative to the video frame height.
9689	MinBoundingBoxHeight *float64 `type:"float"`
9690
9691	// Sets the minimum width of the word bounding box. Words with bounding boxes
9692	// widths lesser than this value will be excluded from the result. Value is
9693	// relative to the video frame width.
9694	MinBoundingBoxWidth *float64 `type:"float"`
9695
9696	// Sets the confidence of word detection. Words with detection confidence below
9697	// this will be excluded from the result. Values should be between 50 and 100
9698	// as Text in Video will not return any result below 50.
9699	MinConfidence *float64 `type:"float"`
9700}
9701
9702// String returns the string representation
9703func (s DetectionFilter) String() string {
9704	return awsutil.Prettify(s)
9705}
9706
9707// GoString returns the string representation
9708func (s DetectionFilter) GoString() string {
9709	return s.String()
9710}
9711
9712// SetMinBoundingBoxHeight sets the MinBoundingBoxHeight field's value.
9713func (s *DetectionFilter) SetMinBoundingBoxHeight(v float64) *DetectionFilter {
9714	s.MinBoundingBoxHeight = &v
9715	return s
9716}
9717
9718// SetMinBoundingBoxWidth sets the MinBoundingBoxWidth field's value.
9719func (s *DetectionFilter) SetMinBoundingBoxWidth(v float64) *DetectionFilter {
9720	s.MinBoundingBoxWidth = &v
9721	return s
9722}
9723
9724// SetMinConfidence sets the MinConfidence field's value.
9725func (s *DetectionFilter) SetMinConfidence(v float64) *DetectionFilter {
9726	s.MinConfidence = &v
9727	return s
9728}
9729
9730// The emotions that appear to be expressed on the face, and the confidence
9731// level in the determination. The API is only making a determination of the
9732// physical appearance of a person's face. It is not a determination of the
9733// person’s internal emotional state and should not be used in such a way.
9734// For example, a person pretending to have a sad face might not be sad emotionally.
9735type Emotion struct {
9736	_ struct{} `type:"structure"`
9737
9738	// Level of confidence in the determination.
9739	Confidence *float64 `type:"float"`
9740
9741	// Type of emotion detected.
9742	Type *string `type:"string" enum:"EmotionName"`
9743}
9744
9745// String returns the string representation
9746func (s Emotion) String() string {
9747	return awsutil.Prettify(s)
9748}
9749
9750// GoString returns the string representation
9751func (s Emotion) GoString() string {
9752	return s.String()
9753}
9754
9755// SetConfidence sets the Confidence field's value.
9756func (s *Emotion) SetConfidence(v float64) *Emotion {
9757	s.Confidence = &v
9758	return s
9759}
9760
9761// SetType sets the Type field's value.
9762func (s *Emotion) SetType(v string) *Emotion {
9763	s.Type = &v
9764	return s
9765}
9766
9767// Information about an item of Personal Protective Equipment (PPE) detected
9768// by DetectProtectiveEquipment. For more information, see DetectProtectiveEquipment.
9769type EquipmentDetection struct {
9770	_ struct{} `type:"structure"`
9771
9772	// A bounding box surrounding the item of detected PPE.
9773	BoundingBox *BoundingBox `type:"structure"`
9774
9775	// The confidence that Amazon Rekognition has that the bounding box (BoundingBox)
9776	// contains an item of PPE.
9777	Confidence *float64 `type:"float"`
9778
9779	// Information about the body part covered by the detected PPE.
9780	CoversBodyPart *CoversBodyPart `type:"structure"`
9781
9782	// The type of detected PPE.
9783	Type *string `type:"string" enum:"ProtectiveEquipmentType"`
9784}
9785
9786// String returns the string representation
9787func (s EquipmentDetection) String() string {
9788	return awsutil.Prettify(s)
9789}
9790
9791// GoString returns the string representation
9792func (s EquipmentDetection) GoString() string {
9793	return s.String()
9794}
9795
9796// SetBoundingBox sets the BoundingBox field's value.
9797func (s *EquipmentDetection) SetBoundingBox(v *BoundingBox) *EquipmentDetection {
9798	s.BoundingBox = v
9799	return s
9800}
9801
9802// SetConfidence sets the Confidence field's value.
9803func (s *EquipmentDetection) SetConfidence(v float64) *EquipmentDetection {
9804	s.Confidence = &v
9805	return s
9806}
9807
9808// SetCoversBodyPart sets the CoversBodyPart field's value.
9809func (s *EquipmentDetection) SetCoversBodyPart(v *CoversBodyPart) *EquipmentDetection {
9810	s.CoversBodyPart = v
9811	return s
9812}
9813
9814// SetType sets the Type field's value.
9815func (s *EquipmentDetection) SetType(v string) *EquipmentDetection {
9816	s.Type = &v
9817	return s
9818}
9819
9820// The evaluation results for the training of a model.
9821type EvaluationResult struct {
9822	_ struct{} `type:"structure"`
9823
9824	// The F1 score for the evaluation of all labels. The F1 score metric evaluates
9825	// the overall precision and recall performance of the model as a single value.
9826	// A higher value indicates better precision and recall performance. A lower
9827	// score indicates that precision, recall, or both are performing poorly.
9828	F1Score *float64 `type:"float"`
9829
9830	// The S3 bucket that contains the training summary.
9831	Summary *Summary `type:"structure"`
9832}
9833
9834// String returns the string representation
9835func (s EvaluationResult) String() string {
9836	return awsutil.Prettify(s)
9837}
9838
9839// GoString returns the string representation
9840func (s EvaluationResult) GoString() string {
9841	return s.String()
9842}
9843
9844// SetF1Score sets the F1Score field's value.
9845func (s *EvaluationResult) SetF1Score(v float64) *EvaluationResult {
9846	s.F1Score = &v
9847	return s
9848}
9849
9850// SetSummary sets the Summary field's value.
9851func (s *EvaluationResult) SetSummary(v *Summary) *EvaluationResult {
9852	s.Summary = v
9853	return s
9854}
9855
9856// Indicates whether or not the eyes on the face are open, and the confidence
9857// level in the determination.
9858type EyeOpen struct {
9859	_ struct{} `type:"structure"`
9860
9861	// Level of confidence in the determination.
9862	Confidence *float64 `type:"float"`
9863
9864	// Boolean value that indicates whether the eyes on the face are open.
9865	Value *bool `type:"boolean"`
9866}
9867
9868// String returns the string representation
9869func (s EyeOpen) String() string {
9870	return awsutil.Prettify(s)
9871}
9872
9873// GoString returns the string representation
9874func (s EyeOpen) GoString() string {
9875	return s.String()
9876}
9877
9878// SetConfidence sets the Confidence field's value.
9879func (s *EyeOpen) SetConfidence(v float64) *EyeOpen {
9880	s.Confidence = &v
9881	return s
9882}
9883
9884// SetValue sets the Value field's value.
9885func (s *EyeOpen) SetValue(v bool) *EyeOpen {
9886	s.Value = &v
9887	return s
9888}
9889
9890// Indicates whether or not the face is wearing eye glasses, and the confidence
9891// level in the determination.
9892type Eyeglasses struct {
9893	_ struct{} `type:"structure"`
9894
9895	// Level of confidence in the determination.
9896	Confidence *float64 `type:"float"`
9897
9898	// Boolean value that indicates whether the face is wearing eye glasses or not.
9899	Value *bool `type:"boolean"`
9900}
9901
9902// String returns the string representation
9903func (s Eyeglasses) String() string {
9904	return awsutil.Prettify(s)
9905}
9906
9907// GoString returns the string representation
9908func (s Eyeglasses) GoString() string {
9909	return s.String()
9910}
9911
9912// SetConfidence sets the Confidence field's value.
9913func (s *Eyeglasses) SetConfidence(v float64) *Eyeglasses {
9914	s.Confidence = &v
9915	return s
9916}
9917
9918// SetValue sets the Value field's value.
9919func (s *Eyeglasses) SetValue(v bool) *Eyeglasses {
9920	s.Value = &v
9921	return s
9922}
9923
9924// Describes the face properties such as the bounding box, face ID, image ID
9925// of the input image, and external image ID that you assigned.
9926type Face struct {
9927	_ struct{} `type:"structure"`
9928
9929	// Bounding box of the face.
9930	BoundingBox *BoundingBox `type:"structure"`
9931
9932	// Confidence level that the bounding box contains a face (and not a different
9933	// object such as a tree).
9934	Confidence *float64 `type:"float"`
9935
9936	// Identifier that you assign to all the faces in the input image.
9937	ExternalImageId *string `min:"1" type:"string"`
9938
9939	// Unique identifier that Amazon Rekognition assigns to the face.
9940	FaceId *string `type:"string"`
9941
9942	// Unique identifier that Amazon Rekognition assigns to the input image.
9943	ImageId *string `type:"string"`
9944}
9945
9946// String returns the string representation
9947func (s Face) String() string {
9948	return awsutil.Prettify(s)
9949}
9950
9951// GoString returns the string representation
9952func (s Face) GoString() string {
9953	return s.String()
9954}
9955
9956// SetBoundingBox sets the BoundingBox field's value.
9957func (s *Face) SetBoundingBox(v *BoundingBox) *Face {
9958	s.BoundingBox = v
9959	return s
9960}
9961
9962// SetConfidence sets the Confidence field's value.
9963func (s *Face) SetConfidence(v float64) *Face {
9964	s.Confidence = &v
9965	return s
9966}
9967
9968// SetExternalImageId sets the ExternalImageId field's value.
9969func (s *Face) SetExternalImageId(v string) *Face {
9970	s.ExternalImageId = &v
9971	return s
9972}
9973
9974// SetFaceId sets the FaceId field's value.
9975func (s *Face) SetFaceId(v string) *Face {
9976	s.FaceId = &v
9977	return s
9978}
9979
9980// SetImageId sets the ImageId field's value.
9981func (s *Face) SetImageId(v string) *Face {
9982	s.ImageId = &v
9983	return s
9984}
9985
9986// Structure containing attributes of the face that the algorithm detected.
9987//
9988// A FaceDetail object contains either the default facial attributes or all
9989// facial attributes. The default attributes are BoundingBox, Confidence, Landmarks,
9990// Pose, and Quality.
9991//
9992// GetFaceDetection is the only Amazon Rekognition Video stored video operation
9993// that can return a FaceDetail object with all attributes. To specify which
9994// attributes to return, use the FaceAttributes input parameter for StartFaceDetection.
9995// The following Amazon Rekognition Video operations return only the default
9996// attributes. The corresponding Start operations don't have a FaceAttributes
9997// input parameter.
9998//
9999//    * GetCelebrityRecognition
10000//
10001//    * GetPersonTracking
10002//
10003//    * GetFaceSearch
10004//
10005// The Amazon Rekognition Image DetectFaces and IndexFaces operations can return
10006// all facial attributes. To specify which attributes to return, use the Attributes
10007// input parameter for DetectFaces. For IndexFaces, use the DetectAttributes
10008// input parameter.
10009type FaceDetail struct {
10010	_ struct{} `type:"structure"`
10011
10012	// The estimated age range, in years, for the face. Low represents the lowest
10013	// estimated age and High represents the highest estimated age.
10014	AgeRange *AgeRange `type:"structure"`
10015
10016	// Indicates whether or not the face has a beard, and the confidence level in
10017	// the determination.
10018	Beard *Beard `type:"structure"`
10019
10020	// Bounding box of the face. Default attribute.
10021	BoundingBox *BoundingBox `type:"structure"`
10022
10023	// Confidence level that the bounding box contains a face (and not a different
10024	// object such as a tree). Default attribute.
10025	Confidence *float64 `type:"float"`
10026
10027	// The emotions that appear to be expressed on the face, and the confidence
10028	// level in the determination. The API is only making a determination of the
10029	// physical appearance of a person's face. It is not a determination of the
10030	// person’s internal emotional state and should not be used in such a way.
10031	// For example, a person pretending to have a sad face might not be sad emotionally.
10032	Emotions []*Emotion `type:"list"`
10033
10034	// Indicates whether or not the face is wearing eye glasses, and the confidence
10035	// level in the determination.
10036	Eyeglasses *Eyeglasses `type:"structure"`
10037
10038	// Indicates whether or not the eyes on the face are open, and the confidence
10039	// level in the determination.
10040	EyesOpen *EyeOpen `type:"structure"`
10041
10042	// The predicted gender of a detected face.
10043	Gender *Gender `type:"structure"`
10044
10045	// Indicates the location of landmarks on the face. Default attribute.
10046	Landmarks []*Landmark `type:"list"`
10047
10048	// Indicates whether or not the mouth on the face is open, and the confidence
10049	// level in the determination.
10050	MouthOpen *MouthOpen `type:"structure"`
10051
10052	// Indicates whether or not the face has a mustache, and the confidence level
10053	// in the determination.
10054	Mustache *Mustache `type:"structure"`
10055
10056	// Indicates the pose of the face as determined by its pitch, roll, and yaw.
10057	// Default attribute.
10058	Pose *Pose `type:"structure"`
10059
10060	// Identifies image brightness and sharpness. Default attribute.
10061	Quality *ImageQuality `type:"structure"`
10062
10063	// Indicates whether or not the face is smiling, and the confidence level in
10064	// the determination.
10065	Smile *Smile `type:"structure"`
10066
10067	// Indicates whether or not the face is wearing sunglasses, and the confidence
10068	// level in the determination.
10069	Sunglasses *Sunglasses `type:"structure"`
10070}
10071
10072// String returns the string representation
10073func (s FaceDetail) String() string {
10074	return awsutil.Prettify(s)
10075}
10076
10077// GoString returns the string representation
10078func (s FaceDetail) GoString() string {
10079	return s.String()
10080}
10081
10082// SetAgeRange sets the AgeRange field's value.
10083func (s *FaceDetail) SetAgeRange(v *AgeRange) *FaceDetail {
10084	s.AgeRange = v
10085	return s
10086}
10087
10088// SetBeard sets the Beard field's value.
10089func (s *FaceDetail) SetBeard(v *Beard) *FaceDetail {
10090	s.Beard = v
10091	return s
10092}
10093
10094// SetBoundingBox sets the BoundingBox field's value.
10095func (s *FaceDetail) SetBoundingBox(v *BoundingBox) *FaceDetail {
10096	s.BoundingBox = v
10097	return s
10098}
10099
10100// SetConfidence sets the Confidence field's value.
10101func (s *FaceDetail) SetConfidence(v float64) *FaceDetail {
10102	s.Confidence = &v
10103	return s
10104}
10105
10106// SetEmotions sets the Emotions field's value.
10107func (s *FaceDetail) SetEmotions(v []*Emotion) *FaceDetail {
10108	s.Emotions = v
10109	return s
10110}
10111
10112// SetEyeglasses sets the Eyeglasses field's value.
10113func (s *FaceDetail) SetEyeglasses(v *Eyeglasses) *FaceDetail {
10114	s.Eyeglasses = v
10115	return s
10116}
10117
10118// SetEyesOpen sets the EyesOpen field's value.
10119func (s *FaceDetail) SetEyesOpen(v *EyeOpen) *FaceDetail {
10120	s.EyesOpen = v
10121	return s
10122}
10123
10124// SetGender sets the Gender field's value.
10125func (s *FaceDetail) SetGender(v *Gender) *FaceDetail {
10126	s.Gender = v
10127	return s
10128}
10129
10130// SetLandmarks sets the Landmarks field's value.
10131func (s *FaceDetail) SetLandmarks(v []*Landmark) *FaceDetail {
10132	s.Landmarks = v
10133	return s
10134}
10135
10136// SetMouthOpen sets the MouthOpen field's value.
10137func (s *FaceDetail) SetMouthOpen(v *MouthOpen) *FaceDetail {
10138	s.MouthOpen = v
10139	return s
10140}
10141
10142// SetMustache sets the Mustache field's value.
10143func (s *FaceDetail) SetMustache(v *Mustache) *FaceDetail {
10144	s.Mustache = v
10145	return s
10146}
10147
10148// SetPose sets the Pose field's value.
10149func (s *FaceDetail) SetPose(v *Pose) *FaceDetail {
10150	s.Pose = v
10151	return s
10152}
10153
10154// SetQuality sets the Quality field's value.
10155func (s *FaceDetail) SetQuality(v *ImageQuality) *FaceDetail {
10156	s.Quality = v
10157	return s
10158}
10159
10160// SetSmile sets the Smile field's value.
10161func (s *FaceDetail) SetSmile(v *Smile) *FaceDetail {
10162	s.Smile = v
10163	return s
10164}
10165
10166// SetSunglasses sets the Sunglasses field's value.
10167func (s *FaceDetail) SetSunglasses(v *Sunglasses) *FaceDetail {
10168	s.Sunglasses = v
10169	return s
10170}
10171
10172// Information about a face detected in a video analysis request and the time
10173// the face was detected in the video.
10174type FaceDetection struct {
10175	_ struct{} `type:"structure"`
10176
10177	// The face properties for the detected face.
10178	Face *FaceDetail `type:"structure"`
10179
10180	// Time, in milliseconds from the start of the video, that the face was detected.
10181	Timestamp *int64 `type:"long"`
10182}
10183
10184// String returns the string representation
10185func (s FaceDetection) String() string {
10186	return awsutil.Prettify(s)
10187}
10188
10189// GoString returns the string representation
10190func (s FaceDetection) GoString() string {
10191	return s.String()
10192}
10193
10194// SetFace sets the Face field's value.
10195func (s *FaceDetection) SetFace(v *FaceDetail) *FaceDetection {
10196	s.Face = v
10197	return s
10198}
10199
10200// SetTimestamp sets the Timestamp field's value.
10201func (s *FaceDetection) SetTimestamp(v int64) *FaceDetection {
10202	s.Timestamp = &v
10203	return s
10204}
10205
10206// Provides face metadata. In addition, it also provides the confidence in the
10207// match of this face with the input face.
10208type FaceMatch struct {
10209	_ struct{} `type:"structure"`
10210
10211	// Describes the face properties such as the bounding box, face ID, image ID
10212	// of the source image, and external image ID that you assigned.
10213	Face *Face `type:"structure"`
10214
10215	// Confidence in the match of this face with the input face.
10216	Similarity *float64 `type:"float"`
10217}
10218
10219// String returns the string representation
10220func (s FaceMatch) String() string {
10221	return awsutil.Prettify(s)
10222}
10223
10224// GoString returns the string representation
10225func (s FaceMatch) GoString() string {
10226	return s.String()
10227}
10228
10229// SetFace sets the Face field's value.
10230func (s *FaceMatch) SetFace(v *Face) *FaceMatch {
10231	s.Face = v
10232	return s
10233}
10234
10235// SetSimilarity sets the Similarity field's value.
10236func (s *FaceMatch) SetSimilarity(v float64) *FaceMatch {
10237	s.Similarity = &v
10238	return s
10239}
10240
10241// Object containing both the face metadata (stored in the backend database),
10242// and facial attributes that are detected but aren't stored in the database.
10243type FaceRecord struct {
10244	_ struct{} `type:"structure"`
10245
10246	// Describes the face properties such as the bounding box, face ID, image ID
10247	// of the input image, and external image ID that you assigned.
10248	Face *Face `type:"structure"`
10249
10250	// Structure containing attributes of the face that the algorithm detected.
10251	FaceDetail *FaceDetail `type:"structure"`
10252}
10253
10254// String returns the string representation
10255func (s FaceRecord) String() string {
10256	return awsutil.Prettify(s)
10257}
10258
10259// GoString returns the string representation
10260func (s FaceRecord) GoString() string {
10261	return s.String()
10262}
10263
10264// SetFace sets the Face field's value.
10265func (s *FaceRecord) SetFace(v *Face) *FaceRecord {
10266	s.Face = v
10267	return s
10268}
10269
10270// SetFaceDetail sets the FaceDetail field's value.
10271func (s *FaceRecord) SetFaceDetail(v *FaceDetail) *FaceRecord {
10272	s.FaceDetail = v
10273	return s
10274}
10275
10276// Input face recognition parameters for an Amazon Rekognition stream processor.
10277// FaceRecognitionSettings is a request parameter for CreateStreamProcessor.
10278type FaceSearchSettings struct {
10279	_ struct{} `type:"structure"`
10280
10281	// The ID of a collection that contains faces that you want to search for.
10282	CollectionId *string `min:"1" type:"string"`
10283
10284	// Minimum face match confidence score that must be met to return a result for
10285	// a recognized face. Default is 80. 0 is the lowest confidence. 100 is the
10286	// highest confidence.
10287	FaceMatchThreshold *float64 `type:"float"`
10288}
10289
10290// String returns the string representation
10291func (s FaceSearchSettings) String() string {
10292	return awsutil.Prettify(s)
10293}
10294
10295// GoString returns the string representation
10296func (s FaceSearchSettings) GoString() string {
10297	return s.String()
10298}
10299
10300// Validate inspects the fields of the type to determine if they are valid.
10301func (s *FaceSearchSettings) Validate() error {
10302	invalidParams := request.ErrInvalidParams{Context: "FaceSearchSettings"}
10303	if s.CollectionId != nil && len(*s.CollectionId) < 1 {
10304		invalidParams.Add(request.NewErrParamMinLen("CollectionId", 1))
10305	}
10306
10307	if invalidParams.Len() > 0 {
10308		return invalidParams
10309	}
10310	return nil
10311}
10312
10313// SetCollectionId sets the CollectionId field's value.
10314func (s *FaceSearchSettings) SetCollectionId(v string) *FaceSearchSettings {
10315	s.CollectionId = &v
10316	return s
10317}
10318
10319// SetFaceMatchThreshold sets the FaceMatchThreshold field's value.
10320func (s *FaceSearchSettings) SetFaceMatchThreshold(v float64) *FaceSearchSettings {
10321	s.FaceMatchThreshold = &v
10322	return s
10323}
10324
10325// The predicted gender of a detected face.
10326//
10327// Amazon Rekognition makes gender binary (male/female) predictions based on
10328// the physical appearance of a face in a particular image. This kind of prediction
10329// is not designed to categorize a person’s gender identity, and you shouldn't
10330// use Amazon Rekognition to make such a determination. For example, a male
10331// actor wearing a long-haired wig and earrings for a role might be predicted
10332// as female.
10333//
10334// Using Amazon Rekognition to make gender binary predictions is best suited
10335// for use cases where aggregate gender distribution statistics need to be analyzed
10336// without identifying specific users. For example, the percentage of female
10337// users compared to male users on a social media platform.
10338//
10339// We don't recommend using gender binary predictions to make decisions that
10340// impact an individual's rights, privacy, or access to services.
10341type Gender struct {
10342	_ struct{} `type:"structure"`
10343
10344	// Level of confidence in the prediction.
10345	Confidence *float64 `type:"float"`
10346
10347	// The predicted gender of the face.
10348	Value *string `type:"string" enum:"GenderType"`
10349}
10350
10351// String returns the string representation
10352func (s Gender) String() string {
10353	return awsutil.Prettify(s)
10354}
10355
10356// GoString returns the string representation
10357func (s Gender) GoString() string {
10358	return s.String()
10359}
10360
10361// SetConfidence sets the Confidence field's value.
10362func (s *Gender) SetConfidence(v float64) *Gender {
10363	s.Confidence = &v
10364	return s
10365}
10366
10367// SetValue sets the Value field's value.
10368func (s *Gender) SetValue(v string) *Gender {
10369	s.Value = &v
10370	return s
10371}
10372
10373// Information about where an object (DetectCustomLabels) or text (DetectText)
10374// is located on an image.
10375type Geometry struct {
10376	_ struct{} `type:"structure"`
10377
10378	// An axis-aligned coarse representation of the detected item's location on
10379	// the image.
10380	BoundingBox *BoundingBox `type:"structure"`
10381
10382	// Within the bounding box, a fine-grained polygon around the detected item.
10383	Polygon []*Point `type:"list"`
10384}
10385
10386// String returns the string representation
10387func (s Geometry) String() string {
10388	return awsutil.Prettify(s)
10389}
10390
10391// GoString returns the string representation
10392func (s Geometry) GoString() string {
10393	return s.String()
10394}
10395
10396// SetBoundingBox sets the BoundingBox field's value.
10397func (s *Geometry) SetBoundingBox(v *BoundingBox) *Geometry {
10398	s.BoundingBox = v
10399	return s
10400}
10401
10402// SetPolygon sets the Polygon field's value.
10403func (s *Geometry) SetPolygon(v []*Point) *Geometry {
10404	s.Polygon = v
10405	return s
10406}
10407
10408type GetCelebrityInfoInput struct {
10409	_ struct{} `type:"structure"`
10410
10411	// The ID for the celebrity. You get the celebrity ID from a call to the RecognizeCelebrities
10412	// operation, which recognizes celebrities in an image.
10413	//
10414	// Id is a required field
10415	Id *string `type:"string" required:"true"`
10416}
10417
10418// String returns the string representation
10419func (s GetCelebrityInfoInput) String() string {
10420	return awsutil.Prettify(s)
10421}
10422
10423// GoString returns the string representation
10424func (s GetCelebrityInfoInput) GoString() string {
10425	return s.String()
10426}
10427
10428// Validate inspects the fields of the type to determine if they are valid.
10429func (s *GetCelebrityInfoInput) Validate() error {
10430	invalidParams := request.ErrInvalidParams{Context: "GetCelebrityInfoInput"}
10431	if s.Id == nil {
10432		invalidParams.Add(request.NewErrParamRequired("Id"))
10433	}
10434
10435	if invalidParams.Len() > 0 {
10436		return invalidParams
10437	}
10438	return nil
10439}
10440
10441// SetId sets the Id field's value.
10442func (s *GetCelebrityInfoInput) SetId(v string) *GetCelebrityInfoInput {
10443	s.Id = &v
10444	return s
10445}
10446
10447type GetCelebrityInfoOutput struct {
10448	_ struct{} `type:"structure"`
10449
10450	// The name of the celebrity.
10451	Name *string `type:"string"`
10452
10453	// An array of URLs pointing to additional celebrity information.
10454	Urls []*string `type:"list"`
10455}
10456
10457// String returns the string representation
10458func (s GetCelebrityInfoOutput) String() string {
10459	return awsutil.Prettify(s)
10460}
10461
10462// GoString returns the string representation
10463func (s GetCelebrityInfoOutput) GoString() string {
10464	return s.String()
10465}
10466
10467// SetName sets the Name field's value.
10468func (s *GetCelebrityInfoOutput) SetName(v string) *GetCelebrityInfoOutput {
10469	s.Name = &v
10470	return s
10471}
10472
10473// SetUrls sets the Urls field's value.
10474func (s *GetCelebrityInfoOutput) SetUrls(v []*string) *GetCelebrityInfoOutput {
10475	s.Urls = v
10476	return s
10477}
10478
10479type GetCelebrityRecognitionInput struct {
10480	_ struct{} `type:"structure"`
10481
10482	// Job identifier for the required celebrity recognition analysis. You can get
10483	// the job identifer from a call to StartCelebrityRecognition.
10484	//
10485	// JobId is a required field
10486	JobId *string `min:"1" type:"string" required:"true"`
10487
10488	// Maximum number of results to return per paginated call. The largest value
10489	// you can specify is 1000. If you specify a value greater than 1000, a maximum
10490	// of 1000 results is returned. The default value is 1000.
10491	MaxResults *int64 `min:"1" type:"integer"`
10492
10493	// If the previous response was incomplete (because there is more recognized
10494	// celebrities to retrieve), Amazon Rekognition Video returns a pagination token
10495	// in the response. You can use this pagination token to retrieve the next set
10496	// of celebrities.
10497	NextToken *string `type:"string"`
10498
10499	// Sort to use for celebrities returned in Celebrities field. Specify ID to
10500	// sort by the celebrity identifier, specify TIMESTAMP to sort by the time the
10501	// celebrity was recognized.
10502	SortBy *string `type:"string" enum:"CelebrityRecognitionSortBy"`
10503}
10504
10505// String returns the string representation
10506func (s GetCelebrityRecognitionInput) String() string {
10507	return awsutil.Prettify(s)
10508}
10509
10510// GoString returns the string representation
10511func (s GetCelebrityRecognitionInput) GoString() string {
10512	return s.String()
10513}
10514
10515// Validate inspects the fields of the type to determine if they are valid.
10516func (s *GetCelebrityRecognitionInput) Validate() error {
10517	invalidParams := request.ErrInvalidParams{Context: "GetCelebrityRecognitionInput"}
10518	if s.JobId == nil {
10519		invalidParams.Add(request.NewErrParamRequired("JobId"))
10520	}
10521	if s.JobId != nil && len(*s.JobId) < 1 {
10522		invalidParams.Add(request.NewErrParamMinLen("JobId", 1))
10523	}
10524	if s.MaxResults != nil && *s.MaxResults < 1 {
10525		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
10526	}
10527
10528	if invalidParams.Len() > 0 {
10529		return invalidParams
10530	}
10531	return nil
10532}
10533
10534// SetJobId sets the JobId field's value.
10535func (s *GetCelebrityRecognitionInput) SetJobId(v string) *GetCelebrityRecognitionInput {
10536	s.JobId = &v
10537	return s
10538}
10539
10540// SetMaxResults sets the MaxResults field's value.
10541func (s *GetCelebrityRecognitionInput) SetMaxResults(v int64) *GetCelebrityRecognitionInput {
10542	s.MaxResults = &v
10543	return s
10544}
10545
10546// SetNextToken sets the NextToken field's value.
10547func (s *GetCelebrityRecognitionInput) SetNextToken(v string) *GetCelebrityRecognitionInput {
10548	s.NextToken = &v
10549	return s
10550}
10551
10552// SetSortBy sets the SortBy field's value.
10553func (s *GetCelebrityRecognitionInput) SetSortBy(v string) *GetCelebrityRecognitionInput {
10554	s.SortBy = &v
10555	return s
10556}
10557
10558type GetCelebrityRecognitionOutput struct {
10559	_ struct{} `type:"structure"`
10560
10561	// Array of celebrities recognized in the video.
10562	Celebrities []*CelebrityRecognition `type:"list"`
10563
10564	// The current status of the celebrity recognition job.
10565	JobStatus *string `type:"string" enum:"VideoJobStatus"`
10566
10567	// If the response is truncated, Amazon Rekognition Video returns this token
10568	// that you can use in the subsequent request to retrieve the next set of celebrities.
10569	NextToken *string `type:"string"`
10570
10571	// If the job fails, StatusMessage provides a descriptive error message.
10572	StatusMessage *string `type:"string"`
10573
10574	// Information about a video that Amazon Rekognition Video analyzed. Videometadata
10575	// is returned in every page of paginated responses from a Amazon Rekognition
10576	// Video operation.
10577	VideoMetadata *VideoMetadata `type:"structure"`
10578}
10579
10580// String returns the string representation
10581func (s GetCelebrityRecognitionOutput) String() string {
10582	return awsutil.Prettify(s)
10583}
10584
10585// GoString returns the string representation
10586func (s GetCelebrityRecognitionOutput) GoString() string {
10587	return s.String()
10588}
10589
10590// SetCelebrities sets the Celebrities field's value.
10591func (s *GetCelebrityRecognitionOutput) SetCelebrities(v []*CelebrityRecognition) *GetCelebrityRecognitionOutput {
10592	s.Celebrities = v
10593	return s
10594}
10595
10596// SetJobStatus sets the JobStatus field's value.
10597func (s *GetCelebrityRecognitionOutput) SetJobStatus(v string) *GetCelebrityRecognitionOutput {
10598	s.JobStatus = &v
10599	return s
10600}
10601
10602// SetNextToken sets the NextToken field's value.
10603func (s *GetCelebrityRecognitionOutput) SetNextToken(v string) *GetCelebrityRecognitionOutput {
10604	s.NextToken = &v
10605	return s
10606}
10607
10608// SetStatusMessage sets the StatusMessage field's value.
10609func (s *GetCelebrityRecognitionOutput) SetStatusMessage(v string) *GetCelebrityRecognitionOutput {
10610	s.StatusMessage = &v
10611	return s
10612}
10613
10614// SetVideoMetadata sets the VideoMetadata field's value.
10615func (s *GetCelebrityRecognitionOutput) SetVideoMetadata(v *VideoMetadata) *GetCelebrityRecognitionOutput {
10616	s.VideoMetadata = v
10617	return s
10618}
10619
10620type GetContentModerationInput struct {
10621	_ struct{} `type:"structure"`
10622
10623	// The identifier for the unsafe content job. Use JobId to identify the job
10624	// in a subsequent call to GetContentModeration.
10625	//
10626	// JobId is a required field
10627	JobId *string `min:"1" type:"string" required:"true"`
10628
10629	// Maximum number of results to return per paginated call. The largest value
10630	// you can specify is 1000. If you specify a value greater than 1000, a maximum
10631	// of 1000 results is returned. The default value is 1000.
10632	MaxResults *int64 `min:"1" type:"integer"`
10633
10634	// If the previous response was incomplete (because there is more data to retrieve),
10635	// Amazon Rekognition returns a pagination token in the response. You can use
10636	// this pagination token to retrieve the next set of unsafe content labels.
10637	NextToken *string `type:"string"`
10638
10639	// Sort to use for elements in the ModerationLabelDetections array. Use TIMESTAMP
10640	// to sort array elements by the time labels are detected. Use NAME to alphabetically
10641	// group elements for a label together. Within each label group, the array element
10642	// are sorted by detection confidence. The default sort is by TIMESTAMP.
10643	SortBy *string `type:"string" enum:"ContentModerationSortBy"`
10644}
10645
10646// String returns the string representation
10647func (s GetContentModerationInput) String() string {
10648	return awsutil.Prettify(s)
10649}
10650
10651// GoString returns the string representation
10652func (s GetContentModerationInput) GoString() string {
10653	return s.String()
10654}
10655
10656// Validate inspects the fields of the type to determine if they are valid.
10657func (s *GetContentModerationInput) Validate() error {
10658	invalidParams := request.ErrInvalidParams{Context: "GetContentModerationInput"}
10659	if s.JobId == nil {
10660		invalidParams.Add(request.NewErrParamRequired("JobId"))
10661	}
10662	if s.JobId != nil && len(*s.JobId) < 1 {
10663		invalidParams.Add(request.NewErrParamMinLen("JobId", 1))
10664	}
10665	if s.MaxResults != nil && *s.MaxResults < 1 {
10666		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
10667	}
10668
10669	if invalidParams.Len() > 0 {
10670		return invalidParams
10671	}
10672	return nil
10673}
10674
10675// SetJobId sets the JobId field's value.
10676func (s *GetContentModerationInput) SetJobId(v string) *GetContentModerationInput {
10677	s.JobId = &v
10678	return s
10679}
10680
10681// SetMaxResults sets the MaxResults field's value.
10682func (s *GetContentModerationInput) SetMaxResults(v int64) *GetContentModerationInput {
10683	s.MaxResults = &v
10684	return s
10685}
10686
10687// SetNextToken sets the NextToken field's value.
10688func (s *GetContentModerationInput) SetNextToken(v string) *GetContentModerationInput {
10689	s.NextToken = &v
10690	return s
10691}
10692
10693// SetSortBy sets the SortBy field's value.
10694func (s *GetContentModerationInput) SetSortBy(v string) *GetContentModerationInput {
10695	s.SortBy = &v
10696	return s
10697}
10698
10699type GetContentModerationOutput struct {
10700	_ struct{} `type:"structure"`
10701
10702	// The current status of the unsafe content analysis job.
10703	JobStatus *string `type:"string" enum:"VideoJobStatus"`
10704
10705	// The detected unsafe content labels and the time(s) they were detected.
10706	ModerationLabels []*ContentModerationDetection `type:"list"`
10707
10708	// Version number of the moderation detection model that was used to detect
10709	// unsafe content.
10710	ModerationModelVersion *string `type:"string"`
10711
10712	// If the response is truncated, Amazon Rekognition Video returns this token
10713	// that you can use in the subsequent request to retrieve the next set of unsafe
10714	// content labels.
10715	NextToken *string `type:"string"`
10716
10717	// If the job fails, StatusMessage provides a descriptive error message.
10718	StatusMessage *string `type:"string"`
10719
10720	// Information about a video that Amazon Rekognition analyzed. Videometadata
10721	// is returned in every page of paginated responses from GetContentModeration.
10722	VideoMetadata *VideoMetadata `type:"structure"`
10723}
10724
10725// String returns the string representation
10726func (s GetContentModerationOutput) String() string {
10727	return awsutil.Prettify(s)
10728}
10729
10730// GoString returns the string representation
10731func (s GetContentModerationOutput) GoString() string {
10732	return s.String()
10733}
10734
10735// SetJobStatus sets the JobStatus field's value.
10736func (s *GetContentModerationOutput) SetJobStatus(v string) *GetContentModerationOutput {
10737	s.JobStatus = &v
10738	return s
10739}
10740
10741// SetModerationLabels sets the ModerationLabels field's value.
10742func (s *GetContentModerationOutput) SetModerationLabels(v []*ContentModerationDetection) *GetContentModerationOutput {
10743	s.ModerationLabels = v
10744	return s
10745}
10746
10747// SetModerationModelVersion sets the ModerationModelVersion field's value.
10748func (s *GetContentModerationOutput) SetModerationModelVersion(v string) *GetContentModerationOutput {
10749	s.ModerationModelVersion = &v
10750	return s
10751}
10752
10753// SetNextToken sets the NextToken field's value.
10754func (s *GetContentModerationOutput) SetNextToken(v string) *GetContentModerationOutput {
10755	s.NextToken = &v
10756	return s
10757}
10758
10759// SetStatusMessage sets the StatusMessage field's value.
10760func (s *GetContentModerationOutput) SetStatusMessage(v string) *GetContentModerationOutput {
10761	s.StatusMessage = &v
10762	return s
10763}
10764
10765// SetVideoMetadata sets the VideoMetadata field's value.
10766func (s *GetContentModerationOutput) SetVideoMetadata(v *VideoMetadata) *GetContentModerationOutput {
10767	s.VideoMetadata = v
10768	return s
10769}
10770
10771type GetFaceDetectionInput struct {
10772	_ struct{} `type:"structure"`
10773
10774	// Unique identifier for the face detection job. The JobId is returned from
10775	// StartFaceDetection.
10776	//
10777	// JobId is a required field
10778	JobId *string `min:"1" type:"string" required:"true"`
10779
10780	// Maximum number of results to return per paginated call. The largest value
10781	// you can specify is 1000. If you specify a value greater than 1000, a maximum
10782	// of 1000 results is returned. The default value is 1000.
10783	MaxResults *int64 `min:"1" type:"integer"`
10784
10785	// If the previous response was incomplete (because there are more faces to
10786	// retrieve), Amazon Rekognition Video returns a pagination token in the response.
10787	// You can use this pagination token to retrieve the next set of faces.
10788	NextToken *string `type:"string"`
10789}
10790
10791// String returns the string representation
10792func (s GetFaceDetectionInput) String() string {
10793	return awsutil.Prettify(s)
10794}
10795
10796// GoString returns the string representation
10797func (s GetFaceDetectionInput) GoString() string {
10798	return s.String()
10799}
10800
10801// Validate inspects the fields of the type to determine if they are valid.
10802func (s *GetFaceDetectionInput) Validate() error {
10803	invalidParams := request.ErrInvalidParams{Context: "GetFaceDetectionInput"}
10804	if s.JobId == nil {
10805		invalidParams.Add(request.NewErrParamRequired("JobId"))
10806	}
10807	if s.JobId != nil && len(*s.JobId) < 1 {
10808		invalidParams.Add(request.NewErrParamMinLen("JobId", 1))
10809	}
10810	if s.MaxResults != nil && *s.MaxResults < 1 {
10811		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
10812	}
10813
10814	if invalidParams.Len() > 0 {
10815		return invalidParams
10816	}
10817	return nil
10818}
10819
10820// SetJobId sets the JobId field's value.
10821func (s *GetFaceDetectionInput) SetJobId(v string) *GetFaceDetectionInput {
10822	s.JobId = &v
10823	return s
10824}
10825
10826// SetMaxResults sets the MaxResults field's value.
10827func (s *GetFaceDetectionInput) SetMaxResults(v int64) *GetFaceDetectionInput {
10828	s.MaxResults = &v
10829	return s
10830}
10831
10832// SetNextToken sets the NextToken field's value.
10833func (s *GetFaceDetectionInput) SetNextToken(v string) *GetFaceDetectionInput {
10834	s.NextToken = &v
10835	return s
10836}
10837
10838type GetFaceDetectionOutput struct {
10839	_ struct{} `type:"structure"`
10840
10841	// An array of faces detected in the video. Each element contains a detected
10842	// face's details and the time, in milliseconds from the start of the video,
10843	// the face was detected.
10844	Faces []*FaceDetection `type:"list"`
10845
10846	// The current status of the face detection job.
10847	JobStatus *string `type:"string" enum:"VideoJobStatus"`
10848
10849	// If the response is truncated, Amazon Rekognition returns this token that
10850	// you can use in the subsequent request to retrieve the next set of faces.
10851	NextToken *string `type:"string"`
10852
10853	// If the job fails, StatusMessage provides a descriptive error message.
10854	StatusMessage *string `type:"string"`
10855
10856	// Information about a video that Amazon Rekognition Video analyzed. Videometadata
10857	// is returned in every page of paginated responses from a Amazon Rekognition
10858	// video operation.
10859	VideoMetadata *VideoMetadata `type:"structure"`
10860}
10861
10862// String returns the string representation
10863func (s GetFaceDetectionOutput) String() string {
10864	return awsutil.Prettify(s)
10865}
10866
10867// GoString returns the string representation
10868func (s GetFaceDetectionOutput) GoString() string {
10869	return s.String()
10870}
10871
10872// SetFaces sets the Faces field's value.
10873func (s *GetFaceDetectionOutput) SetFaces(v []*FaceDetection) *GetFaceDetectionOutput {
10874	s.Faces = v
10875	return s
10876}
10877
10878// SetJobStatus sets the JobStatus field's value.
10879func (s *GetFaceDetectionOutput) SetJobStatus(v string) *GetFaceDetectionOutput {
10880	s.JobStatus = &v
10881	return s
10882}
10883
10884// SetNextToken sets the NextToken field's value.
10885func (s *GetFaceDetectionOutput) SetNextToken(v string) *GetFaceDetectionOutput {
10886	s.NextToken = &v
10887	return s
10888}
10889
10890// SetStatusMessage sets the StatusMessage field's value.
10891func (s *GetFaceDetectionOutput) SetStatusMessage(v string) *GetFaceDetectionOutput {
10892	s.StatusMessage = &v
10893	return s
10894}
10895
10896// SetVideoMetadata sets the VideoMetadata field's value.
10897func (s *GetFaceDetectionOutput) SetVideoMetadata(v *VideoMetadata) *GetFaceDetectionOutput {
10898	s.VideoMetadata = v
10899	return s
10900}
10901
10902type GetFaceSearchInput struct {
10903	_ struct{} `type:"structure"`
10904
10905	// The job identifer for the search request. You get the job identifier from
10906	// an initial call to StartFaceSearch.
10907	//
10908	// JobId is a required field
10909	JobId *string `min:"1" type:"string" required:"true"`
10910
10911	// Maximum number of results to return per paginated call. The largest value
10912	// you can specify is 1000. If you specify a value greater than 1000, a maximum
10913	// of 1000 results is returned. The default value is 1000.
10914	MaxResults *int64 `min:"1" type:"integer"`
10915
10916	// If the previous response was incomplete (because there is more search results
10917	// to retrieve), Amazon Rekognition Video returns a pagination token in the
10918	// response. You can use this pagination token to retrieve the next set of search
10919	// results.
10920	NextToken *string `type:"string"`
10921
10922	// Sort to use for grouping faces in the response. Use TIMESTAMP to group faces
10923	// by the time that they are recognized. Use INDEX to sort by recognized faces.
10924	SortBy *string `type:"string" enum:"FaceSearchSortBy"`
10925}
10926
10927// String returns the string representation
10928func (s GetFaceSearchInput) String() string {
10929	return awsutil.Prettify(s)
10930}
10931
10932// GoString returns the string representation
10933func (s GetFaceSearchInput) GoString() string {
10934	return s.String()
10935}
10936
10937// Validate inspects the fields of the type to determine if they are valid.
10938func (s *GetFaceSearchInput) Validate() error {
10939	invalidParams := request.ErrInvalidParams{Context: "GetFaceSearchInput"}
10940	if s.JobId == nil {
10941		invalidParams.Add(request.NewErrParamRequired("JobId"))
10942	}
10943	if s.JobId != nil && len(*s.JobId) < 1 {
10944		invalidParams.Add(request.NewErrParamMinLen("JobId", 1))
10945	}
10946	if s.MaxResults != nil && *s.MaxResults < 1 {
10947		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
10948	}
10949
10950	if invalidParams.Len() > 0 {
10951		return invalidParams
10952	}
10953	return nil
10954}
10955
10956// SetJobId sets the JobId field's value.
10957func (s *GetFaceSearchInput) SetJobId(v string) *GetFaceSearchInput {
10958	s.JobId = &v
10959	return s
10960}
10961
10962// SetMaxResults sets the MaxResults field's value.
10963func (s *GetFaceSearchInput) SetMaxResults(v int64) *GetFaceSearchInput {
10964	s.MaxResults = &v
10965	return s
10966}
10967
10968// SetNextToken sets the NextToken field's value.
10969func (s *GetFaceSearchInput) SetNextToken(v string) *GetFaceSearchInput {
10970	s.NextToken = &v
10971	return s
10972}
10973
10974// SetSortBy sets the SortBy field's value.
10975func (s *GetFaceSearchInput) SetSortBy(v string) *GetFaceSearchInput {
10976	s.SortBy = &v
10977	return s
10978}
10979
10980type GetFaceSearchOutput struct {
10981	_ struct{} `type:"structure"`
10982
10983	// The current status of the face search job.
10984	JobStatus *string `type:"string" enum:"VideoJobStatus"`
10985
10986	// If the response is truncated, Amazon Rekognition Video returns this token
10987	// that you can use in the subsequent request to retrieve the next set of search
10988	// results.
10989	NextToken *string `type:"string"`
10990
10991	// An array of persons, PersonMatch, in the video whose face(s) match the face(s)
10992	// in an Amazon Rekognition collection. It also includes time information for
10993	// when persons are matched in the video. You specify the input collection in
10994	// an initial call to StartFaceSearch. Each Persons element includes a time
10995	// the person was matched, face match details (FaceMatches) for matching faces
10996	// in the collection, and person information (Person) for the matched person.
10997	Persons []*PersonMatch `type:"list"`
10998
10999	// If the job fails, StatusMessage provides a descriptive error message.
11000	StatusMessage *string `type:"string"`
11001
11002	// Information about a video that Amazon Rekognition analyzed. Videometadata
11003	// is returned in every page of paginated responses from a Amazon Rekognition
11004	// Video operation.
11005	VideoMetadata *VideoMetadata `type:"structure"`
11006}
11007
11008// String returns the string representation
11009func (s GetFaceSearchOutput) String() string {
11010	return awsutil.Prettify(s)
11011}
11012
11013// GoString returns the string representation
11014func (s GetFaceSearchOutput) GoString() string {
11015	return s.String()
11016}
11017
11018// SetJobStatus sets the JobStatus field's value.
11019func (s *GetFaceSearchOutput) SetJobStatus(v string) *GetFaceSearchOutput {
11020	s.JobStatus = &v
11021	return s
11022}
11023
11024// SetNextToken sets the NextToken field's value.
11025func (s *GetFaceSearchOutput) SetNextToken(v string) *GetFaceSearchOutput {
11026	s.NextToken = &v
11027	return s
11028}
11029
11030// SetPersons sets the Persons field's value.
11031func (s *GetFaceSearchOutput) SetPersons(v []*PersonMatch) *GetFaceSearchOutput {
11032	s.Persons = v
11033	return s
11034}
11035
11036// SetStatusMessage sets the StatusMessage field's value.
11037func (s *GetFaceSearchOutput) SetStatusMessage(v string) *GetFaceSearchOutput {
11038	s.StatusMessage = &v
11039	return s
11040}
11041
11042// SetVideoMetadata sets the VideoMetadata field's value.
11043func (s *GetFaceSearchOutput) SetVideoMetadata(v *VideoMetadata) *GetFaceSearchOutput {
11044	s.VideoMetadata = v
11045	return s
11046}
11047
11048type GetLabelDetectionInput struct {
11049	_ struct{} `type:"structure"`
11050
11051	// Job identifier for the label detection operation for which you want results
11052	// returned. You get the job identifer from an initial call to StartlabelDetection.
11053	//
11054	// JobId is a required field
11055	JobId *string `min:"1" type:"string" required:"true"`
11056
11057	// Maximum number of results to return per paginated call. The largest value
11058	// you can specify is 1000. If you specify a value greater than 1000, a maximum
11059	// of 1000 results is returned. The default value is 1000.
11060	MaxResults *int64 `min:"1" type:"integer"`
11061
11062	// If the previous response was incomplete (because there are more labels to
11063	// retrieve), Amazon Rekognition Video returns a pagination token in the response.
11064	// You can use this pagination token to retrieve the next set of labels.
11065	NextToken *string `type:"string"`
11066
11067	// Sort to use for elements in the Labels array. Use TIMESTAMP to sort array
11068	// elements by the time labels are detected. Use NAME to alphabetically group
11069	// elements for a label together. Within each label group, the array element
11070	// are sorted by detection confidence. The default sort is by TIMESTAMP.
11071	SortBy *string `type:"string" enum:"LabelDetectionSortBy"`
11072}
11073
11074// String returns the string representation
11075func (s GetLabelDetectionInput) String() string {
11076	return awsutil.Prettify(s)
11077}
11078
11079// GoString returns the string representation
11080func (s GetLabelDetectionInput) GoString() string {
11081	return s.String()
11082}
11083
11084// Validate inspects the fields of the type to determine if they are valid.
11085func (s *GetLabelDetectionInput) Validate() error {
11086	invalidParams := request.ErrInvalidParams{Context: "GetLabelDetectionInput"}
11087	if s.JobId == nil {
11088		invalidParams.Add(request.NewErrParamRequired("JobId"))
11089	}
11090	if s.JobId != nil && len(*s.JobId) < 1 {
11091		invalidParams.Add(request.NewErrParamMinLen("JobId", 1))
11092	}
11093	if s.MaxResults != nil && *s.MaxResults < 1 {
11094		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
11095	}
11096
11097	if invalidParams.Len() > 0 {
11098		return invalidParams
11099	}
11100	return nil
11101}
11102
11103// SetJobId sets the JobId field's value.
11104func (s *GetLabelDetectionInput) SetJobId(v string) *GetLabelDetectionInput {
11105	s.JobId = &v
11106	return s
11107}
11108
11109// SetMaxResults sets the MaxResults field's value.
11110func (s *GetLabelDetectionInput) SetMaxResults(v int64) *GetLabelDetectionInput {
11111	s.MaxResults = &v
11112	return s
11113}
11114
11115// SetNextToken sets the NextToken field's value.
11116func (s *GetLabelDetectionInput) SetNextToken(v string) *GetLabelDetectionInput {
11117	s.NextToken = &v
11118	return s
11119}
11120
11121// SetSortBy sets the SortBy field's value.
11122func (s *GetLabelDetectionInput) SetSortBy(v string) *GetLabelDetectionInput {
11123	s.SortBy = &v
11124	return s
11125}
11126
11127type GetLabelDetectionOutput struct {
11128	_ struct{} `type:"structure"`
11129
11130	// The current status of the label detection job.
11131	JobStatus *string `type:"string" enum:"VideoJobStatus"`
11132
11133	// Version number of the label detection model that was used to detect labels.
11134	LabelModelVersion *string `type:"string"`
11135
11136	// An array of labels detected in the video. Each element contains the detected
11137	// label and the time, in milliseconds from the start of the video, that the
11138	// label was detected.
11139	Labels []*LabelDetection `type:"list"`
11140
11141	// If the response is truncated, Amazon Rekognition Video returns this token
11142	// that you can use in the subsequent request to retrieve the next set of labels.
11143	NextToken *string `type:"string"`
11144
11145	// If the job fails, StatusMessage provides a descriptive error message.
11146	StatusMessage *string `type:"string"`
11147
11148	// Information about a video that Amazon Rekognition Video analyzed. Videometadata
11149	// is returned in every page of paginated responses from a Amazon Rekognition
11150	// video operation.
11151	VideoMetadata *VideoMetadata `type:"structure"`
11152}
11153
11154// String returns the string representation
11155func (s GetLabelDetectionOutput) String() string {
11156	return awsutil.Prettify(s)
11157}
11158
11159// GoString returns the string representation
11160func (s GetLabelDetectionOutput) GoString() string {
11161	return s.String()
11162}
11163
11164// SetJobStatus sets the JobStatus field's value.
11165func (s *GetLabelDetectionOutput) SetJobStatus(v string) *GetLabelDetectionOutput {
11166	s.JobStatus = &v
11167	return s
11168}
11169
11170// SetLabelModelVersion sets the LabelModelVersion field's value.
11171func (s *GetLabelDetectionOutput) SetLabelModelVersion(v string) *GetLabelDetectionOutput {
11172	s.LabelModelVersion = &v
11173	return s
11174}
11175
11176// SetLabels sets the Labels field's value.
11177func (s *GetLabelDetectionOutput) SetLabels(v []*LabelDetection) *GetLabelDetectionOutput {
11178	s.Labels = v
11179	return s
11180}
11181
11182// SetNextToken sets the NextToken field's value.
11183func (s *GetLabelDetectionOutput) SetNextToken(v string) *GetLabelDetectionOutput {
11184	s.NextToken = &v
11185	return s
11186}
11187
11188// SetStatusMessage sets the StatusMessage field's value.
11189func (s *GetLabelDetectionOutput) SetStatusMessage(v string) *GetLabelDetectionOutput {
11190	s.StatusMessage = &v
11191	return s
11192}
11193
11194// SetVideoMetadata sets the VideoMetadata field's value.
11195func (s *GetLabelDetectionOutput) SetVideoMetadata(v *VideoMetadata) *GetLabelDetectionOutput {
11196	s.VideoMetadata = v
11197	return s
11198}
11199
11200type GetPersonTrackingInput struct {
11201	_ struct{} `type:"structure"`
11202
11203	// The identifier for a job that tracks persons in a video. You get the JobId
11204	// from a call to StartPersonTracking.
11205	//
11206	// JobId is a required field
11207	JobId *string `min:"1" type:"string" required:"true"`
11208
11209	// Maximum number of results to return per paginated call. The largest value
11210	// you can specify is 1000. If you specify a value greater than 1000, a maximum
11211	// of 1000 results is returned. The default value is 1000.
11212	MaxResults *int64 `min:"1" type:"integer"`
11213
11214	// If the previous response was incomplete (because there are more persons to
11215	// retrieve), Amazon Rekognition Video returns a pagination token in the response.
11216	// You can use this pagination token to retrieve the next set of persons.
11217	NextToken *string `type:"string"`
11218
11219	// Sort to use for elements in the Persons array. Use TIMESTAMP to sort array
11220	// elements by the time persons are detected. Use INDEX to sort by the tracked
11221	// persons. If you sort by INDEX, the array elements for each person are sorted
11222	// by detection confidence. The default sort is by TIMESTAMP.
11223	SortBy *string `type:"string" enum:"PersonTrackingSortBy"`
11224}
11225
11226// String returns the string representation
11227func (s GetPersonTrackingInput) String() string {
11228	return awsutil.Prettify(s)
11229}
11230
11231// GoString returns the string representation
11232func (s GetPersonTrackingInput) GoString() string {
11233	return s.String()
11234}
11235
11236// Validate inspects the fields of the type to determine if they are valid.
11237func (s *GetPersonTrackingInput) Validate() error {
11238	invalidParams := request.ErrInvalidParams{Context: "GetPersonTrackingInput"}
11239	if s.JobId == nil {
11240		invalidParams.Add(request.NewErrParamRequired("JobId"))
11241	}
11242	if s.JobId != nil && len(*s.JobId) < 1 {
11243		invalidParams.Add(request.NewErrParamMinLen("JobId", 1))
11244	}
11245	if s.MaxResults != nil && *s.MaxResults < 1 {
11246		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
11247	}
11248
11249	if invalidParams.Len() > 0 {
11250		return invalidParams
11251	}
11252	return nil
11253}
11254
11255// SetJobId sets the JobId field's value.
11256func (s *GetPersonTrackingInput) SetJobId(v string) *GetPersonTrackingInput {
11257	s.JobId = &v
11258	return s
11259}
11260
11261// SetMaxResults sets the MaxResults field's value.
11262func (s *GetPersonTrackingInput) SetMaxResults(v int64) *GetPersonTrackingInput {
11263	s.MaxResults = &v
11264	return s
11265}
11266
11267// SetNextToken sets the NextToken field's value.
11268func (s *GetPersonTrackingInput) SetNextToken(v string) *GetPersonTrackingInput {
11269	s.NextToken = &v
11270	return s
11271}
11272
11273// SetSortBy sets the SortBy field's value.
11274func (s *GetPersonTrackingInput) SetSortBy(v string) *GetPersonTrackingInput {
11275	s.SortBy = &v
11276	return s
11277}
11278
11279type GetPersonTrackingOutput struct {
11280	_ struct{} `type:"structure"`
11281
11282	// The current status of the person tracking job.
11283	JobStatus *string `type:"string" enum:"VideoJobStatus"`
11284
11285	// If the response is truncated, Amazon Rekognition Video returns this token
11286	// that you can use in the subsequent request to retrieve the next set of persons.
11287	NextToken *string `type:"string"`
11288
11289	// An array of the persons detected in the video and the time(s) their path
11290	// was tracked throughout the video. An array element will exist for each time
11291	// a person's path is tracked.
11292	Persons []*PersonDetection `type:"list"`
11293
11294	// If the job fails, StatusMessage provides a descriptive error message.
11295	StatusMessage *string `type:"string"`
11296
11297	// Information about a video that Amazon Rekognition Video analyzed. Videometadata
11298	// is returned in every page of paginated responses from a Amazon Rekognition
11299	// Video operation.
11300	VideoMetadata *VideoMetadata `type:"structure"`
11301}
11302
11303// String returns the string representation
11304func (s GetPersonTrackingOutput) String() string {
11305	return awsutil.Prettify(s)
11306}
11307
11308// GoString returns the string representation
11309func (s GetPersonTrackingOutput) GoString() string {
11310	return s.String()
11311}
11312
11313// SetJobStatus sets the JobStatus field's value.
11314func (s *GetPersonTrackingOutput) SetJobStatus(v string) *GetPersonTrackingOutput {
11315	s.JobStatus = &v
11316	return s
11317}
11318
11319// SetNextToken sets the NextToken field's value.
11320func (s *GetPersonTrackingOutput) SetNextToken(v string) *GetPersonTrackingOutput {
11321	s.NextToken = &v
11322	return s
11323}
11324
11325// SetPersons sets the Persons field's value.
11326func (s *GetPersonTrackingOutput) SetPersons(v []*PersonDetection) *GetPersonTrackingOutput {
11327	s.Persons = v
11328	return s
11329}
11330
11331// SetStatusMessage sets the StatusMessage field's value.
11332func (s *GetPersonTrackingOutput) SetStatusMessage(v string) *GetPersonTrackingOutput {
11333	s.StatusMessage = &v
11334	return s
11335}
11336
11337// SetVideoMetadata sets the VideoMetadata field's value.
11338func (s *GetPersonTrackingOutput) SetVideoMetadata(v *VideoMetadata) *GetPersonTrackingOutput {
11339	s.VideoMetadata = v
11340	return s
11341}
11342
11343type GetSegmentDetectionInput struct {
11344	_ struct{} `type:"structure"`
11345
11346	// Job identifier for the text detection operation for which you want results
11347	// returned. You get the job identifer from an initial call to StartSegmentDetection.
11348	//
11349	// JobId is a required field
11350	JobId *string `min:"1" type:"string" required:"true"`
11351
11352	// Maximum number of results to return per paginated call. The largest value
11353	// you can specify is 1000.
11354	MaxResults *int64 `min:"1" type:"integer"`
11355
11356	// If the response is truncated, Amazon Rekognition Video returns this token
11357	// that you can use in the subsequent request to retrieve the next set of text.
11358	NextToken *string `type:"string"`
11359}
11360
11361// String returns the string representation
11362func (s GetSegmentDetectionInput) String() string {
11363	return awsutil.Prettify(s)
11364}
11365
11366// GoString returns the string representation
11367func (s GetSegmentDetectionInput) GoString() string {
11368	return s.String()
11369}
11370
11371// Validate inspects the fields of the type to determine if they are valid.
11372func (s *GetSegmentDetectionInput) Validate() error {
11373	invalidParams := request.ErrInvalidParams{Context: "GetSegmentDetectionInput"}
11374	if s.JobId == nil {
11375		invalidParams.Add(request.NewErrParamRequired("JobId"))
11376	}
11377	if s.JobId != nil && len(*s.JobId) < 1 {
11378		invalidParams.Add(request.NewErrParamMinLen("JobId", 1))
11379	}
11380	if s.MaxResults != nil && *s.MaxResults < 1 {
11381		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
11382	}
11383
11384	if invalidParams.Len() > 0 {
11385		return invalidParams
11386	}
11387	return nil
11388}
11389
11390// SetJobId sets the JobId field's value.
11391func (s *GetSegmentDetectionInput) SetJobId(v string) *GetSegmentDetectionInput {
11392	s.JobId = &v
11393	return s
11394}
11395
11396// SetMaxResults sets the MaxResults field's value.
11397func (s *GetSegmentDetectionInput) SetMaxResults(v int64) *GetSegmentDetectionInput {
11398	s.MaxResults = &v
11399	return s
11400}
11401
11402// SetNextToken sets the NextToken field's value.
11403func (s *GetSegmentDetectionInput) SetNextToken(v string) *GetSegmentDetectionInput {
11404	s.NextToken = &v
11405	return s
11406}
11407
11408type GetSegmentDetectionOutput struct {
11409	_ struct{} `type:"structure"`
11410
11411	// An array of objects. There can be multiple audio streams. Each AudioMetadata
11412	// object contains metadata for a single audio stream. Audio information in
11413	// an AudioMetadata objects includes the audio codec, the number of audio channels,
11414	// the duration of the audio stream, and the sample rate. Audio metadata is
11415	// returned in each page of information returned by GetSegmentDetection.
11416	AudioMetadata []*AudioMetadata `type:"list"`
11417
11418	// Current status of the segment detection job.
11419	JobStatus *string `type:"string" enum:"VideoJobStatus"`
11420
11421	// If the previous response was incomplete (because there are more labels to
11422	// retrieve), Amazon Rekognition Video returns a pagination token in the response.
11423	// You can use this pagination token to retrieve the next set of text.
11424	NextToken *string `type:"string"`
11425
11426	// An array of segments detected in a video. The array is sorted by the segment
11427	// types (TECHNICAL_CUE or SHOT) specified in the SegmentTypes input parameter
11428	// of StartSegmentDetection. Within each segment type the array is sorted by
11429	// timestamp values.
11430	Segments []*SegmentDetection `type:"list"`
11431
11432	// An array containing the segment types requested in the call to StartSegmentDetection.
11433	SelectedSegmentTypes []*SegmentTypeInfo `type:"list"`
11434
11435	// If the job fails, StatusMessage provides a descriptive error message.
11436	StatusMessage *string `type:"string"`
11437
11438	// Currently, Amazon Rekognition Video returns a single object in the VideoMetadata
11439	// array. The object contains information about the video stream in the input
11440	// file that Amazon Rekognition Video chose to analyze. The VideoMetadata object
11441	// includes the video codec, video format and other information. Video metadata
11442	// is returned in each page of information returned by GetSegmentDetection.
11443	VideoMetadata []*VideoMetadata `type:"list"`
11444}
11445
11446// String returns the string representation
11447func (s GetSegmentDetectionOutput) String() string {
11448	return awsutil.Prettify(s)
11449}
11450
11451// GoString returns the string representation
11452func (s GetSegmentDetectionOutput) GoString() string {
11453	return s.String()
11454}
11455
11456// SetAudioMetadata sets the AudioMetadata field's value.
11457func (s *GetSegmentDetectionOutput) SetAudioMetadata(v []*AudioMetadata) *GetSegmentDetectionOutput {
11458	s.AudioMetadata = v
11459	return s
11460}
11461
11462// SetJobStatus sets the JobStatus field's value.
11463func (s *GetSegmentDetectionOutput) SetJobStatus(v string) *GetSegmentDetectionOutput {
11464	s.JobStatus = &v
11465	return s
11466}
11467
11468// SetNextToken sets the NextToken field's value.
11469func (s *GetSegmentDetectionOutput) SetNextToken(v string) *GetSegmentDetectionOutput {
11470	s.NextToken = &v
11471	return s
11472}
11473
11474// SetSegments sets the Segments field's value.
11475func (s *GetSegmentDetectionOutput) SetSegments(v []*SegmentDetection) *GetSegmentDetectionOutput {
11476	s.Segments = v
11477	return s
11478}
11479
11480// SetSelectedSegmentTypes sets the SelectedSegmentTypes field's value.
11481func (s *GetSegmentDetectionOutput) SetSelectedSegmentTypes(v []*SegmentTypeInfo) *GetSegmentDetectionOutput {
11482	s.SelectedSegmentTypes = v
11483	return s
11484}
11485
11486// SetStatusMessage sets the StatusMessage field's value.
11487func (s *GetSegmentDetectionOutput) SetStatusMessage(v string) *GetSegmentDetectionOutput {
11488	s.StatusMessage = &v
11489	return s
11490}
11491
11492// SetVideoMetadata sets the VideoMetadata field's value.
11493func (s *GetSegmentDetectionOutput) SetVideoMetadata(v []*VideoMetadata) *GetSegmentDetectionOutput {
11494	s.VideoMetadata = v
11495	return s
11496}
11497
11498type GetTextDetectionInput struct {
11499	_ struct{} `type:"structure"`
11500
11501	// Job identifier for the text detection operation for which you want results
11502	// returned. You get the job identifer from an initial call to StartTextDetection.
11503	//
11504	// JobId is a required field
11505	JobId *string `min:"1" type:"string" required:"true"`
11506
11507	// Maximum number of results to return per paginated call. The largest value
11508	// you can specify is 1000.
11509	MaxResults *int64 `min:"1" type:"integer"`
11510
11511	// If the previous response was incomplete (because there are more labels to
11512	// retrieve), Amazon Rekognition Video returns a pagination token in the response.
11513	// You can use this pagination token to retrieve the next set of text.
11514	NextToken *string `type:"string"`
11515}
11516
11517// String returns the string representation
11518func (s GetTextDetectionInput) String() string {
11519	return awsutil.Prettify(s)
11520}
11521
11522// GoString returns the string representation
11523func (s GetTextDetectionInput) GoString() string {
11524	return s.String()
11525}
11526
11527// Validate inspects the fields of the type to determine if they are valid.
11528func (s *GetTextDetectionInput) Validate() error {
11529	invalidParams := request.ErrInvalidParams{Context: "GetTextDetectionInput"}
11530	if s.JobId == nil {
11531		invalidParams.Add(request.NewErrParamRequired("JobId"))
11532	}
11533	if s.JobId != nil && len(*s.JobId) < 1 {
11534		invalidParams.Add(request.NewErrParamMinLen("JobId", 1))
11535	}
11536	if s.MaxResults != nil && *s.MaxResults < 1 {
11537		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
11538	}
11539
11540	if invalidParams.Len() > 0 {
11541		return invalidParams
11542	}
11543	return nil
11544}
11545
11546// SetJobId sets the JobId field's value.
11547func (s *GetTextDetectionInput) SetJobId(v string) *GetTextDetectionInput {
11548	s.JobId = &v
11549	return s
11550}
11551
11552// SetMaxResults sets the MaxResults field's value.
11553func (s *GetTextDetectionInput) SetMaxResults(v int64) *GetTextDetectionInput {
11554	s.MaxResults = &v
11555	return s
11556}
11557
11558// SetNextToken sets the NextToken field's value.
11559func (s *GetTextDetectionInput) SetNextToken(v string) *GetTextDetectionInput {
11560	s.NextToken = &v
11561	return s
11562}
11563
11564type GetTextDetectionOutput struct {
11565	_ struct{} `type:"structure"`
11566
11567	// Current status of the text detection job.
11568	JobStatus *string `type:"string" enum:"VideoJobStatus"`
11569
11570	// If the response is truncated, Amazon Rekognition Video returns this token
11571	// that you can use in the subsequent request to retrieve the next set of text.
11572	NextToken *string `type:"string"`
11573
11574	// If the job fails, StatusMessage provides a descriptive error message.
11575	StatusMessage *string `type:"string"`
11576
11577	// An array of text detected in the video. Each element contains the detected
11578	// text, the time in milliseconds from the start of the video that the text
11579	// was detected, and where it was detected on the screen.
11580	TextDetections []*TextDetectionResult `type:"list"`
11581
11582	// Version number of the text detection model that was used to detect text.
11583	TextModelVersion *string `type:"string"`
11584
11585	// Information about a video that Amazon Rekognition analyzed. Videometadata
11586	// is returned in every page of paginated responses from a Amazon Rekognition
11587	// video operation.
11588	VideoMetadata *VideoMetadata `type:"structure"`
11589}
11590
11591// String returns the string representation
11592func (s GetTextDetectionOutput) String() string {
11593	return awsutil.Prettify(s)
11594}
11595
11596// GoString returns the string representation
11597func (s GetTextDetectionOutput) GoString() string {
11598	return s.String()
11599}
11600
11601// SetJobStatus sets the JobStatus field's value.
11602func (s *GetTextDetectionOutput) SetJobStatus(v string) *GetTextDetectionOutput {
11603	s.JobStatus = &v
11604	return s
11605}
11606
11607// SetNextToken sets the NextToken field's value.
11608func (s *GetTextDetectionOutput) SetNextToken(v string) *GetTextDetectionOutput {
11609	s.NextToken = &v
11610	return s
11611}
11612
11613// SetStatusMessage sets the StatusMessage field's value.
11614func (s *GetTextDetectionOutput) SetStatusMessage(v string) *GetTextDetectionOutput {
11615	s.StatusMessage = &v
11616	return s
11617}
11618
11619// SetTextDetections sets the TextDetections field's value.
11620func (s *GetTextDetectionOutput) SetTextDetections(v []*TextDetectionResult) *GetTextDetectionOutput {
11621	s.TextDetections = v
11622	return s
11623}
11624
11625// SetTextModelVersion sets the TextModelVersion field's value.
11626func (s *GetTextDetectionOutput) SetTextModelVersion(v string) *GetTextDetectionOutput {
11627	s.TextModelVersion = &v
11628	return s
11629}
11630
11631// SetVideoMetadata sets the VideoMetadata field's value.
11632func (s *GetTextDetectionOutput) SetVideoMetadata(v *VideoMetadata) *GetTextDetectionOutput {
11633	s.VideoMetadata = v
11634	return s
11635}
11636
11637// The S3 bucket that contains an Amazon Sagemaker Ground Truth format manifest
11638// file.
11639type GroundTruthManifest struct {
11640	_ struct{} `type:"structure"`
11641
11642	// Provides the S3 bucket name and object name.
11643	//
11644	// The region for the S3 bucket containing the S3 object must match the region
11645	// you use for Amazon Rekognition operations.
11646	//
11647	// For Amazon Rekognition to process an S3 object, the user must have permission
11648	// to access the S3 object. For more information, see Resource-Based Policies
11649	// in the Amazon Rekognition Developer Guide.
11650	S3Object *S3Object `type:"structure"`
11651}
11652
11653// String returns the string representation
11654func (s GroundTruthManifest) String() string {
11655	return awsutil.Prettify(s)
11656}
11657
11658// GoString returns the string representation
11659func (s GroundTruthManifest) GoString() string {
11660	return s.String()
11661}
11662
11663// Validate inspects the fields of the type to determine if they are valid.
11664func (s *GroundTruthManifest) Validate() error {
11665	invalidParams := request.ErrInvalidParams{Context: "GroundTruthManifest"}
11666	if s.S3Object != nil {
11667		if err := s.S3Object.Validate(); err != nil {
11668			invalidParams.AddNested("S3Object", err.(request.ErrInvalidParams))
11669		}
11670	}
11671
11672	if invalidParams.Len() > 0 {
11673		return invalidParams
11674	}
11675	return nil
11676}
11677
11678// SetS3Object sets the S3Object field's value.
11679func (s *GroundTruthManifest) SetS3Object(v *S3Object) *GroundTruthManifest {
11680	s.S3Object = v
11681	return s
11682}
11683
11684// Shows the results of the human in the loop evaluation. If there is no HumanLoopArn,
11685// the input did not trigger human review.
11686type HumanLoopActivationOutput struct {
11687	_ struct{} `type:"structure"`
11688
11689	// Shows the result of condition evaluations, including those conditions which
11690	// activated a human review.
11691	HumanLoopActivationConditionsEvaluationResults aws.JSONValue `type:"jsonvalue"`
11692
11693	// Shows if and why human review was needed.
11694	HumanLoopActivationReasons []*string `min:"1" type:"list"`
11695
11696	// The Amazon Resource Name (ARN) of the HumanLoop created.
11697	HumanLoopArn *string `type:"string"`
11698}
11699
11700// String returns the string representation
11701func (s HumanLoopActivationOutput) String() string {
11702	return awsutil.Prettify(s)
11703}
11704
11705// GoString returns the string representation
11706func (s HumanLoopActivationOutput) GoString() string {
11707	return s.String()
11708}
11709
11710// SetHumanLoopActivationConditionsEvaluationResults sets the HumanLoopActivationConditionsEvaluationResults field's value.
11711func (s *HumanLoopActivationOutput) SetHumanLoopActivationConditionsEvaluationResults(v aws.JSONValue) *HumanLoopActivationOutput {
11712	s.HumanLoopActivationConditionsEvaluationResults = v
11713	return s
11714}
11715
11716// SetHumanLoopActivationReasons sets the HumanLoopActivationReasons field's value.
11717func (s *HumanLoopActivationOutput) SetHumanLoopActivationReasons(v []*string) *HumanLoopActivationOutput {
11718	s.HumanLoopActivationReasons = v
11719	return s
11720}
11721
11722// SetHumanLoopArn sets the HumanLoopArn field's value.
11723func (s *HumanLoopActivationOutput) SetHumanLoopArn(v string) *HumanLoopActivationOutput {
11724	s.HumanLoopArn = &v
11725	return s
11726}
11727
11728// Sets up the flow definition the image will be sent to if one of the conditions
11729// is met. You can also set certain attributes of the image before review.
11730type HumanLoopConfig struct {
11731	_ struct{} `type:"structure"`
11732
11733	// Sets attributes of the input data.
11734	DataAttributes *HumanLoopDataAttributes `type:"structure"`
11735
11736	// The Amazon Resource Name (ARN) of the flow definition. You can create a flow
11737	// definition by using the Amazon Sagemaker CreateFlowDefinition (https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateFlowDefinition.html)
11738	// Operation.
11739	//
11740	// FlowDefinitionArn is a required field
11741	FlowDefinitionArn *string `type:"string" required:"true"`
11742
11743	// The name of the human review used for this image. This should be kept unique
11744	// within a region.
11745	//
11746	// HumanLoopName is a required field
11747	HumanLoopName *string `min:"1" type:"string" required:"true"`
11748}
11749
11750// String returns the string representation
11751func (s HumanLoopConfig) String() string {
11752	return awsutil.Prettify(s)
11753}
11754
11755// GoString returns the string representation
11756func (s HumanLoopConfig) GoString() string {
11757	return s.String()
11758}
11759
11760// Validate inspects the fields of the type to determine if they are valid.
11761func (s *HumanLoopConfig) Validate() error {
11762	invalidParams := request.ErrInvalidParams{Context: "HumanLoopConfig"}
11763	if s.FlowDefinitionArn == nil {
11764		invalidParams.Add(request.NewErrParamRequired("FlowDefinitionArn"))
11765	}
11766	if s.HumanLoopName == nil {
11767		invalidParams.Add(request.NewErrParamRequired("HumanLoopName"))
11768	}
11769	if s.HumanLoopName != nil && len(*s.HumanLoopName) < 1 {
11770		invalidParams.Add(request.NewErrParamMinLen("HumanLoopName", 1))
11771	}
11772
11773	if invalidParams.Len() > 0 {
11774		return invalidParams
11775	}
11776	return nil
11777}
11778
11779// SetDataAttributes sets the DataAttributes field's value.
11780func (s *HumanLoopConfig) SetDataAttributes(v *HumanLoopDataAttributes) *HumanLoopConfig {
11781	s.DataAttributes = v
11782	return s
11783}
11784
11785// SetFlowDefinitionArn sets the FlowDefinitionArn field's value.
11786func (s *HumanLoopConfig) SetFlowDefinitionArn(v string) *HumanLoopConfig {
11787	s.FlowDefinitionArn = &v
11788	return s
11789}
11790
11791// SetHumanLoopName sets the HumanLoopName field's value.
11792func (s *HumanLoopConfig) SetHumanLoopName(v string) *HumanLoopConfig {
11793	s.HumanLoopName = &v
11794	return s
11795}
11796
11797// Allows you to set attributes of the image. Currently, you can declare an
11798// image as free of personally identifiable information.
11799type HumanLoopDataAttributes struct {
11800	_ struct{} `type:"structure"`
11801
11802	// Sets whether the input image is free of personally identifiable information.
11803	ContentClassifiers []*string `type:"list"`
11804}
11805
11806// String returns the string representation
11807func (s HumanLoopDataAttributes) String() string {
11808	return awsutil.Prettify(s)
11809}
11810
11811// GoString returns the string representation
11812func (s HumanLoopDataAttributes) GoString() string {
11813	return s.String()
11814}
11815
11816// SetContentClassifiers sets the ContentClassifiers field's value.
11817func (s *HumanLoopDataAttributes) SetContentClassifiers(v []*string) *HumanLoopDataAttributes {
11818	s.ContentClassifiers = v
11819	return s
11820}
11821
11822// The number of in-progress human reviews you have has exceeded the number
11823// allowed.
11824type HumanLoopQuotaExceededException struct {
11825	_            struct{}                  `type:"structure"`
11826	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
11827
11828	Message_ *string `locationName:"message" type:"string"`
11829
11830	// The quota code.
11831	QuotaCode *string `type:"string"`
11832
11833	// The resource type.
11834	ResourceType *string `type:"string"`
11835
11836	// The service code.
11837	ServiceCode *string `type:"string"`
11838}
11839
11840// String returns the string representation
11841func (s HumanLoopQuotaExceededException) String() string {
11842	return awsutil.Prettify(s)
11843}
11844
11845// GoString returns the string representation
11846func (s HumanLoopQuotaExceededException) GoString() string {
11847	return s.String()
11848}
11849
11850func newErrorHumanLoopQuotaExceededException(v protocol.ResponseMetadata) error {
11851	return &HumanLoopQuotaExceededException{
11852		RespMetadata: v,
11853	}
11854}
11855
11856// Code returns the exception type name.
11857func (s *HumanLoopQuotaExceededException) Code() string {
11858	return "HumanLoopQuotaExceededException"
11859}
11860
11861// Message returns the exception's message.
11862func (s *HumanLoopQuotaExceededException) Message() string {
11863	if s.Message_ != nil {
11864		return *s.Message_
11865	}
11866	return ""
11867}
11868
11869// OrigErr always returns nil, satisfies awserr.Error interface.
11870func (s *HumanLoopQuotaExceededException) OrigErr() error {
11871	return nil
11872}
11873
11874func (s *HumanLoopQuotaExceededException) Error() string {
11875	return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
11876}
11877
11878// Status code returns the HTTP status code for the request's response error.
11879func (s *HumanLoopQuotaExceededException) StatusCode() int {
11880	return s.RespMetadata.StatusCode
11881}
11882
11883// RequestID returns the service's response RequestID for request.
11884func (s *HumanLoopQuotaExceededException) RequestID() string {
11885	return s.RespMetadata.RequestID
11886}
11887
11888// A ClientRequestToken input parameter was reused with an operation, but at
11889// least one of the other input parameters is different from the previous call
11890// to the operation.
11891type IdempotentParameterMismatchException struct {
11892	_            struct{}                  `type:"structure"`
11893	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
11894
11895	Message_ *string `locationName:"message" type:"string"`
11896}
11897
11898// String returns the string representation
11899func (s IdempotentParameterMismatchException) String() string {
11900	return awsutil.Prettify(s)
11901}
11902
11903// GoString returns the string representation
11904func (s IdempotentParameterMismatchException) GoString() string {
11905	return s.String()
11906}
11907
11908func newErrorIdempotentParameterMismatchException(v protocol.ResponseMetadata) error {
11909	return &IdempotentParameterMismatchException{
11910		RespMetadata: v,
11911	}
11912}
11913
11914// Code returns the exception type name.
11915func (s *IdempotentParameterMismatchException) Code() string {
11916	return "IdempotentParameterMismatchException"
11917}
11918
11919// Message returns the exception's message.
11920func (s *IdempotentParameterMismatchException) Message() string {
11921	if s.Message_ != nil {
11922		return *s.Message_
11923	}
11924	return ""
11925}
11926
11927// OrigErr always returns nil, satisfies awserr.Error interface.
11928func (s *IdempotentParameterMismatchException) OrigErr() error {
11929	return nil
11930}
11931
11932func (s *IdempotentParameterMismatchException) Error() string {
11933	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
11934}
11935
11936// Status code returns the HTTP status code for the request's response error.
11937func (s *IdempotentParameterMismatchException) StatusCode() int {
11938	return s.RespMetadata.StatusCode
11939}
11940
11941// RequestID returns the service's response RequestID for request.
11942func (s *IdempotentParameterMismatchException) RequestID() string {
11943	return s.RespMetadata.RequestID
11944}
11945
11946// Provides the input image either as bytes or an S3 object.
11947//
11948// You pass image bytes to an Amazon Rekognition API operation by using the
11949// Bytes property. For example, you would use the Bytes property to pass an
11950// image loaded from a local file system. Image bytes passed by using the Bytes
11951// property must be base64-encoded. Your code may not need to encode image bytes
11952// if you are using an AWS SDK to call Amazon Rekognition API operations.
11953//
11954// For more information, see Analyzing an Image Loaded from a Local File System
11955// in the Amazon Rekognition Developer Guide.
11956//
11957// You pass images stored in an S3 bucket to an Amazon Rekognition API operation
11958// by using the S3Object property. Images stored in an S3 bucket do not need
11959// to be base64-encoded.
11960//
11961// The region for the S3 bucket containing the S3 object must match the region
11962// you use for Amazon Rekognition operations.
11963//
11964// If you use the AWS CLI to call Amazon Rekognition operations, passing image
11965// bytes using the Bytes property is not supported. You must first upload the
11966// image to an Amazon S3 bucket and then call the operation using the S3Object
11967// property.
11968//
11969// For Amazon Rekognition to process an S3 object, the user must have permission
11970// to access the S3 object. For more information, see Resource Based Policies
11971// in the Amazon Rekognition Developer Guide.
11972type Image struct {
11973	_ struct{} `type:"structure"`
11974
11975	// Blob of image bytes up to 5 MBs.
11976	//
11977	// Bytes is automatically base64 encoded/decoded by the SDK.
11978	Bytes []byte `min:"1" type:"blob"`
11979
11980	// Identifies an S3 object as the image source.
11981	S3Object *S3Object `type:"structure"`
11982}
11983
11984// String returns the string representation
11985func (s Image) String() string {
11986	return awsutil.Prettify(s)
11987}
11988
11989// GoString returns the string representation
11990func (s Image) GoString() string {
11991	return s.String()
11992}
11993
11994// Validate inspects the fields of the type to determine if they are valid.
11995func (s *Image) Validate() error {
11996	invalidParams := request.ErrInvalidParams{Context: "Image"}
11997	if s.Bytes != nil && len(s.Bytes) < 1 {
11998		invalidParams.Add(request.NewErrParamMinLen("Bytes", 1))
11999	}
12000	if s.S3Object != nil {
12001		if err := s.S3Object.Validate(); err != nil {
12002			invalidParams.AddNested("S3Object", err.(request.ErrInvalidParams))
12003		}
12004	}
12005
12006	if invalidParams.Len() > 0 {
12007		return invalidParams
12008	}
12009	return nil
12010}
12011
12012// SetBytes sets the Bytes field's value.
12013func (s *Image) SetBytes(v []byte) *Image {
12014	s.Bytes = v
12015	return s
12016}
12017
12018// SetS3Object sets the S3Object field's value.
12019func (s *Image) SetS3Object(v *S3Object) *Image {
12020	s.S3Object = v
12021	return s
12022}
12023
12024// Identifies face image brightness and sharpness.
12025type ImageQuality struct {
12026	_ struct{} `type:"structure"`
12027
12028	// Value representing brightness of the face. The service returns a value between
12029	// 0 and 100 (inclusive). A higher value indicates a brighter face image.
12030	Brightness *float64 `type:"float"`
12031
12032	// Value representing sharpness of the face. The service returns a value between
12033	// 0 and 100 (inclusive). A higher value indicates a sharper face image.
12034	Sharpness *float64 `type:"float"`
12035}
12036
12037// String returns the string representation
12038func (s ImageQuality) String() string {
12039	return awsutil.Prettify(s)
12040}
12041
12042// GoString returns the string representation
12043func (s ImageQuality) GoString() string {
12044	return s.String()
12045}
12046
12047// SetBrightness sets the Brightness field's value.
12048func (s *ImageQuality) SetBrightness(v float64) *ImageQuality {
12049	s.Brightness = &v
12050	return s
12051}
12052
12053// SetSharpness sets the Sharpness field's value.
12054func (s *ImageQuality) SetSharpness(v float64) *ImageQuality {
12055	s.Sharpness = &v
12056	return s
12057}
12058
12059// The input image size exceeds the allowed limit. If you are calling DetectProtectiveEquipment,
12060// the image size or resolution exceeds the allowed limit. For more information,
12061// see Limits in Amazon Rekognition in the Amazon Rekognition Developer Guide.
12062type ImageTooLargeException struct {
12063	_            struct{}                  `type:"structure"`
12064	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
12065
12066	Message_ *string `locationName:"message" type:"string"`
12067}
12068
12069// String returns the string representation
12070func (s ImageTooLargeException) String() string {
12071	return awsutil.Prettify(s)
12072}
12073
12074// GoString returns the string representation
12075func (s ImageTooLargeException) GoString() string {
12076	return s.String()
12077}
12078
12079func newErrorImageTooLargeException(v protocol.ResponseMetadata) error {
12080	return &ImageTooLargeException{
12081		RespMetadata: v,
12082	}
12083}
12084
12085// Code returns the exception type name.
12086func (s *ImageTooLargeException) Code() string {
12087	return "ImageTooLargeException"
12088}
12089
12090// Message returns the exception's message.
12091func (s *ImageTooLargeException) Message() string {
12092	if s.Message_ != nil {
12093		return *s.Message_
12094	}
12095	return ""
12096}
12097
12098// OrigErr always returns nil, satisfies awserr.Error interface.
12099func (s *ImageTooLargeException) OrigErr() error {
12100	return nil
12101}
12102
12103func (s *ImageTooLargeException) Error() string {
12104	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
12105}
12106
12107// Status code returns the HTTP status code for the request's response error.
12108func (s *ImageTooLargeException) StatusCode() int {
12109	return s.RespMetadata.StatusCode
12110}
12111
12112// RequestID returns the service's response RequestID for request.
12113func (s *ImageTooLargeException) RequestID() string {
12114	return s.RespMetadata.RequestID
12115}
12116
12117type IndexFacesInput struct {
12118	_ struct{} `type:"structure"`
12119
12120	// The ID of an existing collection to which you want to add the faces that
12121	// are detected in the input images.
12122	//
12123	// CollectionId is a required field
12124	CollectionId *string `min:"1" type:"string" required:"true"`
12125
12126	// An array of facial attributes that you want to be returned. This can be the
12127	// default list of attributes or all attributes. If you don't specify a value
12128	// for Attributes or if you specify ["DEFAULT"], the API returns the following
12129	// subset of facial attributes: BoundingBox, Confidence, Pose, Quality, and
12130	// Landmarks. If you provide ["ALL"], all facial attributes are returned, but
12131	// the operation takes longer to complete.
12132	//
12133	// If you provide both, ["ALL", "DEFAULT"], the service uses a logical AND operator
12134	// to determine which attributes to return (in this case, all attributes).
12135	DetectionAttributes []*string `type:"list"`
12136
12137	// The ID you want to assign to all the faces detected in the image.
12138	ExternalImageId *string `min:"1" type:"string"`
12139
12140	// The input image as base64-encoded bytes or an S3 object. If you use the AWS
12141	// CLI to call Amazon Rekognition operations, passing base64-encoded image bytes
12142	// isn't supported.
12143	//
12144	// If you are using an AWS SDK to call Amazon Rekognition, you might not need
12145	// to base64-encode image bytes passed using the Bytes field. For more information,
12146	// see Images in the Amazon Rekognition developer guide.
12147	//
12148	// Image is a required field
12149	Image *Image `type:"structure" required:"true"`
12150
12151	// The maximum number of faces to index. The value of MaxFaces must be greater
12152	// than or equal to 1. IndexFaces returns no more than 100 detected faces in
12153	// an image, even if you specify a larger value for MaxFaces.
12154	//
12155	// If IndexFaces detects more faces than the value of MaxFaces, the faces with
12156	// the lowest quality are filtered out first. If there are still more faces
12157	// than the value of MaxFaces, the faces with the smallest bounding boxes are
12158	// filtered out (up to the number that's needed to satisfy the value of MaxFaces).
12159	// Information about the unindexed faces is available in the UnindexedFaces
12160	// array.
12161	//
12162	// The faces that are returned by IndexFaces are sorted by the largest face
12163	// bounding box size to the smallest size, in descending order.
12164	//
12165	// MaxFaces can be used with a collection associated with any version of the
12166	// face model.
12167	MaxFaces *int64 `min:"1" type:"integer"`
12168
12169	// A filter that specifies a quality bar for how much filtering is done to identify
12170	// faces. Filtered faces aren't indexed. If you specify AUTO, Amazon Rekognition
12171	// chooses the quality bar. If you specify LOW, MEDIUM, or HIGH, filtering removes
12172	// all faces that don’t meet the chosen quality bar. The default value is
12173	// AUTO. The quality bar is based on a variety of common use cases. Low-quality
12174	// detections can occur for a number of reasons. Some examples are an object
12175	// that's misidentified as a face, a face that's too blurry, or a face with
12176	// a pose that's too extreme to use. If you specify NONE, no filtering is performed.
12177	//
12178	// To use quality filtering, the collection you are using must be associated
12179	// with version 3 of the face model or higher.
12180	QualityFilter *string `type:"string" enum:"QualityFilter"`
12181}
12182
12183// String returns the string representation
12184func (s IndexFacesInput) String() string {
12185	return awsutil.Prettify(s)
12186}
12187
12188// GoString returns the string representation
12189func (s IndexFacesInput) GoString() string {
12190	return s.String()
12191}
12192
12193// Validate inspects the fields of the type to determine if they are valid.
12194func (s *IndexFacesInput) Validate() error {
12195	invalidParams := request.ErrInvalidParams{Context: "IndexFacesInput"}
12196	if s.CollectionId == nil {
12197		invalidParams.Add(request.NewErrParamRequired("CollectionId"))
12198	}
12199	if s.CollectionId != nil && len(*s.CollectionId) < 1 {
12200		invalidParams.Add(request.NewErrParamMinLen("CollectionId", 1))
12201	}
12202	if s.ExternalImageId != nil && len(*s.ExternalImageId) < 1 {
12203		invalidParams.Add(request.NewErrParamMinLen("ExternalImageId", 1))
12204	}
12205	if s.Image == nil {
12206		invalidParams.Add(request.NewErrParamRequired("Image"))
12207	}
12208	if s.MaxFaces != nil && *s.MaxFaces < 1 {
12209		invalidParams.Add(request.NewErrParamMinValue("MaxFaces", 1))
12210	}
12211	if s.Image != nil {
12212		if err := s.Image.Validate(); err != nil {
12213			invalidParams.AddNested("Image", err.(request.ErrInvalidParams))
12214		}
12215	}
12216
12217	if invalidParams.Len() > 0 {
12218		return invalidParams
12219	}
12220	return nil
12221}
12222
12223// SetCollectionId sets the CollectionId field's value.
12224func (s *IndexFacesInput) SetCollectionId(v string) *IndexFacesInput {
12225	s.CollectionId = &v
12226	return s
12227}
12228
12229// SetDetectionAttributes sets the DetectionAttributes field's value.
12230func (s *IndexFacesInput) SetDetectionAttributes(v []*string) *IndexFacesInput {
12231	s.DetectionAttributes = v
12232	return s
12233}
12234
12235// SetExternalImageId sets the ExternalImageId field's value.
12236func (s *IndexFacesInput) SetExternalImageId(v string) *IndexFacesInput {
12237	s.ExternalImageId = &v
12238	return s
12239}
12240
12241// SetImage sets the Image field's value.
12242func (s *IndexFacesInput) SetImage(v *Image) *IndexFacesInput {
12243	s.Image = v
12244	return s
12245}
12246
12247// SetMaxFaces sets the MaxFaces field's value.
12248func (s *IndexFacesInput) SetMaxFaces(v int64) *IndexFacesInput {
12249	s.MaxFaces = &v
12250	return s
12251}
12252
12253// SetQualityFilter sets the QualityFilter field's value.
12254func (s *IndexFacesInput) SetQualityFilter(v string) *IndexFacesInput {
12255	s.QualityFilter = &v
12256	return s
12257}
12258
12259type IndexFacesOutput struct {
12260	_ struct{} `type:"structure"`
12261
12262	// The version number of the face detection model that's associated with the
12263	// input collection (CollectionId).
12264	FaceModelVersion *string `type:"string"`
12265
12266	// An array of faces detected and added to the collection. For more information,
12267	// see Searching Faces in a Collection in the Amazon Rekognition Developer Guide.
12268	FaceRecords []*FaceRecord `type:"list"`
12269
12270	// If your collection is associated with a face detection model that's later
12271	// than version 3.0, the value of OrientationCorrection is always null and no
12272	// orientation information is returned.
12273	//
12274	// If your collection is associated with a face detection model that's version
12275	// 3.0 or earlier, the following applies:
12276	//
12277	//    * If the input image is in .jpeg format, it might contain exchangeable
12278	//    image file format (Exif) metadata that includes the image's orientation.
12279	//    Amazon Rekognition uses this orientation information to perform image
12280	//    correction - the bounding box coordinates are translated to represent
12281	//    object locations after the orientation information in the Exif metadata
12282	//    is used to correct the image orientation. Images in .png format don't
12283	//    contain Exif metadata. The value of OrientationCorrection is null.
12284	//
12285	//    * If the image doesn't contain orientation information in its Exif metadata,
12286	//    Amazon Rekognition returns an estimated orientation (ROTATE_0, ROTATE_90,
12287	//    ROTATE_180, ROTATE_270). Amazon Rekognition doesn’t perform image correction
12288	//    for images. The bounding box coordinates aren't translated and represent
12289	//    the object locations before the image is rotated.
12290	//
12291	// Bounding box information is returned in the FaceRecords array. You can get
12292	// the version of the face detection model by calling DescribeCollection.
12293	OrientationCorrection *string `type:"string" enum:"OrientationCorrection"`
12294
12295	// An array of faces that were detected in the image but weren't indexed. They
12296	// weren't indexed because the quality filter identified them as low quality,
12297	// or the MaxFaces request parameter filtered them out. To use the quality filter,
12298	// you specify the QualityFilter request parameter.
12299	UnindexedFaces []*UnindexedFace `type:"list"`
12300}
12301
12302// String returns the string representation
12303func (s IndexFacesOutput) String() string {
12304	return awsutil.Prettify(s)
12305}
12306
12307// GoString returns the string representation
12308func (s IndexFacesOutput) GoString() string {
12309	return s.String()
12310}
12311
12312// SetFaceModelVersion sets the FaceModelVersion field's value.
12313func (s *IndexFacesOutput) SetFaceModelVersion(v string) *IndexFacesOutput {
12314	s.FaceModelVersion = &v
12315	return s
12316}
12317
12318// SetFaceRecords sets the FaceRecords field's value.
12319func (s *IndexFacesOutput) SetFaceRecords(v []*FaceRecord) *IndexFacesOutput {
12320	s.FaceRecords = v
12321	return s
12322}
12323
12324// SetOrientationCorrection sets the OrientationCorrection field's value.
12325func (s *IndexFacesOutput) SetOrientationCorrection(v string) *IndexFacesOutput {
12326	s.OrientationCorrection = &v
12327	return s
12328}
12329
12330// SetUnindexedFaces sets the UnindexedFaces field's value.
12331func (s *IndexFacesOutput) SetUnindexedFaces(v []*UnindexedFace) *IndexFacesOutput {
12332	s.UnindexedFaces = v
12333	return s
12334}
12335
12336// An instance of a label returned by Amazon Rekognition Image (DetectLabels)
12337// or by Amazon Rekognition Video (GetLabelDetection).
12338type Instance struct {
12339	_ struct{} `type:"structure"`
12340
12341	// The position of the label instance on the image.
12342	BoundingBox *BoundingBox `type:"structure"`
12343
12344	// The confidence that Amazon Rekognition has in the accuracy of the bounding
12345	// box.
12346	Confidence *float64 `type:"float"`
12347}
12348
12349// String returns the string representation
12350func (s Instance) String() string {
12351	return awsutil.Prettify(s)
12352}
12353
12354// GoString returns the string representation
12355func (s Instance) GoString() string {
12356	return s.String()
12357}
12358
12359// SetBoundingBox sets the BoundingBox field's value.
12360func (s *Instance) SetBoundingBox(v *BoundingBox) *Instance {
12361	s.BoundingBox = v
12362	return s
12363}
12364
12365// SetConfidence sets the Confidence field's value.
12366func (s *Instance) SetConfidence(v float64) *Instance {
12367	s.Confidence = &v
12368	return s
12369}
12370
12371// Amazon Rekognition experienced a service issue. Try your call again.
12372type InternalServerError struct {
12373	_            struct{}                  `type:"structure"`
12374	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
12375
12376	Message_ *string `locationName:"message" type:"string"`
12377}
12378
12379// String returns the string representation
12380func (s InternalServerError) String() string {
12381	return awsutil.Prettify(s)
12382}
12383
12384// GoString returns the string representation
12385func (s InternalServerError) GoString() string {
12386	return s.String()
12387}
12388
12389func newErrorInternalServerError(v protocol.ResponseMetadata) error {
12390	return &InternalServerError{
12391		RespMetadata: v,
12392	}
12393}
12394
12395// Code returns the exception type name.
12396func (s *InternalServerError) Code() string {
12397	return "InternalServerError"
12398}
12399
12400// Message returns the exception's message.
12401func (s *InternalServerError) Message() string {
12402	if s.Message_ != nil {
12403		return *s.Message_
12404	}
12405	return ""
12406}
12407
12408// OrigErr always returns nil, satisfies awserr.Error interface.
12409func (s *InternalServerError) OrigErr() error {
12410	return nil
12411}
12412
12413func (s *InternalServerError) Error() string {
12414	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
12415}
12416
12417// Status code returns the HTTP status code for the request's response error.
12418func (s *InternalServerError) StatusCode() int {
12419	return s.RespMetadata.StatusCode
12420}
12421
12422// RequestID returns the service's response RequestID for request.
12423func (s *InternalServerError) RequestID() string {
12424	return s.RespMetadata.RequestID
12425}
12426
12427// The provided image format is not supported.
12428type InvalidImageFormatException struct {
12429	_            struct{}                  `type:"structure"`
12430	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
12431
12432	Message_ *string `locationName:"message" type:"string"`
12433}
12434
12435// String returns the string representation
12436func (s InvalidImageFormatException) String() string {
12437	return awsutil.Prettify(s)
12438}
12439
12440// GoString returns the string representation
12441func (s InvalidImageFormatException) GoString() string {
12442	return s.String()
12443}
12444
12445func newErrorInvalidImageFormatException(v protocol.ResponseMetadata) error {
12446	return &InvalidImageFormatException{
12447		RespMetadata: v,
12448	}
12449}
12450
12451// Code returns the exception type name.
12452func (s *InvalidImageFormatException) Code() string {
12453	return "InvalidImageFormatException"
12454}
12455
12456// Message returns the exception's message.
12457func (s *InvalidImageFormatException) Message() string {
12458	if s.Message_ != nil {
12459		return *s.Message_
12460	}
12461	return ""
12462}
12463
12464// OrigErr always returns nil, satisfies awserr.Error interface.
12465func (s *InvalidImageFormatException) OrigErr() error {
12466	return nil
12467}
12468
12469func (s *InvalidImageFormatException) Error() string {
12470	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
12471}
12472
12473// Status code returns the HTTP status code for the request's response error.
12474func (s *InvalidImageFormatException) StatusCode() int {
12475	return s.RespMetadata.StatusCode
12476}
12477
12478// RequestID returns the service's response RequestID for request.
12479func (s *InvalidImageFormatException) RequestID() string {
12480	return s.RespMetadata.RequestID
12481}
12482
12483// Pagination token in the request is not valid.
12484type InvalidPaginationTokenException struct {
12485	_            struct{}                  `type:"structure"`
12486	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
12487
12488	Message_ *string `locationName:"message" type:"string"`
12489}
12490
12491// String returns the string representation
12492func (s InvalidPaginationTokenException) String() string {
12493	return awsutil.Prettify(s)
12494}
12495
12496// GoString returns the string representation
12497func (s InvalidPaginationTokenException) GoString() string {
12498	return s.String()
12499}
12500
12501func newErrorInvalidPaginationTokenException(v protocol.ResponseMetadata) error {
12502	return &InvalidPaginationTokenException{
12503		RespMetadata: v,
12504	}
12505}
12506
12507// Code returns the exception type name.
12508func (s *InvalidPaginationTokenException) Code() string {
12509	return "InvalidPaginationTokenException"
12510}
12511
12512// Message returns the exception's message.
12513func (s *InvalidPaginationTokenException) Message() string {
12514	if s.Message_ != nil {
12515		return *s.Message_
12516	}
12517	return ""
12518}
12519
12520// OrigErr always returns nil, satisfies awserr.Error interface.
12521func (s *InvalidPaginationTokenException) OrigErr() error {
12522	return nil
12523}
12524
12525func (s *InvalidPaginationTokenException) Error() string {
12526	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
12527}
12528
12529// Status code returns the HTTP status code for the request's response error.
12530func (s *InvalidPaginationTokenException) StatusCode() int {
12531	return s.RespMetadata.StatusCode
12532}
12533
12534// RequestID returns the service's response RequestID for request.
12535func (s *InvalidPaginationTokenException) RequestID() string {
12536	return s.RespMetadata.RequestID
12537}
12538
12539// Input parameter violated a constraint. Validate your parameter before calling
12540// the API operation again.
12541type InvalidParameterException struct {
12542	_            struct{}                  `type:"structure"`
12543	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
12544
12545	Message_ *string `locationName:"message" type:"string"`
12546}
12547
12548// String returns the string representation
12549func (s InvalidParameterException) String() string {
12550	return awsutil.Prettify(s)
12551}
12552
12553// GoString returns the string representation
12554func (s InvalidParameterException) GoString() string {
12555	return s.String()
12556}
12557
12558func newErrorInvalidParameterException(v protocol.ResponseMetadata) error {
12559	return &InvalidParameterException{
12560		RespMetadata: v,
12561	}
12562}
12563
12564// Code returns the exception type name.
12565func (s *InvalidParameterException) Code() string {
12566	return "InvalidParameterException"
12567}
12568
12569// Message returns the exception's message.
12570func (s *InvalidParameterException) Message() string {
12571	if s.Message_ != nil {
12572		return *s.Message_
12573	}
12574	return ""
12575}
12576
12577// OrigErr always returns nil, satisfies awserr.Error interface.
12578func (s *InvalidParameterException) OrigErr() error {
12579	return nil
12580}
12581
12582func (s *InvalidParameterException) Error() string {
12583	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
12584}
12585
12586// Status code returns the HTTP status code for the request's response error.
12587func (s *InvalidParameterException) StatusCode() int {
12588	return s.RespMetadata.StatusCode
12589}
12590
12591// RequestID returns the service's response RequestID for request.
12592func (s *InvalidParameterException) RequestID() string {
12593	return s.RespMetadata.RequestID
12594}
12595
12596// Amazon Rekognition is unable to access the S3 object specified in the request.
12597type InvalidS3ObjectException struct {
12598	_            struct{}                  `type:"structure"`
12599	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
12600
12601	Message_ *string `locationName:"message" type:"string"`
12602}
12603
12604// String returns the string representation
12605func (s InvalidS3ObjectException) String() string {
12606	return awsutil.Prettify(s)
12607}
12608
12609// GoString returns the string representation
12610func (s InvalidS3ObjectException) GoString() string {
12611	return s.String()
12612}
12613
12614func newErrorInvalidS3ObjectException(v protocol.ResponseMetadata) error {
12615	return &InvalidS3ObjectException{
12616		RespMetadata: v,
12617	}
12618}
12619
12620// Code returns the exception type name.
12621func (s *InvalidS3ObjectException) Code() string {
12622	return "InvalidS3ObjectException"
12623}
12624
12625// Message returns the exception's message.
12626func (s *InvalidS3ObjectException) Message() string {
12627	if s.Message_ != nil {
12628		return *s.Message_
12629	}
12630	return ""
12631}
12632
12633// OrigErr always returns nil, satisfies awserr.Error interface.
12634func (s *InvalidS3ObjectException) OrigErr() error {
12635	return nil
12636}
12637
12638func (s *InvalidS3ObjectException) Error() string {
12639	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
12640}
12641
12642// Status code returns the HTTP status code for the request's response error.
12643func (s *InvalidS3ObjectException) StatusCode() int {
12644	return s.RespMetadata.StatusCode
12645}
12646
12647// RequestID returns the service's response RequestID for request.
12648func (s *InvalidS3ObjectException) RequestID() string {
12649	return s.RespMetadata.RequestID
12650}
12651
12652// The Kinesis data stream Amazon Rekognition to which the analysis results
12653// of a Amazon Rekognition stream processor are streamed. For more information,
12654// see CreateStreamProcessor in the Amazon Rekognition Developer Guide.
12655type KinesisDataStream struct {
12656	_ struct{} `type:"structure"`
12657
12658	// ARN of the output Amazon Kinesis Data Streams stream.
12659	Arn *string `type:"string"`
12660}
12661
12662// String returns the string representation
12663func (s KinesisDataStream) String() string {
12664	return awsutil.Prettify(s)
12665}
12666
12667// GoString returns the string representation
12668func (s KinesisDataStream) GoString() string {
12669	return s.String()
12670}
12671
12672// SetArn sets the Arn field's value.
12673func (s *KinesisDataStream) SetArn(v string) *KinesisDataStream {
12674	s.Arn = &v
12675	return s
12676}
12677
12678// Kinesis video stream stream that provides the source streaming video for
12679// a Amazon Rekognition Video stream processor. For more information, see CreateStreamProcessor
12680// in the Amazon Rekognition Developer Guide.
12681type KinesisVideoStream struct {
12682	_ struct{} `type:"structure"`
12683
12684	// ARN of the Kinesis video stream stream that streams the source video.
12685	Arn *string `type:"string"`
12686}
12687
12688// String returns the string representation
12689func (s KinesisVideoStream) String() string {
12690	return awsutil.Prettify(s)
12691}
12692
12693// GoString returns the string representation
12694func (s KinesisVideoStream) GoString() string {
12695	return s.String()
12696}
12697
12698// SetArn sets the Arn field's value.
12699func (s *KinesisVideoStream) SetArn(v string) *KinesisVideoStream {
12700	s.Arn = &v
12701	return s
12702}
12703
12704// Structure containing details about the detected label, including the name,
12705// detected instances, parent labels, and level of confidence.
12706type Label struct {
12707	_ struct{} `type:"structure"`
12708
12709	// Level of confidence.
12710	Confidence *float64 `type:"float"`
12711
12712	// If Label represents an object, Instances contains the bounding boxes for
12713	// each instance of the detected object. Bounding boxes are returned for common
12714	// object labels such as people, cars, furniture, apparel or pets.
12715	Instances []*Instance `type:"list"`
12716
12717	// The name (label) of the object or scene.
12718	Name *string `type:"string"`
12719
12720	// The parent labels for a label. The response includes all ancestor labels.
12721	Parents []*Parent `type:"list"`
12722}
12723
12724// String returns the string representation
12725func (s Label) String() string {
12726	return awsutil.Prettify(s)
12727}
12728
12729// GoString returns the string representation
12730func (s Label) GoString() string {
12731	return s.String()
12732}
12733
12734// SetConfidence sets the Confidence field's value.
12735func (s *Label) SetConfidence(v float64) *Label {
12736	s.Confidence = &v
12737	return s
12738}
12739
12740// SetInstances sets the Instances field's value.
12741func (s *Label) SetInstances(v []*Instance) *Label {
12742	s.Instances = v
12743	return s
12744}
12745
12746// SetName sets the Name field's value.
12747func (s *Label) SetName(v string) *Label {
12748	s.Name = &v
12749	return s
12750}
12751
12752// SetParents sets the Parents field's value.
12753func (s *Label) SetParents(v []*Parent) *Label {
12754	s.Parents = v
12755	return s
12756}
12757
12758// Information about a label detected in a video analysis request and the time
12759// the label was detected in the video.
12760type LabelDetection struct {
12761	_ struct{} `type:"structure"`
12762
12763	// Details about the detected label.
12764	Label *Label `type:"structure"`
12765
12766	// Time, in milliseconds from the start of the video, that the label was detected.
12767	Timestamp *int64 `type:"long"`
12768}
12769
12770// String returns the string representation
12771func (s LabelDetection) String() string {
12772	return awsutil.Prettify(s)
12773}
12774
12775// GoString returns the string representation
12776func (s LabelDetection) GoString() string {
12777	return s.String()
12778}
12779
12780// SetLabel sets the Label field's value.
12781func (s *LabelDetection) SetLabel(v *Label) *LabelDetection {
12782	s.Label = v
12783	return s
12784}
12785
12786// SetTimestamp sets the Timestamp field's value.
12787func (s *LabelDetection) SetTimestamp(v int64) *LabelDetection {
12788	s.Timestamp = &v
12789	return s
12790}
12791
12792// Indicates the location of the landmark on the face.
12793type Landmark struct {
12794	_ struct{} `type:"structure"`
12795
12796	// Type of landmark.
12797	Type *string `type:"string" enum:"LandmarkType"`
12798
12799	// The x-coordinate of the landmark expressed as a ratio of the width of the
12800	// image. The x-coordinate is measured from the left-side of the image. For
12801	// example, if the image is 700 pixels wide and the x-coordinate of the landmark
12802	// is at 350 pixels, this value is 0.5.
12803	X *float64 `type:"float"`
12804
12805	// The y-coordinate of the landmark expressed as a ratio of the height of the
12806	// image. The y-coordinate is measured from the top of the image. For example,
12807	// if the image height is 200 pixels and the y-coordinate of the landmark is
12808	// at 50 pixels, this value is 0.25.
12809	Y *float64 `type:"float"`
12810}
12811
12812// String returns the string representation
12813func (s Landmark) String() string {
12814	return awsutil.Prettify(s)
12815}
12816
12817// GoString returns the string representation
12818func (s Landmark) GoString() string {
12819	return s.String()
12820}
12821
12822// SetType sets the Type field's value.
12823func (s *Landmark) SetType(v string) *Landmark {
12824	s.Type = &v
12825	return s
12826}
12827
12828// SetX sets the X field's value.
12829func (s *Landmark) SetX(v float64) *Landmark {
12830	s.X = &v
12831	return s
12832}
12833
12834// SetY sets the Y field's value.
12835func (s *Landmark) SetY(v float64) *Landmark {
12836	s.Y = &v
12837	return s
12838}
12839
12840// An Amazon Rekognition service limit was exceeded. For example, if you start
12841// too many Amazon Rekognition Video jobs concurrently, calls to start operations
12842// (StartLabelDetection, for example) will raise a LimitExceededException exception
12843// (HTTP status code: 400) until the number of concurrently running jobs is
12844// below the Amazon Rekognition service limit.
12845type LimitExceededException struct {
12846	_            struct{}                  `type:"structure"`
12847	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
12848
12849	Message_ *string `locationName:"message" type:"string"`
12850}
12851
12852// String returns the string representation
12853func (s LimitExceededException) String() string {
12854	return awsutil.Prettify(s)
12855}
12856
12857// GoString returns the string representation
12858func (s LimitExceededException) GoString() string {
12859	return s.String()
12860}
12861
12862func newErrorLimitExceededException(v protocol.ResponseMetadata) error {
12863	return &LimitExceededException{
12864		RespMetadata: v,
12865	}
12866}
12867
12868// Code returns the exception type name.
12869func (s *LimitExceededException) Code() string {
12870	return "LimitExceededException"
12871}
12872
12873// Message returns the exception's message.
12874func (s *LimitExceededException) Message() string {
12875	if s.Message_ != nil {
12876		return *s.Message_
12877	}
12878	return ""
12879}
12880
12881// OrigErr always returns nil, satisfies awserr.Error interface.
12882func (s *LimitExceededException) OrigErr() error {
12883	return nil
12884}
12885
12886func (s *LimitExceededException) Error() string {
12887	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
12888}
12889
12890// Status code returns the HTTP status code for the request's response error.
12891func (s *LimitExceededException) StatusCode() int {
12892	return s.RespMetadata.StatusCode
12893}
12894
12895// RequestID returns the service's response RequestID for request.
12896func (s *LimitExceededException) RequestID() string {
12897	return s.RespMetadata.RequestID
12898}
12899
12900type ListCollectionsInput struct {
12901	_ struct{} `type:"structure"`
12902
12903	// Maximum number of collection IDs to return.
12904	MaxResults *int64 `type:"integer"`
12905
12906	// Pagination token from the previous response.
12907	NextToken *string `type:"string"`
12908}
12909
12910// String returns the string representation
12911func (s ListCollectionsInput) String() string {
12912	return awsutil.Prettify(s)
12913}
12914
12915// GoString returns the string representation
12916func (s ListCollectionsInput) GoString() string {
12917	return s.String()
12918}
12919
12920// SetMaxResults sets the MaxResults field's value.
12921func (s *ListCollectionsInput) SetMaxResults(v int64) *ListCollectionsInput {
12922	s.MaxResults = &v
12923	return s
12924}
12925
12926// SetNextToken sets the NextToken field's value.
12927func (s *ListCollectionsInput) SetNextToken(v string) *ListCollectionsInput {
12928	s.NextToken = &v
12929	return s
12930}
12931
12932type ListCollectionsOutput struct {
12933	_ struct{} `type:"structure"`
12934
12935	// An array of collection IDs.
12936	CollectionIds []*string `type:"list"`
12937
12938	// Version numbers of the face detection models associated with the collections
12939	// in the array CollectionIds. For example, the value of FaceModelVersions[2]
12940	// is the version number for the face detection model used by the collection
12941	// in CollectionId[2].
12942	FaceModelVersions []*string `type:"list"`
12943
12944	// If the result is truncated, the response provides a NextToken that you can
12945	// use in the subsequent request to fetch the next set of collection IDs.
12946	NextToken *string `type:"string"`
12947}
12948
12949// String returns the string representation
12950func (s ListCollectionsOutput) String() string {
12951	return awsutil.Prettify(s)
12952}
12953
12954// GoString returns the string representation
12955func (s ListCollectionsOutput) GoString() string {
12956	return s.String()
12957}
12958
12959// SetCollectionIds sets the CollectionIds field's value.
12960func (s *ListCollectionsOutput) SetCollectionIds(v []*string) *ListCollectionsOutput {
12961	s.CollectionIds = v
12962	return s
12963}
12964
12965// SetFaceModelVersions sets the FaceModelVersions field's value.
12966func (s *ListCollectionsOutput) SetFaceModelVersions(v []*string) *ListCollectionsOutput {
12967	s.FaceModelVersions = v
12968	return s
12969}
12970
12971// SetNextToken sets the NextToken field's value.
12972func (s *ListCollectionsOutput) SetNextToken(v string) *ListCollectionsOutput {
12973	s.NextToken = &v
12974	return s
12975}
12976
12977type ListFacesInput struct {
12978	_ struct{} `type:"structure"`
12979
12980	// ID of the collection from which to list the faces.
12981	//
12982	// CollectionId is a required field
12983	CollectionId *string `min:"1" type:"string" required:"true"`
12984
12985	// Maximum number of faces to return.
12986	MaxResults *int64 `type:"integer"`
12987
12988	// If the previous response was incomplete (because there is more data to retrieve),
12989	// Amazon Rekognition returns a pagination token in the response. You can use
12990	// this pagination token to retrieve the next set of faces.
12991	NextToken *string `type:"string"`
12992}
12993
12994// String returns the string representation
12995func (s ListFacesInput) String() string {
12996	return awsutil.Prettify(s)
12997}
12998
12999// GoString returns the string representation
13000func (s ListFacesInput) GoString() string {
13001	return s.String()
13002}
13003
13004// Validate inspects the fields of the type to determine if they are valid.
13005func (s *ListFacesInput) Validate() error {
13006	invalidParams := request.ErrInvalidParams{Context: "ListFacesInput"}
13007	if s.CollectionId == nil {
13008		invalidParams.Add(request.NewErrParamRequired("CollectionId"))
13009	}
13010	if s.CollectionId != nil && len(*s.CollectionId) < 1 {
13011		invalidParams.Add(request.NewErrParamMinLen("CollectionId", 1))
13012	}
13013
13014	if invalidParams.Len() > 0 {
13015		return invalidParams
13016	}
13017	return nil
13018}
13019
13020// SetCollectionId sets the CollectionId field's value.
13021func (s *ListFacesInput) SetCollectionId(v string) *ListFacesInput {
13022	s.CollectionId = &v
13023	return s
13024}
13025
13026// SetMaxResults sets the MaxResults field's value.
13027func (s *ListFacesInput) SetMaxResults(v int64) *ListFacesInput {
13028	s.MaxResults = &v
13029	return s
13030}
13031
13032// SetNextToken sets the NextToken field's value.
13033func (s *ListFacesInput) SetNextToken(v string) *ListFacesInput {
13034	s.NextToken = &v
13035	return s
13036}
13037
13038type ListFacesOutput struct {
13039	_ struct{} `type:"structure"`
13040
13041	// Version number of the face detection model associated with the input collection
13042	// (CollectionId).
13043	FaceModelVersion *string `type:"string"`
13044
13045	// An array of Face objects.
13046	Faces []*Face `type:"list"`
13047
13048	// If the response is truncated, Amazon Rekognition returns this token that
13049	// you can use in the subsequent request to retrieve the next set of faces.
13050	NextToken *string `type:"string"`
13051}
13052
13053// String returns the string representation
13054func (s ListFacesOutput) String() string {
13055	return awsutil.Prettify(s)
13056}
13057
13058// GoString returns the string representation
13059func (s ListFacesOutput) GoString() string {
13060	return s.String()
13061}
13062
13063// SetFaceModelVersion sets the FaceModelVersion field's value.
13064func (s *ListFacesOutput) SetFaceModelVersion(v string) *ListFacesOutput {
13065	s.FaceModelVersion = &v
13066	return s
13067}
13068
13069// SetFaces sets the Faces field's value.
13070func (s *ListFacesOutput) SetFaces(v []*Face) *ListFacesOutput {
13071	s.Faces = v
13072	return s
13073}
13074
13075// SetNextToken sets the NextToken field's value.
13076func (s *ListFacesOutput) SetNextToken(v string) *ListFacesOutput {
13077	s.NextToken = &v
13078	return s
13079}
13080
13081type ListStreamProcessorsInput struct {
13082	_ struct{} `type:"structure"`
13083
13084	// Maximum number of stream processors you want Amazon Rekognition Video to
13085	// return in the response. The default is 1000.
13086	MaxResults *int64 `min:"1" type:"integer"`
13087
13088	// If the previous response was incomplete (because there are more stream processors
13089	// to retrieve), Amazon Rekognition Video returns a pagination token in the
13090	// response. You can use this pagination token to retrieve the next set of stream
13091	// processors.
13092	NextToken *string `type:"string"`
13093}
13094
13095// String returns the string representation
13096func (s ListStreamProcessorsInput) String() string {
13097	return awsutil.Prettify(s)
13098}
13099
13100// GoString returns the string representation
13101func (s ListStreamProcessorsInput) GoString() string {
13102	return s.String()
13103}
13104
13105// Validate inspects the fields of the type to determine if they are valid.
13106func (s *ListStreamProcessorsInput) Validate() error {
13107	invalidParams := request.ErrInvalidParams{Context: "ListStreamProcessorsInput"}
13108	if s.MaxResults != nil && *s.MaxResults < 1 {
13109		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
13110	}
13111
13112	if invalidParams.Len() > 0 {
13113		return invalidParams
13114	}
13115	return nil
13116}
13117
13118// SetMaxResults sets the MaxResults field's value.
13119func (s *ListStreamProcessorsInput) SetMaxResults(v int64) *ListStreamProcessorsInput {
13120	s.MaxResults = &v
13121	return s
13122}
13123
13124// SetNextToken sets the NextToken field's value.
13125func (s *ListStreamProcessorsInput) SetNextToken(v string) *ListStreamProcessorsInput {
13126	s.NextToken = &v
13127	return s
13128}
13129
13130type ListStreamProcessorsOutput struct {
13131	_ struct{} `type:"structure"`
13132
13133	// If the response is truncated, Amazon Rekognition Video returns this token
13134	// that you can use in the subsequent request to retrieve the next set of stream
13135	// processors.
13136	NextToken *string `type:"string"`
13137
13138	// List of stream processors that you have created.
13139	StreamProcessors []*StreamProcessor `type:"list"`
13140}
13141
13142// String returns the string representation
13143func (s ListStreamProcessorsOutput) String() string {
13144	return awsutil.Prettify(s)
13145}
13146
13147// GoString returns the string representation
13148func (s ListStreamProcessorsOutput) GoString() string {
13149	return s.String()
13150}
13151
13152// SetNextToken sets the NextToken field's value.
13153func (s *ListStreamProcessorsOutput) SetNextToken(v string) *ListStreamProcessorsOutput {
13154	s.NextToken = &v
13155	return s
13156}
13157
13158// SetStreamProcessors sets the StreamProcessors field's value.
13159func (s *ListStreamProcessorsOutput) SetStreamProcessors(v []*StreamProcessor) *ListStreamProcessorsOutput {
13160	s.StreamProcessors = v
13161	return s
13162}
13163
13164type ListTagsForResourceInput struct {
13165	_ struct{} `type:"structure"`
13166
13167	// Amazon Resource Name (ARN) of the model, collection, or stream processor
13168	// that contains the tags that you want a list of.
13169	//
13170	// ResourceArn is a required field
13171	ResourceArn *string `min:"20" type:"string" required:"true"`
13172}
13173
13174// String returns the string representation
13175func (s ListTagsForResourceInput) String() string {
13176	return awsutil.Prettify(s)
13177}
13178
13179// GoString returns the string representation
13180func (s ListTagsForResourceInput) GoString() string {
13181	return s.String()
13182}
13183
13184// Validate inspects the fields of the type to determine if they are valid.
13185func (s *ListTagsForResourceInput) Validate() error {
13186	invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"}
13187	if s.ResourceArn == nil {
13188		invalidParams.Add(request.NewErrParamRequired("ResourceArn"))
13189	}
13190	if s.ResourceArn != nil && len(*s.ResourceArn) < 20 {
13191		invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 20))
13192	}
13193
13194	if invalidParams.Len() > 0 {
13195		return invalidParams
13196	}
13197	return nil
13198}
13199
13200// SetResourceArn sets the ResourceArn field's value.
13201func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResourceInput {
13202	s.ResourceArn = &v
13203	return s
13204}
13205
13206type ListTagsForResourceOutput struct {
13207	_ struct{} `type:"structure"`
13208
13209	// A list of key-value tags assigned to the resource.
13210	Tags map[string]*string `type:"map"`
13211}
13212
13213// String returns the string representation
13214func (s ListTagsForResourceOutput) String() string {
13215	return awsutil.Prettify(s)
13216}
13217
13218// GoString returns the string representation
13219func (s ListTagsForResourceOutput) GoString() string {
13220	return s.String()
13221}
13222
13223// SetTags sets the Tags field's value.
13224func (s *ListTagsForResourceOutput) SetTags(v map[string]*string) *ListTagsForResourceOutput {
13225	s.Tags = v
13226	return s
13227}
13228
13229// Provides information about a single type of unsafe content found in an image
13230// or video. Each type of moderated content has a label within a hierarchical
13231// taxonomy. For more information, see Detecting Unsafe Content in the Amazon
13232// Rekognition Developer Guide.
13233type ModerationLabel struct {
13234	_ struct{} `type:"structure"`
13235
13236	// Specifies the confidence that Amazon Rekognition has that the label has been
13237	// correctly identified.
13238	//
13239	// If you don't specify the MinConfidence parameter in the call to DetectModerationLabels,
13240	// the operation returns labels with a confidence value greater than or equal
13241	// to 50 percent.
13242	Confidence *float64 `type:"float"`
13243
13244	// The label name for the type of unsafe content detected in the image.
13245	Name *string `type:"string"`
13246
13247	// The name for the parent label. Labels at the top level of the hierarchy have
13248	// the parent label "".
13249	ParentName *string `type:"string"`
13250}
13251
13252// String returns the string representation
13253func (s ModerationLabel) String() string {
13254	return awsutil.Prettify(s)
13255}
13256
13257// GoString returns the string representation
13258func (s ModerationLabel) GoString() string {
13259	return s.String()
13260}
13261
13262// SetConfidence sets the Confidence field's value.
13263func (s *ModerationLabel) SetConfidence(v float64) *ModerationLabel {
13264	s.Confidence = &v
13265	return s
13266}
13267
13268// SetName sets the Name field's value.
13269func (s *ModerationLabel) SetName(v string) *ModerationLabel {
13270	s.Name = &v
13271	return s
13272}
13273
13274// SetParentName sets the ParentName field's value.
13275func (s *ModerationLabel) SetParentName(v string) *ModerationLabel {
13276	s.ParentName = &v
13277	return s
13278}
13279
13280// Indicates whether or not the mouth on the face is open, and the confidence
13281// level in the determination.
13282type MouthOpen struct {
13283	_ struct{} `type:"structure"`
13284
13285	// Level of confidence in the determination.
13286	Confidence *float64 `type:"float"`
13287
13288	// Boolean value that indicates whether the mouth on the face is open or not.
13289	Value *bool `type:"boolean"`
13290}
13291
13292// String returns the string representation
13293func (s MouthOpen) String() string {
13294	return awsutil.Prettify(s)
13295}
13296
13297// GoString returns the string representation
13298func (s MouthOpen) GoString() string {
13299	return s.String()
13300}
13301
13302// SetConfidence sets the Confidence field's value.
13303func (s *MouthOpen) SetConfidence(v float64) *MouthOpen {
13304	s.Confidence = &v
13305	return s
13306}
13307
13308// SetValue sets the Value field's value.
13309func (s *MouthOpen) SetValue(v bool) *MouthOpen {
13310	s.Value = &v
13311	return s
13312}
13313
13314// Indicates whether or not the face has a mustache, and the confidence level
13315// in the determination.
13316type Mustache struct {
13317	_ struct{} `type:"structure"`
13318
13319	// Level of confidence in the determination.
13320	Confidence *float64 `type:"float"`
13321
13322	// Boolean value that indicates whether the face has mustache or not.
13323	Value *bool `type:"boolean"`
13324}
13325
13326// String returns the string representation
13327func (s Mustache) String() string {
13328	return awsutil.Prettify(s)
13329}
13330
13331// GoString returns the string representation
13332func (s Mustache) GoString() string {
13333	return s.String()
13334}
13335
13336// SetConfidence sets the Confidence field's value.
13337func (s *Mustache) SetConfidence(v float64) *Mustache {
13338	s.Confidence = &v
13339	return s
13340}
13341
13342// SetValue sets the Value field's value.
13343func (s *Mustache) SetValue(v bool) *Mustache {
13344	s.Value = &v
13345	return s
13346}
13347
13348// The Amazon Simple Notification Service topic to which Amazon Rekognition
13349// publishes the completion status of a video analysis operation. For more information,
13350// see api-video.
13351type NotificationChannel struct {
13352	_ struct{} `type:"structure"`
13353
13354	// The ARN of an IAM role that gives Amazon Rekognition publishing permissions
13355	// to the Amazon SNS topic.
13356	//
13357	// RoleArn is a required field
13358	RoleArn *string `type:"string" required:"true"`
13359
13360	// The Amazon SNS topic to which Amazon Rekognition to posts the completion
13361	// status.
13362	//
13363	// SNSTopicArn is a required field
13364	SNSTopicArn *string `type:"string" required:"true"`
13365}
13366
13367// String returns the string representation
13368func (s NotificationChannel) String() string {
13369	return awsutil.Prettify(s)
13370}
13371
13372// GoString returns the string representation
13373func (s NotificationChannel) GoString() string {
13374	return s.String()
13375}
13376
13377// Validate inspects the fields of the type to determine if they are valid.
13378func (s *NotificationChannel) Validate() error {
13379	invalidParams := request.ErrInvalidParams{Context: "NotificationChannel"}
13380	if s.RoleArn == nil {
13381		invalidParams.Add(request.NewErrParamRequired("RoleArn"))
13382	}
13383	if s.SNSTopicArn == nil {
13384		invalidParams.Add(request.NewErrParamRequired("SNSTopicArn"))
13385	}
13386
13387	if invalidParams.Len() > 0 {
13388		return invalidParams
13389	}
13390	return nil
13391}
13392
13393// SetRoleArn sets the RoleArn field's value.
13394func (s *NotificationChannel) SetRoleArn(v string) *NotificationChannel {
13395	s.RoleArn = &v
13396	return s
13397}
13398
13399// SetSNSTopicArn sets the SNSTopicArn field's value.
13400func (s *NotificationChannel) SetSNSTopicArn(v string) *NotificationChannel {
13401	s.SNSTopicArn = &v
13402	return s
13403}
13404
13405// The S3 bucket and folder location where training output is placed.
13406type OutputConfig struct {
13407	_ struct{} `type:"structure"`
13408
13409	// The S3 bucket where training output is placed.
13410	S3Bucket *string `min:"3" type:"string"`
13411
13412	// The prefix applied to the training output files.
13413	S3KeyPrefix *string `type:"string"`
13414}
13415
13416// String returns the string representation
13417func (s OutputConfig) String() string {
13418	return awsutil.Prettify(s)
13419}
13420
13421// GoString returns the string representation
13422func (s OutputConfig) GoString() string {
13423	return s.String()
13424}
13425
13426// Validate inspects the fields of the type to determine if they are valid.
13427func (s *OutputConfig) Validate() error {
13428	invalidParams := request.ErrInvalidParams{Context: "OutputConfig"}
13429	if s.S3Bucket != nil && len(*s.S3Bucket) < 3 {
13430		invalidParams.Add(request.NewErrParamMinLen("S3Bucket", 3))
13431	}
13432
13433	if invalidParams.Len() > 0 {
13434		return invalidParams
13435	}
13436	return nil
13437}
13438
13439// SetS3Bucket sets the S3Bucket field's value.
13440func (s *OutputConfig) SetS3Bucket(v string) *OutputConfig {
13441	s.S3Bucket = &v
13442	return s
13443}
13444
13445// SetS3KeyPrefix sets the S3KeyPrefix field's value.
13446func (s *OutputConfig) SetS3KeyPrefix(v string) *OutputConfig {
13447	s.S3KeyPrefix = &v
13448	return s
13449}
13450
13451// A parent label for a label. A label can have 0, 1, or more parents.
13452type Parent struct {
13453	_ struct{} `type:"structure"`
13454
13455	// The name of the parent label.
13456	Name *string `type:"string"`
13457}
13458
13459// String returns the string representation
13460func (s Parent) String() string {
13461	return awsutil.Prettify(s)
13462}
13463
13464// GoString returns the string representation
13465func (s Parent) GoString() string {
13466	return s.String()
13467}
13468
13469// SetName sets the Name field's value.
13470func (s *Parent) SetName(v string) *Parent {
13471	s.Name = &v
13472	return s
13473}
13474
13475// Details about a person detected in a video analysis request.
13476type PersonDetail struct {
13477	_ struct{} `type:"structure"`
13478
13479	// Bounding box around the detected person.
13480	BoundingBox *BoundingBox `type:"structure"`
13481
13482	// Face details for the detected person.
13483	Face *FaceDetail `type:"structure"`
13484
13485	// Identifier for the person detected person within a video. Use to keep track
13486	// of the person throughout the video. The identifier is not stored by Amazon
13487	// Rekognition.
13488	Index *int64 `type:"long"`
13489}
13490
13491// String returns the string representation
13492func (s PersonDetail) String() string {
13493	return awsutil.Prettify(s)
13494}
13495
13496// GoString returns the string representation
13497func (s PersonDetail) GoString() string {
13498	return s.String()
13499}
13500
13501// SetBoundingBox sets the BoundingBox field's value.
13502func (s *PersonDetail) SetBoundingBox(v *BoundingBox) *PersonDetail {
13503	s.BoundingBox = v
13504	return s
13505}
13506
13507// SetFace sets the Face field's value.
13508func (s *PersonDetail) SetFace(v *FaceDetail) *PersonDetail {
13509	s.Face = v
13510	return s
13511}
13512
13513// SetIndex sets the Index field's value.
13514func (s *PersonDetail) SetIndex(v int64) *PersonDetail {
13515	s.Index = &v
13516	return s
13517}
13518
13519// Details and path tracking information for a single time a person's path is
13520// tracked in a video. Amazon Rekognition operations that track people's paths
13521// return an array of PersonDetection objects with elements for each time a
13522// person's path is tracked in a video.
13523//
13524// For more information, see GetPersonTracking in the Amazon Rekognition Developer
13525// Guide.
13526type PersonDetection struct {
13527	_ struct{} `type:"structure"`
13528
13529	// Details about a person whose path was tracked in a video.
13530	Person *PersonDetail `type:"structure"`
13531
13532	// The time, in milliseconds from the start of the video, that the person's
13533	// path was tracked.
13534	Timestamp *int64 `type:"long"`
13535}
13536
13537// String returns the string representation
13538func (s PersonDetection) String() string {
13539	return awsutil.Prettify(s)
13540}
13541
13542// GoString returns the string representation
13543func (s PersonDetection) GoString() string {
13544	return s.String()
13545}
13546
13547// SetPerson sets the Person field's value.
13548func (s *PersonDetection) SetPerson(v *PersonDetail) *PersonDetection {
13549	s.Person = v
13550	return s
13551}
13552
13553// SetTimestamp sets the Timestamp field's value.
13554func (s *PersonDetection) SetTimestamp(v int64) *PersonDetection {
13555	s.Timestamp = &v
13556	return s
13557}
13558
13559// Information about a person whose face matches a face(s) in an Amazon Rekognition
13560// collection. Includes information about the faces in the Amazon Rekognition
13561// collection (FaceMatch), information about the person (PersonDetail), and
13562// the time stamp for when the person was detected in a video. An array of PersonMatch
13563// objects is returned by GetFaceSearch.
13564type PersonMatch struct {
13565	_ struct{} `type:"structure"`
13566
13567	// Information about the faces in the input collection that match the face of
13568	// a person in the video.
13569	FaceMatches []*FaceMatch `type:"list"`
13570
13571	// Information about the matched person.
13572	Person *PersonDetail `type:"structure"`
13573
13574	// The time, in milliseconds from the beginning of the video, that the person
13575	// was matched in the video.
13576	Timestamp *int64 `type:"long"`
13577}
13578
13579// String returns the string representation
13580func (s PersonMatch) String() string {
13581	return awsutil.Prettify(s)
13582}
13583
13584// GoString returns the string representation
13585func (s PersonMatch) GoString() string {
13586	return s.String()
13587}
13588
13589// SetFaceMatches sets the FaceMatches field's value.
13590func (s *PersonMatch) SetFaceMatches(v []*FaceMatch) *PersonMatch {
13591	s.FaceMatches = v
13592	return s
13593}
13594
13595// SetPerson sets the Person field's value.
13596func (s *PersonMatch) SetPerson(v *PersonDetail) *PersonMatch {
13597	s.Person = v
13598	return s
13599}
13600
13601// SetTimestamp sets the Timestamp field's value.
13602func (s *PersonMatch) SetTimestamp(v int64) *PersonMatch {
13603	s.Timestamp = &v
13604	return s
13605}
13606
13607// The X and Y coordinates of a point on an image. The X and Y values returned
13608// are ratios of the overall image size. For example, if the input image is
13609// 700x200 and the operation returns X=0.5 and Y=0.25, then the point is at
13610// the (350,50) pixel coordinate on the image.
13611//
13612// An array of Point objects, Polygon, is returned by DetectText and by DetectCustomLabels.
13613// Polygon represents a fine-grained polygon around a detected item. For more
13614// information, see Geometry in the Amazon Rekognition Developer Guide.
13615type Point struct {
13616	_ struct{} `type:"structure"`
13617
13618	// The value of the X coordinate for a point on a Polygon.
13619	X *float64 `type:"float"`
13620
13621	// The value of the Y coordinate for a point on a Polygon.
13622	Y *float64 `type:"float"`
13623}
13624
13625// String returns the string representation
13626func (s Point) String() string {
13627	return awsutil.Prettify(s)
13628}
13629
13630// GoString returns the string representation
13631func (s Point) GoString() string {
13632	return s.String()
13633}
13634
13635// SetX sets the X field's value.
13636func (s *Point) SetX(v float64) *Point {
13637	s.X = &v
13638	return s
13639}
13640
13641// SetY sets the Y field's value.
13642func (s *Point) SetY(v float64) *Point {
13643	s.Y = &v
13644	return s
13645}
13646
13647// Indicates the pose of the face as determined by its pitch, roll, and yaw.
13648type Pose struct {
13649	_ struct{} `type:"structure"`
13650
13651	// Value representing the face rotation on the pitch axis.
13652	Pitch *float64 `type:"float"`
13653
13654	// Value representing the face rotation on the roll axis.
13655	Roll *float64 `type:"float"`
13656
13657	// Value representing the face rotation on the yaw axis.
13658	Yaw *float64 `type:"float"`
13659}
13660
13661// String returns the string representation
13662func (s Pose) String() string {
13663	return awsutil.Prettify(s)
13664}
13665
13666// GoString returns the string representation
13667func (s Pose) GoString() string {
13668	return s.String()
13669}
13670
13671// SetPitch sets the Pitch field's value.
13672func (s *Pose) SetPitch(v float64) *Pose {
13673	s.Pitch = &v
13674	return s
13675}
13676
13677// SetRoll sets the Roll field's value.
13678func (s *Pose) SetRoll(v float64) *Pose {
13679	s.Roll = &v
13680	return s
13681}
13682
13683// SetYaw sets the Yaw field's value.
13684func (s *Pose) SetYaw(v float64) *Pose {
13685	s.Yaw = &v
13686	return s
13687}
13688
13689// A description of a Amazon Rekognition Custom Labels project.
13690type ProjectDescription struct {
13691	_ struct{} `type:"structure"`
13692
13693	// The Unix timestamp for the date and time that the project was created.
13694	CreationTimestamp *time.Time `type:"timestamp"`
13695
13696	// The Amazon Resource Name (ARN) of the project.
13697	ProjectArn *string `min:"20" type:"string"`
13698
13699	// The current status of the project.
13700	Status *string `type:"string" enum:"ProjectStatus"`
13701}
13702
13703// String returns the string representation
13704func (s ProjectDescription) String() string {
13705	return awsutil.Prettify(s)
13706}
13707
13708// GoString returns the string representation
13709func (s ProjectDescription) GoString() string {
13710	return s.String()
13711}
13712
13713// SetCreationTimestamp sets the CreationTimestamp field's value.
13714func (s *ProjectDescription) SetCreationTimestamp(v time.Time) *ProjectDescription {
13715	s.CreationTimestamp = &v
13716	return s
13717}
13718
13719// SetProjectArn sets the ProjectArn field's value.
13720func (s *ProjectDescription) SetProjectArn(v string) *ProjectDescription {
13721	s.ProjectArn = &v
13722	return s
13723}
13724
13725// SetStatus sets the Status field's value.
13726func (s *ProjectDescription) SetStatus(v string) *ProjectDescription {
13727	s.Status = &v
13728	return s
13729}
13730
13731// The description of a version of a model.
13732type ProjectVersionDescription struct {
13733	_ struct{} `type:"structure"`
13734
13735	// The duration, in seconds, that the model version has been billed for training.
13736	// This value is only returned if the model version has been successfully trained.
13737	BillableTrainingTimeInSeconds *int64 `type:"long"`
13738
13739	// The Unix datetime for the date and time that training started.
13740	CreationTimestamp *time.Time `type:"timestamp"`
13741
13742	// The training results. EvaluationResult is only returned if training is successful.
13743	EvaluationResult *EvaluationResult `type:"structure"`
13744
13745	// The location of the summary manifest. The summary manifest provides aggregate
13746	// data validation results for the training and test datasets.
13747	ManifestSummary *GroundTruthManifest `type:"structure"`
13748
13749	// The minimum number of inference units used by the model. For more information,
13750	// see StartProjectVersion.
13751	MinInferenceUnits *int64 `min:"1" type:"integer"`
13752
13753	// The location where training results are saved.
13754	OutputConfig *OutputConfig `type:"structure"`
13755
13756	// The Amazon Resource Name (ARN) of the model version.
13757	ProjectVersionArn *string `min:"20" type:"string"`
13758
13759	// The current status of the model version.
13760	Status *string `type:"string" enum:"ProjectVersionStatus"`
13761
13762	// A descriptive message for an error or warning that occurred.
13763	StatusMessage *string `type:"string"`
13764
13765	// Contains information about the testing results.
13766	TestingDataResult *TestingDataResult `type:"structure"`
13767
13768	// Contains information about the training results.
13769	TrainingDataResult *TrainingDataResult `type:"structure"`
13770
13771	// The Unix date and time that training of the model ended.
13772	TrainingEndTimestamp *time.Time `type:"timestamp"`
13773}
13774
13775// String returns the string representation
13776func (s ProjectVersionDescription) String() string {
13777	return awsutil.Prettify(s)
13778}
13779
13780// GoString returns the string representation
13781func (s ProjectVersionDescription) GoString() string {
13782	return s.String()
13783}
13784
13785// SetBillableTrainingTimeInSeconds sets the BillableTrainingTimeInSeconds field's value.
13786func (s *ProjectVersionDescription) SetBillableTrainingTimeInSeconds(v int64) *ProjectVersionDescription {
13787	s.BillableTrainingTimeInSeconds = &v
13788	return s
13789}
13790
13791// SetCreationTimestamp sets the CreationTimestamp field's value.
13792func (s *ProjectVersionDescription) SetCreationTimestamp(v time.Time) *ProjectVersionDescription {
13793	s.CreationTimestamp = &v
13794	return s
13795}
13796
13797// SetEvaluationResult sets the EvaluationResult field's value.
13798func (s *ProjectVersionDescription) SetEvaluationResult(v *EvaluationResult) *ProjectVersionDescription {
13799	s.EvaluationResult = v
13800	return s
13801}
13802
13803// SetManifestSummary sets the ManifestSummary field's value.
13804func (s *ProjectVersionDescription) SetManifestSummary(v *GroundTruthManifest) *ProjectVersionDescription {
13805	s.ManifestSummary = v
13806	return s
13807}
13808
13809// SetMinInferenceUnits sets the MinInferenceUnits field's value.
13810func (s *ProjectVersionDescription) SetMinInferenceUnits(v int64) *ProjectVersionDescription {
13811	s.MinInferenceUnits = &v
13812	return s
13813}
13814
13815// SetOutputConfig sets the OutputConfig field's value.
13816func (s *ProjectVersionDescription) SetOutputConfig(v *OutputConfig) *ProjectVersionDescription {
13817	s.OutputConfig = v
13818	return s
13819}
13820
13821// SetProjectVersionArn sets the ProjectVersionArn field's value.
13822func (s *ProjectVersionDescription) SetProjectVersionArn(v string) *ProjectVersionDescription {
13823	s.ProjectVersionArn = &v
13824	return s
13825}
13826
13827// SetStatus sets the Status field's value.
13828func (s *ProjectVersionDescription) SetStatus(v string) *ProjectVersionDescription {
13829	s.Status = &v
13830	return s
13831}
13832
13833// SetStatusMessage sets the StatusMessage field's value.
13834func (s *ProjectVersionDescription) SetStatusMessage(v string) *ProjectVersionDescription {
13835	s.StatusMessage = &v
13836	return s
13837}
13838
13839// SetTestingDataResult sets the TestingDataResult field's value.
13840func (s *ProjectVersionDescription) SetTestingDataResult(v *TestingDataResult) *ProjectVersionDescription {
13841	s.TestingDataResult = v
13842	return s
13843}
13844
13845// SetTrainingDataResult sets the TrainingDataResult field's value.
13846func (s *ProjectVersionDescription) SetTrainingDataResult(v *TrainingDataResult) *ProjectVersionDescription {
13847	s.TrainingDataResult = v
13848	return s
13849}
13850
13851// SetTrainingEndTimestamp sets the TrainingEndTimestamp field's value.
13852func (s *ProjectVersionDescription) SetTrainingEndTimestamp(v time.Time) *ProjectVersionDescription {
13853	s.TrainingEndTimestamp = &v
13854	return s
13855}
13856
13857// Information about a body part detected by DetectProtectiveEquipment that
13858// contains PPE. An array of ProtectiveEquipmentBodyPart objects is returned
13859// for each person detected by DetectProtectiveEquipment.
13860type ProtectiveEquipmentBodyPart struct {
13861	_ struct{} `type:"structure"`
13862
13863	// The confidence that Amazon Rekognition has in the detection accuracy of the
13864	// detected body part.
13865	Confidence *float64 `type:"float"`
13866
13867	// An array of Personal Protective Equipment items detected around a body part.
13868	EquipmentDetections []*EquipmentDetection `type:"list"`
13869
13870	// The detected body part.
13871	Name *string `type:"string" enum:"BodyPart"`
13872}
13873
13874// String returns the string representation
13875func (s ProtectiveEquipmentBodyPart) String() string {
13876	return awsutil.Prettify(s)
13877}
13878
13879// GoString returns the string representation
13880func (s ProtectiveEquipmentBodyPart) GoString() string {
13881	return s.String()
13882}
13883
13884// SetConfidence sets the Confidence field's value.
13885func (s *ProtectiveEquipmentBodyPart) SetConfidence(v float64) *ProtectiveEquipmentBodyPart {
13886	s.Confidence = &v
13887	return s
13888}
13889
13890// SetEquipmentDetections sets the EquipmentDetections field's value.
13891func (s *ProtectiveEquipmentBodyPart) SetEquipmentDetections(v []*EquipmentDetection) *ProtectiveEquipmentBodyPart {
13892	s.EquipmentDetections = v
13893	return s
13894}
13895
13896// SetName sets the Name field's value.
13897func (s *ProtectiveEquipmentBodyPart) SetName(v string) *ProtectiveEquipmentBodyPart {
13898	s.Name = &v
13899	return s
13900}
13901
13902// A person detected by a call to DetectProtectiveEquipment. The API returns
13903// all persons detected in the input image in an array of ProtectiveEquipmentPerson
13904// objects.
13905type ProtectiveEquipmentPerson struct {
13906	_ struct{} `type:"structure"`
13907
13908	// An array of body parts detected on a person's body (including body parts
13909	// without PPE).
13910	BodyParts []*ProtectiveEquipmentBodyPart `type:"list"`
13911
13912	// A bounding box around the detected person.
13913	BoundingBox *BoundingBox `type:"structure"`
13914
13915	// The confidence that Amazon Rekognition has that the bounding box contains
13916	// a person.
13917	Confidence *float64 `type:"float"`
13918
13919	// The identifier for the detected person. The identifier is only unique for
13920	// a single call to DetectProtectiveEquipment.
13921	Id *int64 `type:"integer"`
13922}
13923
13924// String returns the string representation
13925func (s ProtectiveEquipmentPerson) String() string {
13926	return awsutil.Prettify(s)
13927}
13928
13929// GoString returns the string representation
13930func (s ProtectiveEquipmentPerson) GoString() string {
13931	return s.String()
13932}
13933
13934// SetBodyParts sets the BodyParts field's value.
13935func (s *ProtectiveEquipmentPerson) SetBodyParts(v []*ProtectiveEquipmentBodyPart) *ProtectiveEquipmentPerson {
13936	s.BodyParts = v
13937	return s
13938}
13939
13940// SetBoundingBox sets the BoundingBox field's value.
13941func (s *ProtectiveEquipmentPerson) SetBoundingBox(v *BoundingBox) *ProtectiveEquipmentPerson {
13942	s.BoundingBox = v
13943	return s
13944}
13945
13946// SetConfidence sets the Confidence field's value.
13947func (s *ProtectiveEquipmentPerson) SetConfidence(v float64) *ProtectiveEquipmentPerson {
13948	s.Confidence = &v
13949	return s
13950}
13951
13952// SetId sets the Id field's value.
13953func (s *ProtectiveEquipmentPerson) SetId(v int64) *ProtectiveEquipmentPerson {
13954	s.Id = &v
13955	return s
13956}
13957
13958// Specifies summary attributes to return from a call to DetectProtectiveEquipment.
13959// You can specify which types of PPE to summarize. You can also specify a minimum
13960// confidence value for detections. Summary information is returned in the Summary
13961// (ProtectiveEquipmentSummary) field of the response from DetectProtectiveEquipment.
13962// The summary includes which persons in an image were detected wearing the
13963// requested types of person protective equipment (PPE), which persons were
13964// detected as not wearing PPE, and the persons in which a determination could
13965// not be made. For more information, see ProtectiveEquipmentSummary.
13966type ProtectiveEquipmentSummarizationAttributes struct {
13967	_ struct{} `type:"structure"`
13968
13969	// The minimum confidence level for which you want summary information. The
13970	// confidence level applies to person detection, body part detection, equipment
13971	// detection, and body part coverage. Amazon Rekognition doesn't return summary
13972	// information with a confidence than this specified value. There isn't a default
13973	// value.
13974	//
13975	// Specify a MinConfidence value that is between 50-100% as DetectProtectiveEquipment
13976	// returns predictions only where the detection confidence is between 50% -
13977	// 100%. If you specify a value that is less than 50%, the results are the same
13978	// specifying a value of 50%.
13979	//
13980	// MinConfidence is a required field
13981	MinConfidence *float64 `type:"float" required:"true"`
13982
13983	// An array of personal protective equipment types for which you want summary
13984	// information. If a person is detected wearing a required requipment type,
13985	// the person's ID is added to the PersonsWithRequiredEquipment array field
13986	// returned in ProtectiveEquipmentSummary by DetectProtectiveEquipment.
13987	//
13988	// RequiredEquipmentTypes is a required field
13989	RequiredEquipmentTypes []*string `type:"list" required:"true"`
13990}
13991
13992// String returns the string representation
13993func (s ProtectiveEquipmentSummarizationAttributes) String() string {
13994	return awsutil.Prettify(s)
13995}
13996
13997// GoString returns the string representation
13998func (s ProtectiveEquipmentSummarizationAttributes) GoString() string {
13999	return s.String()
14000}
14001
14002// Validate inspects the fields of the type to determine if they are valid.
14003func (s *ProtectiveEquipmentSummarizationAttributes) Validate() error {
14004	invalidParams := request.ErrInvalidParams{Context: "ProtectiveEquipmentSummarizationAttributes"}
14005	if s.MinConfidence == nil {
14006		invalidParams.Add(request.NewErrParamRequired("MinConfidence"))
14007	}
14008	if s.RequiredEquipmentTypes == nil {
14009		invalidParams.Add(request.NewErrParamRequired("RequiredEquipmentTypes"))
14010	}
14011
14012	if invalidParams.Len() > 0 {
14013		return invalidParams
14014	}
14015	return nil
14016}
14017
14018// SetMinConfidence sets the MinConfidence field's value.
14019func (s *ProtectiveEquipmentSummarizationAttributes) SetMinConfidence(v float64) *ProtectiveEquipmentSummarizationAttributes {
14020	s.MinConfidence = &v
14021	return s
14022}
14023
14024// SetRequiredEquipmentTypes sets the RequiredEquipmentTypes field's value.
14025func (s *ProtectiveEquipmentSummarizationAttributes) SetRequiredEquipmentTypes(v []*string) *ProtectiveEquipmentSummarizationAttributes {
14026	s.RequiredEquipmentTypes = v
14027	return s
14028}
14029
14030// Summary information for required items of personal protective equipment (PPE)
14031// detected on persons by a call to DetectProtectiveEquipment. You specify the
14032// required type of PPE in the SummarizationAttributes (ProtectiveEquipmentSummarizationAttributes)
14033// input parameter. The summary includes which persons were detected wearing
14034// the required personal protective equipment (PersonsWithRequiredEquipment),
14035// which persons were detected as not wearing the required PPE (PersonsWithoutRequiredEquipment),
14036// and the persons in which a determination could not be made (PersonsIndeterminate).
14037//
14038// To get a total for each category, use the size of the field array. For example,
14039// to find out how many people were detected as wearing the specified PPE, use
14040// the size of the PersonsWithRequiredEquipment array. If you want to find out
14041// more about a person, such as the location (BoundingBox) of the person on
14042// the image, use the person ID in each array element. Each person ID matches
14043// the ID field of a ProtectiveEquipmentPerson object returned in the Persons
14044// array by DetectProtectiveEquipment.
14045type ProtectiveEquipmentSummary struct {
14046	_ struct{} `type:"structure"`
14047
14048	// An array of IDs for persons where it was not possible to determine if they
14049	// are wearing personal protective equipment.
14050	PersonsIndeterminate []*int64 `type:"list"`
14051
14052	// An array of IDs for persons who are wearing detected personal protective
14053	// equipment.
14054	PersonsWithRequiredEquipment []*int64 `type:"list"`
14055
14056	// An array of IDs for persons who are not wearing all of the types of PPE specified
14057	// in the RequiredEquipmentTypes field of the detected personal protective equipment.
14058	PersonsWithoutRequiredEquipment []*int64 `type:"list"`
14059}
14060
14061// String returns the string representation
14062func (s ProtectiveEquipmentSummary) String() string {
14063	return awsutil.Prettify(s)
14064}
14065
14066// GoString returns the string representation
14067func (s ProtectiveEquipmentSummary) GoString() string {
14068	return s.String()
14069}
14070
14071// SetPersonsIndeterminate sets the PersonsIndeterminate field's value.
14072func (s *ProtectiveEquipmentSummary) SetPersonsIndeterminate(v []*int64) *ProtectiveEquipmentSummary {
14073	s.PersonsIndeterminate = v
14074	return s
14075}
14076
14077// SetPersonsWithRequiredEquipment sets the PersonsWithRequiredEquipment field's value.
14078func (s *ProtectiveEquipmentSummary) SetPersonsWithRequiredEquipment(v []*int64) *ProtectiveEquipmentSummary {
14079	s.PersonsWithRequiredEquipment = v
14080	return s
14081}
14082
14083// SetPersonsWithoutRequiredEquipment sets the PersonsWithoutRequiredEquipment field's value.
14084func (s *ProtectiveEquipmentSummary) SetPersonsWithoutRequiredEquipment(v []*int64) *ProtectiveEquipmentSummary {
14085	s.PersonsWithoutRequiredEquipment = v
14086	return s
14087}
14088
14089// The number of requests exceeded your throughput limit. If you want to increase
14090// this limit, contact Amazon Rekognition.
14091type ProvisionedThroughputExceededException struct {
14092	_            struct{}                  `type:"structure"`
14093	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
14094
14095	Message_ *string `locationName:"message" type:"string"`
14096}
14097
14098// String returns the string representation
14099func (s ProvisionedThroughputExceededException) String() string {
14100	return awsutil.Prettify(s)
14101}
14102
14103// GoString returns the string representation
14104func (s ProvisionedThroughputExceededException) GoString() string {
14105	return s.String()
14106}
14107
14108func newErrorProvisionedThroughputExceededException(v protocol.ResponseMetadata) error {
14109	return &ProvisionedThroughputExceededException{
14110		RespMetadata: v,
14111	}
14112}
14113
14114// Code returns the exception type name.
14115func (s *ProvisionedThroughputExceededException) Code() string {
14116	return "ProvisionedThroughputExceededException"
14117}
14118
14119// Message returns the exception's message.
14120func (s *ProvisionedThroughputExceededException) Message() string {
14121	if s.Message_ != nil {
14122		return *s.Message_
14123	}
14124	return ""
14125}
14126
14127// OrigErr always returns nil, satisfies awserr.Error interface.
14128func (s *ProvisionedThroughputExceededException) OrigErr() error {
14129	return nil
14130}
14131
14132func (s *ProvisionedThroughputExceededException) Error() string {
14133	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
14134}
14135
14136// Status code returns the HTTP status code for the request's response error.
14137func (s *ProvisionedThroughputExceededException) StatusCode() int {
14138	return s.RespMetadata.StatusCode
14139}
14140
14141// RequestID returns the service's response RequestID for request.
14142func (s *ProvisionedThroughputExceededException) RequestID() string {
14143	return s.RespMetadata.RequestID
14144}
14145
14146type RecognizeCelebritiesInput struct {
14147	_ struct{} `type:"structure"`
14148
14149	// The input image as base64-encoded bytes or an S3 object. If you use the AWS
14150	// CLI to call Amazon Rekognition operations, passing base64-encoded image bytes
14151	// is not supported.
14152	//
14153	// If you are using an AWS SDK to call Amazon Rekognition, you might not need
14154	// to base64-encode image bytes passed using the Bytes field. For more information,
14155	// see Images in the Amazon Rekognition developer guide.
14156	//
14157	// Image is a required field
14158	Image *Image `type:"structure" required:"true"`
14159}
14160
14161// String returns the string representation
14162func (s RecognizeCelebritiesInput) String() string {
14163	return awsutil.Prettify(s)
14164}
14165
14166// GoString returns the string representation
14167func (s RecognizeCelebritiesInput) GoString() string {
14168	return s.String()
14169}
14170
14171// Validate inspects the fields of the type to determine if they are valid.
14172func (s *RecognizeCelebritiesInput) Validate() error {
14173	invalidParams := request.ErrInvalidParams{Context: "RecognizeCelebritiesInput"}
14174	if s.Image == nil {
14175		invalidParams.Add(request.NewErrParamRequired("Image"))
14176	}
14177	if s.Image != nil {
14178		if err := s.Image.Validate(); err != nil {
14179			invalidParams.AddNested("Image", err.(request.ErrInvalidParams))
14180		}
14181	}
14182
14183	if invalidParams.Len() > 0 {
14184		return invalidParams
14185	}
14186	return nil
14187}
14188
14189// SetImage sets the Image field's value.
14190func (s *RecognizeCelebritiesInput) SetImage(v *Image) *RecognizeCelebritiesInput {
14191	s.Image = v
14192	return s
14193}
14194
14195type RecognizeCelebritiesOutput struct {
14196	_ struct{} `type:"structure"`
14197
14198	// Details about each celebrity found in the image. Amazon Rekognition can detect
14199	// a maximum of 64 celebrities in an image.
14200	CelebrityFaces []*Celebrity `type:"list"`
14201
14202	// The orientation of the input image (counterclockwise direction). If your
14203	// application displays the image, you can use this value to correct the orientation.
14204	// The bounding box coordinates returned in CelebrityFaces and UnrecognizedFaces
14205	// represent face locations before the image orientation is corrected.
14206	//
14207	// If the input image is in .jpeg format, it might contain exchangeable image
14208	// (Exif) metadata that includes the image's orientation. If so, and the Exif
14209	// metadata for the input image populates the orientation field, the value of
14210	// OrientationCorrection is null. The CelebrityFaces and UnrecognizedFaces bounding
14211	// box coordinates represent face locations after Exif metadata is used to correct
14212	// the image orientation. Images in .png format don't contain Exif metadata.
14213	OrientationCorrection *string `type:"string" enum:"OrientationCorrection"`
14214
14215	// Details about each unrecognized face in the image.
14216	UnrecognizedFaces []*ComparedFace `type:"list"`
14217}
14218
14219// String returns the string representation
14220func (s RecognizeCelebritiesOutput) String() string {
14221	return awsutil.Prettify(s)
14222}
14223
14224// GoString returns the string representation
14225func (s RecognizeCelebritiesOutput) GoString() string {
14226	return s.String()
14227}
14228
14229// SetCelebrityFaces sets the CelebrityFaces field's value.
14230func (s *RecognizeCelebritiesOutput) SetCelebrityFaces(v []*Celebrity) *RecognizeCelebritiesOutput {
14231	s.CelebrityFaces = v
14232	return s
14233}
14234
14235// SetOrientationCorrection sets the OrientationCorrection field's value.
14236func (s *RecognizeCelebritiesOutput) SetOrientationCorrection(v string) *RecognizeCelebritiesOutput {
14237	s.OrientationCorrection = &v
14238	return s
14239}
14240
14241// SetUnrecognizedFaces sets the UnrecognizedFaces field's value.
14242func (s *RecognizeCelebritiesOutput) SetUnrecognizedFaces(v []*ComparedFace) *RecognizeCelebritiesOutput {
14243	s.UnrecognizedFaces = v
14244	return s
14245}
14246
14247// Specifies a location within the frame that Rekognition checks for text. Uses
14248// a BoundingBox object to set a region of the screen.
14249//
14250// A word is included in the region if the word is more than half in that region.
14251// If there is more than one region, the word will be compared with all regions
14252// of the screen. Any word more than half in a region is kept in the results.
14253type RegionOfInterest struct {
14254	_ struct{} `type:"structure"`
14255
14256	// The box representing a region of interest on screen.
14257	BoundingBox *BoundingBox `type:"structure"`
14258}
14259
14260// String returns the string representation
14261func (s RegionOfInterest) String() string {
14262	return awsutil.Prettify(s)
14263}
14264
14265// GoString returns the string representation
14266func (s RegionOfInterest) GoString() string {
14267	return s.String()
14268}
14269
14270// SetBoundingBox sets the BoundingBox field's value.
14271func (s *RegionOfInterest) SetBoundingBox(v *BoundingBox) *RegionOfInterest {
14272	s.BoundingBox = v
14273	return s
14274}
14275
14276// A collection with the specified ID already exists.
14277type ResourceAlreadyExistsException struct {
14278	_            struct{}                  `type:"structure"`
14279	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
14280
14281	Message_ *string `locationName:"message" type:"string"`
14282}
14283
14284// String returns the string representation
14285func (s ResourceAlreadyExistsException) String() string {
14286	return awsutil.Prettify(s)
14287}
14288
14289// GoString returns the string representation
14290func (s ResourceAlreadyExistsException) GoString() string {
14291	return s.String()
14292}
14293
14294func newErrorResourceAlreadyExistsException(v protocol.ResponseMetadata) error {
14295	return &ResourceAlreadyExistsException{
14296		RespMetadata: v,
14297	}
14298}
14299
14300// Code returns the exception type name.
14301func (s *ResourceAlreadyExistsException) Code() string {
14302	return "ResourceAlreadyExistsException"
14303}
14304
14305// Message returns the exception's message.
14306func (s *ResourceAlreadyExistsException) Message() string {
14307	if s.Message_ != nil {
14308		return *s.Message_
14309	}
14310	return ""
14311}
14312
14313// OrigErr always returns nil, satisfies awserr.Error interface.
14314func (s *ResourceAlreadyExistsException) OrigErr() error {
14315	return nil
14316}
14317
14318func (s *ResourceAlreadyExistsException) Error() string {
14319	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
14320}
14321
14322// Status code returns the HTTP status code for the request's response error.
14323func (s *ResourceAlreadyExistsException) StatusCode() int {
14324	return s.RespMetadata.StatusCode
14325}
14326
14327// RequestID returns the service's response RequestID for request.
14328func (s *ResourceAlreadyExistsException) RequestID() string {
14329	return s.RespMetadata.RequestID
14330}
14331
14332// The specified resource is already being used.
14333type ResourceInUseException struct {
14334	_            struct{}                  `type:"structure"`
14335	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
14336
14337	Message_ *string `locationName:"message" type:"string"`
14338}
14339
14340// String returns the string representation
14341func (s ResourceInUseException) String() string {
14342	return awsutil.Prettify(s)
14343}
14344
14345// GoString returns the string representation
14346func (s ResourceInUseException) GoString() string {
14347	return s.String()
14348}
14349
14350func newErrorResourceInUseException(v protocol.ResponseMetadata) error {
14351	return &ResourceInUseException{
14352		RespMetadata: v,
14353	}
14354}
14355
14356// Code returns the exception type name.
14357func (s *ResourceInUseException) Code() string {
14358	return "ResourceInUseException"
14359}
14360
14361// Message returns the exception's message.
14362func (s *ResourceInUseException) Message() string {
14363	if s.Message_ != nil {
14364		return *s.Message_
14365	}
14366	return ""
14367}
14368
14369// OrigErr always returns nil, satisfies awserr.Error interface.
14370func (s *ResourceInUseException) OrigErr() error {
14371	return nil
14372}
14373
14374func (s *ResourceInUseException) Error() string {
14375	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
14376}
14377
14378// Status code returns the HTTP status code for the request's response error.
14379func (s *ResourceInUseException) StatusCode() int {
14380	return s.RespMetadata.StatusCode
14381}
14382
14383// RequestID returns the service's response RequestID for request.
14384func (s *ResourceInUseException) RequestID() string {
14385	return s.RespMetadata.RequestID
14386}
14387
14388// The collection specified in the request cannot be found.
14389type ResourceNotFoundException struct {
14390	_            struct{}                  `type:"structure"`
14391	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
14392
14393	Message_ *string `locationName:"message" type:"string"`
14394}
14395
14396// String returns the string representation
14397func (s ResourceNotFoundException) String() string {
14398	return awsutil.Prettify(s)
14399}
14400
14401// GoString returns the string representation
14402func (s ResourceNotFoundException) GoString() string {
14403	return s.String()
14404}
14405
14406func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error {
14407	return &ResourceNotFoundException{
14408		RespMetadata: v,
14409	}
14410}
14411
14412// Code returns the exception type name.
14413func (s *ResourceNotFoundException) Code() string {
14414	return "ResourceNotFoundException"
14415}
14416
14417// Message returns the exception's message.
14418func (s *ResourceNotFoundException) Message() string {
14419	if s.Message_ != nil {
14420		return *s.Message_
14421	}
14422	return ""
14423}
14424
14425// OrigErr always returns nil, satisfies awserr.Error interface.
14426func (s *ResourceNotFoundException) OrigErr() error {
14427	return nil
14428}
14429
14430func (s *ResourceNotFoundException) Error() string {
14431	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
14432}
14433
14434// Status code returns the HTTP status code for the request's response error.
14435func (s *ResourceNotFoundException) StatusCode() int {
14436	return s.RespMetadata.StatusCode
14437}
14438
14439// RequestID returns the service's response RequestID for request.
14440func (s *ResourceNotFoundException) RequestID() string {
14441	return s.RespMetadata.RequestID
14442}
14443
14444// The requested resource isn't ready. For example, this exception occurs when
14445// you call DetectCustomLabels with a model version that isn't deployed.
14446type ResourceNotReadyException struct {
14447	_            struct{}                  `type:"structure"`
14448	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
14449
14450	Message_ *string `locationName:"message" type:"string"`
14451}
14452
14453// String returns the string representation
14454func (s ResourceNotReadyException) String() string {
14455	return awsutil.Prettify(s)
14456}
14457
14458// GoString returns the string representation
14459func (s ResourceNotReadyException) GoString() string {
14460	return s.String()
14461}
14462
14463func newErrorResourceNotReadyException(v protocol.ResponseMetadata) error {
14464	return &ResourceNotReadyException{
14465		RespMetadata: v,
14466	}
14467}
14468
14469// Code returns the exception type name.
14470func (s *ResourceNotReadyException) Code() string {
14471	return "ResourceNotReadyException"
14472}
14473
14474// Message returns the exception's message.
14475func (s *ResourceNotReadyException) Message() string {
14476	if s.Message_ != nil {
14477		return *s.Message_
14478	}
14479	return ""
14480}
14481
14482// OrigErr always returns nil, satisfies awserr.Error interface.
14483func (s *ResourceNotReadyException) OrigErr() error {
14484	return nil
14485}
14486
14487func (s *ResourceNotReadyException) Error() string {
14488	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
14489}
14490
14491// Status code returns the HTTP status code for the request's response error.
14492func (s *ResourceNotReadyException) StatusCode() int {
14493	return s.RespMetadata.StatusCode
14494}
14495
14496// RequestID returns the service's response RequestID for request.
14497func (s *ResourceNotReadyException) RequestID() string {
14498	return s.RespMetadata.RequestID
14499}
14500
14501// Provides the S3 bucket name and object name.
14502//
14503// The region for the S3 bucket containing the S3 object must match the region
14504// you use for Amazon Rekognition operations.
14505//
14506// For Amazon Rekognition to process an S3 object, the user must have permission
14507// to access the S3 object. For more information, see Resource-Based Policies
14508// in the Amazon Rekognition Developer Guide.
14509type S3Object struct {
14510	_ struct{} `type:"structure"`
14511
14512	// Name of the S3 bucket.
14513	Bucket *string `min:"3" type:"string"`
14514
14515	// S3 object key name.
14516	Name *string `min:"1" type:"string"`
14517
14518	// If the bucket is versioning enabled, you can specify the object version.
14519	Version *string `min:"1" type:"string"`
14520}
14521
14522// String returns the string representation
14523func (s S3Object) String() string {
14524	return awsutil.Prettify(s)
14525}
14526
14527// GoString returns the string representation
14528func (s S3Object) GoString() string {
14529	return s.String()
14530}
14531
14532// Validate inspects the fields of the type to determine if they are valid.
14533func (s *S3Object) Validate() error {
14534	invalidParams := request.ErrInvalidParams{Context: "S3Object"}
14535	if s.Bucket != nil && len(*s.Bucket) < 3 {
14536		invalidParams.Add(request.NewErrParamMinLen("Bucket", 3))
14537	}
14538	if s.Name != nil && len(*s.Name) < 1 {
14539		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
14540	}
14541	if s.Version != nil && len(*s.Version) < 1 {
14542		invalidParams.Add(request.NewErrParamMinLen("Version", 1))
14543	}
14544
14545	if invalidParams.Len() > 0 {
14546		return invalidParams
14547	}
14548	return nil
14549}
14550
14551// SetBucket sets the Bucket field's value.
14552func (s *S3Object) SetBucket(v string) *S3Object {
14553	s.Bucket = &v
14554	return s
14555}
14556
14557// SetName sets the Name field's value.
14558func (s *S3Object) SetName(v string) *S3Object {
14559	s.Name = &v
14560	return s
14561}
14562
14563// SetVersion sets the Version field's value.
14564func (s *S3Object) SetVersion(v string) *S3Object {
14565	s.Version = &v
14566	return s
14567}
14568
14569type SearchFacesByImageInput struct {
14570	_ struct{} `type:"structure"`
14571
14572	// ID of the collection to search.
14573	//
14574	// CollectionId is a required field
14575	CollectionId *string `min:"1" type:"string" required:"true"`
14576
14577	// (Optional) Specifies the minimum confidence in the face match to return.
14578	// For example, don't return any matches where confidence in matches is less
14579	// than 70%. The default value is 80%.
14580	FaceMatchThreshold *float64 `type:"float"`
14581
14582	// The input image as base64-encoded bytes or an S3 object. If you use the AWS
14583	// CLI to call Amazon Rekognition operations, passing base64-encoded image bytes
14584	// is not supported.
14585	//
14586	// If you are using an AWS SDK to call Amazon Rekognition, you might not need
14587	// to base64-encode image bytes passed using the Bytes field. For more information,
14588	// see Images in the Amazon Rekognition developer guide.
14589	//
14590	// Image is a required field
14591	Image *Image `type:"structure" required:"true"`
14592
14593	// Maximum number of faces to return. The operation returns the maximum number
14594	// of faces with the highest confidence in the match.
14595	MaxFaces *int64 `min:"1" type:"integer"`
14596
14597	// A filter that specifies a quality bar for how much filtering is done to identify
14598	// faces. Filtered faces aren't searched for in the collection. If you specify
14599	// AUTO, Amazon Rekognition chooses the quality bar. If you specify LOW, MEDIUM,
14600	// or HIGH, filtering removes all faces that don’t meet the chosen quality
14601	// bar. The quality bar is based on a variety of common use cases. Low-quality
14602	// detections can occur for a number of reasons. Some examples are an object
14603	// that's misidentified as a face, a face that's too blurry, or a face with
14604	// a pose that's too extreme to use. If you specify NONE, no filtering is performed.
14605	// The default value is NONE.
14606	//
14607	// To use quality filtering, the collection you are using must be associated
14608	// with version 3 of the face model or higher.
14609	QualityFilter *string `type:"string" enum:"QualityFilter"`
14610}
14611
14612// String returns the string representation
14613func (s SearchFacesByImageInput) String() string {
14614	return awsutil.Prettify(s)
14615}
14616
14617// GoString returns the string representation
14618func (s SearchFacesByImageInput) GoString() string {
14619	return s.String()
14620}
14621
14622// Validate inspects the fields of the type to determine if they are valid.
14623func (s *SearchFacesByImageInput) Validate() error {
14624	invalidParams := request.ErrInvalidParams{Context: "SearchFacesByImageInput"}
14625	if s.CollectionId == nil {
14626		invalidParams.Add(request.NewErrParamRequired("CollectionId"))
14627	}
14628	if s.CollectionId != nil && len(*s.CollectionId) < 1 {
14629		invalidParams.Add(request.NewErrParamMinLen("CollectionId", 1))
14630	}
14631	if s.Image == nil {
14632		invalidParams.Add(request.NewErrParamRequired("Image"))
14633	}
14634	if s.MaxFaces != nil && *s.MaxFaces < 1 {
14635		invalidParams.Add(request.NewErrParamMinValue("MaxFaces", 1))
14636	}
14637	if s.Image != nil {
14638		if err := s.Image.Validate(); err != nil {
14639			invalidParams.AddNested("Image", err.(request.ErrInvalidParams))
14640		}
14641	}
14642
14643	if invalidParams.Len() > 0 {
14644		return invalidParams
14645	}
14646	return nil
14647}
14648
14649// SetCollectionId sets the CollectionId field's value.
14650func (s *SearchFacesByImageInput) SetCollectionId(v string) *SearchFacesByImageInput {
14651	s.CollectionId = &v
14652	return s
14653}
14654
14655// SetFaceMatchThreshold sets the FaceMatchThreshold field's value.
14656func (s *SearchFacesByImageInput) SetFaceMatchThreshold(v float64) *SearchFacesByImageInput {
14657	s.FaceMatchThreshold = &v
14658	return s
14659}
14660
14661// SetImage sets the Image field's value.
14662func (s *SearchFacesByImageInput) SetImage(v *Image) *SearchFacesByImageInput {
14663	s.Image = v
14664	return s
14665}
14666
14667// SetMaxFaces sets the MaxFaces field's value.
14668func (s *SearchFacesByImageInput) SetMaxFaces(v int64) *SearchFacesByImageInput {
14669	s.MaxFaces = &v
14670	return s
14671}
14672
14673// SetQualityFilter sets the QualityFilter field's value.
14674func (s *SearchFacesByImageInput) SetQualityFilter(v string) *SearchFacesByImageInput {
14675	s.QualityFilter = &v
14676	return s
14677}
14678
14679type SearchFacesByImageOutput struct {
14680	_ struct{} `type:"structure"`
14681
14682	// An array of faces that match the input face, along with the confidence in
14683	// the match.
14684	FaceMatches []*FaceMatch `type:"list"`
14685
14686	// Version number of the face detection model associated with the input collection
14687	// (CollectionId).
14688	FaceModelVersion *string `type:"string"`
14689
14690	// The bounding box around the face in the input image that Amazon Rekognition
14691	// used for the search.
14692	SearchedFaceBoundingBox *BoundingBox `type:"structure"`
14693
14694	// The level of confidence that the searchedFaceBoundingBox, contains a face.
14695	SearchedFaceConfidence *float64 `type:"float"`
14696}
14697
14698// String returns the string representation
14699func (s SearchFacesByImageOutput) String() string {
14700	return awsutil.Prettify(s)
14701}
14702
14703// GoString returns the string representation
14704func (s SearchFacesByImageOutput) GoString() string {
14705	return s.String()
14706}
14707
14708// SetFaceMatches sets the FaceMatches field's value.
14709func (s *SearchFacesByImageOutput) SetFaceMatches(v []*FaceMatch) *SearchFacesByImageOutput {
14710	s.FaceMatches = v
14711	return s
14712}
14713
14714// SetFaceModelVersion sets the FaceModelVersion field's value.
14715func (s *SearchFacesByImageOutput) SetFaceModelVersion(v string) *SearchFacesByImageOutput {
14716	s.FaceModelVersion = &v
14717	return s
14718}
14719
14720// SetSearchedFaceBoundingBox sets the SearchedFaceBoundingBox field's value.
14721func (s *SearchFacesByImageOutput) SetSearchedFaceBoundingBox(v *BoundingBox) *SearchFacesByImageOutput {
14722	s.SearchedFaceBoundingBox = v
14723	return s
14724}
14725
14726// SetSearchedFaceConfidence sets the SearchedFaceConfidence field's value.
14727func (s *SearchFacesByImageOutput) SetSearchedFaceConfidence(v float64) *SearchFacesByImageOutput {
14728	s.SearchedFaceConfidence = &v
14729	return s
14730}
14731
14732type SearchFacesInput struct {
14733	_ struct{} `type:"structure"`
14734
14735	// ID of the collection the face belongs to.
14736	//
14737	// CollectionId is a required field
14738	CollectionId *string `min:"1" type:"string" required:"true"`
14739
14740	// ID of a face to find matches for in the collection.
14741	//
14742	// FaceId is a required field
14743	FaceId *string `type:"string" required:"true"`
14744
14745	// Optional value specifying the minimum confidence in the face match to return.
14746	// For example, don't return any matches where confidence in matches is less
14747	// than 70%. The default value is 80%.
14748	FaceMatchThreshold *float64 `type:"float"`
14749
14750	// Maximum number of faces to return. The operation returns the maximum number
14751	// of faces with the highest confidence in the match.
14752	MaxFaces *int64 `min:"1" type:"integer"`
14753}
14754
14755// String returns the string representation
14756func (s SearchFacesInput) String() string {
14757	return awsutil.Prettify(s)
14758}
14759
14760// GoString returns the string representation
14761func (s SearchFacesInput) GoString() string {
14762	return s.String()
14763}
14764
14765// Validate inspects the fields of the type to determine if they are valid.
14766func (s *SearchFacesInput) Validate() error {
14767	invalidParams := request.ErrInvalidParams{Context: "SearchFacesInput"}
14768	if s.CollectionId == nil {
14769		invalidParams.Add(request.NewErrParamRequired("CollectionId"))
14770	}
14771	if s.CollectionId != nil && len(*s.CollectionId) < 1 {
14772		invalidParams.Add(request.NewErrParamMinLen("CollectionId", 1))
14773	}
14774	if s.FaceId == nil {
14775		invalidParams.Add(request.NewErrParamRequired("FaceId"))
14776	}
14777	if s.MaxFaces != nil && *s.MaxFaces < 1 {
14778		invalidParams.Add(request.NewErrParamMinValue("MaxFaces", 1))
14779	}
14780
14781	if invalidParams.Len() > 0 {
14782		return invalidParams
14783	}
14784	return nil
14785}
14786
14787// SetCollectionId sets the CollectionId field's value.
14788func (s *SearchFacesInput) SetCollectionId(v string) *SearchFacesInput {
14789	s.CollectionId = &v
14790	return s
14791}
14792
14793// SetFaceId sets the FaceId field's value.
14794func (s *SearchFacesInput) SetFaceId(v string) *SearchFacesInput {
14795	s.FaceId = &v
14796	return s
14797}
14798
14799// SetFaceMatchThreshold sets the FaceMatchThreshold field's value.
14800func (s *SearchFacesInput) SetFaceMatchThreshold(v float64) *SearchFacesInput {
14801	s.FaceMatchThreshold = &v
14802	return s
14803}
14804
14805// SetMaxFaces sets the MaxFaces field's value.
14806func (s *SearchFacesInput) SetMaxFaces(v int64) *SearchFacesInput {
14807	s.MaxFaces = &v
14808	return s
14809}
14810
14811type SearchFacesOutput struct {
14812	_ struct{} `type:"structure"`
14813
14814	// An array of faces that matched the input face, along with the confidence
14815	// in the match.
14816	FaceMatches []*FaceMatch `type:"list"`
14817
14818	// Version number of the face detection model associated with the input collection
14819	// (CollectionId).
14820	FaceModelVersion *string `type:"string"`
14821
14822	// ID of the face that was searched for matches in a collection.
14823	SearchedFaceId *string `type:"string"`
14824}
14825
14826// String returns the string representation
14827func (s SearchFacesOutput) String() string {
14828	return awsutil.Prettify(s)
14829}
14830
14831// GoString returns the string representation
14832func (s SearchFacesOutput) GoString() string {
14833	return s.String()
14834}
14835
14836// SetFaceMatches sets the FaceMatches field's value.
14837func (s *SearchFacesOutput) SetFaceMatches(v []*FaceMatch) *SearchFacesOutput {
14838	s.FaceMatches = v
14839	return s
14840}
14841
14842// SetFaceModelVersion sets the FaceModelVersion field's value.
14843func (s *SearchFacesOutput) SetFaceModelVersion(v string) *SearchFacesOutput {
14844	s.FaceModelVersion = &v
14845	return s
14846}
14847
14848// SetSearchedFaceId sets the SearchedFaceId field's value.
14849func (s *SearchFacesOutput) SetSearchedFaceId(v string) *SearchFacesOutput {
14850	s.SearchedFaceId = &v
14851	return s
14852}
14853
14854// A technical cue or shot detection segment detected in a video. An array of
14855// SegmentDetection objects containing all segments detected in a stored video
14856// is returned by GetSegmentDetection.
14857type SegmentDetection struct {
14858	_ struct{} `type:"structure"`
14859
14860	// The duration of the detected segment in milliseconds.
14861	DurationMillis *int64 `type:"long"`
14862
14863	// The duration of the timecode for the detected segment in SMPTE format.
14864	DurationSMPTE *string `type:"string"`
14865
14866	// The frame-accurate SMPTE timecode, from the start of a video, for the end
14867	// of a detected segment. EndTimecode is in HH:MM:SS:fr format (and ;fr for
14868	// drop frame-rates).
14869	EndTimecodeSMPTE *string `type:"string"`
14870
14871	// The end time of the detected segment, in milliseconds, from the start of
14872	// the video. This value is rounded down.
14873	EndTimestampMillis *int64 `type:"long"`
14874
14875	// If the segment is a shot detection, contains information about the shot detection.
14876	ShotSegment *ShotSegment `type:"structure"`
14877
14878	// The frame-accurate SMPTE timecode, from the start of a video, for the start
14879	// of a detected segment. StartTimecode is in HH:MM:SS:fr format (and ;fr for
14880	// drop frame-rates).
14881	StartTimecodeSMPTE *string `type:"string"`
14882
14883	// The start time of the detected segment in milliseconds from the start of
14884	// the video. This value is rounded down. For example, if the actual timestamp
14885	// is 100.6667 milliseconds, Amazon Rekognition Video returns a value of 100
14886	// millis.
14887	StartTimestampMillis *int64 `type:"long"`
14888
14889	// If the segment is a technical cue, contains information about the technical
14890	// cue.
14891	TechnicalCueSegment *TechnicalCueSegment `type:"structure"`
14892
14893	// The type of the segment. Valid values are TECHNICAL_CUE and SHOT.
14894	Type *string `type:"string" enum:"SegmentType"`
14895}
14896
14897// String returns the string representation
14898func (s SegmentDetection) String() string {
14899	return awsutil.Prettify(s)
14900}
14901
14902// GoString returns the string representation
14903func (s SegmentDetection) GoString() string {
14904	return s.String()
14905}
14906
14907// SetDurationMillis sets the DurationMillis field's value.
14908func (s *SegmentDetection) SetDurationMillis(v int64) *SegmentDetection {
14909	s.DurationMillis = &v
14910	return s
14911}
14912
14913// SetDurationSMPTE sets the DurationSMPTE field's value.
14914func (s *SegmentDetection) SetDurationSMPTE(v string) *SegmentDetection {
14915	s.DurationSMPTE = &v
14916	return s
14917}
14918
14919// SetEndTimecodeSMPTE sets the EndTimecodeSMPTE field's value.
14920func (s *SegmentDetection) SetEndTimecodeSMPTE(v string) *SegmentDetection {
14921	s.EndTimecodeSMPTE = &v
14922	return s
14923}
14924
14925// SetEndTimestampMillis sets the EndTimestampMillis field's value.
14926func (s *SegmentDetection) SetEndTimestampMillis(v int64) *SegmentDetection {
14927	s.EndTimestampMillis = &v
14928	return s
14929}
14930
14931// SetShotSegment sets the ShotSegment field's value.
14932func (s *SegmentDetection) SetShotSegment(v *ShotSegment) *SegmentDetection {
14933	s.ShotSegment = v
14934	return s
14935}
14936
14937// SetStartTimecodeSMPTE sets the StartTimecodeSMPTE field's value.
14938func (s *SegmentDetection) SetStartTimecodeSMPTE(v string) *SegmentDetection {
14939	s.StartTimecodeSMPTE = &v
14940	return s
14941}
14942
14943// SetStartTimestampMillis sets the StartTimestampMillis field's value.
14944func (s *SegmentDetection) SetStartTimestampMillis(v int64) *SegmentDetection {
14945	s.StartTimestampMillis = &v
14946	return s
14947}
14948
14949// SetTechnicalCueSegment sets the TechnicalCueSegment field's value.
14950func (s *SegmentDetection) SetTechnicalCueSegment(v *TechnicalCueSegment) *SegmentDetection {
14951	s.TechnicalCueSegment = v
14952	return s
14953}
14954
14955// SetType sets the Type field's value.
14956func (s *SegmentDetection) SetType(v string) *SegmentDetection {
14957	s.Type = &v
14958	return s
14959}
14960
14961// Information about the type of a segment requested in a call to StartSegmentDetection.
14962// An array of SegmentTypeInfo objects is returned by the response from GetSegmentDetection.
14963type SegmentTypeInfo struct {
14964	_ struct{} `type:"structure"`
14965
14966	// The version of the model used to detect segments.
14967	ModelVersion *string `type:"string"`
14968
14969	// The type of a segment (technical cue or shot detection).
14970	Type *string `type:"string" enum:"SegmentType"`
14971}
14972
14973// String returns the string representation
14974func (s SegmentTypeInfo) String() string {
14975	return awsutil.Prettify(s)
14976}
14977
14978// GoString returns the string representation
14979func (s SegmentTypeInfo) GoString() string {
14980	return s.String()
14981}
14982
14983// SetModelVersion sets the ModelVersion field's value.
14984func (s *SegmentTypeInfo) SetModelVersion(v string) *SegmentTypeInfo {
14985	s.ModelVersion = &v
14986	return s
14987}
14988
14989// SetType sets the Type field's value.
14990func (s *SegmentTypeInfo) SetType(v string) *SegmentTypeInfo {
14991	s.Type = &v
14992	return s
14993}
14994
14995// The size of the collection or tag list exceeds the allowed limit. For more
14996// information, see Limits in Amazon Rekognition in the Amazon Rekognition Developer
14997// Guide.
14998type ServiceQuotaExceededException struct {
14999	_            struct{}                  `type:"structure"`
15000	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
15001
15002	Message_ *string `locationName:"message" type:"string"`
15003}
15004
15005// String returns the string representation
15006func (s ServiceQuotaExceededException) String() string {
15007	return awsutil.Prettify(s)
15008}
15009
15010// GoString returns the string representation
15011func (s ServiceQuotaExceededException) GoString() string {
15012	return s.String()
15013}
15014
15015func newErrorServiceQuotaExceededException(v protocol.ResponseMetadata) error {
15016	return &ServiceQuotaExceededException{
15017		RespMetadata: v,
15018	}
15019}
15020
15021// Code returns the exception type name.
15022func (s *ServiceQuotaExceededException) Code() string {
15023	return "ServiceQuotaExceededException"
15024}
15025
15026// Message returns the exception's message.
15027func (s *ServiceQuotaExceededException) Message() string {
15028	if s.Message_ != nil {
15029		return *s.Message_
15030	}
15031	return ""
15032}
15033
15034// OrigErr always returns nil, satisfies awserr.Error interface.
15035func (s *ServiceQuotaExceededException) OrigErr() error {
15036	return nil
15037}
15038
15039func (s *ServiceQuotaExceededException) Error() string {
15040	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
15041}
15042
15043// Status code returns the HTTP status code for the request's response error.
15044func (s *ServiceQuotaExceededException) StatusCode() int {
15045	return s.RespMetadata.StatusCode
15046}
15047
15048// RequestID returns the service's response RequestID for request.
15049func (s *ServiceQuotaExceededException) RequestID() string {
15050	return s.RespMetadata.RequestID
15051}
15052
15053// Information about a shot detection segment detected in a video. For more
15054// information, see SegmentDetection.
15055type ShotSegment struct {
15056	_ struct{} `type:"structure"`
15057
15058	// The confidence that Amazon Rekognition Video has in the accuracy of the detected
15059	// segment.
15060	Confidence *float64 `min:"50" type:"float"`
15061
15062	// An Identifier for a shot detection segment detected in a video.
15063	Index *int64 `type:"long"`
15064}
15065
15066// String returns the string representation
15067func (s ShotSegment) String() string {
15068	return awsutil.Prettify(s)
15069}
15070
15071// GoString returns the string representation
15072func (s ShotSegment) GoString() string {
15073	return s.String()
15074}
15075
15076// SetConfidence sets the Confidence field's value.
15077func (s *ShotSegment) SetConfidence(v float64) *ShotSegment {
15078	s.Confidence = &v
15079	return s
15080}
15081
15082// SetIndex sets the Index field's value.
15083func (s *ShotSegment) SetIndex(v int64) *ShotSegment {
15084	s.Index = &v
15085	return s
15086}
15087
15088// Indicates whether or not the face is smiling, and the confidence level in
15089// the determination.
15090type Smile struct {
15091	_ struct{} `type:"structure"`
15092
15093	// Level of confidence in the determination.
15094	Confidence *float64 `type:"float"`
15095
15096	// Boolean value that indicates whether the face is smiling or not.
15097	Value *bool `type:"boolean"`
15098}
15099
15100// String returns the string representation
15101func (s Smile) String() string {
15102	return awsutil.Prettify(s)
15103}
15104
15105// GoString returns the string representation
15106func (s Smile) GoString() string {
15107	return s.String()
15108}
15109
15110// SetConfidence sets the Confidence field's value.
15111func (s *Smile) SetConfidence(v float64) *Smile {
15112	s.Confidence = &v
15113	return s
15114}
15115
15116// SetValue sets the Value field's value.
15117func (s *Smile) SetValue(v bool) *Smile {
15118	s.Value = &v
15119	return s
15120}
15121
15122type StartCelebrityRecognitionInput struct {
15123	_ struct{} `type:"structure"`
15124
15125	// Idempotent token used to identify the start request. If you use the same
15126	// token with multiple StartCelebrityRecognition requests, the same JobId is
15127	// returned. Use ClientRequestToken to prevent the same job from being accidently
15128	// started more than once.
15129	ClientRequestToken *string `min:"1" type:"string"`
15130
15131	// An identifier you specify that's returned in the completion notification
15132	// that's published to your Amazon Simple Notification Service topic. For example,
15133	// you can use JobTag to group related jobs and identify them in the completion
15134	// notification.
15135	JobTag *string `min:"1" type:"string"`
15136
15137	// The Amazon SNS topic ARN that you want Amazon Rekognition Video to publish
15138	// the completion status of the celebrity recognition analysis to.
15139	NotificationChannel *NotificationChannel `type:"structure"`
15140
15141	// The video in which you want to recognize celebrities. The video must be stored
15142	// in an Amazon S3 bucket.
15143	//
15144	// Video is a required field
15145	Video *Video `type:"structure" required:"true"`
15146}
15147
15148// String returns the string representation
15149func (s StartCelebrityRecognitionInput) String() string {
15150	return awsutil.Prettify(s)
15151}
15152
15153// GoString returns the string representation
15154func (s StartCelebrityRecognitionInput) GoString() string {
15155	return s.String()
15156}
15157
15158// Validate inspects the fields of the type to determine if they are valid.
15159func (s *StartCelebrityRecognitionInput) Validate() error {
15160	invalidParams := request.ErrInvalidParams{Context: "StartCelebrityRecognitionInput"}
15161	if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 {
15162		invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1))
15163	}
15164	if s.JobTag != nil && len(*s.JobTag) < 1 {
15165		invalidParams.Add(request.NewErrParamMinLen("JobTag", 1))
15166	}
15167	if s.Video == nil {
15168		invalidParams.Add(request.NewErrParamRequired("Video"))
15169	}
15170	if s.NotificationChannel != nil {
15171		if err := s.NotificationChannel.Validate(); err != nil {
15172			invalidParams.AddNested("NotificationChannel", err.(request.ErrInvalidParams))
15173		}
15174	}
15175	if s.Video != nil {
15176		if err := s.Video.Validate(); err != nil {
15177			invalidParams.AddNested("Video", err.(request.ErrInvalidParams))
15178		}
15179	}
15180
15181	if invalidParams.Len() > 0 {
15182		return invalidParams
15183	}
15184	return nil
15185}
15186
15187// SetClientRequestToken sets the ClientRequestToken field's value.
15188func (s *StartCelebrityRecognitionInput) SetClientRequestToken(v string) *StartCelebrityRecognitionInput {
15189	s.ClientRequestToken = &v
15190	return s
15191}
15192
15193// SetJobTag sets the JobTag field's value.
15194func (s *StartCelebrityRecognitionInput) SetJobTag(v string) *StartCelebrityRecognitionInput {
15195	s.JobTag = &v
15196	return s
15197}
15198
15199// SetNotificationChannel sets the NotificationChannel field's value.
15200func (s *StartCelebrityRecognitionInput) SetNotificationChannel(v *NotificationChannel) *StartCelebrityRecognitionInput {
15201	s.NotificationChannel = v
15202	return s
15203}
15204
15205// SetVideo sets the Video field's value.
15206func (s *StartCelebrityRecognitionInput) SetVideo(v *Video) *StartCelebrityRecognitionInput {
15207	s.Video = v
15208	return s
15209}
15210
15211type StartCelebrityRecognitionOutput struct {
15212	_ struct{} `type:"structure"`
15213
15214	// The identifier for the celebrity recognition analysis job. Use JobId to identify
15215	// the job in a subsequent call to GetCelebrityRecognition.
15216	JobId *string `min:"1" type:"string"`
15217}
15218
15219// String returns the string representation
15220func (s StartCelebrityRecognitionOutput) String() string {
15221	return awsutil.Prettify(s)
15222}
15223
15224// GoString returns the string representation
15225func (s StartCelebrityRecognitionOutput) GoString() string {
15226	return s.String()
15227}
15228
15229// SetJobId sets the JobId field's value.
15230func (s *StartCelebrityRecognitionOutput) SetJobId(v string) *StartCelebrityRecognitionOutput {
15231	s.JobId = &v
15232	return s
15233}
15234
15235type StartContentModerationInput struct {
15236	_ struct{} `type:"structure"`
15237
15238	// Idempotent token used to identify the start request. If you use the same
15239	// token with multiple StartContentModeration requests, the same JobId is returned.
15240	// Use ClientRequestToken to prevent the same job from being accidently started
15241	// more than once.
15242	ClientRequestToken *string `min:"1" type:"string"`
15243
15244	// An identifier you specify that's returned in the completion notification
15245	// that's published to your Amazon Simple Notification Service topic. For example,
15246	// you can use JobTag to group related jobs and identify them in the completion
15247	// notification.
15248	JobTag *string `min:"1" type:"string"`
15249
15250	// Specifies the minimum confidence that Amazon Rekognition must have in order
15251	// to return a moderated content label. Confidence represents how certain Amazon
15252	// Rekognition is that the moderated content is correctly identified. 0 is the
15253	// lowest confidence. 100 is the highest confidence. Amazon Rekognition doesn't
15254	// return any moderated content labels with a confidence level lower than this
15255	// specified value. If you don't specify MinConfidence, GetContentModeration
15256	// returns labels with confidence values greater than or equal to 50 percent.
15257	MinConfidence *float64 `type:"float"`
15258
15259	// The Amazon SNS topic ARN that you want Amazon Rekognition Video to publish
15260	// the completion status of the unsafe content analysis to.
15261	NotificationChannel *NotificationChannel `type:"structure"`
15262
15263	// The video in which you want to detect unsafe content. The video must be stored
15264	// in an Amazon S3 bucket.
15265	//
15266	// Video is a required field
15267	Video *Video `type:"structure" required:"true"`
15268}
15269
15270// String returns the string representation
15271func (s StartContentModerationInput) String() string {
15272	return awsutil.Prettify(s)
15273}
15274
15275// GoString returns the string representation
15276func (s StartContentModerationInput) GoString() string {
15277	return s.String()
15278}
15279
15280// Validate inspects the fields of the type to determine if they are valid.
15281func (s *StartContentModerationInput) Validate() error {
15282	invalidParams := request.ErrInvalidParams{Context: "StartContentModerationInput"}
15283	if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 {
15284		invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1))
15285	}
15286	if s.JobTag != nil && len(*s.JobTag) < 1 {
15287		invalidParams.Add(request.NewErrParamMinLen("JobTag", 1))
15288	}
15289	if s.Video == nil {
15290		invalidParams.Add(request.NewErrParamRequired("Video"))
15291	}
15292	if s.NotificationChannel != nil {
15293		if err := s.NotificationChannel.Validate(); err != nil {
15294			invalidParams.AddNested("NotificationChannel", err.(request.ErrInvalidParams))
15295		}
15296	}
15297	if s.Video != nil {
15298		if err := s.Video.Validate(); err != nil {
15299			invalidParams.AddNested("Video", err.(request.ErrInvalidParams))
15300		}
15301	}
15302
15303	if invalidParams.Len() > 0 {
15304		return invalidParams
15305	}
15306	return nil
15307}
15308
15309// SetClientRequestToken sets the ClientRequestToken field's value.
15310func (s *StartContentModerationInput) SetClientRequestToken(v string) *StartContentModerationInput {
15311	s.ClientRequestToken = &v
15312	return s
15313}
15314
15315// SetJobTag sets the JobTag field's value.
15316func (s *StartContentModerationInput) SetJobTag(v string) *StartContentModerationInput {
15317	s.JobTag = &v
15318	return s
15319}
15320
15321// SetMinConfidence sets the MinConfidence field's value.
15322func (s *StartContentModerationInput) SetMinConfidence(v float64) *StartContentModerationInput {
15323	s.MinConfidence = &v
15324	return s
15325}
15326
15327// SetNotificationChannel sets the NotificationChannel field's value.
15328func (s *StartContentModerationInput) SetNotificationChannel(v *NotificationChannel) *StartContentModerationInput {
15329	s.NotificationChannel = v
15330	return s
15331}
15332
15333// SetVideo sets the Video field's value.
15334func (s *StartContentModerationInput) SetVideo(v *Video) *StartContentModerationInput {
15335	s.Video = v
15336	return s
15337}
15338
15339type StartContentModerationOutput struct {
15340	_ struct{} `type:"structure"`
15341
15342	// The identifier for the unsafe content analysis job. Use JobId to identify
15343	// the job in a subsequent call to GetContentModeration.
15344	JobId *string `min:"1" type:"string"`
15345}
15346
15347// String returns the string representation
15348func (s StartContentModerationOutput) String() string {
15349	return awsutil.Prettify(s)
15350}
15351
15352// GoString returns the string representation
15353func (s StartContentModerationOutput) GoString() string {
15354	return s.String()
15355}
15356
15357// SetJobId sets the JobId field's value.
15358func (s *StartContentModerationOutput) SetJobId(v string) *StartContentModerationOutput {
15359	s.JobId = &v
15360	return s
15361}
15362
15363type StartFaceDetectionInput struct {
15364	_ struct{} `type:"structure"`
15365
15366	// Idempotent token used to identify the start request. If you use the same
15367	// token with multiple StartFaceDetection requests, the same JobId is returned.
15368	// Use ClientRequestToken to prevent the same job from being accidently started
15369	// more than once.
15370	ClientRequestToken *string `min:"1" type:"string"`
15371
15372	// The face attributes you want returned.
15373	//
15374	// DEFAULT - The following subset of facial attributes are returned: BoundingBox,
15375	// Confidence, Pose, Quality and Landmarks.
15376	//
15377	// ALL - All facial attributes are returned.
15378	FaceAttributes *string `type:"string" enum:"FaceAttributes"`
15379
15380	// An identifier you specify that's returned in the completion notification
15381	// that's published to your Amazon Simple Notification Service topic. For example,
15382	// you can use JobTag to group related jobs and identify them in the completion
15383	// notification.
15384	JobTag *string `min:"1" type:"string"`
15385
15386	// The ARN of the Amazon SNS topic to which you want Amazon Rekognition Video
15387	// to publish the completion status of the face detection operation.
15388	NotificationChannel *NotificationChannel `type:"structure"`
15389
15390	// The video in which you want to detect faces. The video must be stored in
15391	// an Amazon S3 bucket.
15392	//
15393	// Video is a required field
15394	Video *Video `type:"structure" required:"true"`
15395}
15396
15397// String returns the string representation
15398func (s StartFaceDetectionInput) String() string {
15399	return awsutil.Prettify(s)
15400}
15401
15402// GoString returns the string representation
15403func (s StartFaceDetectionInput) GoString() string {
15404	return s.String()
15405}
15406
15407// Validate inspects the fields of the type to determine if they are valid.
15408func (s *StartFaceDetectionInput) Validate() error {
15409	invalidParams := request.ErrInvalidParams{Context: "StartFaceDetectionInput"}
15410	if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 {
15411		invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1))
15412	}
15413	if s.JobTag != nil && len(*s.JobTag) < 1 {
15414		invalidParams.Add(request.NewErrParamMinLen("JobTag", 1))
15415	}
15416	if s.Video == nil {
15417		invalidParams.Add(request.NewErrParamRequired("Video"))
15418	}
15419	if s.NotificationChannel != nil {
15420		if err := s.NotificationChannel.Validate(); err != nil {
15421			invalidParams.AddNested("NotificationChannel", err.(request.ErrInvalidParams))
15422		}
15423	}
15424	if s.Video != nil {
15425		if err := s.Video.Validate(); err != nil {
15426			invalidParams.AddNested("Video", err.(request.ErrInvalidParams))
15427		}
15428	}
15429
15430	if invalidParams.Len() > 0 {
15431		return invalidParams
15432	}
15433	return nil
15434}
15435
15436// SetClientRequestToken sets the ClientRequestToken field's value.
15437func (s *StartFaceDetectionInput) SetClientRequestToken(v string) *StartFaceDetectionInput {
15438	s.ClientRequestToken = &v
15439	return s
15440}
15441
15442// SetFaceAttributes sets the FaceAttributes field's value.
15443func (s *StartFaceDetectionInput) SetFaceAttributes(v string) *StartFaceDetectionInput {
15444	s.FaceAttributes = &v
15445	return s
15446}
15447
15448// SetJobTag sets the JobTag field's value.
15449func (s *StartFaceDetectionInput) SetJobTag(v string) *StartFaceDetectionInput {
15450	s.JobTag = &v
15451	return s
15452}
15453
15454// SetNotificationChannel sets the NotificationChannel field's value.
15455func (s *StartFaceDetectionInput) SetNotificationChannel(v *NotificationChannel) *StartFaceDetectionInput {
15456	s.NotificationChannel = v
15457	return s
15458}
15459
15460// SetVideo sets the Video field's value.
15461func (s *StartFaceDetectionInput) SetVideo(v *Video) *StartFaceDetectionInput {
15462	s.Video = v
15463	return s
15464}
15465
15466type StartFaceDetectionOutput struct {
15467	_ struct{} `type:"structure"`
15468
15469	// The identifier for the face detection job. Use JobId to identify the job
15470	// in a subsequent call to GetFaceDetection.
15471	JobId *string `min:"1" type:"string"`
15472}
15473
15474// String returns the string representation
15475func (s StartFaceDetectionOutput) String() string {
15476	return awsutil.Prettify(s)
15477}
15478
15479// GoString returns the string representation
15480func (s StartFaceDetectionOutput) GoString() string {
15481	return s.String()
15482}
15483
15484// SetJobId sets the JobId field's value.
15485func (s *StartFaceDetectionOutput) SetJobId(v string) *StartFaceDetectionOutput {
15486	s.JobId = &v
15487	return s
15488}
15489
15490type StartFaceSearchInput struct {
15491	_ struct{} `type:"structure"`
15492
15493	// Idempotent token used to identify the start request. If you use the same
15494	// token with multiple StartFaceSearch requests, the same JobId is returned.
15495	// Use ClientRequestToken to prevent the same job from being accidently started
15496	// more than once.
15497	ClientRequestToken *string `min:"1" type:"string"`
15498
15499	// ID of the collection that contains the faces you want to search for.
15500	//
15501	// CollectionId is a required field
15502	CollectionId *string `min:"1" type:"string" required:"true"`
15503
15504	// The minimum confidence in the person match to return. For example, don't
15505	// return any matches where confidence in matches is less than 70%. The default
15506	// value is 80%.
15507	FaceMatchThreshold *float64 `type:"float"`
15508
15509	// An identifier you specify that's returned in the completion notification
15510	// that's published to your Amazon Simple Notification Service topic. For example,
15511	// you can use JobTag to group related jobs and identify them in the completion
15512	// notification.
15513	JobTag *string `min:"1" type:"string"`
15514
15515	// The ARN of the Amazon SNS topic to which you want Amazon Rekognition Video
15516	// to publish the completion status of the search.
15517	NotificationChannel *NotificationChannel `type:"structure"`
15518
15519	// The video you want to search. The video must be stored in an Amazon S3 bucket.
15520	//
15521	// Video is a required field
15522	Video *Video `type:"structure" required:"true"`
15523}
15524
15525// String returns the string representation
15526func (s StartFaceSearchInput) String() string {
15527	return awsutil.Prettify(s)
15528}
15529
15530// GoString returns the string representation
15531func (s StartFaceSearchInput) GoString() string {
15532	return s.String()
15533}
15534
15535// Validate inspects the fields of the type to determine if they are valid.
15536func (s *StartFaceSearchInput) Validate() error {
15537	invalidParams := request.ErrInvalidParams{Context: "StartFaceSearchInput"}
15538	if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 {
15539		invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1))
15540	}
15541	if s.CollectionId == nil {
15542		invalidParams.Add(request.NewErrParamRequired("CollectionId"))
15543	}
15544	if s.CollectionId != nil && len(*s.CollectionId) < 1 {
15545		invalidParams.Add(request.NewErrParamMinLen("CollectionId", 1))
15546	}
15547	if s.JobTag != nil && len(*s.JobTag) < 1 {
15548		invalidParams.Add(request.NewErrParamMinLen("JobTag", 1))
15549	}
15550	if s.Video == nil {
15551		invalidParams.Add(request.NewErrParamRequired("Video"))
15552	}
15553	if s.NotificationChannel != nil {
15554		if err := s.NotificationChannel.Validate(); err != nil {
15555			invalidParams.AddNested("NotificationChannel", err.(request.ErrInvalidParams))
15556		}
15557	}
15558	if s.Video != nil {
15559		if err := s.Video.Validate(); err != nil {
15560			invalidParams.AddNested("Video", err.(request.ErrInvalidParams))
15561		}
15562	}
15563
15564	if invalidParams.Len() > 0 {
15565		return invalidParams
15566	}
15567	return nil
15568}
15569
15570// SetClientRequestToken sets the ClientRequestToken field's value.
15571func (s *StartFaceSearchInput) SetClientRequestToken(v string) *StartFaceSearchInput {
15572	s.ClientRequestToken = &v
15573	return s
15574}
15575
15576// SetCollectionId sets the CollectionId field's value.
15577func (s *StartFaceSearchInput) SetCollectionId(v string) *StartFaceSearchInput {
15578	s.CollectionId = &v
15579	return s
15580}
15581
15582// SetFaceMatchThreshold sets the FaceMatchThreshold field's value.
15583func (s *StartFaceSearchInput) SetFaceMatchThreshold(v float64) *StartFaceSearchInput {
15584	s.FaceMatchThreshold = &v
15585	return s
15586}
15587
15588// SetJobTag sets the JobTag field's value.
15589func (s *StartFaceSearchInput) SetJobTag(v string) *StartFaceSearchInput {
15590	s.JobTag = &v
15591	return s
15592}
15593
15594// SetNotificationChannel sets the NotificationChannel field's value.
15595func (s *StartFaceSearchInput) SetNotificationChannel(v *NotificationChannel) *StartFaceSearchInput {
15596	s.NotificationChannel = v
15597	return s
15598}
15599
15600// SetVideo sets the Video field's value.
15601func (s *StartFaceSearchInput) SetVideo(v *Video) *StartFaceSearchInput {
15602	s.Video = v
15603	return s
15604}
15605
15606type StartFaceSearchOutput struct {
15607	_ struct{} `type:"structure"`
15608
15609	// The identifier for the search job. Use JobId to identify the job in a subsequent
15610	// call to GetFaceSearch.
15611	JobId *string `min:"1" type:"string"`
15612}
15613
15614// String returns the string representation
15615func (s StartFaceSearchOutput) String() string {
15616	return awsutil.Prettify(s)
15617}
15618
15619// GoString returns the string representation
15620func (s StartFaceSearchOutput) GoString() string {
15621	return s.String()
15622}
15623
15624// SetJobId sets the JobId field's value.
15625func (s *StartFaceSearchOutput) SetJobId(v string) *StartFaceSearchOutput {
15626	s.JobId = &v
15627	return s
15628}
15629
15630type StartLabelDetectionInput struct {
15631	_ struct{} `type:"structure"`
15632
15633	// Idempotent token used to identify the start request. If you use the same
15634	// token with multiple StartLabelDetection requests, the same JobId is returned.
15635	// Use ClientRequestToken to prevent the same job from being accidently started
15636	// more than once.
15637	ClientRequestToken *string `min:"1" type:"string"`
15638
15639	// An identifier you specify that's returned in the completion notification
15640	// that's published to your Amazon Simple Notification Service topic. For example,
15641	// you can use JobTag to group related jobs and identify them in the completion
15642	// notification.
15643	JobTag *string `min:"1" type:"string"`
15644
15645	// Specifies the minimum confidence that Amazon Rekognition Video must have
15646	// in order to return a detected label. Confidence represents how certain Amazon
15647	// Rekognition is that a label is correctly identified.0 is the lowest confidence.
15648	// 100 is the highest confidence. Amazon Rekognition Video doesn't return any
15649	// labels with a confidence level lower than this specified value.
15650	//
15651	// If you don't specify MinConfidence, the operation returns labels with confidence
15652	// values greater than or equal to 50 percent.
15653	MinConfidence *float64 `type:"float"`
15654
15655	// The Amazon SNS topic ARN you want Amazon Rekognition Video to publish the
15656	// completion status of the label detection operation to.
15657	NotificationChannel *NotificationChannel `type:"structure"`
15658
15659	// The video in which you want to detect labels. The video must be stored in
15660	// an Amazon S3 bucket.
15661	//
15662	// Video is a required field
15663	Video *Video `type:"structure" required:"true"`
15664}
15665
15666// String returns the string representation
15667func (s StartLabelDetectionInput) String() string {
15668	return awsutil.Prettify(s)
15669}
15670
15671// GoString returns the string representation
15672func (s StartLabelDetectionInput) GoString() string {
15673	return s.String()
15674}
15675
15676// Validate inspects the fields of the type to determine if they are valid.
15677func (s *StartLabelDetectionInput) Validate() error {
15678	invalidParams := request.ErrInvalidParams{Context: "StartLabelDetectionInput"}
15679	if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 {
15680		invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1))
15681	}
15682	if s.JobTag != nil && len(*s.JobTag) < 1 {
15683		invalidParams.Add(request.NewErrParamMinLen("JobTag", 1))
15684	}
15685	if s.Video == nil {
15686		invalidParams.Add(request.NewErrParamRequired("Video"))
15687	}
15688	if s.NotificationChannel != nil {
15689		if err := s.NotificationChannel.Validate(); err != nil {
15690			invalidParams.AddNested("NotificationChannel", err.(request.ErrInvalidParams))
15691		}
15692	}
15693	if s.Video != nil {
15694		if err := s.Video.Validate(); err != nil {
15695			invalidParams.AddNested("Video", err.(request.ErrInvalidParams))
15696		}
15697	}
15698
15699	if invalidParams.Len() > 0 {
15700		return invalidParams
15701	}
15702	return nil
15703}
15704
15705// SetClientRequestToken sets the ClientRequestToken field's value.
15706func (s *StartLabelDetectionInput) SetClientRequestToken(v string) *StartLabelDetectionInput {
15707	s.ClientRequestToken = &v
15708	return s
15709}
15710
15711// SetJobTag sets the JobTag field's value.
15712func (s *StartLabelDetectionInput) SetJobTag(v string) *StartLabelDetectionInput {
15713	s.JobTag = &v
15714	return s
15715}
15716
15717// SetMinConfidence sets the MinConfidence field's value.
15718func (s *StartLabelDetectionInput) SetMinConfidence(v float64) *StartLabelDetectionInput {
15719	s.MinConfidence = &v
15720	return s
15721}
15722
15723// SetNotificationChannel sets the NotificationChannel field's value.
15724func (s *StartLabelDetectionInput) SetNotificationChannel(v *NotificationChannel) *StartLabelDetectionInput {
15725	s.NotificationChannel = v
15726	return s
15727}
15728
15729// SetVideo sets the Video field's value.
15730func (s *StartLabelDetectionInput) SetVideo(v *Video) *StartLabelDetectionInput {
15731	s.Video = v
15732	return s
15733}
15734
15735type StartLabelDetectionOutput struct {
15736	_ struct{} `type:"structure"`
15737
15738	// The identifier for the label detection job. Use JobId to identify the job
15739	// in a subsequent call to GetLabelDetection.
15740	JobId *string `min:"1" type:"string"`
15741}
15742
15743// String returns the string representation
15744func (s StartLabelDetectionOutput) String() string {
15745	return awsutil.Prettify(s)
15746}
15747
15748// GoString returns the string representation
15749func (s StartLabelDetectionOutput) GoString() string {
15750	return s.String()
15751}
15752
15753// SetJobId sets the JobId field's value.
15754func (s *StartLabelDetectionOutput) SetJobId(v string) *StartLabelDetectionOutput {
15755	s.JobId = &v
15756	return s
15757}
15758
15759type StartPersonTrackingInput struct {
15760	_ struct{} `type:"structure"`
15761
15762	// Idempotent token used to identify the start request. If you use the same
15763	// token with multiple StartPersonTracking requests, the same JobId is returned.
15764	// Use ClientRequestToken to prevent the same job from being accidently started
15765	// more than once.
15766	ClientRequestToken *string `min:"1" type:"string"`
15767
15768	// An identifier you specify that's returned in the completion notification
15769	// that's published to your Amazon Simple Notification Service topic. For example,
15770	// you can use JobTag to group related jobs and identify them in the completion
15771	// notification.
15772	JobTag *string `min:"1" type:"string"`
15773
15774	// The Amazon SNS topic ARN you want Amazon Rekognition Video to publish the
15775	// completion status of the people detection operation to.
15776	NotificationChannel *NotificationChannel `type:"structure"`
15777
15778	// The video in which you want to detect people. The video must be stored in
15779	// an Amazon S3 bucket.
15780	//
15781	// Video is a required field
15782	Video *Video `type:"structure" required:"true"`
15783}
15784
15785// String returns the string representation
15786func (s StartPersonTrackingInput) String() string {
15787	return awsutil.Prettify(s)
15788}
15789
15790// GoString returns the string representation
15791func (s StartPersonTrackingInput) GoString() string {
15792	return s.String()
15793}
15794
15795// Validate inspects the fields of the type to determine if they are valid.
15796func (s *StartPersonTrackingInput) Validate() error {
15797	invalidParams := request.ErrInvalidParams{Context: "StartPersonTrackingInput"}
15798	if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 {
15799		invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1))
15800	}
15801	if s.JobTag != nil && len(*s.JobTag) < 1 {
15802		invalidParams.Add(request.NewErrParamMinLen("JobTag", 1))
15803	}
15804	if s.Video == nil {
15805		invalidParams.Add(request.NewErrParamRequired("Video"))
15806	}
15807	if s.NotificationChannel != nil {
15808		if err := s.NotificationChannel.Validate(); err != nil {
15809			invalidParams.AddNested("NotificationChannel", err.(request.ErrInvalidParams))
15810		}
15811	}
15812	if s.Video != nil {
15813		if err := s.Video.Validate(); err != nil {
15814			invalidParams.AddNested("Video", err.(request.ErrInvalidParams))
15815		}
15816	}
15817
15818	if invalidParams.Len() > 0 {
15819		return invalidParams
15820	}
15821	return nil
15822}
15823
15824// SetClientRequestToken sets the ClientRequestToken field's value.
15825func (s *StartPersonTrackingInput) SetClientRequestToken(v string) *StartPersonTrackingInput {
15826	s.ClientRequestToken = &v
15827	return s
15828}
15829
15830// SetJobTag sets the JobTag field's value.
15831func (s *StartPersonTrackingInput) SetJobTag(v string) *StartPersonTrackingInput {
15832	s.JobTag = &v
15833	return s
15834}
15835
15836// SetNotificationChannel sets the NotificationChannel field's value.
15837func (s *StartPersonTrackingInput) SetNotificationChannel(v *NotificationChannel) *StartPersonTrackingInput {
15838	s.NotificationChannel = v
15839	return s
15840}
15841
15842// SetVideo sets the Video field's value.
15843func (s *StartPersonTrackingInput) SetVideo(v *Video) *StartPersonTrackingInput {
15844	s.Video = v
15845	return s
15846}
15847
15848type StartPersonTrackingOutput struct {
15849	_ struct{} `type:"structure"`
15850
15851	// The identifier for the person detection job. Use JobId to identify the job
15852	// in a subsequent call to GetPersonTracking.
15853	JobId *string `min:"1" type:"string"`
15854}
15855
15856// String returns the string representation
15857func (s StartPersonTrackingOutput) String() string {
15858	return awsutil.Prettify(s)
15859}
15860
15861// GoString returns the string representation
15862func (s StartPersonTrackingOutput) GoString() string {
15863	return s.String()
15864}
15865
15866// SetJobId sets the JobId field's value.
15867func (s *StartPersonTrackingOutput) SetJobId(v string) *StartPersonTrackingOutput {
15868	s.JobId = &v
15869	return s
15870}
15871
15872type StartProjectVersionInput struct {
15873	_ struct{} `type:"structure"`
15874
15875	// The minimum number of inference units to use. A single inference unit represents
15876	// 1 hour of processing and can support up to 5 Transaction Pers Second (TPS).
15877	// Use a higher number to increase the TPS throughput of your model. You are
15878	// charged for the number of inference units that you use.
15879	//
15880	// MinInferenceUnits is a required field
15881	MinInferenceUnits *int64 `min:"1" type:"integer" required:"true"`
15882
15883	// The Amazon Resource Name(ARN) of the model version that you want to start.
15884	//
15885	// ProjectVersionArn is a required field
15886	ProjectVersionArn *string `min:"20" type:"string" required:"true"`
15887}
15888
15889// String returns the string representation
15890func (s StartProjectVersionInput) String() string {
15891	return awsutil.Prettify(s)
15892}
15893
15894// GoString returns the string representation
15895func (s StartProjectVersionInput) GoString() string {
15896	return s.String()
15897}
15898
15899// Validate inspects the fields of the type to determine if they are valid.
15900func (s *StartProjectVersionInput) Validate() error {
15901	invalidParams := request.ErrInvalidParams{Context: "StartProjectVersionInput"}
15902	if s.MinInferenceUnits == nil {
15903		invalidParams.Add(request.NewErrParamRequired("MinInferenceUnits"))
15904	}
15905	if s.MinInferenceUnits != nil && *s.MinInferenceUnits < 1 {
15906		invalidParams.Add(request.NewErrParamMinValue("MinInferenceUnits", 1))
15907	}
15908	if s.ProjectVersionArn == nil {
15909		invalidParams.Add(request.NewErrParamRequired("ProjectVersionArn"))
15910	}
15911	if s.ProjectVersionArn != nil && len(*s.ProjectVersionArn) < 20 {
15912		invalidParams.Add(request.NewErrParamMinLen("ProjectVersionArn", 20))
15913	}
15914
15915	if invalidParams.Len() > 0 {
15916		return invalidParams
15917	}
15918	return nil
15919}
15920
15921// SetMinInferenceUnits sets the MinInferenceUnits field's value.
15922func (s *StartProjectVersionInput) SetMinInferenceUnits(v int64) *StartProjectVersionInput {
15923	s.MinInferenceUnits = &v
15924	return s
15925}
15926
15927// SetProjectVersionArn sets the ProjectVersionArn field's value.
15928func (s *StartProjectVersionInput) SetProjectVersionArn(v string) *StartProjectVersionInput {
15929	s.ProjectVersionArn = &v
15930	return s
15931}
15932
15933type StartProjectVersionOutput struct {
15934	_ struct{} `type:"structure"`
15935
15936	// The current running status of the model.
15937	Status *string `type:"string" enum:"ProjectVersionStatus"`
15938}
15939
15940// String returns the string representation
15941func (s StartProjectVersionOutput) String() string {
15942	return awsutil.Prettify(s)
15943}
15944
15945// GoString returns the string representation
15946func (s StartProjectVersionOutput) GoString() string {
15947	return s.String()
15948}
15949
15950// SetStatus sets the Status field's value.
15951func (s *StartProjectVersionOutput) SetStatus(v string) *StartProjectVersionOutput {
15952	s.Status = &v
15953	return s
15954}
15955
15956// Filters applied to the technical cue or shot detection segments. For more
15957// information, see StartSegmentDetection.
15958type StartSegmentDetectionFilters struct {
15959	_ struct{} `type:"structure"`
15960
15961	// Filters that are specific to shot detections.
15962	ShotFilter *StartShotDetectionFilter `type:"structure"`
15963
15964	// Filters that are specific to technical cues.
15965	TechnicalCueFilter *StartTechnicalCueDetectionFilter `type:"structure"`
15966}
15967
15968// String returns the string representation
15969func (s StartSegmentDetectionFilters) String() string {
15970	return awsutil.Prettify(s)
15971}
15972
15973// GoString returns the string representation
15974func (s StartSegmentDetectionFilters) GoString() string {
15975	return s.String()
15976}
15977
15978// Validate inspects the fields of the type to determine if they are valid.
15979func (s *StartSegmentDetectionFilters) Validate() error {
15980	invalidParams := request.ErrInvalidParams{Context: "StartSegmentDetectionFilters"}
15981	if s.ShotFilter != nil {
15982		if err := s.ShotFilter.Validate(); err != nil {
15983			invalidParams.AddNested("ShotFilter", err.(request.ErrInvalidParams))
15984		}
15985	}
15986	if s.TechnicalCueFilter != nil {
15987		if err := s.TechnicalCueFilter.Validate(); err != nil {
15988			invalidParams.AddNested("TechnicalCueFilter", err.(request.ErrInvalidParams))
15989		}
15990	}
15991
15992	if invalidParams.Len() > 0 {
15993		return invalidParams
15994	}
15995	return nil
15996}
15997
15998// SetShotFilter sets the ShotFilter field's value.
15999func (s *StartSegmentDetectionFilters) SetShotFilter(v *StartShotDetectionFilter) *StartSegmentDetectionFilters {
16000	s.ShotFilter = v
16001	return s
16002}
16003
16004// SetTechnicalCueFilter sets the TechnicalCueFilter field's value.
16005func (s *StartSegmentDetectionFilters) SetTechnicalCueFilter(v *StartTechnicalCueDetectionFilter) *StartSegmentDetectionFilters {
16006	s.TechnicalCueFilter = v
16007	return s
16008}
16009
16010type StartSegmentDetectionInput struct {
16011	_ struct{} `type:"structure"`
16012
16013	// Idempotent token used to identify the start request. If you use the same
16014	// token with multiple StartSegmentDetection requests, the same JobId is returned.
16015	// Use ClientRequestToken to prevent the same job from being accidently started
16016	// more than once.
16017	ClientRequestToken *string `min:"1" type:"string"`
16018
16019	// Filters for technical cue or shot detection.
16020	Filters *StartSegmentDetectionFilters `type:"structure"`
16021
16022	// An identifier you specify that's returned in the completion notification
16023	// that's published to your Amazon Simple Notification Service topic. For example,
16024	// you can use JobTag to group related jobs and identify them in the completion
16025	// notification.
16026	JobTag *string `min:"1" type:"string"`
16027
16028	// The ARN of the Amazon SNS topic to which you want Amazon Rekognition Video
16029	// to publish the completion status of the segment detection operation.
16030	NotificationChannel *NotificationChannel `type:"structure"`
16031
16032	// An array of segment types to detect in the video. Valid values are TECHNICAL_CUE
16033	// and SHOT.
16034	//
16035	// SegmentTypes is a required field
16036	SegmentTypes []*string `min:"1" type:"list" required:"true"`
16037
16038	// Video file stored in an Amazon S3 bucket. Amazon Rekognition video start
16039	// operations such as StartLabelDetection use Video to specify a video for analysis.
16040	// The supported file formats are .mp4, .mov and .avi.
16041	//
16042	// Video is a required field
16043	Video *Video `type:"structure" required:"true"`
16044}
16045
16046// String returns the string representation
16047func (s StartSegmentDetectionInput) String() string {
16048	return awsutil.Prettify(s)
16049}
16050
16051// GoString returns the string representation
16052func (s StartSegmentDetectionInput) GoString() string {
16053	return s.String()
16054}
16055
16056// Validate inspects the fields of the type to determine if they are valid.
16057func (s *StartSegmentDetectionInput) Validate() error {
16058	invalidParams := request.ErrInvalidParams{Context: "StartSegmentDetectionInput"}
16059	if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 {
16060		invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1))
16061	}
16062	if s.JobTag != nil && len(*s.JobTag) < 1 {
16063		invalidParams.Add(request.NewErrParamMinLen("JobTag", 1))
16064	}
16065	if s.SegmentTypes == nil {
16066		invalidParams.Add(request.NewErrParamRequired("SegmentTypes"))
16067	}
16068	if s.SegmentTypes != nil && len(s.SegmentTypes) < 1 {
16069		invalidParams.Add(request.NewErrParamMinLen("SegmentTypes", 1))
16070	}
16071	if s.Video == nil {
16072		invalidParams.Add(request.NewErrParamRequired("Video"))
16073	}
16074	if s.Filters != nil {
16075		if err := s.Filters.Validate(); err != nil {
16076			invalidParams.AddNested("Filters", err.(request.ErrInvalidParams))
16077		}
16078	}
16079	if s.NotificationChannel != nil {
16080		if err := s.NotificationChannel.Validate(); err != nil {
16081			invalidParams.AddNested("NotificationChannel", err.(request.ErrInvalidParams))
16082		}
16083	}
16084	if s.Video != nil {
16085		if err := s.Video.Validate(); err != nil {
16086			invalidParams.AddNested("Video", err.(request.ErrInvalidParams))
16087		}
16088	}
16089
16090	if invalidParams.Len() > 0 {
16091		return invalidParams
16092	}
16093	return nil
16094}
16095
16096// SetClientRequestToken sets the ClientRequestToken field's value.
16097func (s *StartSegmentDetectionInput) SetClientRequestToken(v string) *StartSegmentDetectionInput {
16098	s.ClientRequestToken = &v
16099	return s
16100}
16101
16102// SetFilters sets the Filters field's value.
16103func (s *StartSegmentDetectionInput) SetFilters(v *StartSegmentDetectionFilters) *StartSegmentDetectionInput {
16104	s.Filters = v
16105	return s
16106}
16107
16108// SetJobTag sets the JobTag field's value.
16109func (s *StartSegmentDetectionInput) SetJobTag(v string) *StartSegmentDetectionInput {
16110	s.JobTag = &v
16111	return s
16112}
16113
16114// SetNotificationChannel sets the NotificationChannel field's value.
16115func (s *StartSegmentDetectionInput) SetNotificationChannel(v *NotificationChannel) *StartSegmentDetectionInput {
16116	s.NotificationChannel = v
16117	return s
16118}
16119
16120// SetSegmentTypes sets the SegmentTypes field's value.
16121func (s *StartSegmentDetectionInput) SetSegmentTypes(v []*string) *StartSegmentDetectionInput {
16122	s.SegmentTypes = v
16123	return s
16124}
16125
16126// SetVideo sets the Video field's value.
16127func (s *StartSegmentDetectionInput) SetVideo(v *Video) *StartSegmentDetectionInput {
16128	s.Video = v
16129	return s
16130}
16131
16132type StartSegmentDetectionOutput struct {
16133	_ struct{} `type:"structure"`
16134
16135	// Unique identifier for the segment detection job. The JobId is returned from
16136	// StartSegmentDetection.
16137	JobId *string `min:"1" type:"string"`
16138}
16139
16140// String returns the string representation
16141func (s StartSegmentDetectionOutput) String() string {
16142	return awsutil.Prettify(s)
16143}
16144
16145// GoString returns the string representation
16146func (s StartSegmentDetectionOutput) GoString() string {
16147	return s.String()
16148}
16149
16150// SetJobId sets the JobId field's value.
16151func (s *StartSegmentDetectionOutput) SetJobId(v string) *StartSegmentDetectionOutput {
16152	s.JobId = &v
16153	return s
16154}
16155
16156// Filters for the shot detection segments returned by GetSegmentDetection.
16157// For more information, see StartSegmentDetectionFilters.
16158type StartShotDetectionFilter struct {
16159	_ struct{} `type:"structure"`
16160
16161	// Specifies the minimum confidence that Amazon Rekognition Video must have
16162	// in order to return a detected segment. Confidence represents how certain
16163	// Amazon Rekognition is that a segment is correctly identified. 0 is the lowest
16164	// confidence. 100 is the highest confidence. Amazon Rekognition Video doesn't
16165	// return any segments with a confidence level lower than this specified value.
16166	//
16167	// If you don't specify MinSegmentConfidence, the GetSegmentDetection returns
16168	// segments with confidence values greater than or equal to 50 percent.
16169	MinSegmentConfidence *float64 `min:"50" type:"float"`
16170}
16171
16172// String returns the string representation
16173func (s StartShotDetectionFilter) String() string {
16174	return awsutil.Prettify(s)
16175}
16176
16177// GoString returns the string representation
16178func (s StartShotDetectionFilter) GoString() string {
16179	return s.String()
16180}
16181
16182// Validate inspects the fields of the type to determine if they are valid.
16183func (s *StartShotDetectionFilter) Validate() error {
16184	invalidParams := request.ErrInvalidParams{Context: "StartShotDetectionFilter"}
16185	if s.MinSegmentConfidence != nil && *s.MinSegmentConfidence < 50 {
16186		invalidParams.Add(request.NewErrParamMinValue("MinSegmentConfidence", 50))
16187	}
16188
16189	if invalidParams.Len() > 0 {
16190		return invalidParams
16191	}
16192	return nil
16193}
16194
16195// SetMinSegmentConfidence sets the MinSegmentConfidence field's value.
16196func (s *StartShotDetectionFilter) SetMinSegmentConfidence(v float64) *StartShotDetectionFilter {
16197	s.MinSegmentConfidence = &v
16198	return s
16199}
16200
16201type StartStreamProcessorInput struct {
16202	_ struct{} `type:"structure"`
16203
16204	// The name of the stream processor to start processing.
16205	//
16206	// Name is a required field
16207	Name *string `min:"1" type:"string" required:"true"`
16208}
16209
16210// String returns the string representation
16211func (s StartStreamProcessorInput) String() string {
16212	return awsutil.Prettify(s)
16213}
16214
16215// GoString returns the string representation
16216func (s StartStreamProcessorInput) GoString() string {
16217	return s.String()
16218}
16219
16220// Validate inspects the fields of the type to determine if they are valid.
16221func (s *StartStreamProcessorInput) Validate() error {
16222	invalidParams := request.ErrInvalidParams{Context: "StartStreamProcessorInput"}
16223	if s.Name == nil {
16224		invalidParams.Add(request.NewErrParamRequired("Name"))
16225	}
16226	if s.Name != nil && len(*s.Name) < 1 {
16227		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
16228	}
16229
16230	if invalidParams.Len() > 0 {
16231		return invalidParams
16232	}
16233	return nil
16234}
16235
16236// SetName sets the Name field's value.
16237func (s *StartStreamProcessorInput) SetName(v string) *StartStreamProcessorInput {
16238	s.Name = &v
16239	return s
16240}
16241
16242type StartStreamProcessorOutput struct {
16243	_ struct{} `type:"structure"`
16244}
16245
16246// String returns the string representation
16247func (s StartStreamProcessorOutput) String() string {
16248	return awsutil.Prettify(s)
16249}
16250
16251// GoString returns the string representation
16252func (s StartStreamProcessorOutput) GoString() string {
16253	return s.String()
16254}
16255
16256// Filters for the technical segments returned by GetSegmentDetection. For more
16257// information, see StartSegmentDetectionFilters.
16258type StartTechnicalCueDetectionFilter struct {
16259	_ struct{} `type:"structure"`
16260
16261	// Specifies the minimum confidence that Amazon Rekognition Video must have
16262	// in order to return a detected segment. Confidence represents how certain
16263	// Amazon Rekognition is that a segment is correctly identified. 0 is the lowest
16264	// confidence. 100 is the highest confidence. Amazon Rekognition Video doesn't
16265	// return any segments with a confidence level lower than this specified value.
16266	//
16267	// If you don't specify MinSegmentConfidence, GetSegmentDetection returns segments
16268	// with confidence values greater than or equal to 50 percent.
16269	MinSegmentConfidence *float64 `min:"50" type:"float"`
16270}
16271
16272// String returns the string representation
16273func (s StartTechnicalCueDetectionFilter) String() string {
16274	return awsutil.Prettify(s)
16275}
16276
16277// GoString returns the string representation
16278func (s StartTechnicalCueDetectionFilter) GoString() string {
16279	return s.String()
16280}
16281
16282// Validate inspects the fields of the type to determine if they are valid.
16283func (s *StartTechnicalCueDetectionFilter) Validate() error {
16284	invalidParams := request.ErrInvalidParams{Context: "StartTechnicalCueDetectionFilter"}
16285	if s.MinSegmentConfidence != nil && *s.MinSegmentConfidence < 50 {
16286		invalidParams.Add(request.NewErrParamMinValue("MinSegmentConfidence", 50))
16287	}
16288
16289	if invalidParams.Len() > 0 {
16290		return invalidParams
16291	}
16292	return nil
16293}
16294
16295// SetMinSegmentConfidence sets the MinSegmentConfidence field's value.
16296func (s *StartTechnicalCueDetectionFilter) SetMinSegmentConfidence(v float64) *StartTechnicalCueDetectionFilter {
16297	s.MinSegmentConfidence = &v
16298	return s
16299}
16300
16301// Set of optional parameters that let you set the criteria text must meet to
16302// be included in your response. WordFilter looks at a word's height, width
16303// and minimum confidence. RegionOfInterest lets you set a specific region of
16304// the screen to look for text in.
16305type StartTextDetectionFilters struct {
16306	_ struct{} `type:"structure"`
16307
16308	// Filter focusing on a certain area of the frame. Uses a BoundingBox object
16309	// to set the region of the screen.
16310	RegionsOfInterest []*RegionOfInterest `type:"list"`
16311
16312	// Filters focusing on qualities of the text, such as confidence or size.
16313	WordFilter *DetectionFilter `type:"structure"`
16314}
16315
16316// String returns the string representation
16317func (s StartTextDetectionFilters) String() string {
16318	return awsutil.Prettify(s)
16319}
16320
16321// GoString returns the string representation
16322func (s StartTextDetectionFilters) GoString() string {
16323	return s.String()
16324}
16325
16326// SetRegionsOfInterest sets the RegionsOfInterest field's value.
16327func (s *StartTextDetectionFilters) SetRegionsOfInterest(v []*RegionOfInterest) *StartTextDetectionFilters {
16328	s.RegionsOfInterest = v
16329	return s
16330}
16331
16332// SetWordFilter sets the WordFilter field's value.
16333func (s *StartTextDetectionFilters) SetWordFilter(v *DetectionFilter) *StartTextDetectionFilters {
16334	s.WordFilter = v
16335	return s
16336}
16337
16338type StartTextDetectionInput struct {
16339	_ struct{} `type:"structure"`
16340
16341	// Idempotent token used to identify the start request. If you use the same
16342	// token with multiple StartTextDetection requests, the same JobId is returned.
16343	// Use ClientRequestToken to prevent the same job from being accidentaly started
16344	// more than once.
16345	ClientRequestToken *string `min:"1" type:"string"`
16346
16347	// Optional parameters that let you set criteria the text must meet to be included
16348	// in your response.
16349	Filters *StartTextDetectionFilters `type:"structure"`
16350
16351	// An identifier returned in the completion status published by your Amazon
16352	// Simple Notification Service topic. For example, you can use JobTag to group
16353	// related jobs and identify them in the completion notification.
16354	JobTag *string `min:"1" type:"string"`
16355
16356	// The Amazon Simple Notification Service topic to which Amazon Rekognition
16357	// publishes the completion status of a video analysis operation. For more information,
16358	// see api-video.
16359	NotificationChannel *NotificationChannel `type:"structure"`
16360
16361	// Video file stored in an Amazon S3 bucket. Amazon Rekognition video start
16362	// operations such as StartLabelDetection use Video to specify a video for analysis.
16363	// The supported file formats are .mp4, .mov and .avi.
16364	//
16365	// Video is a required field
16366	Video *Video `type:"structure" required:"true"`
16367}
16368
16369// String returns the string representation
16370func (s StartTextDetectionInput) String() string {
16371	return awsutil.Prettify(s)
16372}
16373
16374// GoString returns the string representation
16375func (s StartTextDetectionInput) GoString() string {
16376	return s.String()
16377}
16378
16379// Validate inspects the fields of the type to determine if they are valid.
16380func (s *StartTextDetectionInput) Validate() error {
16381	invalidParams := request.ErrInvalidParams{Context: "StartTextDetectionInput"}
16382	if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 {
16383		invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1))
16384	}
16385	if s.JobTag != nil && len(*s.JobTag) < 1 {
16386		invalidParams.Add(request.NewErrParamMinLen("JobTag", 1))
16387	}
16388	if s.Video == nil {
16389		invalidParams.Add(request.NewErrParamRequired("Video"))
16390	}
16391	if s.NotificationChannel != nil {
16392		if err := s.NotificationChannel.Validate(); err != nil {
16393			invalidParams.AddNested("NotificationChannel", err.(request.ErrInvalidParams))
16394		}
16395	}
16396	if s.Video != nil {
16397		if err := s.Video.Validate(); err != nil {
16398			invalidParams.AddNested("Video", err.(request.ErrInvalidParams))
16399		}
16400	}
16401
16402	if invalidParams.Len() > 0 {
16403		return invalidParams
16404	}
16405	return nil
16406}
16407
16408// SetClientRequestToken sets the ClientRequestToken field's value.
16409func (s *StartTextDetectionInput) SetClientRequestToken(v string) *StartTextDetectionInput {
16410	s.ClientRequestToken = &v
16411	return s
16412}
16413
16414// SetFilters sets the Filters field's value.
16415func (s *StartTextDetectionInput) SetFilters(v *StartTextDetectionFilters) *StartTextDetectionInput {
16416	s.Filters = v
16417	return s
16418}
16419
16420// SetJobTag sets the JobTag field's value.
16421func (s *StartTextDetectionInput) SetJobTag(v string) *StartTextDetectionInput {
16422	s.JobTag = &v
16423	return s
16424}
16425
16426// SetNotificationChannel sets the NotificationChannel field's value.
16427func (s *StartTextDetectionInput) SetNotificationChannel(v *NotificationChannel) *StartTextDetectionInput {
16428	s.NotificationChannel = v
16429	return s
16430}
16431
16432// SetVideo sets the Video field's value.
16433func (s *StartTextDetectionInput) SetVideo(v *Video) *StartTextDetectionInput {
16434	s.Video = v
16435	return s
16436}
16437
16438type StartTextDetectionOutput struct {
16439	_ struct{} `type:"structure"`
16440
16441	// Identifier for the text detection job. Use JobId to identify the job in a
16442	// subsequent call to GetTextDetection.
16443	JobId *string `min:"1" type:"string"`
16444}
16445
16446// String returns the string representation
16447func (s StartTextDetectionOutput) String() string {
16448	return awsutil.Prettify(s)
16449}
16450
16451// GoString returns the string representation
16452func (s StartTextDetectionOutput) GoString() string {
16453	return s.String()
16454}
16455
16456// SetJobId sets the JobId field's value.
16457func (s *StartTextDetectionOutput) SetJobId(v string) *StartTextDetectionOutput {
16458	s.JobId = &v
16459	return s
16460}
16461
16462type StopProjectVersionInput struct {
16463	_ struct{} `type:"structure"`
16464
16465	// The Amazon Resource Name (ARN) of the model version that you want to delete.
16466	//
16467	// This operation requires permissions to perform the rekognition:StopProjectVersion
16468	// action.
16469	//
16470	// ProjectVersionArn is a required field
16471	ProjectVersionArn *string `min:"20" type:"string" required:"true"`
16472}
16473
16474// String returns the string representation
16475func (s StopProjectVersionInput) String() string {
16476	return awsutil.Prettify(s)
16477}
16478
16479// GoString returns the string representation
16480func (s StopProjectVersionInput) GoString() string {
16481	return s.String()
16482}
16483
16484// Validate inspects the fields of the type to determine if they are valid.
16485func (s *StopProjectVersionInput) Validate() error {
16486	invalidParams := request.ErrInvalidParams{Context: "StopProjectVersionInput"}
16487	if s.ProjectVersionArn == nil {
16488		invalidParams.Add(request.NewErrParamRequired("ProjectVersionArn"))
16489	}
16490	if s.ProjectVersionArn != nil && len(*s.ProjectVersionArn) < 20 {
16491		invalidParams.Add(request.NewErrParamMinLen("ProjectVersionArn", 20))
16492	}
16493
16494	if invalidParams.Len() > 0 {
16495		return invalidParams
16496	}
16497	return nil
16498}
16499
16500// SetProjectVersionArn sets the ProjectVersionArn field's value.
16501func (s *StopProjectVersionInput) SetProjectVersionArn(v string) *StopProjectVersionInput {
16502	s.ProjectVersionArn = &v
16503	return s
16504}
16505
16506type StopProjectVersionOutput struct {
16507	_ struct{} `type:"structure"`
16508
16509	// The current status of the stop operation.
16510	Status *string `type:"string" enum:"ProjectVersionStatus"`
16511}
16512
16513// String returns the string representation
16514func (s StopProjectVersionOutput) String() string {
16515	return awsutil.Prettify(s)
16516}
16517
16518// GoString returns the string representation
16519func (s StopProjectVersionOutput) GoString() string {
16520	return s.String()
16521}
16522
16523// SetStatus sets the Status field's value.
16524func (s *StopProjectVersionOutput) SetStatus(v string) *StopProjectVersionOutput {
16525	s.Status = &v
16526	return s
16527}
16528
16529type StopStreamProcessorInput struct {
16530	_ struct{} `type:"structure"`
16531
16532	// The name of a stream processor created by CreateStreamProcessor.
16533	//
16534	// Name is a required field
16535	Name *string `min:"1" type:"string" required:"true"`
16536}
16537
16538// String returns the string representation
16539func (s StopStreamProcessorInput) String() string {
16540	return awsutil.Prettify(s)
16541}
16542
16543// GoString returns the string representation
16544func (s StopStreamProcessorInput) GoString() string {
16545	return s.String()
16546}
16547
16548// Validate inspects the fields of the type to determine if they are valid.
16549func (s *StopStreamProcessorInput) Validate() error {
16550	invalidParams := request.ErrInvalidParams{Context: "StopStreamProcessorInput"}
16551	if s.Name == nil {
16552		invalidParams.Add(request.NewErrParamRequired("Name"))
16553	}
16554	if s.Name != nil && len(*s.Name) < 1 {
16555		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
16556	}
16557
16558	if invalidParams.Len() > 0 {
16559		return invalidParams
16560	}
16561	return nil
16562}
16563
16564// SetName sets the Name field's value.
16565func (s *StopStreamProcessorInput) SetName(v string) *StopStreamProcessorInput {
16566	s.Name = &v
16567	return s
16568}
16569
16570type StopStreamProcessorOutput struct {
16571	_ struct{} `type:"structure"`
16572}
16573
16574// String returns the string representation
16575func (s StopStreamProcessorOutput) String() string {
16576	return awsutil.Prettify(s)
16577}
16578
16579// GoString returns the string representation
16580func (s StopStreamProcessorOutput) GoString() string {
16581	return s.String()
16582}
16583
16584// An object that recognizes faces in a streaming video. An Amazon Rekognition
16585// stream processor is created by a call to CreateStreamProcessor. The request
16586// parameters for CreateStreamProcessor describe the Kinesis video stream source
16587// for the streaming video, face recognition parameters, and where to stream
16588// the analysis resullts.
16589type StreamProcessor struct {
16590	_ struct{} `type:"structure"`
16591
16592	// Name of the Amazon Rekognition stream processor.
16593	Name *string `min:"1" type:"string"`
16594
16595	// Current status of the Amazon Rekognition stream processor.
16596	Status *string `type:"string" enum:"StreamProcessorStatus"`
16597}
16598
16599// String returns the string representation
16600func (s StreamProcessor) String() string {
16601	return awsutil.Prettify(s)
16602}
16603
16604// GoString returns the string representation
16605func (s StreamProcessor) GoString() string {
16606	return s.String()
16607}
16608
16609// SetName sets the Name field's value.
16610func (s *StreamProcessor) SetName(v string) *StreamProcessor {
16611	s.Name = &v
16612	return s
16613}
16614
16615// SetStatus sets the Status field's value.
16616func (s *StreamProcessor) SetStatus(v string) *StreamProcessor {
16617	s.Status = &v
16618	return s
16619}
16620
16621// Information about the source streaming video.
16622type StreamProcessorInput struct {
16623	_ struct{} `type:"structure"`
16624
16625	// The Kinesis video stream input stream for the source streaming video.
16626	KinesisVideoStream *KinesisVideoStream `type:"structure"`
16627}
16628
16629// String returns the string representation
16630func (s StreamProcessorInput) String() string {
16631	return awsutil.Prettify(s)
16632}
16633
16634// GoString returns the string representation
16635func (s StreamProcessorInput) GoString() string {
16636	return s.String()
16637}
16638
16639// SetKinesisVideoStream sets the KinesisVideoStream field's value.
16640func (s *StreamProcessorInput) SetKinesisVideoStream(v *KinesisVideoStream) *StreamProcessorInput {
16641	s.KinesisVideoStream = v
16642	return s
16643}
16644
16645// Information about the Amazon Kinesis Data Streams stream to which a Amazon
16646// Rekognition Video stream processor streams the results of a video analysis.
16647// For more information, see CreateStreamProcessor in the Amazon Rekognition
16648// Developer Guide.
16649type StreamProcessorOutput struct {
16650	_ struct{} `type:"structure"`
16651
16652	// The Amazon Kinesis Data Streams stream to which the Amazon Rekognition stream
16653	// processor streams the analysis results.
16654	KinesisDataStream *KinesisDataStream `type:"structure"`
16655}
16656
16657// String returns the string representation
16658func (s StreamProcessorOutput) String() string {
16659	return awsutil.Prettify(s)
16660}
16661
16662// GoString returns the string representation
16663func (s StreamProcessorOutput) GoString() string {
16664	return s.String()
16665}
16666
16667// SetKinesisDataStream sets the KinesisDataStream field's value.
16668func (s *StreamProcessorOutput) SetKinesisDataStream(v *KinesisDataStream) *StreamProcessorOutput {
16669	s.KinesisDataStream = v
16670	return s
16671}
16672
16673// Input parameters used to recognize faces in a streaming video analyzed by
16674// a Amazon Rekognition stream processor.
16675type StreamProcessorSettings struct {
16676	_ struct{} `type:"structure"`
16677
16678	// Face search settings to use on a streaming video.
16679	FaceSearch *FaceSearchSettings `type:"structure"`
16680}
16681
16682// String returns the string representation
16683func (s StreamProcessorSettings) String() string {
16684	return awsutil.Prettify(s)
16685}
16686
16687// GoString returns the string representation
16688func (s StreamProcessorSettings) GoString() string {
16689	return s.String()
16690}
16691
16692// Validate inspects the fields of the type to determine if they are valid.
16693func (s *StreamProcessorSettings) Validate() error {
16694	invalidParams := request.ErrInvalidParams{Context: "StreamProcessorSettings"}
16695	if s.FaceSearch != nil {
16696		if err := s.FaceSearch.Validate(); err != nil {
16697			invalidParams.AddNested("FaceSearch", err.(request.ErrInvalidParams))
16698		}
16699	}
16700
16701	if invalidParams.Len() > 0 {
16702		return invalidParams
16703	}
16704	return nil
16705}
16706
16707// SetFaceSearch sets the FaceSearch field's value.
16708func (s *StreamProcessorSettings) SetFaceSearch(v *FaceSearchSettings) *StreamProcessorSettings {
16709	s.FaceSearch = v
16710	return s
16711}
16712
16713// The S3 bucket that contains the training summary. The training summary includes
16714// aggregated evaluation metrics for the entire testing dataset and metrics
16715// for each individual label.
16716//
16717// You get the training summary S3 bucket location by calling DescribeProjectVersions.
16718type Summary struct {
16719	_ struct{} `type:"structure"`
16720
16721	// Provides the S3 bucket name and object name.
16722	//
16723	// The region for the S3 bucket containing the S3 object must match the region
16724	// you use for Amazon Rekognition operations.
16725	//
16726	// For Amazon Rekognition to process an S3 object, the user must have permission
16727	// to access the S3 object. For more information, see Resource-Based Policies
16728	// in the Amazon Rekognition Developer Guide.
16729	S3Object *S3Object `type:"structure"`
16730}
16731
16732// String returns the string representation
16733func (s Summary) String() string {
16734	return awsutil.Prettify(s)
16735}
16736
16737// GoString returns the string representation
16738func (s Summary) GoString() string {
16739	return s.String()
16740}
16741
16742// SetS3Object sets the S3Object field's value.
16743func (s *Summary) SetS3Object(v *S3Object) *Summary {
16744	s.S3Object = v
16745	return s
16746}
16747
16748// Indicates whether or not the face is wearing sunglasses, and the confidence
16749// level in the determination.
16750type Sunglasses struct {
16751	_ struct{} `type:"structure"`
16752
16753	// Level of confidence in the determination.
16754	Confidence *float64 `type:"float"`
16755
16756	// Boolean value that indicates whether the face is wearing sunglasses or not.
16757	Value *bool `type:"boolean"`
16758}
16759
16760// String returns the string representation
16761func (s Sunglasses) String() string {
16762	return awsutil.Prettify(s)
16763}
16764
16765// GoString returns the string representation
16766func (s Sunglasses) GoString() string {
16767	return s.String()
16768}
16769
16770// SetConfidence sets the Confidence field's value.
16771func (s *Sunglasses) SetConfidence(v float64) *Sunglasses {
16772	s.Confidence = &v
16773	return s
16774}
16775
16776// SetValue sets the Value field's value.
16777func (s *Sunglasses) SetValue(v bool) *Sunglasses {
16778	s.Value = &v
16779	return s
16780}
16781
16782type TagResourceInput struct {
16783	_ struct{} `type:"structure"`
16784
16785	// Amazon Resource Name (ARN) of the model, collection, or stream processor
16786	// that you want to assign the tags to.
16787	//
16788	// ResourceArn is a required field
16789	ResourceArn *string `min:"20" type:"string" required:"true"`
16790
16791	// The key-value tags to assign to the resource.
16792	//
16793	// Tags is a required field
16794	Tags map[string]*string `type:"map" required:"true"`
16795}
16796
16797// String returns the string representation
16798func (s TagResourceInput) String() string {
16799	return awsutil.Prettify(s)
16800}
16801
16802// GoString returns the string representation
16803func (s TagResourceInput) GoString() string {
16804	return s.String()
16805}
16806
16807// Validate inspects the fields of the type to determine if they are valid.
16808func (s *TagResourceInput) Validate() error {
16809	invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"}
16810	if s.ResourceArn == nil {
16811		invalidParams.Add(request.NewErrParamRequired("ResourceArn"))
16812	}
16813	if s.ResourceArn != nil && len(*s.ResourceArn) < 20 {
16814		invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 20))
16815	}
16816	if s.Tags == nil {
16817		invalidParams.Add(request.NewErrParamRequired("Tags"))
16818	}
16819
16820	if invalidParams.Len() > 0 {
16821		return invalidParams
16822	}
16823	return nil
16824}
16825
16826// SetResourceArn sets the ResourceArn field's value.
16827func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput {
16828	s.ResourceArn = &v
16829	return s
16830}
16831
16832// SetTags sets the Tags field's value.
16833func (s *TagResourceInput) SetTags(v map[string]*string) *TagResourceInput {
16834	s.Tags = v
16835	return s
16836}
16837
16838type TagResourceOutput struct {
16839	_ struct{} `type:"structure"`
16840}
16841
16842// String returns the string representation
16843func (s TagResourceOutput) String() string {
16844	return awsutil.Prettify(s)
16845}
16846
16847// GoString returns the string representation
16848func (s TagResourceOutput) GoString() string {
16849	return s.String()
16850}
16851
16852// Information about a technical cue segment. For more information, see SegmentDetection.
16853type TechnicalCueSegment struct {
16854	_ struct{} `type:"structure"`
16855
16856	// The confidence that Amazon Rekognition Video has in the accuracy of the detected
16857	// segment.
16858	Confidence *float64 `min:"50" type:"float"`
16859
16860	// The type of the technical cue.
16861	Type *string `type:"string" enum:"TechnicalCueType"`
16862}
16863
16864// String returns the string representation
16865func (s TechnicalCueSegment) String() string {
16866	return awsutil.Prettify(s)
16867}
16868
16869// GoString returns the string representation
16870func (s TechnicalCueSegment) GoString() string {
16871	return s.String()
16872}
16873
16874// SetConfidence sets the Confidence field's value.
16875func (s *TechnicalCueSegment) SetConfidence(v float64) *TechnicalCueSegment {
16876	s.Confidence = &v
16877	return s
16878}
16879
16880// SetType sets the Type field's value.
16881func (s *TechnicalCueSegment) SetType(v string) *TechnicalCueSegment {
16882	s.Type = &v
16883	return s
16884}
16885
16886// The dataset used for testing. Optionally, if AutoCreate is set, Amazon Rekognition
16887// Custom Labels creates a testing dataset using an 80/20 split of the training
16888// dataset.
16889type TestingData struct {
16890	_ struct{} `type:"structure"`
16891
16892	// The assets used for testing.
16893	Assets []*Asset `type:"list"`
16894
16895	// If specified, Amazon Rekognition Custom Labels creates a testing dataset
16896	// with an 80/20 split of the training dataset.
16897	AutoCreate *bool `type:"boolean"`
16898}
16899
16900// String returns the string representation
16901func (s TestingData) String() string {
16902	return awsutil.Prettify(s)
16903}
16904
16905// GoString returns the string representation
16906func (s TestingData) GoString() string {
16907	return s.String()
16908}
16909
16910// Validate inspects the fields of the type to determine if they are valid.
16911func (s *TestingData) Validate() error {
16912	invalidParams := request.ErrInvalidParams{Context: "TestingData"}
16913	if s.Assets != nil {
16914		for i, v := range s.Assets {
16915			if v == nil {
16916				continue
16917			}
16918			if err := v.Validate(); err != nil {
16919				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Assets", i), err.(request.ErrInvalidParams))
16920			}
16921		}
16922	}
16923
16924	if invalidParams.Len() > 0 {
16925		return invalidParams
16926	}
16927	return nil
16928}
16929
16930// SetAssets sets the Assets field's value.
16931func (s *TestingData) SetAssets(v []*Asset) *TestingData {
16932	s.Assets = v
16933	return s
16934}
16935
16936// SetAutoCreate sets the AutoCreate field's value.
16937func (s *TestingData) SetAutoCreate(v bool) *TestingData {
16938	s.AutoCreate = &v
16939	return s
16940}
16941
16942// Sagemaker Groundtruth format manifest files for the input, output and validation
16943// datasets that are used and created during testing.
16944type TestingDataResult struct {
16945	_ struct{} `type:"structure"`
16946
16947	// The testing dataset that was supplied for training.
16948	Input *TestingData `type:"structure"`
16949
16950	// The subset of the dataset that was actually tested. Some images (assets)
16951	// might not be tested due to file formatting and other issues.
16952	Output *TestingData `type:"structure"`
16953
16954	// The location of the data validation manifest. The data validation manifest
16955	// is created for the test dataset during model training.
16956	Validation *ValidationData `type:"structure"`
16957}
16958
16959// String returns the string representation
16960func (s TestingDataResult) String() string {
16961	return awsutil.Prettify(s)
16962}
16963
16964// GoString returns the string representation
16965func (s TestingDataResult) GoString() string {
16966	return s.String()
16967}
16968
16969// SetInput sets the Input field's value.
16970func (s *TestingDataResult) SetInput(v *TestingData) *TestingDataResult {
16971	s.Input = v
16972	return s
16973}
16974
16975// SetOutput sets the Output field's value.
16976func (s *TestingDataResult) SetOutput(v *TestingData) *TestingDataResult {
16977	s.Output = v
16978	return s
16979}
16980
16981// SetValidation sets the Validation field's value.
16982func (s *TestingDataResult) SetValidation(v *ValidationData) *TestingDataResult {
16983	s.Validation = v
16984	return s
16985}
16986
16987// Information about a word or line of text detected by DetectText.
16988//
16989// The DetectedText field contains the text that Amazon Rekognition detected
16990// in the image.
16991//
16992// Every word and line has an identifier (Id). Each word belongs to a line and
16993// has a parent identifier (ParentId) that identifies the line of text in which
16994// the word appears. The word Id is also an index for the word within a line
16995// of words.
16996//
16997// For more information, see Detecting Text in the Amazon Rekognition Developer
16998// Guide.
16999type TextDetection struct {
17000	_ struct{} `type:"structure"`
17001
17002	// The confidence that Amazon Rekognition has in the accuracy of the detected
17003	// text and the accuracy of the geometry points around the detected text.
17004	Confidence *float64 `type:"float"`
17005
17006	// The word or line of text recognized by Amazon Rekognition.
17007	DetectedText *string `type:"string"`
17008
17009	// The location of the detected text on the image. Includes an axis aligned
17010	// coarse bounding box surrounding the text and a finer grain polygon for more
17011	// accurate spatial information.
17012	Geometry *Geometry `type:"structure"`
17013
17014	// The identifier for the detected text. The identifier is only unique for a
17015	// single call to DetectText.
17016	Id *int64 `type:"integer"`
17017
17018	// The Parent identifier for the detected text identified by the value of ID.
17019	// If the type of detected text is LINE, the value of ParentId is Null.
17020	ParentId *int64 `type:"integer"`
17021
17022	// The type of text that was detected.
17023	Type *string `type:"string" enum:"TextTypes"`
17024}
17025
17026// String returns the string representation
17027func (s TextDetection) String() string {
17028	return awsutil.Prettify(s)
17029}
17030
17031// GoString returns the string representation
17032func (s TextDetection) GoString() string {
17033	return s.String()
17034}
17035
17036// SetConfidence sets the Confidence field's value.
17037func (s *TextDetection) SetConfidence(v float64) *TextDetection {
17038	s.Confidence = &v
17039	return s
17040}
17041
17042// SetDetectedText sets the DetectedText field's value.
17043func (s *TextDetection) SetDetectedText(v string) *TextDetection {
17044	s.DetectedText = &v
17045	return s
17046}
17047
17048// SetGeometry sets the Geometry field's value.
17049func (s *TextDetection) SetGeometry(v *Geometry) *TextDetection {
17050	s.Geometry = v
17051	return s
17052}
17053
17054// SetId sets the Id field's value.
17055func (s *TextDetection) SetId(v int64) *TextDetection {
17056	s.Id = &v
17057	return s
17058}
17059
17060// SetParentId sets the ParentId field's value.
17061func (s *TextDetection) SetParentId(v int64) *TextDetection {
17062	s.ParentId = &v
17063	return s
17064}
17065
17066// SetType sets the Type field's value.
17067func (s *TextDetection) SetType(v string) *TextDetection {
17068	s.Type = &v
17069	return s
17070}
17071
17072// Information about text detected in a video. Incudes the detected text, the
17073// time in milliseconds from the start of the video that the text was detected,
17074// and where it was detected on the screen.
17075type TextDetectionResult struct {
17076	_ struct{} `type:"structure"`
17077
17078	// Details about text detected in a video.
17079	TextDetection *TextDetection `type:"structure"`
17080
17081	// The time, in milliseconds from the start of the video, that the text was
17082	// detected.
17083	Timestamp *int64 `type:"long"`
17084}
17085
17086// String returns the string representation
17087func (s TextDetectionResult) String() string {
17088	return awsutil.Prettify(s)
17089}
17090
17091// GoString returns the string representation
17092func (s TextDetectionResult) GoString() string {
17093	return s.String()
17094}
17095
17096// SetTextDetection sets the TextDetection field's value.
17097func (s *TextDetectionResult) SetTextDetection(v *TextDetection) *TextDetectionResult {
17098	s.TextDetection = v
17099	return s
17100}
17101
17102// SetTimestamp sets the Timestamp field's value.
17103func (s *TextDetectionResult) SetTimestamp(v int64) *TextDetectionResult {
17104	s.Timestamp = &v
17105	return s
17106}
17107
17108// Amazon Rekognition is temporarily unable to process the request. Try your
17109// call again.
17110type ThrottlingException struct {
17111	_            struct{}                  `type:"structure"`
17112	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
17113
17114	Message_ *string `locationName:"message" type:"string"`
17115}
17116
17117// String returns the string representation
17118func (s ThrottlingException) String() string {
17119	return awsutil.Prettify(s)
17120}
17121
17122// GoString returns the string representation
17123func (s ThrottlingException) GoString() string {
17124	return s.String()
17125}
17126
17127func newErrorThrottlingException(v protocol.ResponseMetadata) error {
17128	return &ThrottlingException{
17129		RespMetadata: v,
17130	}
17131}
17132
17133// Code returns the exception type name.
17134func (s *ThrottlingException) Code() string {
17135	return "ThrottlingException"
17136}
17137
17138// Message returns the exception's message.
17139func (s *ThrottlingException) Message() string {
17140	if s.Message_ != nil {
17141		return *s.Message_
17142	}
17143	return ""
17144}
17145
17146// OrigErr always returns nil, satisfies awserr.Error interface.
17147func (s *ThrottlingException) OrigErr() error {
17148	return nil
17149}
17150
17151func (s *ThrottlingException) Error() string {
17152	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
17153}
17154
17155// Status code returns the HTTP status code for the request's response error.
17156func (s *ThrottlingException) StatusCode() int {
17157	return s.RespMetadata.StatusCode
17158}
17159
17160// RequestID returns the service's response RequestID for request.
17161func (s *ThrottlingException) RequestID() string {
17162	return s.RespMetadata.RequestID
17163}
17164
17165// The dataset used for training.
17166type TrainingData struct {
17167	_ struct{} `type:"structure"`
17168
17169	// A Sagemaker GroundTruth manifest file that contains the training images (assets).
17170	Assets []*Asset `type:"list"`
17171}
17172
17173// String returns the string representation
17174func (s TrainingData) String() string {
17175	return awsutil.Prettify(s)
17176}
17177
17178// GoString returns the string representation
17179func (s TrainingData) GoString() string {
17180	return s.String()
17181}
17182
17183// Validate inspects the fields of the type to determine if they are valid.
17184func (s *TrainingData) Validate() error {
17185	invalidParams := request.ErrInvalidParams{Context: "TrainingData"}
17186	if s.Assets != nil {
17187		for i, v := range s.Assets {
17188			if v == nil {
17189				continue
17190			}
17191			if err := v.Validate(); err != nil {
17192				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Assets", i), err.(request.ErrInvalidParams))
17193			}
17194		}
17195	}
17196
17197	if invalidParams.Len() > 0 {
17198		return invalidParams
17199	}
17200	return nil
17201}
17202
17203// SetAssets sets the Assets field's value.
17204func (s *TrainingData) SetAssets(v []*Asset) *TrainingData {
17205	s.Assets = v
17206	return s
17207}
17208
17209// Sagemaker Groundtruth format manifest files for the input, output and validation
17210// datasets that are used and created during testing.
17211type TrainingDataResult struct {
17212	_ struct{} `type:"structure"`
17213
17214	// The training assets that you supplied for training.
17215	Input *TrainingData `type:"structure"`
17216
17217	// The images (assets) that were actually trained by Amazon Rekognition Custom
17218	// Labels.
17219	Output *TrainingData `type:"structure"`
17220
17221	// The location of the data validation manifest. The data validation manifest
17222	// is created for the training dataset during model training.
17223	Validation *ValidationData `type:"structure"`
17224}
17225
17226// String returns the string representation
17227func (s TrainingDataResult) String() string {
17228	return awsutil.Prettify(s)
17229}
17230
17231// GoString returns the string representation
17232func (s TrainingDataResult) GoString() string {
17233	return s.String()
17234}
17235
17236// SetInput sets the Input field's value.
17237func (s *TrainingDataResult) SetInput(v *TrainingData) *TrainingDataResult {
17238	s.Input = v
17239	return s
17240}
17241
17242// SetOutput sets the Output field's value.
17243func (s *TrainingDataResult) SetOutput(v *TrainingData) *TrainingDataResult {
17244	s.Output = v
17245	return s
17246}
17247
17248// SetValidation sets the Validation field's value.
17249func (s *TrainingDataResult) SetValidation(v *ValidationData) *TrainingDataResult {
17250	s.Validation = v
17251	return s
17252}
17253
17254// A face that IndexFaces detected, but didn't index. Use the Reasons response
17255// attribute to determine why a face wasn't indexed.
17256type UnindexedFace struct {
17257	_ struct{} `type:"structure"`
17258
17259	// The structure that contains attributes of a face that IndexFacesdetected,
17260	// but didn't index.
17261	FaceDetail *FaceDetail `type:"structure"`
17262
17263	// An array of reasons that specify why a face wasn't indexed.
17264	//
17265	//    * EXTREME_POSE - The face is at a pose that can't be detected. For example,
17266	//    the head is turned too far away from the camera.
17267	//
17268	//    * EXCEEDS_MAX_FACES - The number of faces detected is already higher than
17269	//    that specified by the MaxFaces input parameter for IndexFaces.
17270	//
17271	//    * LOW_BRIGHTNESS - The image is too dark.
17272	//
17273	//    * LOW_SHARPNESS - The image is too blurry.
17274	//
17275	//    * LOW_CONFIDENCE - The face was detected with a low confidence.
17276	//
17277	//    * SMALL_BOUNDING_BOX - The bounding box around the face is too small.
17278	Reasons []*string `type:"list"`
17279}
17280
17281// String returns the string representation
17282func (s UnindexedFace) String() string {
17283	return awsutil.Prettify(s)
17284}
17285
17286// GoString returns the string representation
17287func (s UnindexedFace) GoString() string {
17288	return s.String()
17289}
17290
17291// SetFaceDetail sets the FaceDetail field's value.
17292func (s *UnindexedFace) SetFaceDetail(v *FaceDetail) *UnindexedFace {
17293	s.FaceDetail = v
17294	return s
17295}
17296
17297// SetReasons sets the Reasons field's value.
17298func (s *UnindexedFace) SetReasons(v []*string) *UnindexedFace {
17299	s.Reasons = v
17300	return s
17301}
17302
17303type UntagResourceInput struct {
17304	_ struct{} `type:"structure"`
17305
17306	// Amazon Resource Name (ARN) of the model, collection, or stream processor
17307	// that you want to remove the tags from.
17308	//
17309	// ResourceArn is a required field
17310	ResourceArn *string `min:"20" type:"string" required:"true"`
17311
17312	// A list of the tags that you want to remove.
17313	//
17314	// TagKeys is a required field
17315	TagKeys []*string `type:"list" required:"true"`
17316}
17317
17318// String returns the string representation
17319func (s UntagResourceInput) String() string {
17320	return awsutil.Prettify(s)
17321}
17322
17323// GoString returns the string representation
17324func (s UntagResourceInput) GoString() string {
17325	return s.String()
17326}
17327
17328// Validate inspects the fields of the type to determine if they are valid.
17329func (s *UntagResourceInput) Validate() error {
17330	invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"}
17331	if s.ResourceArn == nil {
17332		invalidParams.Add(request.NewErrParamRequired("ResourceArn"))
17333	}
17334	if s.ResourceArn != nil && len(*s.ResourceArn) < 20 {
17335		invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 20))
17336	}
17337	if s.TagKeys == nil {
17338		invalidParams.Add(request.NewErrParamRequired("TagKeys"))
17339	}
17340
17341	if invalidParams.Len() > 0 {
17342		return invalidParams
17343	}
17344	return nil
17345}
17346
17347// SetResourceArn sets the ResourceArn field's value.
17348func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput {
17349	s.ResourceArn = &v
17350	return s
17351}
17352
17353// SetTagKeys sets the TagKeys field's value.
17354func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput {
17355	s.TagKeys = v
17356	return s
17357}
17358
17359type UntagResourceOutput struct {
17360	_ struct{} `type:"structure"`
17361}
17362
17363// String returns the string representation
17364func (s UntagResourceOutput) String() string {
17365	return awsutil.Prettify(s)
17366}
17367
17368// GoString returns the string representation
17369func (s UntagResourceOutput) GoString() string {
17370	return s.String()
17371}
17372
17373// Contains the Amazon S3 bucket location of the validation data for a model
17374// training job.
17375//
17376// The validation data includes error information for individual JSON lines
17377// in the dataset. For more information, see Debugging a Failed Model Training
17378// in the Amazon Rekognition Custom Labels Developer Guide.
17379//
17380// You get the ValidationData object for the training dataset (TrainingDataResult)
17381// and the test dataset (TestingDataResult) by calling DescribeProjectVersions.
17382//
17383// The assets array contains a single Asset object. The GroundTruthManifest
17384// field of the Asset object contains the S3 bucket location of the validation
17385// data.
17386type ValidationData struct {
17387	_ struct{} `type:"structure"`
17388
17389	// The assets that comprise the validation data.
17390	Assets []*Asset `type:"list"`
17391}
17392
17393// String returns the string representation
17394func (s ValidationData) String() string {
17395	return awsutil.Prettify(s)
17396}
17397
17398// GoString returns the string representation
17399func (s ValidationData) GoString() string {
17400	return s.String()
17401}
17402
17403// SetAssets sets the Assets field's value.
17404func (s *ValidationData) SetAssets(v []*Asset) *ValidationData {
17405	s.Assets = v
17406	return s
17407}
17408
17409// Video file stored in an Amazon S3 bucket. Amazon Rekognition video start
17410// operations such as StartLabelDetection use Video to specify a video for analysis.
17411// The supported file formats are .mp4, .mov and .avi.
17412type Video struct {
17413	_ struct{} `type:"structure"`
17414
17415	// The Amazon S3 bucket name and file name for the video.
17416	S3Object *S3Object `type:"structure"`
17417}
17418
17419// String returns the string representation
17420func (s Video) String() string {
17421	return awsutil.Prettify(s)
17422}
17423
17424// GoString returns the string representation
17425func (s Video) GoString() string {
17426	return s.String()
17427}
17428
17429// Validate inspects the fields of the type to determine if they are valid.
17430func (s *Video) Validate() error {
17431	invalidParams := request.ErrInvalidParams{Context: "Video"}
17432	if s.S3Object != nil {
17433		if err := s.S3Object.Validate(); err != nil {
17434			invalidParams.AddNested("S3Object", err.(request.ErrInvalidParams))
17435		}
17436	}
17437
17438	if invalidParams.Len() > 0 {
17439		return invalidParams
17440	}
17441	return nil
17442}
17443
17444// SetS3Object sets the S3Object field's value.
17445func (s *Video) SetS3Object(v *S3Object) *Video {
17446	s.S3Object = v
17447	return s
17448}
17449
17450// Information about a video that Amazon Rekognition analyzed. Videometadata
17451// is returned in every page of paginated responses from a Amazon Rekognition
17452// video operation.
17453type VideoMetadata struct {
17454	_ struct{} `type:"structure"`
17455
17456	// Type of compression used in the analyzed video.
17457	Codec *string `type:"string"`
17458
17459	// Length of the video in milliseconds.
17460	DurationMillis *int64 `type:"long"`
17461
17462	// Format of the analyzed video. Possible values are MP4, MOV and AVI.
17463	Format *string `type:"string"`
17464
17465	// Vertical pixel dimension of the video.
17466	FrameHeight *int64 `type:"long"`
17467
17468	// Number of frames per second in the video.
17469	FrameRate *float64 `type:"float"`
17470
17471	// Horizontal pixel dimension of the video.
17472	FrameWidth *int64 `type:"long"`
17473}
17474
17475// String returns the string representation
17476func (s VideoMetadata) String() string {
17477	return awsutil.Prettify(s)
17478}
17479
17480// GoString returns the string representation
17481func (s VideoMetadata) GoString() string {
17482	return s.String()
17483}
17484
17485// SetCodec sets the Codec field's value.
17486func (s *VideoMetadata) SetCodec(v string) *VideoMetadata {
17487	s.Codec = &v
17488	return s
17489}
17490
17491// SetDurationMillis sets the DurationMillis field's value.
17492func (s *VideoMetadata) SetDurationMillis(v int64) *VideoMetadata {
17493	s.DurationMillis = &v
17494	return s
17495}
17496
17497// SetFormat sets the Format field's value.
17498func (s *VideoMetadata) SetFormat(v string) *VideoMetadata {
17499	s.Format = &v
17500	return s
17501}
17502
17503// SetFrameHeight sets the FrameHeight field's value.
17504func (s *VideoMetadata) SetFrameHeight(v int64) *VideoMetadata {
17505	s.FrameHeight = &v
17506	return s
17507}
17508
17509// SetFrameRate sets the FrameRate field's value.
17510func (s *VideoMetadata) SetFrameRate(v float64) *VideoMetadata {
17511	s.FrameRate = &v
17512	return s
17513}
17514
17515// SetFrameWidth sets the FrameWidth field's value.
17516func (s *VideoMetadata) SetFrameWidth(v int64) *VideoMetadata {
17517	s.FrameWidth = &v
17518	return s
17519}
17520
17521// The file size or duration of the supplied media is too large. The maximum
17522// file size is 10GB. The maximum duration is 6 hours.
17523type VideoTooLargeException struct {
17524	_            struct{}                  `type:"structure"`
17525	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
17526
17527	Message_ *string `locationName:"message" type:"string"`
17528}
17529
17530// String returns the string representation
17531func (s VideoTooLargeException) String() string {
17532	return awsutil.Prettify(s)
17533}
17534
17535// GoString returns the string representation
17536func (s VideoTooLargeException) GoString() string {
17537	return s.String()
17538}
17539
17540func newErrorVideoTooLargeException(v protocol.ResponseMetadata) error {
17541	return &VideoTooLargeException{
17542		RespMetadata: v,
17543	}
17544}
17545
17546// Code returns the exception type name.
17547func (s *VideoTooLargeException) Code() string {
17548	return "VideoTooLargeException"
17549}
17550
17551// Message returns the exception's message.
17552func (s *VideoTooLargeException) Message() string {
17553	if s.Message_ != nil {
17554		return *s.Message_
17555	}
17556	return ""
17557}
17558
17559// OrigErr always returns nil, satisfies awserr.Error interface.
17560func (s *VideoTooLargeException) OrigErr() error {
17561	return nil
17562}
17563
17564func (s *VideoTooLargeException) Error() string {
17565	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
17566}
17567
17568// Status code returns the HTTP status code for the request's response error.
17569func (s *VideoTooLargeException) StatusCode() int {
17570	return s.RespMetadata.StatusCode
17571}
17572
17573// RequestID returns the service's response RequestID for request.
17574func (s *VideoTooLargeException) RequestID() string {
17575	return s.RespMetadata.RequestID
17576}
17577
17578const (
17579	// AttributeDefault is a Attribute enum value
17580	AttributeDefault = "DEFAULT"
17581
17582	// AttributeAll is a Attribute enum value
17583	AttributeAll = "ALL"
17584)
17585
17586// Attribute_Values returns all elements of the Attribute enum
17587func Attribute_Values() []string {
17588	return []string{
17589		AttributeDefault,
17590		AttributeAll,
17591	}
17592}
17593
17594const (
17595	// BodyPartFace is a BodyPart enum value
17596	BodyPartFace = "FACE"
17597
17598	// BodyPartHead is a BodyPart enum value
17599	BodyPartHead = "HEAD"
17600
17601	// BodyPartLeftHand is a BodyPart enum value
17602	BodyPartLeftHand = "LEFT_HAND"
17603
17604	// BodyPartRightHand is a BodyPart enum value
17605	BodyPartRightHand = "RIGHT_HAND"
17606)
17607
17608// BodyPart_Values returns all elements of the BodyPart enum
17609func BodyPart_Values() []string {
17610	return []string{
17611		BodyPartFace,
17612		BodyPartHead,
17613		BodyPartLeftHand,
17614		BodyPartRightHand,
17615	}
17616}
17617
17618const (
17619	// CelebrityRecognitionSortById is a CelebrityRecognitionSortBy enum value
17620	CelebrityRecognitionSortById = "ID"
17621
17622	// CelebrityRecognitionSortByTimestamp is a CelebrityRecognitionSortBy enum value
17623	CelebrityRecognitionSortByTimestamp = "TIMESTAMP"
17624)
17625
17626// CelebrityRecognitionSortBy_Values returns all elements of the CelebrityRecognitionSortBy enum
17627func CelebrityRecognitionSortBy_Values() []string {
17628	return []string{
17629		CelebrityRecognitionSortById,
17630		CelebrityRecognitionSortByTimestamp,
17631	}
17632}
17633
17634const (
17635	// ContentClassifierFreeOfPersonallyIdentifiableInformation is a ContentClassifier enum value
17636	ContentClassifierFreeOfPersonallyIdentifiableInformation = "FreeOfPersonallyIdentifiableInformation"
17637
17638	// ContentClassifierFreeOfAdultContent is a ContentClassifier enum value
17639	ContentClassifierFreeOfAdultContent = "FreeOfAdultContent"
17640)
17641
17642// ContentClassifier_Values returns all elements of the ContentClassifier enum
17643func ContentClassifier_Values() []string {
17644	return []string{
17645		ContentClassifierFreeOfPersonallyIdentifiableInformation,
17646		ContentClassifierFreeOfAdultContent,
17647	}
17648}
17649
17650const (
17651	// ContentModerationSortByName is a ContentModerationSortBy enum value
17652	ContentModerationSortByName = "NAME"
17653
17654	// ContentModerationSortByTimestamp is a ContentModerationSortBy enum value
17655	ContentModerationSortByTimestamp = "TIMESTAMP"
17656)
17657
17658// ContentModerationSortBy_Values returns all elements of the ContentModerationSortBy enum
17659func ContentModerationSortBy_Values() []string {
17660	return []string{
17661		ContentModerationSortByName,
17662		ContentModerationSortByTimestamp,
17663	}
17664}
17665
17666const (
17667	// EmotionNameHappy is a EmotionName enum value
17668	EmotionNameHappy = "HAPPY"
17669
17670	// EmotionNameSad is a EmotionName enum value
17671	EmotionNameSad = "SAD"
17672
17673	// EmotionNameAngry is a EmotionName enum value
17674	EmotionNameAngry = "ANGRY"
17675
17676	// EmotionNameConfused is a EmotionName enum value
17677	EmotionNameConfused = "CONFUSED"
17678
17679	// EmotionNameDisgusted is a EmotionName enum value
17680	EmotionNameDisgusted = "DISGUSTED"
17681
17682	// EmotionNameSurprised is a EmotionName enum value
17683	EmotionNameSurprised = "SURPRISED"
17684
17685	// EmotionNameCalm is a EmotionName enum value
17686	EmotionNameCalm = "CALM"
17687
17688	// EmotionNameUnknown is a EmotionName enum value
17689	EmotionNameUnknown = "UNKNOWN"
17690
17691	// EmotionNameFear is a EmotionName enum value
17692	EmotionNameFear = "FEAR"
17693)
17694
17695// EmotionName_Values returns all elements of the EmotionName enum
17696func EmotionName_Values() []string {
17697	return []string{
17698		EmotionNameHappy,
17699		EmotionNameSad,
17700		EmotionNameAngry,
17701		EmotionNameConfused,
17702		EmotionNameDisgusted,
17703		EmotionNameSurprised,
17704		EmotionNameCalm,
17705		EmotionNameUnknown,
17706		EmotionNameFear,
17707	}
17708}
17709
17710const (
17711	// FaceAttributesDefault is a FaceAttributes enum value
17712	FaceAttributesDefault = "DEFAULT"
17713
17714	// FaceAttributesAll is a FaceAttributes enum value
17715	FaceAttributesAll = "ALL"
17716)
17717
17718// FaceAttributes_Values returns all elements of the FaceAttributes enum
17719func FaceAttributes_Values() []string {
17720	return []string{
17721		FaceAttributesDefault,
17722		FaceAttributesAll,
17723	}
17724}
17725
17726const (
17727	// FaceSearchSortByIndex is a FaceSearchSortBy enum value
17728	FaceSearchSortByIndex = "INDEX"
17729
17730	// FaceSearchSortByTimestamp is a FaceSearchSortBy enum value
17731	FaceSearchSortByTimestamp = "TIMESTAMP"
17732)
17733
17734// FaceSearchSortBy_Values returns all elements of the FaceSearchSortBy enum
17735func FaceSearchSortBy_Values() []string {
17736	return []string{
17737		FaceSearchSortByIndex,
17738		FaceSearchSortByTimestamp,
17739	}
17740}
17741
17742const (
17743	// GenderTypeMale is a GenderType enum value
17744	GenderTypeMale = "Male"
17745
17746	// GenderTypeFemale is a GenderType enum value
17747	GenderTypeFemale = "Female"
17748)
17749
17750// GenderType_Values returns all elements of the GenderType enum
17751func GenderType_Values() []string {
17752	return []string{
17753		GenderTypeMale,
17754		GenderTypeFemale,
17755	}
17756}
17757
17758const (
17759	// LabelDetectionSortByName is a LabelDetectionSortBy enum value
17760	LabelDetectionSortByName = "NAME"
17761
17762	// LabelDetectionSortByTimestamp is a LabelDetectionSortBy enum value
17763	LabelDetectionSortByTimestamp = "TIMESTAMP"
17764)
17765
17766// LabelDetectionSortBy_Values returns all elements of the LabelDetectionSortBy enum
17767func LabelDetectionSortBy_Values() []string {
17768	return []string{
17769		LabelDetectionSortByName,
17770		LabelDetectionSortByTimestamp,
17771	}
17772}
17773
17774const (
17775	// LandmarkTypeEyeLeft is a LandmarkType enum value
17776	LandmarkTypeEyeLeft = "eyeLeft"
17777
17778	// LandmarkTypeEyeRight is a LandmarkType enum value
17779	LandmarkTypeEyeRight = "eyeRight"
17780
17781	// LandmarkTypeNose is a LandmarkType enum value
17782	LandmarkTypeNose = "nose"
17783
17784	// LandmarkTypeMouthLeft is a LandmarkType enum value
17785	LandmarkTypeMouthLeft = "mouthLeft"
17786
17787	// LandmarkTypeMouthRight is a LandmarkType enum value
17788	LandmarkTypeMouthRight = "mouthRight"
17789
17790	// LandmarkTypeLeftEyeBrowLeft is a LandmarkType enum value
17791	LandmarkTypeLeftEyeBrowLeft = "leftEyeBrowLeft"
17792
17793	// LandmarkTypeLeftEyeBrowRight is a LandmarkType enum value
17794	LandmarkTypeLeftEyeBrowRight = "leftEyeBrowRight"
17795
17796	// LandmarkTypeLeftEyeBrowUp is a LandmarkType enum value
17797	LandmarkTypeLeftEyeBrowUp = "leftEyeBrowUp"
17798
17799	// LandmarkTypeRightEyeBrowLeft is a LandmarkType enum value
17800	LandmarkTypeRightEyeBrowLeft = "rightEyeBrowLeft"
17801
17802	// LandmarkTypeRightEyeBrowRight is a LandmarkType enum value
17803	LandmarkTypeRightEyeBrowRight = "rightEyeBrowRight"
17804
17805	// LandmarkTypeRightEyeBrowUp is a LandmarkType enum value
17806	LandmarkTypeRightEyeBrowUp = "rightEyeBrowUp"
17807
17808	// LandmarkTypeLeftEyeLeft is a LandmarkType enum value
17809	LandmarkTypeLeftEyeLeft = "leftEyeLeft"
17810
17811	// LandmarkTypeLeftEyeRight is a LandmarkType enum value
17812	LandmarkTypeLeftEyeRight = "leftEyeRight"
17813
17814	// LandmarkTypeLeftEyeUp is a LandmarkType enum value
17815	LandmarkTypeLeftEyeUp = "leftEyeUp"
17816
17817	// LandmarkTypeLeftEyeDown is a LandmarkType enum value
17818	LandmarkTypeLeftEyeDown = "leftEyeDown"
17819
17820	// LandmarkTypeRightEyeLeft is a LandmarkType enum value
17821	LandmarkTypeRightEyeLeft = "rightEyeLeft"
17822
17823	// LandmarkTypeRightEyeRight is a LandmarkType enum value
17824	LandmarkTypeRightEyeRight = "rightEyeRight"
17825
17826	// LandmarkTypeRightEyeUp is a LandmarkType enum value
17827	LandmarkTypeRightEyeUp = "rightEyeUp"
17828
17829	// LandmarkTypeRightEyeDown is a LandmarkType enum value
17830	LandmarkTypeRightEyeDown = "rightEyeDown"
17831
17832	// LandmarkTypeNoseLeft is a LandmarkType enum value
17833	LandmarkTypeNoseLeft = "noseLeft"
17834
17835	// LandmarkTypeNoseRight is a LandmarkType enum value
17836	LandmarkTypeNoseRight = "noseRight"
17837
17838	// LandmarkTypeMouthUp is a LandmarkType enum value
17839	LandmarkTypeMouthUp = "mouthUp"
17840
17841	// LandmarkTypeMouthDown is a LandmarkType enum value
17842	LandmarkTypeMouthDown = "mouthDown"
17843
17844	// LandmarkTypeLeftPupil is a LandmarkType enum value
17845	LandmarkTypeLeftPupil = "leftPupil"
17846
17847	// LandmarkTypeRightPupil is a LandmarkType enum value
17848	LandmarkTypeRightPupil = "rightPupil"
17849
17850	// LandmarkTypeUpperJawlineLeft is a LandmarkType enum value
17851	LandmarkTypeUpperJawlineLeft = "upperJawlineLeft"
17852
17853	// LandmarkTypeMidJawlineLeft is a LandmarkType enum value
17854	LandmarkTypeMidJawlineLeft = "midJawlineLeft"
17855
17856	// LandmarkTypeChinBottom is a LandmarkType enum value
17857	LandmarkTypeChinBottom = "chinBottom"
17858
17859	// LandmarkTypeMidJawlineRight is a LandmarkType enum value
17860	LandmarkTypeMidJawlineRight = "midJawlineRight"
17861
17862	// LandmarkTypeUpperJawlineRight is a LandmarkType enum value
17863	LandmarkTypeUpperJawlineRight = "upperJawlineRight"
17864)
17865
17866// LandmarkType_Values returns all elements of the LandmarkType enum
17867func LandmarkType_Values() []string {
17868	return []string{
17869		LandmarkTypeEyeLeft,
17870		LandmarkTypeEyeRight,
17871		LandmarkTypeNose,
17872		LandmarkTypeMouthLeft,
17873		LandmarkTypeMouthRight,
17874		LandmarkTypeLeftEyeBrowLeft,
17875		LandmarkTypeLeftEyeBrowRight,
17876		LandmarkTypeLeftEyeBrowUp,
17877		LandmarkTypeRightEyeBrowLeft,
17878		LandmarkTypeRightEyeBrowRight,
17879		LandmarkTypeRightEyeBrowUp,
17880		LandmarkTypeLeftEyeLeft,
17881		LandmarkTypeLeftEyeRight,
17882		LandmarkTypeLeftEyeUp,
17883		LandmarkTypeLeftEyeDown,
17884		LandmarkTypeRightEyeLeft,
17885		LandmarkTypeRightEyeRight,
17886		LandmarkTypeRightEyeUp,
17887		LandmarkTypeRightEyeDown,
17888		LandmarkTypeNoseLeft,
17889		LandmarkTypeNoseRight,
17890		LandmarkTypeMouthUp,
17891		LandmarkTypeMouthDown,
17892		LandmarkTypeLeftPupil,
17893		LandmarkTypeRightPupil,
17894		LandmarkTypeUpperJawlineLeft,
17895		LandmarkTypeMidJawlineLeft,
17896		LandmarkTypeChinBottom,
17897		LandmarkTypeMidJawlineRight,
17898		LandmarkTypeUpperJawlineRight,
17899	}
17900}
17901
17902const (
17903	// OrientationCorrectionRotate0 is a OrientationCorrection enum value
17904	OrientationCorrectionRotate0 = "ROTATE_0"
17905
17906	// OrientationCorrectionRotate90 is a OrientationCorrection enum value
17907	OrientationCorrectionRotate90 = "ROTATE_90"
17908
17909	// OrientationCorrectionRotate180 is a OrientationCorrection enum value
17910	OrientationCorrectionRotate180 = "ROTATE_180"
17911
17912	// OrientationCorrectionRotate270 is a OrientationCorrection enum value
17913	OrientationCorrectionRotate270 = "ROTATE_270"
17914)
17915
17916// OrientationCorrection_Values returns all elements of the OrientationCorrection enum
17917func OrientationCorrection_Values() []string {
17918	return []string{
17919		OrientationCorrectionRotate0,
17920		OrientationCorrectionRotate90,
17921		OrientationCorrectionRotate180,
17922		OrientationCorrectionRotate270,
17923	}
17924}
17925
17926const (
17927	// PersonTrackingSortByIndex is a PersonTrackingSortBy enum value
17928	PersonTrackingSortByIndex = "INDEX"
17929
17930	// PersonTrackingSortByTimestamp is a PersonTrackingSortBy enum value
17931	PersonTrackingSortByTimestamp = "TIMESTAMP"
17932)
17933
17934// PersonTrackingSortBy_Values returns all elements of the PersonTrackingSortBy enum
17935func PersonTrackingSortBy_Values() []string {
17936	return []string{
17937		PersonTrackingSortByIndex,
17938		PersonTrackingSortByTimestamp,
17939	}
17940}
17941
17942const (
17943	// ProjectStatusCreating is a ProjectStatus enum value
17944	ProjectStatusCreating = "CREATING"
17945
17946	// ProjectStatusCreated is a ProjectStatus enum value
17947	ProjectStatusCreated = "CREATED"
17948
17949	// ProjectStatusDeleting is a ProjectStatus enum value
17950	ProjectStatusDeleting = "DELETING"
17951)
17952
17953// ProjectStatus_Values returns all elements of the ProjectStatus enum
17954func ProjectStatus_Values() []string {
17955	return []string{
17956		ProjectStatusCreating,
17957		ProjectStatusCreated,
17958		ProjectStatusDeleting,
17959	}
17960}
17961
17962const (
17963	// ProjectVersionStatusTrainingInProgress is a ProjectVersionStatus enum value
17964	ProjectVersionStatusTrainingInProgress = "TRAINING_IN_PROGRESS"
17965
17966	// ProjectVersionStatusTrainingCompleted is a ProjectVersionStatus enum value
17967	ProjectVersionStatusTrainingCompleted = "TRAINING_COMPLETED"
17968
17969	// ProjectVersionStatusTrainingFailed is a ProjectVersionStatus enum value
17970	ProjectVersionStatusTrainingFailed = "TRAINING_FAILED"
17971
17972	// ProjectVersionStatusStarting is a ProjectVersionStatus enum value
17973	ProjectVersionStatusStarting = "STARTING"
17974
17975	// ProjectVersionStatusRunning is a ProjectVersionStatus enum value
17976	ProjectVersionStatusRunning = "RUNNING"
17977
17978	// ProjectVersionStatusFailed is a ProjectVersionStatus enum value
17979	ProjectVersionStatusFailed = "FAILED"
17980
17981	// ProjectVersionStatusStopping is a ProjectVersionStatus enum value
17982	ProjectVersionStatusStopping = "STOPPING"
17983
17984	// ProjectVersionStatusStopped is a ProjectVersionStatus enum value
17985	ProjectVersionStatusStopped = "STOPPED"
17986
17987	// ProjectVersionStatusDeleting is a ProjectVersionStatus enum value
17988	ProjectVersionStatusDeleting = "DELETING"
17989)
17990
17991// ProjectVersionStatus_Values returns all elements of the ProjectVersionStatus enum
17992func ProjectVersionStatus_Values() []string {
17993	return []string{
17994		ProjectVersionStatusTrainingInProgress,
17995		ProjectVersionStatusTrainingCompleted,
17996		ProjectVersionStatusTrainingFailed,
17997		ProjectVersionStatusStarting,
17998		ProjectVersionStatusRunning,
17999		ProjectVersionStatusFailed,
18000		ProjectVersionStatusStopping,
18001		ProjectVersionStatusStopped,
18002		ProjectVersionStatusDeleting,
18003	}
18004}
18005
18006const (
18007	// ProtectiveEquipmentTypeFaceCover is a ProtectiveEquipmentType enum value
18008	ProtectiveEquipmentTypeFaceCover = "FACE_COVER"
18009
18010	// ProtectiveEquipmentTypeHandCover is a ProtectiveEquipmentType enum value
18011	ProtectiveEquipmentTypeHandCover = "HAND_COVER"
18012
18013	// ProtectiveEquipmentTypeHeadCover is a ProtectiveEquipmentType enum value
18014	ProtectiveEquipmentTypeHeadCover = "HEAD_COVER"
18015)
18016
18017// ProtectiveEquipmentType_Values returns all elements of the ProtectiveEquipmentType enum
18018func ProtectiveEquipmentType_Values() []string {
18019	return []string{
18020		ProtectiveEquipmentTypeFaceCover,
18021		ProtectiveEquipmentTypeHandCover,
18022		ProtectiveEquipmentTypeHeadCover,
18023	}
18024}
18025
18026const (
18027	// QualityFilterNone is a QualityFilter enum value
18028	QualityFilterNone = "NONE"
18029
18030	// QualityFilterAuto is a QualityFilter enum value
18031	QualityFilterAuto = "AUTO"
18032
18033	// QualityFilterLow is a QualityFilter enum value
18034	QualityFilterLow = "LOW"
18035
18036	// QualityFilterMedium is a QualityFilter enum value
18037	QualityFilterMedium = "MEDIUM"
18038
18039	// QualityFilterHigh is a QualityFilter enum value
18040	QualityFilterHigh = "HIGH"
18041)
18042
18043// QualityFilter_Values returns all elements of the QualityFilter enum
18044func QualityFilter_Values() []string {
18045	return []string{
18046		QualityFilterNone,
18047		QualityFilterAuto,
18048		QualityFilterLow,
18049		QualityFilterMedium,
18050		QualityFilterHigh,
18051	}
18052}
18053
18054const (
18055	// ReasonExceedsMaxFaces is a Reason enum value
18056	ReasonExceedsMaxFaces = "EXCEEDS_MAX_FACES"
18057
18058	// ReasonExtremePose is a Reason enum value
18059	ReasonExtremePose = "EXTREME_POSE"
18060
18061	// ReasonLowBrightness is a Reason enum value
18062	ReasonLowBrightness = "LOW_BRIGHTNESS"
18063
18064	// ReasonLowSharpness is a Reason enum value
18065	ReasonLowSharpness = "LOW_SHARPNESS"
18066
18067	// ReasonLowConfidence is a Reason enum value
18068	ReasonLowConfidence = "LOW_CONFIDENCE"
18069
18070	// ReasonSmallBoundingBox is a Reason enum value
18071	ReasonSmallBoundingBox = "SMALL_BOUNDING_BOX"
18072
18073	// ReasonLowFaceQuality is a Reason enum value
18074	ReasonLowFaceQuality = "LOW_FACE_QUALITY"
18075)
18076
18077// Reason_Values returns all elements of the Reason enum
18078func Reason_Values() []string {
18079	return []string{
18080		ReasonExceedsMaxFaces,
18081		ReasonExtremePose,
18082		ReasonLowBrightness,
18083		ReasonLowSharpness,
18084		ReasonLowConfidence,
18085		ReasonSmallBoundingBox,
18086		ReasonLowFaceQuality,
18087	}
18088}
18089
18090const (
18091	// SegmentTypeTechnicalCue is a SegmentType enum value
18092	SegmentTypeTechnicalCue = "TECHNICAL_CUE"
18093
18094	// SegmentTypeShot is a SegmentType enum value
18095	SegmentTypeShot = "SHOT"
18096)
18097
18098// SegmentType_Values returns all elements of the SegmentType enum
18099func SegmentType_Values() []string {
18100	return []string{
18101		SegmentTypeTechnicalCue,
18102		SegmentTypeShot,
18103	}
18104}
18105
18106const (
18107	// StreamProcessorStatusStopped is a StreamProcessorStatus enum value
18108	StreamProcessorStatusStopped = "STOPPED"
18109
18110	// StreamProcessorStatusStarting is a StreamProcessorStatus enum value
18111	StreamProcessorStatusStarting = "STARTING"
18112
18113	// StreamProcessorStatusRunning is a StreamProcessorStatus enum value
18114	StreamProcessorStatusRunning = "RUNNING"
18115
18116	// StreamProcessorStatusFailed is a StreamProcessorStatus enum value
18117	StreamProcessorStatusFailed = "FAILED"
18118
18119	// StreamProcessorStatusStopping is a StreamProcessorStatus enum value
18120	StreamProcessorStatusStopping = "STOPPING"
18121)
18122
18123// StreamProcessorStatus_Values returns all elements of the StreamProcessorStatus enum
18124func StreamProcessorStatus_Values() []string {
18125	return []string{
18126		StreamProcessorStatusStopped,
18127		StreamProcessorStatusStarting,
18128		StreamProcessorStatusRunning,
18129		StreamProcessorStatusFailed,
18130		StreamProcessorStatusStopping,
18131	}
18132}
18133
18134const (
18135	// TechnicalCueTypeColorBars is a TechnicalCueType enum value
18136	TechnicalCueTypeColorBars = "ColorBars"
18137
18138	// TechnicalCueTypeEndCredits is a TechnicalCueType enum value
18139	TechnicalCueTypeEndCredits = "EndCredits"
18140
18141	// TechnicalCueTypeBlackFrames is a TechnicalCueType enum value
18142	TechnicalCueTypeBlackFrames = "BlackFrames"
18143)
18144
18145// TechnicalCueType_Values returns all elements of the TechnicalCueType enum
18146func TechnicalCueType_Values() []string {
18147	return []string{
18148		TechnicalCueTypeColorBars,
18149		TechnicalCueTypeEndCredits,
18150		TechnicalCueTypeBlackFrames,
18151	}
18152}
18153
18154const (
18155	// TextTypesLine is a TextTypes enum value
18156	TextTypesLine = "LINE"
18157
18158	// TextTypesWord is a TextTypes enum value
18159	TextTypesWord = "WORD"
18160)
18161
18162// TextTypes_Values returns all elements of the TextTypes enum
18163func TextTypes_Values() []string {
18164	return []string{
18165		TextTypesLine,
18166		TextTypesWord,
18167	}
18168}
18169
18170const (
18171	// VideoJobStatusInProgress is a VideoJobStatus enum value
18172	VideoJobStatusInProgress = "IN_PROGRESS"
18173
18174	// VideoJobStatusSucceeded is a VideoJobStatus enum value
18175	VideoJobStatusSucceeded = "SUCCEEDED"
18176
18177	// VideoJobStatusFailed is a VideoJobStatus enum value
18178	VideoJobStatusFailed = "FAILED"
18179)
18180
18181// VideoJobStatus_Values returns all elements of the VideoJobStatus enum
18182func VideoJobStatus_Values() []string {
18183	return []string{
18184		VideoJobStatusInProgress,
18185		VideoJobStatusSucceeded,
18186		VideoJobStatusFailed,
18187	}
18188}
18189