/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */

#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/RekognitionErrors.h>
#include <aws/core/client/AWSError.h>
#include <aws/core/client/ClientConfiguration.h>
#include <aws/core/client/AWSClient.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/core/utils/json/JsonSerializer.h>
#include <aws/rekognition/model/CompareFacesResult.h>
#include <aws/rekognition/model/CreateCollectionResult.h>
#include <aws/rekognition/model/CreateProjectResult.h>
#include <aws/rekognition/model/CreateProjectVersionResult.h>
#include <aws/rekognition/model/CreateStreamProcessorResult.h>
#include <aws/rekognition/model/DeleteCollectionResult.h>
#include <aws/rekognition/model/DeleteFacesResult.h>
#include <aws/rekognition/model/DeleteProjectResult.h>
#include <aws/rekognition/model/DeleteProjectVersionResult.h>
#include <aws/rekognition/model/DeleteStreamProcessorResult.h>
#include <aws/rekognition/model/DescribeCollectionResult.h>
#include <aws/rekognition/model/DescribeProjectVersionsResult.h>
#include <aws/rekognition/model/DescribeProjectsResult.h>
#include <aws/rekognition/model/DescribeStreamProcessorResult.h>
#include <aws/rekognition/model/DetectCustomLabelsResult.h>
#include <aws/rekognition/model/DetectFacesResult.h>
#include <aws/rekognition/model/DetectLabelsResult.h>
#include <aws/rekognition/model/DetectModerationLabelsResult.h>
#include <aws/rekognition/model/DetectTextResult.h>
#include <aws/rekognition/model/GetCelebrityInfoResult.h>
#include <aws/rekognition/model/GetCelebrityRecognitionResult.h>
#include <aws/rekognition/model/GetContentModerationResult.h>
#include <aws/rekognition/model/GetFaceDetectionResult.h>
#include <aws/rekognition/model/GetFaceSearchResult.h>
#include <aws/rekognition/model/GetLabelDetectionResult.h>
#include <aws/rekognition/model/GetPersonTrackingResult.h>
#include <aws/rekognition/model/GetSegmentDetectionResult.h>
#include <aws/rekognition/model/GetTextDetectionResult.h>
#include <aws/rekognition/model/IndexFacesResult.h>
#include <aws/rekognition/model/ListCollectionsResult.h>
#include <aws/rekognition/model/ListFacesResult.h>
#include <aws/rekognition/model/ListStreamProcessorsResult.h>
#include <aws/rekognition/model/RecognizeCelebritiesResult.h>
#include <aws/rekognition/model/SearchFacesResult.h>
#include <aws/rekognition/model/SearchFacesByImageResult.h>
#include <aws/rekognition/model/StartCelebrityRecognitionResult.h>
#include <aws/rekognition/model/StartContentModerationResult.h>
#include <aws/rekognition/model/StartFaceDetectionResult.h>
#include <aws/rekognition/model/StartFaceSearchResult.h>
#include <aws/rekognition/model/StartLabelDetectionResult.h>
#include <aws/rekognition/model/StartPersonTrackingResult.h>
#include <aws/rekognition/model/StartProjectVersionResult.h>
#include <aws/rekognition/model/StartSegmentDetectionResult.h>
#include <aws/rekognition/model/StartStreamProcessorResult.h>
#include <aws/rekognition/model/StartTextDetectionResult.h>
#include <aws/rekognition/model/StopProjectVersionResult.h>
#include <aws/rekognition/model/StopStreamProcessorResult.h>
#include <aws/core/client/AsyncCallerContext.h>
#include <aws/core/http/HttpTypes.h>
#include <future>
#include <functional>

namespace Aws
{

namespace Http
{
  class HttpClient;
  class HttpClientFactory;
} // namespace Http

namespace Utils
{
  template< typename R, typename E> class Outcome;
namespace Threading
{
  class Executor;
} // namespace Threading
} // namespace Utils

namespace Auth
{
  class AWSCredentials;
  class AWSCredentialsProvider;
} // namespace Auth

namespace Client
{
  class RetryStrategy;
} // namespace Client

namespace Rekognition
{

namespace Model
{
        class CompareFacesRequest;
        class CreateCollectionRequest;
        class CreateProjectRequest;
        class CreateProjectVersionRequest;
        class CreateStreamProcessorRequest;
        class DeleteCollectionRequest;
        class DeleteFacesRequest;
        class DeleteProjectRequest;
        class DeleteProjectVersionRequest;
        class DeleteStreamProcessorRequest;
        class DescribeCollectionRequest;
        class DescribeProjectVersionsRequest;
        class DescribeProjectsRequest;
        class DescribeStreamProcessorRequest;
        class DetectCustomLabelsRequest;
        class DetectFacesRequest;
        class DetectLabelsRequest;
        class DetectModerationLabelsRequest;
        class DetectTextRequest;
        class GetCelebrityInfoRequest;
        class GetCelebrityRecognitionRequest;
        class GetContentModerationRequest;
        class GetFaceDetectionRequest;
        class GetFaceSearchRequest;
        class GetLabelDetectionRequest;
        class GetPersonTrackingRequest;
        class GetSegmentDetectionRequest;
        class GetTextDetectionRequest;
        class IndexFacesRequest;
        class ListCollectionsRequest;
        class ListFacesRequest;
        class ListStreamProcessorsRequest;
        class RecognizeCelebritiesRequest;
        class SearchFacesRequest;
        class SearchFacesByImageRequest;
        class StartCelebrityRecognitionRequest;
        class StartContentModerationRequest;
        class StartFaceDetectionRequest;
        class StartFaceSearchRequest;
        class StartLabelDetectionRequest;
        class StartPersonTrackingRequest;
        class StartProjectVersionRequest;
        class StartSegmentDetectionRequest;
        class StartStreamProcessorRequest;
        class StartTextDetectionRequest;
        class StopProjectVersionRequest;
        class StopStreamProcessorRequest;

        typedef Aws::Utils::Outcome<CompareFacesResult, RekognitionError> CompareFacesOutcome;
        typedef Aws::Utils::Outcome<CreateCollectionResult, RekognitionError> CreateCollectionOutcome;
        typedef Aws::Utils::Outcome<CreateProjectResult, RekognitionError> CreateProjectOutcome;
        typedef Aws::Utils::Outcome<CreateProjectVersionResult, RekognitionError> CreateProjectVersionOutcome;
        typedef Aws::Utils::Outcome<CreateStreamProcessorResult, RekognitionError> CreateStreamProcessorOutcome;
        typedef Aws::Utils::Outcome<DeleteCollectionResult, RekognitionError> DeleteCollectionOutcome;
        typedef Aws::Utils::Outcome<DeleteFacesResult, RekognitionError> DeleteFacesOutcome;
        typedef Aws::Utils::Outcome<DeleteProjectResult, RekognitionError> DeleteProjectOutcome;
        typedef Aws::Utils::Outcome<DeleteProjectVersionResult, RekognitionError> DeleteProjectVersionOutcome;
        typedef Aws::Utils::Outcome<DeleteStreamProcessorResult, RekognitionError> DeleteStreamProcessorOutcome;
        typedef Aws::Utils::Outcome<DescribeCollectionResult, RekognitionError> DescribeCollectionOutcome;
        typedef Aws::Utils::Outcome<DescribeProjectVersionsResult, RekognitionError> DescribeProjectVersionsOutcome;
        typedef Aws::Utils::Outcome<DescribeProjectsResult, RekognitionError> DescribeProjectsOutcome;
        typedef Aws::Utils::Outcome<DescribeStreamProcessorResult, RekognitionError> DescribeStreamProcessorOutcome;
        typedef Aws::Utils::Outcome<DetectCustomLabelsResult, RekognitionError> DetectCustomLabelsOutcome;
        typedef Aws::Utils::Outcome<DetectFacesResult, RekognitionError> DetectFacesOutcome;
        typedef Aws::Utils::Outcome<DetectLabelsResult, RekognitionError> DetectLabelsOutcome;
        typedef Aws::Utils::Outcome<DetectModerationLabelsResult, RekognitionError> DetectModerationLabelsOutcome;
        typedef Aws::Utils::Outcome<DetectTextResult, RekognitionError> DetectTextOutcome;
        typedef Aws::Utils::Outcome<GetCelebrityInfoResult, RekognitionError> GetCelebrityInfoOutcome;
        typedef Aws::Utils::Outcome<GetCelebrityRecognitionResult, RekognitionError> GetCelebrityRecognitionOutcome;
        typedef Aws::Utils::Outcome<GetContentModerationResult, RekognitionError> GetContentModerationOutcome;
        typedef Aws::Utils::Outcome<GetFaceDetectionResult, RekognitionError> GetFaceDetectionOutcome;
        typedef Aws::Utils::Outcome<GetFaceSearchResult, RekognitionError> GetFaceSearchOutcome;
        typedef Aws::Utils::Outcome<GetLabelDetectionResult, RekognitionError> GetLabelDetectionOutcome;
        typedef Aws::Utils::Outcome<GetPersonTrackingResult, RekognitionError> GetPersonTrackingOutcome;
        typedef Aws::Utils::Outcome<GetSegmentDetectionResult, RekognitionError> GetSegmentDetectionOutcome;
        typedef Aws::Utils::Outcome<GetTextDetectionResult, RekognitionError> GetTextDetectionOutcome;
        typedef Aws::Utils::Outcome<IndexFacesResult, RekognitionError> IndexFacesOutcome;
        typedef Aws::Utils::Outcome<ListCollectionsResult, RekognitionError> ListCollectionsOutcome;
        typedef Aws::Utils::Outcome<ListFacesResult, RekognitionError> ListFacesOutcome;
        typedef Aws::Utils::Outcome<ListStreamProcessorsResult, RekognitionError> ListStreamProcessorsOutcome;
        typedef Aws::Utils::Outcome<RecognizeCelebritiesResult, RekognitionError> RecognizeCelebritiesOutcome;
        typedef Aws::Utils::Outcome<SearchFacesResult, RekognitionError> SearchFacesOutcome;
        typedef Aws::Utils::Outcome<SearchFacesByImageResult, RekognitionError> SearchFacesByImageOutcome;
        typedef Aws::Utils::Outcome<StartCelebrityRecognitionResult, RekognitionError> StartCelebrityRecognitionOutcome;
        typedef Aws::Utils::Outcome<StartContentModerationResult, RekognitionError> StartContentModerationOutcome;
        typedef Aws::Utils::Outcome<StartFaceDetectionResult, RekognitionError> StartFaceDetectionOutcome;
        typedef Aws::Utils::Outcome<StartFaceSearchResult, RekognitionError> StartFaceSearchOutcome;
        typedef Aws::Utils::Outcome<StartLabelDetectionResult, RekognitionError> StartLabelDetectionOutcome;
        typedef Aws::Utils::Outcome<StartPersonTrackingResult, RekognitionError> StartPersonTrackingOutcome;
        typedef Aws::Utils::Outcome<StartProjectVersionResult, RekognitionError> StartProjectVersionOutcome;
        typedef Aws::Utils::Outcome<StartSegmentDetectionResult, RekognitionError> StartSegmentDetectionOutcome;
        typedef Aws::Utils::Outcome<StartStreamProcessorResult, RekognitionError> StartStreamProcessorOutcome;
        typedef Aws::Utils::Outcome<StartTextDetectionResult, RekognitionError> StartTextDetectionOutcome;
        typedef Aws::Utils::Outcome<StopProjectVersionResult, RekognitionError> StopProjectVersionOutcome;
        typedef Aws::Utils::Outcome<StopStreamProcessorResult, RekognitionError> StopStreamProcessorOutcome;

        typedef std::future<CompareFacesOutcome> CompareFacesOutcomeCallable;
        typedef std::future<CreateCollectionOutcome> CreateCollectionOutcomeCallable;
        typedef std::future<CreateProjectOutcome> CreateProjectOutcomeCallable;
        typedef std::future<CreateProjectVersionOutcome> CreateProjectVersionOutcomeCallable;
        typedef std::future<CreateStreamProcessorOutcome> CreateStreamProcessorOutcomeCallable;
        typedef std::future<DeleteCollectionOutcome> DeleteCollectionOutcomeCallable;
        typedef std::future<DeleteFacesOutcome> DeleteFacesOutcomeCallable;
        typedef std::future<DeleteProjectOutcome> DeleteProjectOutcomeCallable;
        typedef std::future<DeleteProjectVersionOutcome> DeleteProjectVersionOutcomeCallable;
        typedef std::future<DeleteStreamProcessorOutcome> DeleteStreamProcessorOutcomeCallable;
        typedef std::future<DescribeCollectionOutcome> DescribeCollectionOutcomeCallable;
        typedef std::future<DescribeProjectVersionsOutcome> DescribeProjectVersionsOutcomeCallable;
        typedef std::future<DescribeProjectsOutcome> DescribeProjectsOutcomeCallable;
        typedef std::future<DescribeStreamProcessorOutcome> DescribeStreamProcessorOutcomeCallable;
        typedef std::future<DetectCustomLabelsOutcome> DetectCustomLabelsOutcomeCallable;
        typedef std::future<DetectFacesOutcome> DetectFacesOutcomeCallable;
        typedef std::future<DetectLabelsOutcome> DetectLabelsOutcomeCallable;
        typedef std::future<DetectModerationLabelsOutcome> DetectModerationLabelsOutcomeCallable;
        typedef std::future<DetectTextOutcome> DetectTextOutcomeCallable;
        typedef std::future<GetCelebrityInfoOutcome> GetCelebrityInfoOutcomeCallable;
        typedef std::future<GetCelebrityRecognitionOutcome> GetCelebrityRecognitionOutcomeCallable;
        typedef std::future<GetContentModerationOutcome> GetContentModerationOutcomeCallable;
        typedef std::future<GetFaceDetectionOutcome> GetFaceDetectionOutcomeCallable;
        typedef std::future<GetFaceSearchOutcome> GetFaceSearchOutcomeCallable;
        typedef std::future<GetLabelDetectionOutcome> GetLabelDetectionOutcomeCallable;
        typedef std::future<GetPersonTrackingOutcome> GetPersonTrackingOutcomeCallable;
        typedef std::future<GetSegmentDetectionOutcome> GetSegmentDetectionOutcomeCallable;
        typedef std::future<GetTextDetectionOutcome> GetTextDetectionOutcomeCallable;
        typedef std::future<IndexFacesOutcome> IndexFacesOutcomeCallable;
        typedef std::future<ListCollectionsOutcome> ListCollectionsOutcomeCallable;
        typedef std::future<ListFacesOutcome> ListFacesOutcomeCallable;
        typedef std::future<ListStreamProcessorsOutcome> ListStreamProcessorsOutcomeCallable;
        typedef std::future<RecognizeCelebritiesOutcome> RecognizeCelebritiesOutcomeCallable;
        typedef std::future<SearchFacesOutcome> SearchFacesOutcomeCallable;
        typedef std::future<SearchFacesByImageOutcome> SearchFacesByImageOutcomeCallable;
        typedef std::future<StartCelebrityRecognitionOutcome> StartCelebrityRecognitionOutcomeCallable;
        typedef std::future<StartContentModerationOutcome> StartContentModerationOutcomeCallable;
        typedef std::future<StartFaceDetectionOutcome> StartFaceDetectionOutcomeCallable;
        typedef std::future<StartFaceSearchOutcome> StartFaceSearchOutcomeCallable;
        typedef std::future<StartLabelDetectionOutcome> StartLabelDetectionOutcomeCallable;
        typedef std::future<StartPersonTrackingOutcome> StartPersonTrackingOutcomeCallable;
        typedef std::future<StartProjectVersionOutcome> StartProjectVersionOutcomeCallable;
        typedef std::future<StartSegmentDetectionOutcome> StartSegmentDetectionOutcomeCallable;
        typedef std::future<StartStreamProcessorOutcome> StartStreamProcessorOutcomeCallable;
        typedef std::future<StartTextDetectionOutcome> StartTextDetectionOutcomeCallable;
        typedef std::future<StopProjectVersionOutcome> StopProjectVersionOutcomeCallable;
        typedef std::future<StopStreamProcessorOutcome> StopStreamProcessorOutcomeCallable;
} // namespace Model

  class RekognitionClient;

    typedef std::function<void(const RekognitionClient*, const Model::CompareFacesRequest&, const Model::CompareFacesOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > CompareFacesResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::CreateCollectionRequest&, const Model::CreateCollectionOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > CreateCollectionResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::CreateProjectRequest&, const Model::CreateProjectOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > CreateProjectResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::CreateProjectVersionRequest&, const Model::CreateProjectVersionOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > CreateProjectVersionResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::CreateStreamProcessorRequest&, const Model::CreateStreamProcessorOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > CreateStreamProcessorResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::DeleteCollectionRequest&, const Model::DeleteCollectionOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteCollectionResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::DeleteFacesRequest&, const Model::DeleteFacesOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteFacesResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::DeleteProjectRequest&, const Model::DeleteProjectOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteProjectResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::DeleteProjectVersionRequest&, const Model::DeleteProjectVersionOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteProjectVersionResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::DeleteStreamProcessorRequest&, const Model::DeleteStreamProcessorOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteStreamProcessorResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::DescribeCollectionRequest&, const Model::DescribeCollectionOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DescribeCollectionResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::DescribeProjectVersionsRequest&, const Model::DescribeProjectVersionsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DescribeProjectVersionsResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::DescribeProjectsRequest&, const Model::DescribeProjectsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DescribeProjectsResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::DescribeStreamProcessorRequest&, const Model::DescribeStreamProcessorOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DescribeStreamProcessorResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::DetectCustomLabelsRequest&, const Model::DetectCustomLabelsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DetectCustomLabelsResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::DetectFacesRequest&, const Model::DetectFacesOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DetectFacesResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::DetectLabelsRequest&, const Model::DetectLabelsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DetectLabelsResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::DetectModerationLabelsRequest&, const Model::DetectModerationLabelsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DetectModerationLabelsResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::DetectTextRequest&, const Model::DetectTextOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DetectTextResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::GetCelebrityInfoRequest&, const Model::GetCelebrityInfoOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetCelebrityInfoResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::GetCelebrityRecognitionRequest&, const Model::GetCelebrityRecognitionOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetCelebrityRecognitionResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::GetContentModerationRequest&, const Model::GetContentModerationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetContentModerationResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::GetFaceDetectionRequest&, const Model::GetFaceDetectionOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetFaceDetectionResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::GetFaceSearchRequest&, const Model::GetFaceSearchOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetFaceSearchResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::GetLabelDetectionRequest&, const Model::GetLabelDetectionOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetLabelDetectionResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::GetPersonTrackingRequest&, const Model::GetPersonTrackingOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetPersonTrackingResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::GetSegmentDetectionRequest&, const Model::GetSegmentDetectionOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetSegmentDetectionResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::GetTextDetectionRequest&, const Model::GetTextDetectionOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetTextDetectionResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::IndexFacesRequest&, const Model::IndexFacesOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > IndexFacesResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::ListCollectionsRequest&, const Model::ListCollectionsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > ListCollectionsResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::ListFacesRequest&, const Model::ListFacesOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > ListFacesResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::ListStreamProcessorsRequest&, const Model::ListStreamProcessorsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > ListStreamProcessorsResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::RecognizeCelebritiesRequest&, const Model::RecognizeCelebritiesOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > RecognizeCelebritiesResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::SearchFacesRequest&, const Model::SearchFacesOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > SearchFacesResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::SearchFacesByImageRequest&, const Model::SearchFacesByImageOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > SearchFacesByImageResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::StartCelebrityRecognitionRequest&, const Model::StartCelebrityRecognitionOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > StartCelebrityRecognitionResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::StartContentModerationRequest&, const Model::StartContentModerationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > StartContentModerationResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::StartFaceDetectionRequest&, const Model::StartFaceDetectionOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > StartFaceDetectionResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::StartFaceSearchRequest&, const Model::StartFaceSearchOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > StartFaceSearchResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::StartLabelDetectionRequest&, const Model::StartLabelDetectionOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > StartLabelDetectionResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::StartPersonTrackingRequest&, const Model::StartPersonTrackingOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > StartPersonTrackingResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::StartProjectVersionRequest&, const Model::StartProjectVersionOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > StartProjectVersionResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::StartSegmentDetectionRequest&, const Model::StartSegmentDetectionOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > StartSegmentDetectionResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::StartStreamProcessorRequest&, const Model::StartStreamProcessorOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > StartStreamProcessorResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::StartTextDetectionRequest&, const Model::StartTextDetectionOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > StartTextDetectionResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::StopProjectVersionRequest&, const Model::StopProjectVersionOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > StopProjectVersionResponseReceivedHandler;
    typedef std::function<void(const RekognitionClient*, const Model::StopStreamProcessorRequest&, const Model::StopStreamProcessorOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > StopStreamProcessorResponseReceivedHandler;

  /**
   *

This is the Amazon Rekognition API reference.
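 * A minimal construction sketch (illustrative, not part of the original
 * documentation); the region value shown is an assumption:
 * @code
 * // Aws::InitAPI(options) must have been called before creating clients.
 * Aws::Client::ClientConfiguration config;
 * config.region = "us-west-2";                         // hypothetical region
 * Aws::Rekognition::RekognitionClient client(config);
 * @endcode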

  */
  class AWS_REKOGNITION_API RekognitionClient : public Aws::Client::AWSJsonClient
  {
    public:
      typedef Aws::Client::AWSJsonClient BASECLASS;

       /**
        * Initializes client to use DefaultCredentialProviderChain, with default http client factory, and optional client config. If client config
        * is not specified, it will be initialized to default values.
        */
        RekognitionClient(const Aws::Client::ClientConfiguration& clientConfiguration = Aws::Client::ClientConfiguration());

       /**
        * Initializes client to use SimpleAWSCredentialsProvider, with default http client factory, and optional client config. If client config
        * is not specified, it will be initialized to default values.
        */
        RekognitionClient(const Aws::Auth::AWSCredentials& credentials, const Aws::Client::ClientConfiguration& clientConfiguration = Aws::Client::ClientConfiguration());

       /**
        * Initializes client to use specified credentials provider with specified client config. If http client factory is not supplied,
        * the default http client factory will be used
        */
        RekognitionClient(const std::shared_ptr<Aws::Auth::AWSCredentialsProvider>& credentialsProvider,
                          const Aws::Client::ClientConfiguration& clientConfiguration = Aws::Client::ClientConfiguration());

        virtual ~RekognitionClient();

        /**
         *

Compares a face in the source input image with each of the 100 largest * faces detected in the target input image.

If the source * image contains multiple faces, the service detects the largest face and compares * it with each face detected in the target image.

You pass the * input and target images either as base64-encoded image bytes or as references to * images in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition * operations, passing image bytes isn't supported. The image must be formatted as * a PNG or JPEG file.

In response, the operation returns an array of face * matches ordered by similarity score in descending order. For each face match, * the response provides a bounding box of the face, facial landmarks, pose details * (pitch, roll, and yaw), quality (brightness and sharpness), and confidence value * (indicating the level of confidence that the bounding box contains a face). The * response also provides a similarity score, which indicates how closely the faces * match.

By default, only faces with a similarity score of greater * than or equal to 80% are returned in the response. You can change this value by * specifying the SimilarityThreshold parameter.

* CompareFaces also returns an array of faces that don't match the * source image. For each face, it returns a bounding box, confidence value, * landmarks, pose details, and quality. The response also returns information * about the face in the source image, including the bounding box of the face and * confidence value.

The QualityFilter input parameter allows * you to filter out detected faces that don’t meet a required quality bar. The * quality bar is based on a variety of common use cases. Use * QualityFilter to set the quality bar by specifying * LOW, MEDIUM, or HIGH. If you do not want * to filter detected faces, specify NONE. The default value is * NONE.

To use quality filtering, you need a * collection associated with version 3 of the face model or higher. To get the * version of the face model associated with a collection, call * DescribeCollection.

If the image doesn't contain Exif * metadata, CompareFaces returns orientation information for the * source and target images. Use these values to display the images with the * correct image orientation.

If no faces are detected in the source or * target images, CompareFaces returns an * InvalidParameterException error.

This is a * stateless API operation. That is, data returned by this operation doesn't * persist.

For an example, see Comparing Faces in Images in the * Amazon Rekognition Developer Guide.

This operation requires permissions * to perform the rekognition:CompareFaces action.

See * Also:

AWS * API Reference
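 * A brief usage sketch (illustrative only, not part of the original
 * documentation); it assumes a constructed RekognitionClient named client, and
 * the bucket and object names are hypothetical:
 * @code
 * Aws::Rekognition::Model::Image source, target;
 * source.SetS3Object(Aws::Rekognition::Model::S3Object().WithBucket("my-bucket").WithName("source.jpg"));
 * target.SetS3Object(Aws::Rekognition::Model::S3Object().WithBucket("my-bucket").WithName("target.jpg"));
 * Aws::Rekognition::Model::CompareFacesRequest request;
 * request.SetSourceImage(source);
 * request.SetTargetImage(target);
 * request.SetSimilarityThreshold(90.0);                // raise the default 80% threshold
 * auto outcome = client.CompareFaces(request);
 * if (outcome.IsSuccess()) {
 *   for (const auto& match : outcome.GetResult().GetFaceMatches()) {
 *     // match.GetSimilarity(), match.GetFace().GetBoundingBox(), ...
 *   }
 * }
 * @endcode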

*/ virtual Model::CompareFacesOutcome CompareFaces(const Model::CompareFacesRequest& request) const; /** *

Compares a face in the source input image with each of the 100 largest * faces detected in the target input image.

If the source * image contains multiple faces, the service detects the largest face and compares * it with each face detected in the target image.

You pass the * input and target images either as base64-encoded image bytes or as references to * images in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition * operations, passing image bytes isn't supported. The image must be formatted as * a PNG or JPEG file.

In response, the operation returns an array of face * matches ordered by similarity score in descending order. For each face match, * the response provides a bounding box of the face, facial landmarks, pose details * (pitch, roll, and yaw), quality (brightness and sharpness), and confidence value * (indicating the level of confidence that the bounding box contains a face). The * response also provides a similarity score, which indicates how closely the faces * match.

By default, only faces with a similarity score of greater * than or equal to 80% are returned in the response. You can change this value by * specifying the SimilarityThreshold parameter.

* CompareFaces also returns an array of faces that don't match the * source image. For each face, it returns a bounding box, confidence value, * landmarks, pose details, and quality. The response also returns information * about the face in the source image, including the bounding box of the face and * confidence value.

The QualityFilter input parameter allows * you to filter out detected faces that don’t meet a required quality bar. The * quality bar is based on a variety of common use cases. Use * QualityFilter to set the quality bar by specifying * LOW, MEDIUM, or HIGH. If you do not want * to filter detected faces, specify NONE. The default value is * NONE.

To use quality filtering, you need a * collection associated with version 3 of the face model or higher. To get the * version of the face model associated with a collection, call * DescribeCollection.

If the image doesn't contain Exif * metadata, CompareFaces returns orientation information for the * source and target images. Use these values to display the images with the * correct image orientation.

If no faces are detected in the source or * target images, CompareFaces returns an * InvalidParameterException error.

This is a * stateless API operation. That is, data returned by this operation doesn't * persist.

For an example, see Comparing Faces in Images in the * Amazon Rekognition Developer Guide.

This operation requires permissions * to perform the rekognition:CompareFaces action.

See * Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::CompareFacesOutcomeCallable CompareFacesCallable(const Model::CompareFacesRequest& request) const; /** *

Compares a face in the source input image with each of the 100 largest * faces detected in the target input image.

If the source * image contains multiple faces, the service detects the largest face and compares * it with each face detected in the target image.

You pass the * input and target images either as base64-encoded image bytes or as references to * images in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition * operations, passing image bytes isn't supported. The image must be formatted as * a PNG or JPEG file.

In response, the operation returns an array of face * matches ordered by similarity score in descending order. For each face match, * the response provides a bounding box of the face, facial landmarks, pose details * (pitch, roll, and yaw), quality (brightness and sharpness), and confidence value * (indicating the level of confidence that the bounding box contains a face). The * response also provides a similarity score, which indicates how closely the faces * match.

By default, only faces with a similarity score of greater * than or equal to 80% are returned in the response. You can change this value by * specifying the SimilarityThreshold parameter.

* CompareFaces also returns an array of faces that don't match the * source image. For each face, it returns a bounding box, confidence value, * landmarks, pose details, and quality. The response also returns information * about the face in the source image, including the bounding box of the face and * confidence value.

The QualityFilter input parameter allows * you to filter out detected faces that don’t meet a required quality bar. The * quality bar is based on a variety of common use cases. Use * QualityFilter to set the quality bar by specifying * LOW, MEDIUM, or HIGH. If you do not want * to filter detected faces, specify NONE. The default value is * NONE.

To use quality filtering, you need a * collection associated with version 3 of the face model or higher. To get the * version of the face model associated with a collection, call * DescribeCollection.

If the image doesn't contain Exif * metadata, CompareFaces returns orientation information for the * source and target images. Use these values to display the images with the * correct image orientation.

If no faces are detected in the source or * target images, CompareFaces returns an * InvalidParameterException error.

This is a * stateless API operation. That is, data returned by this operation doesn't * persist.

For an example, see Comparing Faces in Images in the * Amazon Rekognition Developer Guide.

This operation requires permissions * to perform the rekognition:CompareFaces action.

See * Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void CompareFacesAsync(const Model::CompareFacesRequest& request, const CompareFacesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Creates a collection in an AWS Region. You can add faces to the collection * using the IndexFaces operation.

For example, you might create * collections, one for each of your application users. A user can then index faces * using the IndexFaces operation and persist results in a specific * collection. Then, a user can search the collection for faces in the * user-specific container.

When you create a collection, it is associated * with the latest version of the face model.

Collection * names are case-sensitive.

This operation requires permissions to * perform the rekognition:CreateCollection action.

See * Also:

AWS * API Reference
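 * A brief usage sketch (illustrative only); it assumes a constructed
 * RekognitionClient named client, and the collection ID is hypothetical:
 * @code
 * Aws::Rekognition::Model::CreateCollectionRequest request;
 * request.SetCollectionId("my-collection");
 * auto outcome = client.CreateCollection(request);
 * if (outcome.IsSuccess()) {
 *   // outcome.GetResult().GetCollectionArn(), outcome.GetResult().GetFaceModelVersion()
 * }
 * @endcode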

*/ virtual Model::CreateCollectionOutcome CreateCollection(const Model::CreateCollectionRequest& request) const; /** *

Creates a collection in an AWS Region. You can add faces to the collection * using the IndexFaces operation.

For example, you might create * collections, one for each of your application users. A user can then index faces * using the IndexFaces operation and persist results in a specific * collection. Then, a user can search the collection for faces in the * user-specific container.

When you create a collection, it is associated * with the latest version of the face model.

Collection * names are case-sensitive.

This operation requires permissions to * perform the rekognition:CreateCollection action.

See * Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::CreateCollectionOutcomeCallable CreateCollectionCallable(const Model::CreateCollectionRequest& request) const; /** *

Creates a collection in an AWS Region. You can add faces to the collection * using the IndexFaces operation.

For example, you might create * collections, one for each of your application users. A user can then index faces * using the IndexFaces operation and persist results in a specific * collection. Then, a user can search the collection for faces in the * user-specific container.

When you create a collection, it is associated * with the latest version of the face model.

Collection * names are case-sensitive.

This operation requires permissions to * perform the rekognition:CreateCollection action.

See * Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void CreateCollectionAsync(const Model::CreateCollectionRequest& request, const CreateCollectionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Creates a new Amazon Rekognition Custom Labels project. A project is a * logical grouping of resources (images, Labels, models) and operations (training, * evaluation and detection).

This operation requires permissions to * perform the rekognition:CreateProject action.

See * Also:

AWS * API Reference
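 * A brief usage sketch (illustrative only); the project name is hypothetical
 * and a constructed RekognitionClient named client is assumed:
 * @code
 * Aws::Rekognition::Model::CreateProjectRequest request;
 * request.SetProjectName("my-custom-labels-project");
 * auto outcome = client.CreateProject(request);
 * if (outcome.IsSuccess()) {
 *   Aws::String projectArn = outcome.GetResult().GetProjectArn();
 * }
 * @endcode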

*/ virtual Model::CreateProjectOutcome CreateProject(const Model::CreateProjectRequest& request) const; /** *

Creates a new Amazon Rekognition Custom Labels project. A project is a * logical grouping of resources (images, Labels, models) and operations (training, * evaluation and detection).

This operation requires permissions to * perform the rekognition:CreateProject action.

See * Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::CreateProjectOutcomeCallable CreateProjectCallable(const Model::CreateProjectRequest& request) const; /** *

Creates a new Amazon Rekognition Custom Labels project. A project is a * logical grouping of resources (images, Labels, models) and operations (training, * evaluation and detection).

This operation requires permissions to * perform the rekognition:CreateProject action.

See * Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void CreateProjectAsync(const Model::CreateProjectRequest& request, const CreateProjectResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Creates a new version of a model and begins training. Models are managed as * part of an Amazon Rekognition Custom Labels project. You can specify one * training dataset and one testing dataset. The response from * CreateProjectVersion is an Amazon Resource Name (ARN) for the * version of the model.

Training takes a while to complete. You can get * the current status by calling DescribeProjectVersions.

Once * training has successfully completed, call DescribeProjectVersions to get * the training results and evaluate the model.

After evaluating the model, * you start the model by calling StartProjectVersion.

This operation * requires permissions to perform the * rekognition:CreateProjectVersion action.

See Also:

* AWS * API Reference
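 * A partial usage sketch (illustrative only); projectArn is assumed to come
 * from CreateProject, and the version name is hypothetical. Training data,
 * testing data, and output configuration must also be supplied:
 * @code
 * Aws::Rekognition::Model::CreateProjectVersionRequest request;
 * request.SetProjectArn(projectArn);
 * request.SetVersionName("v1");
 * // Also call SetTrainingData, SetTestingData, and SetOutputConfig before sending.
 * auto outcome = client.CreateProjectVersion(request);
 * if (outcome.IsSuccess()) {
 *   Aws::String versionArn = outcome.GetResult().GetProjectVersionArn();
 * }
 * @endcode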

*/ virtual Model::CreateProjectVersionOutcome CreateProjectVersion(const Model::CreateProjectVersionRequest& request) const; /** *

Creates a new version of a model and begins training. Models are managed as * part of an Amazon Rekognition Custom Labels project. You can specify one * training dataset and one testing dataset. The response from * CreateProjectVersion is an Amazon Resource Name (ARN) for the * version of the model.

Training takes a while to complete. You can get * the current status by calling DescribeProjectVersions.

Once * training has successfully completed, call DescribeProjectVersions to get * the training results and evaluate the model.

After evaluating the model, * you start the model by calling StartProjectVersion.

This operation * requires permissions to perform the * rekognition:CreateProjectVersion action.

See Also:

* AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::CreateProjectVersionOutcomeCallable CreateProjectVersionCallable(const Model::CreateProjectVersionRequest& request) const; /** *

Creates a new version of a model and begins training. Models are managed as * part of an Amazon Rekognition Custom Labels project. You can specify one * training dataset and one testing dataset. The response from * CreateProjectVersion is an Amazon Resource Name (ARN) for the * version of the model.

Training takes a while to complete. You can get * the current status by calling DescribeProjectVersions.

Once * training has successfully completed, call DescribeProjectVersions to get * the training results and evaluate the model.

After evaluating the model, * you start the model by calling StartProjectVersion.

This operation * requires permissions to perform the * rekognition:CreateProjectVersion action.

See Also:

* AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void CreateProjectVersionAsync(const Model::CreateProjectVersionRequest& request, const CreateProjectVersionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Creates an Amazon Rekognition stream processor that you can use to detect and * recognize faces in a streaming video.

Amazon Rekognition Video is a * consumer of live video from Amazon Kinesis Video Streams. Amazon Rekognition * Video sends analysis results to Amazon Kinesis Data Streams.

You provide * as input a Kinesis video stream (Input) and a Kinesis data stream * (Output). You also specify the face recognition criteria in * Settings. For example, the collection containing faces that you * want to recognize. Use Name to assign an identifier for the stream * processor. You use Name to manage the stream processor. For * example, you can start processing the source video by calling * StartStreamProcessor with the Name field.

After you * have finished analyzing a streaming video, use StopStreamProcessor to * stop processing. You can delete the stream processor by calling * DeleteStreamProcessor.

See Also:

AWS * API Reference
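 * A condensed usage sketch (illustrative only); the stream ARNs, role ARN,
 * collection ID, and processor name are all hypothetical:
 * @code
 * Aws::Rekognition::Model::StreamProcessorInput input;
 * input.SetKinesisVideoStream(Aws::Rekognition::Model::KinesisVideoStream().WithArn(kinesisVideoStreamArn));
 * Aws::Rekognition::Model::StreamProcessorOutput output;
 * output.SetKinesisDataStream(Aws::Rekognition::Model::KinesisDataStream().WithArn(kinesisDataStreamArn));
 * Aws::Rekognition::Model::StreamProcessorSettings settings;
 * settings.SetFaceSearch(Aws::Rekognition::Model::FaceSearchSettings().WithCollectionId("my-collection"));
 * Aws::Rekognition::Model::CreateStreamProcessorRequest request;
 * request.SetName("my-stream-processor");
 * request.SetInput(input);
 * request.SetOutput(output);
 * request.SetSettings(settings);
 * request.SetRoleArn(roleArn);                         // IAM role assumed to exist
 * auto outcome = client.CreateStreamProcessor(request);
 * @endcode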

*/ virtual Model::CreateStreamProcessorOutcome CreateStreamProcessor(const Model::CreateStreamProcessorRequest& request) const; /** *

Creates an Amazon Rekognition stream processor that you can use to detect and * recognize faces in a streaming video.

Amazon Rekognition Video is a * consumer of live video from Amazon Kinesis Video Streams. Amazon Rekognition * Video sends analysis results to Amazon Kinesis Data Streams.

You provide * as input a Kinesis video stream (Input) and a Kinesis data stream * (Output). You also specify the face recognition criteria in * Settings. For example, the collection containing faces that you * want to recognize. Use Name to assign an identifier for the stream * processor. You use Name to manage the stream processor. For * example, you can start processing the source video by calling * StartStreamProcessor with the Name field.

After you * have finished analyzing a streaming video, use StopStreamProcessor to * stop processing. You can delete the stream processor by calling * DeleteStreamProcessor.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::CreateStreamProcessorOutcomeCallable CreateStreamProcessorCallable(const Model::CreateStreamProcessorRequest& request) const; /** *

Creates an Amazon Rekognition stream processor that you can use to detect and * recognize faces in a streaming video.

Amazon Rekognition Video is a * consumer of live video from Amazon Kinesis Video Streams. Amazon Rekognition * Video sends analysis results to Amazon Kinesis Data Streams.

You provide * as input a Kinesis video stream (Input) and a Kinesis data stream * (Output). You also specify the face recognition criteria in * Settings. For example, the collection containing faces that you * want to recognize. Use Name to assign an identifier for the stream * processor. You use Name to manage the stream processor. For * example, you can start processing the source video by calling * StartStreamProcessor with the Name field.

After you * have finished analyzing a streaming video, use StopStreamProcessor to * stop processing. You can delete the stream processor by calling * DeleteStreamProcessor.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void CreateStreamProcessorAsync(const Model::CreateStreamProcessorRequest& request, const CreateStreamProcessorResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Deletes the specified collection. Note that this operation removes all faces * in the collection. For an example, see delete-collection-procedure.

*

This operation requires permissions to perform the * rekognition:DeleteCollection action.

See Also:

AWS * API Reference
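 * A brief usage sketch (illustrative only); the collection ID is hypothetical
 * and a constructed RekognitionClient named client is assumed:
 * @code
 * Aws::Rekognition::Model::DeleteCollectionRequest request;
 * request.SetCollectionId("my-collection");
 * auto outcome = client.DeleteCollection(request);
 * // On success, outcome.GetResult().GetStatusCode() holds the HTTP status code.
 * @endcode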

*/ virtual Model::DeleteCollectionOutcome DeleteCollection(const Model::DeleteCollectionRequest& request) const; /** *

Deletes the specified collection. Note that this operation removes all faces * in the collection. For an example, see delete-collection-procedure.

*

This operation requires permissions to perform the * rekognition:DeleteCollection action.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DeleteCollectionOutcomeCallable DeleteCollectionCallable(const Model::DeleteCollectionRequest& request) const; /** *

Deletes the specified collection. Note that this operation removes all faces * in the collection. For an example, see delete-collection-procedure.

*

This operation requires permissions to perform the * rekognition:DeleteCollection action.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DeleteCollectionAsync(const Model::DeleteCollectionRequest& request, const DeleteCollectionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Deletes faces from a collection. You specify a collection ID and an array of * face IDs to remove from the collection.

This operation requires * permissions to perform the rekognition:DeleteFaces * action.

See Also:

AWS * API Reference
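 * A brief usage sketch (illustrative only); the collection ID and face ID are
 * hypothetical:
 * @code
 * Aws::Rekognition::Model::DeleteFacesRequest request;
 * request.SetCollectionId("my-collection");
 * request.AddFaceIds("11111111-2222-3333-4444-555555555555");
 * auto outcome = client.DeleteFaces(request);
 * // On success, outcome.GetResult().GetDeletedFaces() lists the removed face IDs.
 * @endcode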

*/ virtual Model::DeleteFacesOutcome DeleteFaces(const Model::DeleteFacesRequest& request) const; /** *

Deletes faces from a collection. You specify a collection ID and an array of * face IDs to remove from the collection.

This operation requires * permissions to perform the rekognition:DeleteFaces * action.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DeleteFacesOutcomeCallable DeleteFacesCallable(const Model::DeleteFacesRequest& request) const; /** *

Deletes faces from a collection. You specify a collection ID and an array of * face IDs to remove from the collection.

This operation requires * permissions to perform the rekognition:DeleteFaces * action.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DeleteFacesAsync(const Model::DeleteFacesRequest& request, const DeleteFacesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Deletes an Amazon Rekognition Custom Labels project. To delete a project you * must first delete all models associated with the project. To delete a model, see * DeleteProjectVersion.

This operation requires permissions to * perform the rekognition:DeleteProject action.

See * Also:

AWS * API Reference
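 * A brief usage sketch (illustrative only); projectArn is assumed to identify
 * an existing project with no remaining model versions:
 * @code
 * Aws::Rekognition::Model::DeleteProjectRequest request;
 * request.SetProjectArn(projectArn);
 * auto outcome = client.DeleteProject(request);
 * // On success, outcome.GetResult().GetStatus() reports the project status.
 * @endcode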

*/ virtual Model::DeleteProjectOutcome DeleteProject(const Model::DeleteProjectRequest& request) const; /** *

Deletes an Amazon Rekognition Custom Labels project. To delete a project you * must first delete all models associated with the project. To delete a model, see * DeleteProjectVersion.

This operation requires permissions to * perform the rekognition:DeleteProject action.

See * Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DeleteProjectOutcomeCallable DeleteProjectCallable(const Model::DeleteProjectRequest& request) const; /** *

Deletes an Amazon Rekognition Custom Labels project. To delete a project you * must first delete all models associated with the project. To delete a model, see * DeleteProjectVersion.

This operation requires permissions to * perform the rekognition:DeleteProject action.

See * Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DeleteProjectAsync(const Model::DeleteProjectRequest& request, const DeleteProjectResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Deletes an Amazon Rekognition Custom Labels model.

You can't delete a * model if it is running or if it is training. To check the status of a model, use * the Status field returned from DescribeProjectVersions. To * stop a running model call StopProjectVersion. If the model is training, * wait until it finishes.

This operation requires permissions to perform * the rekognition:DeleteProjectVersion action.

See * Also:

AWS * API Reference
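 * A brief usage sketch (illustrative only); versionArn is assumed to identify
 * a model version that is not running or training:
 * @code
 * Aws::Rekognition::Model::DeleteProjectVersionRequest request;
 * request.SetProjectVersionArn(versionArn);
 * auto outcome = client.DeleteProjectVersion(request);
 * // On success, outcome.GetResult().GetStatus() reports the version status.
 * @endcode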

*/ virtual Model::DeleteProjectVersionOutcome DeleteProjectVersion(const Model::DeleteProjectVersionRequest& request) const; /** *

Deletes an Amazon Rekognition Custom Labels model.

You can't delete a * model if it is running or if it is training. To check the status of a model, use * the Status field returned from DescribeProjectVersions. To * stop a running model call StopProjectVersion. If the model is training, * wait until it finishes.

This operation requires permissions to perform * the rekognition:DeleteProjectVersion action.

See * Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DeleteProjectVersionOutcomeCallable DeleteProjectVersionCallable(const Model::DeleteProjectVersionRequest& request) const; /** *

Deletes an Amazon Rekognition Custom Labels model.

You can't delete a * model if it is running or if it is training. To check the status of a model, use * the Status field returned from DescribeProjectVersions. To * stop a running model call StopProjectVersion. If the model is training, * wait until it finishes.

This operation requires permissions to perform * the rekognition:DeleteProjectVersion action.

See * Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DeleteProjectVersionAsync(const Model::DeleteProjectVersionRequest& request, const DeleteProjectVersionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Deletes the stream processor identified by Name. You assign the * value for Name when you create the stream processor with * CreateStreamProcessor. You might not be able to use the same name for a * stream processor for a few seconds after calling * DeleteStreamProcessor.

See Also:

AWS * API Reference
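 * A brief usage sketch (illustrative only); the processor name is hypothetical:
 * @code
 * Aws::Rekognition::Model::DeleteStreamProcessorRequest request;
 * request.SetName("my-stream-processor");
 * auto outcome = client.DeleteStreamProcessor(request);
 * @endcode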

*/ virtual Model::DeleteStreamProcessorOutcome DeleteStreamProcessor(const Model::DeleteStreamProcessorRequest& request) const; /** *

Deletes the stream processor identified by Name. You assign the * value for Name when you create the stream processor with * CreateStreamProcessor. You might not be able to use the same name for a * stream processor for a few seconds after calling * DeleteStreamProcessor.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DeleteStreamProcessorOutcomeCallable DeleteStreamProcessorCallable(const Model::DeleteStreamProcessorRequest& request) const; /** *

Deletes the stream processor identified by Name. You assign the * value for Name when you create the stream processor with * CreateStreamProcessor. You might not be able to use the same name for a * stream processor for a few seconds after calling * DeleteStreamProcessor.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DeleteStreamProcessorAsync(const Model::DeleteStreamProcessorRequest& request, const DeleteStreamProcessorResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Describes the specified collection. You can use * DescribeCollection to get information, such as the number of faces * indexed into a collection and the version of the model used by the collection * for face detection.

For more information, see Describing a Collection in * the Amazon Rekognition Developer Guide.

See Also:

AWS * API Reference
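 * A brief usage sketch (illustrative only); the collection ID is hypothetical:
 * @code
 * Aws::Rekognition::Model::DescribeCollectionRequest request;
 * request.SetCollectionId("my-collection");
 * auto outcome = client.DescribeCollection(request);
 * if (outcome.IsSuccess()) {
 *   long long faceCount = outcome.GetResult().GetFaceCount();
 *   Aws::String faceModelVersion = outcome.GetResult().GetFaceModelVersion();
 * }
 * @endcode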

*/ virtual Model::DescribeCollectionOutcome DescribeCollection(const Model::DescribeCollectionRequest& request) const; /** *

Describes the specified collection. You can use * DescribeCollection to get information, such as the number of faces * indexed into a collection and the version of the model used by the collection * for face detection.

For more information, see Describing a Collection in * the Amazon Rekognition Developer Guide.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DescribeCollectionOutcomeCallable DescribeCollectionCallable(const Model::DescribeCollectionRequest& request) const; /** *

Describes the specified collection. You can use * DescribeCollection to get information, such as the number of faces * indexed into a collection and the version of the model used by the collection * for face detection.

For more information, see Describing a Collection in * the Amazon Rekognition Developer Guide.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DescribeCollectionAsync(const Model::DescribeCollectionRequest& request, const DescribeCollectionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Lists and describes the models in an Amazon Rekognition Custom Labels * project. You can specify up to 10 model versions in * ProjectVersionArns. If you don't specify a value, descriptions for * all models are returned.

This operation requires permissions to perform * the rekognition:DescribeProjectVersions action.

See * Also:

AWS * API Reference
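 * A brief usage sketch (illustrative only); projectArn is assumed to identify
 * an existing project:
 * @code
 * Aws::Rekognition::Model::DescribeProjectVersionsRequest request;
 * request.SetProjectArn(projectArn);
 * auto outcome = client.DescribeProjectVersions(request);
 * if (outcome.IsSuccess()) {
 *   for (const auto& version : outcome.GetResult().GetProjectVersionDescriptions()) {
 *     // version.GetProjectVersionArn(), version.GetStatus(), ...
 *   }
 * }
 * @endcode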

*/ virtual Model::DescribeProjectVersionsOutcome DescribeProjectVersions(const Model::DescribeProjectVersionsRequest& request) const; /** *

Lists and describes the models in an Amazon Rekognition Custom Labels * project. You can specify up to 10 model versions in * ProjectVersionArns. If you don't specify a value, descriptions for * all models are returned.

This operation requires permissions to perform * the rekognition:DescribeProjectVersions action.

See * Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DescribeProjectVersionsOutcomeCallable DescribeProjectVersionsCallable(const Model::DescribeProjectVersionsRequest& request) const; /** *

Lists and describes the models in an Amazon Rekognition Custom Labels * project. You can specify up to 10 model versions in * ProjectVersionArns. If you don't specify a value, descriptions for * all models are returned.

This operation requires permissions to perform * the rekognition:DescribeProjectVersions action.

See * Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DescribeProjectVersionsAsync(const Model::DescribeProjectVersionsRequest& request, const DescribeProjectVersionsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Lists and gets information about your Amazon Rekognition Custom Labels * projects.

This operation requires permissions to perform the * rekognition:DescribeProjects action.

See Also:

AWS * API Reference
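 * A brief usage sketch (illustrative only); assumes a constructed
 * RekognitionClient named client:
 * @code
 * Aws::Rekognition::Model::DescribeProjectsRequest request;
 * auto outcome = client.DescribeProjects(request);
 * if (outcome.IsSuccess()) {
 *   for (const auto& project : outcome.GetResult().GetProjectDescriptions()) {
 *     // project.GetProjectArn(), project.GetStatus(), ...
 *   }
 * }
 * @endcode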

*/ virtual Model::DescribeProjectsOutcome DescribeProjects(const Model::DescribeProjectsRequest& request) const; /** *

Lists and gets information about your Amazon Rekognition Custom Labels * projects.

This operation requires permissions to perform the * rekognition:DescribeProjects action.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DescribeProjectsOutcomeCallable DescribeProjectsCallable(const Model::DescribeProjectsRequest& request) const; /** *

Lists and gets information about your Amazon Rekognition Custom Labels * projects.

This operation requires permissions to perform the * rekognition:DescribeProjects action.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DescribeProjectsAsync(const Model::DescribeProjectsRequest& request, const DescribeProjectsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Provides information about a stream processor created by * CreateStreamProcessor. You can get information about the input and output * streams, the input parameters for the face recognition being performed, and the * current status of the stream processor.

See Also:

AWS * API Reference
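 * A brief usage sketch (illustrative only); the processor name is hypothetical:
 * @code
 * Aws::Rekognition::Model::DescribeStreamProcessorRequest request;
 * request.SetName("my-stream-processor");
 * auto outcome = client.DescribeStreamProcessor(request);
 * if (outcome.IsSuccess()) {
 *   // outcome.GetResult().GetStatus(), GetInput(), GetOutput(), GetSettings(), ...
 * }
 * @endcode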

*/ virtual Model::DescribeStreamProcessorOutcome DescribeStreamProcessor(const Model::DescribeStreamProcessorRequest& request) const; /** *

Provides information about a stream processor created by * CreateStreamProcessor. You can get information about the input and output * streams, the input parameters for the face recognition being performed, and the * current status of the stream processor.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DescribeStreamProcessorOutcomeCallable DescribeStreamProcessorCallable(const Model::DescribeStreamProcessorRequest& request) const; /** *

Provides information about a stream processor created by * CreateStreamProcessor. You can get information about the input and output * streams, the input parameters for the face recognition being performed, and the * current status of the stream processor.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DescribeStreamProcessorAsync(const Model::DescribeStreamProcessorRequest& request, const DescribeStreamProcessorResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Detects custom labels in a supplied image by using an Amazon Rekognition * Custom Labels model.

You specify which version of a model version to use * by using the ProjectVersionArn input parameter.

You pass * the input image as base64-encoded image bytes or as a reference to an image in * an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition * operations, passing image bytes is not supported. The image must be either a PNG * or JPEG formatted file.

For each object that the model version detects * on an image, the API returns a (CustomLabel) object in an array * (CustomLabels). Each CustomLabel object provides the * label name (Name), the level of confidence that the image contains * the object (Confidence), and object location information, if it * exists, for the label on the image (Geometry).

During * training, the model calculates a threshold value that determines if a prediction for * a label is true. By default, DetectCustomLabels doesn't return * labels whose confidence value is below the model's calculated threshold value. * To filter labels that are returned, specify a value for * MinConfidence that is higher than the model's calculated threshold. * You can get the model's calculated threshold from the model's training results * shown in the Amazon Rekognition Custom Labels console. To get all labels, * regardless of confidence, specify a MinConfidence value of 0.

*

You can also add the MaxResults parameter to limit the number of * labels returned.

This is a stateless API operation. That is, the * operation does not persist any data.

This operation requires permissions * to perform the rekognition:DetectCustomLabels action. *

See Also:

AWS * API Reference
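 * A brief usage sketch (illustrative only); versionArn is assumed to identify a
 * running model version, and the bucket/object names are hypothetical:
 * @code
 * Aws::Rekognition::Model::Image image;
 * image.SetS3Object(Aws::Rekognition::Model::S3Object().WithBucket("my-bucket").WithName("photo.jpg"));
 * Aws::Rekognition::Model::DetectCustomLabelsRequest request;
 * request.SetProjectVersionArn(versionArn);
 * request.SetImage(image);
 * request.SetMinConfidence(70.0);                      // hypothetical threshold
 * auto outcome = client.DetectCustomLabels(request);
 * if (outcome.IsSuccess()) {
 *   for (const auto& label : outcome.GetResult().GetCustomLabels()) {
 *     // label.GetName(), label.GetConfidence(), label.GetGeometry()
 *   }
 * }
 * @endcode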

*/ virtual Model::DetectCustomLabelsOutcome DetectCustomLabels(const Model::DetectCustomLabelsRequest& request) const; /** *

Detects custom labels in a supplied image by using an Amazon Rekognition * Custom Labels model.

You specify which version of a model version to use * by using the ProjectVersionArn input parameter.

You pass * the input image as base64-encoded image bytes or as a reference to an image in * an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition * operations, passing image bytes is not supported. The image must be either a PNG * or JPEG formatted file.

For each object that the model version detects * on an image, the API returns a (CustomLabel) object in an array * (CustomLabels). Each CustomLabel object provides the * label name (Name), the level of confidence that the image contains * the object (Confidence), and object location information, if it * exists, for the label on the image (Geometry).

During * training, the model calculates a threshold value that determines if a prediction for * a label is true. By default, DetectCustomLabels doesn't return * labels whose confidence value is below the model's calculated threshold value. * To filter labels that are returned, specify a value for * MinConfidence that is higher than the model's calculated threshold. * You can get the model's calculated threshold from the model's training results * shown in the Amazon Rekognition Custom Labels console. To get all labels, * regardless of confidence, specify a MinConfidence value of 0.

*

You can also add the MaxResults parameter to limit the number of * labels returned.

This is a stateless API operation. That is, the * operation does not persist any data.

This operation requires permissions * to perform the rekognition:DetectCustomLabels action. *

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DetectCustomLabelsOutcomeCallable DetectCustomLabelsCallable(const Model::DetectCustomLabelsRequest& request) const; /** *

Detects custom labels in a supplied image by using an Amazon Rekognition * Custom Labels model.

You specify which version of a model version to use * by using the ProjectVersionArn input parameter.

You pass * the input image as base64-encoded image bytes or as a reference to an image in * an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition * operations, passing image bytes is not supported. The image must be either a PNG * or JPEG formatted file.

For each object that the model version detects * on an image, the API returns a (CustomLabel) object in an array * (CustomLabels). Each CustomLabel object provides the * label name (Name), the level of confidence that the image contains * the object (Confidence), and object location information, if it * exists, for the label on the image (Geometry).

During * training, the model calculates a threshold value that determines if a prediction for * a label is true. By default, DetectCustomLabels doesn't return * labels whose confidence value is below the model's calculated threshold value. * To filter labels that are returned, specify a value for * MinConfidence that is higher than the model's calculated threshold. * You can get the model's calculated threshold from the model's training results * shown in the Amazon Rekognition Custom Labels console. To get all labels, * regardless of confidence, specify a MinConfidence value of 0.

*

You can also add the MaxResults parameter to limit the number of * labels returned.

This is a stateless API operation. That is, the * operation does not persist any data.

This operation requires permissions * to perform the rekognition:DetectCustomLabels action. *

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DetectCustomLabelsAsync(const Model::DetectCustomLabelsRequest& request, const DetectCustomLabelsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Detects faces within an image that is provided as input.

* DetectFaces detects the 100 largest faces in the image. For each * face detected, the operation returns face details. These details include a * bounding box of the face, a confidence value (that the bounding box contains a * face), and a fixed set of attributes such as facial landmarks (for example, * coordinates of eye and mouth), presence of beard, sunglasses, and so on.

*

The face-detection algorithm is most effective on frontal faces. For * non-frontal or obscured faces, the algorithm might not detect the faces or might * detect faces with lower confidence.

You pass the input image either as * base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. * If you use the AWS CLI to call Amazon Rekognition operations, passing image * bytes is not supported. The image must be either a PNG or JPEG formatted file. *

This is a stateless API operation. That is, the operation does * not persist any data.

This operation requires permissions to * perform the rekognition:DetectFaces action.

See * Also:

AWS * API Reference
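 * A brief usage sketch (illustrative only); the bucket and object names are
 * hypothetical:
 * @code
 * Aws::Rekognition::Model::Image image;
 * image.SetS3Object(Aws::Rekognition::Model::S3Object().WithBucket("my-bucket").WithName("faces.jpg"));
 * Aws::Rekognition::Model::DetectFacesRequest request;
 * request.SetImage(image);
 * request.AddAttributes(Aws::Rekognition::Model::Attribute::ALL);   // request all facial attributes
 * auto outcome = client.DetectFaces(request);
 * if (outcome.IsSuccess()) {
 *   for (const auto& face : outcome.GetResult().GetFaceDetails()) {
 *     // face.GetBoundingBox(), face.GetConfidence(), face.GetLandmarks(), ...
 *   }
 * }
 * @endcode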

*/ virtual Model::DetectFacesOutcome DetectFaces(const Model::DetectFacesRequest& request) const; /** *

Detects faces within an image that is provided as input.

* DetectFaces detects the 100 largest faces in the image. For each * face detected, the operation returns face details. These details include a * bounding box of the face, a confidence value (that the bounding box contains a * face), and a fixed set of attributes such as facial landmarks (for example, * coordinates of eye and mouth), presence of beard, sunglasses, and so on.

*

The face-detection algorithm is most effective on frontal faces. For * non-frontal or obscured faces, the algorithm might not detect the faces or might * detect faces with lower confidence.

You pass the input image either as * base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. * If you use the AWS CLI to call Amazon Rekognition operations, passing image * bytes is not supported. The image must be either a PNG or JPEG formatted file. *

This is a stateless API operation. That is, the operation does * not persist any data.

This operation requires permissions to * perform the rekognition:DetectFaces action.

See * Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DetectFacesOutcomeCallable DetectFacesCallable(const Model::DetectFacesRequest& request) const; /** *

Detects faces within an image that is provided as input.

* DetectFaces detects the 100 largest faces in the image. For each * face detected, the operation returns face details. These details include a * bounding box of the face, a confidence value (that the bounding box contains a * face), and a fixed set of attributes such as facial landmarks (for example, * coordinates of eye and mouth), presence of beard, sunglasses, and so on.

*

The face-detection algorithm is most effective on frontal faces. For * non-frontal or obscured faces, the algorithm might not detect the faces or might * detect faces with lower confidence.

You pass the input image either as * base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. * If you use the AWS CLI to call Amazon Rekognition operations, passing image * bytes is not supported. The image must be either a PNG or JPEG formatted file. *

This is a stateless API operation. That is, the operation does * not persist any data.

This operation requires permissions to * perform the rekognition:DetectFaces action.

See * Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DetectFacesAsync(const Model::DetectFacesRequest& request, const DetectFacesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Detects instances of real-world entities within an image (JPEG or PNG) * provided as input. This includes objects like flower, tree, and table; events * like wedding, graduation, and birthday party; and concepts like landscape, * evening, and nature.

For an example, see Analyzing Images Stored in an * Amazon S3 Bucket in the Amazon Rekognition Developer Guide.

* DetectLabels does not support the detection of activities. However, * activity detection is supported for label detection in videos. For more * information, see StartLabelDetection in the Amazon Rekognition Developer * Guide.

You pass the input image as base64-encoded image bytes or * as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to * call Amazon Rekognition operations, passing image bytes is not supported. The * image must be either a PNG or JPEG formatted file.

For each object, * scene, and concept the API returns one or more labels. Each label provides the * object name, and the level of confidence that the image contains the object. For * example, suppose the input image has a lighthouse, the sea, and a rock. The * response includes all three labels, one for each object.

{Name: * lighthouse, Confidence: 98.4629}

{Name: rock,Confidence: * 79.2097}

{Name: sea,Confidence: 75.061}

In * the preceding example, the operation returns one label for each of the three * objects. The operation can also return multiple labels for the same object in * the image. For example, if the input image shows a flower (for example, a * tulip), the operation might return the following three labels.

* {Name: flower,Confidence: 99.0562}

{Name: * plant,Confidence: 99.0562}

{Name: tulip,Confidence: * 99.0562}

In this example, the detection algorithm more precisely * identifies the flower as a tulip.

In response, the API returns an array * of labels. In addition, the response also includes the orientation correction. * Optionally, you can specify MinConfidence to control the confidence * threshold for the labels returned. The default is 55%. You can also add the * MaxLabels parameter to limit the number of labels returned.

*

If the object detected is a person, the operation doesn't provide the * same facial details that the DetectFaces operation provides.

*

DetectLabels returns bounding boxes for instances of common * object labels in an array of Instance objects. An Instance * object contains a BoundingBox object, for the location of the label on * the image. It also includes the confidence by which the bounding box was * detected.

DetectLabels also returns a hierarchical taxonomy * of detected labels. For example, a detected car might be assigned the label * car. The label car has two parent labels: Vehicle (its * parent) and Transportation (its grandparent). The response returns the * entire list of ancestors for a label. Each ancestor is a unique label in the * response. In the previous example, Car, Vehicle, and * Transportation are returned as unique labels in the response.

*

This is a stateless API operation. That is, the operation does not persist * any data.

This operation requires permissions to perform the * rekognition:DetectLabels action.
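
A minimal synchronous sketch (illustrative only; the client, bucket, and key are placeholders, and the parameter values are arbitrary):
@code
Aws::Rekognition::Model::S3Object s3Object;
s3Object.SetBucket("my-bucket");
s3Object.SetName("photos/harbor.jpg");
Aws::Rekognition::Model::Image image;
image.SetS3Object(s3Object);
Aws::Rekognition::Model::DetectLabelsRequest request;
request.SetImage(image);
request.SetMinConfidence(70.0); // only return labels detected at >= 70% confidence
request.SetMaxLabels(10);       // cap the number of labels returned

auto outcome = client.DetectLabels(request);
if (outcome.IsSuccess()) {
  for (const auto& label : outcome.GetResult().GetLabels()) {
    // label.GetName(), label.GetConfidence(), label.GetInstances(), and
    // label.GetParents() expose the details described above.
  }
}
@endcode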

See Also:

AWS * API Reference

*/ virtual Model::DetectLabelsOutcome DetectLabels(const Model::DetectLabelsRequest& request) const; /** *

Detects instances of real-world entities within an image (JPEG or PNG) * provided as input. This includes objects like flower, tree, and table; events * like wedding, graduation, and birthday party; and concepts like landscape, * evening, and nature.

For an example, see Analyzing Images Stored in an * Amazon S3 Bucket in the Amazon Rekognition Developer Guide.

* DetectLabels does not support the detection of activities. However, * activity detection is supported for label detection in videos. For more * information, see StartLabelDetection in the Amazon Rekognition Developer * Guide.

You pass the input image as base64-encoded image bytes or * as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to * call Amazon Rekognition operations, passing image bytes is not supported. The * image must be either a PNG or JPEG formatted file.

For each object, * scene, and concept the API returns one or more labels. Each label provides the * object name, and the level of confidence that the image contains the object. For * example, suppose the input image has a lighthouse, the sea, and a rock. The * response includes all three labels, one for each object.

{Name: * lighthouse, Confidence: 98.4629}

{Name: rock,Confidence: * 79.2097}

{Name: sea,Confidence: 75.061}

In * the preceding example, the operation returns one label for each of the three * objects. The operation can also return multiple labels for the same object in * the image. For example, if the input image shows a flower (for example, a * tulip), the operation might return the following three labels.

* {Name: flower,Confidence: 99.0562}

{Name: * plant,Confidence: 99.0562}

{Name: tulip,Confidence: * 99.0562}

In this example, the detection algorithm more precisely * identifies the flower as a tulip.

In response, the API returns an array * of labels. In addition, the response also includes the orientation correction. * Optionally, you can specify MinConfidence to control the confidence * threshold for the labels returned. The default is 55%. You can also add the * MaxLabels parameter to limit the number of labels returned.

*

If the object detected is a person, the operation doesn't provide the * same facial details that the DetectFaces operation provides.

*

DetectLabels returns bounding boxes for instances of common * object labels in an array of Instance objects. An Instance * object contains a BoundingBox object, for the location of the label on * the image. It also includes the confidence by which the bounding box was * detected.

DetectLabels also returns a hierarchical taxonomy * of detected labels. For example, a detected car might be assigned the label * car. The label car has two parent labels: Vehicle (its * parent) and Transportation (its grandparent). The response returns the * entire list of ancestors for a label. Each ancestor is a unique label in the * response. In the previous example, Car, Vehicle, and * Transportation are returned as unique labels in the response.

*

This is a stateless API operation. That is, the operation does not persist * any data.

This operation requires permissions to perform the * rekognition:DetectLabels action.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DetectLabelsOutcomeCallable DetectLabelsCallable(const Model::DetectLabelsRequest& request) const; /** *

Detects instances of real-world entities within an image (JPEG or PNG) * provided as input. This includes objects like flower, tree, and table; events * like wedding, graduation, and birthday party; and concepts like landscape, * evening, and nature.

For an example, see Analyzing Images Stored in an * Amazon S3 Bucket in the Amazon Rekognition Developer Guide.

* DetectLabels does not support the detection of activities. However, * activity detection is supported for label detection in videos. For more * information, see StartLabelDetection in the Amazon Rekognition Developer * Guide.

You pass the input image as base64-encoded image bytes or * as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to * call Amazon Rekognition operations, passing image bytes is not supported. The * image must be either a PNG or JPEG formatted file.

For each object, * scene, and concept the API returns one or more labels. Each label provides the * object name, and the level of confidence that the image contains the object. For * example, suppose the input image has a lighthouse, the sea, and a rock. The * response includes all three labels, one for each object.

{Name: * lighthouse, Confidence: 98.4629}

{Name: rock,Confidence: * 79.2097}

{Name: sea,Confidence: 75.061}

In * the preceding example, the operation returns one label for each of the three * objects. The operation can also return multiple labels for the same object in * the image. For example, if the input image shows a flower (for example, a * tulip), the operation might return the following three labels.

* {Name: flower,Confidence: 99.0562}

{Name: * plant,Confidence: 99.0562}

{Name: tulip,Confidence: * 99.0562}

In this example, the detection algorithm more precisely * identifies the flower as a tulip.

In response, the API returns an array * of labels. In addition, the response also includes the orientation correction. * Optionally, you can specify MinConfidence to control the confidence * threshold for the labels returned. The default is 55%. You can also add the * MaxLabels parameter to limit the number of labels returned.

*

If the object detected is a person, the operation doesn't provide the * same facial details that the DetectFaces operation provides.

*

DetectLabels returns bounding boxes for instances of common * object labels in an array of Instance objects. An Instance * object contains a BoundingBox object, for the location of the label on * the image. It also includes the confidence by which the bounding box was * detected.

DetectLabels also returns a hierarchical taxonomy * of detected labels. For example, a detected car might be assigned the label * car. The label car has two parent labels: Vehicle (its * parent) and Transportation (its grandparent). The response returns the * entire list of ancestors for a label. Each ancestor is a unique label in the * response. In the previous example, Car, Vehicle, and * Transportation are returned as unique labels in the response.

*

This is a stateless API operation. That is, the operation does not persist * any data.

This operation requires permissions to perform the * rekognition:DetectLabels action.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DetectLabelsAsync(const Model::DetectLabelsRequest& request, const DetectLabelsResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Detects unsafe content in a specified JPEG or PNG format image. Use * DetectModerationLabels to moderate images depending on your * requirements. For example, you might want to filter images that contain nudity, * but not images containing suggestive content.

To filter images, use the * labels returned by DetectModerationLabels to determine which types * of content are appropriate.

For information about moderation labels, see * Detecting Unsafe Content in the Amazon Rekognition Developer Guide.

You * pass the input image either as base64-encoded image bytes or as a reference to * an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon * Rekognition operations, passing image bytes is not supported. The image must be * either a PNG or JPEG formatted file.
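
A minimal sketch (illustrative only; "client" and the S3 location are placeholders):
@code
Aws::Rekognition::Model::S3Object s3Object;
s3Object.SetBucket("my-bucket");
s3Object.SetName("uploads/avatar.jpg");
Aws::Rekognition::Model::Image image;
image.SetS3Object(s3Object);
Aws::Rekognition::Model::DetectModerationLabelsRequest request;
request.SetImage(image);

auto outcome = client.DetectModerationLabels(request);
if (outcome.IsSuccess()) {
  for (const auto& label : outcome.GetResult().GetModerationLabels()) {
    // Filter on label.GetName() / label.GetParentName() to decide whether
    // the image is acceptable for your use case.
  }
}
@endcode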

See Also:

AWS * API Reference

*/ virtual Model::DetectModerationLabelsOutcome DetectModerationLabels(const Model::DetectModerationLabelsRequest& request) const; /** *

Detects unsafe content in a specified JPEG or PNG format image. Use * DetectModerationLabels to moderate images depending on your * requirements. For example, you might want to filter images that contain nudity, * but not images containing suggestive content.

To filter images, use the * labels returned by DetectModerationLabels to determine which types * of content are appropriate.

For information about moderation labels, see * Detecting Unsafe Content in the Amazon Rekognition Developer Guide.

You * pass the input image either as base64-encoded image bytes or as a reference to * an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon * Rekognition operations, passing image bytes is not supported. The image must be * either a PNG or JPEG formatted file.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DetectModerationLabelsOutcomeCallable DetectModerationLabelsCallable(const Model::DetectModerationLabelsRequest& request) const; /** *

Detects unsafe content in a specified JPEG or PNG format image. Use * DetectModerationLabels to moderate images depending on your * requirements. For example, you might want to filter images that contain nudity, * but not images containing suggestive content.

To filter images, use the * labels returned by DetectModerationLabels to determine which types * of content are appropriate.

For information about moderation labels, see * Detecting Unsafe Content in the Amazon Rekognition Developer Guide.

You * pass the input image either as base64-encoded image bytes or as a reference to * an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon * Rekognition operations, passing image bytes is not supported. The image must be * either a PNG or JPEG formatted file.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DetectModerationLabelsAsync(const Model::DetectModerationLabelsRequest& request, const DetectModerationLabelsResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Detects text in the input image and converts it into machine-readable * text.

Pass the input image as base64-encoded image bytes or as a * reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call * Amazon Rekognition operations, you must pass it as a reference to an image in an * Amazon S3 bucket. For the AWS CLI, passing image bytes is not supported. The * image must be either a .png or .jpeg formatted file.

The * DetectText operation returns text in an array of * TextDetection elements, TextDetections. Each * TextDetection element provides information about a single word or * line of text that was detected in the image.

A word is one or more ISO * basic Latin script characters that are not separated by spaces. * DetectText can detect up to 50 words in an image.

A line is * a string of equally spaced words. A line isn't necessarily a complete sentence. * For example, a driver's license number is detected as a line. A line ends when * there is no aligned text after it. Also, a line ends when there is a large gap * between words, relative to the length of the words. This means, depending on the * gap between words, Amazon Rekognition may detect multiple lines in text aligned * in the same direction. Periods don't represent the end of a line. If a sentence * spans multiple lines, the DetectText operation returns multiple * lines.

To determine whether a TextDetection element is a * line of text or a word, use the TextDetection object * Type field.

To be detected, text must be within +/- 90 * degrees orientation of the horizontal axis.

For more information, see * DetectText in the Amazon Rekognition Developer Guide.
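
A minimal sketch (illustrative only; "client" and the S3 location are placeholders):
@code
Aws::Rekognition::Model::S3Object s3Object;
s3Object.SetBucket("my-bucket");
s3Object.SetName("scans/receipt.png");
Aws::Rekognition::Model::Image image;
image.SetS3Object(s3Object);
Aws::Rekognition::Model::DetectTextRequest request;
request.SetImage(image);

auto outcome = client.DetectText(request);
if (outcome.IsSuccess()) {
  for (const auto& detection : outcome.GetResult().GetTextDetections()) {
    if (detection.GetType() == Aws::Rekognition::Model::TextTypes::LINE) {
      // detection.GetDetectedText() holds a whole line; WORD elements are
      // reported as separate TextDetection entries.
    }
  }
}
@endcode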

See Also:

* AWS * API Reference

*/ virtual Model::DetectTextOutcome DetectText(const Model::DetectTextRequest& request) const; /** *

Detects text in the input image and converts it into machine-readable * text.

Pass the input image as base64-encoded image bytes or as a * reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call * Amazon Rekognition operations, you must pass it as a reference to an image in an * Amazon S3 bucket. For the AWS CLI, passing image bytes is not supported. The * image must be either a .png or .jpeg formatted file.

The * DetectText operation returns text in an array of * TextDetection elements, TextDetections. Each * TextDetection element provides information about a single word or * line of text that was detected in the image.

A word is one or more ISO * basic Latin script characters that are not separated by spaces. * DetectText can detect up to 50 words in an image.

A line is * a string of equally spaced words. A line isn't necessarily a complete sentence. * For example, a driver's license number is detected as a line. A line ends when * there is no aligned text after it. Also, a line ends when there is a large gap * between words, relative to the length of the words. This means, depending on the * gap between words, Amazon Rekognition may detect multiple lines in text aligned * in the same direction. Periods don't represent the end of a line. If a sentence * spans multiple lines, the DetectText operation returns multiple * lines.

To determine whether a TextDetection element is a * line of text or a word, use the TextDetection object * Type field.

To be detected, text must be within +/- 90 * degrees orientation of the horizontal axis.

For more information, see * DetectText in the Amazon Rekognition Developer Guide.

See Also:

* AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DetectTextOutcomeCallable DetectTextCallable(const Model::DetectTextRequest& request) const; /** *

Detects text in the input image and converts it into machine-readable * text.

Pass the input image as base64-encoded image bytes or as a * reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call * Amazon Rekognition operations, you must pass it as a reference to an image in an * Amazon S3 bucket. For the AWS CLI, passing image bytes is not supported. The * image must be either a .png or .jpeg formatted file.

The * DetectText operation returns text in an array of * TextDetection elements, TextDetections. Each * TextDetection element provides information about a single word or * line of text that was detected in the image.

A word is one or more ISO * basic Latin script characters that are not separated by spaces. * DetectText can detect up to 50 words in an image.

A line is * a string of equally spaced words. A line isn't necessarily a complete sentence. * For example, a driver's license number is detected as a line. A line ends when * there is no aligned text after it. Also, a line ends when there is a large gap * between words, relative to the length of the words. This means, depending on the * gap between words, Amazon Rekognition may detect multiple lines in text aligned * in the same direction. Periods don't represent the end of a line. If a sentence * spans multiple lines, the DetectText operation returns multiple * lines.

To determine whether a TextDetection element is a * line of text or a word, use the TextDetection object * Type field.

To be detected, text must be within +/- 90 * degrees orientation of the horizontal axis.

For more information, see * DetectText in the Amazon Rekognition Developer Guide.

See Also:

* AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DetectTextAsync(const Model::DetectTextRequest& request, const DetectTextResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Gets the name and additional information about a celebrity based on his or * her Amazon Rekognition ID. The additional information is returned as an array of * URLs. If there is no additional information about the celebrity, this list is * empty.

For more information, see Recognizing Celebrities in an Image in * the Amazon Rekognition Developer Guide.

This operation requires * permissions to perform the rekognition:GetCelebrityInfo action. *
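
A minimal sketch (illustrative only; the ID shown is a placeholder taken from a prior RecognizeCelebrities response):
@code
Aws::Rekognition::Model::GetCelebrityInfoRequest request;
request.SetId("1abc2de3"); // placeholder celebrity ID
auto outcome = client.GetCelebrityInfo(request);
if (outcome.IsSuccess()) {
  const auto& result = outcome.GetResult();
  // result.GetName() is the celebrity's name; result.GetUrls() may be empty.
}
@endcode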

See Also:

AWS * API Reference

*/ virtual Model::GetCelebrityInfoOutcome GetCelebrityInfo(const Model::GetCelebrityInfoRequest& request) const; /** *

Gets the name and additional information about a celebrity based on his or * her Amazon Rekognition ID. The additional information is returned as an array of * URLs. If there is no additional information about the celebrity, this list is * empty.

For more information, see Recognizing Celebrities in an Image in * the Amazon Rekognition Developer Guide.

This operation requires * permissions to perform the rekognition:GetCelebrityInfo action. *

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::GetCelebrityInfoOutcomeCallable GetCelebrityInfoCallable(const Model::GetCelebrityInfoRequest& request) const; /** *

Gets the name and additional information about a celebrity based on his or * her Amazon Rekognition ID. The additional information is returned as an array of * URLs. If there is no additional information about the celebrity, this list is * empty.

For more information, see Recognizing Celebrities in an Image in * the Amazon Rekognition Developer Guide.

This operation requires * permissions to perform the rekognition:GetCelebrityInfo action. *

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void GetCelebrityInfoAsync(const Model::GetCelebrityInfoRequest& request, const GetCelebrityInfoResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Gets the celebrity recognition results for an Amazon Rekognition Video * analysis started by StartCelebrityRecognition.

Celebrity * recognition in a video is an asynchronous operation. Analysis is started by a * call to StartCelebrityRecognition which returns a job identifier * (JobId). When the celebrity recognition operation finishes, Amazon * Rekognition Video publishes a completion status to the Amazon Simple * Notification Service topic registered in the initial call to * StartCelebrityRecognition. To get the results of the celebrity * recognition analysis, first check that the status value published to the Amazon * SNS topic is SUCCEEDED. If so, call * GetCelebrityRecognition and pass the job identifier * (JobId) from the initial call to * StartCelebrityRecognition.

For more information, see Working * With Stored Videos in the Amazon Rekognition Developer Guide.

* GetCelebrityRecognition returns detected celebrities and the * time(s) they are detected in an array (Celebrities) of * CelebrityRecognition objects. Each CelebrityRecognition * contains information about the celebrity in a CelebrityDetail object and * the time, Timestamp, the celebrity was detected.

* GetCelebrityRecognition only returns the default facial attributes * (BoundingBox, Confidence, Landmarks, * Pose, and Quality). The other facial attributes listed * in the Face object of the following response syntax are not * returned. For more information, see FaceDetail in the Amazon Rekognition * Developer Guide.

By default, the Celebrities array * is sorted by time (milliseconds from the start of the video). You can also sort * the array by celebrity by specifying the value ID in the * SortBy input parameter.

The CelebrityDetail * object includes the celebrity identifier and additional information URLs. If you * don't store the additional information URLs, you can get them later by calling * GetCelebrityInfo with the celebrity identifier.

No information is * returned for faces not recognized as celebrities.

Use the MaxResults * parameter to limit the number of celebrities returned. If there are more results than * specified in MaxResults, the value of NextToken in the * operation response contains a pagination token for getting the next set of * results. To get the next page of results, call * GetCelebrityRecognition and populate the NextToken * request parameter with the token value returned from the previous call to * GetCelebrityRecognition.
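
A rough polling-and-pagination sketch (illustrative only; it assumes the SUCCEEDED status has already been observed on the SNS topic and that "jobId" holds the identifier returned by StartCelebrityRecognition):
@code
Aws::Rekognition::Model::GetCelebrityRecognitionRequest request;
request.SetJobId(jobId);
request.SetMaxResults(10);

Aws::String nextToken;
do {
  if (!nextToken.empty()) {
    request.SetNextToken(nextToken);
  }
  auto outcome = client.GetCelebrityRecognition(request);
  if (!outcome.IsSuccess()) {
    break;
  }
  for (const auto& recognition : outcome.GetResult().GetCelebrities()) {
    // recognition.GetTimestamp() and recognition.GetCelebrity() (a
    // CelebrityDetail) describe when and who was recognized.
  }
  nextToken = outcome.GetResult().GetNextToken();
} while (!nextToken.empty());
@endcode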

See Also:

AWS * API Reference

*/ virtual Model::GetCelebrityRecognitionOutcome GetCelebrityRecognition(const Model::GetCelebrityRecognitionRequest& request) const; /** *

Gets the celebrity recognition results for an Amazon Rekognition Video * analysis started by StartCelebrityRecognition.

Celebrity * recognition in a video is an asynchronous operation. Analysis is started by a * call to StartCelebrityRecognition which returns a job identifier * (JobId). When the celebrity recognition operation finishes, Amazon * Rekognition Video publishes a completion status to the Amazon Simple * Notification Service topic registered in the initial call to * StartCelebrityRecognition. To get the results of the celebrity * recognition analysis, first check that the status value published to the Amazon * SNS topic is SUCCEEDED. If so, call * GetCelebrityRecognition and pass the job identifier * (JobId) from the initial call to * StartCelebrityRecognition.

For more information, see Working * With Stored Videos in the Amazon Rekognition Developer Guide.

* GetCelebrityRecognition returns detected celebrities and the * time(s) they are detected in an array (Celebrities) of * CelebrityRecognition objects. Each CelebrityRecognition * contains information about the celebrity in a CelebrityDetail object and * the time, Timestamp, the celebrity was detected.

* GetCelebrityRecognition only returns the default facial attributes * (BoundingBox, Confidence, Landmarks, * Pose, and Quality). The other facial attributes listed * in the Face object of the following response syntax are not * returned. For more information, see FaceDetail in the Amazon Rekognition * Developer Guide.

By default, the Celebrities array * is sorted by time (milliseconds from the start of the video). You can also sort * the array by celebrity by specifying the value ID in the * SortBy input parameter.

The CelebrityDetail * object includes the celebrity identifier and additional information URLs. If you * don't store the additional information URLs, you can get them later by calling * GetCelebrityInfo with the celebrity identifier.

No information is * returned for faces not recognized as celebrities.

Use the MaxResults * parameter to limit the number of celebrities returned. If there are more results than * specified in MaxResults, the value of NextToken in the * operation response contains a pagination token for getting the next set of * results. To get the next page of results, call * GetCelebrityRecognition and populate the NextToken * request parameter with the token value returned from the previous call to * GetCelebrityRecognition.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::GetCelebrityRecognitionOutcomeCallable GetCelebrityRecognitionCallable(const Model::GetCelebrityRecognitionRequest& request) const; /** *

Gets the celebrity recognition results for an Amazon Rekognition Video * analysis started by StartCelebrityRecognition.

Celebrity * recognition in a video is an asynchronous operation. Analysis is started by a * call to StartCelebrityRecognition which returns a job identifier * (JobId). When the celebrity recognition operation finishes, Amazon * Rekognition Video publishes a completion status to the Amazon Simple * Notification Service topic registered in the initial call to * StartCelebrityRecognition. To get the results of the celebrity * recognition analysis, first check that the status value published to the Amazon * SNS topic is SUCCEEDED. If so, call * GetCelebrityRecognition and pass the job identifier * (JobId) from the initial call to * StartCelebrityRecognition.

For more information, see Working * With Stored Videos in the Amazon Rekognition Developer Guide.

* GetCelebrityRecognition returns detected celebrities and the * time(s) they are detected in an array (Celebrities) of * CelebrityRecognition objects. Each CelebrityRecognition * contains information about the celebrity in a CelebrityDetail object and * the time, Timestamp, the celebrity was detected.

* GetCelebrityRecognition only returns the default facial attributes * (BoundingBox, Confidence, Landmarks, * Pose, and Quality). The other facial attributes listed * in the Face object of the following response syntax are not * returned. For more information, see FaceDetail in the Amazon Rekognition * Developer Guide.

By default, the Celebrities array * is sorted by time (milliseconds from the start of the video). You can also sort * the array by celebrity by specifying the value ID in the * SortBy input parameter.

The CelebrityDetail * object includes the celebrity identifier and additional information URLs. If you * don't store the additional information URLs, you can get them later by calling * GetCelebrityInfo with the celebrity identifier.

No information is * returned for faces not recognized as celebrities.

Use the MaxResults * parameter to limit the number of celebrities returned. If there are more results than * specified in MaxResults, the value of NextToken in the * operation response contains a pagination token for getting the next set of * results. To get the next page of results, call * GetCelebrityRecognition and populate the NextToken * request parameter with the token value returned from the previous call to * GetCelebrityRecognition.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void GetCelebrityRecognitionAsync(const Model::GetCelebrityRecognitionRequest& request, const GetCelebrityRecognitionResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Gets the unsafe content analysis results for an Amazon Rekognition Video * analysis started by StartContentModeration.

Unsafe content * analysis of a video is an asynchronous operation. You start analysis by calling * StartContentModeration which returns a job identifier * (JobId). When analysis finishes, Amazon Rekognition Video publishes * a completion status to the Amazon Simple Notification Service topic registered * in the initial call to StartContentModeration. To get the results * of the unsafe content analysis, first check that the status value published to * the Amazon SNS topic is SUCCEEDED. If so, call * GetContentModeration and pass the job identifier * (JobId) from the initial call to * StartContentModeration.

For more information, see Working * with Stored Videos in the Amazon Rekognition Developer Guide.

* GetContentModeration returns detected unsafe content labels, and * the time they are detected, in an array, ModerationLabels, of * ContentModerationDetection objects.

By default, the moderated * labels are returned sorted by time, in milliseconds from the start of the video. * You can also sort them by moderated label by specifying NAME for * the SortBy input parameter.

Since video analysis can return * a large number of results, use the MaxResults parameter to limit * the number of labels returned in a single call to * GetContentModeration. If there are more results than specified in * MaxResults, the value of NextToken in the operation * response contains a pagination token for getting the next set of results. To get * the next page of results, call GetContentModeration and populate * the NextToken request parameter with the value of * NextToken returned from the previous call to * GetContentModeration.

For more information, see Detecting * Unsafe Content in the Amazon Rekognition Developer Guide.
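
A rough sketch (illustrative only; "jobId" is the identifier returned by StartContentModeration):
@code
Aws::Rekognition::Model::GetContentModerationRequest request;
request.SetJobId(jobId);
request.SetMaxResults(50);
auto outcome = client.GetContentModeration(request);
if (outcome.IsSuccess()) {
  for (const auto& detection : outcome.GetResult().GetModerationLabels()) {
    // detection.GetTimestamp() is the offset in milliseconds;
    // detection.GetModerationLabel() carries the label name and confidence.
  }
  // Pass outcome.GetResult().GetNextToken() in a follow-up request to page
  // through any remaining results.
}
@endcode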

See * Also:

AWS * API Reference

*/ virtual Model::GetContentModerationOutcome GetContentModeration(const Model::GetContentModerationRequest& request) const; /** *

Gets the unsafe content analysis results for an Amazon Rekognition Video * analysis started by StartContentModeration.

Unsafe content * analysis of a video is an asynchronous operation. You start analysis by calling * StartContentModeration which returns a job identifier * (JobId). When analysis finishes, Amazon Rekognition Video publishes * a completion status to the Amazon Simple Notification Service topic registered * in the initial call to StartContentModeration. To get the results * of the unsafe content analysis, first check that the status value published to * the Amazon SNS topic is SUCCEEDED. If so, call * GetContentModeration and pass the job identifier * (JobId) from the initial call to * StartContentModeration.

For more information, see Working * with Stored Videos in the Amazon Rekognition Developer Guide.

* GetContentModeration returns detected unsafe content labels, and * the time they are detected, in an array, ModerationLabels, of * ContentModerationDetection objects.

By default, the moderated * labels are returned sorted by time, in milliseconds from the start of the video. * You can also sort them by moderated label by specifying NAME for * the SortBy input parameter.

Since video analysis can return * a large number of results, use the MaxResults parameter to limit * the number of labels returned in a single call to * GetContentModeration. If there are more results than specified in * MaxResults, the value of NextToken in the operation * response contains a pagination token for getting the next set of results. To get * the next page of results, call GetContentModeration and populate * the NextToken request parameter with the value of * NextToken returned from the previous call to * GetContentModeration.

For more information, see Detecting * Unsafe Content in the Amazon Rekognition Developer Guide.

See * Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::GetContentModerationOutcomeCallable GetContentModerationCallable(const Model::GetContentModerationRequest& request) const; /** *

Gets the unsafe content analysis results for an Amazon Rekognition Video * analysis started by StartContentModeration.

Unsafe content * analysis of a video is an asynchronous operation. You start analysis by calling * StartContentModeration which returns a job identifier * (JobId). When analysis finishes, Amazon Rekognition Video publishes * a completion status to the Amazon Simple Notification Service topic registered * in the initial call to StartContentModeration. To get the results * of the unsafe content analysis, first check that the status value published to * the Amazon SNS topic is SUCCEEDED. If so, call * GetContentModeration and pass the job identifier * (JobId) from the initial call to * StartContentModeration.

For more information, see Working * with Stored Videos in the Amazon Rekognition Developer Guide.

* GetContentModeration returns detected unsafe content labels, and * the time they are detected, in an array, ModerationLabels, of * ContentModerationDetection objects.

By default, the moderated * labels are returned sorted by time, in milliseconds from the start of the video. * You can also sort them by moderated label by specifying NAME for * the SortBy input parameter.

Since video analysis can return * a large number of results, use the MaxResults parameter to limit * the number of labels returned in a single call to * GetContentModeration. If there are more results than specified in * MaxResults, the value of NextToken in the operation * response contains a pagination token for getting the next set of results. To get * the next page of results, call GetContentModeration and populate * the NextToken request parameter with the value of * NextToken returned from the previous call to * GetContentModeration.

For more information, see Detecting * Unsafe Content in the Amazon Rekognition Developer Guide.

See * Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void GetContentModerationAsync(const Model::GetContentModerationRequest& request, const GetContentModerationResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Gets face detection results for an Amazon Rekognition Video analysis started * by StartFaceDetection.

Face detection with Amazon Rekognition * Video is an asynchronous operation. You start face detection by calling * StartFaceDetection which returns a job identifier (JobId). * When the face detection operation finishes, Amazon Rekognition Video publishes a * completion status to the Amazon Simple Notification Service topic registered in * the initial call to StartFaceDetection. To get the results of the * face detection operation, first check that the status value published to the * Amazon SNS topic is SUCCEEDED. If so, call GetFaceDetection * and pass the job identifier (JobId) from the initial call to * StartFaceDetection.

GetFaceDetection returns * an array of detected faces (Faces) sorted by the time the faces * were detected.

Use the MaxResults parameter to limit the number of faces * returned. If there are more results than specified in MaxResults, * the value of NextToken in the operation response contains a * pagination token for getting the next set of results. To get the next page of * results, call GetFaceDetection and populate the * NextToken request parameter with the token value returned from the * previous call to GetFaceDetection.
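
A rough sketch (illustrative only; "jobId" is the identifier returned by StartFaceDetection):
@code
Aws::Rekognition::Model::GetFaceDetectionRequest request;
request.SetJobId(jobId);
auto outcome = client.GetFaceDetection(request);
if (outcome.IsSuccess() &&
    outcome.GetResult().GetJobStatus() ==
        Aws::Rekognition::Model::VideoJobStatus::SUCCEEDED) {
  for (const auto& faceDetection : outcome.GetResult().GetFaces()) {
    // faceDetection.GetTimestamp() and faceDetection.GetFace() (a FaceDetail)
    // describe when and where each face appears in the video.
  }
}
@endcode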

See Also:

AWS * API Reference

*/ virtual Model::GetFaceDetectionOutcome GetFaceDetection(const Model::GetFaceDetectionRequest& request) const; /** *

Gets face detection results for an Amazon Rekognition Video analysis started * by StartFaceDetection.

Face detection with Amazon Rekognition * Video is an asynchronous operation. You start face detection by calling * StartFaceDetection which returns a job identifier (JobId). * When the face detection operation finishes, Amazon Rekognition Video publishes a * completion status to the Amazon Simple Notification Service topic registered in * the initial call to StartFaceDetection. To get the results of the * face detection operation, first check that the status value published to the * Amazon SNS topic is SUCCEEDED. If so, call GetFaceDetection * and pass the job identifier (JobId) from the initial call to * StartFaceDetection.

GetFaceDetection returns * an array of detected faces (Faces) sorted by the time the faces * were detected.

Use the MaxResults parameter to limit the number of faces * returned. If there are more results than specified in MaxResults, * the value of NextToken in the operation response contains a * pagination token for getting the next set of results. To get the next page of * results, call GetFaceDetection and populate the * NextToken request parameter with the token value returned from the * previous call to GetFaceDetection.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::GetFaceDetectionOutcomeCallable GetFaceDetectionCallable(const Model::GetFaceDetectionRequest& request) const; /** *

Gets face detection results for an Amazon Rekognition Video analysis started * by StartFaceDetection.

Face detection with Amazon Rekognition * Video is an asynchronous operation. You start face detection by calling * StartFaceDetection which returns a job identifier (JobId). * When the face detection operation finishes, Amazon Rekognition Video publishes a * completion status to the Amazon Simple Notification Service topic registered in * the initial call to StartFaceDetection. To get the results of the * face detection operation, first check that the status value published to the * Amazon SNS topic is SUCCEEDED. If so, call GetFaceDetection * and pass the job identifier (JobId) from the initial call to * StartFaceDetection.

GetFaceDetection returns * an array of detected faces (Faces) sorted by the time the faces * were detected.

Use the MaxResults parameter to limit the number of faces * returned. If there are more results than specified in MaxResults, * the value of NextToken in the operation response contains a * pagination token for getting the next set of results. To get the next page of * results, call GetFaceDetection and populate the * NextToken request parameter with the token value returned from the * previous call to GetFaceDetection.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void GetFaceDetectionAsync(const Model::GetFaceDetectionRequest& request, const GetFaceDetectionResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Gets the face search results for Amazon Rekognition Video face search started * by StartFaceSearch. The search returns faces in a collection that match * the faces of persons detected in a video. It also includes the time(s) that * faces are matched in the video.

Face search in a video is an asynchronous * operation. You start face search by calling StartFaceSearch, which * returns a job identifier (JobId). When the search operation * finishes, Amazon Rekognition Video publishes a completion status to the Amazon * Simple Notification Service topic registered in the initial call to * StartFaceSearch. To get the search results, first check that the * status value published to the Amazon SNS topic is SUCCEEDED. If so, * call GetFaceSearch and pass the job identifier (JobId) * from the initial call to StartFaceSearch.

For more * information, see Searching Faces in a Collection in the Amazon Rekognition * Developer Guide.

The search results are returned in an array, * Persons, of PersonMatch objects. Each * PersonMatch element contains details about the matching faces * in the input collection, person information (facial attributes, bounding boxes, * and person identifier) for the matched person, and the time the person was * matched in the video.

GetFaceSearch only returns the * default facial attributes (BoundingBox, Confidence, * Landmarks, Pose, and Quality). The other * facial attributes listed in the Face object of the following * response syntax are not returned. For more information, see FaceDetail in the * Amazon Rekognition Developer Guide.

By default, the * Persons array is sorted by the time, in milliseconds from the start * of the video, that persons are matched. You can also sort by persons by specifying * INDEX for the SortBy input parameter.
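
A rough sketch (illustrative only; "jobId" is the identifier returned by StartFaceSearch):
@code
Aws::Rekognition::Model::GetFaceSearchRequest request;
request.SetJobId(jobId);
auto outcome = client.GetFaceSearch(request);
if (outcome.IsSuccess()) {
  for (const auto& personMatch : outcome.GetResult().GetPersons()) {
    // personMatch.GetTimestamp(), personMatch.GetPerson(), and
    // personMatch.GetFaceMatches() describe each match in the video.
  }
}
@endcode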

See * Also:

AWS * API Reference

*/ virtual Model::GetFaceSearchOutcome GetFaceSearch(const Model::GetFaceSearchRequest& request) const; /** *

Gets the face search results for Amazon Rekognition Video face search started * by StartFaceSearch. The search returns faces in a collection that match * the faces of persons detected in a video. It also includes the time(s) that * faces are matched in the video.

Face search in a video is an asynchronous * operation. You start face search by calling StartFaceSearch, which * returns a job identifier (JobId). When the search operation * finishes, Amazon Rekognition Video publishes a completion status to the Amazon * Simple Notification Service topic registered in the initial call to * StartFaceSearch. To get the search results, first check that the * status value published to the Amazon SNS topic is SUCCEEDED. If so, * call GetFaceSearch and pass the job identifier (JobId) * from the initial call to StartFaceSearch.

For more * information, see Searching Faces in a Collection in the Amazon Rekognition * Developer Guide.

The search results are returned in an array, * Persons, of PersonMatch objects. Each * PersonMatch element contains details about the matching faces * in the input collection, person information (facial attributes, bounding boxes, * and person identifier) for the matched person, and the time the person was * matched in the video.

GetFaceSearch only returns the * default facial attributes (BoundingBox, Confidence, * Landmarks, Pose, and Quality). The other * facial attributes listed in the Face object of the following * response syntax are not returned. For more information, see FaceDetail in the * Amazon Rekognition Developer Guide.

By default, the * Persons array is sorted by the time, in milliseconds from the start * of the video, that persons are matched. You can also sort by persons by specifying * INDEX for the SortBy input parameter.

See * Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::GetFaceSearchOutcomeCallable GetFaceSearchCallable(const Model::GetFaceSearchRequest& request) const; /** *

Gets the face search results for Amazon Rekognition Video face search started * by StartFaceSearch. The search returns faces in a collection that match * the faces of persons detected in a video. It also includes the time(s) that * faces are matched in the video.

Face search in a video is an asynchronous * operation. You start face search by calling StartFaceSearch, which * returns a job identifier (JobId). When the search operation * finishes, Amazon Rekognition Video publishes a completion status to the Amazon * Simple Notification Service topic registered in the initial call to * StartFaceSearch. To get the search results, first check that the * status value published to the Amazon SNS topic is SUCCEEDED. If so, * call GetFaceSearch and pass the job identifier (JobId) * from the initial call to StartFaceSearch.

For more * information, see Searching Faces in a Collection in the Amazon Rekognition * Developer Guide.

The search results are returned in an array, * Persons, of PersonMatch objects. Each * PersonMatch element contains details about the matching faces * in the input collection, person information (facial attributes, bounding boxes, * and person identifier) for the matched person, and the time the person was * matched in the video.

GetFaceSearch only returns the * default facial attributes (BoundingBox, Confidence, * Landmarks, Pose, and Quality). The other * facial attributes listed in the Face object of the following * response syntax are not returned. For more information, see FaceDetail in the * Amazon Rekognition Developer Guide.

By default, the * Persons array is sorted by the time, in milliseconds from the start * of the video, that persons are matched. You can also sort by persons by specifying * INDEX for the SortBy input parameter.

See * Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void GetFaceSearchAsync(const Model::GetFaceSearchRequest& request, const GetFaceSearchResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Gets the label detection results of an Amazon Rekognition Video analysis * started by StartLabelDetection.

The label detection operation is * started by a call to StartLabelDetection which returns a job identifier * (JobId). When the label detection operation finishes, Amazon * Rekognition publishes a completion status to the Amazon Simple Notification * Service topic registered in the initial call to * StartLabelDetection. To get the results of the label detection * operation, first check that the status value published to the Amazon SNS topic * is SUCCEEDED. If so, call GetLabelDetection and pass the job * identifier (JobId) from the initial call to * StartLabelDetection.

GetLabelDetection returns * an array of detected labels (Labels) sorted by the time the labels * were detected. You can also sort by the label name by specifying * NAME for the SortBy input parameter.

The labels * returned include the label name, the percentage confidence in the accuracy of * the detected label, and the time the label was detected in the video.

The * returned labels also include bounding box information for common objects, a * hierarchical taxonomy of detected labels, and the version of the label model * used for detection.

Use the MaxResults parameter to limit the number of * labels returned. If there are more results than specified in * MaxResults, the value of NextToken in the operation * response contains a pagination token for getting the next set of results. To get * the next page of results, call GetLabelDetection and populate the * NextToken request parameter with the token value returned from the * previous call to GetLabelDetection.
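
A rough sketch (illustrative only; "jobId" is the identifier returned by StartLabelDetection):
@code
Aws::Rekognition::Model::GetLabelDetectionRequest request;
request.SetJobId(jobId);
request.SetSortBy(Aws::Rekognition::Model::LabelDetectionSortBy::NAME); // optional
auto outcome = client.GetLabelDetection(request);
if (outcome.IsSuccess()) {
  for (const auto& labelDetection : outcome.GetResult().GetLabels()) {
    // labelDetection.GetTimestamp() and labelDetection.GetLabel() give the
    // time and the detected label (name, confidence, instances, parents).
  }
}
@endcode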

See Also:

AWS * API Reference

*/ virtual Model::GetLabelDetectionOutcome GetLabelDetection(const Model::GetLabelDetectionRequest& request) const; /** *

Gets the label detection results of an Amazon Rekognition Video analysis * started by StartLabelDetection.

The label detection operation is * started by a call to StartLabelDetection which returns a job identifier * (JobId). When the label detection operation finishes, Amazon * Rekognition publishes a completion status to the Amazon Simple Notification * Service topic registered in the initial call to * StartLabelDetection. To get the results of the label detection * operation, first check that the status value published to the Amazon SNS topic * is SUCCEEDED. If so, call GetLabelDetection and pass the job * identifier (JobId) from the initial call to * StartLabelDetection.

GetLabelDetection returns * an array of detected labels (Labels) sorted by the time the labels * were detected. You can also sort by the label name by specifying * NAME for the SortBy input parameter.

The labels * returned include the label name, the percentage confidence in the accuracy of * the detected label, and the time the label was detected in the video.

The * returned labels also include bounding box information for common objects, a * hierarchical taxonomy of detected labels, and the version of the label model * used for detection.

Use the MaxResults parameter to limit the number of * labels returned. If there are more results than specified in * MaxResults, the value of NextToken in the operation * response contains a pagination token for getting the next set of results. To get * the next page of results, call GetLabelDetection and populate the * NextToken request parameter with the token value returned from the * previous call to GetLabelDetection.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::GetLabelDetectionOutcomeCallable GetLabelDetectionCallable(const Model::GetLabelDetectionRequest& request) const; /** *

Gets the label detection results of an Amazon Rekognition Video analysis * started by StartLabelDetection.

The label detection operation is * started by a call to StartLabelDetection which returns a job identifier * (JobId). When the label detection operation finishes, Amazon * Rekognition publishes a completion status to the Amazon Simple Notification * Service topic registered in the initial call to * StartLabelDetection. To get the results of the label detection * operation, first check that the status value published to the Amazon SNS topic * is SUCCEEDED. If so, call GetLabelDetection and pass the job * identifier (JobId) from the initial call to * StartLabelDetection.

GetLabelDetection returns * an array of detected labels (Labels) sorted by the time the labels * were detected. You can also sort by the label name by specifying * NAME for the SortBy input parameter.

The labels * returned include the label name, the percentage confidence in the accuracy of * the detected label, and the time the label was detected in the video.

The * returned labels also include bounding box information for common objects, a * hierarchical taxonomy of detected labels, and the version of the label model * used for detection.

Use the MaxResults parameter to limit the number of * labels returned. If there are more results than specified in * MaxResults, the value of NextToken in the operation * response contains a pagination token for getting the next set of results. To get * the next page of results, call GetLabelDetection and populate the * NextToken request parameter with the token value returned from the * previous call to GetLabelDetection.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void GetLabelDetectionAsync(const Model::GetLabelDetectionRequest& request, const GetLabelDetectionResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Gets the path tracking results of an Amazon Rekognition Video analysis started * by StartPersonTracking.

The person path tracking operation is * started by a call to StartPersonTracking which returns a job * identifier (JobId). When the operation finishes, Amazon Rekognition * Video publishes a completion status to the Amazon Simple Notification Service * topic registered in the initial call to StartPersonTracking.

*

To get the results of the person path tracking operation, first check that * the status value published to the Amazon SNS topic is SUCCEEDED. If * so, call GetPersonTracking and pass the job identifier * (JobId) from the initial call to * StartPersonTracking.

GetPersonTracking returns * an array, Persons, of tracked persons and the time(s) their paths * were tracked in the video.

GetPersonTracking only * returns the default facial attributes (BoundingBox, * Confidence, Landmarks, Pose, and * Quality). The other facial attributes listed in the * Face object of the following response syntax are not returned.

*

For more information, see FaceDetail in the Amazon Rekognition Developer * Guide.

By default, the array is sorted by the time(s) a person's * path is tracked in the video. You can sort by tracked persons by specifying * INDEX for the SortBy input parameter.

Use the * MaxResults parameter to limit the number of items returned. If * there are more results than specified in MaxResults, the value of * NextToken in the operation response contains a pagination token for * getting the next set of results. To get the next page of results, call * GetPersonTracking and populate the NextToken request * parameter with the token value returned from the previous call to * GetPersonTracking.
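
A rough sketch (illustrative only; "jobId" is the identifier returned by StartPersonTracking):
@code
Aws::Rekognition::Model::GetPersonTrackingRequest request;
request.SetJobId(jobId);
request.SetSortBy(Aws::Rekognition::Model::PersonTrackingSortBy::INDEX); // group results by person
auto outcome = client.GetPersonTracking(request);
if (outcome.IsSuccess()) {
  for (const auto& personDetection : outcome.GetResult().GetPersons()) {
    // personDetection.GetTimestamp() and personDetection.GetPerson() (a
    // PersonDetail) describe where the person is at that point in the video.
  }
}
@endcode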

See Also:

AWS * API Reference

*/ virtual Model::GetPersonTrackingOutcome GetPersonTracking(const Model::GetPersonTrackingRequest& request) const; /** *

Gets the path tracking results of an Amazon Rekognition Video analysis started * by StartPersonTracking.

The person path tracking operation is * started by a call to StartPersonTracking which returns a job * identifier (JobId). When the operation finishes, Amazon Rekognition * Video publishes a completion status to the Amazon Simple Notification Service * topic registered in the initial call to StartPersonTracking.

*

To get the results of the person path tracking operation, first check that * the status value published to the Amazon SNS topic is SUCCEEDED. If * so, call GetPersonTracking and pass the job identifier * (JobId) from the initial call to * StartPersonTracking.

GetPersonTracking returns * an array, Persons, of tracked persons and the time(s) their paths * were tracked in the video.

GetPersonTracking only * returns the default facial attributes (BoundingBox, * Confidence, Landmarks, Pose, and * Quality). The other facial attributes listed in the * Face object of the following response syntax are not returned.

*

For more information, see FaceDetail in the Amazon Rekognition Developer * Guide.

By default, the array is sorted by the time(s) a person's * path is tracked in the video. You can sort by tracked persons by specifying * INDEX for the SortBy input parameter.

Use the * MaxResults parameter to limit the number of items returned. If * there are more results than specified in MaxResults, the value of * NextToken in the operation response contains a pagination token for * getting the next set of results. To get the next page of results, call * GetPersonTracking and populate the NextToken request * parameter with the token value returned from the previous call to * GetPersonTracking.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::GetPersonTrackingOutcomeCallable GetPersonTrackingCallable(const Model::GetPersonTrackingRequest& request) const; /** *

Gets the path tracking results of an Amazon Rekognition Video analysis started * by StartPersonTracking.

The person path tracking operation is * started by a call to StartPersonTracking which returns a job * identifier (JobId). When the operation finishes, Amazon Rekognition * Video publishes a completion status to the Amazon Simple Notification Service * topic registered in the initial call to StartPersonTracking.

*

To get the results of the person path tracking operation, first check that * the status value published to the Amazon SNS topic is SUCCEEDED. If * so, call GetPersonTracking and pass the job identifier * (JobId) from the initial call to * StartPersonTracking.

GetPersonTracking returns * an array, Persons, of tracked persons and the time(s) their paths * were tracked in the video.

GetPersonTracking only * returns the default facial attributes (BoundingBox, * Confidence, Landmarks, Pose, and * Quality). The other facial attributes listed in the * Face object of the following response syntax are not returned.

*

For more information, see FaceDetail in the Amazon Rekognition Developer * Guide.

By default, the array is sorted by the time(s) a person's * path is tracked in the video. You can sort by tracked persons by specifying * INDEX for the SortBy input parameter.

Use the * MaxResults parameter to limit the number of items returned. If * there are more results than specified in MaxResults, the value of * NextToken in the operation response contains a pagination token for * getting the next set of results. To get the next page of results, call * GetPersonTracking and populate the NextToken request * parameter with the token value returned from the previous call to * GetPersonTracking.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void GetPersonTrackingAsync(const Model::GetPersonTrackingRequest& request, const GetPersonTrackingResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Gets the segment detection results of an Amazon Rekognition Video analysis * started by StartSegmentDetection.

Segment detection with Amazon * Rekognition Video is an asynchronous operation. You start segment detection by * calling StartSegmentDetection which returns a job identifier * (JobId). When the segment detection operation finishes, Amazon * Rekognition publishes a completion status to the Amazon Simple Notification * Service topic registered in the initial call to * StartSegmentDetection. To get the results of the segment detection * operation, first check that the status value published to the Amazon SNS topic * is SUCCEEDED. If so, call GetSegmentDetection and pass * the job identifier (JobId) from the initial call of * StartSegmentDetection.

GetSegmentDetection * returns detected segments in an array (Segments) of * SegmentDetection objects. Segments is sorted by the segment * types specified in the SegmentTypes input parameter of * StartSegmentDetection. Each element of the array includes the * detected segment, the percentage confidence in the accuracy of the detected * segment, the type of the segment, and the frame in which the segment was * detected.

Use SelectedSegmentTypes to find out the type of * segment detection requested in the call to * StartSegmentDetection.

Use the MaxResults * parameter to limit the number of segment detections returned. If there are more * results than specified in MaxResults, the value of * NextToken in the operation response contains a pagination token for * getting the next set of results. To get the next page of results, call * GetSegmentDetection and populate the NextToken request * parameter with the token value returned from the previous call to * GetSegmentDetection.

For more information, see Detecting * Video Segments in Stored Video in the Amazon Rekognition Developer * Guide.

See Also:

AWS * API Reference

*/ virtual Model::GetSegmentDetectionOutcome GetSegmentDetection(const Model::GetSegmentDetectionRequest& request) const; /** *
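The same NextToken pattern applies here. A short sketch (editor's illustration; assumes the generated SetJobId/SetMaxResults/SetNextToken setters and GetSegments/GetNextToken getters, with a hypothetical helper name):

#include <aws/rekognition/RekognitionClient.h>
#include <aws/rekognition/model/GetSegmentDetectionRequest.h>
#include <iostream>

// Page through the segments detected by a completed StartSegmentDetection job.
void DumpSegments(const Aws::Rekognition::RekognitionClient& client, const Aws::String& jobId)
{
    Aws::String token;
    do
    {
        Aws::Rekognition::Model::GetSegmentDetectionRequest request;
        request.SetJobId(jobId);
        request.SetMaxResults(50);
        if (!token.empty()) request.SetNextToken(token);

        auto outcome = client.GetSegmentDetection(request);
        if (!outcome.IsSuccess())
        {
            std::cerr << outcome.GetError().GetMessage() << std::endl;
            return;
        }
        std::cout << "segments in page: " << outcome.GetResult().GetSegments().size() << std::endl;
        token = outcome.GetResult().GetNextToken();
    } while (!token.empty());
}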

Gets the segment detection results of an Amazon Rekognition Video analysis * started by StartSegmentDetection.

Segment detection with Amazon * Rekognition Video is an asynchronous operation. You start segment detection by * calling StartSegmentDetection which returns a job identifier * (JobId). When the segment detection operation finishes, Amazon * Rekognition publishes a completion status to the Amazon Simple Notification * Service topic registered in the initial call to * StartSegmentDetection. To get the results of the segment detection * operation, first check that the status value published to the Amazon SNS topic * is SUCCEEDED. If so, call GetSegmentDetection and pass * the job identifier (JobId) from the initial call of * StartSegmentDetection.

GetSegmentDetection * returns detected segments in an array (Segments) of * SegmentDetection objects. Segments is sorted by the segment * types specified in the SegmentTypes input parameter of * StartSegmentDetection. Each element of the array includes the * detected segment, the percentage confidence in the accuracy of the detected * segment, the type of the segment, and the frame in which the segment was * detected.

Use SelectedSegmentTypes to find out the type of * segment detection requested in the call to * StartSegmentDetection.

Use the MaxResults * parameter to limit the number of segment detections returned. If there are more * results than specified in MaxResults, the value of * NextToken in the operation response contains a pagination token for * getting the next set of results. To get the next page of results, call * GetSegmentDetection and populate the NextToken request * parameter with the token value returned from the previous call to * GetSegmentDetection.

For more information, see Detecting * Video Segments in Stored Video in the Amazon Rekognition Developer * Guide.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::GetSegmentDetectionOutcomeCallable GetSegmentDetectionCallable(const Model::GetSegmentDetectionRequest& request) const; /** *

Gets the segment detection results of an Amazon Rekognition Video analysis * started by StartSegmentDetection.

Segment detection with Amazon * Rekognition Video is an asynchronous operation. You start segment detection by * calling StartSegmentDetection which returns a job identifier * (JobId). When the segment detection operation finishes, Amazon * Rekognition publishes a completion status to the Amazon Simple Notification * Service topic registered in the initial call to * StartSegmentDetection. To get the results of the segment detection * operation, first check that the status value published to the Amazon SNS topic * is SUCCEEDED. If so, call GetSegmentDetection and pass * the job identifier (JobId) from the initial call of * StartSegmentDetection.

GetSegmentDetection * returns detected segments in an array (Segments) of * SegmentDetection objects. Segments is sorted by the segment * types specified in the SegmentTypes input parameter of * StartSegmentDetection. Each element of the array includes the * detected segment, the percentage confidence in the accuracy of the detected * segment, the type of the segment, and the frame in which the segment was * detected.

Use SelectedSegmentTypes to find out the type of * segment detection requested in the call to * StartSegmentDetection.

Use the MaxResults * parameter to limit the number of segment detections returned. If there are more * results than specified in MaxResults, the value of * NextToken in the operation response contains a pagination token for * getting the next set of results. To get the next page of results, call * GetSegmentDetection and populate the NextToken request * parameter with the token value returned from the previous call to * GetSegmentDetection.

For more information, see Detecting * Video Segments in Stored Video in the Amazon Rekognition Developer * Guide.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void GetSegmentDetectionAsync(const Model::GetSegmentDetectionRequest& request, const GetSegmentDetectionResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Gets the text detection results of an Amazon Rekognition Video analysis * started by StartTextDetection.

Text detection with Amazon * Rekognition Video is an asynchronous operation. You start text detection by * calling StartTextDetection which returns a job identifier * (JobId). When the text detection operation finishes, Amazon * Rekognition publishes a completion status to the Amazon Simple Notification * Service topic registered in the initial call to StartTextDetection. * To get the results of the text detection operation, first check that the status * value published to the Amazon SNS topic is SUCCEEDED. If so, call * GetTextDetection and pass the job identifier (JobId) * from the initial call of StartTextDetection.

* GetTextDetection returns an array of detected text * (TextDetections) sorted by the time the text was detected, up to 50 * words per frame of video.

Each element of the array includes the detected * text, the percentage confidence in the accuracy of the detected text, the time * the text was detected, bounding box information for where the text was located, * and unique identifiers for words and their lines.

Use the MaxResults * parameter to limit the number of text detections returned. If there are more * results than specified in MaxResults, the value of * NextToken in the operation response contains a pagination token for * getting the next set of results. To get the next page of results, call * GetTextDetection and populate the NextToken request * parameter with the token value returned from the previous call to * GetTextDetection.

See Also:

AWS * API Reference

*/ virtual Model::GetTextDetectionOutcome GetTextDetection(const Model::GetTextDetectionRequest& request) const; /** *
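A usage sketch for retrieving text detections (editor's illustration; it assumes the generated SetJobId/SetMaxResults setters and that each TextDetectionResult element exposes GetTimestamp and GetTextDetection().GetDetectedText(); the helper name is hypothetical):

#include <aws/rekognition/RekognitionClient.h>
#include <aws/rekognition/model/GetTextDetectionRequest.h>
#include <iostream>

// Print each piece of text detected by a completed StartTextDetection job.
void DumpDetectedText(const Aws::Rekognition::RekognitionClient& client, const Aws::String& jobId)
{
    Aws::Rekognition::Model::GetTextDetectionRequest request;
    request.SetJobId(jobId);
    request.SetMaxResults(100);

    auto outcome = client.GetTextDetection(request);
    if (!outcome.IsSuccess())
    {
        std::cerr << outcome.GetError().GetMessage() << std::endl;
        return;
    }
    for (const auto& detection : outcome.GetResult().GetTextDetections())
    {
        // Each element pairs a timestamp with the detected word or line.
        std::cout << detection.GetTimestamp() << " ms: "
                  << detection.GetTextDetection().GetDetectedText() << std::endl;
    }
}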

Gets the text detection results of an Amazon Rekognition Video analysis * started by StartTextDetection.

Text detection with Amazon * Rekognition Video is an asynchronous operation. You start text detection by * calling StartTextDetection which returns a job identifier * (JobId). When the text detection operation finishes, Amazon * Rekognition publishes a completion status to the Amazon Simple Notification * Service topic registered in the initial call to StartTextDetection. * To get the results of the text detection operation, first check that the status * value published to the Amazon SNS topic is SUCCEEDED. If so, call * GetTextDetection and pass the job identifier (JobId) * from the initial call of StartTextDetection.

* GetTextDetection returns an array of detected text * (TextDetections) sorted by the time the text was detected, up to 50 * words per frame of video.

Each element of the array includes the detected * text, the percentage confidence in the accuracy of the detected text, the time * the text was detected, bounding box information for where the text was located, * and unique identifiers for words and their lines.

Use the MaxResults * parameter to limit the number of text detections returned. If there are more * results than specified in MaxResults, the value of * NextToken in the operation response contains a pagination token for * getting the next set of results. To get the next page of results, call * GetTextDetection and populate the NextToken request * parameter with the token value returned from the previous call to * GetTextDetection.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::GetTextDetectionOutcomeCallable GetTextDetectionCallable(const Model::GetTextDetectionRequest& request) const; /** *

Gets the text detection results of an Amazon Rekognition Video analysis * started by StartTextDetection.

Text detection with Amazon * Rekognition Video is an asynchronous operation. You start text detection by * calling StartTextDetection which returns a job identifier * (JobId). When the text detection operation finishes, Amazon * Rekognition publishes a completion status to the Amazon Simple Notification * Service topic registered in the initial call to StartTextDetection. * To get the results of the text detection operation, first check that the status * value published to the Amazon SNS topic is SUCCEEDED. If so, call * GetTextDetection and pass the job identifier (JobId) * from the initial call of StartTextDetection.

* GetTextDetection returns an array of detected text * (TextDetections) sorted by the time the text was detected, up to 50 * words per frame of video.

Each element of the array includes the detected * text, the percentage confidence in the accuracy of the detected text, the time * the text was detected, bounding box information for where the text was located, * and unique identifiers for words and their lines.

Use the MaxResults * parameter to limit the number of text detections returned. If there are more * results than specified in MaxResults, the value of * NextToken in the operation response contains a pagination token for * getting the next set of results. To get the next page of results, call * GetTextDetection and populate the NextToken request * parameter with the token value returned from the previous call to * GetTextDetection.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void GetTextDetectionAsync(const Model::GetTextDetectionRequest& request, const GetTextDetectionResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Detects faces in the input image and adds them to the specified collection. *

Amazon Rekognition doesn't save the actual faces that are detected. * Instead, the underlying detection algorithm first detects the faces in the input * image. For each face, the algorithm extracts facial features into a feature * vector, and stores it in the backend database. Amazon Rekognition uses feature * vectors when it performs face match and search operations using the * SearchFaces and SearchFacesByImage operations.

For more * information, see Adding Faces to a Collection in the Amazon Rekognition * Developer Guide.

To get the number of faces in a collection, call * DescribeCollection.

If you're using version 1.0 of the face * detection model, IndexFaces indexes the 15 largest faces in the * input image. Later versions of the face detection model index the 100 largest * faces in the input image.

If you're using version 4 or later of the face * model, image orientation information is not returned in the * OrientationCorrection field.

To determine which version of * the model you're using, call DescribeCollection and supply the collection * ID. You can also get the model version from the value of * FaceModelVersion in the response from IndexFaces.

*

For more information, see Model Versioning in the Amazon Rekognition * Developer Guide.

If you provide the optional ExternalImageId * for the input image you provided, Amazon Rekognition associates this ID with all * faces that it detects. When you call the ListFaces operation, the * response returns the external ID. You can use this external image ID to create a * client-side index to associate the faces with each image. You can then use the * index to find all faces in an image.

You can specify the maximum number * of faces to index with the MaxFaces input parameter. This is useful * when you want to index the largest faces in an image and don't want to index * smaller faces, such as those belonging to people standing in the background.

*

The QualityFilter input parameter allows you to filter out * detected faces that don’t meet a required quality bar. The quality bar is based * on a variety of common use cases. By default, IndexFaces chooses * the quality bar that's used to filter faces. You can also explicitly choose the * quality bar. Use QualityFilter to set the quality bar by * specifying LOW, MEDIUM, or HIGH. If you * do not want to filter detected faces, specify NONE.

*

To use quality filtering, you need a collection associated with version 3 of * the face model or higher. To get the version of the face model associated with a * collection, call DescribeCollection.

Information about * faces detected in an image, but not indexed, is returned in an array of * UnindexedFace objects, UnindexedFaces. Faces aren't indexed * for reasons such as:

  • The number of faces detected exceeds the * value of the MaxFaces request parameter.

  • The face * is too small compared to the image dimensions.

  • The face is too * blurry.

  • The image is too dark.

  • The face has * an extreme pose.

  • The face doesn’t have enough detail to be * suitable for face search.

In response, the * IndexFaces operation returns an array of metadata for all detected * faces, FaceRecords. This includes:

  • The bounding * box, BoundingBox, of the detected face.

  • A * confidence value, Confidence, which indicates the confidence that * the bounding box contains a face.

  • A face ID, * FaceId, assigned by the service for each face that's detected and * stored.

  • An image ID, ImageId, assigned by the * service for the input image.

If you request all facial * attributes (by using the detectionAttributes parameter), Amazon * Rekognition returns detailed facial attributes, such as facial landmarks (for * example, location of eye and mouth) and other facial attributes. If you provide * the same image, specify the same collection, and use the same external ID in the * IndexFaces operation, Amazon Rekognition doesn't save duplicate * face metadata.

The input image is passed either as base64-encoded * image bytes, or as a reference to an image in an Amazon S3 bucket. If you use * the AWS CLI to call Amazon Rekognition operations, passing image bytes isn't * supported. The image must be formatted as a PNG or JPEG file.

This * operation requires permissions to perform the * rekognition:IndexFaces action.

See Also:

AWS * API Reference

*/ virtual Model::IndexFacesOutcome IndexFaces(const Model::IndexFacesRequest& request) const; /** *
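A sketch of indexing an S3-hosted image into a collection (editor's illustration; it assumes the generated Image/S3Object model setters, SetCollectionId, SetExternalImageId and SetMaxFaces, and result getters GetFaceRecords/GetUnindexedFaces; names and values are hypothetical):

#include <aws/rekognition/RekognitionClient.h>
#include <aws/rekognition/model/IndexFacesRequest.h>
#include <aws/rekognition/model/Image.h>
#include <aws/rekognition/model/S3Object.h>
#include <iostream>

// Index the largest faces of an S3-hosted image into an existing collection.
void IndexImage(const Aws::Rekognition::RekognitionClient& client,
                const Aws::String& collectionId,
                const Aws::String& bucket,
                const Aws::String& key)
{
    Aws::Rekognition::Model::S3Object s3Object;
    s3Object.SetBucket(bucket);
    s3Object.SetName(key);

    Aws::Rekognition::Model::Image image;
    image.SetS3Object(s3Object);

    Aws::Rekognition::Model::IndexFacesRequest request;
    request.SetCollectionId(collectionId);
    request.SetImage(image);
    request.SetExternalImageId("image-001");   // optional client-side identifier, as described above
    request.SetMaxFaces(5);                    // only index the five largest faces

    auto outcome = client.IndexFaces(request);
    if (!outcome.IsSuccess())
    {
        std::cerr << outcome.GetError().GetMessage() << std::endl;
        return;
    }
    for (const auto& record : outcome.GetResult().GetFaceRecords())
    {
        std::cout << "indexed face: " << record.GetFace().GetFaceId() << std::endl;
    }
    std::cout << "unindexed faces: " << outcome.GetResult().GetUnindexedFaces().size() << std::endl;
}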

Detects faces in the input image and adds them to the specified collection. *

Amazon Rekognition doesn't save the actual faces that are detected. * Instead, the underlying detection algorithm first detects the faces in the input * image. For each face, the algorithm extracts facial features into a feature * vector, and stores it in the backend database. Amazon Rekognition uses feature * vectors when it performs face match and search operations using the * SearchFaces and SearchFacesByImage operations.

For more * information, see Adding Faces to a Collection in the Amazon Rekognition * Developer Guide.

To get the number of faces in a collection, call * DescribeCollection.

If you're using version 1.0 of the face * detection model, IndexFaces indexes the 15 largest faces in the * input image. Later versions of the face detection model index the 100 largest * faces in the input image.

If you're using version 4 or later of the face * model, image orientation information is not returned in the * OrientationCorrection field.

To determine which version of * the model you're using, call DescribeCollection and supply the collection * ID. You can also get the model version from the value of * FaceModelVersion in the response from IndexFaces.

*

For more information, see Model Versioning in the Amazon Rekognition * Developer Guide.

If you provide the optional ExternalImageId * for the input image you provided, Amazon Rekognition associates this ID with all * faces that it detects. When you call the ListFaces operation, the * response returns the external ID. You can use this external image ID to create a * client-side index to associate the faces with each image. You can then use the * index to find all faces in an image.

You can specify the maximum number * of faces to index with the MaxFaces input parameter. This is useful * when you want to index the largest faces in an image and don't want to index * smaller faces, such as those belonging to people standing in the background.

*

The QualityFilter input parameter allows you to filter out * detected faces that don’t meet a required quality bar. The quality bar is based * on a variety of common use cases. By default, IndexFaces chooses * the quality bar that's used to filter faces. You can also explicitly choose the * quality bar. Use QualityFilter to set the quality bar by * specifying LOW, MEDIUM, or HIGH. If you * do not want to filter detected faces, specify NONE.

*

To use quality filtering, you need a collection associated with version 3 of * the face model or higher. To get the version of the face model associated with a * collection, call DescribeCollection.

Information about * faces detected in an image, but not indexed, is returned in an array of * UnindexedFace objects, UnindexedFaces. Faces aren't indexed * for reasons such as:

  • The number of faces detected exceeds the * value of the MaxFaces request parameter.

  • The face * is too small compared to the image dimensions.

  • The face is too * blurry.

  • The image is too dark.

  • The face has * an extreme pose.

  • The face doesn’t have enough detail to be * suitable for face search.

In response, the * IndexFaces operation returns an array of metadata for all detected * faces, FaceRecords. This includes:

  • The bounding * box, BoundingBox, of the detected face.

  • A * confidence value, Confidence, which indicates the confidence that * the bounding box contains a face.

  • A face ID, * FaceId, assigned by the service for each face that's detected and * stored.

  • An image ID, ImageId, assigned by the * service for the input image.

If you request all facial * attributes (by using the detectionAttributes parameter), Amazon * Rekognition returns detailed facial attributes, such as facial landmarks (for * example, location of eye and mouth) and other facial attributes. If you provide * the same image, specify the same collection, and use the same external ID in the * IndexFaces operation, Amazon Rekognition doesn't save duplicate * face metadata.

The input image is passed either as base64-encoded * image bytes, or as a reference to an image in an Amazon S3 bucket. If you use * the AWS CLI to call Amazon Rekognition operations, passing image bytes isn't * supported. The image must be formatted as a PNG or JPEG file.

This * operation requires permissions to perform the * rekognition:IndexFaces action.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::IndexFacesOutcomeCallable IndexFacesCallable(const Model::IndexFacesRequest& request) const; /** *

Detects faces in the input image and adds them to the specified collection. *

Amazon Rekognition doesn't save the actual faces that are detected. * Instead, the underlying detection algorithm first detects the faces in the input * image. For each face, the algorithm extracts facial features into a feature * vector, and stores it in the backend database. Amazon Rekognition uses feature * vectors when it performs face match and search operations using the * SearchFaces and SearchFacesByImage operations.

For more * information, see Adding Faces to a Collection in the Amazon Rekognition * Developer Guide.

To get the number of faces in a collection, call * DescribeCollection.

If you're using version 1.0 of the face * detection model, IndexFaces indexes the 15 largest faces in the * input image. Later versions of the face detection model index the 100 largest * faces in the input image.

If you're using version 4 or later of the face * model, image orientation information is not returned in the * OrientationCorrection field.

To determine which version of * the model you're using, call DescribeCollection and supply the collection * ID. You can also get the model version from the value of * FaceModelVersion in the response from IndexFaces.

*

For more information, see Model Versioning in the Amazon Rekognition * Developer Guide.

If you provide the optional ExternalImageId * for the input image you provided, Amazon Rekognition associates this ID with all * faces that it detects. When you call the ListFaces operation, the * response returns the external ID. You can use this external image ID to create a * client-side index to associate the faces with each image. You can then use the * index to find all faces in an image.

You can specify the maximum number * of faces to index with the MaxFaces input parameter. This is useful * when you want to index the largest faces in an image and don't want to index * smaller faces, such as those belonging to people standing in the background.

*

The QualityFilter input parameter allows you to filter out * detected faces that don’t meet a required quality bar. The quality bar is based * on a variety of common use cases. By default, IndexFaces chooses * the quality bar that's used to filter faces. You can also explicitly choose the * quality bar. Use QualityFilter to set the quality bar by * specifying LOW, MEDIUM, or HIGH. If you * do not want to filter detected faces, specify NONE.

*

To use quality filtering, you need a collection associated with version 3 of * the face model or higher. To get the version of the face model associated with a * collection, call DescribeCollection.

Information about * faces detected in an image, but not indexed, is returned in an array of * UnindexedFace objects, UnindexedFaces. Faces aren't indexed * for reasons such as:

  • The number of faces detected exceeds the * value of the MaxFaces request parameter.

  • The face * is too small compared to the image dimensions.

  • The face is too * blurry.

  • The image is too dark.

  • The face has * an extreme pose.

  • The face doesn’t have enough detail to be * suitable for face search.

In response, the * IndexFaces operation returns an array of metadata for all detected * faces, FaceRecords. This includes:

  • The bounding * box, BoundingBox, of the detected face.

  • A * confidence value, Confidence, which indicates the confidence that * the bounding box contains a face.

  • A face ID, * FaceId, assigned by the service for each face that's detected and * stored.

  • An image ID, ImageId, assigned by the * service for the input image.

If you request all facial * attributes (by using the detectionAttributes parameter), Amazon * Rekognition returns detailed facial attributes, such as facial landmarks (for * example, location of eye and mouth) and other facial attributes. If you provide * the same image, specify the same collection, and use the same external ID in the * IndexFaces operation, Amazon Rekognition doesn't save duplicate * face metadata.

The input image is passed either as base64-encoded * image bytes, or as a reference to an image in an Amazon S3 bucket. If you use * the AWS CLI to call Amazon Rekognition operations, passing image bytes isn't * supported. The image must be formatted as a PNG or JPEG file.

This * operation requires permissions to perform the * rekognition:IndexFaces action.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void IndexFacesAsync(const Model::IndexFacesRequest& request, const IndexFacesResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Returns a list of collection IDs in your account. If the result is truncated, * the response also provides a NextToken that you can use in the * subsequent request to fetch the next set of collection IDs.

For an * example, see Listing Collections in the Amazon Rekognition Developer Guide.

*

This operation requires permissions to perform the * rekognition:ListCollections action.

See Also:

AWS * API Reference

*/ virtual Model::ListCollectionsOutcome ListCollections(const Model::ListCollectionsRequest& request) const; /** *
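A minimal sketch of the truncated-listing pattern described above (editor's illustration; it assumes the generated SetNextToken setter and GetCollectionIds/GetNextToken getters; the helper name is hypothetical):

#include <aws/rekognition/RekognitionClient.h>
#include <aws/rekognition/model/ListCollectionsRequest.h>
#include <iostream>

// List every collection ID in the account, following NextToken across pages.
void ListAllCollections(const Aws::Rekognition::RekognitionClient& client)
{
    Aws::String token;
    do
    {
        Aws::Rekognition::Model::ListCollectionsRequest request;
        if (!token.empty()) request.SetNextToken(token);

        auto outcome = client.ListCollections(request);
        if (!outcome.IsSuccess())
        {
            std::cerr << outcome.GetError().GetMessage() << std::endl;
            return;
        }
        for (const auto& id : outcome.GetResult().GetCollectionIds())
        {
            std::cout << id << std::endl;
        }
        token = outcome.GetResult().GetNextToken();
    } while (!token.empty());
}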

Returns a list of collection IDs in your account. If the result is truncated, * the response also provides a NextToken that you can use in the * subsequent request to fetch the next set of collection IDs.

For an * example, see Listing Collections in the Amazon Rekognition Developer Guide.

*

This operation requires permissions to perform the * rekognition:ListCollections action.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::ListCollectionsOutcomeCallable ListCollectionsCallable(const Model::ListCollectionsRequest& request) const; /** *

Returns a list of collection IDs in your account. If the result is truncated, * the response also provides a NextToken that you can use in the * subsequent request to fetch the next set of collection IDs.

For an * example, see Listing Collections in the Amazon Rekognition Developer Guide.

*

This operation requires permissions to perform the * rekognition:ListCollections action.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void ListCollectionsAsync(const Model::ListCollectionsRequest& request, const ListCollectionsResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Returns metadata for faces in the specified collection. This metadata * includes information such as the bounding box coordinates, the confidence (that * the bounding box contains a face), and face ID. For an example, see Listing * Faces in a Collection in the Amazon Rekognition Developer Guide.

This * operation requires permissions to perform the rekognition:ListFaces * action.

See Also:

AWS * API Reference

*/ virtual Model::ListFacesOutcome ListFaces(const Model::ListFacesRequest& request) const; /** *
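A short sketch of listing the faces stored in a collection (editor's illustration; it assumes the generated SetCollectionId/SetMaxResults setters and a Face model exposing GetFaceId and GetConfidence; the helper name is hypothetical):

#include <aws/rekognition/RekognitionClient.h>
#include <aws/rekognition/model/ListFacesRequest.h>
#include <iostream>

// Print the face ID and bounding-box confidence of every face stored in a collection.
void ListCollectionFaces(const Aws::Rekognition::RekognitionClient& client,
                         const Aws::String& collectionId)
{
    Aws::Rekognition::Model::ListFacesRequest request;
    request.SetCollectionId(collectionId);
    request.SetMaxResults(100);

    auto outcome = client.ListFaces(request);
    if (!outcome.IsSuccess())
    {
        std::cerr << outcome.GetError().GetMessage() << std::endl;
        return;
    }
    for (const auto& face : outcome.GetResult().GetFaces())
    {
        std::cout << face.GetFaceId() << " confidence " << face.GetConfidence() << std::endl;
    }
}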

Returns metadata for faces in the specified collection. This metadata * includes information such as the bounding box coordinates, the confidence (that * the bounding box contains a face), and face ID. For an example, see Listing * Faces in a Collection in the Amazon Rekognition Developer Guide.

This * operation requires permissions to perform the rekognition:ListFaces * action.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::ListFacesOutcomeCallable ListFacesCallable(const Model::ListFacesRequest& request) const; /** *

Returns metadata for faces in the specified collection. This metadata * includes information such as the bounding box coordinates, the confidence (that * the bounding box contains a face), and face ID. For an example, see Listing * Faces in a Collection in the Amazon Rekognition Developer Guide.

This * operation requires permissions to perform the rekognition:ListFaces * action.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void ListFacesAsync(const Model::ListFacesRequest& request, const ListFacesResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Gets a list of stream processors that you have created with * CreateStreamProcessor.

See Also:

AWS * API Reference

*/ virtual Model::ListStreamProcessorsOutcome ListStreamProcessors(const Model::ListStreamProcessorsRequest& request) const; /** *
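A brief sketch (editor's illustration; it assumes the result exposes a GetStreamProcessors list whose elements have a GetName getter; the helper name is hypothetical):

#include <aws/rekognition/RekognitionClient.h>
#include <aws/rekognition/model/ListStreamProcessorsRequest.h>
#include <iostream>

// Enumerate the stream processors created with CreateStreamProcessor.
void ListProcessors(const Aws::Rekognition::RekognitionClient& client)
{
    Aws::Rekognition::Model::ListStreamProcessorsRequest request;
    auto outcome = client.ListStreamProcessors(request);
    if (!outcome.IsSuccess())
    {
        std::cerr << outcome.GetError().GetMessage() << std::endl;
        return;
    }
    for (const auto& processor : outcome.GetResult().GetStreamProcessors())
    {
        std::cout << processor.GetName() << std::endl;
    }
}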

Gets a list of stream processors that you have created with * CreateStreamProcessor.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::ListStreamProcessorsOutcomeCallable ListStreamProcessorsCallable(const Model::ListStreamProcessorsRequest& request) const; /** *

Gets a list of stream processors that you have created with * CreateStreamProcessor.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void ListStreamProcessorsAsync(const Model::ListStreamProcessorsRequest& request, const ListStreamProcessorsResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Returns an array of celebrities recognized in the input image. For more * information, see Recognizing Celebrities in the Amazon Rekognition Developer * Guide.

RecognizeCelebrities returns the 100 largest faces * in the image. It lists recognized celebrities in the CelebrityFaces * array and unrecognized faces in the UnrecognizedFaces array. * RecognizeCelebrities doesn't return celebrities whose faces aren't * among the largest 100 faces in the image.

For each celebrity recognized, * RecognizeCelebrities returns a Celebrity object. The * Celebrity object contains the celebrity name, ID, URL links to * additional information, match confidence, and a ComparedFace object * that you can use to locate the celebrity's face on the image.

Amazon * Rekognition doesn't retain information about which images a celebrity has been * recognized in. Your application must store this information and use the * Celebrity ID property as a unique identifier for the celebrity. If * you don't store the celebrity name or additional information URLs returned by * RecognizeCelebrities, you will need the ID to identify the * celebrity in a call to the GetCelebrityInfo operation.

You pass * the input image either as base64-encoded image bytes or as a reference to an * image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition * operations, passing image bytes is not supported. The image must be either a PNG * or JPEG formatted file.

For an example, see Recognizing Celebrities in * an Image in the Amazon Rekognition Developer Guide.

This operation * requires permissions to perform the * rekognition:RecognizeCelebrities operation.

See * Also:

AWS * API Reference

*/ virtual Model::RecognizeCelebritiesOutcome RecognizeCelebrities(const Model::RecognizeCelebritiesRequest& request) const; /** *
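A sketch of recognizing celebrities in an S3-hosted image and keeping the stable IDs (editor's illustration; it assumes the generated Image/S3Object setters and a Celebrity model exposing GetName and GetId; names are hypothetical):

#include <aws/rekognition/RekognitionClient.h>
#include <aws/rekognition/model/RecognizeCelebritiesRequest.h>
#include <aws/rekognition/model/Image.h>
#include <aws/rekognition/model/S3Object.h>
#include <iostream>

// Recognize celebrities in an image and record their IDs for later GetCelebrityInfo calls.
void RecognizeInImage(const Aws::Rekognition::RekognitionClient& client,
                      const Aws::String& bucket, const Aws::String& key)
{
    Aws::Rekognition::Model::S3Object s3Object;
    s3Object.SetBucket(bucket);
    s3Object.SetName(key);
    Aws::Rekognition::Model::Image image;
    image.SetS3Object(s3Object);

    Aws::Rekognition::Model::RecognizeCelebritiesRequest request;
    request.SetImage(image);

    auto outcome = client.RecognizeCelebrities(request);
    if (!outcome.IsSuccess())
    {
        std::cerr << outcome.GetError().GetMessage() << std::endl;
        return;
    }
    for (const auto& celebrity : outcome.GetResult().GetCelebrityFaces())
    {
        // The ID, not the name, is the stable identifier to persist.
        std::cout << celebrity.GetName() << " (" << celebrity.GetId() << ")" << std::endl;
    }
}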

Returns an array of celebrities recognized in the input image. For more * information, see Recognizing Celebrities in the Amazon Rekognition Developer * Guide.

RecognizeCelebrities returns the 100 largest faces * in the image. It lists recognized celebrities in the CelebrityFaces * array and unrecognized faces in the UnrecognizedFaces array. * RecognizeCelebrities doesn't return celebrities whose faces aren't * among the largest 100 faces in the image.

For each celebrity recognized, * RecognizeCelebrities returns a Celebrity object. The * Celebrity object contains the celebrity name, ID, URL links to * additional information, match confidence, and a ComparedFace object * that you can use to locate the celebrity's face on the image.

Amazon * Rekognition doesn't retain information about which images a celebrity has been * recognized in. Your application must store this information and use the * Celebrity ID property as a unique identifier for the celebrity. If * you don't store the celebrity name or additional information URLs returned by * RecognizeCelebrities, you will need the ID to identify the * celebrity in a call to the GetCelebrityInfo operation.

You pass * the input image either as base64-encoded image bytes or as a reference to an * image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition * operations, passing image bytes is not supported. The image must be either a PNG * or JPEG formatted file.

For an example, see Recognizing Celebrities in * an Image in the Amazon Rekognition Developer Guide.

This operation * requires permissions to perform the * rekognition:RecognizeCelebrities operation.

See * Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::RecognizeCelebritiesOutcomeCallable RecognizeCelebritiesCallable(const Model::RecognizeCelebritiesRequest& request) const; /** *

Returns an array of celebrities recognized in the input image. For more * information, see Recognizing Celebrities in the Amazon Rekognition Developer * Guide.

RecognizeCelebrities returns the 100 largest faces * in the image. It lists recognized celebrities in the CelebrityFaces * array and unrecognized faces in the UnrecognizedFaces array. * RecognizeCelebrities doesn't return celebrities whose faces aren't * among the largest 100 faces in the image.

For each celebrity recognized, * RecognizeCelebrities returns a Celebrity object. The * Celebrity object contains the celebrity name, ID, URL links to * additional information, match confidence, and a ComparedFace object * that you can use to locate the celebrity's face on the image.

Amazon * Rekognition doesn't retain information about which images a celebrity has been * recognized in. Your application must store this information and use the * Celebrity ID property as a unique identifier for the celebrity. If * you don't store the celebrity name or additional information URLs returned by * RecognizeCelebrities, you will need the ID to identify the * celebrity in a call to the GetCelebrityInfo operation.

You pass * the input image either as base64-encoded image bytes or as a reference to an * image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition * operations, passing image bytes is not supported. The image must be either a PNG * or JPEG formatted file.

For an example, see Recognizing Celebrities in * an Image in the Amazon Rekognition Developer Guide.

This operation * requires permissions to perform the * rekognition:RecognizeCelebrities operation.

See * Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void RecognizeCelebritiesAsync(const Model::RecognizeCelebritiesRequest& request, const RecognizeCelebritiesResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

For a given input face ID, searches for matching faces in the collection the * face belongs to. You get a face ID when you add a face to the collection using * the IndexFaces operation. The operation compares the features of the * input face with faces in the specified collection.

You can also * search faces without indexing faces by using the SearchFacesByImage * operation.

The operation response returns an array of faces that * match, ordered by similarity score with the highest similarity first. More * specifically, it is an array of metadata for each face match that is found. * Along with the metadata, the response also includes a confidence * value for each face match, indicating the confidence that the specific face * matches the input face.

For an example, see Searching for a Face Using * Its Face ID in the Amazon Rekognition Developer Guide.

This operation * requires permissions to perform the rekognition:SearchFaces * action.

See Also:

AWS * API Reference

*/ virtual Model::SearchFacesOutcome SearchFaces(const Model::SearchFacesRequest& request) const; /** *
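A sketch of searching a collection by an indexed face ID (editor's illustration; it assumes the generated SetCollectionId/SetFaceId/SetFaceMatchThreshold/SetMaxFaces setters and a FaceMatch model exposing GetSimilarity and GetFace; names and thresholds are hypothetical):

#include <aws/rekognition/RekognitionClient.h>
#include <aws/rekognition/model/SearchFacesRequest.h>
#include <iostream>

// Search a collection for faces that match an already-indexed face ID.
void SearchByFaceId(const Aws::Rekognition::RekognitionClient& client,
                    const Aws::String& collectionId, const Aws::String& faceId)
{
    Aws::Rekognition::Model::SearchFacesRequest request;
    request.SetCollectionId(collectionId);
    request.SetFaceId(faceId);
    request.SetFaceMatchThreshold(90.0);   // only return strong matches
    request.SetMaxFaces(10);

    auto outcome = client.SearchFaces(request);
    if (!outcome.IsSuccess())
    {
        std::cerr << outcome.GetError().GetMessage() << std::endl;
        return;
    }
    for (const auto& match : outcome.GetResult().GetFaceMatches())
    {
        std::cout << match.GetFace().GetFaceId() << " similarity " << match.GetSimilarity() << std::endl;
    }
}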

For a given input face ID, searches for matching faces in the collection the * face belongs to. You get a face ID when you add a face to the collection using * the IndexFaces operation. The operation compares the features of the * input face with faces in the specified collection.

You can also * search faces without indexing faces by using the SearchFacesByImage * operation.

The operation response returns an array of faces that * match, ordered by similarity score with the highest similarity first. More * specifically, it is an array of metadata for each face match that is found. * Along with the metadata, the response also includes a confidence * value for each face match, indicating the confidence that the specific face * matches the input face.

For an example, see Searching for a Face Using * Its Face ID in the Amazon Rekognition Developer Guide.

This operation * requires permissions to perform the rekognition:SearchFaces * action.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::SearchFacesOutcomeCallable SearchFacesCallable(const Model::SearchFacesRequest& request) const; /** *

For a given input face ID, searches for matching faces in the collection the * face belongs to. You get a face ID when you add a face to the collection using * the IndexFaces operation. The operation compares the features of the * input face with faces in the specified collection.

You can also * search faces without indexing faces by using the SearchFacesByImage * operation.

The operation response returns an array of faces that * match, ordered by similarity score with the highest similarity first. More * specifically, it is an array of metadata for each face match that is found. * Along with the metadata, the response also includes a confidence * value for each face match, indicating the confidence that the specific face * matches the input face.

For an example, see Searching for a Face Using * Its Face ID in the Amazon Rekognition Developer Guide.

This operation * requires permissions to perform the rekognition:SearchFaces * action.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void SearchFacesAsync(const Model::SearchFacesRequest& request, const SearchFacesResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

For a given input image, first detects the largest face in the image, and * then searches the specified collection for matching faces. The operation * compares the features of the input face with faces in the specified collection. *

To search for all faces in an input image, you might first call * the IndexFaces operation, and then use the face IDs returned in * subsequent calls to the SearchFaces operation.

You can also call * the DetectFaces operation and use the bounding boxes in the * response to make face crops, which you can then pass to the * SearchFacesByImage operation.

You pass the input * image either as base64-encoded image bytes or as a reference to an image in an * Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, * passing image bytes is not supported. The image must be either a PNG or JPEG * formatted file.

The response returns an array of faces that match, * ordered by similarity score with the highest similarity first. More * specifically, it is an array of metadata for each face match found. Along with * the metadata, the response also includes a similarity indicating * how similar the face is to the input face. In the response, the operation also * returns the bounding box (and a confidence level that the bounding box contains * a face) of the face that Amazon Rekognition used for the input image.

*

For an example, see Searching for a Face Using an Image in the Amazon Rekognition * Developer Guide.

The QualityFilter input parameter allows * you to filter out detected faces that don’t meet a required quality bar. The * quality bar is based on a variety of common use cases. Use * QualityFilter to set the quality bar for filtering by specifying * LOW, MEDIUM, or HIGH. If you do not want * to filter detected faces, specify NONE. The default value is * NONE.

To use quality filtering, you need a collection * associated with version 3 of the face model or higher. To get the version of the * face model associated with a collection, call DescribeCollection.

*

This operation requires permissions to perform the * rekognition:SearchFacesByImage action.

See Also:

* AWS * API Reference

*/ virtual Model::SearchFacesByImageOutcome SearchFacesByImage(const Model::SearchFacesByImageRequest& request) const; /** *
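A sketch of searching a collection with the largest face found in an S3-hosted image (editor's illustration; it assumes the generated Image/S3Object setters, SetCollectionId and SetFaceMatchThreshold, and the GetFaceMatches result getter; names and thresholds are hypothetical):

#include <aws/rekognition/RekognitionClient.h>
#include <aws/rekognition/model/SearchFacesByImageRequest.h>
#include <aws/rekognition/model/Image.h>
#include <aws/rekognition/model/S3Object.h>
#include <iostream>

// Search a collection using the largest face detected in an S3-hosted image.
void SearchByImage(const Aws::Rekognition::RekognitionClient& client,
                   const Aws::String& collectionId,
                   const Aws::String& bucket, const Aws::String& key)
{
    Aws::Rekognition::Model::S3Object s3Object;
    s3Object.SetBucket(bucket);
    s3Object.SetName(key);
    Aws::Rekognition::Model::Image image;
    image.SetS3Object(s3Object);

    Aws::Rekognition::Model::SearchFacesByImageRequest request;
    request.SetCollectionId(collectionId);
    request.SetImage(image);
    request.SetFaceMatchThreshold(95.0);

    auto outcome = client.SearchFacesByImage(request);
    if (!outcome.IsSuccess())
    {
        std::cerr << outcome.GetError().GetMessage() << std::endl;
        return;
    }
    for (const auto& match : outcome.GetResult().GetFaceMatches())
    {
        std::cout << match.GetFace().GetFaceId() << " similarity " << match.GetSimilarity() << std::endl;
    }
}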

For a given input image, first detects the largest face in the image, and * then searches the specified collection for matching faces. The operation * compares the features of the input face with faces in the specified collection. *

To search for all faces in an input image, you might first call * the IndexFaces operation, and then use the face IDs returned in * subsequent calls to the SearchFaces operation.

You can also call * the DetectFaces operation and use the bounding boxes in the * response to make face crops, which you can then pass to the * SearchFacesByImage operation.

You pass the input * image either as base64-encoded image bytes or as a reference to an image in an * Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, * passing image bytes is not supported. The image must be either a PNG or JPEG * formatted file.

The response returns an array of faces that match, * ordered by similarity score with the highest similarity first. More * specifically, it is an array of metadata for each face match found. Along with * the metadata, the response also includes a similarity indicating * how similar the face is to the input face. In the response, the operation also * returns the bounding box (and a confidence level that the bounding box contains * a face) of the face that Amazon Rekognition used for the input image.

*

For an example, see Searching for a Face Using an Image in the Amazon Rekognition * Developer Guide.

The QualityFilter input parameter allows * you to filter out detected faces that don’t meet a required quality bar. The * quality bar is based on a variety of common use cases. Use * QualityFilter to set the quality bar for filtering by specifying * LOW, MEDIUM, or HIGH. If you do not want * to filter detected faces, specify NONE. The default value is * NONE.

To use quality filtering, you need a collection * associated with version 3 of the face model or higher. To get the version of the * face model associated with a collection, call DescribeCollection.

*

This operation requires permissions to perform the * rekognition:SearchFacesByImage action.

See Also:

* AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::SearchFacesByImageOutcomeCallable SearchFacesByImageCallable(const Model::SearchFacesByImageRequest& request) const; /** *

For a given input image, first detects the largest face in the image, and * then searches the specified collection for matching faces. The operation * compares the features of the input face with faces in the specified collection. *

To search for all faces in an input image, you might first call * the IndexFaces operation, and then use the face IDs returned in * subsequent calls to the SearchFaces operation.

You can also call * the DetectFaces operation and use the bounding boxes in the * response to make face crops, which you can then pass to the * SearchFacesByImage operation.

You pass the input * image either as base64-encoded image bytes or as a reference to an image in an * Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, * passing image bytes is not supported. The image must be either a PNG or JPEG * formatted file.

The response returns an array of faces that match, * ordered by similarity score with the highest similarity first. More * specifically, it is an array of metadata for each face match found. Along with * the metadata, the response also includes a similarity indicating * how similar the face is to the input face. In the response, the operation also * returns the bounding box (and a confidence level that the bounding box contains * a face) of the face that Amazon Rekognition used for the input image.

*

For an example, see Searching for a Face Using an Image in the Amazon Rekognition * Developer Guide.

The QualityFilter input parameter allows * you to filter out detected faces that don’t meet a required quality bar. The * quality bar is based on a variety of common use cases. Use * QualityFilter to set the quality bar for filtering by specifying * LOW, MEDIUM, or HIGH. If you do not want * to filter detected faces, specify NONE. The default value is * NONE.

To use quality filtering, you need a collection * associated with version 3 of the face model or higher. To get the version of the * face model associated with a collection, call DescribeCollection.

*

This operation requires permissions to perform the * rekognition:SearchFacesByImage action.

See Also:

* AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void SearchFacesByImageAsync(const Model::SearchFacesByImageRequest& request, const SearchFacesByImageResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Starts asynchronous recognition of celebrities in a stored video.

*

Amazon Rekognition Video can detect celebrities in a video. The video must be stored in * an Amazon S3 bucket. Use Video to specify the bucket name and the * filename of the video. StartCelebrityRecognition returns a job * identifier (JobId) which you use to get the results of the * analysis. When celebrity recognition analysis is finished, Amazon Rekognition * Video publishes a completion status to the Amazon Simple Notification Service * topic that you specify in NotificationChannel. To get the results * of the celebrity recognition analysis, first check that the status value * published to the Amazon SNS topic is SUCCEEDED. If so, call * GetCelebrityRecognition and pass the job identifier (JobId) * from the initial call to StartCelebrityRecognition.

For * more information, see Recognizing Celebrities in the Amazon Rekognition * Developer Guide.

See Also:

AWS * API Reference

*/ virtual Model::StartCelebrityRecognitionOutcome StartCelebrityRecognition(const Model::StartCelebrityRecognitionRequest& request) const; /** *
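A sketch of starting the job and capturing the JobId (editor's illustration; it assumes the generated Video/S3Object and NotificationChannel setters, including SetSNSTopicArn and SetRoleArn, and a GetJobId result getter; the helper name and ARNs are hypothetical):

#include <aws/rekognition/RekognitionClient.h>
#include <aws/rekognition/model/StartCelebrityRecognitionRequest.h>
#include <aws/rekognition/model/Video.h>
#include <aws/rekognition/model/S3Object.h>
#include <aws/rekognition/model/NotificationChannel.h>
#include <iostream>

// Kick off celebrity recognition on a stored video; completion is published to the SNS topic.
void StartCelebrityJob(const Aws::Rekognition::RekognitionClient& client,
                       const Aws::String& bucket, const Aws::String& key,
                       const Aws::String& topicArn, const Aws::String& roleArn)
{
    Aws::Rekognition::Model::S3Object s3Object;
    s3Object.SetBucket(bucket);
    s3Object.SetName(key);
    Aws::Rekognition::Model::Video video;
    video.SetS3Object(s3Object);

    Aws::Rekognition::Model::NotificationChannel channel;
    channel.SetSNSTopicArn(topicArn);
    channel.SetRoleArn(roleArn);

    Aws::Rekognition::Model::StartCelebrityRecognitionRequest request;
    request.SetVideo(video);
    request.SetNotificationChannel(channel);

    auto outcome = client.StartCelebrityRecognition(request);
    if (outcome.IsSuccess())
    {
        // Keep the JobId: GetCelebrityRecognition needs it once SNS reports SUCCEEDED.
        std::cout << "JobId: " << outcome.GetResult().GetJobId() << std::endl;
    }
    else
    {
        std::cerr << outcome.GetError().GetMessage() << std::endl;
    }
}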

Starts asynchronous recognition of celebrities in a stored video.

*

Amazon Rekognition Video can detect celebrities in a video. The video must be stored in * an Amazon S3 bucket. Use Video to specify the bucket name and the * filename of the video. StartCelebrityRecognition returns a job * identifier (JobId) which you use to get the results of the * analysis. When celebrity recognition analysis is finished, Amazon Rekognition * Video publishes a completion status to the Amazon Simple Notification Service * topic that you specify in NotificationChannel. To get the results * of the celebrity recognition analysis, first check that the status value * published to the Amazon SNS topic is SUCCEEDED. If so, call * GetCelebrityRecognition and pass the job identifier (JobId) * from the initial call to StartCelebrityRecognition.

For * more information, see Recognizing Celebrities in the Amazon Rekognition * Developer Guide.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::StartCelebrityRecognitionOutcomeCallable StartCelebrityRecognitionCallable(const Model::StartCelebrityRecognitionRequest& request) const; /** *

Starts asynchronous recognition of celebrities in a stored video.

*

Amazon Rekognition Video can detect celebrities in a video. The video must be stored in * an Amazon S3 bucket. Use Video to specify the bucket name and the * filename of the video. StartCelebrityRecognition returns a job * identifier (JobId) which you use to get the results of the * analysis. When celebrity recognition analysis is finished, Amazon Rekognition * Video publishes a completion status to the Amazon Simple Notification Service * topic that you specify in NotificationChannel. To get the results * of the celebrity recognition analysis, first check that the status value * published to the Amazon SNS topic is SUCCEEDED. If so, call * GetCelebrityRecognition and pass the job identifier (JobId) * from the initial call to StartCelebrityRecognition.

For * more information, see Recognizing Celebrities in the Amazon Rekognition * Developer Guide.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void StartCelebrityRecognitionAsync(const Model::StartCelebrityRecognitionRequest& request, const StartCelebrityRecognitionResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Starts asynchronous detection of unsafe content in a stored video.

*

Amazon Rekognition Video can moderate content in a video stored in an Amazon * S3 bucket. Use Video to specify the bucket name and the filename of the * video. StartContentModeration returns a job identifier * (JobId) which you use to get the results of the analysis. When * unsafe content analysis is finished, Amazon Rekognition Video publishes a * completion status to the Amazon Simple Notification Service topic that you * specify in NotificationChannel.

To get the results of the * unsafe content analysis, first check that the status value published to the * Amazon SNS topic is SUCCEEDED. If so, call * GetContentModeration and pass the job identifier (JobId) * from the initial call to StartContentModeration.

For more * information, see Detecting Unsafe Content in the Amazon Rekognition Developer * Guide.

See Also:

AWS * API Reference

*/ virtual Model::StartContentModerationOutcome StartContentModeration(const Model::StartContentModerationRequest& request) const; /** *
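A compact sketch, following the same Video/S3Object setup as the celebrity-recognition example above (editor's illustration; it assumes the generated SetVideo and SetMinConfidence setters and a GetJobId result getter; the NotificationChannel is omitted here and completion can instead be polled with GetContentModeration):

#include <aws/rekognition/RekognitionClient.h>
#include <aws/rekognition/model/StartContentModerationRequest.h>
#include <aws/rekognition/model/Video.h>
#include <aws/rekognition/model/S3Object.h>
#include <iostream>

// Start unsafe-content analysis on a stored video and print the job identifier.
void StartModerationJob(const Aws::Rekognition::RekognitionClient& client,
                        const Aws::String& bucket, const Aws::String& key)
{
    Aws::Rekognition::Model::S3Object s3Object;
    s3Object.SetBucket(bucket);
    s3Object.SetName(key);
    Aws::Rekognition::Model::Video video;
    video.SetS3Object(s3Object);

    Aws::Rekognition::Model::StartContentModerationRequest request;
    request.SetVideo(video);
    request.SetMinConfidence(70.0);   // assumed optional confidence floor for returned labels

    auto outcome = client.StartContentModeration(request);
    if (outcome.IsSuccess())
        std::cout << "JobId: " << outcome.GetResult().GetJobId() << std::endl;
    else
        std::cerr << outcome.GetError().GetMessage() << std::endl;
}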

Starts asynchronous detection of unsafe content in a stored video.

*

Amazon Rekognition Video can moderate content in a video stored in an Amazon * S3 bucket. Use Video to specify the bucket name and the filename of the * video. StartContentModeration returns a job identifier * (JobId) which you use to get the results of the analysis. When * unsafe content analysis is finished, Amazon Rekognition Video publishes a * completion status to the Amazon Simple Notification Service topic that you * specify in NotificationChannel.

To get the results of the * unsafe content analysis, first check that the status value published to the * Amazon SNS topic is SUCCEEDED. If so, call * GetContentModeration and pass the job identifier (JobId) * from the initial call to StartContentModeration.

For more * information, see Detecting Unsafe Content in the Amazon Rekognition Developer * Guide.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::StartContentModerationOutcomeCallable StartContentModerationCallable(const Model::StartContentModerationRequest& request) const; /** *

Starts asynchronous detection of unsafe content in a stored video.

*

Amazon Rekognition Video can moderate content in a video stored in an Amazon * S3 bucket. Use Video to specify the bucket name and the filename of the * video. StartContentModeration returns a job identifier * (JobId) which you use to get the results of the analysis. When * unsafe content analysis is finished, Amazon Rekognition Video publishes a * completion status to the Amazon Simple Notification Service topic that you * specify in NotificationChannel.

To get the results of the * unsafe content analysis, first check that the status value published to the * Amazon SNS topic is SUCCEEDED. If so, call * GetContentModeration and pass the job identifier (JobId) * from the initial call to StartContentModeration.

For more * information, see Detecting Unsafe Content in the Amazon Rekognition Developer * Guide.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void StartContentModerationAsync(const Model::StartContentModerationRequest& request, const StartContentModerationResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Starts asynchronous detection of faces in a stored video.

Amazon * Rekognition Video can detect faces in a video stored in an Amazon S3 bucket. Use * Video to specify the bucket name and the filename of the video. * StartFaceDetection returns a job identifier (JobId) * that you use to get the results of the operation. When face detection is * finished, Amazon Rekognition Video publishes a completion status to the Amazon * Simple Notification Service topic that you specify in * NotificationChannel. To get the results of the face detection * operation, first check that the status value published to the Amazon SNS topic * is SUCCEEDED. If so, call GetFaceDetection and pass the job * identifier (JobId) from the initial call to * StartFaceDetection.

For more information, see Detecting * Faces in a Stored Video in the Amazon Rekognition Developer Guide.

See * Also:

AWS * API Reference

*/ virtual Model::StartFaceDetectionOutcome StartFaceDetection(const Model::StartFaceDetectionRequest& request) const; /** *
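A compact sketch mirroring the earlier Start* examples (editor's illustration; it assumes the generated Video/S3Object setters and a GetJobId result getter; SNS notification setup is omitted and the job can be polled with GetFaceDetection):

#include <aws/rekognition/RekognitionClient.h>
#include <aws/rekognition/model/StartFaceDetectionRequest.h>
#include <aws/rekognition/model/Video.h>
#include <aws/rekognition/model/S3Object.h>
#include <iostream>

// Start face detection on a stored video; keep the JobId for GetFaceDetection.
void StartFaceDetectionJob(const Aws::Rekognition::RekognitionClient& client,
                           const Aws::String& bucket, const Aws::String& key)
{
    Aws::Rekognition::Model::S3Object s3Object;
    s3Object.SetBucket(bucket);
    s3Object.SetName(key);
    Aws::Rekognition::Model::Video video;
    video.SetS3Object(s3Object);

    Aws::Rekognition::Model::StartFaceDetectionRequest request;
    request.SetVideo(video);

    auto outcome = client.StartFaceDetection(request);
    if (outcome.IsSuccess())
        std::cout << "JobId: " << outcome.GetResult().GetJobId() << std::endl;
    else
        std::cerr << outcome.GetError().GetMessage() << std::endl;
}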

Starts asynchronous detection of faces in a stored video.

Amazon * Rekognition Video can detect faces in a video stored in an Amazon S3 bucket. Use * Video to specify the bucket name and the filename of the video. * StartFaceDetection returns a job identifier (JobId) * that you use to get the results of the operation. When face detection is * finished, Amazon Rekognition Video publishes a completion status to the Amazon * Simple Notification Service topic that you specify in * NotificationChannel. To get the results of the face detection * operation, first check that the status value published to the Amazon SNS topic * is SUCCEEDED. If so, call GetFaceDetection and pass the job * identifier (JobId) from the initial call to * StartFaceDetection.

For more information, see Detecting * Faces in a Stored Video in the Amazon Rekognition Developer Guide.

See * Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::StartFaceDetectionOutcomeCallable StartFaceDetectionCallable(const Model::StartFaceDetectionRequest& request) const; /** *

Starts asynchronous detection of faces in a stored video.

Amazon * Rekognition Video can detect faces in a video stored in an Amazon S3 bucket. Use * Video to specify the bucket name and the filename of the video. * StartFaceDetection returns a job identifier (JobId) * that you use to get the results of the operation. When face detection is * finished, Amazon Rekognition Video publishes a completion status to the Amazon * Simple Notification Service topic that you specify in * NotificationChannel. To get the results of the face detection * operation, first check that the status value published to the Amazon SNS topic * is SUCCEEDED. If so, call GetFaceDetection and pass the job * identifier (JobId) from the initial call to * StartFaceDetection.

For more information, see Detecting * Faces in a Stored Video in the Amazon Rekognition Developer Guide.

See * Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void StartFaceDetectionAsync(const Model::StartFaceDetectionRequest& request, const StartFaceDetectionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Starts the asynchronous search for faces in a collection that match the faces * of persons detected in a stored video.

The video must be stored in an * Amazon S3 bucket. Use Video to specify the bucket name and the filename * of the video. StartFaceSearch returns a job identifier * (JobId) which you use to get the search results once the search has * completed. When searching is finished, Amazon Rekognition Video publishes a * completion status to the Amazon Simple Notification Service topic that you * specify in NotificationChannel. To get the search results, first * check that the status value published to the Amazon SNS topic is * SUCCEEDED. If so, call GetFaceSearch and pass the job * identifier (JobId) from the initial call to * StartFaceSearch. For more information, see * procedure-person-search-videos.

See Also:

AWS * API Reference
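A minimal sketch of a StartFaceSearch call (hedged; client, bucket, key, and collection id are assumed placeholders):

    Model::StartFaceSearchRequest request;
    request.SetVideo(Model::Video().WithS3Object(Model::S3Object()
        .WithBucket("example-bucket").WithName("videos/example.mp4")));   // hypothetical
    request.SetCollectionId("example-collection");                        // hypothetical collection
    auto outcome = client.StartFaceSearch(request);
    if (outcome.IsSuccess())
    {
        // Pass this JobId to GetFaceSearch once the SNS status is SUCCEEDED.
        Aws::String jobId = outcome.GetResult().GetJobId();
    }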

*/ virtual Model::StartFaceSearchOutcome StartFaceSearch(const Model::StartFaceSearchRequest& request) const; /** *

Starts the asynchronous search for faces in a collection that match the faces * of persons detected in a stored video.

The video must be stored in an * Amazon S3 bucket. Use Video to specify the bucket name and the filename * of the video. StartFaceSearch returns a job identifier * (JobId) which you use to get the search results once the search has * completed. When searching is finished, Amazon Rekognition Video publishes a * completion status to the Amazon Simple Notification Service topic that you * specify in NotificationChannel. To get the search results, first * check that the status value published to the Amazon SNS topic is * SUCCEEDED. If so, call GetFaceSearch and pass the job * identifier (JobId) from the initial call to * StartFaceSearch. For more information, see * procedure-person-search-videos.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::StartFaceSearchOutcomeCallable StartFaceSearchCallable(const Model::StartFaceSearchRequest& request) const; /** *

Starts the asynchronous search for faces in a collection that match the faces * of persons detected in a stored video.

The video must be stored in an * Amazon S3 bucket. Use Video to specify the bucket name and the filename * of the video. StartFaceSearch returns a job identifier * (JobId) which you use to get the search results once the search has * completed. When searching is finished, Amazon Rekognition Video publishes a * completion status to the Amazon Simple Notification Service topic that you * specify in NotificationChannel. To get the search results, first * check that the status value published to the Amazon SNS topic is * SUCCEEDED. If so, call GetFaceSearch and pass the job * identifier (JobId) from the initial call to * StartFaceSearch. For more information, see * procedure-person-search-videos.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void StartFaceSearchAsync(const Model::StartFaceSearchRequest& request, const StartFaceSearchResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Starts asynchronous detection of labels in a stored video.

Amazon * Rekognition Video can detect labels in a video. Labels are instances of * real-world entities. This includes objects like flower, tree, and table; events * like wedding, graduation, and birthday party; concepts like landscape, evening, * and nature; and activities like a person getting out of a car or a person * skiing.

The video must be stored in an Amazon S3 bucket. Use Video * to specify the bucket name and the filename of the video. * StartLabelDetection returns a job identifier (JobId) * which you use to get the results of the operation. When label detection is * finished, Amazon Rekognition Video publishes a completion status to the Amazon * Simple Notification Service topic that you specify in * NotificationChannel.

To get the results of the label * detection operation, first check that the status value published to the Amazon * SNS topic is SUCCEEDED. If so, call GetLabelDetection and * pass the job identifier (JobId) from the initial call to * StartLabelDetection.

See Also:

AWS * API Reference
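A minimal sketch (hedged; client and the S3 values are assumed placeholders). MinConfidence is optional and shown only to illustrate the request shape:

    Model::StartLabelDetectionRequest request;
    request.SetVideo(Model::Video().WithS3Object(Model::S3Object()
        .WithBucket("example-bucket").WithName("videos/example.mp4")));   // hypothetical
    request.SetMinConfidence(70.0);   // return only labels detected with at least this confidence
    auto outcome = client.StartLabelDetection(request);
    if (outcome.IsSuccess())
    {
        // Retrieve results with GetLabelDetection once the SNS status is SUCCEEDED.
        Aws::String jobId = outcome.GetResult().GetJobId();
    }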

*/ virtual Model::StartLabelDetectionOutcome StartLabelDetection(const Model::StartLabelDetectionRequest& request) const; /** *

Starts asynchronous detection of labels in a stored video.

Amazon * Rekognition Video can detect labels in a video. Labels are instances of * real-world entities. This includes objects like flower, tree, and table; events * like wedding, graduation, and birthday party; concepts like landscape, evening, * and nature; and activities like a person getting out of a car or a person * skiing.

The video must be stored in an Amazon S3 bucket. Use Video * to specify the bucket name and the filename of the video. * StartLabelDetection returns a job identifier (JobId) * which you use to get the results of the operation. When label detection is * finished, Amazon Rekognition Video publishes a completion status to the Amazon * Simple Notification Service topic that you specify in * NotificationChannel.

To get the results of the label * detection operation, first check that the status value published to the Amazon * SNS topic is SUCCEEDED. If so, call GetLabelDetection and * pass the job identifier (JobId) from the initial call to * StartLabelDetection.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::StartLabelDetectionOutcomeCallable StartLabelDetectionCallable(const Model::StartLabelDetectionRequest& request) const; /** *

Starts asynchronous detection of labels in a stored video.

Amazon * Rekognition Video can detect labels in a video. Labels are instances of * real-world entities. This includes objects like flower, tree, and table; events * like wedding, graduation, and birthday party; concepts like landscape, evening, * and nature; and activities like a person getting out of a car or a person * skiing.

The video must be stored in an Amazon S3 bucket. Use Video * to specify the bucket name and the filename of the video. * StartLabelDetection returns a job identifier (JobId) * which you use to get the results of the operation. When label detection is * finished, Amazon Rekognition Video publishes a completion status to the Amazon * Simple Notification Service topic that you specify in * NotificationChannel.

To get the results of the label * detection operation, first check that the status value published to the Amazon * SNS topic is SUCCEEDED. If so, call GetLabelDetection and * pass the job identifier (JobId) from the initial call to * StartLabelDetection.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void StartLabelDetectionAsync(const Model::StartLabelDetectionRequest& request, const StartLabelDetectionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Starts the asynchronous tracking of a person's path in a stored video.

Amazon Rekognition Video can track the path of people in a video stored in an * Amazon S3 bucket. Use Video to specify the bucket name and the filename * of the video. StartPersonTracking returns a job identifier * (JobId) which you use to get the results of the operation. When * person tracking is finished, Amazon Rekognition publishes a completion status to * the Amazon Simple Notification Service topic that you specify in * NotificationChannel.

To get the results of the person * detection operation, first check that the status value published to the Amazon * SNS topic is SUCCEEDED. If so, call GetPersonTracking and * pass the job identifier (JobId) from the initial call to * StartPersonTracking.

See Also:

AWS * API Reference
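A minimal sketch (hedged; client and the S3 values are placeholders):

    Model::StartPersonTrackingRequest request;
    request.SetVideo(Model::Video().WithS3Object(Model::S3Object()
        .WithBucket("example-bucket").WithName("videos/example.mp4")));   // hypothetical
    auto outcome = client.StartPersonTracking(request);
    if (outcome.IsSuccess())
    {
        // Pass this JobId to GetPersonTracking once the SNS status is SUCCEEDED.
        Aws::String jobId = outcome.GetResult().GetJobId();
    }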

*/ virtual Model::StartPersonTrackingOutcome StartPersonTracking(const Model::StartPersonTrackingRequest& request) const; /** *

Starts the asynchronous tracking of a person's path in a stored video.

Amazon Rekognition Video can track the path of people in a video stored in an * Amazon S3 bucket. Use Video to specify the bucket name and the filename * of the video. StartPersonTracking returns a job identifier * (JobId) which you use to get the results of the operation. When * person tracking is finished, Amazon Rekognition publishes a completion status to * the Amazon Simple Notification Service topic that you specify in * NotificationChannel.

To get the results of the person * detection operation, first check that the status value published to the Amazon * SNS topic is SUCCEEDED. If so, call GetPersonTracking and * pass the job identifier (JobId) from the initial call to * StartPersonTracking.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::StartPersonTrackingOutcomeCallable StartPersonTrackingCallable(const Model::StartPersonTrackingRequest& request) const; /** *

Starts the asynchronous tracking of a person's path in a stored video.

Amazon Rekognition Video can track the path of people in a video stored in an * Amazon S3 bucket. Use Video to specify the bucket name and the filename * of the video. StartPersonTracking returns a job identifier * (JobId) which you use to get the results of the operation. When * person tracking is finished, Amazon Rekognition publishes a completion status to * the Amazon Simple Notification Service topic that you specify in * NotificationChannel.

To get the results of the person * detection operation, first check that the status value published to the Amazon * SNS topic is SUCCEEDED. If so, call GetPersonTracking and * pass the job identifier (JobId) from the initial call to * StartPersonTracking.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void StartPersonTrackingAsync(const Model::StartPersonTrackingRequest& request, const StartPersonTrackingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Starts the running of the version of a model. Starting a model takes a while * to complete. To check the current state of the model, use * DescribeProjectVersions.

Once the model is running, you can detect * custom labels in new images by calling DetectCustomLabels.

You are charged for the amount of time that the model is running. To stop a * running model, call StopProjectVersion.

This operation * requires permissions to perform the rekognition:StartProjectVersion * action.

See Also:

AWS * API Reference
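A minimal sketch (hedged; the project version ARN below is a placeholder, not a real resource):

    Model::StartProjectVersionRequest request;
    request.SetProjectVersionArn("arn:aws:rekognition:us-east-1:123456789012:"
                                 "project/example/version/example.2020-01-01T00.00.00/1");   // placeholder ARN
    request.SetMinInferenceUnits(1);   // minimum inference capacity to provision
    auto outcome = client.StartProjectVersion(request);
    if (outcome.IsSuccess())
    {
        // Poll DescribeProjectVersions until the version status reaches RUNNING, then call
        // DetectCustomLabels; remember to call StopProjectVersion when finished to stop charges.
    }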

*/ virtual Model::StartProjectVersionOutcome StartProjectVersion(const Model::StartProjectVersionRequest& request) const; /** *

Starts the running of the version of a model. Starting a model takes a while * to complete. To check the current state of the model, use * DescribeProjectVersions.

Once the model is running, you can detect * custom labels in new images by calling DetectCustomLabels.

You are charged for the amount of time that the model is running. To stop a * running model, call StopProjectVersion.

This operation * requires permissions to perform the rekognition:StartProjectVersion * action.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::StartProjectVersionOutcomeCallable StartProjectVersionCallable(const Model::StartProjectVersionRequest& request) const; /** *

Starts the running of the version of a model. Starting a model takes a while * to complete. To check the current state of the model, use * DescribeProjectVersions.

Once the model is running, you can detect * custom labels in new images by calling DetectCustomLabels.

You are charged for the amount of time that the model is running. To stop a * running model, call StopProjectVersion.

This operation * requires permissions to perform the rekognition:StartProjectVersion * action.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void StartProjectVersionAsync(const Model::StartProjectVersionRequest& request, const StartProjectVersionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Starts asynchronous detection of segments in a stored video.

Amazon Rekognition Video can detect segments in a video stored in an Amazon * S3 bucket. Use Video to specify the bucket name and the filename of the * video. StartSegmentDetection returns a job identifier * (JobId) which you use to get the results of the operation. When * segment detection is finished, Amazon Rekognition Video publishes a completion * status to the Amazon Simple Notification Service topic that you specify in * NotificationChannel.

You can use the Filters * (StartSegmentDetectionFilters) input parameter to specify the minimum * detection confidence returned in the response. Within Filters, use * ShotFilter (StartShotDetectionFilter) to filter detected * shots. Use TechnicalCueFilter * (StartTechnicalCueDetectionFilter) to filter technical cues.

To * get the results of the segment detection operation, first check that the status * value published to the Amazon SNS topic is SUCCEEDED. If so, call * GetSegmentDetection and pass the job identifier (JobId) from * the initial call to StartSegmentDetection.

For more * information, see Detecting Video Segments in Stored Video in the Amazon * Rekognition Developer Guide.

See Also:

AWS * API Reference
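A minimal sketch (hedged; client and the S3 values are placeholders):

    Model::StartSegmentDetectionRequest request;
    request.SetVideo(Model::Video().WithS3Object(Model::S3Object()
        .WithBucket("example-bucket").WithName("videos/example.mp4")));   // hypothetical
    request.AddSegmentTypes(Model::SegmentType::SHOT);                    // detect shot boundaries
    request.AddSegmentTypes(Model::SegmentType::TECHNICAL_CUE);           // and technical cues
    auto outcome = client.StartSegmentDetection(request);
    if (outcome.IsSuccess())
    {
        // Retrieve results with GetSegmentDetection once the SNS status is SUCCEEDED.
        Aws::String jobId = outcome.GetResult().GetJobId();
    }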

*/ virtual Model::StartSegmentDetectionOutcome StartSegmentDetection(const Model::StartSegmentDetectionRequest& request) const; /** *

Starts asynchronous detection of segments in a stored video.

Amazon Rekognition Video can detect segments in a video stored in an Amazon * S3 bucket. Use Video to specify the bucket name and the filename of the * video. StartSegmentDetection returns a job identifier * (JobId) which you use to get the results of the operation. When * segment detection is finished, Amazon Rekognition Video publishes a completion * status to the Amazon Simple Notification Service topic that you specify in * NotificationChannel.

You can use the Filters * (StartSegmentDetectionFilters) input parameter to specify the minimum * detection confidence returned in the response. Within Filters, use * ShotFilter (StartShotDetectionFilter) to filter detected * shots. Use TechnicalCueFilter * (StartTechnicalCueDetectionFilter) to filter technical cues.

To * get the results of the segment detection operation, first check that the status * value published to the Amazon SNS topic is SUCCEEDED. If so, call * GetSegmentDetection and pass the job identifier (JobId) from * the initial call to StartSegmentDetection.

For more * information, see Detecting Video Segments in Stored Video in the Amazon * Rekognition Developer Guide.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::StartSegmentDetectionOutcomeCallable StartSegmentDetectionCallable(const Model::StartSegmentDetectionRequest& request) const; /** *

Starts asynchronous detection of segments in a stored video.

Amazon Rekognition Video can detect segments in a video stored in an Amazon * S3 bucket. Use Video to specify the bucket name and the filename of the * video. StartSegmentDetection returns a job identifier * (JobId) which you use to get the results of the operation. When * segment detection is finished, Amazon Rekognition Video publishes a completion * status to the Amazon Simple Notification Service topic that you specify in * NotificationChannel.

You can use the Filters * (StartSegmentDetectionFilters) input parameter to specify the minimum * detection confidence returned in the response. Within Filters, use * ShotFilter (StartShotDetectionFilter) to filter detected * shots. Use TechnicalCueFilter * (StartTechnicalCueDetectionFilter) to filter technical cues.

To * get the results of the segment detection operation, first check that the status * value published to the Amazon SNS topic is SUCCEEDED. If so, call * GetSegmentDetection and pass the job identifier (JobId) from * the initial call to StartSegmentDetection.

For more * information, see Detecting Video Segments in Stored Video in the Amazon * Rekognition Developer Guide.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void StartSegmentDetectionAsync(const Model::StartSegmentDetectionRequest& request, const StartSegmentDetectionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Starts processing a stream processor. You create a stream processor by * calling CreateStreamProcessor. To tell StartStreamProcessor * which stream processor to start, use the value of the Name field * specified in the call to CreateStreamProcessor.

See * Also:

AWS * API Reference
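A minimal sketch (hedged; the processor name is a placeholder and must match the Name given to CreateStreamProcessor):

    Model::StartStreamProcessorRequest request;
    request.SetName("example-stream-processor");   // hypothetical name
    auto outcome = client.StartStreamProcessor(request);
    if (!outcome.IsSuccess())
    {
        // Inspect outcome.GetError().GetMessage() for the failure reason.
    }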

*/ virtual Model::StartStreamProcessorOutcome StartStreamProcessor(const Model::StartStreamProcessorRequest& request) const; /** *

Starts processing a stream processor. You create a stream processor by * calling CreateStreamProcessor. To tell StartStreamProcessor * which stream processor to start, use the value of the Name field * specified in the call to CreateStreamProcessor.

See * Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::StartStreamProcessorOutcomeCallable StartStreamProcessorCallable(const Model::StartStreamProcessorRequest& request) const; /** *

Starts processing a stream processor. You create a stream processor by * calling CreateStreamProcessor. To tell StartStreamProcessor * which stream processor to start, use the value of the Name field * specified in the call to CreateStreamProcessor.

See * Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void StartStreamProcessorAsync(const Model::StartStreamProcessorRequest& request, const StartStreamProcessorResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Starts asynchronous detection of text in a stored video.

Amazon * Rekognition Video can detect text in a video stored in an Amazon S3 bucket. Use * Video to specify the bucket name and the filename of the video. * StartTextDetection returns a job identifier (JobId) * which you use to get the results of the operation. When text detection is * finished, Amazon Rekognition Video publishes a completion status to the Amazon * Simple Notification Service topic that you specify in * NotificationChannel.

To get the results of the text * detection operation, first check that the status value published to the Amazon * SNS topic is SUCCEEDED. If so, call GetTextDetection and * pass the job identifier (JobId) from the initial call to * StartTextDetection.

See Also:

AWS * API Reference
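A minimal sketch (hedged; client and the S3 values are placeholders). The Callable variant is used here only to illustrate the std::future-based pattern shared by all operations:

    Model::StartTextDetectionRequest request;
    request.SetVideo(Model::Video().WithS3Object(Model::S3Object()
        .WithBucket("example-bucket").WithName("videos/example.mp4")));   // hypothetical
    auto futureOutcome = client.StartTextDetectionCallable(request);      // returns a future
    auto outcome = futureOutcome.get();                                   // blocks until complete
    if (outcome.IsSuccess())
    {
        // Pass this JobId to GetTextDetection once the SNS status is SUCCEEDED.
        Aws::String jobId = outcome.GetResult().GetJobId();
    }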

*/ virtual Model::StartTextDetectionOutcome StartTextDetection(const Model::StartTextDetectionRequest& request) const; /** *

Starts asynchronous detection of text in a stored video.

Amazon * Rekognition Video can detect text in a video stored in an Amazon S3 bucket. Use * Video to specify the bucket name and the filename of the video. * StartTextDetection returns a job identifier (JobId) * which you use to get the results of the operation. When text detection is * finished, Amazon Rekognition Video publishes a completion status to the Amazon * Simple Notification Service topic that you specify in * NotificationChannel.

To get the results of the text * detection operation, first check that the status value published to the Amazon * SNS topic is SUCCEEDED. If so, call GetTextDetection and * pass the job identifier (JobId) from the initial call to * StartTextDetection.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::StartTextDetectionOutcomeCallable StartTextDetectionCallable(const Model::StartTextDetectionRequest& request) const; /** *

Starts asynchronous detection of text in a stored video.

Amazon * Rekognition Video can detect text in a video stored in an Amazon S3 bucket. Use * Video to specify the bucket name and the filename of the video. * StartTextDetection returns a job identifier (JobId) * which you use to get the results of the operation. When text detection is * finished, Amazon Rekognition Video publishes a completion status to the Amazon * Simple Notification Service topic that you specify in * NotificationChannel.

To get the results of the text * detection operation, first check that the status value published to the Amazon * SNS topic is SUCCEEDED. If so, call GetTextDetection and * pass the job identifier (JobId) from the initial call to * StartTextDetection.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void StartTextDetectionAsync(const Model::StartTextDetectionRequest& request, const StartTextDetectionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Stops a running model. The operation might take a while to complete. To check * the current status, call DescribeProjectVersions.

See * Also:

AWS * API Reference
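A minimal sketch (hedged; the project version ARN is a placeholder):

    Model::StopProjectVersionRequest request;
    request.SetProjectVersionArn("arn:aws:rekognition:us-east-1:123456789012:"
                                 "project/example/version/example.2020-01-01T00.00.00/1");   // placeholder ARN
    auto outcome = client.StopProjectVersion(request);
    if (outcome.IsSuccess())
    {
        // Poll DescribeProjectVersions until the version status reaches STOPPED.
    }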

*/ virtual Model::StopProjectVersionOutcome StopProjectVersion(const Model::StopProjectVersionRequest& request) const; /** *

Stops a running model. The operation might take a while to complete. To check * the current status, call DescribeProjectVersions.

See * Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::StopProjectVersionOutcomeCallable StopProjectVersionCallable(const Model::StopProjectVersionRequest& request) const; /** *

Stops a running model. The operation might take a while to complete. To check * the current status, call DescribeProjectVersions.

See * Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void StopProjectVersionAsync(const Model::StopProjectVersionRequest& request, const StopProjectVersionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Stops a running stream processor that was created by * CreateStreamProcessor.

See Also:

AWS * API Reference
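A minimal sketch (hedged; the name is a placeholder and must match the Name used with CreateStreamProcessor):

    Model::StopStreamProcessorRequest request;
    request.SetName("example-stream-processor");   // hypothetical name
    auto outcome = client.StopStreamProcessor(request);
    if (!outcome.IsSuccess())
    {
        // Inspect outcome.GetError().GetMessage() for details.
    }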

*/ virtual Model::StopStreamProcessorOutcome StopStreamProcessor(const Model::StopStreamProcessorRequest& request) const; /** *

Stops a running stream processor that was created by * CreateStreamProcessor.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::StopStreamProcessorOutcomeCallable StopStreamProcessorCallable(const Model::StopStreamProcessorRequest& request) const; /** *

Stops a running stream processor that was created by * CreateStreamProcessor.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */
virtual void StopStreamProcessorAsync(const Model::StopStreamProcessorRequest& request, const StopStreamProcessorResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

void OverrideEndpoint(const Aws::String& endpoint);

private:
void init(const Aws::Client::ClientConfiguration& clientConfiguration);
void CompareFacesAsyncHelper(const Model::CompareFacesRequest& request, const CompareFacesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void CreateCollectionAsyncHelper(const Model::CreateCollectionRequest& request, const CreateCollectionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void CreateProjectAsyncHelper(const Model::CreateProjectRequest& request, const CreateProjectResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void CreateProjectVersionAsyncHelper(const Model::CreateProjectVersionRequest& request, const CreateProjectVersionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void CreateStreamProcessorAsyncHelper(const Model::CreateStreamProcessorRequest& request, const CreateStreamProcessorResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void DeleteCollectionAsyncHelper(const Model::DeleteCollectionRequest& request, const DeleteCollectionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void DeleteFacesAsyncHelper(const Model::DeleteFacesRequest& request, const DeleteFacesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void DeleteProjectAsyncHelper(const Model::DeleteProjectRequest& request, const DeleteProjectResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void DeleteProjectVersionAsyncHelper(const Model::DeleteProjectVersionRequest& request, const DeleteProjectVersionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void DeleteStreamProcessorAsyncHelper(const Model::DeleteStreamProcessorRequest& request, const DeleteStreamProcessorResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void DescribeCollectionAsyncHelper(const Model::DescribeCollectionRequest& request, const DescribeCollectionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void DescribeProjectVersionsAsyncHelper(const Model::DescribeProjectVersionsRequest& request, const DescribeProjectVersionsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void DescribeProjectsAsyncHelper(const Model::DescribeProjectsRequest& request, const DescribeProjectsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void DescribeStreamProcessorAsyncHelper(const Model::DescribeStreamProcessorRequest& request, const DescribeStreamProcessorResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void DetectCustomLabelsAsyncHelper(const Model::DetectCustomLabelsRequest& request, const DetectCustomLabelsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void DetectFacesAsyncHelper(const Model::DetectFacesRequest& request, const DetectFacesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void DetectLabelsAsyncHelper(const Model::DetectLabelsRequest& request, const DetectLabelsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void DetectModerationLabelsAsyncHelper(const Model::DetectModerationLabelsRequest& request, const DetectModerationLabelsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void DetectTextAsyncHelper(const Model::DetectTextRequest& request, const DetectTextResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void GetCelebrityInfoAsyncHelper(const Model::GetCelebrityInfoRequest& request, const GetCelebrityInfoResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void GetCelebrityRecognitionAsyncHelper(const Model::GetCelebrityRecognitionRequest& request, const GetCelebrityRecognitionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void GetContentModerationAsyncHelper(const Model::GetContentModerationRequest& request, const GetContentModerationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void GetFaceDetectionAsyncHelper(const Model::GetFaceDetectionRequest& request, const GetFaceDetectionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void GetFaceSearchAsyncHelper(const Model::GetFaceSearchRequest& request, const GetFaceSearchResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void GetLabelDetectionAsyncHelper(const Model::GetLabelDetectionRequest& request, const GetLabelDetectionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void GetPersonTrackingAsyncHelper(const Model::GetPersonTrackingRequest& request, const GetPersonTrackingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void GetSegmentDetectionAsyncHelper(const Model::GetSegmentDetectionRequest& request, const GetSegmentDetectionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void GetTextDetectionAsyncHelper(const Model::GetTextDetectionRequest& request, const GetTextDetectionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void IndexFacesAsyncHelper(const Model::IndexFacesRequest& request, const IndexFacesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void ListCollectionsAsyncHelper(const Model::ListCollectionsRequest& request, const ListCollectionsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void ListFacesAsyncHelper(const Model::ListFacesRequest& request, const ListFacesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void ListStreamProcessorsAsyncHelper(const Model::ListStreamProcessorsRequest& request, const ListStreamProcessorsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void RecognizeCelebritiesAsyncHelper(const Model::RecognizeCelebritiesRequest& request, const RecognizeCelebritiesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void SearchFacesAsyncHelper(const Model::SearchFacesRequest& request, const SearchFacesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void SearchFacesByImageAsyncHelper(const Model::SearchFacesByImageRequest& request, const SearchFacesByImageResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void StartCelebrityRecognitionAsyncHelper(const Model::StartCelebrityRecognitionRequest& request, const StartCelebrityRecognitionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void StartContentModerationAsyncHelper(const Model::StartContentModerationRequest& request, const StartContentModerationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void StartFaceDetectionAsyncHelper(const Model::StartFaceDetectionRequest& request, const StartFaceDetectionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void StartFaceSearchAsyncHelper(const Model::StartFaceSearchRequest& request, const StartFaceSearchResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void StartLabelDetectionAsyncHelper(const Model::StartLabelDetectionRequest& request, const StartLabelDetectionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void StartPersonTrackingAsyncHelper(const Model::StartPersonTrackingRequest& request, const StartPersonTrackingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void StartProjectVersionAsyncHelper(const Model::StartProjectVersionRequest& request, const StartProjectVersionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void StartSegmentDetectionAsyncHelper(const Model::StartSegmentDetectionRequest& request, const StartSegmentDetectionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void StartStreamProcessorAsyncHelper(const Model::StartStreamProcessorRequest& request, const StartStreamProcessorResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void StartTextDetectionAsyncHelper(const Model::StartTextDetectionRequest& request, const StartTextDetectionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void StopProjectVersionAsyncHelper(const Model::StopProjectVersionRequest& request, const StopProjectVersionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
void StopStreamProcessorAsyncHelper(const Model::StopStreamProcessorRequest& request, const StopStreamProcessorResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;

Aws::String m_uri;
Aws::String m_configScheme;
std::shared_ptr<Aws::Utils::Threading::Executor> m_executor;
};

} // namespace Rekognition
} // namespace Aws