feat(hos_client_create, hos_client_destory): calling destory multiple times no longer causes a double free

彭宣正
2020-12-14 17:24:58 +08:00
parent 505d529c32
commit 10b370e486
55976 changed files with 8544395 additions and 2 deletions
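
The fix named in the commit title is the usual idempotent-destroy pattern. The real hos_client types and signatures are not shown in this diff, so the following C++ sketch only illustrates the technique under assumed signatures: destroy takes the handle by pointer so it can null it after the first release.

// Hypothetical sketch; the real hos_client layout and signatures are not
// part of this diff.
struct hos_client { /* connection state, buffers, ... */ };

hos_client* hos_client_create()
{
    return new hos_client();
}

// Taking the handle as hos_client** lets destroy null it after the first
// release, so repeated calls become no-ops instead of double frees.
void hos_client_destory(hos_client** client)
{
    if (client == nullptr || *client == nullptr)
        return; // already destroyed or never created: nothing to free
    delete *client;
    *client = nullptr; // later calls see nullptr and return early
}

int main()
{
    hos_client* client = hos_client_create();
    hos_client_destory(&client);
    hos_client_destory(&client); // safe: second call is a no-op
    return 0;
}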


@@ -0,0 +1,94 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
namespace Aws
{
namespace Utils
{
namespace Json
{
class JsonValue;
class JsonView;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
/**
* <p>Structure containing the estimated age range, in years, for a face.</p>
* <p>Amazon Rekognition estimates an age range for faces detected in the input
* image. Estimated age ranges can overlap. A face of a 5-year-old might have an
* estimated range of 4-6, while the face of a 6-year-old might have an estimated
* range of 4-8.</p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/rekognition-2016-06-27/AgeRange">AWS
* API Reference</a></p>
*/
class AWS_REKOGNITION_API AgeRange
{
public:
AgeRange();
AgeRange(Aws::Utils::Json::JsonView jsonValue);
AgeRange& operator=(Aws::Utils::Json::JsonView jsonValue);
Aws::Utils::Json::JsonValue Jsonize() const;
/**
* <p>The lowest estimated age.</p>
*/
inline int GetLow() const{ return m_low; }
/**
* <p>The lowest estimated age.</p>
*/
inline bool LowHasBeenSet() const { return m_lowHasBeenSet; }
/**
* <p>The lowest estimated age.</p>
*/
inline void SetLow(int value) { m_lowHasBeenSet = true; m_low = value; }
/**
* <p>The lowest estimated age.</p>
*/
inline AgeRange& WithLow(int value) { SetLow(value); return *this;}
/**
* <p>The highest estimated age.</p>
*/
inline int GetHigh() const{ return m_high; }
/**
* <p>The highest estimated age.</p>
*/
inline bool HighHasBeenSet() const { return m_highHasBeenSet; }
/**
* <p>The highest estimated age.</p>
*/
inline void SetHigh(int value) { m_highHasBeenSet = true; m_high = value; }
/**
* <p>The highest estimated age.</p>
*/
inline AgeRange& WithHigh(int value) { SetHigh(value); return *this;}
private:
int m_low;
bool m_lowHasBeenSet;
int m_high;
bool m_highHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
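
Every model header in this change follows the same generated accessor pattern: Get*/Set*, chaining With* setters, and a *HasBeenSet flag consulted during serialization (the corresponding .cpp is not shown here). A minimal sketch of how the AgeRange accessors above compose:

#include <aws/rekognition/model/AgeRange.h>
#include <iostream>

int main()
{
    using Aws::Rekognition::Model::AgeRange;

    // With* setters return *this, so fields chain fluently; each one also
    // flips the matching m_*HasBeenSet flag.
    AgeRange range = AgeRange().WithLow(4).WithHigh(8);

    if (range.LowHasBeenSet() && range.HighHasBeenSet())
    {
        std::cout << "estimated age: " << range.GetLow() << "-"
                  << range.GetHigh() << std::endl;
    }
    return 0;
}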


@@ -0,0 +1,68 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/model/GroundTruthManifest.h>
#include <utility>
namespace Aws
{
namespace Utils
{
namespace Json
{
class JsonValue;
class JsonView;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
/**
* <p>Assets are the images that you use to train and evaluate a model version.
* Assets are referenced by SageMaker GroundTruth manifest files. </p><p><h3>See
* Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/rekognition-2016-06-27/Asset">AWS
* API Reference</a></p>
*/
class AWS_REKOGNITION_API Asset
{
public:
Asset();
Asset(Aws::Utils::Json::JsonView jsonValue);
Asset& operator=(Aws::Utils::Json::JsonView jsonValue);
Aws::Utils::Json::JsonValue Jsonize() const;
inline const GroundTruthManifest& GetGroundTruthManifest() const{ return m_groundTruthManifest; }
inline bool GroundTruthManifestHasBeenSet() const { return m_groundTruthManifestHasBeenSet; }
inline void SetGroundTruthManifest(const GroundTruthManifest& value) { m_groundTruthManifestHasBeenSet = true; m_groundTruthManifest = value; }
inline void SetGroundTruthManifest(GroundTruthManifest&& value) { m_groundTruthManifestHasBeenSet = true; m_groundTruthManifest = std::move(value); }
inline Asset& WithGroundTruthManifest(const GroundTruthManifest& value) { SetGroundTruthManifest(value); return *this;}
inline Asset& WithGroundTruthManifest(GroundTruthManifest&& value) { SetGroundTruthManifest(std::move(value)); return *this;}
private:
GroundTruthManifest m_groundTruthManifest;
bool m_groundTruthManifestHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws


@@ -0,0 +1,31 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
namespace Aws
{
namespace Rekognition
{
namespace Model
{
enum class Attribute
{
NOT_SET,
DEFAULT,
ALL
};
namespace AttributeMapper
{
AWS_REKOGNITION_API Attribute GetAttributeForName(const Aws::String& name);
AWS_REKOGNITION_API Aws::String GetNameForAttribute(Attribute value);
} // namespace AttributeMapper
} // namespace Model
} // namespace Rekognition
} // namespace Aws


@@ -0,0 +1,162 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <utility>
namespace Aws
{
namespace Utils
{
namespace Json
{
class JsonValue;
class JsonView;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
/**
* <p>Metadata information about an audio stream. An array of
* <code>AudioMetadata</code> objects for the audio streams found in a stored video
* is returned by <a>GetSegmentDetection</a>. </p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/rekognition-2016-06-27/AudioMetadata">AWS
* API Reference</a></p>
*/
class AWS_REKOGNITION_API AudioMetadata
{
public:
AudioMetadata();
AudioMetadata(Aws::Utils::Json::JsonView jsonValue);
AudioMetadata& operator=(Aws::Utils::Json::JsonView jsonValue);
Aws::Utils::Json::JsonValue Jsonize() const;
/**
* <p>The audio codec used to encode or decode the audio stream. </p>
*/
inline const Aws::String& GetCodec() const{ return m_codec; }
/**
* <p>The audio codec used to encode or decode the audio stream. </p>
*/
inline bool CodecHasBeenSet() const { return m_codecHasBeenSet; }
/**
* <p>The audio codec used to encode or decode the audio stream. </p>
*/
inline void SetCodec(const Aws::String& value) { m_codecHasBeenSet = true; m_codec = value; }
/**
* <p>The audio codec used to encode or decode the audio stream. </p>
*/
inline void SetCodec(Aws::String&& value) { m_codecHasBeenSet = true; m_codec = std::move(value); }
/**
* <p>The audio codec used to encode or decode the audio stream. </p>
*/
inline void SetCodec(const char* value) { m_codecHasBeenSet = true; m_codec.assign(value); }
/**
* <p>The audio codec used to encode or decode the audio stream. </p>
*/
inline AudioMetadata& WithCodec(const Aws::String& value) { SetCodec(value); return *this;}
/**
* <p>The audio codec used to encode or decode the audio stream. </p>
*/
inline AudioMetadata& WithCodec(Aws::String&& value) { SetCodec(std::move(value)); return *this;}
/**
* <p>The audio codec used to encode or decode the audio stream. </p>
*/
inline AudioMetadata& WithCodec(const char* value) { SetCodec(value); return *this;}
/**
* <p>The duration of the audio stream in milliseconds.</p>
*/
inline long long GetDurationMillis() const{ return m_durationMillis; }
/**
* <p>The duration of the audio stream in milliseconds.</p>
*/
inline bool DurationMillisHasBeenSet() const { return m_durationMillisHasBeenSet; }
/**
* <p>The duration of the audio stream in milliseconds.</p>
*/
inline void SetDurationMillis(long long value) { m_durationMillisHasBeenSet = true; m_durationMillis = value; }
/**
* <p>The duration of the audio stream in milliseconds.</p>
*/
inline AudioMetadata& WithDurationMillis(long long value) { SetDurationMillis(value); return *this;}
/**
* <p>The sample rate for the audio stream.</p>
*/
inline long long GetSampleRate() const{ return m_sampleRate; }
/**
* <p>The sample rate for the audio stream.</p>
*/
inline bool SampleRateHasBeenSet() const { return m_sampleRateHasBeenSet; }
/**
* <p>The sample rate for the audio stream.</p>
*/
inline void SetSampleRate(long long value) { m_sampleRateHasBeenSet = true; m_sampleRate = value; }
/**
* <p>The sample rate for the audio stream.</p>
*/
inline AudioMetadata& WithSampleRate(long long value) { SetSampleRate(value); return *this;}
/**
* <p>The number of audio channels in the segment.</p>
*/
inline long long GetNumberOfChannels() const{ return m_numberOfChannels; }
/**
* <p>The number of audio channels in the segment.</p>
*/
inline bool NumberOfChannelsHasBeenSet() const { return m_numberOfChannelsHasBeenSet; }
/**
* <p>The number of audio channels in the segment.</p>
*/
inline void SetNumberOfChannels(long long value) { m_numberOfChannelsHasBeenSet = true; m_numberOfChannels = value; }
/**
* <p>The number of audio channels in the segment.</p>
*/
inline AudioMetadata& WithNumberOfChannels(long long value) { SetNumberOfChannels(value); return *this;}
private:
Aws::String m_codec;
bool m_codecHasBeenSet;
long long m_durationMillis;
bool m_durationMillisHasBeenSet;
long long m_sampleRate;
bool m_sampleRateHasBeenSet;
long long m_numberOfChannels;
bool m_numberOfChannelsHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws


@@ -0,0 +1,91 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
namespace Aws
{
namespace Utils
{
namespace Json
{
class JsonValue;
class JsonView;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
/**
* <p>Indicates whether or not the face has a beard, and the confidence level in
* the determination.</p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/rekognition-2016-06-27/Beard">AWS
* API Reference</a></p>
*/
class AWS_REKOGNITION_API Beard
{
public:
Beard();
Beard(Aws::Utils::Json::JsonView jsonValue);
Beard& operator=(Aws::Utils::Json::JsonView jsonValue);
Aws::Utils::Json::JsonValue Jsonize() const;
/**
* <p>Boolean value that indicates whether or not the face has a beard.</p>
*/
inline bool GetValue() const{ return m_value; }
/**
* <p>Boolean value that indicates whether or not the face has a beard.</p>
*/
inline bool ValueHasBeenSet() const { return m_valueHasBeenSet; }
/**
* <p>Boolean value that indicates whether or not the face has a beard.</p>
*/
inline void SetValue(bool value) { m_valueHasBeenSet = true; m_value = value; }
/**
* <p>Boolean value that indicates whether or not the face has a beard.</p>
*/
inline Beard& WithValue(bool value) { SetValue(value); return *this;}
/**
* <p>Level of confidence in the determination.</p>
*/
inline double GetConfidence() const{ return m_confidence; }
/**
* <p>Level of confidence in the determination.</p>
*/
inline bool ConfidenceHasBeenSet() const { return m_confidenceHasBeenSet; }
/**
* <p>Level of confidence in the determination.</p>
*/
inline void SetConfidence(double value) { m_confidenceHasBeenSet = true; m_confidence = value; }
/**
* <p>Level of confidence in the determination.</p>
*/
inline Beard& WithConfidence(double value) { SetConfidence(value); return *this;}
private:
bool m_value;
bool m_valueHasBeenSet;
double m_confidence;
bool m_confidenceHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws


@@ -0,0 +1,155 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
namespace Aws
{
namespace Utils
{
namespace Json
{
class JsonValue;
class JsonView;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
/**
* <p>Identifies the bounding box around the label, face, or text. The
* <code>left</code> (x-coordinate) and <code>top</code> (y-coordinate) are
* coordinates representing the top and left sides of the bounding box. Note that
* the upper-left corner of the image is the origin (0,0). </p> <p>The
* <code>top</code> and <code>left</code> values returned are ratios of the overall
* image size. For example, if the input image is 700x200 pixels, and the top-left
* coordinate of the bounding box is 350x50 pixels, the API returns a
* <code>left</code> value of 0.5 (350/700) and a <code>top</code> value of 0.25
* (50/200).</p> <p>The <code>width</code> and <code>height</code> values represent
* the dimensions of the bounding box as a ratio of the overall image dimension.
* For example, if the input image is 700x200 pixels, and the bounding box width is
* 70 pixels, the width returned is 0.1. </p> <p> The bounding box
* coordinates can have negative values. For example, if Amazon Rekognition is able
* to detect a face that is at the image edge and is only partially visible, the
* service can return coordinates that are outside the image bounds and, depending
* on the image edge, you might get negative values or values greater than 1 for
* the <code>left</code> or <code>top</code> values. </p> <p><h3>See
* Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/rekognition-2016-06-27/BoundingBox">AWS
* API Reference</a></p>
*/
class AWS_REKOGNITION_API BoundingBox
{
public:
BoundingBox();
BoundingBox(Aws::Utils::Json::JsonView jsonValue);
BoundingBox& operator=(Aws::Utils::Json::JsonView jsonValue);
Aws::Utils::Json::JsonValue Jsonize() const;
/**
* <p>Width of the bounding box as a ratio of the overall image width.</p>
*/
inline double GetWidth() const{ return m_width; }
/**
* <p>Width of the bounding box as a ratio of the overall image width.</p>
*/
inline bool WidthHasBeenSet() const { return m_widthHasBeenSet; }
/**
* <p>Width of the bounding box as a ratio of the overall image width.</p>
*/
inline void SetWidth(double value) { m_widthHasBeenSet = true; m_width = value; }
/**
* <p>Width of the bounding box as a ratio of the overall image width.</p>
*/
inline BoundingBox& WithWidth(double value) { SetWidth(value); return *this;}
/**
* <p>Height of the bounding box as a ratio of the overall image height.</p>
*/
inline double GetHeight() const{ return m_height; }
/**
* <p>Height of the bounding box as a ratio of the overall image height.</p>
*/
inline bool HeightHasBeenSet() const { return m_heightHasBeenSet; }
/**
* <p>Height of the bounding box as a ratio of the overall image height.</p>
*/
inline void SetHeight(double value) { m_heightHasBeenSet = true; m_height = value; }
/**
* <p>Height of the bounding box as a ratio of the overall image height.</p>
*/
inline BoundingBox& WithHeight(double value) { SetHeight(value); return *this;}
/**
* <p>Left coordinate of the bounding box as a ratio of overall image width.</p>
*/
inline double GetLeft() const{ return m_left; }
/**
* <p>Left coordinate of the bounding box as a ratio of overall image width.</p>
*/
inline bool LeftHasBeenSet() const { return m_leftHasBeenSet; }
/**
* <p>Left coordinate of the bounding box as a ratio of overall image width.</p>
*/
inline void SetLeft(double value) { m_leftHasBeenSet = true; m_left = value; }
/**
* <p>Left coordinate of the bounding box as a ratio of overall image width.</p>
*/
inline BoundingBox& WithLeft(double value) { SetLeft(value); return *this;}
/**
* <p>Top coordinate of the bounding box as a ratio of overall image height.</p>
*/
inline double GetTop() const{ return m_top; }
/**
* <p>Top coordinate of the bounding box as a ratio of overall image height.</p>
*/
inline bool TopHasBeenSet() const { return m_topHasBeenSet; }
/**
* <p>Top coordinate of the bounding box as a ratio of overall image height.</p>
*/
inline void SetTop(double value) { m_topHasBeenSet = true; m_top = value; }
/**
* <p>Top coordinate of the bounding box as a ratio of overall image height.</p>
*/
inline BoundingBox& WithTop(double value) { SetTop(value); return *this;}
private:
double m_width;
bool m_widthHasBeenSet;
double m_height;
bool m_heightHasBeenSet;
double m_left;
bool m_leftHasBeenSet;
double m_top;
bool m_topHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
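
The comment above defines the ratio convention with a worked example (a top-left corner at 350x50 pixels in a 700x200 image gives a left of 0.5 and a top of 0.25). A short sketch, using only the accessors declared above, that converts such a ratio box back to pixel coordinates:

#include <aws/rekognition/model/BoundingBox.h>
#include <iostream>

int main()
{
    using Aws::Rekognition::Model::BoundingBox;

    const int imageWidth = 700;
    const int imageHeight = 200;

    BoundingBox box = BoundingBox()
        .WithLeft(0.5)    // 350 / 700, per the example above
        .WithTop(0.25)    // 50 / 200
        .WithWidth(0.1)   // 70 / 700
        .WithHeight(0.4); // 80 / 200 (illustrative value)

    // Multiply each ratio by the matching image dimension to get pixels.
    std::cout << "pixels: x=" << box.GetLeft() * imageWidth
              << " y=" << box.GetTop() * imageHeight
              << " w=" << box.GetWidth() * imageWidth
              << " h=" << box.GetHeight() * imageHeight << std::endl;

    // As the comment notes, faces at the image edge can yield values
    // below 0 or above 1, so clamp to the image bounds before cropping.
    return 0;
}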


@@ -0,0 +1,261 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/rekognition/model/ComparedFace.h>
#include <utility>
namespace Aws
{
namespace Utils
{
namespace Json
{
class JsonValue;
class JsonView;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
/**
* <p>Provides information about a celebrity recognized by the
* <a>RecognizeCelebrities</a> operation.</p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/rekognition-2016-06-27/Celebrity">AWS
* API Reference</a></p>
*/
class AWS_REKOGNITION_API Celebrity
{
public:
Celebrity();
Celebrity(Aws::Utils::Json::JsonView jsonValue);
Celebrity& operator=(Aws::Utils::Json::JsonView jsonValue);
Aws::Utils::Json::JsonValue Jsonize() const;
/**
* <p>An array of URLs pointing to additional information about the celebrity. If
* there is no additional information about the celebrity, this list is empty.</p>
*/
inline const Aws::Vector<Aws::String>& GetUrls() const{ return m_urls; }
/**
* <p>An array of URLs pointing to additional information about the celebrity. If
* there is no additional information about the celebrity, this list is empty.</p>
*/
inline bool UrlsHasBeenSet() const { return m_urlsHasBeenSet; }
/**
* <p>An array of URLs pointing to additional information about the celebrity. If
* there is no additional information about the celebrity, this list is empty.</p>
*/
inline void SetUrls(const Aws::Vector<Aws::String>& value) { m_urlsHasBeenSet = true; m_urls = value; }
/**
* <p>An array of URLs pointing to additional information about the celebrity. If
* there is no additional information about the celebrity, this list is empty.</p>
*/
inline void SetUrls(Aws::Vector<Aws::String>&& value) { m_urlsHasBeenSet = true; m_urls = std::move(value); }
/**
* <p>An array of URLs pointing to additional information about the celebrity. If
* there is no additional information about the celebrity, this list is empty.</p>
*/
inline Celebrity& WithUrls(const Aws::Vector<Aws::String>& value) { SetUrls(value); return *this;}
/**
* <p>An array of URLs pointing to additional information about the celebrity. If
* there is no additional information about the celebrity, this list is empty.</p>
*/
inline Celebrity& WithUrls(Aws::Vector<Aws::String>&& value) { SetUrls(std::move(value)); return *this;}
/**
* <p>An array of URLs pointing to additional information about the celebrity. If
* there is no additional information about the celebrity, this list is empty.</p>
*/
inline Celebrity& AddUrls(const Aws::String& value) { m_urlsHasBeenSet = true; m_urls.push_back(value); return *this; }
/**
* <p>An array of URLs pointing to additional information about the celebrity. If
* there is no additional information about the celebrity, this list is empty.</p>
*/
inline Celebrity& AddUrls(Aws::String&& value) { m_urlsHasBeenSet = true; m_urls.push_back(std::move(value)); return *this; }
/**
* <p>An array of URLs pointing to additional information about the celebrity. If
* there is no additional information about the celebrity, this list is empty.</p>
*/
inline Celebrity& AddUrls(const char* value) { m_urlsHasBeenSet = true; m_urls.push_back(value); return *this; }
/**
* <p>The name of the celebrity.</p>
*/
inline const Aws::String& GetName() const{ return m_name; }
/**
* <p>The name of the celebrity.</p>
*/
inline bool NameHasBeenSet() const { return m_nameHasBeenSet; }
/**
* <p>The name of the celebrity.</p>
*/
inline void SetName(const Aws::String& value) { m_nameHasBeenSet = true; m_name = value; }
/**
* <p>The name of the celebrity.</p>
*/
inline void SetName(Aws::String&& value) { m_nameHasBeenSet = true; m_name = std::move(value); }
/**
* <p>The name of the celebrity.</p>
*/
inline void SetName(const char* value) { m_nameHasBeenSet = true; m_name.assign(value); }
/**
* <p>The name of the celebrity.</p>
*/
inline Celebrity& WithName(const Aws::String& value) { SetName(value); return *this;}
/**
* <p>The name of the celebrity.</p>
*/
inline Celebrity& WithName(Aws::String&& value) { SetName(std::move(value)); return *this;}
/**
* <p>The name of the celebrity.</p>
*/
inline Celebrity& WithName(const char* value) { SetName(value); return *this;}
/**
* <p>A unique identifier for the celebrity. </p>
*/
inline const Aws::String& GetId() const{ return m_id; }
/**
* <p>A unique identifier for the celebrity. </p>
*/
inline bool IdHasBeenSet() const { return m_idHasBeenSet; }
/**
* <p>A unique identifier for the celebrity. </p>
*/
inline void SetId(const Aws::String& value) { m_idHasBeenSet = true; m_id = value; }
/**
* <p>A unique identifier for the celebrity. </p>
*/
inline void SetId(Aws::String&& value) { m_idHasBeenSet = true; m_id = std::move(value); }
/**
* <p>A unique identifier for the celebrity. </p>
*/
inline void SetId(const char* value) { m_idHasBeenSet = true; m_id.assign(value); }
/**
* <p>A unique identifier for the celebrity. </p>
*/
inline Celebrity& WithId(const Aws::String& value) { SetId(value); return *this;}
/**
* <p>A unique identifier for the celebrity. </p>
*/
inline Celebrity& WithId(Aws::String&& value) { SetId(std::move(value)); return *this;}
/**
* <p>A unique identifier for the celebrity. </p>
*/
inline Celebrity& WithId(const char* value) { SetId(value); return *this;}
/**
* <p>Provides information about the celebrity's face, such as its location on the
* image.</p>
*/
inline const ComparedFace& GetFace() const{ return m_face; }
/**
* <p>Provides information about the celebrity's face, such as its location on the
* image.</p>
*/
inline bool FaceHasBeenSet() const { return m_faceHasBeenSet; }
/**
* <p>Provides information about the celebrity's face, such as its location on the
* image.</p>
*/
inline void SetFace(const ComparedFace& value) { m_faceHasBeenSet = true; m_face = value; }
/**
* <p>Provides information about the celebrity's face, such as its location on the
* image.</p>
*/
inline void SetFace(ComparedFace&& value) { m_faceHasBeenSet = true; m_face = std::move(value); }
/**
* <p>Provides information about the celebrity's face, such as its location on the
* image.</p>
*/
inline Celebrity& WithFace(const ComparedFace& value) { SetFace(value); return *this;}
/**
* <p>Provides information about the celebrity's face, such as its location on the
* image.</p>
*/
inline Celebrity& WithFace(ComparedFace&& value) { SetFace(std::move(value)); return *this;}
/**
* <p>The confidence, in percentage, that Amazon Rekognition has that the
* recognized face is the celebrity.</p>
*/
inline double GetMatchConfidence() const{ return m_matchConfidence; }
/**
* <p>The confidence, in percentage, that Amazon Rekognition has that the
* recognized face is the celebrity.</p>
*/
inline bool MatchConfidenceHasBeenSet() const { return m_matchConfidenceHasBeenSet; }
/**
* <p>The confidence, in percentage, that Amazon Rekognition has that the
* recognized face is the celebrity.</p>
*/
inline void SetMatchConfidence(double value) { m_matchConfidenceHasBeenSet = true; m_matchConfidence = value; }
/**
* <p>The confidence, in percentage, that Amazon Rekognition has that the
* recognized face is the celebrity.</p>
*/
inline Celebrity& WithMatchConfidence(double value) { SetMatchConfidence(value); return *this;}
private:
Aws::Vector<Aws::String> m_urls;
bool m_urlsHasBeenSet;
Aws::String m_name;
bool m_nameHasBeenSet;
Aws::String m_id;
bool m_idHasBeenSet;
ComparedFace m_face;
bool m_faceHasBeenSet;
double m_matchConfidence;
bool m_matchConfidenceHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws


@@ -0,0 +1,280 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/rekognition/model/BoundingBox.h>
#include <aws/rekognition/model/FaceDetail.h>
#include <utility>
namespace Aws
{
namespace Utils
{
namespace Json
{
class JsonValue;
class JsonView;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
/**
* <p>Information about a recognized celebrity.</p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/rekognition-2016-06-27/CelebrityDetail">AWS
* API Reference</a></p>
*/
class AWS_REKOGNITION_API CelebrityDetail
{
public:
CelebrityDetail();
CelebrityDetail(Aws::Utils::Json::JsonView jsonValue);
CelebrityDetail& operator=(Aws::Utils::Json::JsonView jsonValue);
Aws::Utils::Json::JsonValue Jsonize() const;
/**
* <p>An array of URLs pointing to additional celebrity information. </p>
*/
inline const Aws::Vector<Aws::String>& GetUrls() const{ return m_urls; }
/**
* <p>An array of URLs pointing to additional celebrity information. </p>
*/
inline bool UrlsHasBeenSet() const { return m_urlsHasBeenSet; }
/**
* <p>An array of URLs pointing to additional celebrity information. </p>
*/
inline void SetUrls(const Aws::Vector<Aws::String>& value) { m_urlsHasBeenSet = true; m_urls = value; }
/**
* <p>An array of URLs pointing to additional celebrity information. </p>
*/
inline void SetUrls(Aws::Vector<Aws::String>&& value) { m_urlsHasBeenSet = true; m_urls = std::move(value); }
/**
* <p>An array of URLs pointing to additional celebrity information. </p>
*/
inline CelebrityDetail& WithUrls(const Aws::Vector<Aws::String>& value) { SetUrls(value); return *this;}
/**
* <p>An array of URLs pointing to additional celebrity information. </p>
*/
inline CelebrityDetail& WithUrls(Aws::Vector<Aws::String>&& value) { SetUrls(std::move(value)); return *this;}
/**
* <p>An array of URLs pointing to additional celebrity information. </p>
*/
inline CelebrityDetail& AddUrls(const Aws::String& value) { m_urlsHasBeenSet = true; m_urls.push_back(value); return *this; }
/**
* <p>An array of URLs pointing to additional celebrity information. </p>
*/
inline CelebrityDetail& AddUrls(Aws::String&& value) { m_urlsHasBeenSet = true; m_urls.push_back(std::move(value)); return *this; }
/**
* <p>An array of URLs pointing to additional celebrity information. </p>
*/
inline CelebrityDetail& AddUrls(const char* value) { m_urlsHasBeenSet = true; m_urls.push_back(value); return *this; }
/**
* <p>The name of the celebrity.</p>
*/
inline const Aws::String& GetName() const{ return m_name; }
/**
* <p>The name of the celebrity.</p>
*/
inline bool NameHasBeenSet() const { return m_nameHasBeenSet; }
/**
* <p>The name of the celebrity.</p>
*/
inline void SetName(const Aws::String& value) { m_nameHasBeenSet = true; m_name = value; }
/**
* <p>The name of the celebrity.</p>
*/
inline void SetName(Aws::String&& value) { m_nameHasBeenSet = true; m_name = std::move(value); }
/**
* <p>The name of the celebrity.</p>
*/
inline void SetName(const char* value) { m_nameHasBeenSet = true; m_name.assign(value); }
/**
* <p>The name of the celebrity.</p>
*/
inline CelebrityDetail& WithName(const Aws::String& value) { SetName(value); return *this;}
/**
* <p>The name of the celebrity.</p>
*/
inline CelebrityDetail& WithName(Aws::String&& value) { SetName(std::move(value)); return *this;}
/**
* <p>The name of the celebrity.</p>
*/
inline CelebrityDetail& WithName(const char* value) { SetName(value); return *this;}
/**
* <p>The unique identifier for the celebrity. </p>
*/
inline const Aws::String& GetId() const{ return m_id; }
/**
* <p>The unique identifier for the celebrity. </p>
*/
inline bool IdHasBeenSet() const { return m_idHasBeenSet; }
/**
* <p>The unique identifier for the celebrity. </p>
*/
inline void SetId(const Aws::String& value) { m_idHasBeenSet = true; m_id = value; }
/**
* <p>The unique identifier for the celebrity. </p>
*/
inline void SetId(Aws::String&& value) { m_idHasBeenSet = true; m_id = std::move(value); }
/**
* <p>The unique identifier for the celebrity. </p>
*/
inline void SetId(const char* value) { m_idHasBeenSet = true; m_id.assign(value); }
/**
* <p>The unique identifier for the celebrity. </p>
*/
inline CelebrityDetail& WithId(const Aws::String& value) { SetId(value); return *this;}
/**
* <p>The unique identifier for the celebrity. </p>
*/
inline CelebrityDetail& WithId(Aws::String&& value) { SetId(std::move(value)); return *this;}
/**
* <p>The unique identifier for the celebrity. </p>
*/
inline CelebrityDetail& WithId(const char* value) { SetId(value); return *this;}
/**
* <p>The confidence, in percentage, that Amazon Rekognition has that the
* recognized face is the celebrity. </p>
*/
inline double GetConfidence() const{ return m_confidence; }
/**
* <p>The confidence, in percentage, that Amazon Rekognition has that the
* recognized face is the celebrity. </p>
*/
inline bool ConfidenceHasBeenSet() const { return m_confidenceHasBeenSet; }
/**
* <p>The confidence, in percentage, that Amazon Rekognition has that the
* recognized face is the celebrity. </p>
*/
inline void SetConfidence(double value) { m_confidenceHasBeenSet = true; m_confidence = value; }
/**
* <p>The confidence, in percentage, that Amazon Rekognition has that the
* recognized face is the celebrity. </p>
*/
inline CelebrityDetail& WithConfidence(double value) { SetConfidence(value); return *this;}
/**
* <p>Bounding box around the body of a celebrity.</p>
*/
inline const BoundingBox& GetBoundingBox() const{ return m_boundingBox; }
/**
* <p>Bounding box around the body of a celebrity.</p>
*/
inline bool BoundingBoxHasBeenSet() const { return m_boundingBoxHasBeenSet; }
/**
* <p>Bounding box around the body of a celebrity.</p>
*/
inline void SetBoundingBox(const BoundingBox& value) { m_boundingBoxHasBeenSet = true; m_boundingBox = value; }
/**
* <p>Bounding box around the body of a celebrity.</p>
*/
inline void SetBoundingBox(BoundingBox&& value) { m_boundingBoxHasBeenSet = true; m_boundingBox = std::move(value); }
/**
* <p>Bounding box around the body of a celebrity.</p>
*/
inline CelebrityDetail& WithBoundingBox(const BoundingBox& value) { SetBoundingBox(value); return *this;}
/**
* <p>Bounding box around the body of a celebrity.</p>
*/
inline CelebrityDetail& WithBoundingBox(BoundingBox&& value) { SetBoundingBox(std::move(value)); return *this;}
/**
* <p>Face details for the recognized celebrity.</p>
*/
inline const FaceDetail& GetFace() const{ return m_face; }
/**
* <p>Face details for the recognized celebrity.</p>
*/
inline bool FaceHasBeenSet() const { return m_faceHasBeenSet; }
/**
* <p>Face details for the recognized celebrity.</p>
*/
inline void SetFace(const FaceDetail& value) { m_faceHasBeenSet = true; m_face = value; }
/**
* <p>Face details for the recognized celebrity.</p>
*/
inline void SetFace(FaceDetail&& value) { m_faceHasBeenSet = true; m_face = std::move(value); }
/**
* <p>Face details for the recognized celebrity.</p>
*/
inline CelebrityDetail& WithFace(const FaceDetail& value) { SetFace(value); return *this;}
/**
* <p>Face details for the recognized celebrity.</p>
*/
inline CelebrityDetail& WithFace(FaceDetail&& value) { SetFace(std::move(value)); return *this;}
private:
Aws::Vector<Aws::String> m_urls;
bool m_urlsHasBeenSet;
Aws::String m_name;
bool m_nameHasBeenSet;
Aws::String m_id;
bool m_idHasBeenSet;
double m_confidence;
bool m_confidenceHasBeenSet;
BoundingBox m_boundingBox;
bool m_boundingBoxHasBeenSet;
FaceDetail m_face;
bool m_faceHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws


@@ -0,0 +1,108 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/model/CelebrityDetail.h>
#include <utility>
namespace Aws
{
namespace Utils
{
namespace Json
{
class JsonValue;
class JsonView;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
/**
* <p>Information about a detected celebrity and the time the celebrity was
* detected in a stored video. For more information, see GetCelebrityRecognition in
* the Amazon Rekognition Developer Guide.</p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/rekognition-2016-06-27/CelebrityRecognition">AWS
* API Reference</a></p>
*/
class AWS_REKOGNITION_API CelebrityRecognition
{
public:
CelebrityRecognition();
CelebrityRecognition(Aws::Utils::Json::JsonView jsonValue);
CelebrityRecognition& operator=(Aws::Utils::Json::JsonView jsonValue);
Aws::Utils::Json::JsonValue Jsonize() const;
/**
* <p>The time, in milliseconds from the start of the video, that the celebrity was
* recognized.</p>
*/
inline long long GetTimestamp() const{ return m_timestamp; }
/**
* <p>The time, in milliseconds from the start of the video, that the celebrity was
* recognized.</p>
*/
inline bool TimestampHasBeenSet() const { return m_timestampHasBeenSet; }
/**
* <p>The time, in milliseconds from the start of the video, that the celebrity was
* recognized.</p>
*/
inline void SetTimestamp(long long value) { m_timestampHasBeenSet = true; m_timestamp = value; }
/**
* <p>The time, in milliseconds from the start of the video, that the celebrity was
* recognized.</p>
*/
inline CelebrityRecognition& WithTimestamp(long long value) { SetTimestamp(value); return *this;}
/**
* <p>Information about a recognized celebrity.</p>
*/
inline const CelebrityDetail& GetCelebrity() const{ return m_celebrity; }
/**
* <p>Information about a recognized celebrity.</p>
*/
inline bool CelebrityHasBeenSet() const { return m_celebrityHasBeenSet; }
/**
* <p>Information about a recognized celebrity.</p>
*/
inline void SetCelebrity(const CelebrityDetail& value) { m_celebrityHasBeenSet = true; m_celebrity = value; }
/**
* <p>Information about a recognized celebrity.</p>
*/
inline void SetCelebrity(CelebrityDetail&& value) { m_celebrityHasBeenSet = true; m_celebrity = std::move(value); }
/**
* <p>Information about a recognized celebrity.</p>
*/
inline CelebrityRecognition& WithCelebrity(const CelebrityDetail& value) { SetCelebrity(value); return *this;}
/**
* <p>Information about a recognized celebrity.</p>
*/
inline CelebrityRecognition& WithCelebrity(CelebrityDetail&& value) { SetCelebrity(std::move(value)); return *this;}
private:
long long m_timestamp;
bool m_timestampHasBeenSet;
CelebrityDetail m_celebrity;
bool m_celebrityHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws


@@ -0,0 +1,31 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
namespace Aws
{
namespace Rekognition
{
namespace Model
{
enum class CelebrityRecognitionSortBy
{
NOT_SET,
ID,
TIMESTAMP
};
namespace CelebrityRecognitionSortByMapper
{
AWS_REKOGNITION_API CelebrityRecognitionSortBy GetCelebrityRecognitionSortByForName(const Aws::String& name);
AWS_REKOGNITION_API Aws::String GetNameForCelebrityRecognitionSortBy(CelebrityRecognitionSortBy value);
} // namespace CelebrityRecognitionSortByMapper
} // namespace Model
} // namespace Rekognition
} // namespace Aws


@@ -0,0 +1,112 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/model/ComparedFace.h>
#include <utility>
namespace Aws
{
namespace Utils
{
namespace Json
{
class JsonValue;
class JsonView;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
/**
* <p>Provides information about a face in a target image that matches the source
* image face analyzed by <code>CompareFaces</code>. The <code>Face</code> property
* contains the bounding box of the face in the target image. The
* <code>Similarity</code> property is the confidence that the source image face
* matches the face in the bounding box.</p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/rekognition-2016-06-27/CompareFacesMatch">AWS
* API Reference</a></p>
*/
class AWS_REKOGNITION_API CompareFacesMatch
{
public:
CompareFacesMatch();
CompareFacesMatch(Aws::Utils::Json::JsonView jsonValue);
CompareFacesMatch& operator=(Aws::Utils::Json::JsonView jsonValue);
Aws::Utils::Json::JsonValue Jsonize() const;
/**
* <p>Level of confidence that the faces match.</p>
*/
inline double GetSimilarity() const{ return m_similarity; }
/**
* <p>Level of confidence that the faces match.</p>
*/
inline bool SimilarityHasBeenSet() const { return m_similarityHasBeenSet; }
/**
* <p>Level of confidence that the faces match.</p>
*/
inline void SetSimilarity(double value) { m_similarityHasBeenSet = true; m_similarity = value; }
/**
* <p>Level of confidence that the faces match.</p>
*/
inline CompareFacesMatch& WithSimilarity(double value) { SetSimilarity(value); return *this;}
/**
* <p>Provides face metadata (bounding box and confidence that the bounding box
* actually contains a face).</p>
*/
inline const ComparedFace& GetFace() const{ return m_face; }
/**
* <p>Provides face metadata (bounding box and confidence that the bounding box
* actually contains a face).</p>
*/
inline bool FaceHasBeenSet() const { return m_faceHasBeenSet; }
/**
* <p>Provides face metadata (bounding box and confidence that the bounding box
* actually contains a face).</p>
*/
inline void SetFace(const ComparedFace& value) { m_faceHasBeenSet = true; m_face = value; }
/**
* <p>Provides face metadata (bounding box and confidence that the bounding box
* actually contains a face).</p>
*/
inline void SetFace(ComparedFace&& value) { m_faceHasBeenSet = true; m_face = std::move(value); }
/**
* <p>Provides face metadata (bounding box and confidence that the bounding box
* actually contains a face).</p>
*/
inline CompareFacesMatch& WithFace(const ComparedFace& value) { SetFace(value); return *this;}
/**
* <p>Provides face metadata (bounding box and confidence that the bounding box
* actually contains a face).</p>
*/
inline CompareFacesMatch& WithFace(ComparedFace&& value) { SetFace(std::move(value)); return *this;}
private:
double m_similarity;
bool m_similarityHasBeenSet;
ComparedFace m_face;
bool m_faceHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws


@@ -0,0 +1,292 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/RekognitionRequest.h>
#include <aws/rekognition/model/Image.h>
#include <aws/rekognition/model/QualityFilter.h>
#include <utility>
namespace Aws
{
namespace Rekognition
{
namespace Model
{
/**
*/
class AWS_REKOGNITION_API CompareFacesRequest : public RekognitionRequest
{
public:
CompareFacesRequest();
// The service request name is the operation name that sends this request out.
// Each operation should have a unique request name, so that we can get the
// operation's name from this request.
// Note: this is not true for responses; multiple operations may have the same
// response name, so we cannot get the operation's name from a response.
inline virtual const char* GetServiceRequestName() const override { return "CompareFaces"; }
Aws::String SerializePayload() const override;
Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
/**
* <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS
* CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is
* not supported. </p> <p>If you are using an AWS SDK to call Amazon Rekognition,
* you might not need to base64-encode image bytes passed using the
* <code>Bytes</code> field. For more information, see Images in the Amazon
* Rekognition developer guide.</p>
*/
inline const Image& GetSourceImage() const{ return m_sourceImage; }
/**
* <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS
* CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is
* not supported. </p> <p>If you are using an AWS SDK to call Amazon Rekognition,
* you might not need to base64-encode image bytes passed using the
* <code>Bytes</code> field. For more information, see Images in the Amazon
* Rekognition developer guide.</p>
*/
inline bool SourceImageHasBeenSet() const { return m_sourceImageHasBeenSet; }
/**
* <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS
* CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is
* not supported. </p> <p>If you are using an AWS SDK to call Amazon Rekognition,
* you might not need to base64-encode image bytes passed using the
* <code>Bytes</code> field. For more information, see Images in the Amazon
* Rekognition developer guide.</p>
*/
inline void SetSourceImage(const Image& value) { m_sourceImageHasBeenSet = true; m_sourceImage = value; }
/**
* <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS
* CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is
* not supported. </p> <p>If you are using an AWS SDK to call Amazon Rekognition,
* you might not need to base64-encode image bytes passed using the
* <code>Bytes</code> field. For more information, see Images in the Amazon
* Rekognition developer guide.</p>
*/
inline void SetSourceImage(Image&& value) { m_sourceImageHasBeenSet = true; m_sourceImage = std::move(value); }
/**
* <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS
* CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is
* not supported. </p> <p>If you are using an AWS SDK to call Amazon Rekognition,
* you might not need to base64-encode image bytes passed using the
* <code>Bytes</code> field. For more information, see Images in the Amazon
* Rekognition developer guide.</p>
*/
inline CompareFacesRequest& WithSourceImage(const Image& value) { SetSourceImage(value); return *this;}
/**
* <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS
* CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is
* not supported. </p> <p>If you are using an AWS SDK to call Amazon Rekognition,
* you might not need to base64-encode image bytes passed using the
* <code>Bytes</code> field. For more information, see Images in the Amazon
* Rekognition developer guide.</p>
*/
inline CompareFacesRequest& WithSourceImage(Image&& value) { SetSourceImage(std::move(value)); return *this;}
/**
* <p>The target image as base64-encoded bytes or an S3 object. If you use the AWS
* CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is
* not supported. </p> <p>If you are using an AWS SDK to call Amazon Rekognition,
* you might not need to base64-encode image bytes passed using the
* <code>Bytes</code> field. For more information, see Images in the Amazon
* Rekognition developer guide.</p>
*/
inline const Image& GetTargetImage() const{ return m_targetImage; }
/**
* <p>The target image as base64-encoded bytes or an S3 object. If you use the AWS
* CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is
* not supported. </p> <p>If you are using an AWS SDK to call Amazon Rekognition,
* you might not need to base64-encode image bytes passed using the
* <code>Bytes</code> field. For more information, see Images in the Amazon
* Rekognition developer guide.</p>
*/
inline bool TargetImageHasBeenSet() const { return m_targetImageHasBeenSet; }
/**
* <p>The target image as base64-encoded bytes or an S3 object. If you use the AWS
* CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is
* not supported. </p> <p>If you are using an AWS SDK to call Amazon Rekognition,
* you might not need to base64-encode image bytes passed using the
* <code>Bytes</code> field. For more information, see Images in the Amazon
* Rekognition developer guide.</p>
*/
inline void SetTargetImage(const Image& value) { m_targetImageHasBeenSet = true; m_targetImage = value; }
/**
* <p>The target image as base64-encoded bytes or an S3 object. If you use the AWS
* CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is
* not supported. </p> <p>If you are using an AWS SDK to call Amazon Rekognition,
* you might not need to base64-encode image bytes passed using the
* <code>Bytes</code> field. For more information, see Images in the Amazon
* Rekognition developer guide.</p>
*/
inline void SetTargetImage(Image&& value) { m_targetImageHasBeenSet = true; m_targetImage = std::move(value); }
/**
* <p>The target image as base64-encoded bytes or an S3 object. If you use the AWS
* CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is
* not supported. </p> <p>If you are using an AWS SDK to call Amazon Rekognition,
* you might not need to base64-encode image bytes passed using the
* <code>Bytes</code> field. For more information, see Images in the Amazon
* Rekognition developer guide.</p>
*/
inline CompareFacesRequest& WithTargetImage(const Image& value) { SetTargetImage(value); return *this;}
/**
* <p>The target image as base64-encoded bytes or an S3 object. If you use the AWS
* CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is
* not supported. </p> <p>If you are using an AWS SDK to call Amazon Rekognition,
* you might not need to base64-encode image bytes passed using the
* <code>Bytes</code> field. For more information, see Images in the Amazon
* Rekognition developer guide.</p>
*/
inline CompareFacesRequest& WithTargetImage(Image&& value) { SetTargetImage(std::move(value)); return *this;}
/**
* <p>The minimum level of confidence in the face matches that a match must meet to
* be included in the <code>FaceMatches</code> array.</p>
*/
inline double GetSimilarityThreshold() const{ return m_similarityThreshold; }
/**
* <p>The minimum level of confidence in the face matches that a match must meet to
* be included in the <code>FaceMatches</code> array.</p>
*/
inline bool SimilarityThresholdHasBeenSet() const { return m_similarityThresholdHasBeenSet; }
/**
* <p>The minimum level of confidence in the face matches that a match must meet to
* be included in the <code>FaceMatches</code> array.</p>
*/
inline void SetSimilarityThreshold(double value) { m_similarityThresholdHasBeenSet = true; m_similarityThreshold = value; }
/**
* <p>The minimum level of confidence in the face matches that a match must meet to
* be included in the <code>FaceMatches</code> array.</p>
*/
inline CompareFacesRequest& WithSimilarityThreshold(double value) { SetSimilarityThreshold(value); return *this;}
/**
* <p>A filter that specifies a quality bar for how much filtering is done to
* identify faces. Filtered faces aren't compared. If you specify
* <code>AUTO</code>, Amazon Rekognition chooses the quality bar. If you specify
* <code>LOW</code>, <code>MEDIUM</code>, or <code>HIGH</code>, filtering removes
* all faces that don't meet the chosen quality bar. The quality bar is based on a
* variety of common use cases. Low-quality detections can occur for a number of
* reasons. Some examples are an object that's misidentified as a face, a face
* that's too blurry, or a face with a pose that's too extreme to use. If you
* specify <code>NONE</code>, no filtering is performed. The default value is
* <code>NONE</code>. </p> <p>To use quality filtering, the collection you are
* using must be associated with version 3 of the face model or higher.</p>
*/
inline const QualityFilter& GetQualityFilter() const{ return m_qualityFilter; }
/**
* <p>A filter that specifies a quality bar for how much filtering is done to
* identify faces. Filtered faces aren't compared. If you specify
* <code>AUTO</code>, Amazon Rekognition chooses the quality bar. If you specify
* <code>LOW</code>, <code>MEDIUM</code>, or <code>HIGH</code>, filtering removes
* all faces that don't meet the chosen quality bar. The quality bar is based on a
* variety of common use cases. Low-quality detections can occur for a number of
* reasons. Some examples are an object that's misidentified as a face, a face
* that's too blurry, or a face with a pose that's too extreme to use. If you
* specify <code>NONE</code>, no filtering is performed. The default value is
* <code>NONE</code>. </p> <p>To use quality filtering, the collection you are
* using must be associated with version 3 of the face model or higher.</p>
*/
inline bool QualityFilterHasBeenSet() const { return m_qualityFilterHasBeenSet; }
/**
* <p>A filter that specifies a quality bar for how much filtering is done to
* identify faces. Filtered faces aren't compared. If you specify
* <code>AUTO</code>, Amazon Rekognition chooses the quality bar. If you specify
* <code>LOW</code>, <code>MEDIUM</code>, or <code>HIGH</code>, filtering removes
* all faces that don't meet the chosen quality bar. The quality bar is based on a
* variety of common use cases. Low-quality detections can occur for a number of
* reasons. Some examples are an object that's misidentified as a face, a face
* that's too blurry, or a face with a pose that's too extreme to use. If you
* specify <code>NONE</code>, no filtering is performed. The default value is
* <code>NONE</code>. </p> <p>To use quality filtering, the collection you are
* using must be associated with version 3 of the face model or higher.</p>
*/
inline void SetQualityFilter(const QualityFilter& value) { m_qualityFilterHasBeenSet = true; m_qualityFilter = value; }
/**
* <p>A filter that specifies a quality bar for how much filtering is done to
* identify faces. Filtered faces aren't compared. If you specify
* <code>AUTO</code>, Amazon Rekognition chooses the quality bar. If you specify
* <code>LOW</code>, <code>MEDIUM</code>, or <code>HIGH</code>, filtering removes
* all faces that don't meet the chosen quality bar. The quality bar is based on a
* variety of common use cases. Low-quality detections can occur for a number of
* reasons. Some examples are an object that's misidentified as a face, a face
* that's too blurry, or a face with a pose that's too extreme to use. If you
* specify <code>NONE</code>, no filtering is performed. The default value is
* <code>NONE</code>. </p> <p>To use quality filtering, the collection you are
* using must be associated with version 3 of the face model or higher.</p>
*/
inline void SetQualityFilter(QualityFilter&& value) { m_qualityFilterHasBeenSet = true; m_qualityFilter = std::move(value); }
/**
* <p>A filter that specifies a quality bar for how much filtering is done to
* identify faces. Filtered faces aren't compared. If you specify
* <code>AUTO</code>, Amazon Rekognition chooses the quality bar. If you specify
* <code>LOW</code>, <code>MEDIUM</code>, or <code>HIGH</code>, filtering removes
* all faces that don't meet the chosen quality bar. The quality bar is based on a
* variety of common use cases. Low-quality detections can occur for a number of
* reasons. Some examples are an object that's misidentified as a face, a face
* that's too blurry, or a face with a pose that's too extreme to use. If you
* specify <code>NONE</code>, no filtering is performed. The default value is
* <code>NONE</code>. </p> <p>To use quality filtering, the collection you are
* using must be associated with version 3 of the face model or higher.</p>
*/
inline CompareFacesRequest& WithQualityFilter(const QualityFilter& value) { SetQualityFilter(value); return *this;}
/**
* <p>A filter that specifies a quality bar for how much filtering is done to
* identify faces. Filtered faces aren't compared. If you specify
* <code>AUTO</code>, Amazon Rekognition chooses the quality bar. If you specify
* <code>LOW</code>, <code>MEDIUM</code>, or <code>HIGH</code>, filtering removes
* all faces that don't meet the chosen quality bar. The quality bar is based on a
* variety of common use cases. Low-quality detections can occur for a number of
* reasons. Some examples are an object that's misidentified as a face, a face
* that's too blurry, or a face with a pose that's too extreme to use. If you
* specify <code>NONE</code>, no filtering is performed. The default value is
* <code>NONE</code>. </p> <p>To use quality filtering, the collection you are
* using must be associated with version 3 of the face model or higher.</p>
*/
inline CompareFacesRequest& WithQualityFilter(QualityFilter&& value) { SetQualityFilter(std::move(value)); return *this;}
private:
Image m_sourceImage;
bool m_sourceImageHasBeenSet;
Image m_targetImage;
bool m_targetImageHasBeenSet;
double m_similarityThreshold;
bool m_similarityThresholdHasBeenSet;
QualityFilter m_qualityFilter;
bool m_qualityFilterHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
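
A sketch of how the request above is typically populated and sent. The setters come from this header; RekognitionClient, Image::SetS3Object, and S3Object::WithBucket/WithName come from elsewhere in the SDK and are assumed here, and the bucket and object names are placeholders.

#include <aws/core/Aws.h>
#include <aws/rekognition/RekognitionClient.h>
#include <aws/rekognition/model/CompareFacesRequest.h>
#include <aws/rekognition/model/Image.h>
#include <aws/rekognition/model/S3Object.h>
#include <iostream>

int main()
{
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        using namespace Aws::Rekognition::Model;

        // Reference both images via S3 rather than base64-encoded bytes.
        Image source, target;
        source.SetS3Object(S3Object().WithBucket("my-bucket").WithName("source.jpg"));
        target.SetS3Object(S3Object().WithBucket("my-bucket").WithName("target.jpg"));

        CompareFacesRequest request;
        request.WithSourceImage(source)
               .WithTargetImage(target)
               .WithSimilarityThreshold(80.0)           // only matches >= 80% land in FaceMatches
               .WithQualityFilter(QualityFilter::AUTO); // let the service pick the quality bar

        Aws::Rekognition::RekognitionClient client;
        auto outcome = client.CompareFaces(request);
        if (outcome.IsSuccess())
        {
            std::cout << outcome.GetResult().GetFaceMatches().size()
                      << " matching face(s)" << std::endl;
        }
        else
        {
            std::cout << outcome.GetError().GetMessage() << std::endl;
        }
    }
    Aws::ShutdownAPI(options);
    return 0;
}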


@@ -0,0 +1,331 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/model/ComparedSourceImageFace.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/rekognition/model/OrientationCorrection.h>
#include <aws/rekognition/model/CompareFacesMatch.h>
#include <aws/rekognition/model/ComparedFace.h>
#include <utility>
namespace Aws
{
template<typename RESULT_TYPE>
class AmazonWebServiceResult;
namespace Utils
{
namespace Json
{
class JsonValue;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
class AWS_REKOGNITION_API CompareFacesResult
{
public:
CompareFacesResult();
CompareFacesResult(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
CompareFacesResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
/**
* <p>The face in the source image that was used for comparison.</p>
*/
inline const ComparedSourceImageFace& GetSourceImageFace() const{ return m_sourceImageFace; }
/**
* <p>The face in the source image that was used for comparison.</p>
*/
inline void SetSourceImageFace(const ComparedSourceImageFace& value) { m_sourceImageFace = value; }
/**
* <p>The face in the source image that was used for comparison.</p>
*/
inline void SetSourceImageFace(ComparedSourceImageFace&& value) { m_sourceImageFace = std::move(value); }
/**
* <p>The face in the source image that was used for comparison.</p>
*/
inline CompareFacesResult& WithSourceImageFace(const ComparedSourceImageFace& value) { SetSourceImageFace(value); return *this;}
/**
* <p>The face in the source image that was used for comparison.</p>
*/
inline CompareFacesResult& WithSourceImageFace(ComparedSourceImageFace&& value) { SetSourceImageFace(std::move(value)); return *this;}
/**
* <p>An array of faces in the target image that match the source image face. Each
* <code>CompareFacesMatch</code> object provides the bounding box, the confidence
* level that the bounding box contains a face, and the similarity score for the
* face in the bounding box and the face in the source image.</p>
*/
inline const Aws::Vector<CompareFacesMatch>& GetFaceMatches() const{ return m_faceMatches; }
/**
* <p>An array of faces in the target image that match the source image face. Each
* <code>CompareFacesMatch</code> object provides the bounding box, the confidence
* level that the bounding box contains a face, and the similarity score for the
* face in the bounding box and the face in the source image.</p>
*/
inline void SetFaceMatches(const Aws::Vector<CompareFacesMatch>& value) { m_faceMatches = value; }
/**
* <p>An array of faces in the target image that match the source image face. Each
* <code>CompareFacesMatch</code> object provides the bounding box, the confidence
* level that the bounding box contains a face, and the similarity score for the
* face in the bounding box and the face in the source image.</p>
*/
inline void SetFaceMatches(Aws::Vector<CompareFacesMatch>&& value) { m_faceMatches = std::move(value); }
/**
* <p>An array of faces in the target image that match the source image face. Each
* <code>CompareFacesMatch</code> object provides the bounding box, the confidence
* level that the bounding box contains a face, and the similarity score for the
* face in the bounding box and the face in the source image.</p>
*/
inline CompareFacesResult& WithFaceMatches(const Aws::Vector<CompareFacesMatch>& value) { SetFaceMatches(value); return *this;}
/**
* <p>An array of faces in the target image that match the source image face. Each
* <code>CompareFacesMatch</code> object provides the bounding box, the confidence
* level that the bounding box contains a face, and the similarity score for the
* face in the bounding box and the face in the source image.</p>
*/
inline CompareFacesResult& WithFaceMatches(Aws::Vector<CompareFacesMatch>&& value) { SetFaceMatches(std::move(value)); return *this;}
/**
* <p>An array of faces in the target image that match the source image face. Each
* <code>CompareFacesMatch</code> object provides the bounding box, the confidence
* level that the bounding box contains a face, and the similarity score for the
* face in the bounding box and the face in the source image.</p>
*/
inline CompareFacesResult& AddFaceMatches(const CompareFacesMatch& value) { m_faceMatches.push_back(value); return *this; }
/**
* <p>An array of faces in the target image that match the source image face. Each
* <code>CompareFacesMatch</code> object provides the bounding box, the confidence
* level that the bounding box contains a face, and the similarity score for the
* face in the bounding box and the face in the source image.</p>
*/
inline CompareFacesResult& AddFaceMatches(CompareFacesMatch&& value) { m_faceMatches.push_back(std::move(value)); return *this; }
/**
* <p>An array of faces in the target image that did not match the source image
* face.</p>
*/
inline const Aws::Vector<ComparedFace>& GetUnmatchedFaces() const{ return m_unmatchedFaces; }
/**
* <p>An array of faces in the target image that did not match the source image
* face.</p>
*/
inline void SetUnmatchedFaces(const Aws::Vector<ComparedFace>& value) { m_unmatchedFaces = value; }
/**
* <p>An array of faces in the target image that did not match the source image
* face.</p>
*/
inline void SetUnmatchedFaces(Aws::Vector<ComparedFace>&& value) { m_unmatchedFaces = std::move(value); }
/**
* <p>An array of faces in the target image that did not match the source image
* face.</p>
*/
inline CompareFacesResult& WithUnmatchedFaces(const Aws::Vector<ComparedFace>& value) { SetUnmatchedFaces(value); return *this;}
/**
* <p>An array of faces in the target image that did not match the source image
* face.</p>
*/
inline CompareFacesResult& WithUnmatchedFaces(Aws::Vector<ComparedFace>&& value) { SetUnmatchedFaces(std::move(value)); return *this;}
/**
* <p>An array of faces in the target image that did not match the source image
* face.</p>
*/
inline CompareFacesResult& AddUnmatchedFaces(const ComparedFace& value) { m_unmatchedFaces.push_back(value); return *this; }
/**
* <p>An array of faces in the target image that did not match the source image
* face.</p>
*/
inline CompareFacesResult& AddUnmatchedFaces(ComparedFace&& value) { m_unmatchedFaces.push_back(std::move(value)); return *this; }
/**
* <p>The value of <code>SourceImageOrientationCorrection</code> is always
* null.</p> <p>If the input image is in .jpeg format, it might contain
* exchangeable image file format (Exif) metadata that includes the image's
* orientation. Amazon Rekognition uses this orientation information to perform
* image correction. The bounding box coordinates are translated to represent
* object locations after the orientation information in the Exif metadata is used
* to correct the image orientation. Images in .png format don't contain Exif
 * metadata.</p> <p>Amazon Rekognition doesn't perform image correction for images
* in .png format and .jpeg images without orientation information in the image
* Exif metadata. The bounding box coordinates aren't translated and represent the
* object locations before the image is rotated. </p>
*/
inline const OrientationCorrection& GetSourceImageOrientationCorrection() const{ return m_sourceImageOrientationCorrection; }
/**
* <p>The value of <code>SourceImageOrientationCorrection</code> is always
* null.</p> <p>If the input image is in .jpeg format, it might contain
* exchangeable image file format (Exif) metadata that includes the image's
* orientation. Amazon Rekognition uses this orientation information to perform
* image correction. The bounding box coordinates are translated to represent
* object locations after the orientation information in the Exif metadata is used
* to correct the image orientation. Images in .png format don't contain Exif
 * metadata.</p> <p>Amazon Rekognition doesn't perform image correction for images
* in .png format and .jpeg images without orientation information in the image
* Exif metadata. The bounding box coordinates aren't translated and represent the
* object locations before the image is rotated. </p>
*/
inline void SetSourceImageOrientationCorrection(const OrientationCorrection& value) { m_sourceImageOrientationCorrection = value; }
/**
* <p>The value of <code>SourceImageOrientationCorrection</code> is always
* null.</p> <p>If the input image is in .jpeg format, it might contain
* exchangeable image file format (Exif) metadata that includes the image's
* orientation. Amazon Rekognition uses this orientation information to perform
* image correction. The bounding box coordinates are translated to represent
* object locations after the orientation information in the Exif metadata is used
* to correct the image orientation. Images in .png format don't contain Exif
 * metadata.</p> <p>Amazon Rekognition doesn't perform image correction for images
* in .png format and .jpeg images without orientation information in the image
* Exif metadata. The bounding box coordinates aren't translated and represent the
* object locations before the image is rotated. </p>
*/
inline void SetSourceImageOrientationCorrection(OrientationCorrection&& value) { m_sourceImageOrientationCorrection = std::move(value); }
/**
* <p>The value of <code>SourceImageOrientationCorrection</code> is always
* null.</p> <p>If the input image is in .jpeg format, it might contain
* exchangeable image file format (Exif) metadata that includes the image's
* orientation. Amazon Rekognition uses this orientation information to perform
* image correction. The bounding box coordinates are translated to represent
* object locations after the orientation information in the Exif metadata is used
* to correct the image orientation. Images in .png format don't contain Exif
 * metadata.</p> <p>Amazon Rekognition doesn't perform image correction for images
* in .png format and .jpeg images without orientation information in the image
* Exif metadata. The bounding box coordinates aren't translated and represent the
* object locations before the image is rotated. </p>
*/
inline CompareFacesResult& WithSourceImageOrientationCorrection(const OrientationCorrection& value) { SetSourceImageOrientationCorrection(value); return *this;}
/**
* <p>The value of <code>SourceImageOrientationCorrection</code> is always
* null.</p> <p>If the input image is in .jpeg format, it might contain
* exchangeable image file format (Exif) metadata that includes the image's
* orientation. Amazon Rekognition uses this orientation information to perform
* image correction. The bounding box coordinates are translated to represent
* object locations after the orientation information in the Exif metadata is used
* to correct the image orientation. Images in .png format don't contain Exif
 * metadata.</p> <p>Amazon Rekognition doesn't perform image correction for images
* in .png format and .jpeg images without orientation information in the image
* Exif metadata. The bounding box coordinates aren't translated and represent the
* object locations before the image is rotated. </p>
*/
inline CompareFacesResult& WithSourceImageOrientationCorrection(OrientationCorrection&& value) { SetSourceImageOrientationCorrection(std::move(value)); return *this;}
/**
* <p>The value of <code>TargetImageOrientationCorrection</code> is always
* null.</p> <p>If the input image is in .jpeg format, it might contain
* exchangeable image file format (Exif) metadata that includes the image's
* orientation. Amazon Rekognition uses this orientation information to perform
* image correction. The bounding box coordinates are translated to represent
* object locations after the orientation information in the Exif metadata is used
* to correct the image orientation. Images in .png format don't contain Exif
 * metadata.</p> <p>Amazon Rekognition doesn't perform image correction for images
* in .png format and .jpeg images without orientation information in the image
* Exif metadata. The bounding box coordinates aren't translated and represent the
* object locations before the image is rotated. </p>
*/
inline const OrientationCorrection& GetTargetImageOrientationCorrection() const{ return m_targetImageOrientationCorrection; }
/**
* <p>The value of <code>TargetImageOrientationCorrection</code> is always
* null.</p> <p>If the input image is in .jpeg format, it might contain
* exchangeable image file format (Exif) metadata that includes the image's
* orientation. Amazon Rekognition uses this orientation information to perform
* image correction. The bounding box coordinates are translated to represent
* object locations after the orientation information in the Exif metadata is used
* to correct the image orientation. Images in .png format don't contain Exif
 * metadata.</p> <p>Amazon Rekognition doesn't perform image correction for images
* in .png format and .jpeg images without orientation information in the image
* Exif metadata. The bounding box coordinates aren't translated and represent the
* object locations before the image is rotated. </p>
*/
inline void SetTargetImageOrientationCorrection(const OrientationCorrection& value) { m_targetImageOrientationCorrection = value; }
/**
* <p>The value of <code>TargetImageOrientationCorrection</code> is always
* null.</p> <p>If the input image is in .jpeg format, it might contain
* exchangeable image file format (Exif) metadata that includes the image's
* orientation. Amazon Rekognition uses this orientation information to perform
* image correction. The bounding box coordinates are translated to represent
* object locations after the orientation information in the Exif metadata is used
* to correct the image orientation. Images in .png format don't contain Exif
 * metadata.</p> <p>Amazon Rekognition doesn't perform image correction for images
* in .png format and .jpeg images without orientation information in the image
* Exif metadata. The bounding box coordinates aren't translated and represent the
* object locations before the image is rotated. </p>
*/
inline void SetTargetImageOrientationCorrection(OrientationCorrection&& value) { m_targetImageOrientationCorrection = std::move(value); }
/**
* <p>The value of <code>TargetImageOrientationCorrection</code> is always
* null.</p> <p>If the input image is in .jpeg format, it might contain
* exchangeable image file format (Exif) metadata that includes the image's
* orientation. Amazon Rekognition uses this orientation information to perform
* image correction. The bounding box coordinates are translated to represent
* object locations after the orientation information in the Exif metadata is used
* to correct the image orientation. Images in .png format don't contain Exif
 * metadata.</p> <p>Amazon Rekognition doesn't perform image correction for images
* in .png format and .jpeg images without orientation information in the image
* Exif metadata. The bounding box coordinates aren't translated and represent the
* object locations before the image is rotated. </p>
*/
inline CompareFacesResult& WithTargetImageOrientationCorrection(const OrientationCorrection& value) { SetTargetImageOrientationCorrection(value); return *this;}
/**
* <p>The value of <code>TargetImageOrientationCorrection</code> is always
* null.</p> <p>If the input image is in .jpeg format, it might contain
* exchangeable image file format (Exif) metadata that includes the image's
* orientation. Amazon Rekognition uses this orientation information to perform
* image correction. The bounding box coordinates are translated to represent
* object locations after the orientation information in the Exif metadata is used
* to correct the image orientation. Images in .png format don't contain Exif
 * metadata.</p> <p>Amazon Rekognition doesn't perform image correction for images
* in .png format and .jpeg images without orientation information in the image
* Exif metadata. The bounding box coordinates aren't translated and represent the
* object locations before the image is rotated. </p>
*/
inline CompareFacesResult& WithTargetImageOrientationCorrection(OrientationCorrection&& value) { SetTargetImageOrientationCorrection(std::move(value)); return *this;}
private:
ComparedSourceImageFace m_sourceImageFace;
Aws::Vector<CompareFacesMatch> m_faceMatches;
Aws::Vector<ComparedFace> m_unmatchedFaces;
OrientationCorrection m_sourceImageOrientationCorrection;
OrientationCorrection m_targetImageOrientationCorrection;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
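
As a usage sketch (not part of this commit): the result above is typically consumed through its getters. The RekognitionClient::CompareFaces call and the CompareFacesMatch accessors GetSimilarity()/GetFace() live in headers outside this diff, so they are assumptions modeled on the SDK's usual pattern.

#include <aws/rekognition/model/CompareFacesResult.h>
#include <iostream>

// Walks a CompareFacesResult; GetSimilarity()/GetFace() are assumed from
// CompareFacesMatch, whose header is not part of this diff.
void PrintMatches(const Aws::Rekognition::Model::CompareFacesResult& result)
{
    for (const auto& match : result.GetFaceMatches())
    {
        std::cout << "similarity " << match.GetSimilarity()
                  << ", box confidence "
                  << match.GetFace().GetConfidence() << "\n";
    }
    std::cout << result.GetUnmatchedFaces().size() << " unmatched face(s)\n";
}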

View File

@@ -0,0 +1,220 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/model/BoundingBox.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/rekognition/model/Pose.h>
#include <aws/rekognition/model/ImageQuality.h>
#include <aws/rekognition/model/Landmark.h>
#include <utility>
namespace Aws
{
namespace Utils
{
namespace Json
{
class JsonValue;
class JsonView;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
/**
* <p>Provides face metadata for target image faces that are analyzed by
* <code>CompareFaces</code> and <code>RecognizeCelebrities</code>.</p><p><h3>See
* Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/rekognition-2016-06-27/ComparedFace">AWS
* API Reference</a></p>
*/
class AWS_REKOGNITION_API ComparedFace
{
public:
ComparedFace();
ComparedFace(Aws::Utils::Json::JsonView jsonValue);
ComparedFace& operator=(Aws::Utils::Json::JsonView jsonValue);
Aws::Utils::Json::JsonValue Jsonize() const;
/**
* <p>Bounding box of the face.</p>
*/
inline const BoundingBox& GetBoundingBox() const{ return m_boundingBox; }
/**
* <p>Bounding box of the face.</p>
*/
inline bool BoundingBoxHasBeenSet() const { return m_boundingBoxHasBeenSet; }
/**
* <p>Bounding box of the face.</p>
*/
inline void SetBoundingBox(const BoundingBox& value) { m_boundingBoxHasBeenSet = true; m_boundingBox = value; }
/**
* <p>Bounding box of the face.</p>
*/
inline void SetBoundingBox(BoundingBox&& value) { m_boundingBoxHasBeenSet = true; m_boundingBox = std::move(value); }
/**
* <p>Bounding box of the face.</p>
*/
inline ComparedFace& WithBoundingBox(const BoundingBox& value) { SetBoundingBox(value); return *this;}
/**
* <p>Bounding box of the face.</p>
*/
inline ComparedFace& WithBoundingBox(BoundingBox&& value) { SetBoundingBox(std::move(value)); return *this;}
/**
* <p>Level of confidence that what the bounding box contains is a face.</p>
*/
inline double GetConfidence() const{ return m_confidence; }
/**
* <p>Level of confidence that what the bounding box contains is a face.</p>
*/
inline bool ConfidenceHasBeenSet() const { return m_confidenceHasBeenSet; }
/**
* <p>Level of confidence that what the bounding box contains is a face.</p>
*/
inline void SetConfidence(double value) { m_confidenceHasBeenSet = true; m_confidence = value; }
/**
* <p>Level of confidence that what the bounding box contains is a face.</p>
*/
inline ComparedFace& WithConfidence(double value) { SetConfidence(value); return *this;}
/**
* <p>An array of facial landmarks.</p>
*/
inline const Aws::Vector<Landmark>& GetLandmarks() const{ return m_landmarks; }
/**
* <p>An array of facial landmarks.</p>
*/
inline bool LandmarksHasBeenSet() const { return m_landmarksHasBeenSet; }
/**
* <p>An array of facial landmarks.</p>
*/
inline void SetLandmarks(const Aws::Vector<Landmark>& value) { m_landmarksHasBeenSet = true; m_landmarks = value; }
/**
* <p>An array of facial landmarks.</p>
*/
inline void SetLandmarks(Aws::Vector<Landmark>&& value) { m_landmarksHasBeenSet = true; m_landmarks = std::move(value); }
/**
* <p>An array of facial landmarks.</p>
*/
inline ComparedFace& WithLandmarks(const Aws::Vector<Landmark>& value) { SetLandmarks(value); return *this;}
/**
* <p>An array of facial landmarks.</p>
*/
inline ComparedFace& WithLandmarks(Aws::Vector<Landmark>&& value) { SetLandmarks(std::move(value)); return *this;}
/**
* <p>An array of facial landmarks.</p>
*/
inline ComparedFace& AddLandmarks(const Landmark& value) { m_landmarksHasBeenSet = true; m_landmarks.push_back(value); return *this; }
/**
* <p>An array of facial landmarks.</p>
*/
inline ComparedFace& AddLandmarks(Landmark&& value) { m_landmarksHasBeenSet = true; m_landmarks.push_back(std::move(value)); return *this; }
/**
* <p>Indicates the pose of the face as determined by its pitch, roll, and yaw.</p>
*/
inline const Pose& GetPose() const{ return m_pose; }
/**
* <p>Indicates the pose of the face as determined by its pitch, roll, and yaw.</p>
*/
inline bool PoseHasBeenSet() const { return m_poseHasBeenSet; }
/**
* <p>Indicates the pose of the face as determined by its pitch, roll, and yaw.</p>
*/
inline void SetPose(const Pose& value) { m_poseHasBeenSet = true; m_pose = value; }
/**
* <p>Indicates the pose of the face as determined by its pitch, roll, and yaw.</p>
*/
inline void SetPose(Pose&& value) { m_poseHasBeenSet = true; m_pose = std::move(value); }
/**
* <p>Indicates the pose of the face as determined by its pitch, roll, and yaw.</p>
*/
inline ComparedFace& WithPose(const Pose& value) { SetPose(value); return *this;}
/**
* <p>Indicates the pose of the face as determined by its pitch, roll, and yaw.</p>
*/
inline ComparedFace& WithPose(Pose&& value) { SetPose(std::move(value)); return *this;}
/**
* <p>Identifies face image brightness and sharpness. </p>
*/
inline const ImageQuality& GetQuality() const{ return m_quality; }
/**
* <p>Identifies face image brightness and sharpness. </p>
*/
inline bool QualityHasBeenSet() const { return m_qualityHasBeenSet; }
/**
* <p>Identifies face image brightness and sharpness. </p>
*/
inline void SetQuality(const ImageQuality& value) { m_qualityHasBeenSet = true; m_quality = value; }
/**
* <p>Identifies face image brightness and sharpness. </p>
*/
inline void SetQuality(ImageQuality&& value) { m_qualityHasBeenSet = true; m_quality = std::move(value); }
/**
* <p>Identifies face image brightness and sharpness. </p>
*/
inline ComparedFace& WithQuality(const ImageQuality& value) { SetQuality(value); return *this;}
/**
* <p>Identifies face image brightness and sharpness. </p>
*/
inline ComparedFace& WithQuality(ImageQuality&& value) { SetQuality(std::move(value)); return *this;}
private:
BoundingBox m_boundingBox;
bool m_boundingBoxHasBeenSet;
double m_confidence;
bool m_confidenceHasBeenSet;
Aws::Vector<Landmark> m_landmarks;
bool m_landmarksHasBeenSet;
Pose m_pose;
bool m_poseHasBeenSet;
ImageQuality m_quality;
bool m_qualityHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
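
A small sketch of turning the face geometry above into pixels; the BoundingBox accessors (GetLeft/GetTop/GetWidth/GetHeight, returning ratios of the image dimensions) come from a header not shown in this diff and are assumptions.

#include <aws/rekognition/model/ComparedFace.h>
#include <cstdio>

// Converts the ratio-based bounding box of a ComparedFace into pixel
// coordinates; the BoundingBox getters below are assumed.
void PrintFaceBox(const Aws::Rekognition::Model::ComparedFace& face,
                  int imageWidth, int imageHeight)
{
    const auto& box = face.GetBoundingBox();
    std::printf("face at (%d, %d) %dx%d, confidence %.1f%%, %zu landmark(s)\n",
                static_cast<int>(box.GetLeft() * imageWidth),
                static_cast<int>(box.GetTop() * imageHeight),
                static_cast<int>(box.GetWidth() * imageWidth),
                static_cast<int>(box.GetHeight() * imageHeight),
                face.GetConfidence(),
                face.GetLandmarks().size());
}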

View File

@@ -0,0 +1,106 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/model/BoundingBox.h>
#include <utility>
namespace Aws
{
namespace Utils
{
namespace Json
{
class JsonValue;
class JsonView;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
/**
* <p>Type that describes the face Amazon Rekognition chose to compare with the
 * faces in the target. This contains a bounding box for the selected face and
 * the confidence level that the bounding box contains a face. Note that Amazon
* Rekognition selects the largest face in the source image for this comparison.
* </p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/rekognition-2016-06-27/ComparedSourceImageFace">AWS
* API Reference</a></p>
*/
class AWS_REKOGNITION_API ComparedSourceImageFace
{
public:
ComparedSourceImageFace();
ComparedSourceImageFace(Aws::Utils::Json::JsonView jsonValue);
ComparedSourceImageFace& operator=(Aws::Utils::Json::JsonView jsonValue);
Aws::Utils::Json::JsonValue Jsonize() const;
/**
* <p>Bounding box of the face.</p>
*/
inline const BoundingBox& GetBoundingBox() const{ return m_boundingBox; }
/**
* <p>Bounding box of the face.</p>
*/
inline bool BoundingBoxHasBeenSet() const { return m_boundingBoxHasBeenSet; }
/**
* <p>Bounding box of the face.</p>
*/
inline void SetBoundingBox(const BoundingBox& value) { m_boundingBoxHasBeenSet = true; m_boundingBox = value; }
/**
* <p>Bounding box of the face.</p>
*/
inline void SetBoundingBox(BoundingBox&& value) { m_boundingBoxHasBeenSet = true; m_boundingBox = std::move(value); }
/**
* <p>Bounding box of the face.</p>
*/
inline ComparedSourceImageFace& WithBoundingBox(const BoundingBox& value) { SetBoundingBox(value); return *this;}
/**
* <p>Bounding box of the face.</p>
*/
inline ComparedSourceImageFace& WithBoundingBox(BoundingBox&& value) { SetBoundingBox(std::move(value)); return *this;}
/**
* <p>Confidence level that the selected bounding box contains a face.</p>
*/
inline double GetConfidence() const{ return m_confidence; }
/**
* <p>Confidence level that the selected bounding box contains a face.</p>
*/
inline bool ConfidenceHasBeenSet() const { return m_confidenceHasBeenSet; }
/**
* <p>Confidence level that the selected bounding box contains a face.</p>
*/
inline void SetConfidence(double value) { m_confidenceHasBeenSet = true; m_confidence = value; }
/**
* <p>Confidence level that the selected bounding box contains a face.</p>
*/
inline ComparedSourceImageFace& WithConfidence(double value) { SetConfidence(value); return *this;}
private:
BoundingBox m_boundingBox;
bool m_boundingBoxHasBeenSet;
double m_confidence;
bool m_confidenceHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws

View File

@@ -0,0 +1,31 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
namespace Aws
{
namespace Rekognition
{
namespace Model
{
enum class ContentClassifier
{
NOT_SET,
FreeOfPersonallyIdentifiableInformation,
FreeOfAdultContent
};
namespace ContentClassifierMapper
{
AWS_REKOGNITION_API ContentClassifier GetContentClassifierForName(const Aws::String& name);
AWS_REKOGNITION_API Aws::String GetNameForContentClassifier(ContentClassifier value);
} // namespace ContentClassifierMapper
} // namespace Model
} // namespace Rekognition
} // namespace Aws
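
A minimal round-trip through the mapper declared above, e.g. when human-review settings are read from configuration; the exact strings accepted by GetContentClassifierForName are assumed to match the enumerator spellings.

#include <aws/rekognition/model/ContentClassifier.h>
#include <iostream>

int main()
{
    using namespace Aws::Rekognition::Model;
    ContentClassifier c = ContentClassifierMapper::GetContentClassifierForName(
        "FreeOfAdultContent");  // spelling assumed to match the enumerator
    std::cout << ContentClassifierMapper::GetNameForContentClassifier(c)
              << "\n";  // prints the canonical name back
    return 0;
}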

View File

@@ -0,0 +1,107 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/model/ModerationLabel.h>
#include <utility>
namespace Aws
{
namespace Utils
{
namespace Json
{
class JsonValue;
class JsonView;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
/**
* <p>Information about an unsafe content label detection in a stored
* video.</p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/rekognition-2016-06-27/ContentModerationDetection">AWS
* API Reference</a></p>
*/
class AWS_REKOGNITION_API ContentModerationDetection
{
public:
ContentModerationDetection();
ContentModerationDetection(Aws::Utils::Json::JsonView jsonValue);
ContentModerationDetection& operator=(Aws::Utils::Json::JsonView jsonValue);
Aws::Utils::Json::JsonValue Jsonize() const;
/**
* <p>Time, in milliseconds from the beginning of the video, that the unsafe
* content label was detected.</p>
*/
inline long long GetTimestamp() const{ return m_timestamp; }
/**
* <p>Time, in milliseconds from the beginning of the video, that the unsafe
* content label was detected.</p>
*/
inline bool TimestampHasBeenSet() const { return m_timestampHasBeenSet; }
/**
* <p>Time, in milliseconds from the beginning of the video, that the unsafe
* content label was detected.</p>
*/
inline void SetTimestamp(long long value) { m_timestampHasBeenSet = true; m_timestamp = value; }
/**
* <p>Time, in milliseconds from the beginning of the video, that the unsafe
* content label was detected.</p>
*/
inline ContentModerationDetection& WithTimestamp(long long value) { SetTimestamp(value); return *this;}
/**
 * <p>The unsafe content label detected in the stored video.</p>
*/
inline const ModerationLabel& GetModerationLabel() const{ return m_moderationLabel; }
/**
 * <p>The unsafe content label detected in the stored video.</p>
*/
inline bool ModerationLabelHasBeenSet() const { return m_moderationLabelHasBeenSet; }
/**
 * <p>The unsafe content label detected in the stored video.</p>
*/
inline void SetModerationLabel(const ModerationLabel& value) { m_moderationLabelHasBeenSet = true; m_moderationLabel = value; }
/**
 * <p>The unsafe content label detected in the stored video.</p>
*/
inline void SetModerationLabel(ModerationLabel&& value) { m_moderationLabelHasBeenSet = true; m_moderationLabel = std::move(value); }
/**
 * <p>The unsafe content label detected in the stored video.</p>
*/
inline ContentModerationDetection& WithModerationLabel(const ModerationLabel& value) { SetModerationLabel(value); return *this;}
/**
 * <p>The unsafe content label detected in the stored video.</p>
*/
inline ContentModerationDetection& WithModerationLabel(ModerationLabel&& value) { SetModerationLabel(std::move(value)); return *this;}
private:
long long m_timestamp;
bool m_timestampHasBeenSet;
ModerationLabel m_moderationLabel;
bool m_moderationLabelHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
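
A sketch of formatting one detection, e.g. while paging through GetContentModeration results; the ModerationLabel accessors GetName()/GetConfidence() belong to a header outside this diff and are assumptions.

#include <aws/rekognition/model/ContentModerationDetection.h>
#include <iostream>

// Prints when an unsafe-content label was seen and what it was; the
// ModerationLabel getters used here are assumed.
void PrintDetection(const Aws::Rekognition::Model::ContentModerationDetection& d)
{
    std::cout << d.GetTimestamp() << " ms: "
              << d.GetModerationLabel().GetName() << " ("
              << d.GetModerationLabel().GetConfidence() << "%)\n";
}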

View File

@@ -0,0 +1,31 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
namespace Aws
{
namespace Rekognition
{
namespace Model
{
enum class ContentModerationSortBy
{
NOT_SET,
NAME,
TIMESTAMP
};
namespace ContentModerationSortByMapper
{
AWS_REKOGNITION_API ContentModerationSortBy GetContentModerationSortByForName(const Aws::String& name);
AWS_REKOGNITION_API Aws::String GetNameForContentModerationSortBy(ContentModerationSortBy value);
} // namespace ContentModerationSortByMapper
} // namespace Model
} // namespace Rekognition
} // namespace Aws

View File

@@ -0,0 +1,85 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/RekognitionRequest.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <utility>
namespace Aws
{
namespace Rekognition
{
namespace Model
{
/**
*/
class AWS_REKOGNITION_API CreateCollectionRequest : public RekognitionRequest
{
public:
CreateCollectionRequest();
    // The service request name is the operation name that sends this request out;
    // each operation should have a unique request name, so that we can get the operation's name from its request.
    // Note: this is not true for responses; multiple operations may share the same response name,
    // so we cannot get the operation's name from a response.
inline virtual const char* GetServiceRequestName() const override { return "CreateCollection"; }
Aws::String SerializePayload() const override;
Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
/**
* <p>ID for the collection that you are creating.</p>
*/
inline const Aws::String& GetCollectionId() const{ return m_collectionId; }
/**
* <p>ID for the collection that you are creating.</p>
*/
inline bool CollectionIdHasBeenSet() const { return m_collectionIdHasBeenSet; }
/**
* <p>ID for the collection that you are creating.</p>
*/
inline void SetCollectionId(const Aws::String& value) { m_collectionIdHasBeenSet = true; m_collectionId = value; }
/**
* <p>ID for the collection that you are creating.</p>
*/
inline void SetCollectionId(Aws::String&& value) { m_collectionIdHasBeenSet = true; m_collectionId = std::move(value); }
/**
* <p>ID for the collection that you are creating.</p>
*/
inline void SetCollectionId(const char* value) { m_collectionIdHasBeenSet = true; m_collectionId.assign(value); }
/**
* <p>ID for the collection that you are creating.</p>
*/
inline CreateCollectionRequest& WithCollectionId(const Aws::String& value) { SetCollectionId(value); return *this;}
/**
* <p>ID for the collection that you are creating.</p>
*/
inline CreateCollectionRequest& WithCollectionId(Aws::String&& value) { SetCollectionId(std::move(value)); return *this;}
/**
* <p>ID for the collection that you are creating.</p>
*/
inline CreateCollectionRequest& WithCollectionId(const char* value) { SetCollectionId(value); return *this;}
private:
Aws::String m_collectionId;
bool m_collectionIdHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
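
Because every With* member above returns *this, a request can be assembled in a single expression. This sketch uses only members declared in this file; the collection ID is a placeholder.

#include <aws/rekognition/model/CreateCollectionRequest.h>

// Builds a request with the fluent setters; "my-collection" is a
// placeholder ID, exercising the const char* overload.
Aws::Rekognition::Model::CreateCollectionRequest MakeCreateCollectionRequest()
{
    return Aws::Rekognition::Model::CreateCollectionRequest()
        .WithCollectionId("my-collection");
}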

View File

@@ -0,0 +1,147 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <utility>
namespace Aws
{
template<typename RESULT_TYPE>
class AmazonWebServiceResult;
namespace Utils
{
namespace Json
{
class JsonValue;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
class AWS_REKOGNITION_API CreateCollectionResult
{
public:
CreateCollectionResult();
CreateCollectionResult(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
CreateCollectionResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
/**
* <p>HTTP status code indicating the result of the operation.</p>
*/
inline int GetStatusCode() const{ return m_statusCode; }
/**
* <p>HTTP status code indicating the result of the operation.</p>
*/
inline void SetStatusCode(int value) { m_statusCode = value; }
/**
* <p>HTTP status code indicating the result of the operation.</p>
*/
inline CreateCollectionResult& WithStatusCode(int value) { SetStatusCode(value); return *this;}
/**
* <p>Amazon Resource Name (ARN) of the collection. You can use this to manage
* permissions on your resources. </p>
*/
inline const Aws::String& GetCollectionArn() const{ return m_collectionArn; }
/**
* <p>Amazon Resource Name (ARN) of the collection. You can use this to manage
* permissions on your resources. </p>
*/
inline void SetCollectionArn(const Aws::String& value) { m_collectionArn = value; }
/**
* <p>Amazon Resource Name (ARN) of the collection. You can use this to manage
* permissions on your resources. </p>
*/
inline void SetCollectionArn(Aws::String&& value) { m_collectionArn = std::move(value); }
/**
* <p>Amazon Resource Name (ARN) of the collection. You can use this to manage
* permissions on your resources. </p>
*/
inline void SetCollectionArn(const char* value) { m_collectionArn.assign(value); }
/**
* <p>Amazon Resource Name (ARN) of the collection. You can use this to manage
* permissions on your resources. </p>
*/
inline CreateCollectionResult& WithCollectionArn(const Aws::String& value) { SetCollectionArn(value); return *this;}
/**
* <p>Amazon Resource Name (ARN) of the collection. You can use this to manage
* permissions on your resources. </p>
*/
inline CreateCollectionResult& WithCollectionArn(Aws::String&& value) { SetCollectionArn(std::move(value)); return *this;}
/**
* <p>Amazon Resource Name (ARN) of the collection. You can use this to manage
* permissions on your resources. </p>
*/
inline CreateCollectionResult& WithCollectionArn(const char* value) { SetCollectionArn(value); return *this;}
/**
* <p>Version number of the face detection model associated with the collection you
* are creating.</p>
*/
inline const Aws::String& GetFaceModelVersion() const{ return m_faceModelVersion; }
/**
* <p>Version number of the face detection model associated with the collection you
* are creating.</p>
*/
inline void SetFaceModelVersion(const Aws::String& value) { m_faceModelVersion = value; }
/**
* <p>Version number of the face detection model associated with the collection you
* are creating.</p>
*/
inline void SetFaceModelVersion(Aws::String&& value) { m_faceModelVersion = std::move(value); }
/**
* <p>Version number of the face detection model associated with the collection you
* are creating.</p>
*/
inline void SetFaceModelVersion(const char* value) { m_faceModelVersion.assign(value); }
/**
* <p>Version number of the face detection model associated with the collection you
* are creating.</p>
*/
inline CreateCollectionResult& WithFaceModelVersion(const Aws::String& value) { SetFaceModelVersion(value); return *this;}
/**
* <p>Version number of the face detection model associated with the collection you
* are creating.</p>
*/
inline CreateCollectionResult& WithFaceModelVersion(Aws::String&& value) { SetFaceModelVersion(std::move(value)); return *this;}
/**
* <p>Version number of the face detection model associated with the collection you
* are creating.</p>
*/
inline CreateCollectionResult& WithFaceModelVersion(const char* value) { SetFaceModelVersion(value); return *this;}
private:
int m_statusCode;
Aws::String m_collectionArn;
Aws::String m_faceModelVersion;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
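
An end-to-end sketch tying the request and result together. RekognitionClient::CreateCollection and its outcome wrapper are declared in headers outside this diff, so the call shape below is an assumption modeled on the SDK's usual pattern.

#include <aws/core/Aws.h>
#include <aws/rekognition/RekognitionClient.h>
#include <aws/rekognition/model/CreateCollectionRequest.h>
#include <iostream>

int main()
{
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        Aws::Rekognition::RekognitionClient client;  // default credential chain
        Aws::Rekognition::Model::CreateCollectionRequest request;
        request.SetCollectionId("my-collection");
        auto outcome = client.CreateCollection(request);  // assumed operation
        if (outcome.IsSuccess())
        {
            const auto& result = outcome.GetResult();
            std::cout << "HTTP " << result.GetStatusCode() << ", ARN "
                      << result.GetCollectionArn() << ", face model "
                      << result.GetFaceModelVersion() << "\n";
        }
        else
        {
            std::cerr << outcome.GetError().GetMessage() << "\n";
        }
    }
    Aws::ShutdownAPI(options);
    return 0;
}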

View File

@@ -0,0 +1,85 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/RekognitionRequest.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <utility>
namespace Aws
{
namespace Rekognition
{
namespace Model
{
/**
*/
class AWS_REKOGNITION_API CreateProjectRequest : public RekognitionRequest
{
public:
CreateProjectRequest();
    // The service request name is the operation name that sends this request out;
    // each operation should have a unique request name, so that we can get the operation's name from its request.
    // Note: this is not true for responses; multiple operations may share the same response name,
    // so we cannot get the operation's name from a response.
inline virtual const char* GetServiceRequestName() const override { return "CreateProject"; }
Aws::String SerializePayload() const override;
Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
/**
* <p>The name of the project to create.</p>
*/
inline const Aws::String& GetProjectName() const{ return m_projectName; }
/**
* <p>The name of the project to create.</p>
*/
inline bool ProjectNameHasBeenSet() const { return m_projectNameHasBeenSet; }
/**
* <p>The name of the project to create.</p>
*/
inline void SetProjectName(const Aws::String& value) { m_projectNameHasBeenSet = true; m_projectName = value; }
/**
* <p>The name of the project to create.</p>
*/
inline void SetProjectName(Aws::String&& value) { m_projectNameHasBeenSet = true; m_projectName = std::move(value); }
/**
* <p>The name of the project to create.</p>
*/
inline void SetProjectName(const char* value) { m_projectNameHasBeenSet = true; m_projectName.assign(value); }
/**
* <p>The name of the project to create.</p>
*/
inline CreateProjectRequest& WithProjectName(const Aws::String& value) { SetProjectName(value); return *this;}
/**
* <p>The name of the project to create.</p>
*/
inline CreateProjectRequest& WithProjectName(Aws::String&& value) { SetProjectName(std::move(value)); return *this;}
/**
* <p>The name of the project to create.</p>
*/
inline CreateProjectRequest& WithProjectName(const char* value) { SetProjectName(value); return *this;}
private:
Aws::String m_projectName;
bool m_projectNameHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws

View File

@@ -0,0 +1,84 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <utility>
namespace Aws
{
template<typename RESULT_TYPE>
class AmazonWebServiceResult;
namespace Utils
{
namespace Json
{
class JsonValue;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
class AWS_REKOGNITION_API CreateProjectResult
{
public:
CreateProjectResult();
CreateProjectResult(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
CreateProjectResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
/**
* <p>The Amazon Resource Name (ARN) of the new project. You can use the ARN to
* configure IAM access to the project. </p>
*/
inline const Aws::String& GetProjectArn() const{ return m_projectArn; }
/**
* <p>The Amazon Resource Name (ARN) of the new project. You can use the ARN to
* configure IAM access to the project. </p>
*/
inline void SetProjectArn(const Aws::String& value) { m_projectArn = value; }
/**
* <p>The Amazon Resource Name (ARN) of the new project. You can use the ARN to
* configure IAM access to the project. </p>
*/
inline void SetProjectArn(Aws::String&& value) { m_projectArn = std::move(value); }
/**
* <p>The Amazon Resource Name (ARN) of the new project. You can use the ARN to
* configure IAM access to the project. </p>
*/
inline void SetProjectArn(const char* value) { m_projectArn.assign(value); }
/**
* <p>The Amazon Resource Name (ARN) of the new project. You can use the ARN to
* configure IAM access to the project. </p>
*/
inline CreateProjectResult& WithProjectArn(const Aws::String& value) { SetProjectArn(value); return *this;}
/**
* <p>The Amazon Resource Name (ARN) of the new project. You can use the ARN to
* configure IAM access to the project. </p>
*/
inline CreateProjectResult& WithProjectArn(Aws::String&& value) { SetProjectArn(std::move(value)); return *this;}
/**
* <p>The Amazon Resource Name (ARN) of the new project. You can use the ARN to
* configure IAM access to the project. </p>
*/
inline CreateProjectResult& WithProjectArn(const char* value) { SetProjectArn(value); return *this;}
private:
Aws::String m_projectArn;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
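
The ProjectArn returned here is the handle that the CreateProjectVersionRequest in the next file takes through WithProjectArn. A hedged helper; RekognitionClient::CreateProject is assumed from the client header, which is not in this diff.

#include <aws/rekognition/RekognitionClient.h>
#include <aws/rekognition/model/CreateProjectRequest.h>

// Creates a Custom Labels project and returns its ARN, or an empty string
// on failure; client.CreateProject is an assumed operation name.
Aws::String CreateProjectOrEmpty(Aws::Rekognition::RekognitionClient& client,
                                 const char* projectName)
{
    auto outcome = client.CreateProject(
        Aws::Rekognition::Model::CreateProjectRequest().WithProjectName(projectName));
    return outcome.IsSuccess() ? outcome.GetResult().GetProjectArn()
                               : Aws::String();
}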

View File

@@ -0,0 +1,242 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/RekognitionRequest.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/rekognition/model/OutputConfig.h>
#include <aws/rekognition/model/TrainingData.h>
#include <aws/rekognition/model/TestingData.h>
#include <utility>
namespace Aws
{
namespace Rekognition
{
namespace Model
{
/**
*/
class AWS_REKOGNITION_API CreateProjectVersionRequest : public RekognitionRequest
{
public:
CreateProjectVersionRequest();
    // The service request name is the operation name that sends this request out;
    // each operation should have a unique request name, so that we can get the operation's name from its request.
    // Note: this is not true for responses; multiple operations may share the same response name,
    // so we cannot get the operation's name from a response.
inline virtual const char* GetServiceRequestName() const override { return "CreateProjectVersion"; }
Aws::String SerializePayload() const override;
Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
/**
* <p>The ARN of the Amazon Rekognition Custom Labels project that manages the
* model that you want to train.</p>
*/
inline const Aws::String& GetProjectArn() const{ return m_projectArn; }
/**
* <p>The ARN of the Amazon Rekognition Custom Labels project that manages the
* model that you want to train.</p>
*/
inline bool ProjectArnHasBeenSet() const { return m_projectArnHasBeenSet; }
/**
* <p>The ARN of the Amazon Rekognition Custom Labels project that manages the
* model that you want to train.</p>
*/
inline void SetProjectArn(const Aws::String& value) { m_projectArnHasBeenSet = true; m_projectArn = value; }
/**
* <p>The ARN of the Amazon Rekognition Custom Labels project that manages the
* model that you want to train.</p>
*/
inline void SetProjectArn(Aws::String&& value) { m_projectArnHasBeenSet = true; m_projectArn = std::move(value); }
/**
* <p>The ARN of the Amazon Rekognition Custom Labels project that manages the
* model that you want to train.</p>
*/
inline void SetProjectArn(const char* value) { m_projectArnHasBeenSet = true; m_projectArn.assign(value); }
/**
* <p>The ARN of the Amazon Rekognition Custom Labels project that manages the
* model that you want to train.</p>
*/
inline CreateProjectVersionRequest& WithProjectArn(const Aws::String& value) { SetProjectArn(value); return *this;}
/**
* <p>The ARN of the Amazon Rekognition Custom Labels project that manages the
* model that you want to train.</p>
*/
inline CreateProjectVersionRequest& WithProjectArn(Aws::String&& value) { SetProjectArn(std::move(value)); return *this;}
/**
* <p>The ARN of the Amazon Rekognition Custom Labels project that manages the
* model that you want to train.</p>
*/
inline CreateProjectVersionRequest& WithProjectArn(const char* value) { SetProjectArn(value); return *this;}
/**
* <p>A name for the version of the model. This value must be unique.</p>
*/
inline const Aws::String& GetVersionName() const{ return m_versionName; }
/**
* <p>A name for the version of the model. This value must be unique.</p>
*/
inline bool VersionNameHasBeenSet() const { return m_versionNameHasBeenSet; }
/**
* <p>A name for the version of the model. This value must be unique.</p>
*/
inline void SetVersionName(const Aws::String& value) { m_versionNameHasBeenSet = true; m_versionName = value; }
/**
* <p>A name for the version of the model. This value must be unique.</p>
*/
inline void SetVersionName(Aws::String&& value) { m_versionNameHasBeenSet = true; m_versionName = std::move(value); }
/**
* <p>A name for the version of the model. This value must be unique.</p>
*/
inline void SetVersionName(const char* value) { m_versionNameHasBeenSet = true; m_versionName.assign(value); }
/**
* <p>A name for the version of the model. This value must be unique.</p>
*/
inline CreateProjectVersionRequest& WithVersionName(const Aws::String& value) { SetVersionName(value); return *this;}
/**
* <p>A name for the version of the model. This value must be unique.</p>
*/
inline CreateProjectVersionRequest& WithVersionName(Aws::String&& value) { SetVersionName(std::move(value)); return *this;}
/**
* <p>A name for the version of the model. This value must be unique.</p>
*/
inline CreateProjectVersionRequest& WithVersionName(const char* value) { SetVersionName(value); return *this;}
/**
* <p>The Amazon S3 location to store the results of training.</p>
*/
inline const OutputConfig& GetOutputConfig() const{ return m_outputConfig; }
/**
* <p>The Amazon S3 location to store the results of training.</p>
*/
inline bool OutputConfigHasBeenSet() const { return m_outputConfigHasBeenSet; }
/**
* <p>The Amazon S3 location to store the results of training.</p>
*/
inline void SetOutputConfig(const OutputConfig& value) { m_outputConfigHasBeenSet = true; m_outputConfig = value; }
/**
* <p>The Amazon S3 location to store the results of training.</p>
*/
inline void SetOutputConfig(OutputConfig&& value) { m_outputConfigHasBeenSet = true; m_outputConfig = std::move(value); }
/**
* <p>The Amazon S3 location to store the results of training.</p>
*/
inline CreateProjectVersionRequest& WithOutputConfig(const OutputConfig& value) { SetOutputConfig(value); return *this;}
/**
* <p>The Amazon S3 location to store the results of training.</p>
*/
inline CreateProjectVersionRequest& WithOutputConfig(OutputConfig&& value) { SetOutputConfig(std::move(value)); return *this;}
/**
* <p>The dataset to use for training. </p>
*/
inline const TrainingData& GetTrainingData() const{ return m_trainingData; }
/**
* <p>The dataset to use for training. </p>
*/
inline bool TrainingDataHasBeenSet() const { return m_trainingDataHasBeenSet; }
/**
* <p>The dataset to use for training. </p>
*/
inline void SetTrainingData(const TrainingData& value) { m_trainingDataHasBeenSet = true; m_trainingData = value; }
/**
* <p>The dataset to use for training. </p>
*/
inline void SetTrainingData(TrainingData&& value) { m_trainingDataHasBeenSet = true; m_trainingData = std::move(value); }
/**
* <p>The dataset to use for training. </p>
*/
inline CreateProjectVersionRequest& WithTrainingData(const TrainingData& value) { SetTrainingData(value); return *this;}
/**
* <p>The dataset to use for training. </p>
*/
inline CreateProjectVersionRequest& WithTrainingData(TrainingData&& value) { SetTrainingData(std::move(value)); return *this;}
/**
* <p>The dataset to use for testing.</p>
*/
inline const TestingData& GetTestingData() const{ return m_testingData; }
/**
* <p>The dataset to use for testing.</p>
*/
inline bool TestingDataHasBeenSet() const { return m_testingDataHasBeenSet; }
/**
* <p>The dataset to use for testing.</p>
*/
inline void SetTestingData(const TestingData& value) { m_testingDataHasBeenSet = true; m_testingData = value; }
/**
* <p>The dataset to use for testing.</p>
*/
inline void SetTestingData(TestingData&& value) { m_testingDataHasBeenSet = true; m_testingData = std::move(value); }
/**
* <p>The dataset to use for testing.</p>
*/
inline CreateProjectVersionRequest& WithTestingData(const TestingData& value) { SetTestingData(value); return *this;}
/**
* <p>The dataset to use for testing.</p>
*/
inline CreateProjectVersionRequest& WithTestingData(TestingData&& value) { SetTestingData(std::move(value)); return *this;}
private:
Aws::String m_projectArn;
bool m_projectArnHasBeenSet;
Aws::String m_versionName;
bool m_versionNameHasBeenSet;
OutputConfig m_outputConfig;
bool m_outputConfigHasBeenSet;
TrainingData m_trainingData;
bool m_trainingDataHasBeenSet;
TestingData m_testingData;
bool m_testingDataHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
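
A sketch assembling a training request from the pieces above. The OutputConfig, TrainingData, and TestingData setters used here (WithS3Bucket, WithS3KeyPrefix, WithAutoCreate) come from headers not shown in this diff and are assumptions about the usual SDK shape; the request header already pulls in those model types.

#include <aws/rekognition/model/CreateProjectVersionRequest.h>

using namespace Aws::Rekognition::Model;

// Builds a CreateProjectVersionRequest; bucket, prefix, and version name
// are placeholders, and the nested With* members are assumed.
CreateProjectVersionRequest MakeTrainingRequest(const Aws::String& projectArn)
{
    return CreateProjectVersionRequest()
        .WithProjectArn(projectArn)
        .WithVersionName("v1")  // must be unique within the project
        .WithOutputConfig(OutputConfig()
                              .WithS3Bucket("my-training-bucket")  // assumed member
                              .WithS3KeyPrefix("output/"))         // assumed member
        .WithTrainingData(TrainingData())  // populate Assets in real use
        .WithTestingData(TestingData().WithAutoCreate(true));      // assumed member
}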

View File

@@ -0,0 +1,91 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <utility>
namespace Aws
{
template<typename RESULT_TYPE>
class AmazonWebServiceResult;
namespace Utils
{
namespace Json
{
class JsonValue;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
class AWS_REKOGNITION_API CreateProjectVersionResult
{
public:
CreateProjectVersionResult();
CreateProjectVersionResult(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
CreateProjectVersionResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
/**
* <p>The ARN of the model version that was created. Use
 * <code>DescribeProjectVersions</code> to get the current status of the training
* operation.</p>
*/
inline const Aws::String& GetProjectVersionArn() const{ return m_projectVersionArn; }
/**
* <p>The ARN of the model version that was created. Use
 * <code>DescribeProjectVersions</code> to get the current status of the training
* operation.</p>
*/
inline void SetProjectVersionArn(const Aws::String& value) { m_projectVersionArn = value; }
/**
* <p>The ARN of the model version that was created. Use
 * <code>DescribeProjectVersions</code> to get the current status of the training
* operation.</p>
*/
inline void SetProjectVersionArn(Aws::String&& value) { m_projectVersionArn = std::move(value); }
/**
* <p>The ARN of the model version that was created. Use
 * <code>DescribeProjectVersions</code> to get the current status of the training
* operation.</p>
*/
inline void SetProjectVersionArn(const char* value) { m_projectVersionArn.assign(value); }
/**
* <p>The ARN of the model version that was created. Use
 * <code>DescribeProjectVersions</code> to get the current status of the training
* operation.</p>
*/
inline CreateProjectVersionResult& WithProjectVersionArn(const Aws::String& value) { SetProjectVersionArn(value); return *this;}
/**
* <p>The ARN of the model version that was created. Use
 * <code>DescribeProjectVersions</code> to get the current status of the training
* operation.</p>
*/
inline CreateProjectVersionResult& WithProjectVersionArn(Aws::String&& value) { SetProjectVersionArn(std::move(value)); return *this;}
/**
* <p>The ARN of the model version that was created. Use
 * <code>DescribeProjectVersions</code> to get the current status of the training
* operation.</p>
*/
inline CreateProjectVersionResult& WithProjectVersionArn(const char* value) { SetProjectVersionArn(value); return *this;}
private:
Aws::String m_projectVersionArn;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws

View File

@@ -0,0 +1,294 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/RekognitionRequest.h>
#include <aws/rekognition/model/StreamProcessorInput.h>
#include <aws/rekognition/model/StreamProcessorOutput.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/rekognition/model/StreamProcessorSettings.h>
#include <utility>
namespace Aws
{
namespace Rekognition
{
namespace Model
{
/**
*/
class AWS_REKOGNITION_API CreateStreamProcessorRequest : public RekognitionRequest
{
public:
CreateStreamProcessorRequest();
    // The service request name is the operation name that sends this request out;
    // each operation should have a unique request name, so that we can get the operation's name from its request.
    // Note: this is not true for responses; multiple operations may share the same response name,
    // so we cannot get the operation's name from a response.
inline virtual const char* GetServiceRequestName() const override { return "CreateStreamProcessor"; }
Aws::String SerializePayload() const override;
Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
/**
 * <p>Kinesis video stream that provides the source streaming video. If you
* are using the AWS CLI, the parameter name is
* <code>StreamProcessorInput</code>.</p>
*/
inline const StreamProcessorInput& GetInput() const{ return m_input; }
/**
 * <p>Kinesis video stream that provides the source streaming video. If you
* are using the AWS CLI, the parameter name is
* <code>StreamProcessorInput</code>.</p>
*/
inline bool InputHasBeenSet() const { return m_inputHasBeenSet; }
/**
 * <p>Kinesis video stream that provides the source streaming video. If you
* are using the AWS CLI, the parameter name is
* <code>StreamProcessorInput</code>.</p>
*/
inline void SetInput(const StreamProcessorInput& value) { m_inputHasBeenSet = true; m_input = value; }
/**
 * <p>Kinesis video stream that provides the source streaming video. If you
* are using the AWS CLI, the parameter name is
* <code>StreamProcessorInput</code>.</p>
*/
inline void SetInput(StreamProcessorInput&& value) { m_inputHasBeenSet = true; m_input = std::move(value); }
/**
 * <p>Kinesis video stream that provides the source streaming video. If you
* are using the AWS CLI, the parameter name is
* <code>StreamProcessorInput</code>.</p>
*/
inline CreateStreamProcessorRequest& WithInput(const StreamProcessorInput& value) { SetInput(value); return *this;}
/**
 * <p>Kinesis video stream that provides the source streaming video. If you
* are using the AWS CLI, the parameter name is
* <code>StreamProcessorInput</code>.</p>
*/
inline CreateStreamProcessorRequest& WithInput(StreamProcessorInput&& value) { SetInput(std::move(value)); return *this;}
/**
 * <p>Kinesis data stream to which Amazon Rekognition Video puts the
* analysis results. If you are using the AWS CLI, the parameter name is
* <code>StreamProcessorOutput</code>.</p>
*/
inline const StreamProcessorOutput& GetOutput() const{ return m_output; }
/**
 * <p>Kinesis data stream to which Amazon Rekognition Video puts the
* analysis results. If you are using the AWS CLI, the parameter name is
* <code>StreamProcessorOutput</code>.</p>
*/
inline bool OutputHasBeenSet() const { return m_outputHasBeenSet; }
/**
 * <p>Kinesis data stream to which Amazon Rekognition Video puts the
* analysis results. If you are using the AWS CLI, the parameter name is
* <code>StreamProcessorOutput</code>.</p>
*/
inline void SetOutput(const StreamProcessorOutput& value) { m_outputHasBeenSet = true; m_output = value; }
/**
 * <p>Kinesis data stream to which Amazon Rekognition Video puts the
* analysis results. If you are using the AWS CLI, the parameter name is
* <code>StreamProcessorOutput</code>.</p>
*/
inline void SetOutput(StreamProcessorOutput&& value) { m_outputHasBeenSet = true; m_output = std::move(value); }
/**
 * <p>Kinesis data stream to which Amazon Rekognition Video puts the
* analysis results. If you are using the AWS CLI, the parameter name is
* <code>StreamProcessorOutput</code>.</p>
*/
inline CreateStreamProcessorRequest& WithOutput(const StreamProcessorOutput& value) { SetOutput(value); return *this;}
/**
 * <p>Kinesis data stream to which Amazon Rekognition Video puts the
* analysis results. If you are using the AWS CLI, the parameter name is
* <code>StreamProcessorOutput</code>.</p>
*/
inline CreateStreamProcessorRequest& WithOutput(StreamProcessorOutput&& value) { SetOutput(std::move(value)); return *this;}
/**
* <p>An identifier you assign to the stream processor. You can use
* <code>Name</code> to manage the stream processor. For example, you can get the
* current status of the stream processor by calling
* <a>DescribeStreamProcessor</a>. <code>Name</code> is idempotent. </p>
*/
inline const Aws::String& GetName() const{ return m_name; }
/**
* <p>An identifier you assign to the stream processor. You can use
* <code>Name</code> to manage the stream processor. For example, you can get the
* current status of the stream processor by calling
* <a>DescribeStreamProcessor</a>. <code>Name</code> is idempotent. </p>
*/
inline bool NameHasBeenSet() const { return m_nameHasBeenSet; }
/**
* <p>An identifier you assign to the stream processor. You can use
* <code>Name</code> to manage the stream processor. For example, you can get the
* current status of the stream processor by calling
* <a>DescribeStreamProcessor</a>. <code>Name</code> is idempotent. </p>
*/
inline void SetName(const Aws::String& value) { m_nameHasBeenSet = true; m_name = value; }
/**
* <p>An identifier you assign to the stream processor. You can use
* <code>Name</code> to manage the stream processor. For example, you can get the
* current status of the stream processor by calling
* <a>DescribeStreamProcessor</a>. <code>Name</code> is idempotent. </p>
*/
inline void SetName(Aws::String&& value) { m_nameHasBeenSet = true; m_name = std::move(value); }
/**
* <p>An identifier you assign to the stream processor. You can use
* <code>Name</code> to manage the stream processor. For example, you can get the
* current status of the stream processor by calling
* <a>DescribeStreamProcessor</a>. <code>Name</code> is idempotent. </p>
*/
inline void SetName(const char* value) { m_nameHasBeenSet = true; m_name.assign(value); }
/**
* <p>An identifier you assign to the stream processor. You can use
* <code>Name</code> to manage the stream processor. For example, you can get the
* current status of the stream processor by calling
* <a>DescribeStreamProcessor</a>. <code>Name</code> is idempotent. </p>
*/
inline CreateStreamProcessorRequest& WithName(const Aws::String& value) { SetName(value); return *this;}
/**
* <p>An identifier you assign to the stream processor. You can use
* <code>Name</code> to manage the stream processor. For example, you can get the
* current status of the stream processor by calling
* <a>DescribeStreamProcessor</a>. <code>Name</code> is idempotent. </p>
*/
inline CreateStreamProcessorRequest& WithName(Aws::String&& value) { SetName(std::move(value)); return *this;}
/**
* <p>An identifier you assign to the stream processor. You can use
* <code>Name</code> to manage the stream processor. For example, you can get the
* current status of the stream processor by calling
* <a>DescribeStreamProcessor</a>. <code>Name</code> is idempotent. </p>
*/
inline CreateStreamProcessorRequest& WithName(const char* value) { SetName(value); return *this;}
/**
* <p>Face recognition input parameters to be used by the stream processor.
* Includes the collection to use for face recognition and the face attributes to
* detect.</p>
*/
inline const StreamProcessorSettings& GetSettings() const{ return m_settings; }
/**
* <p>Face recognition input parameters to be used by the stream processor.
* Includes the collection to use for face recognition and the face attributes to
* detect.</p>
*/
inline bool SettingsHasBeenSet() const { return m_settingsHasBeenSet; }
/**
* <p>Face recognition input parameters to be used by the stream processor.
* Includes the collection to use for face recognition and the face attributes to
* detect.</p>
*/
inline void SetSettings(const StreamProcessorSettings& value) { m_settingsHasBeenSet = true; m_settings = value; }
/**
* <p>Face recognition input parameters to be used by the stream processor.
* Includes the collection to use for face recognition and the face attributes to
* detect.</p>
*/
inline void SetSettings(StreamProcessorSettings&& value) { m_settingsHasBeenSet = true; m_settings = std::move(value); }
/**
* <p>Face recognition input parameters to be used by the stream processor.
* Includes the collection to use for face recognition and the face attributes to
* detect.</p>
*/
inline CreateStreamProcessorRequest& WithSettings(const StreamProcessorSettings& value) { SetSettings(value); return *this;}
/**
* <p>Face recognition input parameters to be used by the stream processor.
* Includes the collection to use for face recognition and the face attributes to
* detect.</p>
*/
inline CreateStreamProcessorRequest& WithSettings(StreamProcessorSettings&& value) { SetSettings(std::move(value)); return *this;}
/**
* <p>ARN of the IAM role that allows access to the stream processor.</p>
*/
inline const Aws::String& GetRoleArn() const{ return m_roleArn; }
/**
* <p>ARN of the IAM role that allows access to the stream processor.</p>
*/
inline bool RoleArnHasBeenSet() const { return m_roleArnHasBeenSet; }
/**
* <p>ARN of the IAM role that allows access to the stream processor.</p>
*/
inline void SetRoleArn(const Aws::String& value) { m_roleArnHasBeenSet = true; m_roleArn = value; }
/**
* <p>ARN of the IAM role that allows access to the stream processor.</p>
*/
inline void SetRoleArn(Aws::String&& value) { m_roleArnHasBeenSet = true; m_roleArn = std::move(value); }
/**
* <p>ARN of the IAM role that allows access to the stream processor.</p>
*/
inline void SetRoleArn(const char* value) { m_roleArnHasBeenSet = true; m_roleArn.assign(value); }
/**
* <p>ARN of the IAM role that allows access to the stream processor.</p>
*/
inline CreateStreamProcessorRequest& WithRoleArn(const Aws::String& value) { SetRoleArn(value); return *this;}
/**
* <p>ARN of the IAM role that allows access to the stream processor.</p>
*/
inline CreateStreamProcessorRequest& WithRoleArn(Aws::String&& value) { SetRoleArn(std::move(value)); return *this;}
/**
* <p>ARN of the IAM role that allows access to the stream processor.</p>
*/
inline CreateStreamProcessorRequest& WithRoleArn(const char* value) { SetRoleArn(value); return *this;}
private:
StreamProcessorInput m_input;
bool m_inputHasBeenSet;
StreamProcessorOutput m_output;
bool m_outputHasBeenSet;
Aws::String m_name;
bool m_nameHasBeenSet;
StreamProcessorSettings m_settings;
bool m_settingsHasBeenSet;
Aws::String m_roleArn;
bool m_roleArnHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws

View File

@@ -0,0 +1,77 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <utility>
namespace Aws
{
template<typename RESULT_TYPE>
class AmazonWebServiceResult;
namespace Utils
{
namespace Json
{
class JsonValue;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
class AWS_REKOGNITION_API CreateStreamProcessorResult
{
public:
CreateStreamProcessorResult();
CreateStreamProcessorResult(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
CreateStreamProcessorResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
/**
* <p>ARN for the newly created stream processor.</p>
*/
inline const Aws::String& GetStreamProcessorArn() const{ return m_streamProcessorArn; }
/**
* <p>ARN for the newly created stream processor.</p>
*/
inline void SetStreamProcessorArn(const Aws::String& value) { m_streamProcessorArn = value; }
/**
* <p>ARN for the newly created stream processor.</p>
*/
inline void SetStreamProcessorArn(Aws::String&& value) { m_streamProcessorArn = std::move(value); }
/**
* <p>ARN for the newly created stream processor.</p>
*/
inline void SetStreamProcessorArn(const char* value) { m_streamProcessorArn.assign(value); }
/**
* <p>ARN for the newly created stream processor.</p>
*/
inline CreateStreamProcessorResult& WithStreamProcessorArn(const Aws::String& value) { SetStreamProcessorArn(value); return *this;}
/**
* <p>ARN for the newly created stream processor.</p>
*/
inline CreateStreamProcessorResult& WithStreamProcessorArn(Aws::String&& value) { SetStreamProcessorArn(std::move(value)); return *this;}
/**
* <p>ARN for the newly created stream processor.</p>
*/
inline CreateStreamProcessorResult& WithStreamProcessorArn(const char* value) { SetStreamProcessorArn(value); return *this;}
private:
Aws::String m_streamProcessorArn;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
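
Taken together, the request and result above give the usual create-and-describe flow. A minimal, illustrative sketch follows; the client call exists in the SDK, but the placeholder name and role ARN, and the omitted input/output/settings wiring, are assumptions rather than part of this diff:

#include <aws/core/Aws.h>
#include <aws/rekognition/RekognitionClient.h>
#include <aws/rekognition/model/CreateStreamProcessorRequest.h>
#include <iostream>

int main()
{
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        Aws::Rekognition::RekognitionClient client;
        Aws::Rekognition::Model::CreateStreamProcessorRequest request;
        request.SetName("my-stream-processor");   // idempotent identifier, reusable with DescribeStreamProcessor
        request.SetRoleArn("arn:aws:iam::123456789012:role/RekognitionRole");   // placeholder ARN
        // SetInput/SetOutput/SetSettings would wire up the Kinesis streams and
        // face-search settings; those types are declared elsewhere in this commit.
        auto outcome = client.CreateStreamProcessor(request);
        if (outcome.IsSuccess())
            std::cout << outcome.GetResult().GetStreamProcessorArn() << std::endl;
        else
            std::cerr << outcome.GetError().GetMessage() << std::endl;
    }
    Aws::ShutdownAPI(options);
    return 0;
}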

View File

@@ -0,0 +1,164 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/rekognition/model/Geometry.h>
#include <utility>
namespace Aws
{
namespace Utils
{
namespace Json
{
class JsonValue;
class JsonView;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
/**
* <p>A custom label detected in an image by a call to
* <a>DetectCustomLabels</a>.</p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/rekognition-2016-06-27/CustomLabel">AWS
* API Reference</a></p>
*/
class AWS_REKOGNITION_API CustomLabel
{
public:
CustomLabel();
CustomLabel(Aws::Utils::Json::JsonView jsonValue);
CustomLabel& operator=(Aws::Utils::Json::JsonView jsonValue);
Aws::Utils::Json::JsonValue Jsonize() const;
/**
* <p>The name of the custom label.</p>
*/
inline const Aws::String& GetName() const{ return m_name; }
/**
* <p>The name of the custom label.</p>
*/
inline bool NameHasBeenSet() const { return m_nameHasBeenSet; }
/**
* <p>The name of the custom label.</p>
*/
inline void SetName(const Aws::String& value) { m_nameHasBeenSet = true; m_name = value; }
/**
* <p>The name of the custom label.</p>
*/
inline void SetName(Aws::String&& value) { m_nameHasBeenSet = true; m_name = std::move(value); }
/**
* <p>The name of the custom label.</p>
*/
inline void SetName(const char* value) { m_nameHasBeenSet = true; m_name.assign(value); }
/**
* <p>The name of the custom label.</p>
*/
inline CustomLabel& WithName(const Aws::String& value) { SetName(value); return *this;}
/**
* <p>The name of the custom label.</p>
*/
inline CustomLabel& WithName(Aws::String&& value) { SetName(std::move(value)); return *this;}
/**
* <p>The name of the custom label.</p>
*/
inline CustomLabel& WithName(const char* value) { SetName(value); return *this;}
/**
* <p>The confidence that the model has in the detection of the custom label. The
* range is 0-100. A higher value indicates a higher confidence.</p>
*/
inline double GetConfidence() const{ return m_confidence; }
/**
* <p>The confidence that the model has in the detection of the custom label. The
* range is 0-100. A higher value indicates a higher confidence.</p>
*/
inline bool ConfidenceHasBeenSet() const { return m_confidenceHasBeenSet; }
/**
* <p>The confidence that the model has in the detection of the custom label. The
* range is 0-100. A higher value indicates a higher confidence.</p>
*/
inline void SetConfidence(double value) { m_confidenceHasBeenSet = true; m_confidence = value; }
/**
* <p>The confidence that the model has in the detection of the custom label. The
* range is 0-100. A higher value indicates a higher confidence.</p>
*/
inline CustomLabel& WithConfidence(double value) { SetConfidence(value); return *this;}
/**
* <p>The location of the detected object on the image that corresponds to the
* custom label. Includes an axis aligned coarse bounding box surrounding the
* object and a finer grain polygon for more accurate spatial information.</p>
*/
inline const Geometry& GetGeometry() const{ return m_geometry; }
/**
* <p>The location of the detected object on the image that corresponds to the
* custom label. Includes an axis aligned coarse bounding box surrounding the
* object and a finer grain polygon for more accurate spatial information.</p>
*/
inline bool GeometryHasBeenSet() const { return m_geometryHasBeenSet; }
/**
* <p>The location of the detected object on the image that corresponds to the
* custom label. Includes an axis aligned coarse bounding box surrounding the
* object and a finer grain polygon for more accurate spatial information.</p>
*/
inline void SetGeometry(const Geometry& value) { m_geometryHasBeenSet = true; m_geometry = value; }
/**
* <p>The location of the detected object on the image that corresponds to the
* custom label. Includes an axis aligned coarse bounding box surrounding the
* object and a finer grain polygon for more accurate spatial information.</p>
*/
inline void SetGeometry(Geometry&& value) { m_geometryHasBeenSet = true; m_geometry = std::move(value); }
/**
* <p>The location of the detected object on the image that corresponds to the
* custom label. Includes an axis aligned coarse bounding box surrounding the
* object and a finer grain polygon for more accurate spatial information.</p>
*/
inline CustomLabel& WithGeometry(const Geometry& value) { SetGeometry(value); return *this;}
/**
* <p>The location of the detected object on the image that corresponds to the
* custom label. Includes an axis aligned coarse bounding box surrounding the
* object and a finer grain polygon for more accurate spatial information.</p>
*/
inline CustomLabel& WithGeometry(Geometry&& value) { SetGeometry(std::move(value)); return *this;}
private:
Aws::String m_name;
bool m_nameHasBeenSet;
double m_confidence;
bool m_confidenceHasBeenSet;
Geometry m_geometry;
bool m_geometryHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
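
A CustomLabel is normally consumed from a DetectCustomLabels response. A short, illustrative sketch, assuming a constructed RekognitionClient, a populated Image, and the DetectCustomLabels request/result types from the wider SDK:

Aws::Rekognition::Model::DetectCustomLabelsRequest request;
request.SetProjectVersionArn(projectVersionArn);   // hypothetical variable
request.SetImage(image);                           // hypothetical Image object
auto outcome = client.DetectCustomLabels(request);
if (outcome.IsSuccess())
{
    for (const auto& label : outcome.GetResult().GetCustomLabels())
    {
        std::cout << label.GetName() << " (" << label.GetConfidence() << "%)" << std::endl;
        // label.GetGeometry() carries the coarse bounding box and the finer polygon.
    }
}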

View File

@@ -0,0 +1,85 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/RekognitionRequest.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <utility>
namespace Aws
{
namespace Rekognition
{
namespace Model
{
/**
*/
class AWS_REKOGNITION_API DeleteCollectionRequest : public RekognitionRequest
{
public:
DeleteCollectionRequest();
// Service request name is the Operation name that will send this request out;
// each operation should have a unique request name, so that we can get the operation's name from this request.
// Note: this is not true for responses; multiple operations may have the same response name,
// so we cannot get the operation's name from the response.
inline virtual const char* GetServiceRequestName() const override { return "DeleteCollection"; }
Aws::String SerializePayload() const override;
Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
/**
* <p>ID of the collection to delete.</p>
*/
inline const Aws::String& GetCollectionId() const{ return m_collectionId; }
/**
* <p>ID of the collection to delete.</p>
*/
inline bool CollectionIdHasBeenSet() const { return m_collectionIdHasBeenSet; }
/**
* <p>ID of the collection to delete.</p>
*/
inline void SetCollectionId(const Aws::String& value) { m_collectionIdHasBeenSet = true; m_collectionId = value; }
/**
* <p>ID of the collection to delete.</p>
*/
inline void SetCollectionId(Aws::String&& value) { m_collectionIdHasBeenSet = true; m_collectionId = std::move(value); }
/**
* <p>ID of the collection to delete.</p>
*/
inline void SetCollectionId(const char* value) { m_collectionIdHasBeenSet = true; m_collectionId.assign(value); }
/**
* <p>ID of the collection to delete.</p>
*/
inline DeleteCollectionRequest& WithCollectionId(const Aws::String& value) { SetCollectionId(value); return *this;}
/**
* <p>ID of the collection to delete.</p>
*/
inline DeleteCollectionRequest& WithCollectionId(Aws::String&& value) { SetCollectionId(std::move(value)); return *this;}
/**
* <p>ID of the collection to delete.</p>
*/
inline DeleteCollectionRequest& WithCollectionId(const char* value) { SetCollectionId(value); return *this;}
private:
Aws::String m_collectionId;
bool m_collectionIdHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws

View File

@@ -0,0 +1,55 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
namespace Aws
{
template<typename RESULT_TYPE>
class AmazonWebServiceResult;
namespace Utils
{
namespace Json
{
class JsonValue;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
class AWS_REKOGNITION_API DeleteCollectionResult
{
public:
DeleteCollectionResult();
DeleteCollectionResult(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
DeleteCollectionResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
/**
* <p>HTTP status code that indicates the result of the operation.</p>
*/
inline int GetStatusCode() const{ return m_statusCode; }
/**
* <p>HTTP status code that indicates the result of the operation.</p>
*/
inline void SetStatusCode(int value) { m_statusCode = value; }
/**
* <p>HTTP status code that indicates the result of the operation.</p>
*/
inline DeleteCollectionResult& WithStatusCode(int value) { SetStatusCode(value); return *this;}
private:
int m_statusCode;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
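
A hedged usage sketch for the pair above, assuming a constructed client and a placeholder collection ID; the result's only field is the HTTP status code:

Aws::Rekognition::Model::DeleteCollectionRequest request;
request.SetCollectionId("my-collection");
auto outcome = client.DeleteCollection(request);
if (outcome.IsSuccess())
    std::cout << "DeleteCollection returned status " << outcome.GetResult().GetStatusCode() << std::endl;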

View File

@@ -0,0 +1,135 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/RekognitionRequest.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <utility>
namespace Aws
{
namespace Rekognition
{
namespace Model
{
/**
*/
class AWS_REKOGNITION_API DeleteFacesRequest : public RekognitionRequest
{
public:
DeleteFacesRequest();
// Service request name is the Operation name that will send this request out;
// each operation should have a unique request name, so that we can get the operation's name from this request.
// Note: this is not true for responses; multiple operations may have the same response name,
// so we cannot get the operation's name from the response.
inline virtual const char* GetServiceRequestName() const override { return "DeleteFaces"; }
Aws::String SerializePayload() const override;
Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
/**
* <p>Collection from which to remove the specific faces.</p>
*/
inline const Aws::String& GetCollectionId() const{ return m_collectionId; }
/**
* <p>Collection from which to remove the specific faces.</p>
*/
inline bool CollectionIdHasBeenSet() const { return m_collectionIdHasBeenSet; }
/**
* <p>Collection from which to remove the specific faces.</p>
*/
inline void SetCollectionId(const Aws::String& value) { m_collectionIdHasBeenSet = true; m_collectionId = value; }
/**
* <p>Collection from which to remove the specific faces.</p>
*/
inline void SetCollectionId(Aws::String&& value) { m_collectionIdHasBeenSet = true; m_collectionId = std::move(value); }
/**
* <p>Collection from which to remove the specific faces.</p>
*/
inline void SetCollectionId(const char* value) { m_collectionIdHasBeenSet = true; m_collectionId.assign(value); }
/**
* <p>Collection from which to remove the specific faces.</p>
*/
inline DeleteFacesRequest& WithCollectionId(const Aws::String& value) { SetCollectionId(value); return *this;}
/**
* <p>Collection from which to remove the specific faces.</p>
*/
inline DeleteFacesRequest& WithCollectionId(Aws::String&& value) { SetCollectionId(std::move(value)); return *this;}
/**
* <p>Collection from which to remove the specific faces.</p>
*/
inline DeleteFacesRequest& WithCollectionId(const char* value) { SetCollectionId(value); return *this;}
/**
* <p>An array of face IDs to delete.</p>
*/
inline const Aws::Vector<Aws::String>& GetFaceIds() const{ return m_faceIds; }
/**
* <p>An array of face IDs to delete.</p>
*/
inline bool FaceIdsHasBeenSet() const { return m_faceIdsHasBeenSet; }
/**
* <p>An array of face IDs to delete.</p>
*/
inline void SetFaceIds(const Aws::Vector<Aws::String>& value) { m_faceIdsHasBeenSet = true; m_faceIds = value; }
/**
* <p>An array of face IDs to delete.</p>
*/
inline void SetFaceIds(Aws::Vector<Aws::String>&& value) { m_faceIdsHasBeenSet = true; m_faceIds = std::move(value); }
/**
* <p>An array of face IDs to delete.</p>
*/
inline DeleteFacesRequest& WithFaceIds(const Aws::Vector<Aws::String>& value) { SetFaceIds(value); return *this;}
/**
* <p>An array of face IDs to delete.</p>
*/
inline DeleteFacesRequest& WithFaceIds(Aws::Vector<Aws::String>&& value) { SetFaceIds(std::move(value)); return *this;}
/**
* <p>An array of face IDs to delete.</p>
*/
inline DeleteFacesRequest& AddFaceIds(const Aws::String& value) { m_faceIdsHasBeenSet = true; m_faceIds.push_back(value); return *this; }
/**
* <p>An array of face IDs to delete.</p>
*/
inline DeleteFacesRequest& AddFaceIds(Aws::String&& value) { m_faceIdsHasBeenSet = true; m_faceIds.push_back(std::move(value)); return *this; }
/**
* <p>An array of face IDs to delete.</p>
*/
inline DeleteFacesRequest& AddFaceIds(const char* value) { m_faceIdsHasBeenSet = true; m_faceIds.push_back(value); return *this; }
private:
Aws::String m_collectionId;
bool m_collectionIdHasBeenSet;
Aws::Vector<Aws::String> m_faceIds;
bool m_faceIdsHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws

View File

@@ -0,0 +1,83 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <utility>
namespace Aws
{
template<typename RESULT_TYPE>
class AmazonWebServiceResult;
namespace Utils
{
namespace Json
{
class JsonValue;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
class AWS_REKOGNITION_API DeleteFacesResult
{
public:
DeleteFacesResult();
DeleteFacesResult(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
DeleteFacesResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
/**
* <p>An array of strings (face IDs) of the faces that were deleted.</p>
*/
inline const Aws::Vector<Aws::String>& GetDeletedFaces() const{ return m_deletedFaces; }
/**
* <p>An array of strings (face IDs) of the faces that were deleted.</p>
*/
inline void SetDeletedFaces(const Aws::Vector<Aws::String>& value) { m_deletedFaces = value; }
/**
* <p>An array of strings (face IDs) of the faces that were deleted.</p>
*/
inline void SetDeletedFaces(Aws::Vector<Aws::String>&& value) { m_deletedFaces = std::move(value); }
/**
* <p>An array of strings (face IDs) of the faces that were deleted.</p>
*/
inline DeleteFacesResult& WithDeletedFaces(const Aws::Vector<Aws::String>& value) { SetDeletedFaces(value); return *this;}
/**
* <p>An array of strings (face IDs) of the faces that were deleted.</p>
*/
inline DeleteFacesResult& WithDeletedFaces(Aws::Vector<Aws::String>&& value) { SetDeletedFaces(std::move(value)); return *this;}
/**
* <p>An array of strings (face IDs) of the faces that were deleted.</p>
*/
inline DeleteFacesResult& AddDeletedFaces(const Aws::String& value) { m_deletedFaces.push_back(value); return *this; }
/**
* <p>An array of strings (face IDs) of the faces that were deleted.</p>
*/
inline DeleteFacesResult& AddDeletedFaces(Aws::String&& value) { m_deletedFaces.push_back(std::move(value)); return *this; }
/**
* <p>An array of strings (face IDs) of the faces that were deleted.</p>
*/
inline DeleteFacesResult& AddDeletedFaces(const char* value) { m_deletedFaces.push_back(value); return *this; }
private:
Aws::Vector<Aws::String> m_deletedFaces;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
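
The fluent With/Add accessors above chain naturally. An illustrative sketch with hypothetical face IDs, assuming a constructed client:

Aws::Rekognition::Model::DeleteFacesRequest request;
request.WithCollectionId("my-collection")
       .AddFaceIds("11111111-2222-3333-4444-555555555555")
       .AddFaceIds("66666666-7777-8888-9999-000000000000");
auto outcome = client.DeleteFaces(request);
if (outcome.IsSuccess())
    for (const auto& faceId : outcome.GetResult().GetDeletedFaces())
        std::cout << "deleted " << faceId << std::endl;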

View File

@@ -0,0 +1,85 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/RekognitionRequest.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <utility>
namespace Aws
{
namespace Rekognition
{
namespace Model
{
/**
*/
class AWS_REKOGNITION_API DeleteProjectRequest : public RekognitionRequest
{
public:
DeleteProjectRequest();
// Service request name is the Operation name that will send this request out;
// each operation should have a unique request name, so that we can get the operation's name from this request.
// Note: this is not true for responses; multiple operations may have the same response name,
// so we cannot get the operation's name from the response.
inline virtual const char* GetServiceRequestName() const override { return "DeleteProject"; }
Aws::String SerializePayload() const override;
Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
/**
* <p>The Amazon Resource Name (ARN) of the project that you want to delete.</p>
*/
inline const Aws::String& GetProjectArn() const{ return m_projectArn; }
/**
* <p>The Amazon Resource Name (ARN) of the project that you want to delete.</p>
*/
inline bool ProjectArnHasBeenSet() const { return m_projectArnHasBeenSet; }
/**
* <p>The Amazon Resource Name (ARN) of the project that you want to delete.</p>
*/
inline void SetProjectArn(const Aws::String& value) { m_projectArnHasBeenSet = true; m_projectArn = value; }
/**
* <p>The Amazon Resource Name (ARN) of the project that you want to delete.</p>
*/
inline void SetProjectArn(Aws::String&& value) { m_projectArnHasBeenSet = true; m_projectArn = std::move(value); }
/**
* <p>The Amazon Resource Name (ARN) of the project that you want to delete.</p>
*/
inline void SetProjectArn(const char* value) { m_projectArnHasBeenSet = true; m_projectArn.assign(value); }
/**
* <p>The Amazon Resource Name (ARN) of the project that you want to delete.</p>
*/
inline DeleteProjectRequest& WithProjectArn(const Aws::String& value) { SetProjectArn(value); return *this;}
/**
* <p>The Amazon Resource Name (ARN) of the project that you want to delete.</p>
*/
inline DeleteProjectRequest& WithProjectArn(Aws::String&& value) { SetProjectArn(std::move(value)); return *this;}
/**
* <p>The Amazon Resource Name (ARN) of the project that you want to delete.</p>
*/
inline DeleteProjectRequest& WithProjectArn(const char* value) { SetProjectArn(value); return *this;}
private:
Aws::String m_projectArn;
bool m_projectArnHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws

View File

@@ -0,0 +1,67 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/model/ProjectStatus.h>
#include <utility>
namespace Aws
{
template<typename RESULT_TYPE>
class AmazonWebServiceResult;
namespace Utils
{
namespace Json
{
class JsonValue;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
class AWS_REKOGNITION_API DeleteProjectResult
{
public:
DeleteProjectResult();
DeleteProjectResult(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
DeleteProjectResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
/**
* <p>The current status of the delete project operation.</p>
*/
inline const ProjectStatus& GetStatus() const{ return m_status; }
/**
* <p>The current status of the delete project operation.</p>
*/
inline void SetStatus(const ProjectStatus& value) { m_status = value; }
/**
* <p>The current status of the delete project operation.</p>
*/
inline void SetStatus(ProjectStatus&& value) { m_status = std::move(value); }
/**
* <p>The current status of the delete project operation.</p>
*/
inline DeleteProjectResult& WithStatus(const ProjectStatus& value) { SetStatus(value); return *this;}
/**
* <p>The current status of the delete project operation.</p>
*/
inline DeleteProjectResult& WithStatus(ProjectStatus&& value) { SetStatus(std::move(value)); return *this;}
private:
ProjectStatus m_status;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
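
An illustrative sketch of the delete-project call, assuming a constructed client, a placeholder project ARN, and a DELETING enumerator on ProjectStatus (the enum body is not shown in this diff):

Aws::Rekognition::Model::DeleteProjectRequest request;
request.SetProjectArn("arn:aws:rekognition:us-east-1:123456789012:project/getting-started/1234567890123");
auto outcome = client.DeleteProject(request);
if (outcome.IsSuccess() &&
    outcome.GetResult().GetStatus() == Aws::Rekognition::Model::ProjectStatus::DELETING)
    std::cout << "project deletion in progress" << std::endl;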

View File

@@ -0,0 +1,93 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/RekognitionRequest.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <utility>
namespace Aws
{
namespace Rekognition
{
namespace Model
{
/**
*/
class AWS_REKOGNITION_API DeleteProjectVersionRequest : public RekognitionRequest
{
public:
DeleteProjectVersionRequest();
// Service request name is the Operation name that will send this request out;
// each operation should have a unique request name, so that we can get the operation's name from this request.
// Note: this is not true for responses; multiple operations may have the same response name,
// so we cannot get the operation's name from the response.
inline virtual const char* GetServiceRequestName() const override { return "DeleteProjectVersion"; }
Aws::String SerializePayload() const override;
Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
/**
* <p>The Amazon Resource Name (ARN) of the model version that you want to
* delete.</p>
*/
inline const Aws::String& GetProjectVersionArn() const{ return m_projectVersionArn; }
/**
* <p>The Amazon Resource Name (ARN) of the model version that you want to
* delete.</p>
*/
inline bool ProjectVersionArnHasBeenSet() const { return m_projectVersionArnHasBeenSet; }
/**
* <p>The Amazon Resource Name (ARN) of the model version that you want to
* delete.</p>
*/
inline void SetProjectVersionArn(const Aws::String& value) { m_projectVersionArnHasBeenSet = true; m_projectVersionArn = value; }
/**
* <p>The Amazon Resource Name (ARN) of the model version that you want to
* delete.</p>
*/
inline void SetProjectVersionArn(Aws::String&& value) { m_projectVersionArnHasBeenSet = true; m_projectVersionArn = std::move(value); }
/**
* <p>The Amazon Resource Name (ARN) of the model version that you want to
* delete.</p>
*/
inline void SetProjectVersionArn(const char* value) { m_projectVersionArnHasBeenSet = true; m_projectVersionArn.assign(value); }
/**
* <p>The Amazon Resource Name (ARN) of the model version that you want to
* delete.</p>
*/
inline DeleteProjectVersionRequest& WithProjectVersionArn(const Aws::String& value) { SetProjectVersionArn(value); return *this;}
/**
* <p>The Amazon Resource Name (ARN) of the model version that you want to
* delete.</p>
*/
inline DeleteProjectVersionRequest& WithProjectVersionArn(Aws::String&& value) { SetProjectVersionArn(std::move(value)); return *this;}
/**
* <p>The Amazon Resource Name (ARN) of the model version that you want to
* delete.</p>
*/
inline DeleteProjectVersionRequest& WithProjectVersionArn(const char* value) { SetProjectVersionArn(value); return *this;}
private:
Aws::String m_projectVersionArn;
bool m_projectVersionArnHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws

View File

@@ -0,0 +1,67 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/model/ProjectVersionStatus.h>
#include <utility>
namespace Aws
{
template<typename RESULT_TYPE>
class AmazonWebServiceResult;
namespace Utils
{
namespace Json
{
class JsonValue;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
class AWS_REKOGNITION_API DeleteProjectVersionResult
{
public:
DeleteProjectVersionResult();
DeleteProjectVersionResult(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
DeleteProjectVersionResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
/**
* <p>The status of the deletion operation.</p>
*/
inline const ProjectVersionStatus& GetStatus() const{ return m_status; }
/**
* <p>The status of the deletion operation.</p>
*/
inline void SetStatus(const ProjectVersionStatus& value) { m_status = value; }
/**
* <p>The status of the deletion operation.</p>
*/
inline void SetStatus(ProjectVersionStatus&& value) { m_status = std::move(value); }
/**
* <p>The status of the deletion operation.</p>
*/
inline DeleteProjectVersionResult& WithStatus(const ProjectVersionStatus& value) { SetStatus(value); return *this;}
/**
* <p>The status of the deletion operation.</p>
*/
inline DeleteProjectVersionResult& WithStatus(ProjectVersionStatus&& value) { SetStatus(std::move(value)); return *this;}
private:
ProjectVersionStatus m_status;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
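
The model-version variant follows the same shape; the ARN below is a placeholder in the format quoted later in this commit:

Aws::Rekognition::Model::DeleteProjectVersionRequest request;
request.SetProjectVersionArn("arn:aws:rekognition:us-east-1:123456789012:project/getting-started/version/my-model.2020-01-21T09.10.15/1234567890123");
auto outcome = client.DeleteProjectVersion(request);
// On success, outcome.GetResult().GetStatus() reports the ProjectVersionStatus of the deletion.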

View File

@@ -0,0 +1,85 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/RekognitionRequest.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <utility>
namespace Aws
{
namespace Rekognition
{
namespace Model
{
/**
*/
class AWS_REKOGNITION_API DeleteStreamProcessorRequest : public RekognitionRequest
{
public:
DeleteStreamProcessorRequest();
// Service request name is the Operation name that will send this request out;
// each operation should have a unique request name, so that we can get the operation's name from this request.
// Note: this is not true for responses; multiple operations may have the same response name,
// so we cannot get the operation's name from the response.
inline virtual const char* GetServiceRequestName() const override { return "DeleteStreamProcessor"; }
Aws::String SerializePayload() const override;
Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
/**
* <p>The name of the stream processor you want to delete.</p>
*/
inline const Aws::String& GetName() const{ return m_name; }
/**
* <p>The name of the stream processor you want to delete.</p>
*/
inline bool NameHasBeenSet() const { return m_nameHasBeenSet; }
/**
* <p>The name of the stream processor you want to delete.</p>
*/
inline void SetName(const Aws::String& value) { m_nameHasBeenSet = true; m_name = value; }
/**
* <p>The name of the stream processor you want to delete.</p>
*/
inline void SetName(Aws::String&& value) { m_nameHasBeenSet = true; m_name = std::move(value); }
/**
* <p>The name of the stream processor you want to delete.</p>
*/
inline void SetName(const char* value) { m_nameHasBeenSet = true; m_name.assign(value); }
/**
* <p>The name of the stream processor you want to delete.</p>
*/
inline DeleteStreamProcessorRequest& WithName(const Aws::String& value) { SetName(value); return *this;}
/**
* <p>The name of the stream processor you want to delete.</p>
*/
inline DeleteStreamProcessorRequest& WithName(Aws::String&& value) { SetName(std::move(value)); return *this;}
/**
* <p>The name of the stream processor you want to delete.</p>
*/
inline DeleteStreamProcessorRequest& WithName(const char* value) { SetName(value); return *this;}
private:
Aws::String m_name;
bool m_nameHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws

View File

@@ -0,0 +1,36 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
namespace Aws
{
template<typename RESULT_TYPE>
class AmazonWebServiceResult;
namespace Utils
{
namespace Json
{
class JsonValue;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
class AWS_REKOGNITION_API DeleteStreamProcessorResult
{
public:
DeleteStreamProcessorResult();
DeleteStreamProcessorResult(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
DeleteStreamProcessorResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
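
Because the result type above carries no fields, success of the outcome is the only signal. A minimal sketch, with a placeholder name:

Aws::Rekognition::Model::DeleteStreamProcessorRequest request;
request.SetName("my-stream-processor");
if (client.DeleteStreamProcessor(request).IsSuccess())
    std::cout << "stream processor deleted" << std::endl;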

View File

@@ -0,0 +1,85 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/RekognitionRequest.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <utility>
namespace Aws
{
namespace Rekognition
{
namespace Model
{
/**
*/
class AWS_REKOGNITION_API DescribeCollectionRequest : public RekognitionRequest
{
public:
DescribeCollectionRequest();
// Service request name is the Operation name that will send this request out;
// each operation should have a unique request name, so that we can get the operation's name from this request.
// Note: this is not true for responses; multiple operations may have the same response name,
// so we cannot get the operation's name from the response.
inline virtual const char* GetServiceRequestName() const override { return "DescribeCollection"; }
Aws::String SerializePayload() const override;
Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
/**
* <p>The ID of the collection to describe.</p>
*/
inline const Aws::String& GetCollectionId() const{ return m_collectionId; }
/**
* <p>The ID of the collection to describe.</p>
*/
inline bool CollectionIdHasBeenSet() const { return m_collectionIdHasBeenSet; }
/**
* <p>The ID of the collection to describe.</p>
*/
inline void SetCollectionId(const Aws::String& value) { m_collectionIdHasBeenSet = true; m_collectionId = value; }
/**
* <p>The ID of the collection to describe.</p>
*/
inline void SetCollectionId(Aws::String&& value) { m_collectionIdHasBeenSet = true; m_collectionId = std::move(value); }
/**
* <p>The ID of the collection to describe.</p>
*/
inline void SetCollectionId(const char* value) { m_collectionIdHasBeenSet = true; m_collectionId.assign(value); }
/**
* <p>The ID of the collection to describe.</p>
*/
inline DescribeCollectionRequest& WithCollectionId(const Aws::String& value) { SetCollectionId(value); return *this;}
/**
* <p>The ID of the collection to describe.</p>
*/
inline DescribeCollectionRequest& WithCollectionId(Aws::String&& value) { SetCollectionId(std::move(value)); return *this;}
/**
* <p>The ID of the collection to describe.</p>
*/
inline DescribeCollectionRequest& WithCollectionId(const char* value) { SetCollectionId(value); return *this;}
private:
Aws::String m_collectionId;
bool m_collectionIdHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws

View File

@@ -0,0 +1,189 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/core/utils/DateTime.h>
#include <utility>
namespace Aws
{
template<typename RESULT_TYPE>
class AmazonWebServiceResult;
namespace Utils
{
namespace Json
{
class JsonValue;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
class AWS_REKOGNITION_API DescribeCollectionResult
{
public:
DescribeCollectionResult();
DescribeCollectionResult(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
DescribeCollectionResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
/**
* <p>The number of faces that are indexed into the collection. To index faces into
* a collection, use <a>IndexFaces</a>.</p>
*/
inline long long GetFaceCount() const{ return m_faceCount; }
/**
* <p>The number of faces that are indexed into the collection. To index faces into
* a collection, use <a>IndexFaces</a>.</p>
*/
inline void SetFaceCount(long long value) { m_faceCount = value; }
/**
* <p>The number of faces that are indexed into the collection. To index faces into
* a collection, use <a>IndexFaces</a>.</p>
*/
inline DescribeCollectionResult& WithFaceCount(long long value) { SetFaceCount(value); return *this;}
/**
* <p>The version of the face model that's used by the collection for face
* detection.</p> <p>For more information, see Model Versioning in the Amazon
* Rekognition Developer Guide.</p>
*/
inline const Aws::String& GetFaceModelVersion() const{ return m_faceModelVersion; }
/**
* <p>The version of the face model that's used by the collection for face
* detection.</p> <p>For more information, see Model Versioning in the Amazon
* Rekognition Developer Guide.</p>
*/
inline void SetFaceModelVersion(const Aws::String& value) { m_faceModelVersion = value; }
/**
* <p>The version of the face model that's used by the collection for face
* detection.</p> <p>For more information, see Model Versioning in the Amazon
* Rekognition Developer Guide.</p>
*/
inline void SetFaceModelVersion(Aws::String&& value) { m_faceModelVersion = std::move(value); }
/**
* <p>The version of the face model that's used by the collection for face
* detection.</p> <p>For more information, see Model Versioning in the Amazon
* Rekognition Developer Guide.</p>
*/
inline void SetFaceModelVersion(const char* value) { m_faceModelVersion.assign(value); }
/**
* <p>The version of the face model that's used by the collection for face
* detection.</p> <p>For more information, see Model Versioning in the Amazon
* Rekognition Developer Guide.</p>
*/
inline DescribeCollectionResult& WithFaceModelVersion(const Aws::String& value) { SetFaceModelVersion(value); return *this;}
/**
* <p>The version of the face model that's used by the collection for face
* detection.</p> <p>For more information, see Model Versioning in the Amazon
* Rekognition Developer Guide.</p>
*/
inline DescribeCollectionResult& WithFaceModelVersion(Aws::String&& value) { SetFaceModelVersion(std::move(value)); return *this;}
/**
* <p>The version of the face model that's used by the collection for face
* detection.</p> <p>For more information, see Model Versioning in the Amazon
* Rekognition Developer Guide.</p>
*/
inline DescribeCollectionResult& WithFaceModelVersion(const char* value) { SetFaceModelVersion(value); return *this;}
/**
* <p>The Amazon Resource Name (ARN) of the collection.</p>
*/
inline const Aws::String& GetCollectionARN() const{ return m_collectionARN; }
/**
* <p>The Amazon Resource Name (ARN) of the collection.</p>
*/
inline void SetCollectionARN(const Aws::String& value) { m_collectionARN = value; }
/**
* <p>The Amazon Resource Name (ARN) of the collection.</p>
*/
inline void SetCollectionARN(Aws::String&& value) { m_collectionARN = std::move(value); }
/**
* <p>The Amazon Resource Name (ARN) of the collection.</p>
*/
inline void SetCollectionARN(const char* value) { m_collectionARN.assign(value); }
/**
* <p>The Amazon Resource Name (ARN) of the collection.</p>
*/
inline DescribeCollectionResult& WithCollectionARN(const Aws::String& value) { SetCollectionARN(value); return *this;}
/**
* <p>The Amazon Resource Name (ARN) of the collection.</p>
*/
inline DescribeCollectionResult& WithCollectionARN(Aws::String&& value) { SetCollectionARN(std::move(value)); return *this;}
/**
* <p>The Amazon Resource Name (ARN) of the collection.</p>
*/
inline DescribeCollectionResult& WithCollectionARN(const char* value) { SetCollectionARN(value); return *this;}
/**
* <p>The number of milliseconds since the Unix epoch time until the creation of
* the collection. The Unix epoch time is 00:00:00 Coordinated Universal Time
* (UTC), Thursday, 1 January 1970.</p>
*/
inline const Aws::Utils::DateTime& GetCreationTimestamp() const{ return m_creationTimestamp; }
/**
* <p>The number of milliseconds since the Unix epoch time until the creation of
* the collection. The Unix epoch time is 00:00:00 Coordinated Universal Time
* (UTC), Thursday, 1 January 1970.</p>
*/
inline void SetCreationTimestamp(const Aws::Utils::DateTime& value) { m_creationTimestamp = value; }
/**
* <p>The number of milliseconds since the Unix epoch time until the creation of
* the collection. The Unix epoch time is 00:00:00 Coordinated Universal Time
* (UTC), Thursday, 1 January 1970.</p>
*/
inline void SetCreationTimestamp(Aws::Utils::DateTime&& value) { m_creationTimestamp = std::move(value); }
/**
* <p>The number of milliseconds since the Unix epoch time until the creation of
* the collection. The Unix epoch time is 00:00:00 Coordinated Universal Time
* (UTC), Thursday, 1 January 1970.</p>
*/
inline DescribeCollectionResult& WithCreationTimestamp(const Aws::Utils::DateTime& value) { SetCreationTimestamp(value); return *this;}
/**
* <p>The number of milliseconds since the Unix epoch time until the creation of
* the collection. The Unix epoch time is 00:00:00 Coordinated Universal Time
* (UTC), Thursday, 1 January 1970.</p>
*/
inline DescribeCollectionResult& WithCreationTimestamp(Aws::Utils::DateTime&& value) { SetCreationTimestamp(std::move(value)); return *this;}
private:
long long m_faceCount;
Aws::String m_faceModelVersion;
Aws::String m_collectionARN;
Aws::Utils::DateTime m_creationTimestamp;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
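
An illustrative read of the summary fields above, assuming a constructed client and a placeholder collection ID; the ToGmtString/DateFormat helpers are assumed from the core SDK's DateTime:

Aws::Rekognition::Model::DescribeCollectionRequest request;
request.SetCollectionId("my-collection");
auto outcome = client.DescribeCollection(request);
if (outcome.IsSuccess())
{
    const auto& result = outcome.GetResult();
    std::cout << result.GetCollectionARN() << ": "
              << result.GetFaceCount() << " faces, face model "
              << result.GetFaceModelVersion() << ", created "
              << result.GetCreationTimestamp().ToGmtString(Aws::Utils::DateFormat::ISO_8601)
              << std::endl;
}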

View File

@@ -0,0 +1,288 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/RekognitionRequest.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <utility>
namespace Aws
{
namespace Rekognition
{
namespace Model
{
/**
*/
class AWS_REKOGNITION_API DescribeProjectVersionsRequest : public RekognitionRequest
{
public:
DescribeProjectVersionsRequest();
// Service request name is the Operation name that will send this request out;
// each operation should have a unique request name, so that we can get the operation's name from this request.
// Note: this is not true for responses; multiple operations may have the same response name,
// so we cannot get the operation's name from the response.
inline virtual const char* GetServiceRequestName() const override { return "DescribeProjectVersions"; }
Aws::String SerializePayload() const override;
Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
/**
* <p>The Amazon Resource Name (ARN) of the project that contains the models you
* want to describe.</p>
*/
inline const Aws::String& GetProjectArn() const{ return m_projectArn; }
/**
* <p>The Amazon Resource Name (ARN) of the project that contains the models you
* want to describe.</p>
*/
inline bool ProjectArnHasBeenSet() const { return m_projectArnHasBeenSet; }
/**
* <p>The Amazon Resource Name (ARN) of the project that contains the models you
* want to describe.</p>
*/
inline void SetProjectArn(const Aws::String& value) { m_projectArnHasBeenSet = true; m_projectArn = value; }
/**
* <p>The Amazon Resource Name (ARN) of the project that contains the models you
* want to describe.</p>
*/
inline void SetProjectArn(Aws::String&& value) { m_projectArnHasBeenSet = true; m_projectArn = std::move(value); }
/**
* <p>The Amazon Resource Name (ARN) of the project that contains the models you
* want to describe.</p>
*/
inline void SetProjectArn(const char* value) { m_projectArnHasBeenSet = true; m_projectArn.assign(value); }
/**
* <p>The Amazon Resource Name (ARN) of the project that contains the models you
* want to describe.</p>
*/
inline DescribeProjectVersionsRequest& WithProjectArn(const Aws::String& value) { SetProjectArn(value); return *this;}
/**
* <p>The Amazon Resource Name (ARN) of the project that contains the models you
* want to describe.</p>
*/
inline DescribeProjectVersionsRequest& WithProjectArn(Aws::String&& value) { SetProjectArn(std::move(value)); return *this;}
/**
* <p>The Amazon Resource Name (ARN) of the project that contains the models you
* want to describe.</p>
*/
inline DescribeProjectVersionsRequest& WithProjectArn(const char* value) { SetProjectArn(value); return *this;}
/**
* <p>A list of model version names that you want to describe. You can add up to 10
* model version names to the list. If you don't specify a value, all model
* descriptions are returned. A version name is part of a model (ProjectVersion)
* ARN. For example, <code>my-model.2020-01-21T09.10.15</code> is the version name
* in the following ARN.
* <code>arn:aws:rekognition:us-east-1:123456789012:project/getting-started/version/<i>my-model.2020-01-21T09.10.15</i>/1234567890123</code>.</p>
*/
inline const Aws::Vector<Aws::String>& GetVersionNames() const{ return m_versionNames; }
/**
* <p>A list of model version names that you want to describe. You can add up to 10
* model version names to the list. If you don't specify a value, all model
* descriptions are returned. A version name is part of a model (ProjectVersion)
* ARN. For example, <code>my-model.2020-01-21T09.10.15</code> is the version name
* in the following ARN.
* <code>arn:aws:rekognition:us-east-1:123456789012:project/getting-started/version/<i>my-model.2020-01-21T09.10.15</i>/1234567890123</code>.</p>
*/
inline bool VersionNamesHasBeenSet() const { return m_versionNamesHasBeenSet; }
/**
* <p>A list of model version names that you want to describe. You can add up to 10
* model version names to the list. If you don't specify a value, all model
* descriptions are returned. A version name is part of a model (ProjectVersion)
* ARN. For example, <code>my-model.2020-01-21T09.10.15</code> is the version name
* in the following ARN.
* <code>arn:aws:rekognition:us-east-1:123456789012:project/getting-started/version/<i>my-model.2020-01-21T09.10.15</i>/1234567890123</code>.</p>
*/
inline void SetVersionNames(const Aws::Vector<Aws::String>& value) { m_versionNamesHasBeenSet = true; m_versionNames = value; }
/**
* <p>A list of model version names that you want to describe. You can add up to 10
* model version names to the list. If you don't specify a value, all model
* descriptions are returned. A version name is part of a model (ProjectVersion)
* ARN. For example, <code>my-model.2020-01-21T09.10.15</code> is the version name
* in the following ARN.
* <code>arn:aws:rekognition:us-east-1:123456789012:project/getting-started/version/<i>my-model.2020-01-21T09.10.15</i>/1234567890123</code>.</p>
*/
inline void SetVersionNames(Aws::Vector<Aws::String>&& value) { m_versionNamesHasBeenSet = true; m_versionNames = std::move(value); }
/**
* <p>A list of model version names that you want to describe. You can add up to 10
* model version names to the list. If you don't specify a value, all model
* descriptions are returned. A version name is part of a model (ProjectVersion)
* ARN. For example, <code>my-model.2020-01-21T09.10.15</code> is the version name
* in the following ARN.
* <code>arn:aws:rekognition:us-east-1:123456789012:project/getting-started/version/<i>my-model.2020-01-21T09.10.15</i>/1234567890123</code>.</p>
*/
inline DescribeProjectVersionsRequest& WithVersionNames(const Aws::Vector<Aws::String>& value) { SetVersionNames(value); return *this;}
/**
* <p>A list of model version names that you want to describe. You can add up to 10
* model version names to the list. If you don't specify a value, all model
* descriptions are returned. A version name is part of a model (ProjectVersion)
* ARN. For example, <code>my-model.2020-01-21T09.10.15</code> is the version name
* in the following ARN.
* <code>arn:aws:rekognition:us-east-1:123456789012:project/getting-started/version/<i>my-model.2020-01-21T09.10.15</i>/1234567890123</code>.</p>
*/
inline DescribeProjectVersionsRequest& WithVersionNames(Aws::Vector<Aws::String>&& value) { SetVersionNames(std::move(value)); return *this;}
/**
* <p>A list of model version names that you want to describe. You can add up to 10
* model version names to the list. If you don't specify a value, all model
* descriptions are returned. A version name is part of a model (ProjectVersion)
* ARN. For example, <code>my-model.2020-01-21T09.10.15</code> is the version name
* in the following ARN.
* <code>arn:aws:rekognition:us-east-1:123456789012:project/getting-started/version/<i>my-model.2020-01-21T09.10.15</i>/1234567890123</code>.</p>
*/
inline DescribeProjectVersionsRequest& AddVersionNames(const Aws::String& value) { m_versionNamesHasBeenSet = true; m_versionNames.push_back(value); return *this; }
/**
* <p>A list of model version names that you want to describe. You can add up to 10
* model version names to the list. If you don't specify a value, all model
* descriptions are returned. A version name is part of a model (ProjectVersion)
* ARN. For example, <code>my-model.2020-01-21T09.10.15</code> is the version name
* in the following ARN.
* <code>arn:aws:rekognition:us-east-1:123456789012:project/getting-started/version/<i>my-model.2020-01-21T09.10.15</i>/1234567890123</code>.</p>
*/
inline DescribeProjectVersionsRequest& AddVersionNames(Aws::String&& value) { m_versionNamesHasBeenSet = true; m_versionNames.push_back(std::move(value)); return *this; }
/**
* <p>A list of model version names that you want to describe. You can add up to 10
* model version names to the list. If you don't specify a value, all model
* descriptions are returned. A version name is part of a model (ProjectVersion)
* ARN. For example, <code>my-model.2020-01-21T09.10.15</code> is the version name
* in the following ARN.
* <code>arn:aws:rekognition:us-east-1:123456789012:project/getting-started/version/<i>my-model.2020-01-21T09.10.15</i>/1234567890123</code>.</p>
*/
inline DescribeProjectVersionsRequest& AddVersionNames(const char* value) { m_versionNamesHasBeenSet = true; m_versionNames.push_back(value); return *this; }
/**
* <p>If the previous response was incomplete (because there are more results to
* retrieve), Amazon Rekognition Custom Labels returns a pagination token in the
* response. You can use this pagination token to retrieve the next set of results.
* </p>
*/
inline const Aws::String& GetNextToken() const{ return m_nextToken; }
/**
* <p>If the previous response was incomplete (because there are more results to
* retrieve), Amazon Rekognition Custom Labels returns a pagination token in the
* response. You can use this pagination token to retrieve the next set of results.
* </p>
*/
inline bool NextTokenHasBeenSet() const { return m_nextTokenHasBeenSet; }
/**
* <p>If the previous response was incomplete (because there are more results to
* retrieve), Amazon Rekognition Custom Labels returns a pagination token in the
* response. You can use this pagination token to retrieve the next set of results.
* </p>
*/
inline void SetNextToken(const Aws::String& value) { m_nextTokenHasBeenSet = true; m_nextToken = value; }
/**
* <p>If the previous response was incomplete (because there are more results to
* retrieve), Amazon Rekognition Custom Labels returns a pagination token in the
* response. You can use this pagination token to retrieve the next set of results.
* </p>
*/
inline void SetNextToken(Aws::String&& value) { m_nextTokenHasBeenSet = true; m_nextToken = std::move(value); }
/**
* <p>If the previous response was incomplete (because there are more results to
* retrieve), Amazon Rekognition Custom Labels returns a pagination token in the
* response. You can use this pagination token to retrieve the next set of results.
* </p>
*/
inline void SetNextToken(const char* value) { m_nextTokenHasBeenSet = true; m_nextToken.assign(value); }
/**
* <p>If the previous response was incomplete (because there are more results to
* retrieve), Amazon Rekognition Custom Labels returns a pagination token in the
* response. You can use this pagination token to retrieve the next set of results.
* </p>
*/
inline DescribeProjectVersionsRequest& WithNextToken(const Aws::String& value) { SetNextToken(value); return *this;}
/**
* <p>If the previous response was incomplete (because there are more results to
* retrieve), Amazon Rekognition Custom Labels returns a pagination token in the
* response. You can use this pagination token to retrieve the next set of results.
* </p>
*/
inline DescribeProjectVersionsRequest& WithNextToken(Aws::String&& value) { SetNextToken(std::move(value)); return *this;}
/**
* <p>If the previous response was incomplete (because there are more results to
* retrieve), Amazon Rekognition Custom Labels returns a pagination token in the
* response. You can use this pagination token to retrieve the next set of results.
* </p>
*/
inline DescribeProjectVersionsRequest& WithNextToken(const char* value) { SetNextToken(value); return *this;}
/**
* <p>The maximum number of results to return per paginated call. The largest value
* you can specify is 100. If you specify a value greater than 100, a
* ValidationException error occurs. The default value is 100. </p>
*/
inline int GetMaxResults() const{ return m_maxResults; }
/**
* <p>The maximum number of results to return per paginated call. The largest value
* you can specify is 100. If you specify a value greater than 100, a
* ValidationException error occurs. The default value is 100. </p>
*/
inline bool MaxResultsHasBeenSet() const { return m_maxResultsHasBeenSet; }
/**
* <p>The maximum number of results to return per paginated call. The largest value
* you can specify is 100. If you specify a value greater than 100, a
* ValidationException error occurs. The default value is 100. </p>
*/
inline void SetMaxResults(int value) { m_maxResultsHasBeenSet = true; m_maxResults = value; }
/**
* <p>The maximum number of results to return per paginated call. The largest value
* you can specify is 100. If you specify a value greater than 100, a
* ValidationException error occurs. The default value is 100. </p>
*/
inline DescribeProjectVersionsRequest& WithMaxResults(int value) { SetMaxResults(value); return *this;}
private:
Aws::String m_projectArn;
bool m_projectArnHasBeenSet;
Aws::Vector<Aws::String> m_versionNames;
bool m_versionNamesHasBeenSet;
Aws::String m_nextToken;
bool m_nextTokenHasBeenSet;
int m_maxResults;
bool m_maxResultsHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws

View File

@@ -0,0 +1,145 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/rekognition/model/ProjectVersionDescription.h>
#include <utility>
namespace Aws
{
template<typename RESULT_TYPE>
class AmazonWebServiceResult;
namespace Utils
{
namespace Json
{
class JsonValue;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
class AWS_REKOGNITION_API DescribeProjectVersionsResult
{
public:
DescribeProjectVersionsResult();
DescribeProjectVersionsResult(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
DescribeProjectVersionsResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
/**
* <p>A list of model descriptions. The list is sorted by the creation date and
* time of the model versions, latest to earliest.</p>
*/
inline const Aws::Vector<ProjectVersionDescription>& GetProjectVersionDescriptions() const{ return m_projectVersionDescriptions; }
/**
* <p>A list of model descriptions. The list is sorted by the creation date and
* time of the model versions, latest to earliest.</p>
*/
inline void SetProjectVersionDescriptions(const Aws::Vector<ProjectVersionDescription>& value) { m_projectVersionDescriptions = value; }
/**
* <p>A list of model descriptions. The list is sorted by the creation date and
* time of the model versions, latest to earliest.</p>
*/
inline void SetProjectVersionDescriptions(Aws::Vector<ProjectVersionDescription>&& value) { m_projectVersionDescriptions = std::move(value); }
/**
* <p>A list of model descriptions. The list is sorted by the creation date and
* time of the model versions, latest to earliest.</p>
*/
inline DescribeProjectVersionsResult& WithProjectVersionDescriptions(const Aws::Vector<ProjectVersionDescription>& value) { SetProjectVersionDescriptions(value); return *this;}
/**
* <p>A list of model descriptions. The list is sorted by the creation date and
* time of the model versions, latest to earliest.</p>
*/
inline DescribeProjectVersionsResult& WithProjectVersionDescriptions(Aws::Vector<ProjectVersionDescription>&& value) { SetProjectVersionDescriptions(std::move(value)); return *this;}
/**
* <p>A list of model descriptions. The list is sorted by the creation date and
* time of the model versions, latest to earliest.</p>
*/
inline DescribeProjectVersionsResult& AddProjectVersionDescriptions(const ProjectVersionDescription& value) { m_projectVersionDescriptions.push_back(value); return *this; }
/**
* <p>A list of model descriptions. The list is sorted by the creation date and
* time of the model versions, latest to earliest.</p>
*/
inline DescribeProjectVersionsResult& AddProjectVersionDescriptions(ProjectVersionDescription&& value) { m_projectVersionDescriptions.push_back(std::move(value)); return *this; }
/**
   * <p>If the previous response was incomplete (because there are more results to
* retrieve), Amazon Rekognition Custom Labels returns a pagination token in the
* response. You can use this pagination token to retrieve the next set of results.
* </p>
*/
inline const Aws::String& GetNextToken() const{ return m_nextToken; }
/**
   * <p>If the previous response was incomplete (because there are more results to
* retrieve), Amazon Rekognition Custom Labels returns a pagination token in the
* response. You can use this pagination token to retrieve the next set of results.
* </p>
*/
inline void SetNextToken(const Aws::String& value) { m_nextToken = value; }
/**
   * <p>If the previous response was incomplete (because there are more results to
* retrieve), Amazon Rekognition Custom Labels returns a pagination token in the
* response. You can use this pagination token to retrieve the next set of results.
* </p>
*/
inline void SetNextToken(Aws::String&& value) { m_nextToken = std::move(value); }
/**
   * <p>If the previous response was incomplete (because there are more results to
* retrieve), Amazon Rekognition Custom Labels returns a pagination token in the
* response. You can use this pagination token to retrieve the next set of results.
* </p>
*/
inline void SetNextToken(const char* value) { m_nextToken.assign(value); }
/**
   * <p>If the previous response was incomplete (because there are more results to
* retrieve), Amazon Rekognition Custom Labels returns a pagination token in the
* response. You can use this pagination token to retrieve the next set of results.
* </p>
*/
inline DescribeProjectVersionsResult& WithNextToken(const Aws::String& value) { SetNextToken(value); return *this;}
/**
   * <p>If the previous response was incomplete (because there are more results to
* retrieve), Amazon Rekognition Custom Labels returns a pagination token in the
* response. You can use this pagination token to retrieve the next set of results.
* </p>
*/
inline DescribeProjectVersionsResult& WithNextToken(Aws::String&& value) { SetNextToken(std::move(value)); return *this;}
/**
   * <p>If the previous response was incomplete (because there are more results to
* retrieve), Amazon Rekognition Custom Labels returns a pagination token in the
* response. You can use this pagination token to retrieve the next set of results.
* </p>
*/
inline DescribeProjectVersionsResult& WithNextToken(const char* value) { SetNextToken(value); return *this;}
private:
Aws::Vector<ProjectVersionDescription> m_projectVersionDescriptions;
Aws::String m_nextToken;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
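
Note that result classes drop the *HasBeenSet flags: every getter is always callable and returns whatever operator= deserialized from the JSON payload. A minimal sketch that walks the returned versions, assuming the generated ProjectVersionDescription accessors (GetProjectVersionArn, GetStatus) and the ProjectVersionStatusMapper helper, none of which appear in this diff:

#include <aws/rekognition/model/DescribeProjectVersionsResult.h>
#include <aws/rekognition/model/ProjectVersionStatus.h>
#include <iostream>

using namespace Aws::Rekognition::Model;

// The list is sorted latest to earliest, so the newest model version prints first.
void PrintVersions(const DescribeProjectVersionsResult& result)
{
    for (const ProjectVersionDescription& version : result.GetProjectVersionDescriptions())
    {
        std::cout << version.GetProjectVersionArn() << " -> "
                  << ProjectVersionStatusMapper::GetNameForProjectVersionStatus(version.GetStatus())
                  << std::endl;
    }
}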

View File

@@ -0,0 +1,141 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/RekognitionRequest.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <utility>
namespace Aws
{
namespace Rekognition
{
namespace Model
{
/**
*/
class AWS_REKOGNITION_API DescribeProjectsRequest : public RekognitionRequest
{
public:
DescribeProjectsRequest();
        // Service request name is the Operation name that will send this request out.
        // Each operation should have a unique request name, so that we can get the operation's name from this request.
        // Note: this is not true for responses; multiple operations may have the same response name,
        // so we cannot get the operation's name from the response.
inline virtual const char* GetServiceRequestName() const override { return "DescribeProjects"; }
Aws::String SerializePayload() const override;
Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
/**
   * <p>If the previous response was incomplete (because there are more results to
* retrieve), Amazon Rekognition Custom Labels returns a pagination token in the
* response. You can use this pagination token to retrieve the next set of results.
* </p>
*/
inline const Aws::String& GetNextToken() const{ return m_nextToken; }
/**
   * <p>If the previous response was incomplete (because there are more results to
* retrieve), Amazon Rekognition Custom Labels returns a pagination token in the
* response. You can use this pagination token to retrieve the next set of results.
* </p>
*/
inline bool NextTokenHasBeenSet() const { return m_nextTokenHasBeenSet; }
/**
   * <p>If the previous response was incomplete (because there are more results to
* retrieve), Amazon Rekognition Custom Labels returns a pagination token in the
* response. You can use this pagination token to retrieve the next set of results.
* </p>
*/
inline void SetNextToken(const Aws::String& value) { m_nextTokenHasBeenSet = true; m_nextToken = value; }
/**
   * <p>If the previous response was incomplete (because there are more results to
* retrieve), Amazon Rekognition Custom Labels returns a pagination token in the
* response. You can use this pagination token to retrieve the next set of results.
* </p>
*/
inline void SetNextToken(Aws::String&& value) { m_nextTokenHasBeenSet = true; m_nextToken = std::move(value); }
/**
   * <p>If the previous response was incomplete (because there are more results to
* retrieve), Amazon Rekognition Custom Labels returns a pagination token in the
* response. You can use this pagination token to retrieve the next set of results.
* </p>
*/
inline void SetNextToken(const char* value) { m_nextTokenHasBeenSet = true; m_nextToken.assign(value); }
/**
   * <p>If the previous response was incomplete (because there are more results to
* retrieve), Amazon Rekognition Custom Labels returns a pagination token in the
* response. You can use this pagination token to retrieve the next set of results.
* </p>
*/
inline DescribeProjectsRequest& WithNextToken(const Aws::String& value) { SetNextToken(value); return *this;}
/**
   * <p>If the previous response was incomplete (because there are more results to
* retrieve), Amazon Rekognition Custom Labels returns a pagination token in the
* response. You can use this pagination token to retrieve the next set of results.
* </p>
*/
inline DescribeProjectsRequest& WithNextToken(Aws::String&& value) { SetNextToken(std::move(value)); return *this;}
/**
   * <p>If the previous response was incomplete (because there are more results to
* retrieve), Amazon Rekognition Custom Labels returns a pagination token in the
* response. You can use this pagination token to retrieve the next set of results.
* </p>
*/
inline DescribeProjectsRequest& WithNextToken(const char* value) { SetNextToken(value); return *this;}
/**
* <p>The maximum number of results to return per paginated call. The largest value
* you can specify is 100. If you specify a value greater than 100, a
* ValidationException error occurs. The default value is 100. </p>
*/
inline int GetMaxResults() const{ return m_maxResults; }
/**
* <p>The maximum number of results to return per paginated call. The largest value
* you can specify is 100. If you specify a value greater than 100, a
* ValidationException error occurs. The default value is 100. </p>
*/
inline bool MaxResultsHasBeenSet() const { return m_maxResultsHasBeenSet; }
/**
* <p>The maximum number of results to return per paginated call. The largest value
* you can specify is 100. If you specify a value greater than 100, a
* ValidationException error occurs. The default value is 100. </p>
*/
inline void SetMaxResults(int value) { m_maxResultsHasBeenSet = true; m_maxResults = value; }
/**
* <p>The maximum number of results to return per paginated call. The largest value
* you can specify is 100. If you specify a value greater than 100, a
* ValidationException error occurs. The default value is 100. </p>
*/
inline DescribeProjectsRequest& WithMaxResults(int value) { SetMaxResults(value); return *this;}
private:
Aws::String m_nextToken;
bool m_nextTokenHasBeenSet;
int m_maxResults;
bool m_maxResultsHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws

View File

@@ -0,0 +1,145 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/rekognition/model/ProjectDescription.h>
#include <utility>
namespace Aws
{
template<typename RESULT_TYPE>
class AmazonWebServiceResult;
namespace Utils
{
namespace Json
{
class JsonValue;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
class AWS_REKOGNITION_API DescribeProjectsResult
{
public:
DescribeProjectsResult();
DescribeProjectsResult(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
DescribeProjectsResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
/**
* <p>A list of project descriptions. The list is sorted by the date and time the
* projects are created.</p>
*/
inline const Aws::Vector<ProjectDescription>& GetProjectDescriptions() const{ return m_projectDescriptions; }
/**
* <p>A list of project descriptions. The list is sorted by the date and time the
* projects are created.</p>
*/
inline void SetProjectDescriptions(const Aws::Vector<ProjectDescription>& value) { m_projectDescriptions = value; }
/**
* <p>A list of project descriptions. The list is sorted by the date and time the
* projects are created.</p>
*/
inline void SetProjectDescriptions(Aws::Vector<ProjectDescription>&& value) { m_projectDescriptions = std::move(value); }
/**
* <p>A list of project descriptions. The list is sorted by the date and time the
* projects are created.</p>
*/
inline DescribeProjectsResult& WithProjectDescriptions(const Aws::Vector<ProjectDescription>& value) { SetProjectDescriptions(value); return *this;}
/**
* <p>A list of project descriptions. The list is sorted by the date and time the
* projects are created.</p>
*/
inline DescribeProjectsResult& WithProjectDescriptions(Aws::Vector<ProjectDescription>&& value) { SetProjectDescriptions(std::move(value)); return *this;}
/**
* <p>A list of project descriptions. The list is sorted by the date and time the
* projects are created.</p>
*/
inline DescribeProjectsResult& AddProjectDescriptions(const ProjectDescription& value) { m_projectDescriptions.push_back(value); return *this; }
/**
* <p>A list of project descriptions. The list is sorted by the date and time the
* projects are created.</p>
*/
inline DescribeProjectsResult& AddProjectDescriptions(ProjectDescription&& value) { m_projectDescriptions.push_back(std::move(value)); return *this; }
/**
   * <p>If the previous response was incomplete (because there are more results to
* retrieve), Amazon Rekognition Custom Labels returns a pagination token in the
* response. You can use this pagination token to retrieve the next set of results.
* </p>
*/
inline const Aws::String& GetNextToken() const{ return m_nextToken; }
/**
   * <p>If the previous response was incomplete (because there are more results to
* retrieve), Amazon Rekognition Custom Labels returns a pagination token in the
* response. You can use this pagination token to retrieve the next set of results.
* </p>
*/
inline void SetNextToken(const Aws::String& value) { m_nextToken = value; }
/**
   * <p>If the previous response was incomplete (because there are more results to
* retrieve), Amazon Rekognition Custom Labels returns a pagination token in the
* response. You can use this pagination token to retrieve the next set of results.
* </p>
*/
inline void SetNextToken(Aws::String&& value) { m_nextToken = std::move(value); }
/**
   * <p>If the previous response was incomplete (because there are more results to
* retrieve), Amazon Rekognition Custom Labels returns a pagination token in the
* response. You can use this pagination token to retrieve the next set of results.
* </p>
*/
inline void SetNextToken(const char* value) { m_nextToken.assign(value); }
/**
   * <p>If the previous response was incomplete (because there are more results to
* retrieve), Amazon Rekognition Custom Labels returns a pagination token in the
* response. You can use this pagination token to retrieve the next set of results.
* </p>
*/
inline DescribeProjectsResult& WithNextToken(const Aws::String& value) { SetNextToken(value); return *this;}
/**
   * <p>If the previous response was incomplete (because there are more results to
* retrieve), Amazon Rekognition Custom Labels returns a pagination token in the
* response. You can use this pagination token to retrieve the next set of results.
* </p>
*/
inline DescribeProjectsResult& WithNextToken(Aws::String&& value) { SetNextToken(std::move(value)); return *this;}
/**
   * <p>If the previous response was incomplete (because there are more results to
* retrieve), Amazon Rekognition Custom Labels returns a pagination token in the
* response. You can use this pagination token to retrieve the next set of results.
* </p>
*/
inline DescribeProjectsResult& WithNextToken(const char* value) { SetNextToken(value); return *this;}
private:
Aws::Vector<ProjectDescription> m_projectDescriptions;
Aws::String m_nextToken;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
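
The NextToken comments describe the standard pagination contract: reissue the request with the returned token until the response comes back without one. A minimal sketch under that contract, assuming an already-initialized RekognitionClient (Aws::InitAPI handled elsewhere) and the generated GetProjectArn accessor on ProjectDescription:

#include <aws/rekognition/RekognitionClient.h>
#include <aws/rekognition/model/DescribeProjectsRequest.h>
#include <iostream>

using namespace Aws::Rekognition;

void ListAllProjects(const RekognitionClient& client)
{
    Model::DescribeProjectsRequest request;
    request.SetMaxResults(100); // also the service default and maximum
    Aws::String token;
    do
    {
        if (!token.empty()) request.SetNextToken(token);
        auto outcome = client.DescribeProjects(request);
        if (!outcome.IsSuccess())
        {
            std::cerr << outcome.GetError().GetMessage() << std::endl;
            return;
        }
        for (const auto& project : outcome.GetResult().GetProjectDescriptions())
            std::cout << project.GetProjectArn() << std::endl;
        token = outcome.GetResult().GetNextToken(); // empty on the last page
    } while (!token.empty());
}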

View File

@@ -0,0 +1,85 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/RekognitionRequest.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <utility>
namespace Aws
{
namespace Rekognition
{
namespace Model
{
/**
*/
class AWS_REKOGNITION_API DescribeStreamProcessorRequest : public RekognitionRequest
{
public:
DescribeStreamProcessorRequest();
        // Service request name is the Operation name that will send this request out.
        // Each operation should have a unique request name, so that we can get the operation's name from this request.
        // Note: this is not true for responses; multiple operations may have the same response name,
        // so we cannot get the operation's name from the response.
inline virtual const char* GetServiceRequestName() const override { return "DescribeStreamProcessor"; }
Aws::String SerializePayload() const override;
Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
/**
* <p>Name of the stream processor for which you want information.</p>
*/
inline const Aws::String& GetName() const{ return m_name; }
/**
* <p>Name of the stream processor for which you want information.</p>
*/
inline bool NameHasBeenSet() const { return m_nameHasBeenSet; }
/**
* <p>Name of the stream processor for which you want information.</p>
*/
inline void SetName(const Aws::String& value) { m_nameHasBeenSet = true; m_name = value; }
/**
* <p>Name of the stream processor for which you want information.</p>
*/
inline void SetName(Aws::String&& value) { m_nameHasBeenSet = true; m_name = std::move(value); }
/**
* <p>Name of the stream processor for which you want information.</p>
*/
inline void SetName(const char* value) { m_nameHasBeenSet = true; m_name.assign(value); }
/**
* <p>Name of the stream processor for which you want information.</p>
*/
inline DescribeStreamProcessorRequest& WithName(const Aws::String& value) { SetName(value); return *this;}
/**
* <p>Name of the stream processor for which you want information.</p>
*/
inline DescribeStreamProcessorRequest& WithName(Aws::String&& value) { SetName(std::move(value)); return *this;}
/**
* <p>Name of the stream processor for which you want information.</p>
*/
inline DescribeStreamProcessorRequest& WithName(const char* value) { SetName(value); return *this;}
private:
Aws::String m_name;
bool m_nameHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws

View File

@@ -0,0 +1,389 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/rekognition/model/StreamProcessorStatus.h>
#include <aws/core/utils/DateTime.h>
#include <aws/rekognition/model/StreamProcessorInput.h>
#include <aws/rekognition/model/StreamProcessorOutput.h>
#include <aws/rekognition/model/StreamProcessorSettings.h>
#include <utility>
namespace Aws
{
template<typename RESULT_TYPE>
class AmazonWebServiceResult;
namespace Utils
{
namespace Json
{
class JsonValue;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
class AWS_REKOGNITION_API DescribeStreamProcessorResult
{
public:
DescribeStreamProcessorResult();
DescribeStreamProcessorResult(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
DescribeStreamProcessorResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
/**
* <p>Name of the stream processor. </p>
*/
inline const Aws::String& GetName() const{ return m_name; }
/**
* <p>Name of the stream processor. </p>
*/
inline void SetName(const Aws::String& value) { m_name = value; }
/**
* <p>Name of the stream processor. </p>
*/
inline void SetName(Aws::String&& value) { m_name = std::move(value); }
/**
* <p>Name of the stream processor. </p>
*/
inline void SetName(const char* value) { m_name.assign(value); }
/**
* <p>Name of the stream processor. </p>
*/
inline DescribeStreamProcessorResult& WithName(const Aws::String& value) { SetName(value); return *this;}
/**
* <p>Name of the stream processor. </p>
*/
inline DescribeStreamProcessorResult& WithName(Aws::String&& value) { SetName(std::move(value)); return *this;}
/**
* <p>Name of the stream processor. </p>
*/
inline DescribeStreamProcessorResult& WithName(const char* value) { SetName(value); return *this;}
/**
* <p>ARN of the stream processor.</p>
*/
inline const Aws::String& GetStreamProcessorArn() const{ return m_streamProcessorArn; }
/**
* <p>ARN of the stream processor.</p>
*/
inline void SetStreamProcessorArn(const Aws::String& value) { m_streamProcessorArn = value; }
/**
* <p>ARN of the stream processor.</p>
*/
inline void SetStreamProcessorArn(Aws::String&& value) { m_streamProcessorArn = std::move(value); }
/**
* <p>ARN of the stream processor.</p>
*/
inline void SetStreamProcessorArn(const char* value) { m_streamProcessorArn.assign(value); }
/**
* <p>ARN of the stream processor.</p>
*/
inline DescribeStreamProcessorResult& WithStreamProcessorArn(const Aws::String& value) { SetStreamProcessorArn(value); return *this;}
/**
* <p>ARN of the stream processor.</p>
*/
inline DescribeStreamProcessorResult& WithStreamProcessorArn(Aws::String&& value) { SetStreamProcessorArn(std::move(value)); return *this;}
/**
* <p>ARN of the stream processor.</p>
*/
inline DescribeStreamProcessorResult& WithStreamProcessorArn(const char* value) { SetStreamProcessorArn(value); return *this;}
/**
* <p>Current status of the stream processor.</p>
*/
inline const StreamProcessorStatus& GetStatus() const{ return m_status; }
/**
* <p>Current status of the stream processor.</p>
*/
inline void SetStatus(const StreamProcessorStatus& value) { m_status = value; }
/**
* <p>Current status of the stream processor.</p>
*/
inline void SetStatus(StreamProcessorStatus&& value) { m_status = std::move(value); }
/**
* <p>Current status of the stream processor.</p>
*/
inline DescribeStreamProcessorResult& WithStatus(const StreamProcessorStatus& value) { SetStatus(value); return *this;}
/**
* <p>Current status of the stream processor.</p>
*/
inline DescribeStreamProcessorResult& WithStatus(StreamProcessorStatus&& value) { SetStatus(std::move(value)); return *this;}
/**
* <p>Detailed status message about the stream processor.</p>
*/
inline const Aws::String& GetStatusMessage() const{ return m_statusMessage; }
/**
* <p>Detailed status message about the stream processor.</p>
*/
inline void SetStatusMessage(const Aws::String& value) { m_statusMessage = value; }
/**
* <p>Detailed status message about the stream processor.</p>
*/
inline void SetStatusMessage(Aws::String&& value) { m_statusMessage = std::move(value); }
/**
* <p>Detailed status message about the stream processor.</p>
*/
inline void SetStatusMessage(const char* value) { m_statusMessage.assign(value); }
/**
* <p>Detailed status message about the stream processor.</p>
*/
inline DescribeStreamProcessorResult& WithStatusMessage(const Aws::String& value) { SetStatusMessage(value); return *this;}
/**
* <p>Detailed status message about the stream processor.</p>
*/
inline DescribeStreamProcessorResult& WithStatusMessage(Aws::String&& value) { SetStatusMessage(std::move(value)); return *this;}
/**
* <p>Detailed status message about the stream processor.</p>
*/
inline DescribeStreamProcessorResult& WithStatusMessage(const char* value) { SetStatusMessage(value); return *this;}
/**
   * <p>Date and time the stream processor was created.</p>
*/
inline const Aws::Utils::DateTime& GetCreationTimestamp() const{ return m_creationTimestamp; }
/**
   * <p>Date and time the stream processor was created.</p>
*/
inline void SetCreationTimestamp(const Aws::Utils::DateTime& value) { m_creationTimestamp = value; }
/**
   * <p>Date and time the stream processor was created.</p>
*/
inline void SetCreationTimestamp(Aws::Utils::DateTime&& value) { m_creationTimestamp = std::move(value); }
/**
   * <p>Date and time the stream processor was created.</p>
*/
inline DescribeStreamProcessorResult& WithCreationTimestamp(const Aws::Utils::DateTime& value) { SetCreationTimestamp(value); return *this;}
/**
   * <p>Date and time the stream processor was created.</p>
*/
inline DescribeStreamProcessorResult& WithCreationTimestamp(Aws::Utils::DateTime&& value) { SetCreationTimestamp(std::move(value)); return *this;}
/**
* <p>The time, in Unix format, the stream processor was last updated. For example,
* when the stream processor moves from a running state to a failed state, or when
* the user starts or stops the stream processor.</p>
*/
inline const Aws::Utils::DateTime& GetLastUpdateTimestamp() const{ return m_lastUpdateTimestamp; }
/**
* <p>The time, in Unix format, the stream processor was last updated. For example,
* when the stream processor moves from a running state to a failed state, or when
* the user starts or stops the stream processor.</p>
*/
inline void SetLastUpdateTimestamp(const Aws::Utils::DateTime& value) { m_lastUpdateTimestamp = value; }
/**
* <p>The time, in Unix format, the stream processor was last updated. For example,
* when the stream processor moves from a running state to a failed state, or when
* the user starts or stops the stream processor.</p>
*/
inline void SetLastUpdateTimestamp(Aws::Utils::DateTime&& value) { m_lastUpdateTimestamp = std::move(value); }
/**
* <p>The time, in Unix format, the stream processor was last updated. For example,
* when the stream processor moves from a running state to a failed state, or when
* the user starts or stops the stream processor.</p>
*/
inline DescribeStreamProcessorResult& WithLastUpdateTimestamp(const Aws::Utils::DateTime& value) { SetLastUpdateTimestamp(value); return *this;}
/**
* <p>The time, in Unix format, the stream processor was last updated. For example,
* when the stream processor moves from a running state to a failed state, or when
* the user starts or stops the stream processor.</p>
*/
inline DescribeStreamProcessorResult& WithLastUpdateTimestamp(Aws::Utils::DateTime&& value) { SetLastUpdateTimestamp(std::move(value)); return *this;}
/**
* <p>Kinesis video stream that provides the source streaming video.</p>
*/
inline const StreamProcessorInput& GetInput() const{ return m_input; }
/**
* <p>Kinesis video stream that provides the source streaming video.</p>
*/
inline void SetInput(const StreamProcessorInput& value) { m_input = value; }
/**
* <p>Kinesis video stream that provides the source streaming video.</p>
*/
inline void SetInput(StreamProcessorInput&& value) { m_input = std::move(value); }
/**
* <p>Kinesis video stream that provides the source streaming video.</p>
*/
inline DescribeStreamProcessorResult& WithInput(const StreamProcessorInput& value) { SetInput(value); return *this;}
/**
* <p>Kinesis video stream that provides the source streaming video.</p>
*/
inline DescribeStreamProcessorResult& WithInput(StreamProcessorInput&& value) { SetInput(std::move(value)); return *this;}
/**
* <p>Kinesis data stream to which Amazon Rekognition Video puts the analysis
* results.</p>
*/
inline const StreamProcessorOutput& GetOutput() const{ return m_output; }
/**
* <p>Kinesis data stream to which Amazon Rekognition Video puts the analysis
* results.</p>
*/
inline void SetOutput(const StreamProcessorOutput& value) { m_output = value; }
/**
* <p>Kinesis data stream to which Amazon Rekognition Video puts the analysis
* results.</p>
*/
inline void SetOutput(StreamProcessorOutput&& value) { m_output = std::move(value); }
/**
* <p>Kinesis data stream to which Amazon Rekognition Video puts the analysis
* results.</p>
*/
inline DescribeStreamProcessorResult& WithOutput(const StreamProcessorOutput& value) { SetOutput(value); return *this;}
/**
* <p>Kinesis data stream to which Amazon Rekognition Video puts the analysis
* results.</p>
*/
inline DescribeStreamProcessorResult& WithOutput(StreamProcessorOutput&& value) { SetOutput(std::move(value)); return *this;}
/**
* <p>ARN of the IAM role that allows access to the stream processor.</p>
*/
inline const Aws::String& GetRoleArn() const{ return m_roleArn; }
/**
* <p>ARN of the IAM role that allows access to the stream processor.</p>
*/
inline void SetRoleArn(const Aws::String& value) { m_roleArn = value; }
/**
* <p>ARN of the IAM role that allows access to the stream processor.</p>
*/
inline void SetRoleArn(Aws::String&& value) { m_roleArn = std::move(value); }
/**
* <p>ARN of the IAM role that allows access to the stream processor.</p>
*/
inline void SetRoleArn(const char* value) { m_roleArn.assign(value); }
/**
* <p>ARN of the IAM role that allows access to the stream processor.</p>
*/
inline DescribeStreamProcessorResult& WithRoleArn(const Aws::String& value) { SetRoleArn(value); return *this;}
/**
* <p>ARN of the IAM role that allows access to the stream processor.</p>
*/
inline DescribeStreamProcessorResult& WithRoleArn(Aws::String&& value) { SetRoleArn(std::move(value)); return *this;}
/**
* <p>ARN of the IAM role that allows access to the stream processor.</p>
*/
inline DescribeStreamProcessorResult& WithRoleArn(const char* value) { SetRoleArn(value); return *this;}
/**
* <p>Face recognition input parameters that are being used by the stream
* processor. Includes the collection to use for face recognition and the face
* attributes to detect.</p>
*/
inline const StreamProcessorSettings& GetSettings() const{ return m_settings; }
/**
* <p>Face recognition input parameters that are being used by the stream
* processor. Includes the collection to use for face recognition and the face
* attributes to detect.</p>
*/
inline void SetSettings(const StreamProcessorSettings& value) { m_settings = value; }
/**
* <p>Face recognition input parameters that are being used by the stream
* processor. Includes the collection to use for face recognition and the face
* attributes to detect.</p>
*/
inline void SetSettings(StreamProcessorSettings&& value) { m_settings = std::move(value); }
/**
* <p>Face recognition input parameters that are being used by the stream
* processor. Includes the collection to use for face recognition and the face
* attributes to detect.</p>
*/
inline DescribeStreamProcessorResult& WithSettings(const StreamProcessorSettings& value) { SetSettings(value); return *this;}
/**
* <p>Face recognition input parameters that are being used by the stream
* processor. Includes the collection to use for face recognition and the face
* attributes to detect.</p>
*/
inline DescribeStreamProcessorResult& WithSettings(StreamProcessorSettings&& value) { SetSettings(std::move(value)); return *this;}
private:
Aws::String m_name;
Aws::String m_streamProcessorArn;
StreamProcessorStatus m_status;
Aws::String m_statusMessage;
Aws::Utils::DateTime m_creationTimestamp;
Aws::Utils::DateTime m_lastUpdateTimestamp;
StreamProcessorInput m_input;
StreamProcessorOutput m_output;
Aws::String m_roleArn;
StreamProcessorSettings m_settings;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
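
A minimal status-check sketch for the request/result pair above, assuming an initialized client and the generated StreamProcessorStatusMapper helper (not shown in this diff):

#include <aws/rekognition/RekognitionClient.h>
#include <aws/rekognition/model/DescribeStreamProcessorRequest.h>
#include <aws/rekognition/model/StreamProcessorStatus.h>
#include <iostream>

using namespace Aws::Rekognition;

void CheckProcessor(const RekognitionClient& client, const Aws::String& name)
{
    auto outcome = client.DescribeStreamProcessor(
        Model::DescribeStreamProcessorRequest().WithName(name));
    if (!outcome.IsSuccess())
    {
        std::cerr << outcome.GetError().GetMessage() << std::endl;
        return;
    }
    const auto& result = outcome.GetResult();
    std::cout << result.GetName() << ": "
              << Model::StreamProcessorStatusMapper::GetNameForStreamProcessorStatus(result.GetStatus())
              << " (" << result.GetStatusMessage() << ")" << std::endl;
}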

View File

@@ -0,0 +1,176 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/RekognitionRequest.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/rekognition/model/Image.h>
#include <utility>
namespace Aws
{
namespace Rekognition
{
namespace Model
{
/**
*/
class AWS_REKOGNITION_API DetectCustomLabelsRequest : public RekognitionRequest
{
public:
DetectCustomLabelsRequest();
        // Service request name is the Operation name that will send this request out.
        // Each operation should have a unique request name, so that we can get the operation's name from this request.
        // Note: this is not true for responses; multiple operations may have the same response name,
        // so we cannot get the operation's name from the response.
inline virtual const char* GetServiceRequestName() const override { return "DetectCustomLabels"; }
Aws::String SerializePayload() const override;
Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
/**
* <p>The ARN of the model version that you want to use.</p>
*/
inline const Aws::String& GetProjectVersionArn() const{ return m_projectVersionArn; }
/**
* <p>The ARN of the model version that you want to use.</p>
*/
inline bool ProjectVersionArnHasBeenSet() const { return m_projectVersionArnHasBeenSet; }
/**
* <p>The ARN of the model version that you want to use.</p>
*/
inline void SetProjectVersionArn(const Aws::String& value) { m_projectVersionArnHasBeenSet = true; m_projectVersionArn = value; }
/**
* <p>The ARN of the model version that you want to use.</p>
*/
inline void SetProjectVersionArn(Aws::String&& value) { m_projectVersionArnHasBeenSet = true; m_projectVersionArn = std::move(value); }
/**
* <p>The ARN of the model version that you want to use.</p>
*/
inline void SetProjectVersionArn(const char* value) { m_projectVersionArnHasBeenSet = true; m_projectVersionArn.assign(value); }
/**
* <p>The ARN of the model version that you want to use.</p>
*/
inline DetectCustomLabelsRequest& WithProjectVersionArn(const Aws::String& value) { SetProjectVersionArn(value); return *this;}
/**
* <p>The ARN of the model version that you want to use.</p>
*/
inline DetectCustomLabelsRequest& WithProjectVersionArn(Aws::String&& value) { SetProjectVersionArn(std::move(value)); return *this;}
/**
* <p>The ARN of the model version that you want to use.</p>
*/
inline DetectCustomLabelsRequest& WithProjectVersionArn(const char* value) { SetProjectVersionArn(value); return *this;}
inline const Image& GetImage() const{ return m_image; }
inline bool ImageHasBeenSet() const { return m_imageHasBeenSet; }
inline void SetImage(const Image& value) { m_imageHasBeenSet = true; m_image = value; }
inline void SetImage(Image&& value) { m_imageHasBeenSet = true; m_image = std::move(value); }
inline DetectCustomLabelsRequest& WithImage(const Image& value) { SetImage(value); return *this;}
inline DetectCustomLabelsRequest& WithImage(Image&& value) { SetImage(std::move(value)); return *this;}
/**
* <p>Maximum number of results you want the service to return in the response. The
* service returns the specified number of highest confidence labels ranked from
* highest confidence to lowest.</p>
*/
inline int GetMaxResults() const{ return m_maxResults; }
/**
* <p>Maximum number of results you want the service to return in the response. The
* service returns the specified number of highest confidence labels ranked from
* highest confidence to lowest.</p>
*/
inline bool MaxResultsHasBeenSet() const { return m_maxResultsHasBeenSet; }
/**
* <p>Maximum number of results you want the service to return in the response. The
* service returns the specified number of highest confidence labels ranked from
* highest confidence to lowest.</p>
*/
inline void SetMaxResults(int value) { m_maxResultsHasBeenSet = true; m_maxResults = value; }
/**
* <p>Maximum number of results you want the service to return in the response. The
* service returns the specified number of highest confidence labels ranked from
* highest confidence to lowest.</p>
*/
inline DetectCustomLabelsRequest& WithMaxResults(int value) { SetMaxResults(value); return *this;}
/**
* <p>Specifies the minimum confidence level for the labels to return. Amazon
* Rekognition doesn't return any labels with a confidence lower than this
   * specified value. If you specify a value of 0, all labels are returned, regardless
* of the default thresholds that the model version applies.</p>
*/
inline double GetMinConfidence() const{ return m_minConfidence; }
/**
* <p>Specifies the minimum confidence level for the labels to return. Amazon
* Rekognition doesn't return any labels with a confidence lower than this
   * specified value. If you specify a value of 0, all labels are returned, regardless
* of the default thresholds that the model version applies.</p>
*/
inline bool MinConfidenceHasBeenSet() const { return m_minConfidenceHasBeenSet; }
/**
* <p>Specifies the minimum confidence level for the labels to return. Amazon
* Rekognition doesn't return any labels with a confidence lower than this
   * specified value. If you specify a value of 0, all labels are returned, regardless
* of the default thresholds that the model version applies.</p>
*/
inline void SetMinConfidence(double value) { m_minConfidenceHasBeenSet = true; m_minConfidence = value; }
/**
* <p>Specifies the minimum confidence level for the labels to return. Amazon
* Rekognition doesn't return any labels with a confidence lower than this
   * specified value. If you specify a value of 0, all labels are returned, regardless
* of the default thresholds that the model version applies.</p>
*/
inline DetectCustomLabelsRequest& WithMinConfidence(double value) { SetMinConfidence(value); return *this;}
private:
Aws::String m_projectVersionArn;
bool m_projectVersionArnHasBeenSet;
Image m_image;
bool m_imageHasBeenSet;
int m_maxResults;
bool m_maxResultsHasBeenSet;
double m_minConfidence;
bool m_minConfidenceHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws

View File

@@ -0,0 +1,78 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/rekognition/model/CustomLabel.h>
#include <utility>
namespace Aws
{
template<typename RESULT_TYPE>
class AmazonWebServiceResult;
namespace Utils
{
namespace Json
{
class JsonValue;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
class AWS_REKOGNITION_API DetectCustomLabelsResult
{
public:
DetectCustomLabelsResult();
DetectCustomLabelsResult(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
DetectCustomLabelsResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
/**
* <p>An array of custom labels detected in the input image.</p>
*/
inline const Aws::Vector<CustomLabel>& GetCustomLabels() const{ return m_customLabels; }
/**
* <p>An array of custom labels detected in the input image.</p>
*/
inline void SetCustomLabels(const Aws::Vector<CustomLabel>& value) { m_customLabels = value; }
/**
* <p>An array of custom labels detected in the input image.</p>
*/
inline void SetCustomLabels(Aws::Vector<CustomLabel>&& value) { m_customLabels = std::move(value); }
/**
* <p>An array of custom labels detected in the input image.</p>
*/
inline DetectCustomLabelsResult& WithCustomLabels(const Aws::Vector<CustomLabel>& value) { SetCustomLabels(value); return *this;}
/**
* <p>An array of custom labels detected in the input image.</p>
*/
inline DetectCustomLabelsResult& WithCustomLabels(Aws::Vector<CustomLabel>&& value) { SetCustomLabels(std::move(value)); return *this;}
/**
* <p>An array of custom labels detected in the input image.</p>
*/
inline DetectCustomLabelsResult& AddCustomLabels(const CustomLabel& value) { m_customLabels.push_back(value); return *this; }
/**
* <p>An array of custom labels detected in the input image.</p>
*/
inline DetectCustomLabelsResult& AddCustomLabels(CustomLabel&& value) { m_customLabels.push_back(std::move(value)); return *this; }
private:
Aws::Vector<CustomLabel> m_customLabels;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
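
Tying the request and result together: per the MinConfidence comment, passing 0 bypasses the per-label thresholds the model version was trained with and returns everything. A minimal sketch, assuming an initialized client, an image already in S3 (bucket and key are placeholders), and the generated CustomLabel accessors GetName/GetConfidence:

#include <aws/rekognition/RekognitionClient.h>
#include <aws/rekognition/model/DetectCustomLabelsRequest.h>
#include <aws/rekognition/model/S3Object.h>
#include <iostream>

using namespace Aws::Rekognition;

void DetectWithCustomModel(const RekognitionClient& client, const Aws::String& modelArn)
{
    Model::DetectCustomLabelsRequest request;
    request.SetProjectVersionArn(modelArn);
    request.SetImage(Model::Image().WithS3Object(
        Model::S3Object().WithBucket("my-bucket").WithName("photo.jpg"))); // placeholders
    request.SetMinConfidence(0.0); // 0 returns all labels, ignoring model thresholds
    auto outcome = client.DetectCustomLabels(request);
    if (!outcome.IsSuccess())
    {
        std::cerr << outcome.GetError().GetMessage() << std::endl;
        return;
    }
    for (const auto& label : outcome.GetResult().GetCustomLabels())
        std::cout << label.GetName() << " @ " << label.GetConfidence() << std::endl;
}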

View File

@@ -0,0 +1,223 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/RekognitionRequest.h>
#include <aws/rekognition/model/Image.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/rekognition/model/Attribute.h>
#include <utility>
namespace Aws
{
namespace Rekognition
{
namespace Model
{
/**
*/
class AWS_REKOGNITION_API DetectFacesRequest : public RekognitionRequest
{
public:
DetectFacesRequest();
        // Service request name is the Operation name that will send this request out.
        // Each operation should have a unique request name, so that we can get the operation's name from this request.
        // Note: this is not true for responses; multiple operations may have the same response name,
        // so we cannot get the operation's name from the response.
inline virtual const char* GetServiceRequestName() const override { return "DetectFaces"; }
Aws::String SerializePayload() const override;
Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
/**
* <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS
* CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is
* not supported. </p> <p>If you are using an AWS SDK to call Amazon Rekognition,
* you might not need to base64-encode image bytes passed using the
* <code>Bytes</code> field. For more information, see Images in the Amazon
* Rekognition developer guide.</p>
*/
inline const Image& GetImage() const{ return m_image; }
/**
* <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS
* CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is
* not supported. </p> <p>If you are using an AWS SDK to call Amazon Rekognition,
* you might not need to base64-encode image bytes passed using the
* <code>Bytes</code> field. For more information, see Images in the Amazon
* Rekognition developer guide.</p>
*/
inline bool ImageHasBeenSet() const { return m_imageHasBeenSet; }
/**
* <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS
* CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is
* not supported. </p> <p>If you are using an AWS SDK to call Amazon Rekognition,
* you might not need to base64-encode image bytes passed using the
* <code>Bytes</code> field. For more information, see Images in the Amazon
* Rekognition developer guide.</p>
*/
inline void SetImage(const Image& value) { m_imageHasBeenSet = true; m_image = value; }
/**
* <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS
* CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is
* not supported. </p> <p>If you are using an AWS SDK to call Amazon Rekognition,
* you might not need to base64-encode image bytes passed using the
* <code>Bytes</code> field. For more information, see Images in the Amazon
* Rekognition developer guide.</p>
*/
inline void SetImage(Image&& value) { m_imageHasBeenSet = true; m_image = std::move(value); }
/**
* <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS
* CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is
* not supported. </p> <p>If you are using an AWS SDK to call Amazon Rekognition,
* you might not need to base64-encode image bytes passed using the
* <code>Bytes</code> field. For more information, see Images in the Amazon
* Rekognition developer guide.</p>
*/
inline DetectFacesRequest& WithImage(const Image& value) { SetImage(value); return *this;}
/**
* <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS
* CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is
* not supported. </p> <p>If you are using an AWS SDK to call Amazon Rekognition,
* you might not need to base64-encode image bytes passed using the
* <code>Bytes</code> field. For more information, see Images in the Amazon
* Rekognition developer guide.</p>
*/
inline DetectFacesRequest& WithImage(Image&& value) { SetImage(std::move(value)); return *this;}
/**
* <p>An array of facial attributes you want to be returned. This can be the
* default list of attributes or all attributes. If you don't specify a value for
* <code>Attributes</code> or if you specify <code>["DEFAULT"]</code>, the API
* returns the following subset of facial attributes: <code>BoundingBox</code>,
* <code>Confidence</code>, <code>Pose</code>, <code>Quality</code>, and
* <code>Landmarks</code>. If you provide <code>["ALL"]</code>, all facial
* attributes are returned, but the operation takes longer to complete.</p> <p>If
* you provide both, <code>["ALL", "DEFAULT"]</code>, the service uses a logical
* AND operator to determine which attributes to return (in this case, all
* attributes). </p>
*/
inline const Aws::Vector<Attribute>& GetAttributes() const{ return m_attributes; }
/**
* <p>An array of facial attributes you want to be returned. This can be the
* default list of attributes or all attributes. If you don't specify a value for
* <code>Attributes</code> or if you specify <code>["DEFAULT"]</code>, the API
* returns the following subset of facial attributes: <code>BoundingBox</code>,
* <code>Confidence</code>, <code>Pose</code>, <code>Quality</code>, and
* <code>Landmarks</code>. If you provide <code>["ALL"]</code>, all facial
* attributes are returned, but the operation takes longer to complete.</p> <p>If
* you provide both, <code>["ALL", "DEFAULT"]</code>, the service uses a logical
* AND operator to determine which attributes to return (in this case, all
* attributes). </p>
*/
inline bool AttributesHasBeenSet() const { return m_attributesHasBeenSet; }
/**
* <p>An array of facial attributes you want to be returned. This can be the
* default list of attributes or all attributes. If you don't specify a value for
* <code>Attributes</code> or if you specify <code>["DEFAULT"]</code>, the API
* returns the following subset of facial attributes: <code>BoundingBox</code>,
* <code>Confidence</code>, <code>Pose</code>, <code>Quality</code>, and
* <code>Landmarks</code>. If you provide <code>["ALL"]</code>, all facial
* attributes are returned, but the operation takes longer to complete.</p> <p>If
* you provide both, <code>["ALL", "DEFAULT"]</code>, the service uses a logical
* AND operator to determine which attributes to return (in this case, all
* attributes). </p>
*/
inline void SetAttributes(const Aws::Vector<Attribute>& value) { m_attributesHasBeenSet = true; m_attributes = value; }
/**
* <p>An array of facial attributes you want to be returned. This can be the
* default list of attributes or all attributes. If you don't specify a value for
* <code>Attributes</code> or if you specify <code>["DEFAULT"]</code>, the API
* returns the following subset of facial attributes: <code>BoundingBox</code>,
* <code>Confidence</code>, <code>Pose</code>, <code>Quality</code>, and
* <code>Landmarks</code>. If you provide <code>["ALL"]</code>, all facial
* attributes are returned, but the operation takes longer to complete.</p> <p>If
* you provide both, <code>["ALL", "DEFAULT"]</code>, the service uses a logical
* AND operator to determine which attributes to return (in this case, all
* attributes). </p>
*/
inline void SetAttributes(Aws::Vector<Attribute>&& value) { m_attributesHasBeenSet = true; m_attributes = std::move(value); }
/**
* <p>An array of facial attributes you want to be returned. This can be the
* default list of attributes or all attributes. If you don't specify a value for
* <code>Attributes</code> or if you specify <code>["DEFAULT"]</code>, the API
* returns the following subset of facial attributes: <code>BoundingBox</code>,
* <code>Confidence</code>, <code>Pose</code>, <code>Quality</code>, and
* <code>Landmarks</code>. If you provide <code>["ALL"]</code>, all facial
* attributes are returned, but the operation takes longer to complete.</p> <p>If
* you provide both, <code>["ALL", "DEFAULT"]</code>, the service uses a logical
* AND operator to determine which attributes to return (in this case, all
* attributes). </p>
*/
inline DetectFacesRequest& WithAttributes(const Aws::Vector<Attribute>& value) { SetAttributes(value); return *this;}
/**
* <p>An array of facial attributes you want to be returned. This can be the
* default list of attributes or all attributes. If you don't specify a value for
* <code>Attributes</code> or if you specify <code>["DEFAULT"]</code>, the API
* returns the following subset of facial attributes: <code>BoundingBox</code>,
* <code>Confidence</code>, <code>Pose</code>, <code>Quality</code>, and
* <code>Landmarks</code>. If you provide <code>["ALL"]</code>, all facial
* attributes are returned, but the operation takes longer to complete.</p> <p>If
* you provide both, <code>["ALL", "DEFAULT"]</code>, the service uses a logical
* AND operator to determine which attributes to return (in this case, all
* attributes). </p>
*/
inline DetectFacesRequest& WithAttributes(Aws::Vector<Attribute>&& value) { SetAttributes(std::move(value)); return *this;}
/**
* <p>An array of facial attributes you want to be returned. This can be the
* default list of attributes or all attributes. If you don't specify a value for
* <code>Attributes</code> or if you specify <code>["DEFAULT"]</code>, the API
* returns the following subset of facial attributes: <code>BoundingBox</code>,
* <code>Confidence</code>, <code>Pose</code>, <code>Quality</code>, and
* <code>Landmarks</code>. If you provide <code>["ALL"]</code>, all facial
* attributes are returned, but the operation takes longer to complete.</p> <p>If
* you provide both, <code>["ALL", "DEFAULT"]</code>, the service uses a logical
* AND operator to determine which attributes to return (in this case, all
* attributes). </p>
*/
inline DetectFacesRequest& AddAttributes(const Attribute& value) { m_attributesHasBeenSet = true; m_attributes.push_back(value); return *this; }
/**
* <p>An array of facial attributes you want to be returned. This can be the
* default list of attributes or all attributes. If you don't specify a value for
* <code>Attributes</code> or if you specify <code>["DEFAULT"]</code>, the API
* returns the following subset of facial attributes: <code>BoundingBox</code>,
* <code>Confidence</code>, <code>Pose</code>, <code>Quality</code>, and
* <code>Landmarks</code>. If you provide <code>["ALL"]</code>, all facial
* attributes are returned, but the operation takes longer to complete.</p> <p>If
* you provide both, <code>["ALL", "DEFAULT"]</code>, the service uses a logical
* AND operator to determine which attributes to return (in this case, all
* attributes). </p>
*/
inline DetectFacesRequest& AddAttributes(Attribute&& value) { m_attributesHasBeenSet = true; m_attributes.push_back(std::move(value)); return *this; }
private:
Image m_image;
bool m_imageHasBeenSet;
Aws::Vector<Attribute> m_attributes;
bool m_attributesHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
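
The Attributes comment is the operative part of this request: omitting Attributes (or passing DEFAULT) yields only the five baseline attributes, while ALL returns the full set at the cost of a slower call. A minimal sketch requesting everything, assuming the Attribute enum's ALL value and placeholder S3 coordinates:

#include <aws/rekognition/model/DetectFacesRequest.h>
#include <aws/rekognition/model/Attribute.h>
#include <aws/rekognition/model/S3Object.h>

using namespace Aws::Rekognition::Model;

DetectFacesRequest BuildDetectFacesRequest()
{
    return DetectFacesRequest()
        .WithImage(Image().WithS3Object(
            S3Object().WithBucket("my-bucket").WithName("faces.jpg"))) // placeholders
        .AddAttributes(Attribute::ALL); // DEFAULT returns only BoundingBox, Confidence,
                                        // Pose, Quality, and Landmarks
}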

View File

@@ -0,0 +1,157 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/rekognition/model/OrientationCorrection.h>
#include <aws/rekognition/model/FaceDetail.h>
#include <utility>
namespace Aws
{
template<typename RESULT_TYPE>
class AmazonWebServiceResult;
namespace Utils
{
namespace Json
{
class JsonValue;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
class AWS_REKOGNITION_API DetectFacesResult
{
public:
DetectFacesResult();
DetectFacesResult(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
DetectFacesResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
/**
* <p>Details of each face found in the image. </p>
*/
inline const Aws::Vector<FaceDetail>& GetFaceDetails() const{ return m_faceDetails; }
/**
* <p>Details of each face found in the image. </p>
*/
inline void SetFaceDetails(const Aws::Vector<FaceDetail>& value) { m_faceDetails = value; }
/**
* <p>Details of each face found in the image. </p>
*/
inline void SetFaceDetails(Aws::Vector<FaceDetail>&& value) { m_faceDetails = std::move(value); }
/**
* <p>Details of each face found in the image. </p>
*/
inline DetectFacesResult& WithFaceDetails(const Aws::Vector<FaceDetail>& value) { SetFaceDetails(value); return *this;}
/**
* <p>Details of each face found in the image. </p>
*/
inline DetectFacesResult& WithFaceDetails(Aws::Vector<FaceDetail>&& value) { SetFaceDetails(std::move(value)); return *this;}
/**
* <p>Details of each face found in the image. </p>
*/
inline DetectFacesResult& AddFaceDetails(const FaceDetail& value) { m_faceDetails.push_back(value); return *this; }
/**
* <p>Details of each face found in the image. </p>
*/
inline DetectFacesResult& AddFaceDetails(FaceDetail&& value) { m_faceDetails.push_back(std::move(value)); return *this; }
/**
* <p>The value of <code>OrientationCorrection</code> is always null.</p> <p>If the
* input image is in .jpeg format, it might contain exchangeable image file format
* (Exif) metadata that includes the image's orientation. Amazon Rekognition uses
* this orientation information to perform image correction. The bounding box
* coordinates are translated to represent object locations after the orientation
* information in the Exif metadata is used to correct the image orientation.
* Images in .png format don't contain Exif metadata.</p> <p>Amazon Rekognition
   * doesn't perform image correction for images in .png format and .jpeg images
* without orientation information in the image Exif metadata. The bounding box
* coordinates aren't translated and represent the object locations before the
* image is rotated. </p>
*/
inline const OrientationCorrection& GetOrientationCorrection() const{ return m_orientationCorrection; }
/**
* <p>The value of <code>OrientationCorrection</code> is always null.</p> <p>If the
* input image is in .jpeg format, it might contain exchangeable image file format
* (Exif) metadata that includes the image's orientation. Amazon Rekognition uses
* this orientation information to perform image correction. The bounding box
* coordinates are translated to represent object locations after the orientation
* information in the Exif metadata is used to correct the image orientation.
* Images in .png format don't contain Exif metadata.</p> <p>Amazon Rekognition
   * doesn't perform image correction for images in .png format and .jpeg images
* without orientation information in the image Exif metadata. The bounding box
* coordinates aren't translated and represent the object locations before the
* image is rotated. </p>
*/
inline void SetOrientationCorrection(const OrientationCorrection& value) { m_orientationCorrection = value; }
/**
* <p>The value of <code>OrientationCorrection</code> is always null.</p> <p>If the
* input image is in .jpeg format, it might contain exchangeable image file format
* (Exif) metadata that includes the image's orientation. Amazon Rekognition uses
* this orientation information to perform image correction. The bounding box
* coordinates are translated to represent object locations after the orientation
* information in the Exif metadata is used to correct the image orientation.
* Images in .png format don't contain Exif metadata.</p> <p>Amazon Rekognition
   * doesn't perform image correction for images in .png format and .jpeg images
* without orientation information in the image Exif metadata. The bounding box
* coordinates aren't translated and represent the object locations before the
* image is rotated. </p>
*/
inline void SetOrientationCorrection(OrientationCorrection&& value) { m_orientationCorrection = std::move(value); }
/**
* <p>The value of <code>OrientationCorrection</code> is always null.</p> <p>If the
* input image is in .jpeg format, it might contain exchangeable image file format
* (Exif) metadata that includes the image's orientation. Amazon Rekognition uses
* this orientation information to perform image correction. The bounding box
* coordinates are translated to represent object locations after the orientation
* information in the Exif metadata is used to correct the image orientation.
* Images in .png format don't contain Exif metadata.</p> <p>Amazon Rekognition
   * doesn't perform image correction for images in .png format and .jpeg images
* without orientation information in the image Exif metadata. The bounding box
* coordinates aren't translated and represent the object locations before the
* image is rotated. </p>
*/
inline DetectFacesResult& WithOrientationCorrection(const OrientationCorrection& value) { SetOrientationCorrection(value); return *this;}
/**
* <p>The value of <code>OrientationCorrection</code> is always null.</p> <p>If the
* input image is in .jpeg format, it might contain exchangeable image file format
* (Exif) metadata that includes the image's orientation. Amazon Rekognition uses
* this orientation information to perform image correction. The bounding box
* coordinates are translated to represent object locations after the orientation
* information in the Exif metadata is used to correct the image orientation.
* Images in .png format don't contain Exif metadata.</p> <p>Amazon Rekognition
   * doesn't perform image correction for images in .png format and .jpeg images
* without orientation information in the image Exif metadata. The bounding box
* coordinates aren't translated and represent the object locations before the
* image is rotated. </p>
*/
inline DetectFacesResult& WithOrientationCorrection(OrientationCorrection&& value) { SetOrientationCorrection(std::move(value)); return *this;}
private:
Aws::Vector<FaceDetail> m_faceDetails;
OrientationCorrection m_orientationCorrection;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
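
On the result side only FaceDetails carries information; as the comments stress, OrientationCorrection is always null here, so a consumer can ignore it. A minimal sketch that walks the detected faces, assuming the generated FaceDetail and BoundingBox accessors:

#include <aws/rekognition/model/DetectFacesResult.h>
#include <iostream>

using namespace Aws::Rekognition::Model;

void PrintFaces(const DetectFacesResult& result)
{
    // OrientationCorrection is skipped on purpose: it is documented above as always null.
    for (const FaceDetail& face : result.GetFaceDetails())
    {
        const BoundingBox& box = face.GetBoundingBox();
        std::cout << "face at (" << box.GetLeft() << ", " << box.GetTop() << ") "
                  << box.GetWidth() << " x " << box.GetHeight()
                  << ", confidence " << face.GetConfidence() << std::endl;
    }
}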

View File

@@ -0,0 +1,169 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/RekognitionRequest.h>
#include <aws/rekognition/model/Image.h>
#include <utility>
namespace Aws
{
namespace Rekognition
{
namespace Model
{
/**
*/
class AWS_REKOGNITION_API DetectLabelsRequest : public RekognitionRequest
{
public:
DetectLabelsRequest();
        // Service request name is the Operation name that will send this request out.
        // Each operation should have a unique request name, so that we can get the operation's name from this request.
        // Note: this is not true for responses; multiple operations may have the same response name,
        // so we cannot get the operation's name from the response.
inline virtual const char* GetServiceRequestName() const override { return "DetectLabels"; }
Aws::String SerializePayload() const override;
Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
/**
* <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS
* CLI to call Amazon Rekognition operations, passing image bytes is not supported.
* Images stored in an S3 Bucket do not need to be base64-encoded.</p> <p>If you
* are using an AWS SDK to call Amazon Rekognition, you might not need to
* base64-encode image bytes passed using the <code>Bytes</code> field. For more
* information, see Images in the Amazon Rekognition developer guide.</p>
*/
inline const Image& GetImage() const{ return m_image; }
/**
* <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS
* CLI to call Amazon Rekognition operations, passing image bytes is not supported.
* Images stored in an S3 Bucket do not need to be base64-encoded.</p> <p>If you
* are using an AWS SDK to call Amazon Rekognition, you might not need to
* base64-encode image bytes passed using the <code>Bytes</code> field. For more
* information, see Images in the Amazon Rekognition developer guide.</p>
*/
inline bool ImageHasBeenSet() const { return m_imageHasBeenSet; }
/**
* <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS
* CLI to call Amazon Rekognition operations, passing image bytes is not supported.
* Images stored in an S3 Bucket do not need to be base64-encoded.</p> <p>If you
* are using an AWS SDK to call Amazon Rekognition, you might not need to
* base64-encode image bytes passed using the <code>Bytes</code> field. For more
* information, see Images in the Amazon Rekognition developer guide.</p>
*/
inline void SetImage(const Image& value) { m_imageHasBeenSet = true; m_image = value; }
/**
* <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS
* CLI to call Amazon Rekognition operations, passing image bytes is not supported.
* Images stored in an S3 Bucket do not need to be base64-encoded.</p> <p>If you
* are using an AWS SDK to call Amazon Rekognition, you might not need to
* base64-encode image bytes passed using the <code>Bytes</code> field. For more
* information, see Images in the Amazon Rekognition developer guide.</p>
*/
inline void SetImage(Image&& value) { m_imageHasBeenSet = true; m_image = std::move(value); }
/**
* <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS
* CLI to call Amazon Rekognition operations, passing image bytes is not supported.
* Images stored in an S3 Bucket do not need to be base64-encoded.</p> <p>If you
* are using an AWS SDK to call Amazon Rekognition, you might not need to
* base64-encode image bytes passed using the <code>Bytes</code> field. For more
* information, see Images in the Amazon Rekognition developer guide.</p>
*/
inline DetectLabelsRequest& WithImage(const Image& value) { SetImage(value); return *this;}
/**
* <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS
* CLI to call Amazon Rekognition operations, passing image bytes is not supported.
* Images stored in an S3 Bucket do not need to be base64-encoded.</p> <p>If you
* are using an AWS SDK to call Amazon Rekognition, you might not need to
* base64-encode image bytes passed using the <code>Bytes</code> field. For more
* information, see Images in the Amazon Rekognition developer guide.</p>
*/
inline DetectLabelsRequest& WithImage(Image&& value) { SetImage(std::move(value)); return *this;}
/**
* <p>Maximum number of labels you want the service to return in the response. The
* service returns the specified number of highest confidence labels. </p>
*/
inline int GetMaxLabels() const{ return m_maxLabels; }
/**
* <p>Maximum number of labels you want the service to return in the response. The
* service returns the specified number of highest confidence labels. </p>
*/
inline bool MaxLabelsHasBeenSet() const { return m_maxLabelsHasBeenSet; }
/**
* <p>Maximum number of labels you want the service to return in the response. The
* service returns the specified number of highest confidence labels. </p>
*/
inline void SetMaxLabels(int value) { m_maxLabelsHasBeenSet = true; m_maxLabels = value; }
/**
* <p>Maximum number of labels you want the service to return in the response. The
* service returns the specified number of highest confidence labels. </p>
*/
inline DetectLabelsRequest& WithMaxLabels(int value) { SetMaxLabels(value); return *this;}
/**
* <p>Specifies the minimum confidence level for the labels to return. Amazon
* Rekognition doesn't return any labels with confidence lower than this specified
* value.</p> <p>If <code>MinConfidence</code> is not specified, the operation
 * returns labels with confidence values greater than or equal to 55 percent.</p>
*/
inline double GetMinConfidence() const{ return m_minConfidence; }
/**
* <p>Specifies the minimum confidence level for the labels to return. Amazon
* Rekognition doesn't return any labels with confidence lower than this specified
* value.</p> <p>If <code>MinConfidence</code> is not specified, the operation
 * returns labels with confidence values greater than or equal to 55 percent.</p>
*/
inline bool MinConfidenceHasBeenSet() const { return m_minConfidenceHasBeenSet; }
/**
* <p>Specifies the minimum confidence level for the labels to return. Amazon
* Rekognition doesn't return any labels with confidence lower than this specified
* value.</p> <p>If <code>MinConfidence</code> is not specified, the operation
 * returns labels with confidence values greater than or equal to 55 percent.</p>
*/
inline void SetMinConfidence(double value) { m_minConfidenceHasBeenSet = true; m_minConfidence = value; }
/**
* <p>Specifies the minimum confidence level for the labels to return. Amazon
* Rekognition doesn't return any labels with confidence lower than this specified
* value.</p> <p>If <code>MinConfidence</code> is not specified, the operation
 * returns labels with confidence values greater than or equal to 55 percent.</p>
*/
inline DetectLabelsRequest& WithMinConfidence(double value) { SetMinConfidence(value); return *this;}
private:
Image m_image;
bool m_imageHasBeenSet;
int m_maxLabels;
bool m_maxLabelsHasBeenSet;
double m_minConfidence;
bool m_minConfidenceHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
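
To make the setter pattern concrete, a short sketch that populates this request type. The bucket, key, and threshold values are illustrative, not recommendations.

#include <aws/rekognition/model/DetectLabelsRequest.h>
#include <aws/rekognition/model/Image.h>
#include <aws/rekognition/model/S3Object.h>

Aws::Rekognition::Model::DetectLabelsRequest MakeLabelsRequest()
{
    Aws::Rekognition::Model::S3Object s3Object;
    s3Object.SetBucket("my-bucket");        // hypothetical bucket
    s3Object.SetName("photos/street.jpg");  // hypothetical key

    Aws::Rekognition::Model::Image image;
    image.SetS3Object(s3Object);

    Aws::Rekognition::Model::DetectLabelsRequest request;
    request.SetImage(image);        // also flips m_imageHasBeenSet
    request.SetMaxLabels(10);       // keep the ten highest-confidence labels
    request.SetMinConfidence(75.0); // instead of the 55 percent default
    return request;
}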


@@ -0,0 +1,203 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/rekognition/model/OrientationCorrection.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/rekognition/model/Label.h>
#include <utility>
namespace Aws
{
template<typename RESULT_TYPE>
class AmazonWebServiceResult;
namespace Utils
{
namespace Json
{
class JsonValue;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
class AWS_REKOGNITION_API DetectLabelsResult
{
public:
DetectLabelsResult();
DetectLabelsResult(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
DetectLabelsResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
/**
* <p>An array of labels for the real-world objects detected. </p>
*/
inline const Aws::Vector<Label>& GetLabels() const{ return m_labels; }
/**
* <p>An array of labels for the real-world objects detected. </p>
*/
inline void SetLabels(const Aws::Vector<Label>& value) { m_labels = value; }
/**
* <p>An array of labels for the real-world objects detected. </p>
*/
inline void SetLabels(Aws::Vector<Label>&& value) { m_labels = std::move(value); }
/**
* <p>An array of labels for the real-world objects detected. </p>
*/
inline DetectLabelsResult& WithLabels(const Aws::Vector<Label>& value) { SetLabels(value); return *this;}
/**
* <p>An array of labels for the real-world objects detected. </p>
*/
inline DetectLabelsResult& WithLabels(Aws::Vector<Label>&& value) { SetLabels(std::move(value)); return *this;}
/**
* <p>An array of labels for the real-world objects detected. </p>
*/
inline DetectLabelsResult& AddLabels(const Label& value) { m_labels.push_back(value); return *this; }
/**
* <p>An array of labels for the real-world objects detected. </p>
*/
inline DetectLabelsResult& AddLabels(Label&& value) { m_labels.push_back(std::move(value)); return *this; }
/**
* <p>The value of <code>OrientationCorrection</code> is always null.</p> <p>If the
* input image is in .jpeg format, it might contain exchangeable image file format
* (Exif) metadata that includes the image's orientation. Amazon Rekognition uses
* this orientation information to perform image correction. The bounding box
* coordinates are translated to represent object locations after the orientation
* information in the Exif metadata is used to correct the image orientation.
* Images in .png format don't contain Exif metadata.</p> <p>Amazon Rekognition
 * doesn't perform image correction for images in .png format and .jpeg images
* without orientation information in the image Exif metadata. The bounding box
* coordinates aren't translated and represent the object locations before the
* image is rotated. </p>
*/
inline const OrientationCorrection& GetOrientationCorrection() const{ return m_orientationCorrection; }
/**
* <p>The value of <code>OrientationCorrection</code> is always null.</p> <p>If the
* input image is in .jpeg format, it might contain exchangeable image file format
* (Exif) metadata that includes the image's orientation. Amazon Rekognition uses
* this orientation information to perform image correction. The bounding box
* coordinates are translated to represent object locations after the orientation
* information in the Exif metadata is used to correct the image orientation.
* Images in .png format don't contain Exif metadata.</p> <p>Amazon Rekognition
 * doesn't perform image correction for images in .png format and .jpeg images
* without orientation information in the image Exif metadata. The bounding box
* coordinates aren't translated and represent the object locations before the
* image is rotated. </p>
*/
inline void SetOrientationCorrection(const OrientationCorrection& value) { m_orientationCorrection = value; }
/**
* <p>The value of <code>OrientationCorrection</code> is always null.</p> <p>If the
* input image is in .jpeg format, it might contain exchangeable image file format
* (Exif) metadata that includes the image's orientation. Amazon Rekognition uses
* this orientation information to perform image correction. The bounding box
* coordinates are translated to represent object locations after the orientation
* information in the Exif metadata is used to correct the image orientation.
* Images in .png format don't contain Exif metadata.</p> <p>Amazon Rekognition
 * doesn't perform image correction for images in .png format and .jpeg images
* without orientation information in the image Exif metadata. The bounding box
* coordinates aren't translated and represent the object locations before the
* image is rotated. </p>
*/
inline void SetOrientationCorrection(OrientationCorrection&& value) { m_orientationCorrection = std::move(value); }
/**
* <p>The value of <code>OrientationCorrection</code> is always null.</p> <p>If the
* input image is in .jpeg format, it might contain exchangeable image file format
* (Exif) metadata that includes the image's orientation. Amazon Rekognition uses
* this orientation information to perform image correction. The bounding box
* coordinates are translated to represent object locations after the orientation
* information in the Exif metadata is used to correct the image orientation.
* Images in .png format don't contain Exif metadata.</p> <p>Amazon Rekognition
 * doesn't perform image correction for images in .png format and .jpeg images
* without orientation information in the image Exif metadata. The bounding box
* coordinates aren't translated and represent the object locations before the
* image is rotated. </p>
*/
inline DetectLabelsResult& WithOrientationCorrection(const OrientationCorrection& value) { SetOrientationCorrection(value); return *this;}
/**
* <p>The value of <code>OrientationCorrection</code> is always null.</p> <p>If the
* input image is in .jpeg format, it might contain exchangeable image file format
* (Exif) metadata that includes the image's orientation. Amazon Rekognition uses
* this orientation information to perform image correction. The bounding box
* coordinates are translated to represent object locations after the orientation
* information in the Exif metadata is used to correct the image orientation.
* Images in .png format don't contain Exif metadata.</p> <p>Amazon Rekognition
 * doesn't perform image correction for images in .png format and .jpeg images
* without orientation information in the image Exif metadata. The bounding box
* coordinates aren't translated and represent the object locations before the
* image is rotated. </p>
*/
inline DetectLabelsResult& WithOrientationCorrection(OrientationCorrection&& value) { SetOrientationCorrection(std::move(value)); return *this;}
/**
* <p>Version number of the label detection model that was used to detect
* labels.</p>
*/
inline const Aws::String& GetLabelModelVersion() const{ return m_labelModelVersion; }
/**
* <p>Version number of the label detection model that was used to detect
* labels.</p>
*/
inline void SetLabelModelVersion(const Aws::String& value) { m_labelModelVersion = value; }
/**
* <p>Version number of the label detection model that was used to detect
* labels.</p>
*/
inline void SetLabelModelVersion(Aws::String&& value) { m_labelModelVersion = std::move(value); }
/**
* <p>Version number of the label detection model that was used to detect
* labels.</p>
*/
inline void SetLabelModelVersion(const char* value) { m_labelModelVersion.assign(value); }
/**
* <p>Version number of the label detection model that was used to detect
* labels.</p>
*/
inline DetectLabelsResult& WithLabelModelVersion(const Aws::String& value) { SetLabelModelVersion(value); return *this;}
/**
* <p>Version number of the label detection model that was used to detect
* labels.</p>
*/
inline DetectLabelsResult& WithLabelModelVersion(Aws::String&& value) { SetLabelModelVersion(std::move(value)); return *this;}
/**
* <p>Version number of the label detection model that was used to detect
* labels.</p>
*/
inline DetectLabelsResult& WithLabelModelVersion(const char* value) { SetLabelModelVersion(value); return *this;}
private:
Aws::Vector<Label> m_labels;
OrientationCorrection m_orientationCorrection;
Aws::String m_labelModelVersion;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
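
A sketch of consuming this result, assuming a client and the request built after the previous header; error handling is reduced to printing the message.

#include <aws/rekognition/RekognitionClient.h>
#include <aws/rekognition/model/DetectLabelsRequest.h>
#include <iostream>

void PrintLabels(const Aws::Rekognition::RekognitionClient& client,
                 const Aws::Rekognition::Model::DetectLabelsRequest& request)
{
    auto outcome = client.DetectLabels(request);
    if (!outcome.IsSuccess())
    {
        std::cerr << outcome.GetError().GetMessage() << "\n";
        return;
    }
    const auto& result = outcome.GetResult();
    for (const auto& label : result.GetLabels())
    {
        std::cout << label.GetName() << " (" << label.GetConfidence() << "%)\n";
    }
    std::cout << "label model: " << result.GetLabelModelVersion() << "\n";
}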


@@ -0,0 +1,186 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/RekognitionRequest.h>
#include <aws/rekognition/model/Image.h>
#include <aws/rekognition/model/HumanLoopConfig.h>
#include <utility>
namespace Aws
{
namespace Rekognition
{
namespace Model
{
/**
*/
class AWS_REKOGNITION_API DetectModerationLabelsRequest : public RekognitionRequest
{
public:
DetectModerationLabelsRequest();
    // The service request name is the operation name that sends this request out;
    // each operation should have a unique request name so that we can get the operation's name from this request.
    // Note: this is not true for responses; multiple operations may have the same response name,
    // so we cannot get the operation's name from a response.
inline virtual const char* GetServiceRequestName() const override { return "DetectModerationLabels"; }
Aws::String SerializePayload() const override;
Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
/**
* <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS
* CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is
* not supported. </p> <p>If you are using an AWS SDK to call Amazon Rekognition,
* you might not need to base64-encode image bytes passed using the
* <code>Bytes</code> field. For more information, see Images in the Amazon
* Rekognition developer guide.</p>
*/
inline const Image& GetImage() const{ return m_image; }
/**
* <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS
* CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is
* not supported. </p> <p>If you are using an AWS SDK to call Amazon Rekognition,
* you might not need to base64-encode image bytes passed using the
* <code>Bytes</code> field. For more information, see Images in the Amazon
* Rekognition developer guide.</p>
*/
inline bool ImageHasBeenSet() const { return m_imageHasBeenSet; }
/**
* <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS
* CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is
* not supported. </p> <p>If you are using an AWS SDK to call Amazon Rekognition,
* you might not need to base64-encode image bytes passed using the
* <code>Bytes</code> field. For more information, see Images in the Amazon
* Rekognition developer guide.</p>
*/
inline void SetImage(const Image& value) { m_imageHasBeenSet = true; m_image = value; }
/**
* <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS
* CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is
* not supported. </p> <p>If you are using an AWS SDK to call Amazon Rekognition,
* you might not need to base64-encode image bytes passed using the
* <code>Bytes</code> field. For more information, see Images in the Amazon
* Rekognition developer guide.</p>
*/
inline void SetImage(Image&& value) { m_imageHasBeenSet = true; m_image = std::move(value); }
/**
* <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS
* CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is
* not supported. </p> <p>If you are using an AWS SDK to call Amazon Rekognition,
* you might not need to base64-encode image bytes passed using the
* <code>Bytes</code> field. For more information, see Images in the Amazon
* Rekognition developer guide.</p>
*/
inline DetectModerationLabelsRequest& WithImage(const Image& value) { SetImage(value); return *this;}
/**
* <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS
* CLI to call Amazon Rekognition operations, passing base64-encoded image bytes is
* not supported. </p> <p>If you are using an AWS SDK to call Amazon Rekognition,
* you might not need to base64-encode image bytes passed using the
* <code>Bytes</code> field. For more information, see Images in the Amazon
* Rekognition developer guide.</p>
*/
inline DetectModerationLabelsRequest& WithImage(Image&& value) { SetImage(std::move(value)); return *this;}
/**
* <p>Specifies the minimum confidence level for the labels to return. Amazon
* Rekognition doesn't return any labels with a confidence level lower than this
* specified value.</p> <p>If you don't specify <code>MinConfidence</code>, the
* operation returns labels with confidence values greater than or equal to 50
* percent.</p>
*/
inline double GetMinConfidence() const{ return m_minConfidence; }
/**
* <p>Specifies the minimum confidence level for the labels to return. Amazon
* Rekognition doesn't return any labels with a confidence level lower than this
* specified value.</p> <p>If you don't specify <code>MinConfidence</code>, the
* operation returns labels with confidence values greater than or equal to 50
* percent.</p>
*/
inline bool MinConfidenceHasBeenSet() const { return m_minConfidenceHasBeenSet; }
/**
* <p>Specifies the minimum confidence level for the labels to return. Amazon
* Rekognition doesn't return any labels with a confidence level lower than this
* specified value.</p> <p>If you don't specify <code>MinConfidence</code>, the
* operation returns labels with confidence values greater than or equal to 50
* percent.</p>
*/
inline void SetMinConfidence(double value) { m_minConfidenceHasBeenSet = true; m_minConfidence = value; }
/**
* <p>Specifies the minimum confidence level for the labels to return. Amazon
* Rekognition doesn't return any labels with a confidence level lower than this
* specified value.</p> <p>If you don't specify <code>MinConfidence</code>, the
* operation returns labels with confidence values greater than or equal to 50
* percent.</p>
*/
inline DetectModerationLabelsRequest& WithMinConfidence(double value) { SetMinConfidence(value); return *this;}
/**
* <p>Sets up the configuration for human evaluation, including the FlowDefinition
* the image will be sent to.</p>
*/
inline const HumanLoopConfig& GetHumanLoopConfig() const{ return m_humanLoopConfig; }
/**
* <p>Sets up the configuration for human evaluation, including the FlowDefinition
* the image will be sent to.</p>
*/
inline bool HumanLoopConfigHasBeenSet() const { return m_humanLoopConfigHasBeenSet; }
/**
* <p>Sets up the configuration for human evaluation, including the FlowDefinition
* the image will be sent to.</p>
*/
inline void SetHumanLoopConfig(const HumanLoopConfig& value) { m_humanLoopConfigHasBeenSet = true; m_humanLoopConfig = value; }
/**
* <p>Sets up the configuration for human evaluation, including the FlowDefinition
* the image will be sent to.</p>
*/
inline void SetHumanLoopConfig(HumanLoopConfig&& value) { m_humanLoopConfigHasBeenSet = true; m_humanLoopConfig = std::move(value); }
/**
* <p>Sets up the configuration for human evaluation, including the FlowDefinition
* the image will be sent to.</p>
*/
inline DetectModerationLabelsRequest& WithHumanLoopConfig(const HumanLoopConfig& value) { SetHumanLoopConfig(value); return *this;}
/**
* <p>Sets up the configuration for human evaluation, including the FlowDefinition
* the image will be sent to.</p>
*/
inline DetectModerationLabelsRequest& WithHumanLoopConfig(HumanLoopConfig&& value) { SetHumanLoopConfig(std::move(value)); return *this;}
private:
Image m_image;
bool m_imageHasBeenSet;
double m_minConfidence;
bool m_minConfidenceHasBeenSet;
HumanLoopConfig m_humanLoopConfig;
bool m_humanLoopConfigHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
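
A sketch of a moderation request that routes images to human review. The loop name and flow definition ARN are hypothetical placeholders for resources created in Amazon A2I / SageMaker.

#include <aws/rekognition/model/DetectModerationLabelsRequest.h>
#include <aws/rekognition/model/HumanLoopConfig.h>
#include <aws/rekognition/model/Image.h>
#include <aws/rekognition/model/S3Object.h>

Aws::Rekognition::Model::DetectModerationLabelsRequest MakeModerationRequest()
{
    Aws::Rekognition::Model::HumanLoopConfig loopConfig;
    loopConfig.SetHumanLoopName("moderation-review");   // hypothetical loop name
    loopConfig.SetFlowDefinitionArn(                    // placeholder ARN
        "arn:aws:sagemaker:us-east-1:123456789012:flow-definition/review");

    Aws::Rekognition::Model::DetectModerationLabelsRequest request;
    request.SetImage(Aws::Rekognition::Model::Image().WithS3Object(
        Aws::Rekognition::Model::S3Object()
            .WithBucket("my-bucket")     // hypothetical bucket
            .WithName("upload.jpg")));   // hypothetical key
    request.SetMinConfidence(60.0);      // the default is 50 percent
    request.SetHumanLoopConfig(loopConfig);
    return request;
}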


@@ -0,0 +1,160 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/rekognition/model/HumanLoopActivationOutput.h>
#include <aws/rekognition/model/ModerationLabel.h>
#include <utility>
namespace Aws
{
template<typename RESULT_TYPE>
class AmazonWebServiceResult;
namespace Utils
{
namespace Json
{
class JsonValue;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
class AWS_REKOGNITION_API DetectModerationLabelsResult
{
public:
DetectModerationLabelsResult();
DetectModerationLabelsResult(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
DetectModerationLabelsResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
/**
* <p>Array of detected Moderation labels and the time, in milliseconds from the
* start of the video, they were detected.</p>
*/
inline const Aws::Vector<ModerationLabel>& GetModerationLabels() const{ return m_moderationLabels; }
/**
* <p>Array of detected Moderation labels and the time, in milliseconds from the
* start of the video, they were detected.</p>
*/
inline void SetModerationLabels(const Aws::Vector<ModerationLabel>& value) { m_moderationLabels = value; }
/**
* <p>Array of detected Moderation labels and the time, in milliseconds from the
* start of the video, they were detected.</p>
*/
inline void SetModerationLabels(Aws::Vector<ModerationLabel>&& value) { m_moderationLabels = std::move(value); }
/**
* <p>Array of detected Moderation labels and the time, in milliseconds from the
* start of the video, they were detected.</p>
*/
inline DetectModerationLabelsResult& WithModerationLabels(const Aws::Vector<ModerationLabel>& value) { SetModerationLabels(value); return *this;}
/**
* <p>Array of detected Moderation labels and the time, in milliseconds from the
* start of the video, they were detected.</p>
*/
inline DetectModerationLabelsResult& WithModerationLabels(Aws::Vector<ModerationLabel>&& value) { SetModerationLabels(std::move(value)); return *this;}
/**
* <p>Array of detected Moderation labels and the time, in milliseconds from the
* start of the video, they were detected.</p>
*/
inline DetectModerationLabelsResult& AddModerationLabels(const ModerationLabel& value) { m_moderationLabels.push_back(value); return *this; }
/**
* <p>Array of detected Moderation labels and the time, in milliseconds from the
* start of the video, they were detected.</p>
*/
inline DetectModerationLabelsResult& AddModerationLabels(ModerationLabel&& value) { m_moderationLabels.push_back(std::move(value)); return *this; }
/**
* <p>Version number of the moderation detection model that was used to detect
* unsafe content.</p>
*/
inline const Aws::String& GetModerationModelVersion() const{ return m_moderationModelVersion; }
/**
* <p>Version number of the moderation detection model that was used to detect
* unsafe content.</p>
*/
inline void SetModerationModelVersion(const Aws::String& value) { m_moderationModelVersion = value; }
/**
* <p>Version number of the moderation detection model that was used to detect
* unsafe content.</p>
*/
inline void SetModerationModelVersion(Aws::String&& value) { m_moderationModelVersion = std::move(value); }
/**
* <p>Version number of the moderation detection model that was used to detect
* unsafe content.</p>
*/
inline void SetModerationModelVersion(const char* value) { m_moderationModelVersion.assign(value); }
/**
* <p>Version number of the moderation detection model that was used to detect
* unsafe content.</p>
*/
inline DetectModerationLabelsResult& WithModerationModelVersion(const Aws::String& value) { SetModerationModelVersion(value); return *this;}
/**
* <p>Version number of the moderation detection model that was used to detect
* unsafe content.</p>
*/
inline DetectModerationLabelsResult& WithModerationModelVersion(Aws::String&& value) { SetModerationModelVersion(std::move(value)); return *this;}
/**
* <p>Version number of the moderation detection model that was used to detect
* unsafe content.</p>
*/
inline DetectModerationLabelsResult& WithModerationModelVersion(const char* value) { SetModerationModelVersion(value); return *this;}
/**
* <p>Shows the results of the human in the loop evaluation.</p>
*/
inline const HumanLoopActivationOutput& GetHumanLoopActivationOutput() const{ return m_humanLoopActivationOutput; }
/**
* <p>Shows the results of the human in the loop evaluation.</p>
*/
inline void SetHumanLoopActivationOutput(const HumanLoopActivationOutput& value) { m_humanLoopActivationOutput = value; }
/**
* <p>Shows the results of the human in the loop evaluation.</p>
*/
inline void SetHumanLoopActivationOutput(HumanLoopActivationOutput&& value) { m_humanLoopActivationOutput = std::move(value); }
/**
* <p>Shows the results of the human in the loop evaluation.</p>
*/
inline DetectModerationLabelsResult& WithHumanLoopActivationOutput(const HumanLoopActivationOutput& value) { SetHumanLoopActivationOutput(value); return *this;}
/**
* <p>Shows the results of the human in the loop evaluation.</p>
*/
inline DetectModerationLabelsResult& WithHumanLoopActivationOutput(HumanLoopActivationOutput&& value) { SetHumanLoopActivationOutput(std::move(value)); return *this;}
private:
Aws::Vector<ModerationLabel> m_moderationLabels;
Aws::String m_moderationModelVersion;
HumanLoopActivationOutput m_humanLoopActivationOutput;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
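
And the consuming side, assuming a client and the request sketched above; parent/child category names come straight off the ModerationLabel model.

#include <aws/rekognition/RekognitionClient.h>
#include <aws/rekognition/model/DetectModerationLabelsRequest.h>
#include <iostream>

void PrintModerationLabels(
    const Aws::Rekognition::RekognitionClient& client,
    const Aws::Rekognition::Model::DetectModerationLabelsRequest& request)
{
    auto outcome = client.DetectModerationLabels(request);
    if (!outcome.IsSuccess())
    {
        std::cerr << outcome.GetError().GetMessage() << "\n";
        return;
    }
    for (const auto& label : outcome.GetResult().GetModerationLabels())
    {
        // GetParentName() is empty for top-level categories.
        std::cout << label.GetParentName() << "/" << label.GetName()
                  << " (" << label.GetConfidence() << "%)\n";
    }
}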


@@ -0,0 +1,124 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/model/DetectionFilter.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/rekognition/model/RegionOfInterest.h>
#include <utility>
namespace Aws
{
namespace Utils
{
namespace Json
{
class JsonValue;
class JsonView;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
/**
* <p>A set of optional parameters that you can use to set the criteria that the
* text must meet to be included in your response. <code>WordFilter</code> looks at
 * a word's height, width, and minimum confidence. <code>RegionOfInterest</code>
* lets you set a specific region of the image to look for text in. </p><p><h3>See
* Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/rekognition-2016-06-27/DetectTextFilters">AWS
* API Reference</a></p>
*/
class AWS_REKOGNITION_API DetectTextFilters
{
public:
DetectTextFilters();
DetectTextFilters(Aws::Utils::Json::JsonView jsonValue);
DetectTextFilters& operator=(Aws::Utils::Json::JsonView jsonValue);
Aws::Utils::Json::JsonValue Jsonize() const;
inline const DetectionFilter& GetWordFilter() const{ return m_wordFilter; }
inline bool WordFilterHasBeenSet() const { return m_wordFilterHasBeenSet; }
inline void SetWordFilter(const DetectionFilter& value) { m_wordFilterHasBeenSet = true; m_wordFilter = value; }
inline void SetWordFilter(DetectionFilter&& value) { m_wordFilterHasBeenSet = true; m_wordFilter = std::move(value); }
inline DetectTextFilters& WithWordFilter(const DetectionFilter& value) { SetWordFilter(value); return *this;}
inline DetectTextFilters& WithWordFilter(DetectionFilter&& value) { SetWordFilter(std::move(value)); return *this;}
/**
* <p> A Filter focusing on a certain area of the image. Uses a
* <code>BoundingBox</code> object to set the region of the image.</p>
*/
inline const Aws::Vector<RegionOfInterest>& GetRegionsOfInterest() const{ return m_regionsOfInterest; }
/**
* <p> A Filter focusing on a certain area of the image. Uses a
* <code>BoundingBox</code> object to set the region of the image.</p>
*/
inline bool RegionsOfInterestHasBeenSet() const { return m_regionsOfInterestHasBeenSet; }
/**
* <p> A Filter focusing on a certain area of the image. Uses a
* <code>BoundingBox</code> object to set the region of the image.</p>
*/
inline void SetRegionsOfInterest(const Aws::Vector<RegionOfInterest>& value) { m_regionsOfInterestHasBeenSet = true; m_regionsOfInterest = value; }
/**
* <p> A Filter focusing on a certain area of the image. Uses a
* <code>BoundingBox</code> object to set the region of the image.</p>
*/
inline void SetRegionsOfInterest(Aws::Vector<RegionOfInterest>&& value) { m_regionsOfInterestHasBeenSet = true; m_regionsOfInterest = std::move(value); }
/**
* <p> A Filter focusing on a certain area of the image. Uses a
* <code>BoundingBox</code> object to set the region of the image.</p>
*/
inline DetectTextFilters& WithRegionsOfInterest(const Aws::Vector<RegionOfInterest>& value) { SetRegionsOfInterest(value); return *this;}
/**
* <p> A Filter focusing on a certain area of the image. Uses a
* <code>BoundingBox</code> object to set the region of the image.</p>
*/
inline DetectTextFilters& WithRegionsOfInterest(Aws::Vector<RegionOfInterest>&& value) { SetRegionsOfInterest(std::move(value)); return *this;}
/**
* <p> A Filter focusing on a certain area of the image. Uses a
* <code>BoundingBox</code> object to set the region of the image.</p>
*/
inline DetectTextFilters& AddRegionsOfInterest(const RegionOfInterest& value) { m_regionsOfInterestHasBeenSet = true; m_regionsOfInterest.push_back(value); return *this; }
/**
* <p> A Filter focusing on a certain area of the image. Uses a
* <code>BoundingBox</code> object to set the region of the image.</p>
*/
inline DetectTextFilters& AddRegionsOfInterest(RegionOfInterest&& value) { m_regionsOfInterestHasBeenSet = true; m_regionsOfInterest.push_back(std::move(value)); return *this; }
private:
DetectionFilter m_wordFilter;
bool m_wordFilterHasBeenSet;
Aws::Vector<RegionOfInterest> m_regionsOfInterest;
bool m_regionsOfInterestHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
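
A sketch that combines both filter kinds: a word filter plus one region of interest covering the top half of the image. All values are illustrative.

#include <aws/rekognition/model/DetectTextFilters.h>
#include <aws/rekognition/model/BoundingBox.h>

Aws::Rekognition::Model::DetectTextFilters MakeTextFilters()
{
    // Word filter: keep only confident detections (0.5-1 per the range
    // documented in DetectionFilter).
    Aws::Rekognition::Model::DetectionFilter wordFilter;
    wordFilter.SetMinConfidence(0.9);

    // Region of interest: the top half of the image, as ratios of its size.
    Aws::Rekognition::Model::BoundingBox topHalf;
    topHalf.SetLeft(0.0);
    topHalf.SetTop(0.0);
    topHalf.SetWidth(1.0);
    topHalf.SetHeight(0.5);

    Aws::Rekognition::Model::RegionOfInterest region;
    region.SetBoundingBox(topHalf);

    Aws::Rekognition::Model::DetectTextFilters filters;
    filters.SetWordFilter(wordFilter);
    filters.AddRegionsOfInterest(region);
    return filters;
}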


@@ -0,0 +1,140 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/RekognitionRequest.h>
#include <aws/rekognition/model/Image.h>
#include <aws/rekognition/model/DetectTextFilters.h>
#include <utility>
namespace Aws
{
namespace Rekognition
{
namespace Model
{
/**
*/
class AWS_REKOGNITION_API DetectTextRequest : public RekognitionRequest
{
public:
DetectTextRequest();
    // The service request name is the operation name that sends this request out;
    // each operation should have a unique request name so that we can get the operation's name from this request.
    // Note: this is not true for responses; multiple operations may have the same response name,
    // so we cannot get the operation's name from a response.
inline virtual const char* GetServiceRequestName() const override { return "DetectText"; }
Aws::String SerializePayload() const override;
Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
/**
* <p>The input image as base64-encoded bytes or an Amazon S3 object. If you use
* the AWS CLI to call Amazon Rekognition operations, you can't pass image bytes.
* </p> <p>If you are using an AWS SDK to call Amazon Rekognition, you might not
* need to base64-encode image bytes passed using the <code>Bytes</code> field. For
* more information, see Images in the Amazon Rekognition developer guide.</p>
*/
inline const Image& GetImage() const{ return m_image; }
/**
* <p>The input image as base64-encoded bytes or an Amazon S3 object. If you use
* the AWS CLI to call Amazon Rekognition operations, you can't pass image bytes.
* </p> <p>If you are using an AWS SDK to call Amazon Rekognition, you might not
* need to base64-encode image bytes passed using the <code>Bytes</code> field. For
* more information, see Images in the Amazon Rekognition developer guide.</p>
*/
inline bool ImageHasBeenSet() const { return m_imageHasBeenSet; }
/**
* <p>The input image as base64-encoded bytes or an Amazon S3 object. If you use
* the AWS CLI to call Amazon Rekognition operations, you can't pass image bytes.
* </p> <p>If you are using an AWS SDK to call Amazon Rekognition, you might not
* need to base64-encode image bytes passed using the <code>Bytes</code> field. For
* more information, see Images in the Amazon Rekognition developer guide.</p>
*/
inline void SetImage(const Image& value) { m_imageHasBeenSet = true; m_image = value; }
/**
* <p>The input image as base64-encoded bytes or an Amazon S3 object. If you use
* the AWS CLI to call Amazon Rekognition operations, you can't pass image bytes.
* </p> <p>If you are using an AWS SDK to call Amazon Rekognition, you might not
* need to base64-encode image bytes passed using the <code>Bytes</code> field. For
* more information, see Images in the Amazon Rekognition developer guide.</p>
*/
inline void SetImage(Image&& value) { m_imageHasBeenSet = true; m_image = std::move(value); }
/**
* <p>The input image as base64-encoded bytes or an Amazon S3 object. If you use
* the AWS CLI to call Amazon Rekognition operations, you can't pass image bytes.
* </p> <p>If you are using an AWS SDK to call Amazon Rekognition, you might not
* need to base64-encode image bytes passed using the <code>Bytes</code> field. For
* more information, see Images in the Amazon Rekognition developer guide.</p>
*/
inline DetectTextRequest& WithImage(const Image& value) { SetImage(value); return *this;}
/**
* <p>The input image as base64-encoded bytes or an Amazon S3 object. If you use
* the AWS CLI to call Amazon Rekognition operations, you can't pass image bytes.
* </p> <p>If you are using an AWS SDK to call Amazon Rekognition, you might not
* need to base64-encode image bytes passed using the <code>Bytes</code> field. For
* more information, see Images in the Amazon Rekognition developer guide.</p>
*/
inline DetectTextRequest& WithImage(Image&& value) { SetImage(std::move(value)); return *this;}
/**
* <p>Optional parameters that let you set the criteria that the text must meet to
* be included in your response.</p>
*/
inline const DetectTextFilters& GetFilters() const{ return m_filters; }
/**
* <p>Optional parameters that let you set the criteria that the text must meet to
* be included in your response.</p>
*/
inline bool FiltersHasBeenSet() const { return m_filtersHasBeenSet; }
/**
* <p>Optional parameters that let you set the criteria that the text must meet to
* be included in your response.</p>
*/
inline void SetFilters(const DetectTextFilters& value) { m_filtersHasBeenSet = true; m_filters = value; }
/**
* <p>Optional parameters that let you set the criteria that the text must meet to
* be included in your response.</p>
*/
inline void SetFilters(DetectTextFilters&& value) { m_filtersHasBeenSet = true; m_filters = std::move(value); }
/**
* <p>Optional parameters that let you set the criteria that the text must meet to
* be included in your response.</p>
*/
inline DetectTextRequest& WithFilters(const DetectTextFilters& value) { SetFilters(value); return *this;}
/**
* <p>Optional parameters that let you set the criteria that the text must meet to
* be included in your response.</p>
*/
inline DetectTextRequest& WithFilters(DetectTextFilters&& value) { SetFilters(std::move(value)); return *this;}
private:
Image m_image;
bool m_imageHasBeenSet;
DetectTextFilters m_filters;
bool m_filtersHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
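
A sketch wiring the filters from the previous header into this request; bucket and key are hypothetical.

#include <aws/rekognition/model/DetectTextRequest.h>
#include <aws/rekognition/model/Image.h>
#include <aws/rekognition/model/S3Object.h>

Aws::Rekognition::Model::DetectTextRequest MakeDetectTextRequest(
    const Aws::Rekognition::Model::DetectTextFilters& filters)
{
    Aws::Rekognition::Model::DetectTextRequest request;
    request.SetImage(Aws::Rekognition::Model::Image().WithS3Object(
        Aws::Rekognition::Model::S3Object()
            .WithBucket("my-bucket")   // hypothetical bucket
            .WithName("sign.jpg")));   // hypothetical key
    request.SetFilters(filters);
    return request;
}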


@@ -0,0 +1,117 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/rekognition/model/TextDetection.h>
#include <utility>
namespace Aws
{
template<typename RESULT_TYPE>
class AmazonWebServiceResult;
namespace Utils
{
namespace Json
{
class JsonValue;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
class AWS_REKOGNITION_API DetectTextResult
{
public:
DetectTextResult();
DetectTextResult(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
DetectTextResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
/**
* <p>An array of text that was detected in the input image.</p>
*/
inline const Aws::Vector<TextDetection>& GetTextDetections() const{ return m_textDetections; }
/**
* <p>An array of text that was detected in the input image.</p>
*/
inline void SetTextDetections(const Aws::Vector<TextDetection>& value) { m_textDetections = value; }
/**
* <p>An array of text that was detected in the input image.</p>
*/
inline void SetTextDetections(Aws::Vector<TextDetection>&& value) { m_textDetections = std::move(value); }
/**
* <p>An array of text that was detected in the input image.</p>
*/
inline DetectTextResult& WithTextDetections(const Aws::Vector<TextDetection>& value) { SetTextDetections(value); return *this;}
/**
* <p>An array of text that was detected in the input image.</p>
*/
inline DetectTextResult& WithTextDetections(Aws::Vector<TextDetection>&& value) { SetTextDetections(std::move(value)); return *this;}
/**
* <p>An array of text that was detected in the input image.</p>
*/
inline DetectTextResult& AddTextDetections(const TextDetection& value) { m_textDetections.push_back(value); return *this; }
/**
* <p>An array of text that was detected in the input image.</p>
*/
inline DetectTextResult& AddTextDetections(TextDetection&& value) { m_textDetections.push_back(std::move(value)); return *this; }
/**
* <p>The model version used to detect text.</p>
*/
inline const Aws::String& GetTextModelVersion() const{ return m_textModelVersion; }
/**
* <p>The model version used to detect text.</p>
*/
inline void SetTextModelVersion(const Aws::String& value) { m_textModelVersion = value; }
/**
* <p>The model version used to detect text.</p>
*/
inline void SetTextModelVersion(Aws::String&& value) { m_textModelVersion = std::move(value); }
/**
* <p>The model version used to detect text.</p>
*/
inline void SetTextModelVersion(const char* value) { m_textModelVersion.assign(value); }
/**
* <p>The model version used to detect text.</p>
*/
inline DetectTextResult& WithTextModelVersion(const Aws::String& value) { SetTextModelVersion(value); return *this;}
/**
* <p>The model version used to detect text.</p>
*/
inline DetectTextResult& WithTextModelVersion(Aws::String&& value) { SetTextModelVersion(std::move(value)); return *this;}
/**
* <p>The model version used to detect text.</p>
*/
inline DetectTextResult& WithTextModelVersion(const char* value) { SetTextModelVersion(value); return *this;}
private:
Aws::Vector<TextDetection> m_textDetections;
Aws::String m_textModelVersion;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
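
A sketch of walking this result; TextDetection exposes the detected string, its type (LINE or WORD), and a confidence.

#include <aws/rekognition/model/DetectTextResult.h>
#include <aws/rekognition/model/TextTypes.h>
#include <iostream>

void PrintTextDetections(const Aws::Rekognition::Model::DetectTextResult& result)
{
    for (const auto& detection : result.GetTextDetections())
    {
        const char* kind =
            detection.GetType() == Aws::Rekognition::Model::TextTypes::LINE
                ? "line" : "word";
        std::cout << kind << ": " << detection.GetDetectedText()
                  << " (" << detection.GetConfidence() << "%)\n";
    }
}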


@@ -0,0 +1,139 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
namespace Aws
{
namespace Utils
{
namespace Json
{
class JsonValue;
class JsonView;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
/**
* <p>A set of parameters that allow you to filter out certain results from your
* returned results.</p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/rekognition-2016-06-27/DetectionFilter">AWS
* API Reference</a></p>
*/
class AWS_REKOGNITION_API DetectionFilter
{
public:
DetectionFilter();
DetectionFilter(Aws::Utils::Json::JsonView jsonValue);
DetectionFilter& operator=(Aws::Utils::Json::JsonView jsonValue);
Aws::Utils::Json::JsonValue Jsonize() const;
/**
* <p>Sets confidence of word detection. Words with detection confidence below this
* will be excluded from the result. Values should be between 0.5 and 1 as Text in
* Video will not return any result below 0.5.</p>
*/
inline double GetMinConfidence() const{ return m_minConfidence; }
/**
* <p>Sets confidence of word detection. Words with detection confidence below this
* will be excluded from the result. Values should be between 0.5 and 1 as Text in
* Video will not return any result below 0.5.</p>
*/
inline bool MinConfidenceHasBeenSet() const { return m_minConfidenceHasBeenSet; }
/**
* <p>Sets confidence of word detection. Words with detection confidence below this
* will be excluded from the result. Values should be between 0.5 and 1 as Text in
* Video will not return any result below 0.5.</p>
*/
inline void SetMinConfidence(double value) { m_minConfidenceHasBeenSet = true; m_minConfidence = value; }
/**
* <p>Sets confidence of word detection. Words with detection confidence below this
* will be excluded from the result. Values should be between 0.5 and 1 as Text in
* Video will not return any result below 0.5.</p>
*/
inline DetectionFilter& WithMinConfidence(double value) { SetMinConfidence(value); return *this;}
/**
* <p>Sets the minimum height of the word bounding box. Words with bounding box
 * heights less than this value will be excluded from the result. Value is
* relative to the video frame height.</p>
*/
inline double GetMinBoundingBoxHeight() const{ return m_minBoundingBoxHeight; }
/**
* <p>Sets the minimum height of the word bounding box. Words with bounding box
 * heights less than this value will be excluded from the result. Value is
* relative to the video frame height.</p>
*/
inline bool MinBoundingBoxHeightHasBeenSet() const { return m_minBoundingBoxHeightHasBeenSet; }
/**
* <p>Sets the minimum height of the word bounding box. Words with bounding box
 * heights less than this value will be excluded from the result. Value is
* relative to the video frame height.</p>
*/
inline void SetMinBoundingBoxHeight(double value) { m_minBoundingBoxHeightHasBeenSet = true; m_minBoundingBoxHeight = value; }
/**
* <p>Sets the minimum height of the word bounding box. Words with bounding box
 * heights less than this value will be excluded from the result. Value is
* relative to the video frame height.</p>
*/
inline DetectionFilter& WithMinBoundingBoxHeight(double value) { SetMinBoundingBoxHeight(value); return *this;}
/**
 * <p>Sets the minimum width of the word bounding box. Words with bounding box
 * widths less than this value will be excluded from the result. Value is
* relative to the video frame width.</p>
*/
inline double GetMinBoundingBoxWidth() const{ return m_minBoundingBoxWidth; }
/**
 * <p>Sets the minimum width of the word bounding box. Words with bounding box
 * widths less than this value will be excluded from the result. Value is
* relative to the video frame width.</p>
*/
inline bool MinBoundingBoxWidthHasBeenSet() const { return m_minBoundingBoxWidthHasBeenSet; }
/**
 * <p>Sets the minimum width of the word bounding box. Words with bounding box
 * widths less than this value will be excluded from the result. Value is
* relative to the video frame width.</p>
*/
inline void SetMinBoundingBoxWidth(double value) { m_minBoundingBoxWidthHasBeenSet = true; m_minBoundingBoxWidth = value; }
/**
 * <p>Sets the minimum width of the word bounding box. Words with bounding box
 * widths less than this value will be excluded from the result. Value is
* relative to the video frame width.</p>
*/
inline DetectionFilter& WithMinBoundingBoxWidth(double value) { SetMinBoundingBoxWidth(value); return *this;}
private:
double m_minConfidence;
bool m_minConfidenceHasBeenSet;
double m_minBoundingBoxHeight;
bool m_minBoundingBoxHeightHasBeenSet;
double m_minBoundingBoxWidth;
bool m_minBoundingBoxWidthHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
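
A standalone sketch of this filter; the thresholds are illustrative and follow the ranges documented above (confidence between 0.5 and 1, box sizes relative to the frame).

#include <aws/rekognition/model/DetectionFilter.h>

Aws::Rekognition::Model::DetectionFilter MakeWordFilter()
{
    Aws::Rekognition::Model::DetectionFilter filter;
    filter.SetMinConfidence(0.8);         // drop words detected below 0.8
    filter.SetMinBoundingBoxHeight(0.05); // at least 5% of the frame height
    filter.SetMinBoundingBoxWidth(0.02);  // at least 2% of the frame width
    return filter;
}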


@@ -0,0 +1,107 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/model/EmotionName.h>
#include <utility>
namespace Aws
{
namespace Utils
{
namespace Json
{
class JsonValue;
class JsonView;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
/**
* <p>The emotions that appear to be expressed on the face, and the confidence
* level in the determination. The API is only making a determination of the
* physical appearance of a person's face. It is not a determination of the
 * person's internal emotional state and should not be used in such a way. For
* example, a person pretending to have a sad face might not be sad
* emotionally.</p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/rekognition-2016-06-27/Emotion">AWS
* API Reference</a></p>
*/
class AWS_REKOGNITION_API Emotion
{
public:
Emotion();
Emotion(Aws::Utils::Json::JsonView jsonValue);
Emotion& operator=(Aws::Utils::Json::JsonView jsonValue);
Aws::Utils::Json::JsonValue Jsonize() const;
/**
* <p>Type of emotion detected.</p>
*/
inline const EmotionName& GetType() const{ return m_type; }
/**
* <p>Type of emotion detected.</p>
*/
inline bool TypeHasBeenSet() const { return m_typeHasBeenSet; }
/**
* <p>Type of emotion detected.</p>
*/
inline void SetType(const EmotionName& value) { m_typeHasBeenSet = true; m_type = value; }
/**
* <p>Type of emotion detected.</p>
*/
inline void SetType(EmotionName&& value) { m_typeHasBeenSet = true; m_type = std::move(value); }
/**
* <p>Type of emotion detected.</p>
*/
inline Emotion& WithType(const EmotionName& value) { SetType(value); return *this;}
/**
* <p>Type of emotion detected.</p>
*/
inline Emotion& WithType(EmotionName&& value) { SetType(std::move(value)); return *this;}
/**
* <p>Level of confidence in the determination.</p>
*/
inline double GetConfidence() const{ return m_confidence; }
/**
* <p>Level of confidence in the determination.</p>
*/
inline bool ConfidenceHasBeenSet() const { return m_confidenceHasBeenSet; }
/**
* <p>Level of confidence in the determination.</p>
*/
inline void SetConfidence(double value) { m_confidenceHasBeenSet = true; m_confidence = value; }
/**
* <p>Level of confidence in the determination.</p>
*/
inline Emotion& WithConfidence(double value) { SetConfidence(value); return *this;}
private:
EmotionName m_type;
bool m_typeHasBeenSet;
double m_confidence;
bool m_confidenceHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
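
A sketch of picking the most confident emotion from a FaceDetail; it assumes the emotions vector is non-empty, which holds when faces are analyzed with ALL attributes.

#include <aws/rekognition/model/FaceDetail.h>
#include <algorithm>

Aws::Rekognition::Model::Emotion TopEmotion(
    const Aws::Rekognition::Model::FaceDetail& face)
{
    const auto& emotions = face.GetEmotions();
    // Assumes a non-empty vector, i.e. the face was analyzed with ALL
    // attributes requested.
    return *std::max_element(emotions.begin(), emotions.end(),
        [](const Aws::Rekognition::Model::Emotion& a,
           const Aws::Rekognition::Model::Emotion& b)
        { return a.GetConfidence() < b.GetConfidence(); });
}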


@@ -0,0 +1,38 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
namespace Aws
{
namespace Rekognition
{
namespace Model
{
enum class EmotionName
{
NOT_SET,
HAPPY,
SAD,
ANGRY,
CONFUSED,
DISGUSTED,
SURPRISED,
CALM,
UNKNOWN,
FEAR
};
namespace EmotionNameMapper
{
AWS_REKOGNITION_API EmotionName GetEmotionNameForName(const Aws::String& name);
AWS_REKOGNITION_API Aws::String GetNameForEmotionName(EmotionName value);
} // namespace EmotionNameMapper
} // namespace Model
} // namespace Rekognition
} // namespace Aws
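
A sketch of the mapper declared above, round-tripping between the enum and its wire string.

#include <aws/rekognition/model/EmotionName.h>
#include <iostream>

void RoundTripEmotionName()
{
    using namespace Aws::Rekognition::Model;

    Aws::String wire = EmotionNameMapper::GetNameForEmotionName(EmotionName::HAPPY);
    std::cout << wire << "\n";  // the wire string, e.g. "HAPPY"

    EmotionName parsed = EmotionNameMapper::GetEmotionNameForName(wire);
    std::cout << (parsed == EmotionName::HAPPY ? "match" : "mismatch") << "\n";
}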


@@ -0,0 +1,115 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/model/Summary.h>
#include <utility>
namespace Aws
{
namespace Utils
{
namespace Json
{
class JsonValue;
class JsonView;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
/**
* <p>The evaluation results for the training of a model.</p><p><h3>See Also:</h3>
* <a
* href="http://docs.aws.amazon.com/goto/WebAPI/rekognition-2016-06-27/EvaluationResult">AWS
* API Reference</a></p>
*/
class AWS_REKOGNITION_API EvaluationResult
{
public:
EvaluationResult();
EvaluationResult(Aws::Utils::Json::JsonView jsonValue);
EvaluationResult& operator=(Aws::Utils::Json::JsonView jsonValue);
Aws::Utils::Json::JsonValue Jsonize() const;
/**
* <p>The F1 score for the evaluation of all labels. The F1 score metric evaluates
* the overall precision and recall performance of the model as a single value. A
* higher value indicates better precision and recall performance. A lower score
* indicates that precision, recall, or both are performing poorly. </p>
*/
inline double GetF1Score() const{ return m_f1Score; }
/**
* <p>The F1 score for the evaluation of all labels. The F1 score metric evaluates
* the overall precision and recall performance of the model as a single value. A
* higher value indicates better precision and recall performance. A lower score
* indicates that precision, recall, or both are performing poorly. </p>
*/
inline bool F1ScoreHasBeenSet() const { return m_f1ScoreHasBeenSet; }
/**
* <p>The F1 score for the evaluation of all labels. The F1 score metric evaluates
* the overall precision and recall performance of the model as a single value. A
* higher value indicates better precision and recall performance. A lower score
* indicates that precision, recall, or both are performing poorly. </p>
*/
inline void SetF1Score(double value) { m_f1ScoreHasBeenSet = true; m_f1Score = value; }
/**
* <p>The F1 score for the evaluation of all labels. The F1 score metric evaluates
* the overall precision and recall performance of the model as a single value. A
* higher value indicates better precision and recall performance. A lower score
* indicates that precision, recall, or both are performing poorly. </p>
*/
inline EvaluationResult& WithF1Score(double value) { SetF1Score(value); return *this;}
/**
* <p>The S3 bucket that contains the training summary.</p>
*/
inline const Summary& GetSummary() const{ return m_summary; }
/**
* <p>The S3 bucket that contains the training summary.</p>
*/
inline bool SummaryHasBeenSet() const { return m_summaryHasBeenSet; }
/**
* <p>The S3 bucket that contains the training summary.</p>
*/
inline void SetSummary(const Summary& value) { m_summaryHasBeenSet = true; m_summary = value; }
/**
* <p>The S3 bucket that contains the training summary.</p>
*/
inline void SetSummary(Summary&& value) { m_summaryHasBeenSet = true; m_summary = std::move(value); }
/**
* <p>The S3 bucket that contains the training summary.</p>
*/
inline EvaluationResult& WithSummary(const Summary& value) { SetSummary(value); return *this;}
/**
* <p>The S3 bucket that contains the training summary.</p>
*/
inline EvaluationResult& WithSummary(Summary&& value) { SetSummary(std::move(value)); return *this;}
private:
double m_f1Score;
bool m_f1ScoreHasBeenSet;
Summary m_summary;
bool m_summaryHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
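
A sketch of reading an evaluation, assuming a ProjectVersionDescription obtained from DescribeProjectVersions; the summary's S3 location points at the full training report.

#include <aws/rekognition/model/ProjectVersionDescription.h>
#include <iostream>

void PrintEvaluation(
    const Aws::Rekognition::Model::ProjectVersionDescription& version)
{
    const auto& evaluation = version.GetEvaluationResult();
    std::cout << "F1 score: " << evaluation.GetF1Score() << "\n";

    // The summary references an S3 object holding the full training report.
    const auto& s3 = evaluation.GetSummary().GetS3Object();
    std::cout << "summary: s3://" << s3.GetBucket() << "/" << s3.GetName() << "\n";
}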


@@ -0,0 +1,91 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
namespace Aws
{
namespace Utils
{
namespace Json
{
class JsonValue;
class JsonView;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
/**
* <p>Indicates whether or not the eyes on the face are open, and the confidence
* level in the determination.</p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/rekognition-2016-06-27/EyeOpen">AWS
* API Reference</a></p>
*/
class AWS_REKOGNITION_API EyeOpen
{
public:
EyeOpen();
EyeOpen(Aws::Utils::Json::JsonView jsonValue);
EyeOpen& operator=(Aws::Utils::Json::JsonView jsonValue);
Aws::Utils::Json::JsonValue Jsonize() const;
/**
* <p>Boolean value that indicates whether the eyes on the face are open.</p>
*/
inline bool GetValue() const{ return m_value; }
/**
* <p>Boolean value that indicates whether the eyes on the face are open.</p>
*/
inline bool ValueHasBeenSet() const { return m_valueHasBeenSet; }
/**
* <p>Boolean value that indicates whether the eyes on the face are open.</p>
*/
inline void SetValue(bool value) { m_valueHasBeenSet = true; m_value = value; }
/**
* <p>Boolean value that indicates whether the eyes on the face are open.</p>
*/
inline EyeOpen& WithValue(bool value) { SetValue(value); return *this;}
/**
* <p>Level of confidence in the determination.</p>
*/
inline double GetConfidence() const{ return m_confidence; }
/**
* <p>Level of confidence in the determination.</p>
*/
inline bool ConfidenceHasBeenSet() const { return m_confidenceHasBeenSet; }
/**
* <p>Level of confidence in the determination.</p>
*/
inline void SetConfidence(double value) { m_confidenceHasBeenSet = true; m_confidence = value; }
/**
* <p>Level of confidence in the determination.</p>
*/
inline EyeOpen& WithConfidence(double value) { SetConfidence(value); return *this;}
private:
bool m_value;
bool m_valueHasBeenSet;
double m_confidence;
bool m_confidenceHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws


@@ -0,0 +1,95 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
namespace Aws
{
namespace Utils
{
namespace Json
{
class JsonValue;
class JsonView;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
/**
 * <p>Indicates whether or not the face is wearing eyeglasses, and the confidence
* level in the determination.</p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/rekognition-2016-06-27/Eyeglasses">AWS
* API Reference</a></p>
*/
class AWS_REKOGNITION_API Eyeglasses
{
public:
Eyeglasses();
Eyeglasses(Aws::Utils::Json::JsonView jsonValue);
Eyeglasses& operator=(Aws::Utils::Json::JsonView jsonValue);
Aws::Utils::Json::JsonValue Jsonize() const;
/**
 * <p>Boolean value that indicates whether the face is wearing eyeglasses or
* not.</p>
*/
inline bool GetValue() const{ return m_value; }
/**
 * <p>Boolean value that indicates whether the face is wearing eyeglasses or
* not.</p>
*/
inline bool ValueHasBeenSet() const { return m_valueHasBeenSet; }
/**
 * <p>Boolean value that indicates whether the face is wearing eyeglasses or
* not.</p>
*/
inline void SetValue(bool value) { m_valueHasBeenSet = true; m_value = value; }
/**
 * <p>Boolean value that indicates whether the face is wearing eyeglasses or
* not.</p>
*/
inline Eyeglasses& WithValue(bool value) { SetValue(value); return *this;}
/**
* <p>Level of confidence in the determination.</p>
*/
inline double GetConfidence() const{ return m_confidence; }
/**
* <p>Level of confidence in the determination.</p>
*/
inline bool ConfidenceHasBeenSet() const { return m_confidenceHasBeenSet; }
/**
* <p>Level of confidence in the determination.</p>
*/
inline void SetConfidence(double value) { m_confidenceHasBeenSet = true; m_confidence = value; }
/**
* <p>Level of confidence in the determination.</p>
*/
inline Eyeglasses& WithConfidence(double value) { SetConfidence(value); return *this;}
private:
bool m_value;
bool m_valueHasBeenSet;
double m_confidence;
bool m_confidenceHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
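
EyeOpen and Eyeglasses follow the same value-plus-confidence shape, so a common pattern is to gate on the confidence before trusting the boolean; the 90.0 threshold here is illustrative.

#include <aws/rekognition/model/FaceDetail.h>

bool LikelyWearingEyeglasses(const Aws::Rekognition::Model::FaceDetail& face)
{
    const auto& glasses = face.GetEyeglasses();
    return glasses.GetValue() && glasses.GetConfidence() >= 90.0;
}

bool LikelyEyesOpen(const Aws::Rekognition::Model::FaceDetail& face)
{
    const auto& eyes = face.GetEyesOpen();
    return eyes.GetValue() && eyes.GetConfidence() >= 90.0;
}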


@@ -0,0 +1,241 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/rekognition/model/BoundingBox.h>
#include <utility>
namespace Aws
{
namespace Utils
{
namespace Json
{
class JsonValue;
class JsonView;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
/**
* <p>Describes the face properties such as the bounding box, face ID, image ID of
* the input image, and external image ID that you assigned. </p><p><h3>See
* Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/rekognition-2016-06-27/Face">AWS
* API Reference</a></p>
*/
class AWS_REKOGNITION_API Face
{
public:
Face();
Face(Aws::Utils::Json::JsonView jsonValue);
Face& operator=(Aws::Utils::Json::JsonView jsonValue);
Aws::Utils::Json::JsonValue Jsonize() const;
/**
* <p>Unique identifier that Amazon Rekognition assigns to the face.</p>
*/
inline const Aws::String& GetFaceId() const{ return m_faceId; }
/**
* <p>Unique identifier that Amazon Rekognition assigns to the face.</p>
*/
inline bool FaceIdHasBeenSet() const { return m_faceIdHasBeenSet; }
/**
* <p>Unique identifier that Amazon Rekognition assigns to the face.</p>
*/
inline void SetFaceId(const Aws::String& value) { m_faceIdHasBeenSet = true; m_faceId = value; }
/**
* <p>Unique identifier that Amazon Rekognition assigns to the face.</p>
*/
inline void SetFaceId(Aws::String&& value) { m_faceIdHasBeenSet = true; m_faceId = std::move(value); }
/**
* <p>Unique identifier that Amazon Rekognition assigns to the face.</p>
*/
inline void SetFaceId(const char* value) { m_faceIdHasBeenSet = true; m_faceId.assign(value); }
/**
* <p>Unique identifier that Amazon Rekognition assigns to the face.</p>
*/
inline Face& WithFaceId(const Aws::String& value) { SetFaceId(value); return *this;}
/**
* <p>Unique identifier that Amazon Rekognition assigns to the face.</p>
*/
inline Face& WithFaceId(Aws::String&& value) { SetFaceId(std::move(value)); return *this;}
/**
* <p>Unique identifier that Amazon Rekognition assigns to the face.</p>
*/
inline Face& WithFaceId(const char* value) { SetFaceId(value); return *this;}
/**
* <p>Bounding box of the face.</p>
*/
inline const BoundingBox& GetBoundingBox() const{ return m_boundingBox; }
/**
* <p>Bounding box of the face.</p>
*/
inline bool BoundingBoxHasBeenSet() const { return m_boundingBoxHasBeenSet; }
/**
* <p>Bounding box of the face.</p>
*/
inline void SetBoundingBox(const BoundingBox& value) { m_boundingBoxHasBeenSet = true; m_boundingBox = value; }
/**
* <p>Bounding box of the face.</p>
*/
inline void SetBoundingBox(BoundingBox&& value) { m_boundingBoxHasBeenSet = true; m_boundingBox = std::move(value); }
/**
* <p>Bounding box of the face.</p>
*/
inline Face& WithBoundingBox(const BoundingBox& value) { SetBoundingBox(value); return *this;}
/**
* <p>Bounding box of the face.</p>
*/
inline Face& WithBoundingBox(BoundingBox&& value) { SetBoundingBox(std::move(value)); return *this;}
/**
* <p>Unique identifier that Amazon Rekognition assigns to the input image.</p>
*/
inline const Aws::String& GetImageId() const{ return m_imageId; }
/**
* <p>Unique identifier that Amazon Rekognition assigns to the input image.</p>
*/
inline bool ImageIdHasBeenSet() const { return m_imageIdHasBeenSet; }
/**
* <p>Unique identifier that Amazon Rekognition assigns to the input image.</p>
*/
inline void SetImageId(const Aws::String& value) { m_imageIdHasBeenSet = true; m_imageId = value; }
/**
* <p>Unique identifier that Amazon Rekognition assigns to the input image.</p>
*/
inline void SetImageId(Aws::String&& value) { m_imageIdHasBeenSet = true; m_imageId = std::move(value); }
/**
* <p>Unique identifier that Amazon Rekognition assigns to the input image.</p>
*/
inline void SetImageId(const char* value) { m_imageIdHasBeenSet = true; m_imageId.assign(value); }
/**
* <p>Unique identifier that Amazon Rekognition assigns to the input image.</p>
*/
inline Face& WithImageId(const Aws::String& value) { SetImageId(value); return *this;}
/**
* <p>Unique identifier that Amazon Rekognition assigns to the input image.</p>
*/
inline Face& WithImageId(Aws::String&& value) { SetImageId(std::move(value)); return *this;}
/**
* <p>Unique identifier that Amazon Rekognition assigns to the input image.</p>
*/
inline Face& WithImageId(const char* value) { SetImageId(value); return *this;}
/**
* <p>Identifier that you assign to all the faces in the input image.</p>
*/
inline const Aws::String& GetExternalImageId() const{ return m_externalImageId; }
/**
* <p>Identifier that you assign to all the faces in the input image.</p>
*/
inline bool ExternalImageIdHasBeenSet() const { return m_externalImageIdHasBeenSet; }
/**
* <p>Identifier that you assign to all the faces in the input image.</p>
*/
inline void SetExternalImageId(const Aws::String& value) { m_externalImageIdHasBeenSet = true; m_externalImageId = value; }
/**
* <p>Identifier that you assign to all the faces in the input image.</p>
*/
inline void SetExternalImageId(Aws::String&& value) { m_externalImageIdHasBeenSet = true; m_externalImageId = std::move(value); }
/**
* <p>Identifier that you assign to all the faces in the input image.</p>
*/
inline void SetExternalImageId(const char* value) { m_externalImageIdHasBeenSet = true; m_externalImageId.assign(value); }
/**
* <p>Identifier that you assign to all the faces in the input image.</p>
*/
inline Face& WithExternalImageId(const Aws::String& value) { SetExternalImageId(value); return *this;}
/**
* <p>Identifier that you assign to all the faces in the input image.</p>
*/
inline Face& WithExternalImageId(Aws::String&& value) { SetExternalImageId(std::move(value)); return *this;}
/**
* <p>Identifier that you assign to all the faces in the input image.</p>
*/
inline Face& WithExternalImageId(const char* value) { SetExternalImageId(value); return *this;}
/**
* <p>Confidence level that the bounding box contains a face (and not a different
* object such as a tree).</p>
*/
inline double GetConfidence() const{ return m_confidence; }
/**
* <p>Confidence level that the bounding box contains a face (and not a different
* object such as a tree).</p>
*/
inline bool ConfidenceHasBeenSet() const { return m_confidenceHasBeenSet; }
/**
* <p>Confidence level that the bounding box contains a face (and not a different
* object such as a tree).</p>
*/
inline void SetConfidence(double value) { m_confidenceHasBeenSet = true; m_confidence = value; }
/**
* <p>Confidence level that the bounding box contains a face (and not a different
* object such as a tree).</p>
*/
inline Face& WithConfidence(double value) { SetConfidence(value); return *this;}
private:
Aws::String m_faceId;
bool m_faceIdHasBeenSet;
BoundingBox m_boundingBox;
bool m_boundingBoxHasBeenSet;
Aws::String m_imageId;
bool m_imageIdHasBeenSet;
Aws::String m_externalImageId;
bool m_externalImageIdHasBeenSet;
double m_confidence;
bool m_confidenceHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
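A sketch of reading a stored Face. The PrintFace helper is hypothetical; the object it receives would come from an SDK response such as ListFaces or SearchFaces:

#include <aws/rekognition/model/Face.h>
#include <iostream>

void PrintFace(const Aws::Rekognition::Model::Face& face)
{
    if (face.FaceIdHasBeenSet())
        std::cout << "FaceId: " << face.GetFaceId() << "\n";
    if (face.ExternalImageIdHasBeenSet())
        std::cout << "ExternalImageId: " << face.GetExternalImageId() << "\n";
    if (face.BoundingBoxHasBeenSet())
    {
        // BoundingBox dimensions are ratios of the overall image size.
        const auto& box = face.GetBoundingBox();
        std::cout << "Box: " << box.GetWidth() << " x " << box.GetHeight() << "\n";
    }
    if (face.ConfidenceHasBeenSet())
        std::cout << "Confidence: " << face.GetConfidence() << "\n";
}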

View File

@@ -0,0 +1,31 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
namespace Aws
{
namespace Rekognition
{
namespace Model
{
enum class FaceAttributes
{
NOT_SET,
DEFAULT,
ALL
};
namespace FaceAttributesMapper
{
AWS_REKOGNITION_API FaceAttributes GetFaceAttributesForName(const Aws::String& name);
AWS_REKOGNITION_API Aws::String GetNameForFaceAttributes(FaceAttributes value);
} // namespace FaceAttributesMapper
} // namespace Model
} // namespace Rekognition
} // namespace Aws
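As with the SDK's other generated enums, the mapper namespace converts between enum values and their wire names. A small sketch, assuming the wire names are the literal strings "DEFAULT" and "ALL":

#include <aws/rekognition/model/FaceAttributes.h>
#include <iostream>

int main()
{
    using namespace Aws::Rekognition::Model;
    // Round-trip a value through its wire name; prints "ALL".
    FaceAttributes attrs = FaceAttributesMapper::GetFaceAttributesForName("ALL");
    std::cout << FaceAttributesMapper::GetNameForFaceAttributes(attrs) << std::endl;
    return 0;
}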

View File

@@ -0,0 +1,693 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/model/BoundingBox.h>
#include <aws/rekognition/model/AgeRange.h>
#include <aws/rekognition/model/Smile.h>
#include <aws/rekognition/model/Eyeglasses.h>
#include <aws/rekognition/model/Sunglasses.h>
#include <aws/rekognition/model/Gender.h>
#include <aws/rekognition/model/Beard.h>
#include <aws/rekognition/model/Mustache.h>
#include <aws/rekognition/model/EyeOpen.h>
#include <aws/rekognition/model/MouthOpen.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/rekognition/model/Pose.h>
#include <aws/rekognition/model/ImageQuality.h>
#include <aws/rekognition/model/Emotion.h>
#include <aws/rekognition/model/Landmark.h>
#include <utility>
namespace Aws
{
namespace Utils
{
namespace Json
{
class JsonValue;
class JsonView;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
/**
* <p>Structure containing attributes of the face that the algorithm detected.</p>
* <p>A <code>FaceDetail</code> object contains either the default facial
* attributes or all facial attributes. The default attributes are
* <code>BoundingBox</code>, <code>Confidence</code>, <code>Landmarks</code>,
* <code>Pose</code>, and <code>Quality</code>.</p> <p> <a>GetFaceDetection</a> is
* the only Amazon Rekognition Video stored video operation that can return a
* <code>FaceDetail</code> object with all attributes. To specify which attributes
* to return, use the <code>FaceAttributes</code> input parameter for
* <a>StartFaceDetection</a>. The following Amazon Rekognition Video operations
* return only the default attributes. The corresponding Start operations don't
* have a <code>FaceAttributes</code> input parameter.</p> <ul> <li>
* <p>GetCelebrityRecognition</p> </li> <li> <p>GetPersonTracking</p> </li> <li>
* <p>GetFaceSearch</p> </li> </ul> <p>The Amazon Rekognition Image
* <a>DetectFaces</a> and <a>IndexFaces</a> operations can return all facial
* attributes. To specify which attributes to return, use the
* <code>Attributes</code> input parameter for <code>DetectFaces</code>. For
* <code>IndexFaces</code>, use the <code>DetectAttributes</code> input
* parameter.</p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/rekognition-2016-06-27/FaceDetail">AWS
* API Reference</a></p>
*/
class AWS_REKOGNITION_API FaceDetail
{
public:
FaceDetail();
FaceDetail(Aws::Utils::Json::JsonView jsonValue);
FaceDetail& operator=(Aws::Utils::Json::JsonView jsonValue);
Aws::Utils::Json::JsonValue Jsonize() const;
/**
* <p>Bounding box of the face. Default attribute.</p>
*/
inline const BoundingBox& GetBoundingBox() const{ return m_boundingBox; }
/**
* <p>Bounding box of the face. Default attribute.</p>
*/
inline bool BoundingBoxHasBeenSet() const { return m_boundingBoxHasBeenSet; }
/**
* <p>Bounding box of the face. Default attribute.</p>
*/
inline void SetBoundingBox(const BoundingBox& value) { m_boundingBoxHasBeenSet = true; m_boundingBox = value; }
/**
* <p>Bounding box of the face. Default attribute.</p>
*/
inline void SetBoundingBox(BoundingBox&& value) { m_boundingBoxHasBeenSet = true; m_boundingBox = std::move(value); }
/**
* <p>Bounding box of the face. Default attribute.</p>
*/
inline FaceDetail& WithBoundingBox(const BoundingBox& value) { SetBoundingBox(value); return *this;}
/**
* <p>Bounding box of the face. Default attribute.</p>
*/
inline FaceDetail& WithBoundingBox(BoundingBox&& value) { SetBoundingBox(std::move(value)); return *this;}
/**
* <p>The estimated age range, in years, for the face. Low represents the lowest
* estimated age and High represents the highest estimated age.</p>
*/
inline const AgeRange& GetAgeRange() const{ return m_ageRange; }
/**
* <p>The estimated age range, in years, for the face. Low represents the lowest
* estimated age and High represents the highest estimated age.</p>
*/
inline bool AgeRangeHasBeenSet() const { return m_ageRangeHasBeenSet; }
/**
* <p>The estimated age range, in years, for the face. Low represents the lowest
* estimated age and High represents the highest estimated age.</p>
*/
inline void SetAgeRange(const AgeRange& value) { m_ageRangeHasBeenSet = true; m_ageRange = value; }
/**
* <p>The estimated age range, in years, for the face. Low represents the lowest
* estimated age and High represents the highest estimated age.</p>
*/
inline void SetAgeRange(AgeRange&& value) { m_ageRangeHasBeenSet = true; m_ageRange = std::move(value); }
/**
* <p>The estimated age range, in years, for the face. Low represents the lowest
* estimated age and High represents the highest estimated age.</p>
*/
inline FaceDetail& WithAgeRange(const AgeRange& value) { SetAgeRange(value); return *this;}
/**
* <p>The estimated age range, in years, for the face. Low represents the lowest
* estimated age and High represents the highest estimated age.</p>
*/
inline FaceDetail& WithAgeRange(AgeRange&& value) { SetAgeRange(std::move(value)); return *this;}
/**
* <p>Indicates whether or not the face is smiling, and the confidence level in the
* determination.</p>
*/
inline const Smile& GetSmile() const{ return m_smile; }
/**
* <p>Indicates whether or not the face is smiling, and the confidence level in the
* determination.</p>
*/
inline bool SmileHasBeenSet() const { return m_smileHasBeenSet; }
/**
* <p>Indicates whether or not the face is smiling, and the confidence level in the
* determination.</p>
*/
inline void SetSmile(const Smile& value) { m_smileHasBeenSet = true; m_smile = value; }
/**
* <p>Indicates whether or not the face is smiling, and the confidence level in the
* determination.</p>
*/
inline void SetSmile(Smile&& value) { m_smileHasBeenSet = true; m_smile = std::move(value); }
/**
* <p>Indicates whether or not the face is smiling, and the confidence level in the
* determination.</p>
*/
inline FaceDetail& WithSmile(const Smile& value) { SetSmile(value); return *this;}
/**
* <p>Indicates whether or not the face is smiling, and the confidence level in the
* determination.</p>
*/
inline FaceDetail& WithSmile(Smile&& value) { SetSmile(std::move(value)); return *this;}
/**
* <p>Indicates whether or not the face is wearing eye glasses, and the confidence
* level in the determination.</p>
*/
inline const Eyeglasses& GetEyeglasses() const{ return m_eyeglasses; }
/**
* <p>Indicates whether or not the face is wearing eye glasses, and the confidence
* level in the determination.</p>
*/
inline bool EyeglassesHasBeenSet() const { return m_eyeglassesHasBeenSet; }
/**
* <p>Indicates whether or not the face is wearing eye glasses, and the confidence
* level in the determination.</p>
*/
inline void SetEyeglasses(const Eyeglasses& value) { m_eyeglassesHasBeenSet = true; m_eyeglasses = value; }
/**
* <p>Indicates whether or not the face is wearing eye glasses, and the confidence
* level in the determination.</p>
*/
inline void SetEyeglasses(Eyeglasses&& value) { m_eyeglassesHasBeenSet = true; m_eyeglasses = std::move(value); }
/**
* <p>Indicates whether or not the face is wearing eye glasses, and the confidence
* level in the determination.</p>
*/
inline FaceDetail& WithEyeglasses(const Eyeglasses& value) { SetEyeglasses(value); return *this;}
/**
* <p>Indicates whether or not the face is wearing eye glasses, and the confidence
* level in the determination.</p>
*/
inline FaceDetail& WithEyeglasses(Eyeglasses&& value) { SetEyeglasses(std::move(value)); return *this;}
/**
* <p>Indicates whether or not the face is wearing sunglasses, and the confidence
* level in the determination.</p>
*/
inline const Sunglasses& GetSunglasses() const{ return m_sunglasses; }
/**
* <p>Indicates whether or not the face is wearing sunglasses, and the confidence
* level in the determination.</p>
*/
inline bool SunglassesHasBeenSet() const { return m_sunglassesHasBeenSet; }
/**
* <p>Indicates whether or not the face is wearing sunglasses, and the confidence
* level in the determination.</p>
*/
inline void SetSunglasses(const Sunglasses& value) { m_sunglassesHasBeenSet = true; m_sunglasses = value; }
/**
* <p>Indicates whether or not the face is wearing sunglasses, and the confidence
* level in the determination.</p>
*/
inline void SetSunglasses(Sunglasses&& value) { m_sunglassesHasBeenSet = true; m_sunglasses = std::move(value); }
/**
* <p>Indicates whether or not the face is wearing sunglasses, and the confidence
* level in the determination.</p>
*/
inline FaceDetail& WithSunglasses(const Sunglasses& value) { SetSunglasses(value); return *this;}
/**
* <p>Indicates whether or not the face is wearing sunglasses, and the confidence
* level in the determination.</p>
*/
inline FaceDetail& WithSunglasses(Sunglasses&& value) { SetSunglasses(std::move(value)); return *this;}
/**
* <p>The predicted gender of a detected face. </p>
*/
inline const Gender& GetGender() const{ return m_gender; }
/**
* <p>The predicted gender of a detected face. </p>
*/
inline bool GenderHasBeenSet() const { return m_genderHasBeenSet; }
/**
* <p>The predicted gender of a detected face. </p>
*/
inline void SetGender(const Gender& value) { m_genderHasBeenSet = true; m_gender = value; }
/**
* <p>The predicted gender of a detected face. </p>
*/
inline void SetGender(Gender&& value) { m_genderHasBeenSet = true; m_gender = std::move(value); }
/**
* <p>The predicted gender of a detected face. </p>
*/
inline FaceDetail& WithGender(const Gender& value) { SetGender(value); return *this;}
/**
* <p>The predicted gender of a detected face. </p>
*/
inline FaceDetail& WithGender(Gender&& value) { SetGender(std::move(value)); return *this;}
/**
* <p>Indicates whether or not the face has a beard, and the confidence level in
* the determination.</p>
*/
inline const Beard& GetBeard() const{ return m_beard; }
/**
* <p>Indicates whether or not the face has a beard, and the confidence level in
* the determination.</p>
*/
inline bool BeardHasBeenSet() const { return m_beardHasBeenSet; }
/**
* <p>Indicates whether or not the face has a beard, and the confidence level in
* the determination.</p>
*/
inline void SetBeard(const Beard& value) { m_beardHasBeenSet = true; m_beard = value; }
/**
* <p>Indicates whether or not the face has a beard, and the confidence level in
* the determination.</p>
*/
inline void SetBeard(Beard&& value) { m_beardHasBeenSet = true; m_beard = std::move(value); }
/**
* <p>Indicates whether or not the face has a beard, and the confidence level in
* the determination.</p>
*/
inline FaceDetail& WithBeard(const Beard& value) { SetBeard(value); return *this;}
/**
* <p>Indicates whether or not the face has a beard, and the confidence level in
* the determination.</p>
*/
inline FaceDetail& WithBeard(Beard&& value) { SetBeard(std::move(value)); return *this;}
/**
* <p>Indicates whether or not the face has a mustache, and the confidence level in
* the determination.</p>
*/
inline const Mustache& GetMustache() const{ return m_mustache; }
/**
* <p>Indicates whether or not the face has a mustache, and the confidence level in
* the determination.</p>
*/
inline bool MustacheHasBeenSet() const { return m_mustacheHasBeenSet; }
/**
* <p>Indicates whether or not the face has a mustache, and the confidence level in
* the determination.</p>
*/
inline void SetMustache(const Mustache& value) { m_mustacheHasBeenSet = true; m_mustache = value; }
/**
* <p>Indicates whether or not the face has a mustache, and the confidence level in
* the determination.</p>
*/
inline void SetMustache(Mustache&& value) { m_mustacheHasBeenSet = true; m_mustache = std::move(value); }
/**
* <p>Indicates whether or not the face has a mustache, and the confidence level in
* the determination.</p>
*/
inline FaceDetail& WithMustache(const Mustache& value) { SetMustache(value); return *this;}
/**
* <p>Indicates whether or not the face has a mustache, and the confidence level in
* the determination.</p>
*/
inline FaceDetail& WithMustache(Mustache&& value) { SetMustache(std::move(value)); return *this;}
/**
* <p>Indicates whether or not the eyes on the face are open, and the confidence
* level in the determination.</p>
*/
inline const EyeOpen& GetEyesOpen() const{ return m_eyesOpen; }
/**
* <p>Indicates whether or not the eyes on the face are open, and the confidence
* level in the determination.</p>
*/
inline bool EyesOpenHasBeenSet() const { return m_eyesOpenHasBeenSet; }
/**
* <p>Indicates whether or not the eyes on the face are open, and the confidence
* level in the determination.</p>
*/
inline void SetEyesOpen(const EyeOpen& value) { m_eyesOpenHasBeenSet = true; m_eyesOpen = value; }
/**
* <p>Indicates whether or not the eyes on the face are open, and the confidence
* level in the determination.</p>
*/
inline void SetEyesOpen(EyeOpen&& value) { m_eyesOpenHasBeenSet = true; m_eyesOpen = std::move(value); }
/**
* <p>Indicates whether or not the eyes on the face are open, and the confidence
* level in the determination.</p>
*/
inline FaceDetail& WithEyesOpen(const EyeOpen& value) { SetEyesOpen(value); return *this;}
/**
* <p>Indicates whether or not the eyes on the face are open, and the confidence
* level in the determination.</p>
*/
inline FaceDetail& WithEyesOpen(EyeOpen&& value) { SetEyesOpen(std::move(value)); return *this;}
/**
* <p>Indicates whether or not the mouth on the face is open, and the confidence
* level in the determination.</p>
*/
inline const MouthOpen& GetMouthOpen() const{ return m_mouthOpen; }
/**
* <p>Indicates whether or not the mouth on the face is open, and the confidence
* level in the determination.</p>
*/
inline bool MouthOpenHasBeenSet() const { return m_mouthOpenHasBeenSet; }
/**
* <p>Indicates whether or not the mouth on the face is open, and the confidence
* level in the determination.</p>
*/
inline void SetMouthOpen(const MouthOpen& value) { m_mouthOpenHasBeenSet = true; m_mouthOpen = value; }
/**
* <p>Indicates whether or not the mouth on the face is open, and the confidence
* level in the determination.</p>
*/
inline void SetMouthOpen(MouthOpen&& value) { m_mouthOpenHasBeenSet = true; m_mouthOpen = std::move(value); }
/**
* <p>Indicates whether or not the mouth on the face is open, and the confidence
* level in the determination.</p>
*/
inline FaceDetail& WithMouthOpen(const MouthOpen& value) { SetMouthOpen(value); return *this;}
/**
* <p>Indicates whether or not the mouth on the face is open, and the confidence
* level in the determination.</p>
*/
inline FaceDetail& WithMouthOpen(MouthOpen&& value) { SetMouthOpen(std::move(value)); return *this;}
/**
* <p>The emotions that appear to be expressed on the face, and the confidence
* level in the determination. The API is only making a determination of the
* physical appearance of a person's face. It is not a determination of the
* person's internal emotional state and should not be used in such a way. For
* example, a person pretending to have a sad face might not be sad
* emotionally.</p>
*/
inline const Aws::Vector<Emotion>& GetEmotions() const{ return m_emotions; }
/**
* <p>The emotions that appear to be expressed on the face, and the confidence
* level in the determination. The API is only making a determination of the
* physical appearance of a person's face. It is not a determination of the
* person's internal emotional state and should not be used in such a way. For
* example, a person pretending to have a sad face might not be sad
* emotionally.</p>
*/
inline bool EmotionsHasBeenSet() const { return m_emotionsHasBeenSet; }
/**
* <p>The emotions that appear to be expressed on the face, and the confidence
* level in the determination. The API is only making a determination of the
* physical appearance of a person's face. It is not a determination of the
* person's internal emotional state and should not be used in such a way. For
* example, a person pretending to have a sad face might not be sad
* emotionally.</p>
*/
inline void SetEmotions(const Aws::Vector<Emotion>& value) { m_emotionsHasBeenSet = true; m_emotions = value; }
/**
* <p>The emotions that appear to be expressed on the face, and the confidence
* level in the determination. The API is only making a determination of the
* physical appearance of a person's face. It is not a determination of the
* person's internal emotional state and should not be used in such a way. For
* example, a person pretending to have a sad face might not be sad
* emotionally.</p>
*/
inline void SetEmotions(Aws::Vector<Emotion>&& value) { m_emotionsHasBeenSet = true; m_emotions = std::move(value); }
/**
* <p>The emotions that appear to be expressed on the face, and the confidence
* level in the determination. The API is only making a determination of the
* physical appearance of a person's face. It is not a determination of the
* person's internal emotional state and should not be used in such a way. For
* example, a person pretending to have a sad face might not be sad
* emotionally.</p>
*/
inline FaceDetail& WithEmotions(const Aws::Vector<Emotion>& value) { SetEmotions(value); return *this;}
/**
* <p>The emotions that appear to be expressed on the face, and the confidence
* level in the determination. The API is only making a determination of the
* physical appearance of a person's face. It is not a determination of the
* person's internal emotional state and should not be used in such a way. For
* example, a person pretending to have a sad face might not be sad
* emotionally.</p>
*/
inline FaceDetail& WithEmotions(Aws::Vector<Emotion>&& value) { SetEmotions(std::move(value)); return *this;}
/**
* <p>The emotions that appear to be expressed on the face, and the confidence
* level in the determination. The API is only making a determination of the
* physical appearance of a person's face. It is not a determination of the
* person's internal emotional state and should not be used in such a way. For
* example, a person pretending to have a sad face might not be sad
* emotionally.</p>
*/
inline FaceDetail& AddEmotions(const Emotion& value) { m_emotionsHasBeenSet = true; m_emotions.push_back(value); return *this; }
/**
* <p>The emotions that appear to be expressed on the face, and the confidence
* level in the determination. The API is only making a determination of the
* physical appearance of a person's face. It is not a determination of the
* person's internal emotional state and should not be used in such a way. For
* example, a person pretending to have a sad face might not be sad
* emotionally.</p>
*/
inline FaceDetail& AddEmotions(Emotion&& value) { m_emotionsHasBeenSet = true; m_emotions.push_back(std::move(value)); return *this; }
/**
* <p>Indicates the location of landmarks on the face. Default attribute.</p>
*/
inline const Aws::Vector<Landmark>& GetLandmarks() const{ return m_landmarks; }
/**
* <p>Indicates the location of landmarks on the face. Default attribute.</p>
*/
inline bool LandmarksHasBeenSet() const { return m_landmarksHasBeenSet; }
/**
* <p>Indicates the location of landmarks on the face. Default attribute.</p>
*/
inline void SetLandmarks(const Aws::Vector<Landmark>& value) { m_landmarksHasBeenSet = true; m_landmarks = value; }
/**
* <p>Indicates the location of landmarks on the face. Default attribute.</p>
*/
inline void SetLandmarks(Aws::Vector<Landmark>&& value) { m_landmarksHasBeenSet = true; m_landmarks = std::move(value); }
/**
* <p>Indicates the location of landmarks on the face. Default attribute.</p>
*/
inline FaceDetail& WithLandmarks(const Aws::Vector<Landmark>& value) { SetLandmarks(value); return *this;}
/**
* <p>Indicates the location of landmarks on the face. Default attribute.</p>
*/
inline FaceDetail& WithLandmarks(Aws::Vector<Landmark>&& value) { SetLandmarks(std::move(value)); return *this;}
/**
* <p>Indicates the location of landmarks on the face. Default attribute.</p>
*/
inline FaceDetail& AddLandmarks(const Landmark& value) { m_landmarksHasBeenSet = true; m_landmarks.push_back(value); return *this; }
/**
* <p>Indicates the location of landmarks on the face. Default attribute.</p>
*/
inline FaceDetail& AddLandmarks(Landmark&& value) { m_landmarksHasBeenSet = true; m_landmarks.push_back(std::move(value)); return *this; }
/**
* <p>Indicates the pose of the face as determined by its pitch, roll, and yaw.
* Default attribute.</p>
*/
inline const Pose& GetPose() const{ return m_pose; }
/**
* <p>Indicates the pose of the face as determined by its pitch, roll, and yaw.
* Default attribute.</p>
*/
inline bool PoseHasBeenSet() const { return m_poseHasBeenSet; }
/**
* <p>Indicates the pose of the face as determined by its pitch, roll, and yaw.
* Default attribute.</p>
*/
inline void SetPose(const Pose& value) { m_poseHasBeenSet = true; m_pose = value; }
/**
* <p>Indicates the pose of the face as determined by its pitch, roll, and yaw.
* Default attribute.</p>
*/
inline void SetPose(Pose&& value) { m_poseHasBeenSet = true; m_pose = std::move(value); }
/**
* <p>Indicates the pose of the face as determined by its pitch, roll, and yaw.
* Default attribute.</p>
*/
inline FaceDetail& WithPose(const Pose& value) { SetPose(value); return *this;}
/**
* <p>Indicates the pose of the face as determined by its pitch, roll, and yaw.
* Default attribute.</p>
*/
inline FaceDetail& WithPose(Pose&& value) { SetPose(std::move(value)); return *this;}
/**
* <p>Identifies image brightness and sharpness. Default attribute.</p>
*/
inline const ImageQuality& GetQuality() const{ return m_quality; }
/**
* <p>Identifies image brightness and sharpness. Default attribute.</p>
*/
inline bool QualityHasBeenSet() const { return m_qualityHasBeenSet; }
/**
* <p>Identifies image brightness and sharpness. Default attribute.</p>
*/
inline void SetQuality(const ImageQuality& value) { m_qualityHasBeenSet = true; m_quality = value; }
/**
* <p>Identifies image brightness and sharpness. Default attribute.</p>
*/
inline void SetQuality(ImageQuality&& value) { m_qualityHasBeenSet = true; m_quality = std::move(value); }
/**
* <p>Identifies image brightness and sharpness. Default attribute.</p>
*/
inline FaceDetail& WithQuality(const ImageQuality& value) { SetQuality(value); return *this;}
/**
* <p>Identifies image brightness and sharpness. Default attribute.</p>
*/
inline FaceDetail& WithQuality(ImageQuality&& value) { SetQuality(std::move(value)); return *this;}
/**
* <p>Confidence level that the bounding box contains a face (and not a different
* object such as a tree). Default attribute.</p>
*/
inline double GetConfidence() const{ return m_confidence; }
/**
* <p>Confidence level that the bounding box contains a face (and not a different
* object such as a tree). Default attribute.</p>
*/
inline bool ConfidenceHasBeenSet() const { return m_confidenceHasBeenSet; }
/**
* <p>Confidence level that the bounding box contains a face (and not a different
* object such as a tree). Default attribute.</p>
*/
inline void SetConfidence(double value) { m_confidenceHasBeenSet = true; m_confidence = value; }
/**
* <p>Confidence level that the bounding box contains a face (and not a different
* object such as a tree). Default attribute.</p>
*/
inline FaceDetail& WithConfidence(double value) { SetConfidence(value); return *this;}
private:
BoundingBox m_boundingBox;
bool m_boundingBoxHasBeenSet;
AgeRange m_ageRange;
bool m_ageRangeHasBeenSet;
Smile m_smile;
bool m_smileHasBeenSet;
Eyeglasses m_eyeglasses;
bool m_eyeglassesHasBeenSet;
Sunglasses m_sunglasses;
bool m_sunglassesHasBeenSet;
Gender m_gender;
bool m_genderHasBeenSet;
Beard m_beard;
bool m_beardHasBeenSet;
Mustache m_mustache;
bool m_mustacheHasBeenSet;
EyeOpen m_eyesOpen;
bool m_eyesOpenHasBeenSet;
MouthOpen m_mouthOpen;
bool m_mouthOpenHasBeenSet;
Aws::Vector<Emotion> m_emotions;
bool m_emotionsHasBeenSet;
Aws::Vector<Landmark> m_landmarks;
bool m_landmarksHasBeenSet;
Pose m_pose;
bool m_poseHasBeenSet;
ImageQuality m_quality;
bool m_qualityHasBeenSet;
double m_confidence;
bool m_confidenceHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
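A sketch of consuming a FaceDetail. The DescribeFace helper is hypothetical; the object would come from a DetectFaces or IndexFaces response, and the non-default attributes are only populated when all attributes were requested, hence the HasBeenSet guards:

#include <aws/rekognition/model/FaceDetail.h>
#include <iostream>

void DescribeFace(const Aws::Rekognition::Model::FaceDetail& detail)
{
    // Confidence, BoundingBox, Landmarks, Pose, and Quality are default attributes.
    if (detail.ConfidenceHasBeenSet())
        std::cout << "Face confidence: " << detail.GetConfidence() << "\n";
    // AgeRange, Eyeglasses, Emotions, etc. require requesting all attributes.
    if (detail.AgeRangeHasBeenSet())
        std::cout << "Estimated age: " << detail.GetAgeRange().GetLow() << "-"
                  << detail.GetAgeRange().GetHigh() << "\n";
    if (detail.EyeglassesHasBeenSet() && detail.GetEyeglasses().GetValue())
        std::cout << "Wearing eyeglasses (" << detail.GetEyeglasses().GetConfidence()
                  << "% confidence)\n";
    for (const auto& emotion : detail.GetEmotions())
        std::cout << "Emotion confidence: " << emotion.GetConfidence() << "\n";
}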

View File

@@ -0,0 +1,107 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/model/FaceDetail.h>
#include <utility>
namespace Aws
{
namespace Utils
{
namespace Json
{
class JsonValue;
class JsonView;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
/**
* <p>Information about a face detected in a video analysis request and the time
* the face was detected in the video. </p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/rekognition-2016-06-27/FaceDetection">AWS
* API Reference</a></p>
*/
class AWS_REKOGNITION_API FaceDetection
{
public:
FaceDetection();
FaceDetection(Aws::Utils::Json::JsonView jsonValue);
FaceDetection& operator=(Aws::Utils::Json::JsonView jsonValue);
Aws::Utils::Json::JsonValue Jsonize() const;
/**
* <p>Time, in milliseconds from the start of the video, that the face was
* detected.</p>
*/
inline long long GetTimestamp() const{ return m_timestamp; }
/**
* <p>Time, in milliseconds from the start of the video, that the face was
* detected.</p>
*/
inline bool TimestampHasBeenSet() const { return m_timestampHasBeenSet; }
/**
* <p>Time, in milliseconds from the start of the video, that the face was
* detected.</p>
*/
inline void SetTimestamp(long long value) { m_timestampHasBeenSet = true; m_timestamp = value; }
/**
* <p>Time, in milliseconds from the start of the video, that the face was
* detected.</p>
*/
inline FaceDetection& WithTimestamp(long long value) { SetTimestamp(value); return *this;}
/**
* <p>The face properties for the detected face.</p>
*/
inline const FaceDetail& GetFace() const{ return m_face; }
/**
* <p>The face properties for the detected face.</p>
*/
inline bool FaceHasBeenSet() const { return m_faceHasBeenSet; }
/**
* <p>The face properties for the detected face.</p>
*/
inline void SetFace(const FaceDetail& value) { m_faceHasBeenSet = true; m_face = value; }
/**
* <p>The face properties for the detected face.</p>
*/
inline void SetFace(FaceDetail&& value) { m_faceHasBeenSet = true; m_face = std::move(value); }
/**
* <p>The face properties for the detected face.</p>
*/
inline FaceDetection& WithFace(const FaceDetail& value) { SetFace(value); return *this;}
/**
* <p>The face properties for the detected face.</p>
*/
inline FaceDetection& WithFace(FaceDetail&& value) { SetFace(std::move(value)); return *this;}
private:
long long m_timestamp;
bool m_timestampHasBeenSet;
FaceDetail m_face;
bool m_faceHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
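A sketch printing the detection timeline; the vector is assumed to come from the Faces member of a GetFaceDetection result (that result type is not shown in this excerpt):

#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/rekognition/model/FaceDetection.h>
#include <iostream>

void PrintTimeline(const Aws::Vector<Aws::Rekognition::Model::FaceDetection>& detections)
{
    for (const auto& d : detections)
    {
        // Timestamp is milliseconds from the start of the video.
        std::cout << "Face at " << d.GetTimestamp() / 1000.0 << " s, confidence "
                  << d.GetFace().GetConfidence() << "\n";
    }
}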

View File

@@ -0,0 +1,109 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/model/Face.h>
#include <utility>
namespace Aws
{
namespace Utils
{
namespace Json
{
class JsonValue;
class JsonView;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
/**
* <p>Provides face metadata. In addition, it also provides the confidence in the
* match of this face with the input face.</p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/rekognition-2016-06-27/FaceMatch">AWS
* API Reference</a></p>
*/
class AWS_REKOGNITION_API FaceMatch
{
public:
FaceMatch();
FaceMatch(Aws::Utils::Json::JsonView jsonValue);
FaceMatch& operator=(Aws::Utils::Json::JsonView jsonValue);
Aws::Utils::Json::JsonValue Jsonize() const;
/**
* <p>Confidence in the match of this face with the input face.</p>
*/
inline double GetSimilarity() const{ return m_similarity; }
/**
* <p>Confidence in the match of this face with the input face.</p>
*/
inline bool SimilarityHasBeenSet() const { return m_similarityHasBeenSet; }
/**
* <p>Confidence in the match of this face with the input face.</p>
*/
inline void SetSimilarity(double value) { m_similarityHasBeenSet = true; m_similarity = value; }
/**
* <p>Confidence in the match of this face with the input face.</p>
*/
inline FaceMatch& WithSimilarity(double value) { SetSimilarity(value); return *this;}
/**
* <p>Describes the face properties such as the bounding box, face ID, image ID of
* the source image, and external image ID that you assigned.</p>
*/
inline const Face& GetFace() const{ return m_face; }
/**
* <p>Describes the face properties such as the bounding box, face ID, image ID of
* the source image, and external image ID that you assigned.</p>
*/
inline bool FaceHasBeenSet() const { return m_faceHasBeenSet; }
/**
* <p>Describes the face properties such as the bounding box, face ID, image ID of
* the source image, and external image ID that you assigned.</p>
*/
inline void SetFace(const Face& value) { m_faceHasBeenSet = true; m_face = value; }
/**
* <p>Describes the face properties such as the bounding box, face ID, image ID of
* the source image, and external image ID that you assigned.</p>
*/
inline void SetFace(Face&& value) { m_faceHasBeenSet = true; m_face = std::move(value); }
/**
* <p>Describes the face properties such as the bounding box, face ID, image ID of
* the source image, and external image ID that you assigned.</p>
*/
inline FaceMatch& WithFace(const Face& value) { SetFace(value); return *this;}
/**
* <p>Describes the face properties such as the bounding box, face ID, image ID of
* the source image, and external image ID that you assigned.</p>
*/
inline FaceMatch& WithFace(Face&& value) { SetFace(std::move(value)); return *this;}
private:
double m_similarity;
bool m_similarityHasBeenSet;
Face m_face;
bool m_faceHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
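A sketch filtering matches by similarity; the vector is assumed to come from a SearchFaces or SearchFacesByImage result, which the service has already filtered by any FaceMatchThreshold on the request:

#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/rekognition/model/FaceMatch.h>
#include <iostream>

void PrintStrongMatches(const Aws::Vector<Aws::Rekognition::Model::FaceMatch>& matches,
                        double minSimilarity)
{
    for (const auto& match : matches)
    {
        if (match.GetSimilarity() < minSimilarity)
            continue;
        std::cout << match.GetFace().GetFaceId() << " similarity "
                  << match.GetSimilarity() << "\n";
    }
}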

View File

@@ -0,0 +1,121 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/model/Face.h>
#include <aws/rekognition/model/FaceDetail.h>
#include <utility>
namespace Aws
{
namespace Utils
{
namespace Json
{
class JsonValue;
class JsonView;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
/**
* <p>Object containing both the face metadata (stored in the backend database),
* and facial attributes that are detected but aren't stored in the
* database.</p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/rekognition-2016-06-27/FaceRecord">AWS
* API Reference</a></p>
*/
class AWS_REKOGNITION_API FaceRecord
{
public:
FaceRecord();
FaceRecord(Aws::Utils::Json::JsonView jsonValue);
FaceRecord& operator=(Aws::Utils::Json::JsonView jsonValue);
Aws::Utils::Json::JsonValue Jsonize() const;
/**
* <p>Describes the face properties such as the bounding box, face ID, image ID of
* the input image, and external image ID that you assigned. </p>
*/
inline const Face& GetFace() const{ return m_face; }
/**
* <p>Describes the face properties such as the bounding box, face ID, image ID of
* the input image, and external image ID that you assigned. </p>
*/
inline bool FaceHasBeenSet() const { return m_faceHasBeenSet; }
/**
* <p>Describes the face properties such as the bounding box, face ID, image ID of
* the input image, and external image ID that you assigned. </p>
*/
inline void SetFace(const Face& value) { m_faceHasBeenSet = true; m_face = value; }
/**
* <p>Describes the face properties such as the bounding box, face ID, image ID of
* the input image, and external image ID that you assigned. </p>
*/
inline void SetFace(Face&& value) { m_faceHasBeenSet = true; m_face = std::move(value); }
/**
* <p>Describes the face properties such as the bounding box, face ID, image ID of
* the input image, and external image ID that you assigned. </p>
*/
inline FaceRecord& WithFace(const Face& value) { SetFace(value); return *this;}
/**
* <p>Describes the face properties such as the bounding box, face ID, image ID of
* the input image, and external image ID that you assigned. </p>
*/
inline FaceRecord& WithFace(Face&& value) { SetFace(std::move(value)); return *this;}
/**
* <p>Structure containing attributes of the face that the algorithm detected.</p>
*/
inline const FaceDetail& GetFaceDetail() const{ return m_faceDetail; }
/**
* <p>Structure containing attributes of the face that the algorithm detected.</p>
*/
inline bool FaceDetailHasBeenSet() const { return m_faceDetailHasBeenSet; }
/**
* <p>Structure containing attributes of the face that the algorithm detected.</p>
*/
inline void SetFaceDetail(const FaceDetail& value) { m_faceDetailHasBeenSet = true; m_faceDetail = value; }
/**
* <p>Structure containing attributes of the face that the algorithm detected.</p>
*/
inline void SetFaceDetail(FaceDetail&& value) { m_faceDetailHasBeenSet = true; m_faceDetail = std::move(value); }
/**
* <p>Structure containing attributes of the face that the algorithm detected.</p>
*/
inline FaceRecord& WithFaceDetail(const FaceDetail& value) { SetFaceDetail(value); return *this;}
/**
* <p>Structure containing attributes of the face that the algorithm detected.</p>
*/
inline FaceRecord& WithFaceDetail(FaceDetail&& value) { SetFaceDetail(std::move(value)); return *this;}
private:
Face m_face;
bool m_faceHasBeenSet;
FaceDetail m_faceDetail;
bool m_faceDetailHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
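IndexFaces returns one FaceRecord per indexed face. A sketch reading both halves; the vector is assumed to come from the FaceRecords member of an IndexFaces result:

#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/rekognition/model/FaceRecord.h>
#include <iostream>

void PrintIndexedFaces(const Aws::Vector<Aws::Rekognition::Model::FaceRecord>& records)
{
    for (const auto& record : records)
    {
        // Face: the metadata that was stored in the collection.
        std::cout << "Stored FaceId: " << record.GetFace().GetFaceId() << "\n";
        // FaceDetail: attributes detected at index time but not stored.
        std::cout << "Detection confidence: "
                  << record.GetFaceDetail().GetConfidence() << "\n";
    }
}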

View File

@@ -0,0 +1,122 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <utility>
namespace Aws
{
namespace Utils
{
namespace Json
{
class JsonValue;
class JsonView;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
/**
* <p>Input face recognition parameters for an Amazon Rekognition stream processor.
* <code>FaceSearchSettings</code> is a request parameter for
* <a>CreateStreamProcessor</a>.</p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/rekognition-2016-06-27/FaceSearchSettings">AWS
* API Reference</a></p>
*/
class AWS_REKOGNITION_API FaceSearchSettings
{
public:
FaceSearchSettings();
FaceSearchSettings(Aws::Utils::Json::JsonView jsonValue);
FaceSearchSettings& operator=(Aws::Utils::Json::JsonView jsonValue);
Aws::Utils::Json::JsonValue Jsonize() const;
/**
* <p>The ID of a collection that contains faces that you want to search for.</p>
*/
inline const Aws::String& GetCollectionId() const{ return m_collectionId; }
/**
* <p>The ID of a collection that contains faces that you want to search for.</p>
*/
inline bool CollectionIdHasBeenSet() const { return m_collectionIdHasBeenSet; }
/**
* <p>The ID of a collection that contains faces that you want to search for.</p>
*/
inline void SetCollectionId(const Aws::String& value) { m_collectionIdHasBeenSet = true; m_collectionId = value; }
/**
* <p>The ID of a collection that contains faces that you want to search for.</p>
*/
inline void SetCollectionId(Aws::String&& value) { m_collectionIdHasBeenSet = true; m_collectionId = std::move(value); }
/**
* <p>The ID of a collection that contains faces that you want to search for.</p>
*/
inline void SetCollectionId(const char* value) { m_collectionIdHasBeenSet = true; m_collectionId.assign(value); }
/**
* <p>The ID of a collection that contains faces that you want to search for.</p>
*/
inline FaceSearchSettings& WithCollectionId(const Aws::String& value) { SetCollectionId(value); return *this;}
/**
* <p>The ID of a collection that contains faces that you want to search for.</p>
*/
inline FaceSearchSettings& WithCollectionId(Aws::String&& value) { SetCollectionId(std::move(value)); return *this;}
/**
* <p>The ID of a collection that contains faces that you want to search for.</p>
*/
inline FaceSearchSettings& WithCollectionId(const char* value) { SetCollectionId(value); return *this;}
/**
* <p>Minimum face match confidence score that must be met to return a result for a
* recognized face. Default is 80. 0 is the lowest confidence. 100 is the highest
* confidence.</p>
*/
inline double GetFaceMatchThreshold() const{ return m_faceMatchThreshold; }
/**
* <p>Minimum face match confidence score that must be met to return a result for a
* recognized face. Default is 80. 0 is the lowest confidence. 100 is the highest
* confidence.</p>
*/
inline bool FaceMatchThresholdHasBeenSet() const { return m_faceMatchThresholdHasBeenSet; }
/**
* <p>Minimum face match confidence score that must be met to return a result for a
* recognized face. Default is 80. 0 is the lowest confidence. 100 is the highest
* confidence.</p>
*/
inline void SetFaceMatchThreshold(double value) { m_faceMatchThresholdHasBeenSet = true; m_faceMatchThreshold = value; }
/**
* <p>Minimum face match confidence score that must be met to return a result for a
* recognized face. Default is 80. 0 is the lowest confidence. 100 is the highest
* confidence.</p>
*/
inline FaceSearchSettings& WithFaceMatchThreshold(double value) { SetFaceMatchThreshold(value); return *this;}
private:
Aws::String m_collectionId;
bool m_collectionIdHasBeenSet;
double m_faceMatchThreshold;
bool m_faceMatchThresholdHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
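A sketch building the settings for a stream processor; "my-collection" is a placeholder for an existing collection ID, and leaving the threshold unset would fall back to the documented default of 80:

#include <aws/rekognition/model/FaceSearchSettings.h>

Aws::Rekognition::Model::FaceSearchSettings MakeSearchSettings()
{
    return Aws::Rekognition::Model::FaceSearchSettings()
        .WithCollectionId("my-collection")   // placeholder collection ID
        .WithFaceMatchThreshold(90.0);       // stricter than the default of 80
}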

View File

@@ -0,0 +1,31 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
namespace Aws
{
namespace Rekognition
{
namespace Model
{
enum class FaceSearchSortBy
{
NOT_SET,
INDEX,
TIMESTAMP
};
namespace FaceSearchSortByMapper
{
AWS_REKOGNITION_API FaceSearchSortBy GetFaceSearchSortByForName(const Aws::String& name);
AWS_REKOGNITION_API Aws::String GetNameForFaceSearchSortBy(FaceSearchSortBy value);
} // namespace FaceSearchSortByMapper
} // namespace Model
} // namespace Rekognition
} // namespace Aws

View File

@@ -0,0 +1,113 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/model/GenderType.h>
#include <utility>
namespace Aws
{
namespace Utils
{
namespace Json
{
class JsonValue;
class JsonView;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
/**
* <p>The predicted gender of a detected face. </p> <p>Amazon Rekognition makes
* gender binary (male/female) predictions based on the physical appearance of a
* face in a particular image. This kind of prediction is not designed to
* categorize a person's gender identity, and you shouldn't use Amazon Rekognition
* to make such a determination. For example, a male actor wearing a long-haired
* wig and earrings for a role might be predicted as female.</p> <p>Using Amazon
* Rekognition to make gender binary predictions is best suited for use cases where
* aggregate gender distribution statistics need to be analyzed without identifying
* specific users. For example, the percentage of female users compared to male
* users on a social media platform. </p> <p>We don't recommend using gender binary
* predictions to make decisions that impact an individual's rights,
* privacy, or access to services.</p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/rekognition-2016-06-27/Gender">AWS
* API Reference</a></p>
*/
class AWS_REKOGNITION_API Gender
{
public:
Gender();
Gender(Aws::Utils::Json::JsonView jsonValue);
Gender& operator=(Aws::Utils::Json::JsonView jsonValue);
Aws::Utils::Json::JsonValue Jsonize() const;
/**
* <p>The predicted gender of the face.</p>
*/
inline const GenderType& GetValue() const{ return m_value; }
/**
* <p>The predicted gender of the face.</p>
*/
inline bool ValueHasBeenSet() const { return m_valueHasBeenSet; }
/**
* <p>The predicted gender of the face.</p>
*/
inline void SetValue(const GenderType& value) { m_valueHasBeenSet = true; m_value = value; }
/**
* <p>The predicted gender of the face.</p>
*/
inline void SetValue(GenderType&& value) { m_valueHasBeenSet = true; m_value = std::move(value); }
/**
* <p>The predicted gender of the face.</p>
*/
inline Gender& WithValue(const GenderType& value) { SetValue(value); return *this;}
/**
* <p>The predicted gender of the face.</p>
*/
inline Gender& WithValue(GenderType&& value) { SetValue(std::move(value)); return *this;}
/**
* <p>Level of confidence in the prediction.</p>
*/
inline double GetConfidence() const{ return m_confidence; }
/**
* <p>Level of confidence in the prediction.</p>
*/
inline bool ConfidenceHasBeenSet() const { return m_confidenceHasBeenSet; }
/**
* <p>Level of confidence in the prediction.</p>
*/
inline void SetConfidence(double value) { m_confidenceHasBeenSet = true; m_confidence = value; }
/**
* <p>Level of confidence in the prediction.</p>
*/
inline Gender& WithConfidence(double value) { SetConfidence(value); return *this;}
private:
GenderType m_value;
bool m_valueHasBeenSet;
double m_confidence;
bool m_confidenceHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws

View File

@@ -0,0 +1,31 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
namespace Aws
{
namespace Rekognition
{
namespace Model
{
enum class GenderType
{
NOT_SET,
Male,
Female
};
namespace GenderTypeMapper
{
AWS_REKOGNITION_API GenderType GetGenderTypeForName(const Aws::String& name);
AWS_REKOGNITION_API Aws::String GetNameForGenderType(GenderType value);
} // namespace GenderTypeMapper
} // namespace Model
} // namespace Rekognition
} // namespace Aws
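A sketch reading the prediction from the two types above. The PrintGenderPrediction helper is hypothetical; its argument would come from FaceDetail::GetGender():

#include <aws/rekognition/model/Gender.h>
#include <aws/rekognition/model/GenderType.h>
#include <iostream>

void PrintGenderPrediction(const Aws::Rekognition::Model::Gender& gender)
{
    using namespace Aws::Rekognition::Model;
    if (!gender.ValueHasBeenSet())
        return;
    // Convert the enum back to its wire name ("Male" or "Female").
    std::cout << GenderTypeMapper::GetNameForGenderType(gender.GetValue())
              << " (" << gender.GetConfidence() << "% confidence)\n";
}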

View File

@@ -0,0 +1,131 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/model/BoundingBox.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/rekognition/model/Point.h>
#include <utility>
namespace Aws
{
namespace Utils
{
namespace Json
{
class JsonValue;
class JsonView;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
/**
* <p>Information about where an object (<a>DetectCustomLabels</a>) or text
* (<a>DetectText</a>) is located on an image.</p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/rekognition-2016-06-27/Geometry">AWS
* API Reference</a></p>
*/
class AWS_REKOGNITION_API Geometry
{
public:
Geometry();
Geometry(Aws::Utils::Json::JsonView jsonValue);
Geometry& operator=(Aws::Utils::Json::JsonView jsonValue);
Aws::Utils::Json::JsonValue Jsonize() const;
/**
* <p>An axis-aligned coarse representation of the detected item's location on the
* image.</p>
*/
inline const BoundingBox& GetBoundingBox() const{ return m_boundingBox; }
/**
* <p>An axis-aligned coarse representation of the detected item's location on the
* image.</p>
*/
inline bool BoundingBoxHasBeenSet() const { return m_boundingBoxHasBeenSet; }
/**
* <p>An axis-aligned coarse representation of the detected item's location on the
* image.</p>
*/
inline void SetBoundingBox(const BoundingBox& value) { m_boundingBoxHasBeenSet = true; m_boundingBox = value; }
/**
* <p>An axis-aligned coarse representation of the detected item's location on the
* image.</p>
*/
inline void SetBoundingBox(BoundingBox&& value) { m_boundingBoxHasBeenSet = true; m_boundingBox = std::move(value); }
/**
* <p>An axis-aligned coarse representation of the detected item's location on the
* image.</p>
*/
inline Geometry& WithBoundingBox(const BoundingBox& value) { SetBoundingBox(value); return *this;}
/**
* <p>An axis-aligned coarse representation of the detected item's location on the
* image.</p>
*/
inline Geometry& WithBoundingBox(BoundingBox&& value) { SetBoundingBox(std::move(value)); return *this;}
/**
* <p>Within the bounding box, a fine-grained polygon around the detected item.</p>
*/
inline const Aws::Vector<Point>& GetPolygon() const{ return m_polygon; }
/**
* <p>Within the bounding box, a fine-grained polygon around the detected item.</p>
*/
inline bool PolygonHasBeenSet() const { return m_polygonHasBeenSet; }
/**
* <p>Within the bounding box, a fine-grained polygon around the detected item.</p>
*/
inline void SetPolygon(const Aws::Vector<Point>& value) { m_polygonHasBeenSet = true; m_polygon = value; }
/**
* <p>Within the bounding box, a fine-grained polygon around the detected item.</p>
*/
inline void SetPolygon(Aws::Vector<Point>&& value) { m_polygonHasBeenSet = true; m_polygon = std::move(value); }
/**
* <p>Within the bounding box, a fine-grained polygon around the detected item.</p>
*/
inline Geometry& WithPolygon(const Aws::Vector<Point>& value) { SetPolygon(value); return *this;}
/**
* <p>Within the bounding box, a fine-grained polygon around the detected item.</p>
*/
inline Geometry& WithPolygon(Aws::Vector<Point>&& value) { SetPolygon(std::move(value)); return *this;}
/**
* <p>Within the bounding box, a fine-grained polygon around the detected item.</p>
*/
inline Geometry& AddPolygon(const Point& value) { m_polygonHasBeenSet = true; m_polygon.push_back(value); return *this; }
/**
* <p>Within the bounding box, a fine-grained polygon around the detected item.</p>
*/
inline Geometry& AddPolygon(Point&& value) { m_polygonHasBeenSet = true; m_polygon.push_back(std::move(value)); return *this; }
private:
BoundingBox m_boundingBox;
bool m_boundingBoxHasBeenSet;
Aws::Vector<Point> m_polygon;
bool m_polygonHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
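A sketch scaling the fine-grained polygon back to pixel coordinates; the geometry is assumed to come from a DetectText or DetectCustomLabels response, and Point coordinates are expressed as ratios of the overall image size:

#include <aws/rekognition/model/Geometry.h>
#include <iostream>

void PrintPolygon(const Aws::Rekognition::Model::Geometry& geometry,
                  int imageWidth, int imageHeight)
{
    for (const auto& point : geometry.GetPolygon())
    {
        std::cout << "(" << point.GetX() * imageWidth << ", "
                  << point.GetY() * imageHeight << ")\n";
    }
}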

View File

@@ -0,0 +1,101 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/RekognitionRequest.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <utility>
namespace Aws
{
namespace Rekognition
{
namespace Model
{
/**
*/
class AWS_REKOGNITION_API GetCelebrityInfoRequest : public RekognitionRequest
{
public:
GetCelebrityInfoRequest();
// Service request name is the Operation name which will send this request out,
// each operation should have a unique request name, so that we can get the operation's name from this request.
// Note: this is not true for responses; multiple operations may have the same response name,
// so we cannot get the operation's name from a response.
inline virtual const char* GetServiceRequestName() const override { return "GetCelebrityInfo"; }
Aws::String SerializePayload() const override;
Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
/**
* <p>The ID for the celebrity. You get the celebrity ID from a call to the
* <a>RecognizeCelebrities</a> operation, which recognizes celebrities in an image.
* </p>
*/
inline const Aws::String& GetId() const{ return m_id; }
/**
* <p>The ID for the celebrity. You get the celebrity ID from a call to the
* <a>RecognizeCelebrities</a> operation, which recognizes celebrities in an image.
* </p>
*/
inline bool IdHasBeenSet() const { return m_idHasBeenSet; }
/**
* <p>The ID for the celebrity. You get the celebrity ID from a call to the
* <a>RecognizeCelebrities</a> operation, which recognizes celebrities in an image.
* </p>
*/
inline void SetId(const Aws::String& value) { m_idHasBeenSet = true; m_id = value; }
/**
* <p>The ID for the celebrity. You get the celebrity ID from a call to the
* <a>RecognizeCelebrities</a> operation, which recognizes celebrities in an image.
* </p>
*/
inline void SetId(Aws::String&& value) { m_idHasBeenSet = true; m_id = std::move(value); }
/**
* <p>The ID for the celebrity. You get the celebrity ID from a call to the
* <a>RecognizeCelebrities</a> operation, which recognizes celebrities in an image.
* </p>
*/
inline void SetId(const char* value) { m_idHasBeenSet = true; m_id.assign(value); }
/**
* <p>The ID for the celebrity. You get the celebrity ID from a call to the
* <a>RecognizeCelebrities</a> operation, which recognizes celebrities in an image.
* </p>
*/
inline GetCelebrityInfoRequest& WithId(const Aws::String& value) { SetId(value); return *this;}
/**
* <p>The ID for the celebrity. You get the celebrity ID from a call to the
* <a>RecognizeCelebrities</a> operation, which recognizes celebrities in an image.
* </p>
*/
inline GetCelebrityInfoRequest& WithId(Aws::String&& value) { SetId(std::move(value)); return *this;}
/**
* <p>The ID for the celebrity. You get the celebrity ID from a call to the
* <a>RecognizeCelebrities</a> operation, which recognizes celebrities in an image.
* </p>
*/
inline GetCelebrityInfoRequest& WithId(const char* value) { SetId(value); return *this;}
private:
Aws::String m_id;
bool m_idHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws

View File

@@ -0,0 +1,121 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <utility>
namespace Aws
{
template<typename RESULT_TYPE>
class AmazonWebServiceResult;
namespace Utils
{
namespace Json
{
class JsonValue;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
class AWS_REKOGNITION_API GetCelebrityInfoResult
{
public:
GetCelebrityInfoResult();
GetCelebrityInfoResult(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
GetCelebrityInfoResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
/**
* <p>An array of URLs pointing to additional celebrity information. </p>
*/
inline const Aws::Vector<Aws::String>& GetUrls() const{ return m_urls; }
/**
* <p>An array of URLs pointing to additional celebrity information. </p>
*/
inline void SetUrls(const Aws::Vector<Aws::String>& value) { m_urls = value; }
/**
* <p>An array of URLs pointing to additional celebrity information. </p>
*/
inline void SetUrls(Aws::Vector<Aws::String>&& value) { m_urls = std::move(value); }
/**
* <p>An array of URLs pointing to additional celebrity information. </p>
*/
inline GetCelebrityInfoResult& WithUrls(const Aws::Vector<Aws::String>& value) { SetUrls(value); return *this;}
/**
* <p>An array of URLs pointing to additional celebrity information. </p>
*/
inline GetCelebrityInfoResult& WithUrls(Aws::Vector<Aws::String>&& value) { SetUrls(std::move(value)); return *this;}
/**
* <p>An array of URLs pointing to additional celebrity information. </p>
*/
inline GetCelebrityInfoResult& AddUrls(const Aws::String& value) { m_urls.push_back(value); return *this; }
/**
* <p>An array of URLs pointing to additional celebrity information. </p>
*/
inline GetCelebrityInfoResult& AddUrls(Aws::String&& value) { m_urls.push_back(std::move(value)); return *this; }
/**
* <p>An array of URLs pointing to additional celebrity information. </p>
*/
inline GetCelebrityInfoResult& AddUrls(const char* value) { m_urls.push_back(value); return *this; }
/**
* <p>The name of the celebrity.</p>
*/
inline const Aws::String& GetName() const{ return m_name; }
/**
* <p>The name of the celebrity.</p>
*/
inline void SetName(const Aws::String& value) { m_name = value; }
/**
* <p>The name of the celebrity.</p>
*/
inline void SetName(Aws::String&& value) { m_name = std::move(value); }
/**
* <p>The name of the celebrity.</p>
*/
inline void SetName(const char* value) { m_name.assign(value); }
/**
* <p>The name of the celebrity.</p>
*/
inline GetCelebrityInfoResult& WithName(const Aws::String& value) { SetName(value); return *this;}
/**
* <p>The name of the celebrity.</p>
*/
inline GetCelebrityInfoResult& WithName(Aws::String&& value) { SetName(std::move(value)); return *this;}
/**
* <p>The name of the celebrity.</p>
*/
inline GetCelebrityInfoResult& WithName(const char* value) { SetName(value); return *this;}
private:
Aws::Vector<Aws::String> m_urls;
Aws::String m_name;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws

View File

@@ -0,0 +1,240 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/RekognitionRequest.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/rekognition/model/CelebrityRecognitionSortBy.h>
#include <utility>
namespace Aws
{
namespace Rekognition
{
namespace Model
{
/**
*/
class AWS_REKOGNITION_API GetCelebrityRecognitionRequest : public RekognitionRequest
{
public:
GetCelebrityRecognitionRequest();
// Service request name is the Operation name which will send this request out,
// each operation should has unique request name, so that we can get operation's name from this request.
// Note: this is not true for response, multiple operations may have the same response name,
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "GetCelebrityRecognition"; }
Aws::String SerializePayload() const override;
Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
/**
* <p>Job identifier for the required celebrity recognition analysis. You can get
* the job identifer from a call to <code>StartCelebrityRecognition</code>.</p>
*/
inline const Aws::String& GetJobId() const{ return m_jobId; }
/**
* <p>Job identifier for the required celebrity recognition analysis. You can get
* the job identifer from a call to <code>StartCelebrityRecognition</code>.</p>
*/
inline bool JobIdHasBeenSet() const { return m_jobIdHasBeenSet; }
/**
* <p>Job identifier for the required celebrity recognition analysis. You can get
* the job identifer from a call to <code>StartCelebrityRecognition</code>.</p>
*/
inline void SetJobId(const Aws::String& value) { m_jobIdHasBeenSet = true; m_jobId = value; }
/**
* <p>Job identifier for the required celebrity recognition analysis. You can get
* the job identifer from a call to <code>StartCelebrityRecognition</code>.</p>
*/
inline void SetJobId(Aws::String&& value) { m_jobIdHasBeenSet = true; m_jobId = std::move(value); }
/**
* <p>Job identifier for the required celebrity recognition analysis. You can get
* the job identifer from a call to <code>StartCelebrityRecognition</code>.</p>
*/
inline void SetJobId(const char* value) { m_jobIdHasBeenSet = true; m_jobId.assign(value); }
/**
* <p>Job identifier for the required celebrity recognition analysis. You can get
* the job identifer from a call to <code>StartCelebrityRecognition</code>.</p>
*/
inline GetCelebrityRecognitionRequest& WithJobId(const Aws::String& value) { SetJobId(value); return *this;}
/**
* <p>Job identifier for the required celebrity recognition analysis. You can get
* the job identifer from a call to <code>StartCelebrityRecognition</code>.</p>
*/
inline GetCelebrityRecognitionRequest& WithJobId(Aws::String&& value) { SetJobId(std::move(value)); return *this;}
/**
* <p>Job identifier for the required celebrity recognition analysis. You can get
* the job identifer from a call to <code>StartCelebrityRecognition</code>.</p>
*/
inline GetCelebrityRecognitionRequest& WithJobId(const char* value) { SetJobId(value); return *this;}
/**
* <p>Maximum number of results to return per paginated call. The largest value you
* can specify is 1000. If you specify a value greater than 1000, a maximum of 1000
* results is returned. The default value is 1000.</p>
*/
inline int GetMaxResults() const{ return m_maxResults; }
/**
* <p>Maximum number of results to return per paginated call. The largest value you
* can specify is 1000. If you specify a value greater than 1000, a maximum of 1000
* results is returned. The default value is 1000.</p>
*/
inline bool MaxResultsHasBeenSet() const { return m_maxResultsHasBeenSet; }
/**
* <p>Maximum number of results to return per paginated call. The largest value you
* can specify is 1000. If you specify a value greater than 1000, a maximum of 1000
* results is returned. The default value is 1000.</p>
*/
inline void SetMaxResults(int value) { m_maxResultsHasBeenSet = true; m_maxResults = value; }
/**
* <p>Maximum number of results to return per paginated call. The largest value you
* can specify is 1000. If you specify a value greater than 1000, a maximum of 1000
* results is returned. The default value is 1000.</p>
*/
inline GetCelebrityRecognitionRequest& WithMaxResults(int value) { SetMaxResults(value); return *this;}
/**
* <p>If the previous response was incomplete (because there is more recognized
* celebrities to retrieve), Amazon Rekognition Video returns a pagination token in
* the response. You can use this pagination token to retrieve the next set of
* celebrities. </p>
*/
inline const Aws::String& GetNextToken() const{ return m_nextToken; }
/**
* <p>If the previous response was incomplete (because there is more recognized
* celebrities to retrieve), Amazon Rekognition Video returns a pagination token in
* the response. You can use this pagination token to retrieve the next set of
* celebrities. </p>
*/
inline bool NextTokenHasBeenSet() const { return m_nextTokenHasBeenSet; }
/**
* <p>If the previous response was incomplete (because there is more recognized
* celebrities to retrieve), Amazon Rekognition Video returns a pagination token in
* the response. You can use this pagination token to retrieve the next set of
* celebrities. </p>
*/
inline void SetNextToken(const Aws::String& value) { m_nextTokenHasBeenSet = true; m_nextToken = value; }
/**
* <p>If the previous response was incomplete (because there is more recognized
* celebrities to retrieve), Amazon Rekognition Video returns a pagination token in
* the response. You can use this pagination token to retrieve the next set of
* celebrities. </p>
*/
inline void SetNextToken(Aws::String&& value) { m_nextTokenHasBeenSet = true; m_nextToken = std::move(value); }
/**
* <p>If the previous response was incomplete (because there is more recognized
* celebrities to retrieve), Amazon Rekognition Video returns a pagination token in
* the response. You can use this pagination token to retrieve the next set of
* celebrities. </p>
*/
inline void SetNextToken(const char* value) { m_nextTokenHasBeenSet = true; m_nextToken.assign(value); }
/**
* <p>If the previous response was incomplete (because there is more recognized
* celebrities to retrieve), Amazon Rekognition Video returns a pagination token in
* the response. You can use this pagination token to retrieve the next set of
* celebrities. </p>
*/
inline GetCelebrityRecognitionRequest& WithNextToken(const Aws::String& value) { SetNextToken(value); return *this;}
/**
* <p>If the previous response was incomplete (because there is more recognized
* celebrities to retrieve), Amazon Rekognition Video returns a pagination token in
* the response. You can use this pagination token to retrieve the next set of
* celebrities. </p>
*/
inline GetCelebrityRecognitionRequest& WithNextToken(Aws::String&& value) { SetNextToken(std::move(value)); return *this;}
/**
* <p>If the previous response was incomplete (because there is more recognized
* celebrities to retrieve), Amazon Rekognition Video returns a pagination token in
* the response. You can use this pagination token to retrieve the next set of
* celebrities. </p>
*/
inline GetCelebrityRecognitionRequest& WithNextToken(const char* value) { SetNextToken(value); return *this;}
/**
* <p>Sort to use for celebrities returned in <code>Celebrities</code> field.
* Specify <code>ID</code> to sort by the celebrity identifier, specify
* <code>TIMESTAMP</code> to sort by the time the celebrity was recognized.</p>
*/
inline const CelebrityRecognitionSortBy& GetSortBy() const{ return m_sortBy; }
/**
* <p>Sort to use for celebrities returned in <code>Celebrities</code> field.
* Specify <code>ID</code> to sort by the celebrity identifier, specify
* <code>TIMESTAMP</code> to sort by the time the celebrity was recognized.</p>
*/
inline bool SortByHasBeenSet() const { return m_sortByHasBeenSet; }
/**
* <p>Sort to use for celebrities returned in <code>Celebrities</code> field.
* Specify <code>ID</code> to sort by the celebrity identifier, specify
* <code>TIMESTAMP</code> to sort by the time the celebrity was recognized.</p>
*/
inline void SetSortBy(const CelebrityRecognitionSortBy& value) { m_sortByHasBeenSet = true; m_sortBy = value; }
/**
* <p>Sort to use for celebrities returned in <code>Celebrities</code> field.
* Specify <code>ID</code> to sort by the celebrity identifier, specify
* <code>TIMESTAMP</code> to sort by the time the celebrity was recognized.</p>
*/
inline void SetSortBy(CelebrityRecognitionSortBy&& value) { m_sortByHasBeenSet = true; m_sortBy = std::move(value); }
/**
* <p>Sort to use for celebrities returned in <code>Celebrities</code> field.
* Specify <code>ID</code> to sort by the celebrity identifier, specify
* <code>TIMESTAMP</code> to sort by the time the celebrity was recognized.</p>
*/
inline GetCelebrityRecognitionRequest& WithSortBy(const CelebrityRecognitionSortBy& value) { SetSortBy(value); return *this;}
/**
* <p>Sort to use for celebrities returned in <code>Celebrities</code> field.
* Specify <code>ID</code> to sort by the celebrity identifier, specify
* <code>TIMESTAMP</code> to sort by the time the celebrity was recognized.</p>
*/
inline GetCelebrityRecognitionRequest& WithSortBy(CelebrityRecognitionSortBy&& value) { SetSortBy(std::move(value)); return *this;}
private:
Aws::String m_jobId;
bool m_jobIdHasBeenSet;
int m_maxResults;
bool m_maxResultsHasBeenSet;
Aws::String m_nextToken;
bool m_nextTokenHasBeenSet;
CelebrityRecognitionSortBy m_sortBy;
bool m_sortByHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws

View File

@@ -0,0 +1,244 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/model/VideoJobStatus.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/rekognition/model/VideoMetadata.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/rekognition/model/CelebrityRecognition.h>
#include <utility>
namespace Aws
{
template<typename RESULT_TYPE>
class AmazonWebServiceResult;
namespace Utils
{
namespace Json
{
class JsonValue;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
class AWS_REKOGNITION_API GetCelebrityRecognitionResult
{
public:
GetCelebrityRecognitionResult();
GetCelebrityRecognitionResult(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
GetCelebrityRecognitionResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
/**
* <p>The current status of the celebrity recognition job.</p>
*/
inline const VideoJobStatus& GetJobStatus() const{ return m_jobStatus; }
/**
* <p>The current status of the celebrity recognition job.</p>
*/
inline void SetJobStatus(const VideoJobStatus& value) { m_jobStatus = value; }
/**
* <p>The current status of the celebrity recognition job.</p>
*/
inline void SetJobStatus(VideoJobStatus&& value) { m_jobStatus = std::move(value); }
/**
* <p>The current status of the celebrity recognition job.</p>
*/
inline GetCelebrityRecognitionResult& WithJobStatus(const VideoJobStatus& value) { SetJobStatus(value); return *this;}
/**
* <p>The current status of the celebrity recognition job.</p>
*/
inline GetCelebrityRecognitionResult& WithJobStatus(VideoJobStatus&& value) { SetJobStatus(std::move(value)); return *this;}
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline const Aws::String& GetStatusMessage() const{ return m_statusMessage; }
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline void SetStatusMessage(const Aws::String& value) { m_statusMessage = value; }
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline void SetStatusMessage(Aws::String&& value) { m_statusMessage = std::move(value); }
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline void SetStatusMessage(const char* value) { m_statusMessage.assign(value); }
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline GetCelebrityRecognitionResult& WithStatusMessage(const Aws::String& value) { SetStatusMessage(value); return *this;}
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline GetCelebrityRecognitionResult& WithStatusMessage(Aws::String&& value) { SetStatusMessage(std::move(value)); return *this;}
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline GetCelebrityRecognitionResult& WithStatusMessage(const char* value) { SetStatusMessage(value); return *this;}
/**
* <p>Information about a video that Amazon Rekognition Video analyzed.
* <code>Videometadata</code> is returned in every page of paginated responses from
* a Amazon Rekognition Video operation.</p>
*/
inline const VideoMetadata& GetVideoMetadata() const{ return m_videoMetadata; }
/**
* <p>Information about a video that Amazon Rekognition Video analyzed.
* <code>Videometadata</code> is returned in every page of paginated responses from
* a Amazon Rekognition Video operation.</p>
*/
inline void SetVideoMetadata(const VideoMetadata& value) { m_videoMetadata = value; }
/**
* <p>Information about a video that Amazon Rekognition Video analyzed.
* <code>Videometadata</code> is returned in every page of paginated responses from
* a Amazon Rekognition Video operation.</p>
*/
inline void SetVideoMetadata(VideoMetadata&& value) { m_videoMetadata = std::move(value); }
/**
* <p>Information about a video that Amazon Rekognition Video analyzed.
* <code>Videometadata</code> is returned in every page of paginated responses from
* a Amazon Rekognition Video operation.</p>
*/
inline GetCelebrityRecognitionResult& WithVideoMetadata(const VideoMetadata& value) { SetVideoMetadata(value); return *this;}
/**
* <p>Information about a video that Amazon Rekognition Video analyzed.
* <code>Videometadata</code> is returned in every page of paginated responses from
* a Amazon Rekognition Video operation.</p>
*/
inline GetCelebrityRecognitionResult& WithVideoMetadata(VideoMetadata&& value) { SetVideoMetadata(std::move(value)); return *this;}
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of
* celebrities.</p>
*/
inline const Aws::String& GetNextToken() const{ return m_nextToken; }
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of
* celebrities.</p>
*/
inline void SetNextToken(const Aws::String& value) { m_nextToken = value; }
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of
* celebrities.</p>
*/
inline void SetNextToken(Aws::String&& value) { m_nextToken = std::move(value); }
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of
* celebrities.</p>
*/
inline void SetNextToken(const char* value) { m_nextToken.assign(value); }
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of
* celebrities.</p>
*/
inline GetCelebrityRecognitionResult& WithNextToken(const Aws::String& value) { SetNextToken(value); return *this;}
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of
* celebrities.</p>
*/
inline GetCelebrityRecognitionResult& WithNextToken(Aws::String&& value) { SetNextToken(std::move(value)); return *this;}
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of
* celebrities.</p>
*/
inline GetCelebrityRecognitionResult& WithNextToken(const char* value) { SetNextToken(value); return *this;}
/**
* <p>Array of celebrities recognized in the video.</p>
*/
inline const Aws::Vector<CelebrityRecognition>& GetCelebrities() const{ return m_celebrities; }
/**
* <p>Array of celebrities recognized in the video.</p>
*/
inline void SetCelebrities(const Aws::Vector<CelebrityRecognition>& value) { m_celebrities = value; }
/**
* <p>Array of celebrities recognized in the video.</p>
*/
inline void SetCelebrities(Aws::Vector<CelebrityRecognition>&& value) { m_celebrities = std::move(value); }
/**
* <p>Array of celebrities recognized in the video.</p>
*/
inline GetCelebrityRecognitionResult& WithCelebrities(const Aws::Vector<CelebrityRecognition>& value) { SetCelebrities(value); return *this;}
/**
* <p>Array of celebrities recognized in the video.</p>
*/
inline GetCelebrityRecognitionResult& WithCelebrities(Aws::Vector<CelebrityRecognition>&& value) { SetCelebrities(std::move(value)); return *this;}
/**
* <p>Array of celebrities recognized in the video.</p>
*/
inline GetCelebrityRecognitionResult& AddCelebrities(const CelebrityRecognition& value) { m_celebrities.push_back(value); return *this; }
/**
* <p>Array of celebrities recognized in the video.</p>
*/
inline GetCelebrityRecognitionResult& AddCelebrities(CelebrityRecognition&& value) { m_celebrities.push_back(std::move(value)); return *this; }
private:
VideoJobStatus m_jobStatus;
Aws::String m_statusMessage;
VideoMetadata m_videoMetadata;
Aws::String m_nextToken;
Aws::Vector<CelebrityRecognition> m_celebrities;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws

View File

@@ -0,0 +1,252 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/RekognitionRequest.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/rekognition/model/ContentModerationSortBy.h>
#include <utility>
namespace Aws
{
namespace Rekognition
{
namespace Model
{
/**
*/
class AWS_REKOGNITION_API GetContentModerationRequest : public RekognitionRequest
{
public:
GetContentModerationRequest();
// Service request name is the Operation name which will send this request out,
// each operation should has unique request name, so that we can get operation's name from this request.
// Note: this is not true for response, multiple operations may have the same response name,
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "GetContentModeration"; }
Aws::String SerializePayload() const override;
Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
/**
* <p>The identifier for the unsafe content job. Use <code>JobId</code> to identify
* the job in a subsequent call to <code>GetContentModeration</code>.</p>
*/
inline const Aws::String& GetJobId() const{ return m_jobId; }
/**
* <p>The identifier for the unsafe content job. Use <code>JobId</code> to identify
* the job in a subsequent call to <code>GetContentModeration</code>.</p>
*/
inline bool JobIdHasBeenSet() const { return m_jobIdHasBeenSet; }
/**
* <p>The identifier for the unsafe content job. Use <code>JobId</code> to identify
* the job in a subsequent call to <code>GetContentModeration</code>.</p>
*/
inline void SetJobId(const Aws::String& value) { m_jobIdHasBeenSet = true; m_jobId = value; }
/**
* <p>The identifier for the unsafe content job. Use <code>JobId</code> to identify
* the job in a subsequent call to <code>GetContentModeration</code>.</p>
*/
inline void SetJobId(Aws::String&& value) { m_jobIdHasBeenSet = true; m_jobId = std::move(value); }
/**
* <p>The identifier for the unsafe content job. Use <code>JobId</code> to identify
* the job in a subsequent call to <code>GetContentModeration</code>.</p>
*/
inline void SetJobId(const char* value) { m_jobIdHasBeenSet = true; m_jobId.assign(value); }
/**
* <p>The identifier for the unsafe content job. Use <code>JobId</code> to identify
* the job in a subsequent call to <code>GetContentModeration</code>.</p>
*/
inline GetContentModerationRequest& WithJobId(const Aws::String& value) { SetJobId(value); return *this;}
/**
* <p>The identifier for the unsafe content job. Use <code>JobId</code> to identify
* the job in a subsequent call to <code>GetContentModeration</code>.</p>
*/
inline GetContentModerationRequest& WithJobId(Aws::String&& value) { SetJobId(std::move(value)); return *this;}
/**
* <p>The identifier for the unsafe content job. Use <code>JobId</code> to identify
* the job in a subsequent call to <code>GetContentModeration</code>.</p>
*/
inline GetContentModerationRequest& WithJobId(const char* value) { SetJobId(value); return *this;}
/**
* <p>Maximum number of results to return per paginated call. The largest value you
* can specify is 1000. If you specify a value greater than 1000, a maximum of 1000
* results is returned. The default value is 1000.</p>
*/
inline int GetMaxResults() const{ return m_maxResults; }
/**
* <p>Maximum number of results to return per paginated call. The largest value you
* can specify is 1000. If you specify a value greater than 1000, a maximum of 1000
* results is returned. The default value is 1000.</p>
*/
inline bool MaxResultsHasBeenSet() const { return m_maxResultsHasBeenSet; }
/**
* <p>Maximum number of results to return per paginated call. The largest value you
* can specify is 1000. If you specify a value greater than 1000, a maximum of 1000
* results is returned. The default value is 1000.</p>
*/
inline void SetMaxResults(int value) { m_maxResultsHasBeenSet = true; m_maxResults = value; }
/**
* <p>Maximum number of results to return per paginated call. The largest value you
* can specify is 1000. If you specify a value greater than 1000, a maximum of 1000
* results is returned. The default value is 1000.</p>
*/
inline GetContentModerationRequest& WithMaxResults(int value) { SetMaxResults(value); return *this;}
/**
* <p>If the previous response was incomplete (because there is more data to
* retrieve), Amazon Rekognition returns a pagination token in the response. You
* can use this pagination token to retrieve the next set of unsafe content
* labels.</p>
*/
inline const Aws::String& GetNextToken() const{ return m_nextToken; }
/**
* <p>If the previous response was incomplete (because there is more data to
* retrieve), Amazon Rekognition returns a pagination token in the response. You
* can use this pagination token to retrieve the next set of unsafe content
* labels.</p>
*/
inline bool NextTokenHasBeenSet() const { return m_nextTokenHasBeenSet; }
/**
* <p>If the previous response was incomplete (because there is more data to
* retrieve), Amazon Rekognition returns a pagination token in the response. You
* can use this pagination token to retrieve the next set of unsafe content
* labels.</p>
*/
inline void SetNextToken(const Aws::String& value) { m_nextTokenHasBeenSet = true; m_nextToken = value; }
/**
* <p>If the previous response was incomplete (because there is more data to
* retrieve), Amazon Rekognition returns a pagination token in the response. You
* can use this pagination token to retrieve the next set of unsafe content
* labels.</p>
*/
inline void SetNextToken(Aws::String&& value) { m_nextTokenHasBeenSet = true; m_nextToken = std::move(value); }
/**
* <p>If the previous response was incomplete (because there is more data to
* retrieve), Amazon Rekognition returns a pagination token in the response. You
* can use this pagination token to retrieve the next set of unsafe content
* labels.</p>
*/
inline void SetNextToken(const char* value) { m_nextTokenHasBeenSet = true; m_nextToken.assign(value); }
/**
* <p>If the previous response was incomplete (because there is more data to
* retrieve), Amazon Rekognition returns a pagination token in the response. You
* can use this pagination token to retrieve the next set of unsafe content
* labels.</p>
*/
inline GetContentModerationRequest& WithNextToken(const Aws::String& value) { SetNextToken(value); return *this;}
/**
* <p>If the previous response was incomplete (because there is more data to
* retrieve), Amazon Rekognition returns a pagination token in the response. You
* can use this pagination token to retrieve the next set of unsafe content
* labels.</p>
*/
inline GetContentModerationRequest& WithNextToken(Aws::String&& value) { SetNextToken(std::move(value)); return *this;}
/**
* <p>If the previous response was incomplete (because there is more data to
* retrieve), Amazon Rekognition returns a pagination token in the response. You
* can use this pagination token to retrieve the next set of unsafe content
* labels.</p>
*/
inline GetContentModerationRequest& WithNextToken(const char* value) { SetNextToken(value); return *this;}
/**
* <p>Sort to use for elements in the <code>ModerationLabelDetections</code> array.
* Use <code>TIMESTAMP</code> to sort array elements by the time labels are
* detected. Use <code>NAME</code> to alphabetically group elements for a label
* together. Within each label group, the array element are sorted by detection
* confidence. The default sort is by <code>TIMESTAMP</code>.</p>
*/
inline const ContentModerationSortBy& GetSortBy() const{ return m_sortBy; }
/**
* <p>Sort to use for elements in the <code>ModerationLabelDetections</code> array.
* Use <code>TIMESTAMP</code> to sort array elements by the time labels are
* detected. Use <code>NAME</code> to alphabetically group elements for a label
* together. Within each label group, the array element are sorted by detection
* confidence. The default sort is by <code>TIMESTAMP</code>.</p>
*/
inline bool SortByHasBeenSet() const { return m_sortByHasBeenSet; }
/**
* <p>Sort to use for elements in the <code>ModerationLabelDetections</code> array.
* Use <code>TIMESTAMP</code> to sort array elements by the time labels are
* detected. Use <code>NAME</code> to alphabetically group elements for a label
* together. Within each label group, the array element are sorted by detection
* confidence. The default sort is by <code>TIMESTAMP</code>.</p>
*/
inline void SetSortBy(const ContentModerationSortBy& value) { m_sortByHasBeenSet = true; m_sortBy = value; }
/**
* <p>Sort to use for elements in the <code>ModerationLabelDetections</code> array.
* Use <code>TIMESTAMP</code> to sort array elements by the time labels are
* detected. Use <code>NAME</code> to alphabetically group elements for a label
* together. Within each label group, the array element are sorted by detection
* confidence. The default sort is by <code>TIMESTAMP</code>.</p>
*/
inline void SetSortBy(ContentModerationSortBy&& value) { m_sortByHasBeenSet = true; m_sortBy = std::move(value); }
/**
* <p>Sort to use for elements in the <code>ModerationLabelDetections</code> array.
* Use <code>TIMESTAMP</code> to sort array elements by the time labels are
* detected. Use <code>NAME</code> to alphabetically group elements for a label
* together. Within each label group, the array element are sorted by detection
* confidence. The default sort is by <code>TIMESTAMP</code>.</p>
*/
inline GetContentModerationRequest& WithSortBy(const ContentModerationSortBy& value) { SetSortBy(value); return *this;}
/**
* <p>Sort to use for elements in the <code>ModerationLabelDetections</code> array.
* Use <code>TIMESTAMP</code> to sort array elements by the time labels are
* detected. Use <code>NAME</code> to alphabetically group elements for a label
* together. Within each label group, the array element are sorted by detection
* confidence. The default sort is by <code>TIMESTAMP</code>.</p>
*/
inline GetContentModerationRequest& WithSortBy(ContentModerationSortBy&& value) { SetSortBy(std::move(value)); return *this;}
private:
Aws::String m_jobId;
bool m_jobIdHasBeenSet;
int m_maxResults;
bool m_maxResultsHasBeenSet;
Aws::String m_nextToken;
bool m_nextTokenHasBeenSet;
ContentModerationSortBy m_sortBy;
bool m_sortByHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws

View File

@@ -0,0 +1,289 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/model/VideoJobStatus.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/rekognition/model/VideoMetadata.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/rekognition/model/ContentModerationDetection.h>
#include <utility>
namespace Aws
{
template<typename RESULT_TYPE>
class AmazonWebServiceResult;
namespace Utils
{
namespace Json
{
class JsonValue;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
class AWS_REKOGNITION_API GetContentModerationResult
{
public:
GetContentModerationResult();
GetContentModerationResult(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
GetContentModerationResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
/**
* <p>The current status of the unsafe content analysis job.</p>
*/
inline const VideoJobStatus& GetJobStatus() const{ return m_jobStatus; }
/**
* <p>The current status of the unsafe content analysis job.</p>
*/
inline void SetJobStatus(const VideoJobStatus& value) { m_jobStatus = value; }
/**
* <p>The current status of the unsafe content analysis job.</p>
*/
inline void SetJobStatus(VideoJobStatus&& value) { m_jobStatus = std::move(value); }
/**
* <p>The current status of the unsafe content analysis job.</p>
*/
inline GetContentModerationResult& WithJobStatus(const VideoJobStatus& value) { SetJobStatus(value); return *this;}
/**
* <p>The current status of the unsafe content analysis job.</p>
*/
inline GetContentModerationResult& WithJobStatus(VideoJobStatus&& value) { SetJobStatus(std::move(value)); return *this;}
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline const Aws::String& GetStatusMessage() const{ return m_statusMessage; }
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline void SetStatusMessage(const Aws::String& value) { m_statusMessage = value; }
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline void SetStatusMessage(Aws::String&& value) { m_statusMessage = std::move(value); }
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline void SetStatusMessage(const char* value) { m_statusMessage.assign(value); }
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline GetContentModerationResult& WithStatusMessage(const Aws::String& value) { SetStatusMessage(value); return *this;}
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline GetContentModerationResult& WithStatusMessage(Aws::String&& value) { SetStatusMessage(std::move(value)); return *this;}
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline GetContentModerationResult& WithStatusMessage(const char* value) { SetStatusMessage(value); return *this;}
/**
* <p>Information about a video that Amazon Rekognition analyzed.
* <code>Videometadata</code> is returned in every page of paginated responses from
* <code>GetContentModeration</code>. </p>
*/
inline const VideoMetadata& GetVideoMetadata() const{ return m_videoMetadata; }
/**
* <p>Information about a video that Amazon Rekognition analyzed.
* <code>Videometadata</code> is returned in every page of paginated responses from
* <code>GetContentModeration</code>. </p>
*/
inline void SetVideoMetadata(const VideoMetadata& value) { m_videoMetadata = value; }
/**
* <p>Information about a video that Amazon Rekognition analyzed.
* <code>Videometadata</code> is returned in every page of paginated responses from
* <code>GetContentModeration</code>. </p>
*/
inline void SetVideoMetadata(VideoMetadata&& value) { m_videoMetadata = std::move(value); }
/**
* <p>Information about a video that Amazon Rekognition analyzed.
* <code>Videometadata</code> is returned in every page of paginated responses from
* <code>GetContentModeration</code>. </p>
*/
inline GetContentModerationResult& WithVideoMetadata(const VideoMetadata& value) { SetVideoMetadata(value); return *this;}
/**
* <p>Information about a video that Amazon Rekognition analyzed.
* <code>Videometadata</code> is returned in every page of paginated responses from
* <code>GetContentModeration</code>. </p>
*/
inline GetContentModerationResult& WithVideoMetadata(VideoMetadata&& value) { SetVideoMetadata(std::move(value)); return *this;}
/**
* <p>The detected unsafe content labels and the time(s) they were detected.</p>
*/
inline const Aws::Vector<ContentModerationDetection>& GetModerationLabels() const{ return m_moderationLabels; }
/**
* <p>The detected unsafe content labels and the time(s) they were detected.</p>
*/
inline void SetModerationLabels(const Aws::Vector<ContentModerationDetection>& value) { m_moderationLabels = value; }
/**
* <p>The detected unsafe content labels and the time(s) they were detected.</p>
*/
inline void SetModerationLabels(Aws::Vector<ContentModerationDetection>&& value) { m_moderationLabels = std::move(value); }
/**
* <p>The detected unsafe content labels and the time(s) they were detected.</p>
*/
inline GetContentModerationResult& WithModerationLabels(const Aws::Vector<ContentModerationDetection>& value) { SetModerationLabels(value); return *this;}
/**
* <p>The detected unsafe content labels and the time(s) they were detected.</p>
*/
inline GetContentModerationResult& WithModerationLabels(Aws::Vector<ContentModerationDetection>&& value) { SetModerationLabels(std::move(value)); return *this;}
/**
* <p>The detected unsafe content labels and the time(s) they were detected.</p>
*/
inline GetContentModerationResult& AddModerationLabels(const ContentModerationDetection& value) { m_moderationLabels.push_back(value); return *this; }
/**
* <p>The detected unsafe content labels and the time(s) they were detected.</p>
*/
inline GetContentModerationResult& AddModerationLabels(ContentModerationDetection&& value) { m_moderationLabels.push_back(std::move(value)); return *this; }
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of unsafe
* content labels. </p>
*/
inline const Aws::String& GetNextToken() const{ return m_nextToken; }
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of unsafe
* content labels. </p>
*/
inline void SetNextToken(const Aws::String& value) { m_nextToken = value; }
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of unsafe
* content labels. </p>
*/
inline void SetNextToken(Aws::String&& value) { m_nextToken = std::move(value); }
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of unsafe
* content labels. </p>
*/
inline void SetNextToken(const char* value) { m_nextToken.assign(value); }
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of unsafe
* content labels. </p>
*/
inline GetContentModerationResult& WithNextToken(const Aws::String& value) { SetNextToken(value); return *this;}
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of unsafe
* content labels. </p>
*/
inline GetContentModerationResult& WithNextToken(Aws::String&& value) { SetNextToken(std::move(value)); return *this;}
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of unsafe
* content labels. </p>
*/
inline GetContentModerationResult& WithNextToken(const char* value) { SetNextToken(value); return *this;}
/**
* <p>Version number of the moderation detection model that was used to detect
* unsafe content.</p>
*/
inline const Aws::String& GetModerationModelVersion() const{ return m_moderationModelVersion; }
/**
* <p>Version number of the moderation detection model that was used to detect
* unsafe content.</p>
*/
inline void SetModerationModelVersion(const Aws::String& value) { m_moderationModelVersion = value; }
/**
* <p>Version number of the moderation detection model that was used to detect
* unsafe content.</p>
*/
inline void SetModerationModelVersion(Aws::String&& value) { m_moderationModelVersion = std::move(value); }
/**
* <p>Version number of the moderation detection model that was used to detect
* unsafe content.</p>
*/
inline void SetModerationModelVersion(const char* value) { m_moderationModelVersion.assign(value); }
/**
* <p>Version number of the moderation detection model that was used to detect
* unsafe content.</p>
*/
inline GetContentModerationResult& WithModerationModelVersion(const Aws::String& value) { SetModerationModelVersion(value); return *this;}
/**
* <p>Version number of the moderation detection model that was used to detect
* unsafe content.</p>
*/
inline GetContentModerationResult& WithModerationModelVersion(Aws::String&& value) { SetModerationModelVersion(std::move(value)); return *this;}
/**
* <p>Version number of the moderation detection model that was used to detect
* unsafe content.</p>
*/
inline GetContentModerationResult& WithModerationModelVersion(const char* value) { SetModerationModelVersion(value); return *this;}
private:
VideoJobStatus m_jobStatus;
Aws::String m_statusMessage;
VideoMetadata m_videoMetadata;
Aws::Vector<ContentModerationDetection> m_moderationLabels;
Aws::String m_nextToken;
Aws::String m_moderationModelVersion;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws

View File

@@ -0,0 +1,185 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/RekognitionRequest.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <utility>
namespace Aws
{
namespace Rekognition
{
namespace Model
{
/**
*/
class AWS_REKOGNITION_API GetFaceDetectionRequest : public RekognitionRequest
{
public:
GetFaceDetectionRequest();
// Service request name is the Operation name which will send this request out,
// each operation should has unique request name, so that we can get operation's name from this request.
// Note: this is not true for response, multiple operations may have the same response name,
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "GetFaceDetection"; }
Aws::String SerializePayload() const override;
Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
/**
* <p>Unique identifier for the face detection job. The <code>JobId</code> is
* returned from <code>StartFaceDetection</code>.</p>
*/
inline const Aws::String& GetJobId() const{ return m_jobId; }
/**
* <p>Unique identifier for the face detection job. The <code>JobId</code> is
* returned from <code>StartFaceDetection</code>.</p>
*/
inline bool JobIdHasBeenSet() const { return m_jobIdHasBeenSet; }
/**
* <p>Unique identifier for the face detection job. The <code>JobId</code> is
* returned from <code>StartFaceDetection</code>.</p>
*/
inline void SetJobId(const Aws::String& value) { m_jobIdHasBeenSet = true; m_jobId = value; }
/**
* <p>Unique identifier for the face detection job. The <code>JobId</code> is
* returned from <code>StartFaceDetection</code>.</p>
*/
inline void SetJobId(Aws::String&& value) { m_jobIdHasBeenSet = true; m_jobId = std::move(value); }
/**
* <p>Unique identifier for the face detection job. The <code>JobId</code> is
* returned from <code>StartFaceDetection</code>.</p>
*/
inline void SetJobId(const char* value) { m_jobIdHasBeenSet = true; m_jobId.assign(value); }
/**
* <p>Unique identifier for the face detection job. The <code>JobId</code> is
* returned from <code>StartFaceDetection</code>.</p>
*/
inline GetFaceDetectionRequest& WithJobId(const Aws::String& value) { SetJobId(value); return *this;}
/**
* <p>Unique identifier for the face detection job. The <code>JobId</code> is
* returned from <code>StartFaceDetection</code>.</p>
*/
inline GetFaceDetectionRequest& WithJobId(Aws::String&& value) { SetJobId(std::move(value)); return *this;}
/**
* <p>Unique identifier for the face detection job. The <code>JobId</code> is
* returned from <code>StartFaceDetection</code>.</p>
*/
inline GetFaceDetectionRequest& WithJobId(const char* value) { SetJobId(value); return *this;}
/**
* <p>Maximum number of results to return per paginated call. The largest value you
* can specify is 1000. If you specify a value greater than 1000, a maximum of 1000
* results is returned. The default value is 1000.</p>
*/
inline int GetMaxResults() const{ return m_maxResults; }
/**
* <p>Maximum number of results to return per paginated call. The largest value you
* can specify is 1000. If you specify a value greater than 1000, a maximum of 1000
* results is returned. The default value is 1000.</p>
*/
inline bool MaxResultsHasBeenSet() const { return m_maxResultsHasBeenSet; }
/**
* <p>Maximum number of results to return per paginated call. The largest value you
* can specify is 1000. If you specify a value greater than 1000, a maximum of 1000
* results is returned. The default value is 1000.</p>
*/
inline void SetMaxResults(int value) { m_maxResultsHasBeenSet = true; m_maxResults = value; }
/**
* <p>Maximum number of results to return per paginated call. The largest value you
* can specify is 1000. If you specify a value greater than 1000, a maximum of 1000
* results is returned. The default value is 1000.</p>
*/
inline GetFaceDetectionRequest& WithMaxResults(int value) { SetMaxResults(value); return *this;}
/**
* <p>If the previous response was incomplete (because there are more faces to
* retrieve), Amazon Rekognition Video returns a pagination token in the response.
* You can use this pagination token to retrieve the next set of faces.</p>
*/
inline const Aws::String& GetNextToken() const{ return m_nextToken; }
/**
* <p>If the previous response was incomplete (because there are more faces to
* retrieve), Amazon Rekognition Video returns a pagination token in the response.
* You can use this pagination token to retrieve the next set of faces.</p>
*/
inline bool NextTokenHasBeenSet() const { return m_nextTokenHasBeenSet; }
/**
* <p>If the previous response was incomplete (because there are more faces to
* retrieve), Amazon Rekognition Video returns a pagination token in the response.
* You can use this pagination token to retrieve the next set of faces.</p>
*/
inline void SetNextToken(const Aws::String& value) { m_nextTokenHasBeenSet = true; m_nextToken = value; }
/**
* <p>If the previous response was incomplete (because there are more faces to
* retrieve), Amazon Rekognition Video returns a pagination token in the response.
* You can use this pagination token to retrieve the next set of faces.</p>
*/
inline void SetNextToken(Aws::String&& value) { m_nextTokenHasBeenSet = true; m_nextToken = std::move(value); }
/**
* <p>If the previous response was incomplete (because there are more faces to
* retrieve), Amazon Rekognition Video returns a pagination token in the response.
* You can use this pagination token to retrieve the next set of faces.</p>
*/
inline void SetNextToken(const char* value) { m_nextTokenHasBeenSet = true; m_nextToken.assign(value); }
/**
* <p>If the previous response was incomplete (because there are more faces to
* retrieve), Amazon Rekognition Video returns a pagination token in the response.
* You can use this pagination token to retrieve the next set of faces.</p>
*/
inline GetFaceDetectionRequest& WithNextToken(const Aws::String& value) { SetNextToken(value); return *this;}
/**
* <p>If the previous response was incomplete (because there are more faces to
* retrieve), Amazon Rekognition Video returns a pagination token in the response.
* You can use this pagination token to retrieve the next set of faces.</p>
*/
inline GetFaceDetectionRequest& WithNextToken(Aws::String&& value) { SetNextToken(std::move(value)); return *this;}
/**
* <p>If the previous response was incomplete (because there are more faces to
* retrieve), Amazon Rekognition Video returns a pagination token in the response.
* You can use this pagination token to retrieve the next set of faces.</p>
*/
inline GetFaceDetectionRequest& WithNextToken(const char* value) { SetNextToken(value); return *this;}
private:
Aws::String m_jobId;
bool m_jobIdHasBeenSet;
int m_maxResults;
bool m_maxResultsHasBeenSet;
Aws::String m_nextToken;
bool m_nextTokenHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws

View File

@@ -0,0 +1,251 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/model/VideoJobStatus.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/rekognition/model/VideoMetadata.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/rekognition/model/FaceDetection.h>
#include <utility>
namespace Aws
{
template<typename RESULT_TYPE>
class AmazonWebServiceResult;
namespace Utils
{
namespace Json
{
class JsonValue;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
class AWS_REKOGNITION_API GetFaceDetectionResult
{
public:
GetFaceDetectionResult();
GetFaceDetectionResult(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
GetFaceDetectionResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
/**
* <p>The current status of the face detection job.</p>
*/
inline const VideoJobStatus& GetJobStatus() const{ return m_jobStatus; }
/**
* <p>The current status of the face detection job.</p>
*/
inline void SetJobStatus(const VideoJobStatus& value) { m_jobStatus = value; }
/**
* <p>The current status of the face detection job.</p>
*/
inline void SetJobStatus(VideoJobStatus&& value) { m_jobStatus = std::move(value); }
/**
* <p>The current status of the face detection job.</p>
*/
inline GetFaceDetectionResult& WithJobStatus(const VideoJobStatus& value) { SetJobStatus(value); return *this;}
/**
* <p>The current status of the face detection job.</p>
*/
inline GetFaceDetectionResult& WithJobStatus(VideoJobStatus&& value) { SetJobStatus(std::move(value)); return *this;}
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline const Aws::String& GetStatusMessage() const{ return m_statusMessage; }
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline void SetStatusMessage(const Aws::String& value) { m_statusMessage = value; }
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline void SetStatusMessage(Aws::String&& value) { m_statusMessage = std::move(value); }
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline void SetStatusMessage(const char* value) { m_statusMessage.assign(value); }
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline GetFaceDetectionResult& WithStatusMessage(const Aws::String& value) { SetStatusMessage(value); return *this;}
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline GetFaceDetectionResult& WithStatusMessage(Aws::String&& value) { SetStatusMessage(std::move(value)); return *this;}
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline GetFaceDetectionResult& WithStatusMessage(const char* value) { SetStatusMessage(value); return *this;}
/**
* <p>Information about a video that Amazon Rekognition Video analyzed.
* <code>Videometadata</code> is returned in every page of paginated responses from
* a Amazon Rekognition video operation.</p>
*/
inline const VideoMetadata& GetVideoMetadata() const{ return m_videoMetadata; }
/**
* <p>Information about a video that Amazon Rekognition Video analyzed.
* <code>Videometadata</code> is returned in every page of paginated responses from
* a Amazon Rekognition video operation.</p>
*/
inline void SetVideoMetadata(const VideoMetadata& value) { m_videoMetadata = value; }
/**
* <p>Information about a video that Amazon Rekognition Video analyzed.
* <code>Videometadata</code> is returned in every page of paginated responses from
* a Amazon Rekognition video operation.</p>
*/
inline void SetVideoMetadata(VideoMetadata&& value) { m_videoMetadata = std::move(value); }
/**
* <p>Information about a video that Amazon Rekognition Video analyzed.
* <code>Videometadata</code> is returned in every page of paginated responses from
* a Amazon Rekognition video operation.</p>
*/
inline GetFaceDetectionResult& WithVideoMetadata(const VideoMetadata& value) { SetVideoMetadata(value); return *this;}
/**
* <p>Information about a video that Amazon Rekognition Video analyzed.
* <code>Videometadata</code> is returned in every page of paginated responses from
* a Amazon Rekognition video operation.</p>
*/
inline GetFaceDetectionResult& WithVideoMetadata(VideoMetadata&& value) { SetVideoMetadata(std::move(value)); return *this;}
/**
* <p>If the response is truncated, Amazon Rekognition returns this token that you
* can use in the subsequent request to retrieve the next set of faces. </p>
*/
inline const Aws::String& GetNextToken() const{ return m_nextToken; }
/**
* <p>If the response is truncated, Amazon Rekognition returns this token that you
* can use in the subsequent request to retrieve the next set of faces. </p>
*/
inline void SetNextToken(const Aws::String& value) { m_nextToken = value; }
/**
* <p>If the response is truncated, Amazon Rekognition returns this token that you
* can use in the subsequent request to retrieve the next set of faces. </p>
*/
inline void SetNextToken(Aws::String&& value) { m_nextToken = std::move(value); }
/**
* <p>If the response is truncated, Amazon Rekognition returns this token that you
* can use in the subsequent request to retrieve the next set of faces. </p>
*/
inline void SetNextToken(const char* value) { m_nextToken.assign(value); }
/**
* <p>If the response is truncated, Amazon Rekognition returns this token that you
* can use in the subsequent request to retrieve the next set of faces. </p>
*/
inline GetFaceDetectionResult& WithNextToken(const Aws::String& value) { SetNextToken(value); return *this;}
/**
* <p>If the response is truncated, Amazon Rekognition returns this token that you
* can use in the subsequent request to retrieve the next set of faces. </p>
*/
inline GetFaceDetectionResult& WithNextToken(Aws::String&& value) { SetNextToken(std::move(value)); return *this;}
/**
* <p>If the response is truncated, Amazon Rekognition returns this token that you
* can use in the subsequent request to retrieve the next set of faces. </p>
*/
inline GetFaceDetectionResult& WithNextToken(const char* value) { SetNextToken(value); return *this;}
/**
* <p>An array of faces detected in the video. Each element contains a detected
* face's details and the time, in milliseconds from the start of the video, the
* face was detected. </p>
*/
inline const Aws::Vector<FaceDetection>& GetFaces() const{ return m_faces; }
/**
* <p>An array of faces detected in the video. Each element contains a detected
* face's details and the time, in milliseconds from the start of the video, the
* face was detected. </p>
*/
inline void SetFaces(const Aws::Vector<FaceDetection>& value) { m_faces = value; }
/**
* <p>An array of faces detected in the video. Each element contains a detected
* face's details and the time, in milliseconds from the start of the video, the
* face was detected. </p>
*/
inline void SetFaces(Aws::Vector<FaceDetection>&& value) { m_faces = std::move(value); }
/**
* <p>An array of faces detected in the video. Each element contains a detected
* face's details and the time, in milliseconds from the start of the video, the
* face was detected. </p>
*/
inline GetFaceDetectionResult& WithFaces(const Aws::Vector<FaceDetection>& value) { SetFaces(value); return *this;}
/**
* <p>An array of faces detected in the video. Each element contains a detected
* face's details and the time, in milliseconds from the start of the video, the
* face was detected. </p>
*/
inline GetFaceDetectionResult& WithFaces(Aws::Vector<FaceDetection>&& value) { SetFaces(std::move(value)); return *this;}
/**
* <p>An array of faces detected in the video. Each element contains a detected
* face's details and the time, in milliseconds from the start of the video, the
* face was detected. </p>
*/
inline GetFaceDetectionResult& AddFaces(const FaceDetection& value) { m_faces.push_back(value); return *this; }
/**
* <p>An array of faces detected in the video. Each element contains a detected
* face's details and the time, in milliseconds from the start of the video, the
* face was detected. </p>
*/
inline GetFaceDetectionResult& AddFaces(FaceDetection&& value) { m_faces.push_back(std::move(value)); return *this; }
private:
VideoJobStatus m_jobStatus;
Aws::String m_statusMessage;
VideoMetadata m_videoMetadata;
Aws::String m_nextToken;
Aws::Vector<FaceDetection> m_faces;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws

View File

@@ -0,0 +1,240 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/RekognitionRequest.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/rekognition/model/FaceSearchSortBy.h>
#include <utility>
namespace Aws
{
namespace Rekognition
{
namespace Model
{
/**
*/
class AWS_REKOGNITION_API GetFaceSearchRequest : public RekognitionRequest
{
public:
GetFaceSearchRequest();
// The service request name is the operation name that sends this request out;
// each operation should have a unique request name, so that we can get the operation's name from its request.
// Note: this is not true for responses; multiple operations may share the same response name,
// so we cannot get an operation's name from its response.
inline virtual const char* GetServiceRequestName() const override { return "GetFaceSearch"; }
Aws::String SerializePayload() const override;
Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
/**
* <p>The job identifier for the search request. You get the job identifier from an
* initial call to <code>StartFaceSearch</code>.</p>
*/
inline const Aws::String& GetJobId() const{ return m_jobId; }
/**
* <p>The job identifier for the search request. You get the job identifier from an
* initial call to <code>StartFaceSearch</code>.</p>
*/
inline bool JobIdHasBeenSet() const { return m_jobIdHasBeenSet; }
/**
* <p>The job identifier for the search request. You get the job identifier from an
* initial call to <code>StartFaceSearch</code>.</p>
*/
inline void SetJobId(const Aws::String& value) { m_jobIdHasBeenSet = true; m_jobId = value; }
/**
* <p>The job identifier for the search request. You get the job identifier from an
* initial call to <code>StartFaceSearch</code>.</p>
*/
inline void SetJobId(Aws::String&& value) { m_jobIdHasBeenSet = true; m_jobId = std::move(value); }
/**
* <p>The job identifier for the search request. You get the job identifier from an
* initial call to <code>StartFaceSearch</code>.</p>
*/
inline void SetJobId(const char* value) { m_jobIdHasBeenSet = true; m_jobId.assign(value); }
/**
* <p>The job identifier for the search request. You get the job identifier from an
* initial call to <code>StartFaceSearch</code>.</p>
*/
inline GetFaceSearchRequest& WithJobId(const Aws::String& value) { SetJobId(value); return *this;}
/**
* <p>The job identifier for the search request. You get the job identifier from an
* initial call to <code>StartFaceSearch</code>.</p>
*/
inline GetFaceSearchRequest& WithJobId(Aws::String&& value) { SetJobId(std::move(value)); return *this;}
/**
* <p>The job identifier for the search request. You get the job identifier from an
* initial call to <code>StartFaceSearch</code>.</p>
*/
inline GetFaceSearchRequest& WithJobId(const char* value) { SetJobId(value); return *this;}
/**
* <p>Maximum number of results to return per paginated call. The largest value you
* can specify is 1000. If you specify a value greater than 1000, a maximum of 1000
* results is returned. The default value is 1000.</p>
*/
inline int GetMaxResults() const{ return m_maxResults; }
/**
* <p>Maximum number of results to return per paginated call. The largest value you
* can specify is 1000. If you specify a value greater than 1000, a maximum of 1000
* results is returned. The default value is 1000.</p>
*/
inline bool MaxResultsHasBeenSet() const { return m_maxResultsHasBeenSet; }
/**
* <p>Maximum number of results to return per paginated call. The largest value you
* can specify is 1000. If you specify a value greater than 1000, a maximum of 1000
* results is returned. The default value is 1000.</p>
*/
inline void SetMaxResults(int value) { m_maxResultsHasBeenSet = true; m_maxResults = value; }
/**
* <p>Maximum number of results to return per paginated call. The largest value you
* can specify is 1000. If you specify a value greater than 1000, a maximum of 1000
* results is returned. The default value is 1000.</p>
*/
inline GetFaceSearchRequest& WithMaxResults(int value) { SetMaxResults(value); return *this;}
/**
* <p>If the previous response was incomplete (because there are more search
* results to retrieve), Amazon Rekognition Video returns a pagination token in the
* response. You can use this pagination token to retrieve the next set of search
* results. </p>
*/
inline const Aws::String& GetNextToken() const{ return m_nextToken; }
/**
* <p>If the previous response was incomplete (because there are more search
* results to retrieve), Amazon Rekognition Video returns a pagination token in the
* response. You can use this pagination token to retrieve the next set of search
* results. </p>
*/
inline bool NextTokenHasBeenSet() const { return m_nextTokenHasBeenSet; }
/**
* <p>If the previous response was incomplete (because there are more search
* results to retrieve), Amazon Rekognition Video returns a pagination token in the
* response. You can use this pagination token to retrieve the next set of search
* results. </p>
*/
inline void SetNextToken(const Aws::String& value) { m_nextTokenHasBeenSet = true; m_nextToken = value; }
/**
* <p>If the previous response was incomplete (because there are more search
* results to retrieve), Amazon Rekognition Video returns a pagination token in the
* response. You can use this pagination token to retrieve the next set of search
* results. </p>
*/
inline void SetNextToken(Aws::String&& value) { m_nextTokenHasBeenSet = true; m_nextToken = std::move(value); }
/**
* <p>If the previous response was incomplete (because there are more search
* results to retrieve), Amazon Rekognition Video returns a pagination token in the
* response. You can use this pagination token to retrieve the next set of search
* results. </p>
*/
inline void SetNextToken(const char* value) { m_nextTokenHasBeenSet = true; m_nextToken.assign(value); }
/**
* <p>If the previous response was incomplete (because there are more search
* results to retrieve), Amazon Rekognition Video returns a pagination token in the
* response. You can use this pagination token to retrieve the next set of search
* results. </p>
*/
inline GetFaceSearchRequest& WithNextToken(const Aws::String& value) { SetNextToken(value); return *this;}
/**
* <p>If the previous response was incomplete (because there are more search
* results to retrieve), Amazon Rekognition Video returns a pagination token in the
* response. You can use this pagination token to retrieve the next set of search
* results. </p>
*/
inline GetFaceSearchRequest& WithNextToken(Aws::String&& value) { SetNextToken(std::move(value)); return *this;}
/**
* <p>If the previous response was incomplete (because there are more search
* results to retrieve), Amazon Rekognition Video returns a pagination token in the
* response. You can use this pagination token to retrieve the next set of search
* results. </p>
*/
inline GetFaceSearchRequest& WithNextToken(const char* value) { SetNextToken(value); return *this;}
/**
* <p>Sort to use for grouping faces in the response. Use <code>TIMESTAMP</code> to
* group faces by the time that they are recognized. Use <code>INDEX</code> to sort
* by recognized faces. </p>
*/
inline const FaceSearchSortBy& GetSortBy() const{ return m_sortBy; }
/**
* <p>Sort to use for grouping faces in the response. Use <code>TIMESTAMP</code> to
* group faces by the time that they are recognized. Use <code>INDEX</code> to sort
* by recognized faces. </p>
*/
inline bool SortByHasBeenSet() const { return m_sortByHasBeenSet; }
/**
* <p>Sort to use for grouping faces in the response. Use <code>TIMESTAMP</code> to
* group faces by the time that they are recognized. Use <code>INDEX</code> to sort
* by recognized faces. </p>
*/
inline void SetSortBy(const FaceSearchSortBy& value) { m_sortByHasBeenSet = true; m_sortBy = value; }
/**
* <p>Sort to use for grouping faces in the response. Use <code>TIMESTAMP</code> to
* group faces by the time that they are recognized. Use <code>INDEX</code> to sort
* by recognized faces. </p>
*/
inline void SetSortBy(FaceSearchSortBy&& value) { m_sortByHasBeenSet = true; m_sortBy = std::move(value); }
/**
* <p>Sort to use for grouping faces in the response. Use <code>TIMESTAMP</code> to
* group faces by the time that they are recognized. Use <code>INDEX</code> to sort
* by recognized faces. </p>
*/
inline GetFaceSearchRequest& WithSortBy(const FaceSearchSortBy& value) { SetSortBy(value); return *this;}
/**
* <p>Sort to use for grouping faces in the response. Use <code>TIMESTAMP</code> to
* group faces by the time that they are recognized. Use <code>INDEX</code> to sort
* by recognized faces. </p>
*/
inline GetFaceSearchRequest& WithSortBy(FaceSearchSortBy&& value) { SetSortBy(std::move(value)); return *this;}
private:
Aws::String m_jobId;
bool m_jobIdHasBeenSet;
int m_maxResults;
bool m_maxResultsHasBeenSet;
Aws::String m_nextToken;
bool m_nextTokenHasBeenSet;
FaceSearchSortBy m_sortBy;
bool m_sortByHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
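
A short usage sketch for the fluent setters above, illustrative only: jobId is a placeholder for the identifier returned by StartFaceSearch, and FaceSearchSortBy::INDEX (an enum value assumed from the FaceSearchSortBy header included above) requests grouping by recognized face rather than by timestamp.

#include <aws/rekognition/model/GetFaceSearchRequest.h>

// Builds a first-page request; the With* setters return *this, so they chain.
Aws::Rekognition::Model::GetFaceSearchRequest
MakeFaceSearchRequest(const Aws::String& jobId)
{
    Aws::Rekognition::Model::GetFaceSearchRequest request;
    request.WithJobId(jobId)      // placeholder job identifier
           .WithMaxResults(500)   // at most 1000 results per page
           .WithSortBy(Aws::Rekognition::Model::FaceSearchSortBy::INDEX);
    return request;
}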

View File

@@ -0,0 +1,286 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/model/VideoJobStatus.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/rekognition/model/VideoMetadata.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/rekognition/model/PersonMatch.h>
#include <utility>
namespace Aws
{
template<typename RESULT_TYPE>
class AmazonWebServiceResult;
namespace Utils
{
namespace Json
{
class JsonValue;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
class AWS_REKOGNITION_API GetFaceSearchResult
{
public:
GetFaceSearchResult();
GetFaceSearchResult(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
GetFaceSearchResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
/**
* <p>The current status of the face search job.</p>
*/
inline const VideoJobStatus& GetJobStatus() const{ return m_jobStatus; }
/**
* <p>The current status of the face search job.</p>
*/
inline void SetJobStatus(const VideoJobStatus& value) { m_jobStatus = value; }
/**
* <p>The current status of the face search job.</p>
*/
inline void SetJobStatus(VideoJobStatus&& value) { m_jobStatus = std::move(value); }
/**
* <p>The current status of the face search job.</p>
*/
inline GetFaceSearchResult& WithJobStatus(const VideoJobStatus& value) { SetJobStatus(value); return *this;}
/**
* <p>The current status of the face search job.</p>
*/
inline GetFaceSearchResult& WithJobStatus(VideoJobStatus&& value) { SetJobStatus(std::move(value)); return *this;}
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline const Aws::String& GetStatusMessage() const{ return m_statusMessage; }
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline void SetStatusMessage(const Aws::String& value) { m_statusMessage = value; }
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline void SetStatusMessage(Aws::String&& value) { m_statusMessage = std::move(value); }
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline void SetStatusMessage(const char* value) { m_statusMessage.assign(value); }
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline GetFaceSearchResult& WithStatusMessage(const Aws::String& value) { SetStatusMessage(value); return *this;}
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline GetFaceSearchResult& WithStatusMessage(Aws::String&& value) { SetStatusMessage(std::move(value)); return *this;}
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline GetFaceSearchResult& WithStatusMessage(const char* value) { SetStatusMessage(value); return *this;}
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of search
* results. </p>
*/
inline const Aws::String& GetNextToken() const{ return m_nextToken; }
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of search
* results. </p>
*/
inline void SetNextToken(const Aws::String& value) { m_nextToken = value; }
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of search
* results. </p>
*/
inline void SetNextToken(Aws::String&& value) { m_nextToken = std::move(value); }
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of search
* results. </p>
*/
inline void SetNextToken(const char* value) { m_nextToken.assign(value); }
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of search
* results. </p>
*/
inline GetFaceSearchResult& WithNextToken(const Aws::String& value) { SetNextToken(value); return *this;}
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of search
* results. </p>
*/
inline GetFaceSearchResult& WithNextToken(Aws::String&& value) { SetNextToken(std::move(value)); return *this;}
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of search
* results. </p>
*/
inline GetFaceSearchResult& WithNextToken(const char* value) { SetNextToken(value); return *this;}
/**
* <p>Information about a video that Amazon Rekognition analyzed.
* <code>Videometadata</code> is returned in every page of paginated responses from
* an Amazon Rekognition Video operation. </p>
*/
inline const VideoMetadata& GetVideoMetadata() const{ return m_videoMetadata; }
/**
* <p>Information about a video that Amazon Rekognition analyzed.
* <code>Videometadata</code> is returned in every page of paginated responses from
* an Amazon Rekognition Video operation. </p>
*/
inline void SetVideoMetadata(const VideoMetadata& value) { m_videoMetadata = value; }
/**
* <p>Information about a video that Amazon Rekognition analyzed.
* <code>Videometadata</code> is returned in every page of paginated responses from
* an Amazon Rekognition Video operation. </p>
*/
inline void SetVideoMetadata(VideoMetadata&& value) { m_videoMetadata = std::move(value); }
/**
* <p>Information about a video that Amazon Rekognition analyzed.
* <code>Videometadata</code> is returned in every page of paginated responses from
* an Amazon Rekognition Video operation. </p>
*/
inline GetFaceSearchResult& WithVideoMetadata(const VideoMetadata& value) { SetVideoMetadata(value); return *this;}
/**
* <p>Information about a video that Amazon Rekognition analyzed.
* <code>Videometadata</code> is returned in every page of paginated responses from
* an Amazon Rekognition Video operation. </p>
*/
inline GetFaceSearchResult& WithVideoMetadata(VideoMetadata&& value) { SetVideoMetadata(std::move(value)); return *this;}
/**
* <p>An array of persons, <a>PersonMatch</a>, in the video whose face(s) match the
* face(s) in an Amazon Rekognition collection. It also includes time information
* for when persons are matched in the video. You specify the input collection in
* an initial call to <code>StartFaceSearch</code>. Each <code>Persons</code>
* element includes a time the person was matched, face match details
* (<code>FaceMatches</code>) for matching faces in the collection, and person
* information (<code>Person</code>) for the matched person. </p>
*/
inline const Aws::Vector<PersonMatch>& GetPersons() const{ return m_persons; }
/**
* <p>An array of persons, <a>PersonMatch</a>, in the video whose face(s) match the
* face(s) in an Amazon Rekognition collection. It also includes time information
* for when persons are matched in the video. You specify the input collection in
* an initial call to <code>StartFaceSearch</code>. Each <code>Persons</code>
* element includes a time the person was matched, face match details
* (<code>FaceMatches</code>) for matching faces in the collection, and person
* information (<code>Person</code>) for the matched person. </p>
*/
inline void SetPersons(const Aws::Vector<PersonMatch>& value) { m_persons = value; }
/**
* <p>An array of persons, <a>PersonMatch</a>, in the video whose face(s) match the
* face(s) in an Amazon Rekognition collection. It also includes time information
* for when persons are matched in the video. You specify the input collection in
* an initial call to <code>StartFaceSearch</code>. Each <code>Persons</code>
* element includes a time the person was matched, face match details
* (<code>FaceMatches</code>) for matching faces in the collection, and person
* information (<code>Person</code>) for the matched person. </p>
*/
inline void SetPersons(Aws::Vector<PersonMatch>&& value) { m_persons = std::move(value); }
/**
* <p>An array of persons, <a>PersonMatch</a>, in the video whose face(s) match the
* face(s) in an Amazon Rekognition collection. It also includes time information
* for when persons are matched in the video. You specify the input collection in
* an initial call to <code>StartFaceSearch</code>. Each <code>Persons</code>
* element includes a time the person was matched, face match details
* (<code>FaceMatches</code>) for matching faces in the collection, and person
* information (<code>Person</code>) for the matched person. </p>
*/
inline GetFaceSearchResult& WithPersons(const Aws::Vector<PersonMatch>& value) { SetPersons(value); return *this;}
/**
* <p>An array of persons, <a>PersonMatch</a>, in the video whose face(s) match the
* face(s) in an Amazon Rekognition collection. It also includes time information
* for when persons are matched in the video. You specify the input collection in
* an initial call to <code>StartFaceSearch</code>. Each <code>Persons</code>
* element includes a time the person was matched, face match details
* (<code>FaceMatches</code>) for matching faces in the collection, and person
* information (<code>Person</code>) for the matched person. </p>
*/
inline GetFaceSearchResult& WithPersons(Aws::Vector<PersonMatch>&& value) { SetPersons(std::move(value)); return *this;}
/**
* <p>An array of persons, <a>PersonMatch</a>, in the video whose face(s) match the
* face(s) in an Amazon Rekognition collection. It also includes time information
* for when persons are matched in the video. You specify the input collection in
* an initial call to <code>StartFaceSearch</code>. Each <code>Persons</code>
* element includes a time the person was matched, face match details
* (<code>FaceMatches</code>) for matching faces in the collection, and person
* information (<code>Person</code>) for the matched person. </p>
*/
inline GetFaceSearchResult& AddPersons(const PersonMatch& value) { m_persons.push_back(value); return *this; }
/**
* <p>An array of persons, <a>PersonMatch</a>, in the video whose face(s) match the
* face(s) in an Amazon Rekognition collection. It also includes time information
* for when persons are matched in the video. You specify the input collection in
* an initial call to <code>StartFaceSearch</code>. Each <code>Persons</code>
* element includes a time the person was matched, face match details
* (<code>FaceMatches</code>) for matching faces in the collection, and person
* information (<code>Person</code>) for the matched person. </p>
*/
inline GetFaceSearchResult& AddPersons(PersonMatch&& value) { m_persons.push_back(std::move(value)); return *this; }
private:
VideoJobStatus m_jobStatus;
Aws::String m_statusMessage;
Aws::String m_nextToken;
VideoMetadata m_videoMetadata;
Aws::Vector<PersonMatch> m_persons;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
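
A sketch of consuming one page of a GetFaceSearchResult. The PersonMatch, PersonDetail, FaceMatch, and Face accessors used below live in other model headers; their shapes here are assumptions based on this SDK's conventions, not definitions from this file.

#include <aws/rekognition/model/GetFaceSearchResult.h>
#include <iostream>

void PrintFaceSearchMatches(const Aws::Rekognition::Model::GetFaceSearchResult& result)
{
    for (const auto& match : result.GetPersons())
    {
        // Each PersonMatch pairs a timestamp with the matched person and
        // the collection faces that matched.
        std::cout << "person " << match.GetPerson().GetIndex()
                  << " matched at " << match.GetTimestamp() << " ms\n";
        for (const auto& faceMatch : match.GetFaceMatches())
        {
            std::cout << "  collection face " << faceMatch.GetFace().GetFaceId()
                      << " (similarity " << faceMatch.GetSimilarity() << ")\n";
        }
    }
}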

View File

@@ -0,0 +1,252 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/RekognitionRequest.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/rekognition/model/LabelDetectionSortBy.h>
#include <utility>
namespace Aws
{
namespace Rekognition
{
namespace Model
{
/**
*/
class AWS_REKOGNITION_API GetLabelDetectionRequest : public RekognitionRequest
{
public:
GetLabelDetectionRequest();
// The service request name is the operation name that sends this request out;
// each operation should have a unique request name, so that we can get the operation's name from its request.
// Note: this is not true for responses; multiple operations may share the same response name,
// so we cannot get an operation's name from its response.
inline virtual const char* GetServiceRequestName() const override { return "GetLabelDetection"; }
Aws::String SerializePayload() const override;
Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
/**
* <p>Job identifier for the label detection operation for which you want results
* returned. You get the job identifier from an initial call to
* <code>StartLabelDetection</code>.</p>
*/
inline const Aws::String& GetJobId() const{ return m_jobId; }
/**
* <p>Job identifier for the label detection operation for which you want results
* returned. You get the job identifier from an initial call to
* <code>StartLabelDetection</code>.</p>
*/
inline bool JobIdHasBeenSet() const { return m_jobIdHasBeenSet; }
/**
* <p>Job identifier for the label detection operation for which you want results
* returned. You get the job identifier from an initial call to
* <code>StartLabelDetection</code>.</p>
*/
inline void SetJobId(const Aws::String& value) { m_jobIdHasBeenSet = true; m_jobId = value; }
/**
* <p>Job identifier for the label detection operation for which you want results
* returned. You get the job identifier from an initial call to
* <code>StartLabelDetection</code>.</p>
*/
inline void SetJobId(Aws::String&& value) { m_jobIdHasBeenSet = true; m_jobId = std::move(value); }
/**
* <p>Job identifier for the label detection operation for which you want results
* returned. You get the job identifier from an initial call to
* <code>StartLabelDetection</code>.</p>
*/
inline void SetJobId(const char* value) { m_jobIdHasBeenSet = true; m_jobId.assign(value); }
/**
* <p>Job identifier for the label detection operation for which you want results
* returned. You get the job identifier from an initial call to
* <code>StartLabelDetection</code>.</p>
*/
inline GetLabelDetectionRequest& WithJobId(const Aws::String& value) { SetJobId(value); return *this;}
/**
* <p>Job identifier for the label detection operation for which you want results
* returned. You get the job identifier from an initial call to
* <code>StartLabelDetection</code>.</p>
*/
inline GetLabelDetectionRequest& WithJobId(Aws::String&& value) { SetJobId(std::move(value)); return *this;}
/**
* <p>Job identifier for the label detection operation for which you want results
* returned. You get the job identifier from an initial call to
* <code>StartLabelDetection</code>.</p>
*/
inline GetLabelDetectionRequest& WithJobId(const char* value) { SetJobId(value); return *this;}
/**
* <p>Maximum number of results to return per paginated call. The largest value you
* can specify is 1000. If you specify a value greater than 1000, a maximum of 1000
* results is returned. The default value is 1000.</p>
*/
inline int GetMaxResults() const{ return m_maxResults; }
/**
* <p>Maximum number of results to return per paginated call. The largest value you
* can specify is 1000. If you specify a value greater than 1000, a maximum of 1000
* results is returned. The default value is 1000.</p>
*/
inline bool MaxResultsHasBeenSet() const { return m_maxResultsHasBeenSet; }
/**
* <p>Maximum number of results to return per paginated call. The largest value you
* can specify is 1000. If you specify a value greater than 1000, a maximum of 1000
* results is returned. The default value is 1000.</p>
*/
inline void SetMaxResults(int value) { m_maxResultsHasBeenSet = true; m_maxResults = value; }
/**
* <p>Maximum number of results to return per paginated call. The largest value you
* can specify is 1000. If you specify a value greater than 1000, a maximum of 1000
* results is returned. The default value is 1000.</p>
*/
inline GetLabelDetectionRequest& WithMaxResults(int value) { SetMaxResults(value); return *this;}
/**
* <p>If the previous response was incomplete (because there are more labels to
* retrieve), Amazon Rekognition Video returns a pagination token in the response.
* You can use this pagination token to retrieve the next set of labels. </p>
*/
inline const Aws::String& GetNextToken() const{ return m_nextToken; }
/**
* <p>If the previous response was incomplete (because there are more labels to
* retrieve), Amazon Rekognition Video returns a pagination token in the response.
* You can use this pagination token to retrieve the next set of labels. </p>
*/
inline bool NextTokenHasBeenSet() const { return m_nextTokenHasBeenSet; }
/**
* <p>If the previous response was incomplete (because there are more labels to
* retrieve), Amazon Rekognition Video returns a pagination token in the response.
* You can use this pagination token to retrieve the next set of labels. </p>
*/
inline void SetNextToken(const Aws::String& value) { m_nextTokenHasBeenSet = true; m_nextToken = value; }
/**
* <p>If the previous response was incomplete (because there are more labels to
* retrieve), Amazon Rekognition Video returns a pagination token in the response.
* You can use this pagination token to retrieve the next set of labels. </p>
*/
inline void SetNextToken(Aws::String&& value) { m_nextTokenHasBeenSet = true; m_nextToken = std::move(value); }
/**
* <p>If the previous response was incomplete (because there are more labels to
* retrieve), Amazon Rekognition Video returns a pagination token in the response.
* You can use this pagination token to retrieve the next set of labels. </p>
*/
inline void SetNextToken(const char* value) { m_nextTokenHasBeenSet = true; m_nextToken.assign(value); }
/**
* <p>If the previous response was incomplete (because there are more labels to
* retrieve), Amazon Rekognition Video returns a pagination token in the response.
* You can use this pagination token to retrieve the next set of labels. </p>
*/
inline GetLabelDetectionRequest& WithNextToken(const Aws::String& value) { SetNextToken(value); return *this;}
/**
* <p>If the previous response was incomplete (because there are more labels to
* retrieve), Amazon Rekognition Video returns a pagination token in the response.
* You can use this pagination token to retrieve the next set of labels. </p>
*/
inline GetLabelDetectionRequest& WithNextToken(Aws::String&& value) { SetNextToken(std::move(value)); return *this;}
/**
* <p>If the previous response was incomplete (because there are more labels to
* retrieve), Amazon Rekognition Video returns a pagination token in the response.
* You can use this pagination token to retrieve the next set of labels. </p>
*/
inline GetLabelDetectionRequest& WithNextToken(const char* value) { SetNextToken(value); return *this;}
/**
* <p>Sort to use for elements in the <code>Labels</code> array. Use
* <code>TIMESTAMP</code> to sort array elements by the time labels are detected.
* Use <code>NAME</code> to alphabetically group elements for a label together.
* Within each label group, the array elements are sorted by detection confidence.
* The default sort is by <code>TIMESTAMP</code>.</p>
*/
inline const LabelDetectionSortBy& GetSortBy() const{ return m_sortBy; }
/**
* <p>Sort to use for elements in the <code>Labels</code> array. Use
* <code>TIMESTAMP</code> to sort array elements by the time labels are detected.
* Use <code>NAME</code> to alphabetically group elements for a label together.
* Within each label group, the array elements are sorted by detection confidence.
* The default sort is by <code>TIMESTAMP</code>.</p>
*/
inline bool SortByHasBeenSet() const { return m_sortByHasBeenSet; }
/**
* <p>Sort to use for elements in the <code>Labels</code> array. Use
* <code>TIMESTAMP</code> to sort array elements by the time labels are detected.
* Use <code>NAME</code> to alphabetically group elements for a label together.
* Within each label group, the array elements are sorted by detection confidence.
* The default sort is by <code>TIMESTAMP</code>.</p>
*/
inline void SetSortBy(const LabelDetectionSortBy& value) { m_sortByHasBeenSet = true; m_sortBy = value; }
/**
* <p>Sort to use for elements in the <code>Labels</code> array. Use
* <code>TIMESTAMP</code> to sort array elements by the time labels are detected.
* Use <code>NAME</code> to alphabetically group elements for a label together.
* Within each label group, the array elements are sorted by detection confidence.
* The default sort is by <code>TIMESTAMP</code>.</p>
*/
inline void SetSortBy(LabelDetectionSortBy&& value) { m_sortByHasBeenSet = true; m_sortBy = std::move(value); }
/**
* <p>Sort to use for elements in the <code>Labels</code> array. Use
* <code>TIMESTAMP</code> to sort array elements by the time labels are detected.
* Use <code>NAME</code> to alphabetically group elements for a label together.
* Within each label group, the array elements are sorted by detection confidence.
* The default sort is by <code>TIMESTAMP</code>.</p>
*/
inline GetLabelDetectionRequest& WithSortBy(const LabelDetectionSortBy& value) { SetSortBy(value); return *this;}
/**
* <p>Sort to use for elements in the <code>Labels</code> array. Use
* <code>TIMESTAMP</code> to sort array elements by the time labels are detected.
* Use <code>NAME</code> to alphabetically group elements for a label together.
* Within each label group, the array elements are sorted by detection confidence.
* The default sort is by <code>TIMESTAMP</code>.</p>
*/
inline GetLabelDetectionRequest& WithSortBy(LabelDetectionSortBy&& value) { SetSortBy(std::move(value)); return *this;}
private:
Aws::String m_jobId;
bool m_jobIdHasBeenSet;
int m_maxResults;
bool m_maxResultsHasBeenSet;
Aws::String m_nextToken;
bool m_nextTokenHasBeenSet;
LabelDetectionSortBy m_sortBy;
bool m_sortByHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
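
Illustrative only: a request that groups label results alphabetically via the setters above. jobId is a placeholder for the identifier returned by StartLabelDetection, and LabelDetectionSortBy::NAME is an enum value assumed from the LabelDetectionSortBy header this file includes.

#include <aws/rekognition/model/GetLabelDetectionRequest.h>

// With NAME sorting, elements for the same label are grouped together and
// sorted by detection confidence within each group.
Aws::Rekognition::Model::GetLabelDetectionRequest
MakeLabelDetectionRequest(const Aws::String& jobId)
{
    Aws::Rekognition::Model::GetLabelDetectionRequest request;
    request.WithJobId(jobId)
           .WithSortBy(Aws::Rekognition::Model::LabelDetectionSortBy::NAME);
    return request;
}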

View File

@@ -0,0 +1,303 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/model/VideoJobStatus.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/rekognition/model/VideoMetadata.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/rekognition/model/LabelDetection.h>
#include <utility>
namespace Aws
{
template<typename RESULT_TYPE>
class AmazonWebServiceResult;
namespace Utils
{
namespace Json
{
class JsonValue;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
class AWS_REKOGNITION_API GetLabelDetectionResult
{
public:
GetLabelDetectionResult();
GetLabelDetectionResult(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
GetLabelDetectionResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
/**
* <p>The current status of the label detection job.</p>
*/
inline const VideoJobStatus& GetJobStatus() const{ return m_jobStatus; }
/**
* <p>The current status of the label detection job.</p>
*/
inline void SetJobStatus(const VideoJobStatus& value) { m_jobStatus = value; }
/**
* <p>The current status of the label detection job.</p>
*/
inline void SetJobStatus(VideoJobStatus&& value) { m_jobStatus = std::move(value); }
/**
* <p>The current status of the label detection job.</p>
*/
inline GetLabelDetectionResult& WithJobStatus(const VideoJobStatus& value) { SetJobStatus(value); return *this;}
/**
* <p>The current status of the label detection job.</p>
*/
inline GetLabelDetectionResult& WithJobStatus(VideoJobStatus&& value) { SetJobStatus(std::move(value)); return *this;}
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline const Aws::String& GetStatusMessage() const{ return m_statusMessage; }
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline void SetStatusMessage(const Aws::String& value) { m_statusMessage = value; }
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline void SetStatusMessage(Aws::String&& value) { m_statusMessage = std::move(value); }
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline void SetStatusMessage(const char* value) { m_statusMessage.assign(value); }
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline GetLabelDetectionResult& WithStatusMessage(const Aws::String& value) { SetStatusMessage(value); return *this;}
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline GetLabelDetectionResult& WithStatusMessage(Aws::String&& value) { SetStatusMessage(std::move(value)); return *this;}
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline GetLabelDetectionResult& WithStatusMessage(const char* value) { SetStatusMessage(value); return *this;}
/**
* <p>Information about a video that Amazon Rekognition Video analyzed.
* <code>Videometadata</code> is returned in every page of paginated responses from
* an Amazon Rekognition Video operation.</p>
*/
inline const VideoMetadata& GetVideoMetadata() const{ return m_videoMetadata; }
/**
* <p>Information about a video that Amazon Rekognition Video analyzed.
* <code>Videometadata</code> is returned in every page of paginated responses from
* an Amazon Rekognition Video operation.</p>
*/
inline void SetVideoMetadata(const VideoMetadata& value) { m_videoMetadata = value; }
/**
* <p>Information about a video that Amazon Rekognition Video analyzed.
* <code>Videometadata</code> is returned in every page of paginated responses from
* an Amazon Rekognition Video operation.</p>
*/
inline void SetVideoMetadata(VideoMetadata&& value) { m_videoMetadata = std::move(value); }
/**
* <p>Information about a video that Amazon Rekognition Video analyzed.
* <code>Videometadata</code> is returned in every page of paginated responses from
* an Amazon Rekognition Video operation.</p>
*/
inline GetLabelDetectionResult& WithVideoMetadata(const VideoMetadata& value) { SetVideoMetadata(value); return *this;}
/**
* <p>Information about a video that Amazon Rekognition Video analyzed.
* <code>Videometadata</code> is returned in every page of paginated responses from
* an Amazon Rekognition Video operation.</p>
*/
inline GetLabelDetectionResult& WithVideoMetadata(VideoMetadata&& value) { SetVideoMetadata(std::move(value)); return *this;}
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of
* labels.</p>
*/
inline const Aws::String& GetNextToken() const{ return m_nextToken; }
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of
* labels.</p>
*/
inline void SetNextToken(const Aws::String& value) { m_nextToken = value; }
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of
* labels.</p>
*/
inline void SetNextToken(Aws::String&& value) { m_nextToken = std::move(value); }
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of
* labels.</p>
*/
inline void SetNextToken(const char* value) { m_nextToken.assign(value); }
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of
* labels.</p>
*/
inline GetLabelDetectionResult& WithNextToken(const Aws::String& value) { SetNextToken(value); return *this;}
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of
* labels.</p>
*/
inline GetLabelDetectionResult& WithNextToken(Aws::String&& value) { SetNextToken(std::move(value)); return *this;}
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of
* labels.</p>
*/
inline GetLabelDetectionResult& WithNextToken(const char* value) { SetNextToken(value); return *this;}
/**
* <p>An array of labels detected in the video. Each element contains the detected
* label and the time, in milliseconds from the start of the video, that the label
* was detected. </p>
*/
inline const Aws::Vector<LabelDetection>& GetLabels() const{ return m_labels; }
/**
* <p>An array of labels detected in the video. Each element contains the detected
* label and the time, in milliseconds from the start of the video, that the label
* was detected. </p>
*/
inline void SetLabels(const Aws::Vector<LabelDetection>& value) { m_labels = value; }
/**
* <p>An array of labels detected in the video. Each element contains the detected
* label and the time, in milliseconds from the start of the video, that the label
* was detected. </p>
*/
inline void SetLabels(Aws::Vector<LabelDetection>&& value) { m_labels = std::move(value); }
/**
* <p>An array of labels detected in the video. Each element contains the detected
* label and the time, in milliseconds from the start of the video, that the label
* was detected. </p>
*/
inline GetLabelDetectionResult& WithLabels(const Aws::Vector<LabelDetection>& value) { SetLabels(value); return *this;}
/**
* <p>An array of labels detected in the video. Each element contains the detected
* label and the time, in milliseconds from the start of the video, that the label
* was detected. </p>
*/
inline GetLabelDetectionResult& WithLabels(Aws::Vector<LabelDetection>&& value) { SetLabels(std::move(value)); return *this;}
/**
* <p>An array of labels detected in the video. Each element contains the detected
* label and the time, in milliseconds from the start of the video, that the label
* was detected. </p>
*/
inline GetLabelDetectionResult& AddLabels(const LabelDetection& value) { m_labels.push_back(value); return *this; }
/**
* <p>An array of labels detected in the video. Each element contains the detected
* label and the time, in milliseconds from the start of the video, that the label
* was detected. </p>
*/
inline GetLabelDetectionResult& AddLabels(LabelDetection&& value) { m_labels.push_back(std::move(value)); return *this; }
/**
* <p>Version number of the label detection model that was used to detect
* labels.</p>
*/
inline const Aws::String& GetLabelModelVersion() const{ return m_labelModelVersion; }
/**
* <p>Version number of the label detection model that was used to detect
* labels.</p>
*/
inline void SetLabelModelVersion(const Aws::String& value) { m_labelModelVersion = value; }
/**
* <p>Version number of the label detection model that was used to detect
* labels.</p>
*/
inline void SetLabelModelVersion(Aws::String&& value) { m_labelModelVersion = std::move(value); }
/**
* <p>Version number of the label detection model that was used to detect
* labels.</p>
*/
inline void SetLabelModelVersion(const char* value) { m_labelModelVersion.assign(value); }
/**
* <p>Version number of the label detection model that was used to detect
* labels.</p>
*/
inline GetLabelDetectionResult& WithLabelModelVersion(const Aws::String& value) { SetLabelModelVersion(value); return *this;}
/**
* <p>Version number of the label detection model that was used to detect
* labels.</p>
*/
inline GetLabelDetectionResult& WithLabelModelVersion(Aws::String&& value) { SetLabelModelVersion(std::move(value)); return *this;}
/**
* <p>Version number of the label detection model that was used to detect
* labels.</p>
*/
inline GetLabelDetectionResult& WithLabelModelVersion(const char* value) { SetLabelModelVersion(value); return *this;}
private:
VideoJobStatus m_jobStatus;
Aws::String m_statusMessage;
VideoMetadata m_videoMetadata;
Aws::String m_nextToken;
Aws::Vector<LabelDetection> m_labels;
Aws::String m_labelModelVersion;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
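
A sketch of reading a GetLabelDetectionResult page with the accessors above. The LabelDetection and Label accessors (GetTimestamp, GetLabel, GetName, GetConfidence) come from other model headers and are assumptions following this SDK's conventions.

#include <aws/rekognition/model/GetLabelDetectionResult.h>
#include <iostream>

void PrintLabels(const Aws::Rekognition::Model::GetLabelDetectionResult& result)
{
    std::cout << "label model version: " << result.GetLabelModelVersion() << "\n";
    for (const auto& detection : result.GetLabels())
    {
        // Each element pairs a timestamp (ms from the start of the video)
        // with the detected label.
        std::cout << detection.GetTimestamp() << " ms: "
                  << detection.GetLabel().GetName()
                  << " (" << detection.GetLabel().GetConfidence() << "%)\n";
    }
}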

View File

@@ -0,0 +1,244 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/RekognitionRequest.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/rekognition/model/PersonTrackingSortBy.h>
#include <utility>
namespace Aws
{
namespace Rekognition
{
namespace Model
{
/**
*/
class AWS_REKOGNITION_API GetPersonTrackingRequest : public RekognitionRequest
{
public:
GetPersonTrackingRequest();
// The service request name is the operation name that sends this request out;
// each operation should have a unique request name, so that we can get the operation's name from its request.
// Note: this is not true for responses; multiple operations may share the same response name,
// so we cannot get an operation's name from its response.
inline virtual const char* GetServiceRequestName() const override { return "GetPersonTracking"; }
Aws::String SerializePayload() const override;
Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
/**
* <p>The identifier for a job that tracks persons in a video. You get the
* <code>JobId</code> from a call to <code>StartPersonTracking</code>. </p>
*/
inline const Aws::String& GetJobId() const{ return m_jobId; }
/**
* <p>The identifier for a job that tracks persons in a video. You get the
* <code>JobId</code> from a call to <code>StartPersonTracking</code>. </p>
*/
inline bool JobIdHasBeenSet() const { return m_jobIdHasBeenSet; }
/**
* <p>The identifier for a job that tracks persons in a video. You get the
* <code>JobId</code> from a call to <code>StartPersonTracking</code>. </p>
*/
inline void SetJobId(const Aws::String& value) { m_jobIdHasBeenSet = true; m_jobId = value; }
/**
* <p>The identifier for a job that tracks persons in a video. You get the
* <code>JobId</code> from a call to <code>StartPersonTracking</code>. </p>
*/
inline void SetJobId(Aws::String&& value) { m_jobIdHasBeenSet = true; m_jobId = std::move(value); }
/**
* <p>The identifier for a job that tracks persons in a video. You get the
* <code>JobId</code> from a call to <code>StartPersonTracking</code>. </p>
*/
inline void SetJobId(const char* value) { m_jobIdHasBeenSet = true; m_jobId.assign(value); }
/**
* <p>The identifier for a job that tracks persons in a video. You get the
* <code>JobId</code> from a call to <code>StartPersonTracking</code>. </p>
*/
inline GetPersonTrackingRequest& WithJobId(const Aws::String& value) { SetJobId(value); return *this;}
/**
* <p>The identifier for a job that tracks persons in a video. You get the
* <code>JobId</code> from a call to <code>StartPersonTracking</code>. </p>
*/
inline GetPersonTrackingRequest& WithJobId(Aws::String&& value) { SetJobId(std::move(value)); return *this;}
/**
* <p>The identifier for a job that tracks persons in a video. You get the
* <code>JobId</code> from a call to <code>StartPersonTracking</code>. </p>
*/
inline GetPersonTrackingRequest& WithJobId(const char* value) { SetJobId(value); return *this;}
/**
* <p>Maximum number of results to return per paginated call. The largest value you
* can specify is 1000. If you specify a value greater than 1000, a maximum of 1000
* results is returned. The default value is 1000.</p>
*/
inline int GetMaxResults() const{ return m_maxResults; }
/**
* <p>Maximum number of results to return per paginated call. The largest value you
* can specify is 1000. If you specify a value greater than 1000, a maximum of 1000
* results is returned. The default value is 1000.</p>
*/
inline bool MaxResultsHasBeenSet() const { return m_maxResultsHasBeenSet; }
/**
* <p>Maximum number of results to return per paginated call. The largest value you
* can specify is 1000. If you specify a value greater than 1000, a maximum of 1000
* results is returned. The default value is 1000.</p>
*/
inline void SetMaxResults(int value) { m_maxResultsHasBeenSet = true; m_maxResults = value; }
/**
* <p>Maximum number of results to return per paginated call. The largest value you
* can specify is 1000. If you specify a value greater than 1000, a maximum of 1000
* results is returned. The default value is 1000.</p>
*/
inline GetPersonTrackingRequest& WithMaxResults(int value) { SetMaxResults(value); return *this;}
/**
* <p>If the previous response was incomplete (because there are more persons to
* retrieve), Amazon Rekognition Video returns a pagination token in the response.
* You can use this pagination token to retrieve the next set of persons. </p>
*/
inline const Aws::String& GetNextToken() const{ return m_nextToken; }
/**
* <p>If the previous response was incomplete (because there are more persons to
* retrieve), Amazon Rekognition Video returns a pagination token in the response.
* You can use this pagination token to retrieve the next set of persons. </p>
*/
inline bool NextTokenHasBeenSet() const { return m_nextTokenHasBeenSet; }
/**
* <p>If the previous response was incomplete (because there are more persons to
* retrieve), Amazon Rekognition Video returns a pagination token in the response.
* You can use this pagination token to retrieve the next set of persons. </p>
*/
inline void SetNextToken(const Aws::String& value) { m_nextTokenHasBeenSet = true; m_nextToken = value; }
/**
* <p>If the previous response was incomplete (because there are more persons to
* retrieve), Amazon Rekognition Video returns a pagination token in the response.
* You can use this pagination token to retrieve the next set of persons. </p>
*/
inline void SetNextToken(Aws::String&& value) { m_nextTokenHasBeenSet = true; m_nextToken = std::move(value); }
/**
* <p>If the previous response was incomplete (because there are more persons to
* retrieve), Amazon Rekognition Video returns a pagination token in the response.
* You can use this pagination token to retrieve the next set of persons. </p>
*/
inline void SetNextToken(const char* value) { m_nextTokenHasBeenSet = true; m_nextToken.assign(value); }
/**
* <p>If the previous response was incomplete (because there are more persons to
* retrieve), Amazon Rekognition Video returns a pagination token in the response.
* You can use this pagination token to retrieve the next set of persons. </p>
*/
inline GetPersonTrackingRequest& WithNextToken(const Aws::String& value) { SetNextToken(value); return *this;}
/**
* <p>If the previous response was incomplete (because there are more persons to
* retrieve), Amazon Rekognition Video returns a pagination token in the response.
* You can use this pagination token to retrieve the next set of persons. </p>
*/
inline GetPersonTrackingRequest& WithNextToken(Aws::String&& value) { SetNextToken(std::move(value)); return *this;}
/**
* <p>If the previous response was incomplete (because there are more persons to
* retrieve), Amazon Rekognition Video returns a pagination token in the response.
* You can use this pagination token to retrieve the next set of persons. </p>
*/
inline GetPersonTrackingRequest& WithNextToken(const char* value) { SetNextToken(value); return *this;}
/**
* <p>Sort to use for elements in the <code>Persons</code> array. Use
* <code>TIMESTAMP</code> to sort array elements by the time persons are detected.
* Use <code>INDEX</code> to sort by the tracked persons. If you sort by
* <code>INDEX</code>, the array elements for each person are sorted by detection
* confidence. The default sort is by <code>TIMESTAMP</code>.</p>
*/
inline const PersonTrackingSortBy& GetSortBy() const{ return m_sortBy; }
/**
* <p>Sort to use for elements in the <code>Persons</code> array. Use
* <code>TIMESTAMP</code> to sort array elements by the time persons are detected.
* Use <code>INDEX</code> to sort by the tracked persons. If you sort by
* <code>INDEX</code>, the array elements for each person are sorted by detection
* confidence. The default sort is by <code>TIMESTAMP</code>.</p>
*/
inline bool SortByHasBeenSet() const { return m_sortByHasBeenSet; }
/**
* <p>Sort to use for elements in the <code>Persons</code> array. Use
* <code>TIMESTAMP</code> to sort array elements by the time persons are detected.
* Use <code>INDEX</code> to sort by the tracked persons. If you sort by
* <code>INDEX</code>, the array elements for each person are sorted by detection
* confidence. The default sort is by <code>TIMESTAMP</code>.</p>
*/
inline void SetSortBy(const PersonTrackingSortBy& value) { m_sortByHasBeenSet = true; m_sortBy = value; }
/**
* <p>Sort to use for elements in the <code>Persons</code> array. Use
* <code>TIMESTAMP</code> to sort array elements by the time persons are detected.
* Use <code>INDEX</code> to sort by the tracked persons. If you sort by
* <code>INDEX</code>, the array elements for each person are sorted by detection
* confidence. The default sort is by <code>TIMESTAMP</code>.</p>
*/
inline void SetSortBy(PersonTrackingSortBy&& value) { m_sortByHasBeenSet = true; m_sortBy = std::move(value); }
/**
* <p>Sort to use for elements in the <code>Persons</code> array. Use
* <code>TIMESTAMP</code> to sort array elements by the time persons are detected.
* Use <code>INDEX</code> to sort by the tracked persons. If you sort by
* <code>INDEX</code>, the array elements for each person are sorted by detection
* confidence. The default sort is by <code>TIMESTAMP</code>.</p>
*/
inline GetPersonTrackingRequest& WithSortBy(const PersonTrackingSortBy& value) { SetSortBy(value); return *this;}
/**
* <p>Sort to use for elements in the <code>Persons</code> array. Use
* <code>TIMESTAMP</code> to sort array elements by the time persons are detected.
* Use <code>INDEX</code> to sort by the tracked persons. If you sort by
* <code>INDEX</code>, the array elements for each person are sorted by detection
* confidence. The default sort is by <code>TIMESTAMP</code>.</p>
*/
inline GetPersonTrackingRequest& WithSortBy(PersonTrackingSortBy&& value) { SetSortBy(std::move(value)); return *this;}
private:
Aws::String m_jobId;
bool m_jobIdHasBeenSet;
int m_maxResults;
bool m_maxResultsHasBeenSet;
Aws::String m_nextToken;
bool m_nextTokenHasBeenSet;
PersonTrackingSortBy m_sortBy;
bool m_sortByHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
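
A small sketch of the HasBeenSet pattern visible in this request: setters flip the m_*HasBeenSet flags, and only fields that were explicitly set end up in the serialized payload. jobId and nextToken are placeholder values.

#include <aws/rekognition/model/GetPersonTrackingRequest.h>

Aws::Rekognition::Model::GetPersonTrackingRequest
BuildPersonTrackingRequest(const Aws::String& jobId, const Aws::String& nextToken)
{
    Aws::Rekognition::Model::GetPersonTrackingRequest request;
    request.SetJobId(jobId);
    // SetNextToken flips m_nextTokenHasBeenSet, so skipping it on the first
    // call keeps the field out of the serialized payload entirely.
    if (!nextToken.empty())
    {
        request.SetNextToken(nextToken);
    }
    return request;
}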

View File

@@ -0,0 +1,258 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/model/VideoJobStatus.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/rekognition/model/VideoMetadata.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/rekognition/model/PersonDetection.h>
#include <utility>
namespace Aws
{
template<typename RESULT_TYPE>
class AmazonWebServiceResult;
namespace Utils
{
namespace Json
{
class JsonValue;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
class AWS_REKOGNITION_API GetPersonTrackingResult
{
public:
GetPersonTrackingResult();
GetPersonTrackingResult(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
GetPersonTrackingResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
/**
* <p>The current status of the person tracking job.</p>
*/
inline const VideoJobStatus& GetJobStatus() const{ return m_jobStatus; }
/**
* <p>The current status of the person tracking job.</p>
*/
inline void SetJobStatus(const VideoJobStatus& value) { m_jobStatus = value; }
/**
* <p>The current status of the person tracking job.</p>
*/
inline void SetJobStatus(VideoJobStatus&& value) { m_jobStatus = std::move(value); }
/**
* <p>The current status of the person tracking job.</p>
*/
inline GetPersonTrackingResult& WithJobStatus(const VideoJobStatus& value) { SetJobStatus(value); return *this;}
/**
* <p>The current status of the person tracking job.</p>
*/
inline GetPersonTrackingResult& WithJobStatus(VideoJobStatus&& value) { SetJobStatus(std::move(value)); return *this;}
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline const Aws::String& GetStatusMessage() const{ return m_statusMessage; }
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline void SetStatusMessage(const Aws::String& value) { m_statusMessage = value; }
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline void SetStatusMessage(Aws::String&& value) { m_statusMessage = std::move(value); }
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline void SetStatusMessage(const char* value) { m_statusMessage.assign(value); }
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline GetPersonTrackingResult& WithStatusMessage(const Aws::String& value) { SetStatusMessage(value); return *this;}
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline GetPersonTrackingResult& WithStatusMessage(Aws::String&& value) { SetStatusMessage(std::move(value)); return *this;}
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline GetPersonTrackingResult& WithStatusMessage(const char* value) { SetStatusMessage(value); return *this;}
/**
* <p>Information about a video that Amazon Rekognition Video analyzed.
* <code>Videometadata</code> is returned in every page of paginated responses from
* an Amazon Rekognition Video operation.</p>
*/
inline const VideoMetadata& GetVideoMetadata() const{ return m_videoMetadata; }
/**
* <p>Information about a video that Amazon Rekognition Video analyzed.
* <code>Videometadata</code> is returned in every page of paginated responses from
* an Amazon Rekognition Video operation.</p>
*/
inline void SetVideoMetadata(const VideoMetadata& value) { m_videoMetadata = value; }
/**
* <p>Information about a video that Amazon Rekognition Video analyzed.
* <code>Videometadata</code> is returned in every page of paginated responses from
* an Amazon Rekognition Video operation.</p>
*/
inline void SetVideoMetadata(VideoMetadata&& value) { m_videoMetadata = std::move(value); }
/**
* <p>Information about a video that Amazon Rekognition Video analyzed.
* <code>Videometadata</code> is returned in every page of paginated responses from
* an Amazon Rekognition Video operation.</p>
*/
inline GetPersonTrackingResult& WithVideoMetadata(const VideoMetadata& value) { SetVideoMetadata(value); return *this;}
/**
* <p>Information about a video that Amazon Rekognition Video analyzed.
* <code>Videometadata</code> is returned in every page of paginated responses from
* an Amazon Rekognition Video operation.</p>
*/
inline GetPersonTrackingResult& WithVideoMetadata(VideoMetadata&& value) { SetVideoMetadata(std::move(value)); return *this;}
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of persons.
* </p>
*/
inline const Aws::String& GetNextToken() const{ return m_nextToken; }
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of persons.
* </p>
*/
inline void SetNextToken(const Aws::String& value) { m_nextToken = value; }
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of persons.
* </p>
*/
inline void SetNextToken(Aws::String&& value) { m_nextToken = std::move(value); }
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of persons.
* </p>
*/
inline void SetNextToken(const char* value) { m_nextToken.assign(value); }
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of persons.
* </p>
*/
inline GetPersonTrackingResult& WithNextToken(const Aws::String& value) { SetNextToken(value); return *this;}
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of persons.
* </p>
*/
inline GetPersonTrackingResult& WithNextToken(Aws::String&& value) { SetNextToken(std::move(value)); return *this;}
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of persons.
* </p>
*/
inline GetPersonTrackingResult& WithNextToken(const char* value) { SetNextToken(value); return *this;}
/**
* <p>An array of the persons detected in the video and the time(s) their path was
* tracked throughout the video. An array element will exist for each time a
* person's path is tracked. </p>
*/
inline const Aws::Vector<PersonDetection>& GetPersons() const{ return m_persons; }
/**
* <p>An array of the persons detected in the video and the time(s) their path was
* tracked throughout the video. An array element will exist for each time a
* person's path is tracked. </p>
*/
inline void SetPersons(const Aws::Vector<PersonDetection>& value) { m_persons = value; }
/**
* <p>An array of the persons detected in the video and the time(s) their path was
* tracked throughout the video. An array element will exist for each time a
* person's path is tracked. </p>
*/
inline void SetPersons(Aws::Vector<PersonDetection>&& value) { m_persons = std::move(value); }
/**
* <p>An array of the persons detected in the video and the time(s) their path was
* tracked throughout the video. An array element will exist for each time a
* person's path is tracked. </p>
*/
inline GetPersonTrackingResult& WithPersons(const Aws::Vector<PersonDetection>& value) { SetPersons(value); return *this;}
/**
* <p>An array of the persons detected in the video and the time(s) their path was
* tracked throughout the video. An array element will exist for each time a
* person's path is tracked. </p>
*/
inline GetPersonTrackingResult& WithPersons(Aws::Vector<PersonDetection>&& value) { SetPersons(std::move(value)); return *this;}
/**
* <p>An array of the persons detected in the video and the time(s) their path was
* tracked throughout the video. An array element will exist for each time a
* person's path is tracked. </p>
*/
inline GetPersonTrackingResult& AddPersons(const PersonDetection& value) { m_persons.push_back(value); return *this; }
/**
* <p>An array of the persons detected in the video and the time(s) their path was
* tracked throughout the video. An array element will exist for each time a
* person's path is tracked. </p>
*/
inline GetPersonTrackingResult& AddPersons(PersonDetection&& value) { m_persons.push_back(std::move(value)); return *this; }
private:
VideoJobStatus m_jobStatus;
Aws::String m_statusMessage;
VideoMetadata m_videoMetadata;
Aws::String m_nextToken;
Aws::Vector<PersonDetection> m_persons;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
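A minimal caller-side sketch, assuming an initialized RekognitionClient and a job ID returned by StartPersonTracking, of how the accessors above combine into a pagination loop; the function name and error handling are illustrative, not part of the generated header:

#include <aws/rekognition/RekognitionClient.h>
#include <aws/rekognition/model/GetPersonTrackingRequest.h>
#include <iostream>

// Page through all tracked person paths for a finished job.
void DumpPersonPaths(Aws::Rekognition::RekognitionClient& client, const Aws::String& jobId)
{
    Aws::String nextToken;
    do
    {
        Aws::Rekognition::Model::GetPersonTrackingRequest request;
        request.WithJobId(jobId).WithMaxResults(1000);
        if (!nextToken.empty())
        {
            request.SetNextToken(nextToken); // token from the previous page
        }
        auto outcome = client.GetPersonTracking(request);
        if (!outcome.IsSuccess())
        {
            std::cerr << outcome.GetError().GetMessage() << std::endl;
            return;
        }
        const auto& result = outcome.GetResult();
        for (const auto& detection : result.GetPersons())
        {
            std::cout << "person " << detection.GetPerson().GetIndex()
                      << " seen at " << detection.GetTimestamp() << " ms" << std::endl;
        }
        nextToken = result.GetNextToken(); // empty on the last page
    } while (!nextToken.empty());
}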


@@ -0,0 +1,181 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/RekognitionRequest.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <utility>
namespace Aws
{
namespace Rekognition
{
namespace Model
{
/**
*/
class AWS_REKOGNITION_API GetSegmentDetectionRequest : public RekognitionRequest
{
public:
GetSegmentDetectionRequest();
// Service request name is the Operation name which will send this request out,
// each operation should have a unique request name, so that we can get the operation's name from this request.
// Note: this is not true for responses; multiple operations may have the same response name,
// so we cannot get the operation's name from a response.
inline virtual const char* GetServiceRequestName() const override { return "GetSegmentDetection"; }
Aws::String SerializePayload() const override;
Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
/**
* <p>Job identifier for the segment detection operation for which you want results
* returned. You get the job identifier from an initial call to
* <code>StartSegmentDetection</code>.</p>
*/
inline const Aws::String& GetJobId() const{ return m_jobId; }
/**
* <p>Job identifier for the segment detection operation for which you want results
* returned. You get the job identifier from an initial call to
* <code>StartSegmentDetection</code>.</p>
*/
inline bool JobIdHasBeenSet() const { return m_jobIdHasBeenSet; }
/**
* <p>Job identifier for the segment detection operation for which you want results
* returned. You get the job identifier from an initial call to
* <code>StartSegmentDetection</code>.</p>
*/
inline void SetJobId(const Aws::String& value) { m_jobIdHasBeenSet = true; m_jobId = value; }
/**
* <p>Job identifier for the segment detection operation for which you want results
* returned. You get the job identifier from an initial call to
* <code>StartSegmentDetection</code>.</p>
*/
inline void SetJobId(Aws::String&& value) { m_jobIdHasBeenSet = true; m_jobId = std::move(value); }
/**
* <p>Job identifier for the segment detection operation for which you want results
* returned. You get the job identifier from an initial call to
* <code>StartSegmentDetection</code>.</p>
*/
inline void SetJobId(const char* value) { m_jobIdHasBeenSet = true; m_jobId.assign(value); }
/**
* <p>Job identifier for the segment detection operation for which you want results
* returned. You get the job identifier from an initial call to
* <code>StartSegmentDetection</code>.</p>
*/
inline GetSegmentDetectionRequest& WithJobId(const Aws::String& value) { SetJobId(value); return *this;}
/**
* <p>Job identifier for the segment detection operation for which you want results
* returned. You get the job identifier from an initial call to
* <code>StartSegmentDetection</code>.</p>
*/
inline GetSegmentDetectionRequest& WithJobId(Aws::String&& value) { SetJobId(std::move(value)); return *this;}
/**
* <p>Job identifier for the segment detection operation for which you want results
* returned. You get the job identifier from an initial call to
* <code>StartSegmentDetection</code>.</p>
*/
inline GetSegmentDetectionRequest& WithJobId(const char* value) { SetJobId(value); return *this;}
/**
* <p>Maximum number of results to return per paginated call. The largest value you
* can specify is 1000.</p>
*/
inline int GetMaxResults() const{ return m_maxResults; }
/**
* <p>Maximum number of results to return per paginated call. The largest value you
* can specify is 1000.</p>
*/
inline bool MaxResultsHasBeenSet() const { return m_maxResultsHasBeenSet; }
/**
* <p>Maximum number of results to return per paginated call. The largest value you
* can specify is 1000.</p>
*/
inline void SetMaxResults(int value) { m_maxResultsHasBeenSet = true; m_maxResults = value; }
/**
* <p>Maximum number of results to return per paginated call. The largest value you
* can specify is 1000.</p>
*/
inline GetSegmentDetectionRequest& WithMaxResults(int value) { SetMaxResults(value); return *this;}
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of segments.</p>
*/
inline const Aws::String& GetNextToken() const{ return m_nextToken; }
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of segments.</p>
*/
inline bool NextTokenHasBeenSet() const { return m_nextTokenHasBeenSet; }
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of segments.</p>
*/
inline void SetNextToken(const Aws::String& value) { m_nextTokenHasBeenSet = true; m_nextToken = value; }
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of segments.</p>
*/
inline void SetNextToken(Aws::String&& value) { m_nextTokenHasBeenSet = true; m_nextToken = std::move(value); }
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of segments.</p>
*/
inline void SetNextToken(const char* value) { m_nextTokenHasBeenSet = true; m_nextToken.assign(value); }
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of segments.</p>
*/
inline GetSegmentDetectionRequest& WithNextToken(const Aws::String& value) { SetNextToken(value); return *this;}
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of segments.</p>
*/
inline GetSegmentDetectionRequest& WithNextToken(Aws::String&& value) { SetNextToken(std::move(value)); return *this;}
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of segments.</p>
*/
inline GetSegmentDetectionRequest& WithNextToken(const char* value) { SetNextToken(value); return *this;}
private:
Aws::String m_jobId;
bool m_jobIdHasBeenSet;
int m_maxResults;
bool m_maxResultsHasBeenSet;
Aws::String m_nextToken;
bool m_nextTokenHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
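A short sketch of how the fluent setters above compose when requesting one page of segment results; jobId and nextToken are assumed inputs, with an empty token meaning the first page:

#include <aws/rekognition/model/GetSegmentDetectionRequest.h>

// Build the request for a single page of GetSegmentDetection results.
Aws::Rekognition::Model::GetSegmentDetectionRequest
MakeSegmentPageRequest(const Aws::String& jobId, const Aws::String& nextToken)
{
    Aws::Rekognition::Model::GetSegmentDetectionRequest request;
    request.WithJobId(jobId).WithMaxResults(1000); // 1000 is the documented per-page maximum
    if (!nextToken.empty())
    {
        request.SetNextToken(nextToken);
    }
    return request;
}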


@@ -0,0 +1,399 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/model/VideoJobStatus.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/rekognition/model/VideoMetadata.h>
#include <aws/rekognition/model/AudioMetadata.h>
#include <aws/rekognition/model/SegmentDetection.h>
#include <aws/rekognition/model/SegmentTypeInfo.h>
#include <utility>
namespace Aws
{
template<typename RESULT_TYPE>
class AmazonWebServiceResult;
namespace Utils
{
namespace Json
{
class JsonValue;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
class AWS_REKOGNITION_API GetSegmentDetectionResult
{
public:
GetSegmentDetectionResult();
GetSegmentDetectionResult(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
GetSegmentDetectionResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
/**
* <p>Current status of the segment detection job.</p>
*/
inline const VideoJobStatus& GetJobStatus() const{ return m_jobStatus; }
/**
* <p>Current status of the segment detection job.</p>
*/
inline void SetJobStatus(const VideoJobStatus& value) { m_jobStatus = value; }
/**
* <p>Current status of the segment detection job.</p>
*/
inline void SetJobStatus(VideoJobStatus&& value) { m_jobStatus = std::move(value); }
/**
* <p>Current status of the segment detection job.</p>
*/
inline GetSegmentDetectionResult& WithJobStatus(const VideoJobStatus& value) { SetJobStatus(value); return *this;}
/**
* <p>Current status of the segment detection job.</p>
*/
inline GetSegmentDetectionResult& WithJobStatus(VideoJobStatus&& value) { SetJobStatus(std::move(value)); return *this;}
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline const Aws::String& GetStatusMessage() const{ return m_statusMessage; }
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline void SetStatusMessage(const Aws::String& value) { m_statusMessage = value; }
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline void SetStatusMessage(Aws::String&& value) { m_statusMessage = std::move(value); }
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline void SetStatusMessage(const char* value) { m_statusMessage.assign(value); }
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline GetSegmentDetectionResult& WithStatusMessage(const Aws::String& value) { SetStatusMessage(value); return *this;}
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline GetSegmentDetectionResult& WithStatusMessage(Aws::String&& value) { SetStatusMessage(std::move(value)); return *this;}
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline GetSegmentDetectionResult& WithStatusMessage(const char* value) { SetStatusMessage(value); return *this;}
/**
* <p>Currently, Amazon Rekognition Video returns a single object in the
* <code>VideoMetadata</code> array. The object contains information about the
* video stream in the input file that Amazon Rekognition Video chose to analyze.
* The <code>VideoMetadata</code> object includes the video codec, video format and
* other information. Video metadata is returned in each page of information
* returned by <code>GetSegmentDetection</code>.</p>
*/
inline const Aws::Vector<VideoMetadata>& GetVideoMetadata() const{ return m_videoMetadata; }
/**
* <p>Currently, Amazon Rekognition Video returns a single object in the
* <code>VideoMetadata</code> array. The object contains information about the
* video stream in the input file that Amazon Rekognition Video chose to analyze.
* The <code>VideoMetadata</code> object includes the video codec, video format and
* other information. Video metadata is returned in each page of information
* returned by <code>GetSegmentDetection</code>.</p>
*/
inline void SetVideoMetadata(const Aws::Vector<VideoMetadata>& value) { m_videoMetadata = value; }
/**
* <p>Currently, Amazon Rekognition Video returns a single object in the
* <code>VideoMetadata</code> array. The object contains information about the
* video stream in the input file that Amazon Rekognition Video chose to analyze.
* The <code>VideoMetadata</code> object includes the video codec, video format and
* other information. Video metadata is returned in each page of information
* returned by <code>GetSegmentDetection</code>.</p>
*/
inline void SetVideoMetadata(Aws::Vector<VideoMetadata>&& value) { m_videoMetadata = std::move(value); }
/**
* <p>Currently, Amazon Rekognition Video returns a single object in the
* <code>VideoMetadata</code> array. The object contains information about the
* video stream in the input file that Amazon Rekognition Video chose to analyze.
* The <code>VideoMetadata</code> object includes the video codec, video format and
* other information. Video metadata is returned in each page of information
* returned by <code>GetSegmentDetection</code>.</p>
*/
inline GetSegmentDetectionResult& WithVideoMetadata(const Aws::Vector<VideoMetadata>& value) { SetVideoMetadata(value); return *this;}
/**
* <p>Currently, Amazon Rekognition Video returns a single object in the
* <code>VideoMetadata</code> array. The object contains information about the
* video stream in the input file that Amazon Rekognition Video chose to analyze.
* The <code>VideoMetadata</code> object includes the video codec, video format and
* other information. Video metadata is returned in each page of information
* returned by <code>GetSegmentDetection</code>.</p>
*/
inline GetSegmentDetectionResult& WithVideoMetadata(Aws::Vector<VideoMetadata>&& value) { SetVideoMetadata(std::move(value)); return *this;}
/**
* <p>Currently, Amazon Rekognition Video returns a single object in the
* <code>VideoMetadata</code> array. The object contains information about the
* video stream in the input file that Amazon Rekognition Video chose to analyze.
* The <code>VideoMetadata</code> object includes the video codec, video format and
* other information. Video metadata is returned in each page of information
* returned by <code>GetSegmentDetection</code>.</p>
*/
inline GetSegmentDetectionResult& AddVideoMetadata(const VideoMetadata& value) { m_videoMetadata.push_back(value); return *this; }
/**
* <p>Currently, Amazon Rekognition Video returns a single object in the
* <code>VideoMetadata</code> array. The object contains information about the
* video stream in the input file that Amazon Rekognition Video chose to analyze.
* The <code>VideoMetadata</code> object includes the video codec, video format and
* other information. Video metadata is returned in each page of information
* returned by <code>GetSegmentDetection</code>.</p>
*/
inline GetSegmentDetectionResult& AddVideoMetadata(VideoMetadata&& value) { m_videoMetadata.push_back(std::move(value)); return *this; }
/**
* <p>An array of objects. There can be multiple audio streams. Each
* <code>AudioMetadata</code> object contains metadata for a single audio stream.
* Audio information in an <code>AudioMetadata</code> object includes the audio
* codec, the number of audio channels, the duration of the audio stream, and the
* sample rate. Audio metadata is returned in each page of information returned by
* <code>GetSegmentDetection</code>.</p>
*/
inline const Aws::Vector<AudioMetadata>& GetAudioMetadata() const{ return m_audioMetadata; }
/**
* <p>An array of objects. There can be multiple audio streams. Each
* <code>AudioMetadata</code> object contains metadata for a single audio stream.
* Audio information in an <code>AudioMetadata</code> object includes the audio
* codec, the number of audio channels, the duration of the audio stream, and the
* sample rate. Audio metadata is returned in each page of information returned by
* <code>GetSegmentDetection</code>.</p>
*/
inline void SetAudioMetadata(const Aws::Vector<AudioMetadata>& value) { m_audioMetadata = value; }
/**
* <p>An array of objects. There can be multiple audio streams. Each
* <code>AudioMetadata</code> object contains metadata for a single audio stream.
* Audio information in an <code>AudioMetadata</code> object includes the audio
* codec, the number of audio channels, the duration of the audio stream, and the
* sample rate. Audio metadata is returned in each page of information returned by
* <code>GetSegmentDetection</code>.</p>
*/
inline void SetAudioMetadata(Aws::Vector<AudioMetadata>&& value) { m_audioMetadata = std::move(value); }
/**
* <p>An array of objects. There can be multiple audio streams. Each
* <code>AudioMetadata</code> object contains metadata for a single audio stream.
* Audio information in an <code>AudioMetadata</code> object includes the audio
* codec, the number of audio channels, the duration of the audio stream, and the
* sample rate. Audio metadata is returned in each page of information returned by
* <code>GetSegmentDetection</code>.</p>
*/
inline GetSegmentDetectionResult& WithAudioMetadata(const Aws::Vector<AudioMetadata>& value) { SetAudioMetadata(value); return *this;}
/**
* <p>An array of objects. There can be multiple audio streams. Each
* <code>AudioMetadata</code> object contains metadata for a single audio stream.
* Audio information in an <code>AudioMetadata</code> object includes the audio
* codec, the number of audio channels, the duration of the audio stream, and the
* sample rate. Audio metadata is returned in each page of information returned by
* <code>GetSegmentDetection</code>.</p>
*/
inline GetSegmentDetectionResult& WithAudioMetadata(Aws::Vector<AudioMetadata>&& value) { SetAudioMetadata(std::move(value)); return *this;}
/**
* <p>An array of objects. There can be multiple audio streams. Each
* <code>AudioMetadata</code> object contains metadata for a single audio stream.
* Audio information in an <code>AudioMetadata</code> object includes the audio
* codec, the number of audio channels, the duration of the audio stream, and the
* sample rate. Audio metadata is returned in each page of information returned by
* <code>GetSegmentDetection</code>.</p>
*/
inline GetSegmentDetectionResult& AddAudioMetadata(const AudioMetadata& value) { m_audioMetadata.push_back(value); return *this; }
/**
* <p>An array of objects. There can be multiple audio streams. Each
* <code>AudioMetadata</code> object contains metadata for a single audio stream.
* Audio information in an <code>AudioMetadata</code> object includes the audio
* codec, the number of audio channels, the duration of the audio stream, and the
* sample rate. Audio metadata is returned in each page of information returned by
* <code>GetSegmentDetection</code>.</p>
*/
inline GetSegmentDetectionResult& AddAudioMetadata(AudioMetadata&& value) { m_audioMetadata.push_back(std::move(value)); return *this; }
/**
* <p>If the previous response was incomplete (because there are more segments to
* retrieve), Amazon Rekognition Video returns a pagination token in the response.
* You can use this pagination token to retrieve the next set of segments.</p>
*/
inline const Aws::String& GetNextToken() const{ return m_nextToken; }
/**
* <p>If the previous response was incomplete (because there are more segments to
* retrieve), Amazon Rekognition Video returns a pagination token in the response.
* You can use this pagination token to retrieve the next set of segments.</p>
*/
inline void SetNextToken(const Aws::String& value) { m_nextToken = value; }
/**
* <p>If the previous response was incomplete (because there are more segments to
* retrieve), Amazon Rekognition Video returns a pagination token in the response.
* You can use this pagination token to retrieve the next set of segments.</p>
*/
inline void SetNextToken(Aws::String&& value) { m_nextToken = std::move(value); }
/**
* <p>If the previous response was incomplete (because there are more segments to
* retrieve), Amazon Rekognition Video returns a pagination token in the response.
* You can use this pagination token to retrieve the next set of segments.</p>
*/
inline void SetNextToken(const char* value) { m_nextToken.assign(value); }
/**
* <p>If the previous response was incomplete (because there are more segments to
* retrieve), Amazon Rekognition Video returns a pagination token in the response.
* You can use this pagination token to retrieve the next set of segments.</p>
*/
inline GetSegmentDetectionResult& WithNextToken(const Aws::String& value) { SetNextToken(value); return *this;}
/**
* <p>If the previous response was incomplete (because there are more segments to
* retrieve), Amazon Rekognition Video returns a pagination token in the response.
* You can use this pagination token to retrieve the next set of segments.</p>
*/
inline GetSegmentDetectionResult& WithNextToken(Aws::String&& value) { SetNextToken(std::move(value)); return *this;}
/**
* <p>If the previous response was incomplete (because there are more segments to
* retrieve), Amazon Rekognition Video returns a pagination token in the response.
* You can use this pagination token to retrieve the next set of segments.</p>
*/
inline GetSegmentDetectionResult& WithNextToken(const char* value) { SetNextToken(value); return *this;}
/**
* <p>An array of segments detected in a video.</p>
*/
inline const Aws::Vector<SegmentDetection>& GetSegments() const{ return m_segments; }
/**
* <p>An array of segments detected in a video.</p>
*/
inline void SetSegments(const Aws::Vector<SegmentDetection>& value) { m_segments = value; }
/**
* <p>An array of segments detected in a video.</p>
*/
inline void SetSegments(Aws::Vector<SegmentDetection>&& value) { m_segments = std::move(value); }
/**
* <p>An array of segments detected in a video.</p>
*/
inline GetSegmentDetectionResult& WithSegments(const Aws::Vector<SegmentDetection>& value) { SetSegments(value); return *this;}
/**
* <p>An array of segments detected in a video.</p>
*/
inline GetSegmentDetectionResult& WithSegments(Aws::Vector<SegmentDetection>&& value) { SetSegments(std::move(value)); return *this;}
/**
* <p>An array of segments detected in a video.</p>
*/
inline GetSegmentDetectionResult& AddSegments(const SegmentDetection& value) { m_segments.push_back(value); return *this; }
/**
* <p>An array of segments detected in a video.</p>
*/
inline GetSegmentDetectionResult& AddSegments(SegmentDetection&& value) { m_segments.push_back(std::move(value)); return *this; }
/**
* <p>An array containing the segment types requested in the call to
* <code>StartSegmentDetection</code>. </p>
*/
inline const Aws::Vector<SegmentTypeInfo>& GetSelectedSegmentTypes() const{ return m_selectedSegmentTypes; }
/**
* <p>An array containing the segment types requested in the call to
* <code>StartSegmentDetection</code>. </p>
*/
inline void SetSelectedSegmentTypes(const Aws::Vector<SegmentTypeInfo>& value) { m_selectedSegmentTypes = value; }
/**
* <p>An array containing the segment types requested in the call to
* <code>StartSegmentDetection</code>. </p>
*/
inline void SetSelectedSegmentTypes(Aws::Vector<SegmentTypeInfo>&& value) { m_selectedSegmentTypes = std::move(value); }
/**
* <p>An array containing the segment types requested in the call to
* <code>StartSegmentDetection</code>. </p>
*/
inline GetSegmentDetectionResult& WithSelectedSegmentTypes(const Aws::Vector<SegmentTypeInfo>& value) { SetSelectedSegmentTypes(value); return *this;}
/**
* <p>An array containing the segment types requested in the call to
* <code>StartSegmentDetection</code>. </p>
*/
inline GetSegmentDetectionResult& WithSelectedSegmentTypes(Aws::Vector<SegmentTypeInfo>&& value) { SetSelectedSegmentTypes(std::move(value)); return *this;}
/**
* <p>An array containing the segment types requested in the call to
* <code>StartSegmentDetection</code>. </p>
*/
inline GetSegmentDetectionResult& AddSelectedSegmentTypes(const SegmentTypeInfo& value) { m_selectedSegmentTypes.push_back(value); return *this; }
/**
* <p>An array containing the segment types requested in the call to
* <code>StartSegmentDetection</code>. </p>
*/
inline GetSegmentDetectionResult& AddSelectedSegmentTypes(SegmentTypeInfo&& value) { m_selectedSegmentTypes.push_back(std::move(value)); return *this; }
private:
VideoJobStatus m_jobStatus;
Aws::String m_statusMessage;
Aws::Vector<VideoMetadata> m_videoMetadata;
Aws::Vector<AudioMetadata> m_audioMetadata;
Aws::String m_nextToken;
Aws::Vector<SegmentDetection> m_segments;
Aws::Vector<SegmentTypeInfo> m_selectedSegmentTypes;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
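A hedged consumer-side sketch of one page of this result type, assuming the caller already holds a GetSegmentDetectionResult; the logging and return convention are illustrative:

#include <aws/rekognition/model/GetSegmentDetectionResult.h>
#include <iostream>

// Process one page of segments; returns the token for the next page
// (empty when the job failed or the last page has been consumed).
Aws::String HandleSegmentPage(const Aws::Rekognition::Model::GetSegmentDetectionResult& result)
{
    using Aws::Rekognition::Model::VideoJobStatus;
    if (result.GetJobStatus() == VideoJobStatus::FAILED)
    {
        std::cerr << "segment job failed: " << result.GetStatusMessage() << std::endl;
        return "";
    }
    for (const auto& segment : result.GetSegments())
    {
        std::cout << segment.GetStartTimestampMillis() << " -> "
                  << segment.GetEndTimestampMillis() << " ms" << std::endl;
    }
    return result.GetNextToken();
}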


@@ -0,0 +1,189 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/RekognitionRequest.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <utility>
namespace Aws
{
namespace Rekognition
{
namespace Model
{
/**
*/
class AWS_REKOGNITION_API GetTextDetectionRequest : public RekognitionRequest
{
public:
GetTextDetectionRequest();
// Service request name is the Operation name which will send this request out,
// each operation should have a unique request name, so that we can get the operation's name from this request.
// Note: this is not true for responses; multiple operations may have the same response name,
// so we cannot get the operation's name from a response.
inline virtual const char* GetServiceRequestName() const override { return "GetTextDetection"; }
Aws::String SerializePayload() const override;
Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
/**
* <p>Job identifier for the text detection operation for which you want results
* returned. You get the job identifier from an initial call to
* <code>StartTextDetection</code>.</p>
*/
inline const Aws::String& GetJobId() const{ return m_jobId; }
/**
* <p>Job identifier for the text detection operation for which you want results
* returned. You get the job identifier from an initial call to
* <code>StartTextDetection</code>.</p>
*/
inline bool JobIdHasBeenSet() const { return m_jobIdHasBeenSet; }
/**
* <p>Job identifier for the text detection operation for which you want results
* returned. You get the job identifier from an initial call to
* <code>StartTextDetection</code>.</p>
*/
inline void SetJobId(const Aws::String& value) { m_jobIdHasBeenSet = true; m_jobId = value; }
/**
* <p>Job identifier for the text detection operation for which you want results
* returned. You get the job identifier from an initial call to
* <code>StartTextDetection</code>.</p>
*/
inline void SetJobId(Aws::String&& value) { m_jobIdHasBeenSet = true; m_jobId = std::move(value); }
/**
* <p>Job identifier for the text detection operation for which you want results
* returned. You get the job identifier from an initial call to
* <code>StartTextDetection</code>.</p>
*/
inline void SetJobId(const char* value) { m_jobIdHasBeenSet = true; m_jobId.assign(value); }
/**
* <p>Job identifier for the text detection operation for which you want results
* returned. You get the job identifier from an initial call to
* <code>StartTextDetection</code>.</p>
*/
inline GetTextDetectionRequest& WithJobId(const Aws::String& value) { SetJobId(value); return *this;}
/**
* <p>Job identifier for the text detection operation for which you want results
* returned. You get the job identifier from an initial call to
* <code>StartTextDetection</code>.</p>
*/
inline GetTextDetectionRequest& WithJobId(Aws::String&& value) { SetJobId(std::move(value)); return *this;}
/**
* <p>Job identifier for the text detection operation for which you want results
* returned. You get the job identifier from an initial call to
* <code>StartTextDetection</code>.</p>
*/
inline GetTextDetectionRequest& WithJobId(const char* value) { SetJobId(value); return *this;}
/**
* <p>Maximum number of results to return per paginated call. The largest value you
* can specify is 1000.</p>
*/
inline int GetMaxResults() const{ return m_maxResults; }
/**
* <p>Maximum number of results to return per paginated call. The largest value you
* can specify is 1000.</p>
*/
inline bool MaxResultsHasBeenSet() const { return m_maxResultsHasBeenSet; }
/**
* <p>Maximum number of results to return per paginated call. The largest value you
* can specify is 1000.</p>
*/
inline void SetMaxResults(int value) { m_maxResultsHasBeenSet = true; m_maxResults = value; }
/**
* <p>Maximum number of results to return per paginated call. The largest value you
* can specify is 1000.</p>
*/
inline GetTextDetectionRequest& WithMaxResults(int value) { SetMaxResults(value); return *this;}
/**
* <p>If the previous response was incomplete (because there is more text to
* retrieve), Amazon Rekognition Video returns a pagination token in the response.
* You can use this pagination token to retrieve the next set of text.</p>
*/
inline const Aws::String& GetNextToken() const{ return m_nextToken; }
/**
* <p>If the previous response was incomplete (because there is more text to
* retrieve), Amazon Rekognition Video returns a pagination token in the response.
* You can use this pagination token to retrieve the next set of text.</p>
*/
inline bool NextTokenHasBeenSet() const { return m_nextTokenHasBeenSet; }
/**
* <p>If the previous response was incomplete (because there is more text to
* retrieve), Amazon Rekognition Video returns a pagination token in the response.
* You can use this pagination token to retrieve the next set of text.</p>
*/
inline void SetNextToken(const Aws::String& value) { m_nextTokenHasBeenSet = true; m_nextToken = value; }
/**
* <p>If the previous response was incomplete (because there is more text to
* retrieve), Amazon Rekognition Video returns a pagination token in the response.
* You can use this pagination token to retrieve the next set of text.</p>
*/
inline void SetNextToken(Aws::String&& value) { m_nextTokenHasBeenSet = true; m_nextToken = std::move(value); }
/**
* <p>If the previous response was incomplete (because there is more text to
* retrieve), Amazon Rekognition Video returns a pagination token in the response.
* You can use this pagination token to retrieve the next set of text.</p>
*/
inline void SetNextToken(const char* value) { m_nextTokenHasBeenSet = true; m_nextToken.assign(value); }
/**
* <p>If the previous response was incomplete (because there is more text to
* retrieve), Amazon Rekognition Video returns a pagination token in the response.
* You can use this pagination token to retrieve the next set of text.</p>
*/
inline GetTextDetectionRequest& WithNextToken(const Aws::String& value) { SetNextToken(value); return *this;}
/**
* <p>If the previous response was incomplete (because there is more text to
* retrieve), Amazon Rekognition Video returns a pagination token in the response.
* You can use this pagination token to retrieve the next set of text.</p>
*/
inline GetTextDetectionRequest& WithNextToken(Aws::String&& value) { SetNextToken(std::move(value)); return *this;}
/**
* <p>If the previous response was incomplete (because there is more text to
* retrieve), Amazon Rekognition Video returns a pagination token in the response.
* You can use this pagination token to retrieve the next set of text.</p>
*/
inline GetTextDetectionRequest& WithNextToken(const char* value) { SetNextToken(value); return *this;}
private:
Aws::String m_jobId;
bool m_jobIdHasBeenSet;
int m_maxResults;
bool m_maxResultsHasBeenSet;
Aws::String m_nextToken;
bool m_nextTokenHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
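Since results of an asynchronous job are only meaningful once the job leaves IN_PROGRESS, a minimal polling sketch may help; the client, job ID, and five-second interval are assumptions (production code would typically rely on the Amazon SNS completion notification instead):

#include <aws/rekognition/RekognitionClient.h>
#include <aws/rekognition/model/GetTextDetectionRequest.h>
#include <aws/rekognition/model/VideoJobStatus.h>
#include <chrono>
#include <thread>

// Poll GetTextDetection until the job reports a terminal status.
Aws::Rekognition::Model::VideoJobStatus
WaitForTextJob(Aws::Rekognition::RekognitionClient& client, const Aws::String& jobId)
{
    using Aws::Rekognition::Model::VideoJobStatus;
    for (;;)
    {
        Aws::Rekognition::Model::GetTextDetectionRequest request;
        request.WithJobId(jobId).WithMaxResults(1); // status probe only
        auto outcome = client.GetTextDetection(request);
        if (!outcome.IsSuccess())
        {
            return VideoJobStatus::FAILED; // treat transport errors as failure here
        }
        VideoJobStatus status = outcome.GetResult().GetJobStatus();
        if (status != VideoJobStatus::IN_PROGRESS)
        {
            return status;
        }
        std::this_thread::sleep_for(std::chrono::seconds(5));
    }
}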


@@ -0,0 +1,269 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/model/VideoJobStatus.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/rekognition/model/VideoMetadata.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/rekognition/model/TextDetectionResult.h>
#include <utility>
namespace Aws
{
template<typename RESULT_TYPE>
class AmazonWebServiceResult;
namespace Utils
{
namespace Json
{
class JsonValue;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
class AWS_REKOGNITION_API GetTextDetectionResult
{
public:
GetTextDetectionResult();
GetTextDetectionResult(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
GetTextDetectionResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
/**
* <p>Current status of the text detection job.</p>
*/
inline const VideoJobStatus& GetJobStatus() const{ return m_jobStatus; }
/**
* <p>Current status of the text detection job.</p>
*/
inline void SetJobStatus(const VideoJobStatus& value) { m_jobStatus = value; }
/**
* <p>Current status of the text detection job.</p>
*/
inline void SetJobStatus(VideoJobStatus&& value) { m_jobStatus = std::move(value); }
/**
* <p>Current status of the text detection job.</p>
*/
inline GetTextDetectionResult& WithJobStatus(const VideoJobStatus& value) { SetJobStatus(value); return *this;}
/**
* <p>Current status of the text detection job.</p>
*/
inline GetTextDetectionResult& WithJobStatus(VideoJobStatus&& value) { SetJobStatus(std::move(value)); return *this;}
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline const Aws::String& GetStatusMessage() const{ return m_statusMessage; }
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline void SetStatusMessage(const Aws::String& value) { m_statusMessage = value; }
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline void SetStatusMessage(Aws::String&& value) { m_statusMessage = std::move(value); }
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline void SetStatusMessage(const char* value) { m_statusMessage.assign(value); }
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline GetTextDetectionResult& WithStatusMessage(const Aws::String& value) { SetStatusMessage(value); return *this;}
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline GetTextDetectionResult& WithStatusMessage(Aws::String&& value) { SetStatusMessage(std::move(value)); return *this;}
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline GetTextDetectionResult& WithStatusMessage(const char* value) { SetStatusMessage(value); return *this;}
inline const VideoMetadata& GetVideoMetadata() const{ return m_videoMetadata; }
inline void SetVideoMetadata(const VideoMetadata& value) { m_videoMetadata = value; }
inline void SetVideoMetadata(VideoMetadata&& value) { m_videoMetadata = std::move(value); }
inline GetTextDetectionResult& WithVideoMetadata(const VideoMetadata& value) { SetVideoMetadata(value); return *this;}
inline GetTextDetectionResult& WithVideoMetadata(VideoMetadata&& value) { SetVideoMetadata(std::move(value)); return *this;}
/**
* <p>An array of text detected in the video. Each element contains the detected
* text, the time in milliseconds from the start of the video that the text was
* detected, and where it was detected on the screen.</p>
*/
inline const Aws::Vector<TextDetectionResult>& GetTextDetections() const{ return m_textDetections; }
/**
* <p>An array of text detected in the video. Each element contains the detected
* text, the time in milliseconds from the start of the video that the text was
* detected, and where it was detected on the screen.</p>
*/
inline void SetTextDetections(const Aws::Vector<TextDetectionResult>& value) { m_textDetections = value; }
/**
* <p>An array of text detected in the video. Each element contains the detected
* text, the time in milliseconds from the start of the video that the text was
* detected, and where it was detected on the screen.</p>
*/
inline void SetTextDetections(Aws::Vector<TextDetectionResult>&& value) { m_textDetections = std::move(value); }
/**
* <p>An array of text detected in the video. Each element contains the detected
* text, the time in milliseconds from the start of the video that the text was
* detected, and where it was detected on the screen.</p>
*/
inline GetTextDetectionResult& WithTextDetections(const Aws::Vector<TextDetectionResult>& value) { SetTextDetections(value); return *this;}
/**
* <p>An array of text detected in the video. Each element contains the detected
* text, the time in milliseconds from the start of the video that the text was
* detected, and where it was detected on the screen.</p>
*/
inline GetTextDetectionResult& WithTextDetections(Aws::Vector<TextDetectionResult>&& value) { SetTextDetections(std::move(value)); return *this;}
/**
* <p>An array of text detected in the video. Each element contains the detected
* text, the time in milliseconds from the start of the video that the text was
* detected, and where it was detected on the screen.</p>
*/
inline GetTextDetectionResult& AddTextDetections(const TextDetectionResult& value) { m_textDetections.push_back(value); return *this; }
/**
* <p>An array of text detected in the video. Each element contains the detected
* text, the time in milliseconds from the start of the video that the text was
* detected, and where it was detected on the screen.</p>
*/
inline GetTextDetectionResult& AddTextDetections(TextDetectionResult&& value) { m_textDetections.push_back(std::move(value)); return *this; }
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of text.</p>
*/
inline const Aws::String& GetNextToken() const{ return m_nextToken; }
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of text.</p>
*/
inline void SetNextToken(const Aws::String& value) { m_nextToken = value; }
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of text.</p>
*/
inline void SetNextToken(Aws::String&& value) { m_nextToken = std::move(value); }
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of text.</p>
*/
inline void SetNextToken(const char* value) { m_nextToken.assign(value); }
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of text.</p>
*/
inline GetTextDetectionResult& WithNextToken(const Aws::String& value) { SetNextToken(value); return *this;}
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of text.</p>
*/
inline GetTextDetectionResult& WithNextToken(Aws::String&& value) { SetNextToken(std::move(value)); return *this;}
/**
* <p>If the response is truncated, Amazon Rekognition Video returns this token
* that you can use in the subsequent request to retrieve the next set of text.</p>
*/
inline GetTextDetectionResult& WithNextToken(const char* value) { SetNextToken(value); return *this;}
/**
* <p>Version number of the text detection model that was used to detect text.</p>
*/
inline const Aws::String& GetTextModelVersion() const{ return m_textModelVersion; }
/**
* <p>Version number of the text detection model that was used to detect text.</p>
*/
inline void SetTextModelVersion(const Aws::String& value) { m_textModelVersion = value; }
/**
* <p>Version number of the text detection model that was used to detect text.</p>
*/
inline void SetTextModelVersion(Aws::String&& value) { m_textModelVersion = std::move(value); }
/**
* <p>Version number of the text detection model that was used to detect text.</p>
*/
inline void SetTextModelVersion(const char* value) { m_textModelVersion.assign(value); }
/**
* <p>Version number of the text detection model that was used to detect text.</p>
*/
inline GetTextDetectionResult& WithTextModelVersion(const Aws::String& value) { SetTextModelVersion(value); return *this;}
/**
* <p>Version number of the text detection model that was used to detect text.</p>
*/
inline GetTextDetectionResult& WithTextModelVersion(Aws::String&& value) { SetTextModelVersion(std::move(value)); return *this;}
/**
* <p>Version number of the text detection model that was used to detect text.</p>
*/
inline GetTextDetectionResult& WithTextModelVersion(const char* value) { SetTextModelVersion(value); return *this;}
private:
VideoJobStatus m_jobStatus;
Aws::String m_statusMessage;
VideoMetadata m_videoMetadata;
Aws::Vector<TextDetectionResult> m_textDetections;
Aws::String m_nextToken;
Aws::String m_textModelVersion;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
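A sketch tying the request and result together: gathering every detected line of text across paginated responses, assuming an initialized client and a job that has already succeeded:

#include <aws/rekognition/RekognitionClient.h>
#include <aws/rekognition/model/GetTextDetectionRequest.h>
#include <aws/rekognition/model/TextTypes.h>
#include <aws/core/utils/memory/stl/AWSVector.h>

// Collect the detected text of every LINE element across all pages.
Aws::Vector<Aws::String>
CollectDetectedLines(Aws::Rekognition::RekognitionClient& client, const Aws::String& jobId)
{
    Aws::Vector<Aws::String> lines;
    Aws::String nextToken;
    do
    {
        Aws::Rekognition::Model::GetTextDetectionRequest request;
        request.WithJobId(jobId).WithMaxResults(1000);
        if (!nextToken.empty())
        {
            request.SetNextToken(nextToken);
        }
        auto outcome = client.GetTextDetection(request);
        if (!outcome.IsSuccess())
        {
            break; // a real caller would surface outcome.GetError()
        }
        const auto& result = outcome.GetResult();
        for (const auto& item : result.GetTextDetections())
        {
            if (item.GetTextDetection().GetType() == Aws::Rekognition::Model::TextTypes::LINE)
            {
                lines.push_back(item.GetTextDetection().GetDetectedText());
            }
        }
        nextToken = result.GetNextToken();
    } while (!nextToken.empty());
    return lines;
}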


@@ -0,0 +1,67 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/model/S3Object.h>
#include <utility>
namespace Aws
{
namespace Utils
{
namespace Json
{
class JsonValue;
class JsonView;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
/**
* <p>The S3 bucket that contains the Ground Truth manifest file.</p><p><h3>See
* Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/rekognition-2016-06-27/GroundTruthManifest">AWS
* API Reference</a></p>
*/
class AWS_REKOGNITION_API GroundTruthManifest
{
public:
GroundTruthManifest();
GroundTruthManifest(Aws::Utils::Json::JsonView jsonValue);
GroundTruthManifest& operator=(Aws::Utils::Json::JsonView jsonValue);
Aws::Utils::Json::JsonValue Jsonize() const;
inline const S3Object& GetS3Object() const{ return m_s3Object; }
inline bool S3ObjectHasBeenSet() const { return m_s3ObjectHasBeenSet; }
inline void SetS3Object(const S3Object& value) { m_s3ObjectHasBeenSet = true; m_s3Object = value; }
inline void SetS3Object(S3Object&& value) { m_s3ObjectHasBeenSet = true; m_s3Object = std::move(value); }
inline GroundTruthManifest& WithS3Object(const S3Object& value) { SetS3Object(value); return *this;}
inline GroundTruthManifest& WithS3Object(S3Object&& value) { SetS3Object(std::move(value)); return *this;}
private:
S3Object m_s3Object;
bool m_s3ObjectHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
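A brief construction sketch for this type; the bucket and key below are placeholders, not values from this commit:

#include <aws/rekognition/model/GroundTruthManifest.h>
#include <aws/rekognition/model/S3Object.h>
#include <utility>

// Point a GroundTruthManifest at a SageMaker Ground Truth manifest file in S3.
Aws::Rekognition::Model::GroundTruthManifest MakeManifest()
{
    Aws::Rekognition::Model::S3Object s3Object;
    s3Object.WithBucket("my-training-bucket")      // hypothetical bucket
            .WithName("manifests/train.manifest"); // hypothetical object key
    Aws::Rekognition::Model::GroundTruthManifest manifest;
    manifest.SetS3Object(std::move(s3Object));
    return manifest;
}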


@@ -0,0 +1,192 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <utility>
namespace Aws
{
namespace Utils
{
namespace Json
{
class JsonValue;
class JsonView;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
/**
* <p>Shows the results of the human in the loop evaluation. If there is no
* HumanLoopArn, the input did not trigger human review.</p><p><h3>See Also:</h3>
* <a
* href="http://docs.aws.amazon.com/goto/WebAPI/rekognition-2016-06-27/HumanLoopActivationOutput">AWS
* API Reference</a></p>
*/
class AWS_REKOGNITION_API HumanLoopActivationOutput
{
public:
HumanLoopActivationOutput();
HumanLoopActivationOutput(Aws::Utils::Json::JsonView jsonValue);
HumanLoopActivationOutput& operator=(Aws::Utils::Json::JsonView jsonValue);
Aws::Utils::Json::JsonValue Jsonize() const;
/**
* <p>The Amazon Resource Name (ARN) of the HumanLoop created.</p>
*/
inline const Aws::String& GetHumanLoopArn() const{ return m_humanLoopArn; }
/**
* <p>The Amazon Resource Name (ARN) of the HumanLoop created.</p>
*/
inline bool HumanLoopArnHasBeenSet() const { return m_humanLoopArnHasBeenSet; }
/**
* <p>The Amazon Resource Name (ARN) of the HumanLoop created.</p>
*/
inline void SetHumanLoopArn(const Aws::String& value) { m_humanLoopArnHasBeenSet = true; m_humanLoopArn = value; }
/**
* <p>The Amazon Resource Name (ARN) of the HumanLoop created.</p>
*/
inline void SetHumanLoopArn(Aws::String&& value) { m_humanLoopArnHasBeenSet = true; m_humanLoopArn = std::move(value); }
/**
* <p>The Amazon Resource Name (ARN) of the HumanLoop created.</p>
*/
inline void SetHumanLoopArn(const char* value) { m_humanLoopArnHasBeenSet = true; m_humanLoopArn.assign(value); }
/**
* <p>The Amazon Resource Name (ARN) of the HumanLoop created.</p>
*/
inline HumanLoopActivationOutput& WithHumanLoopArn(const Aws::String& value) { SetHumanLoopArn(value); return *this;}
/**
* <p>The Amazon Resource Name (ARN) of the HumanLoop created.</p>
*/
inline HumanLoopActivationOutput& WithHumanLoopArn(Aws::String&& value) { SetHumanLoopArn(std::move(value)); return *this;}
/**
* <p>The Amazon Resource Name (ARN) of the HumanLoop created.</p>
*/
inline HumanLoopActivationOutput& WithHumanLoopArn(const char* value) { SetHumanLoopArn(value); return *this;}
/**
* <p>Shows if and why human review was needed.</p>
*/
inline const Aws::Vector<Aws::String>& GetHumanLoopActivationReasons() const{ return m_humanLoopActivationReasons; }
/**
* <p>Shows if and why human review was needed.</p>
*/
inline bool HumanLoopActivationReasonsHasBeenSet() const { return m_humanLoopActivationReasonsHasBeenSet; }
/**
* <p>Shows if and why human review was needed.</p>
*/
inline void SetHumanLoopActivationReasons(const Aws::Vector<Aws::String>& value) { m_humanLoopActivationReasonsHasBeenSet = true; m_humanLoopActivationReasons = value; }
/**
* <p>Shows if and why human review was needed.</p>
*/
inline void SetHumanLoopActivationReasons(Aws::Vector<Aws::String>&& value) { m_humanLoopActivationReasonsHasBeenSet = true; m_humanLoopActivationReasons = std::move(value); }
/**
* <p>Shows if and why human review was needed.</p>
*/
inline HumanLoopActivationOutput& WithHumanLoopActivationReasons(const Aws::Vector<Aws::String>& value) { SetHumanLoopActivationReasons(value); return *this;}
/**
* <p>Shows if and why human review was needed.</p>
*/
inline HumanLoopActivationOutput& WithHumanLoopActivationReasons(Aws::Vector<Aws::String>&& value) { SetHumanLoopActivationReasons(std::move(value)); return *this;}
/**
* <p>Shows if and why human review was needed.</p>
*/
inline HumanLoopActivationOutput& AddHumanLoopActivationReasons(const Aws::String& value) { m_humanLoopActivationReasonsHasBeenSet = true; m_humanLoopActivationReasons.push_back(value); return *this; }
/**
* <p>Shows if and why human review was needed.</p>
*/
inline HumanLoopActivationOutput& AddHumanLoopActivationReasons(Aws::String&& value) { m_humanLoopActivationReasonsHasBeenSet = true; m_humanLoopActivationReasons.push_back(std::move(value)); return *this; }
/**
* <p>Shows if and why human review was needed.</p>
*/
inline HumanLoopActivationOutput& AddHumanLoopActivationReasons(const char* value) { m_humanLoopActivationReasonsHasBeenSet = true; m_humanLoopActivationReasons.push_back(value); return *this; }
/**
* <p>Shows the result of condition evaluations, including those conditions which
* activated a human review.</p>
*/
inline const Aws::String& GetHumanLoopActivationConditionsEvaluationResults() const{ return m_humanLoopActivationConditionsEvaluationResults; }
/**
* <p>Shows the result of condition evaluations, including those conditions which
* activated a human review.</p>
*/
inline bool HumanLoopActivationConditionsEvaluationResultsHasBeenSet() const { return m_humanLoopActivationConditionsEvaluationResultsHasBeenSet; }
/**
* <p>Shows the result of condition evaluations, including those conditions which
* activated a human review.</p>
*/
inline void SetHumanLoopActivationConditionsEvaluationResults(const Aws::String& value) { m_humanLoopActivationConditionsEvaluationResultsHasBeenSet = true; m_humanLoopActivationConditionsEvaluationResults = value; }
/**
* <p>Shows the result of condition evaluations, including those conditions which
* activated a human review.</p>
*/
inline void SetHumanLoopActivationConditionsEvaluationResults(Aws::String&& value) { m_humanLoopActivationConditionsEvaluationResultsHasBeenSet = true; m_humanLoopActivationConditionsEvaluationResults = std::move(value); }
/**
* <p>Shows the result of condition evaluations, including those conditions which
* activated a human review.</p>
*/
inline void SetHumanLoopActivationConditionsEvaluationResults(const char* value) { m_humanLoopActivationConditionsEvaluationResultsHasBeenSet = true; m_humanLoopActivationConditionsEvaluationResults.assign(value); }
/**
* <p>Shows the result of condition evaluations, including those conditions which
* activated a human review.</p>
*/
inline HumanLoopActivationOutput& WithHumanLoopActivationConditionsEvaluationResults(const Aws::String& value) { SetHumanLoopActivationConditionsEvaluationResults(value); return *this;}
/**
* <p>Shows the result of condition evaluations, including those conditions which
* activated a human review.</p>
*/
inline HumanLoopActivationOutput& WithHumanLoopActivationConditionsEvaluationResults(Aws::String&& value) { SetHumanLoopActivationConditionsEvaluationResults(std::move(value)); return *this;}
/**
* <p>Shows the result of condition evaluations, including those conditions which
* activated a human review.</p>
*/
inline HumanLoopActivationOutput& WithHumanLoopActivationConditionsEvaluationResults(const char* value) { SetHumanLoopActivationConditionsEvaluationResults(value); return *this;}
private:
Aws::String m_humanLoopArn;
bool m_humanLoopArnHasBeenSet;
Aws::Vector<Aws::String> m_humanLoopActivationReasons;
bool m_humanLoopActivationReasonsHasBeenSet;
Aws::String m_humanLoopActivationConditionsEvaluationResults;
bool m_humanLoopActivationConditionsEvaluationResultsHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
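An illustrative reader for this type, assuming the object came from a response such as DetectModerationLabels; the report format is arbitrary:

#include <aws/rekognition/model/HumanLoopActivationOutput.h>
#include <iostream>

// Report whether analysis of an image triggered a human review loop.
void ReportHumanReview(const Aws::Rekognition::Model::HumanLoopActivationOutput& output)
{
    if (output.GetHumanLoopArn().empty())
    {
        std::cout << "input did not trigger human review" << std::endl;
        return;
    }
    std::cout << "human loop started: " << output.GetHumanLoopArn() << std::endl;
    for (const auto& reason : output.GetHumanLoopActivationReasons())
    {
        std::cout << "  activation reason: " << reason << std::endl;
    }
}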


@@ -0,0 +1,201 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/rekognition/model/HumanLoopDataAttributes.h>
#include <utility>
namespace Aws
{
namespace Utils
{
namespace Json
{
class JsonValue;
class JsonView;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
/**
* <p>Sets up the flow definition the image will be sent to if one of the
* conditions is met. You can also set certain attributes of the image before
* review.</p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/rekognition-2016-06-27/HumanLoopConfig">AWS
* API Reference</a></p>
*/
class AWS_REKOGNITION_API HumanLoopConfig
{
public:
HumanLoopConfig();
HumanLoopConfig(Aws::Utils::Json::JsonView jsonValue);
HumanLoopConfig& operator=(Aws::Utils::Json::JsonView jsonValue);
Aws::Utils::Json::JsonValue Jsonize() const;
/**
* <p>The name of the human review used for this image. This should be kept unique
* within a region.</p>
*/
inline const Aws::String& GetHumanLoopName() const{ return m_humanLoopName; }
/**
* <p>The name of the human review used for this image. This should be kept unique
* within a region.</p>
*/
inline bool HumanLoopNameHasBeenSet() const { return m_humanLoopNameHasBeenSet; }
/**
* <p>The name of the human review used for this image. This should be kept unique
* within a region.</p>
*/
inline void SetHumanLoopName(const Aws::String& value) { m_humanLoopNameHasBeenSet = true; m_humanLoopName = value; }
/**
* <p>The name of the human review used for this image. This should be kept unique
* within a region.</p>
*/
inline void SetHumanLoopName(Aws::String&& value) { m_humanLoopNameHasBeenSet = true; m_humanLoopName = std::move(value); }
/**
* <p>The name of the human review used for this image. This should be kept unique
* within a region.</p>
*/
inline void SetHumanLoopName(const char* value) { m_humanLoopNameHasBeenSet = true; m_humanLoopName.assign(value); }
/**
* <p>The name of the human review used for this image. This should be kept unique
* within a region.</p>
*/
inline HumanLoopConfig& WithHumanLoopName(const Aws::String& value) { SetHumanLoopName(value); return *this;}
/**
* <p>The name of the human review used for this image. This should be kept unique
* within a region.</p>
*/
inline HumanLoopConfig& WithHumanLoopName(Aws::String&& value) { SetHumanLoopName(std::move(value)); return *this;}
/**
* <p>The name of the human review used for this image. This should be kept unique
* within a region.</p>
*/
inline HumanLoopConfig& WithHumanLoopName(const char* value) { SetHumanLoopName(value); return *this;}
/**
* <p>The Amazon Resource Name (ARN) of the flow definition. You can create a flow
* definition by using the Amazon SageMaker <a
* href="https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateFlowDefinition.html">CreateFlowDefinition</a>
* operation.</p>
*/
inline const Aws::String& GetFlowDefinitionArn() const{ return m_flowDefinitionArn; }
/**
* <p>The Amazon Resource Name (ARN) of the flow definition. You can create a flow
* definition by using the Amazon SageMaker <a
* href="https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateFlowDefinition.html">CreateFlowDefinition</a>
* operation.</p>
*/
inline bool FlowDefinitionArnHasBeenSet() const { return m_flowDefinitionArnHasBeenSet; }
/**
* <p>The Amazon Resource Name (ARN) of the flow definition. You can create a flow
* definition by using the Amazon SageMaker <a
* href="https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateFlowDefinition.html">CreateFlowDefinition</a>
* operation.</p>
*/
inline void SetFlowDefinitionArn(const Aws::String& value) { m_flowDefinitionArnHasBeenSet = true; m_flowDefinitionArn = value; }
/**
* <p>The Amazon Resource Name (ARN) of the flow definition. You can create a flow
* definition by using the Amazon SageMaker <a
* href="https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateFlowDefinition.html">CreateFlowDefinition</a>
* operation.</p>
*/
inline void SetFlowDefinitionArn(Aws::String&& value) { m_flowDefinitionArnHasBeenSet = true; m_flowDefinitionArn = std::move(value); }
/**
* <p>The Amazon Resource Name (ARN) of the flow definition. You can create a flow
* definition by using the Amazon SageMaker <a
* href="https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateFlowDefinition.html">CreateFlowDefinition</a>
* operation.</p>
*/
inline void SetFlowDefinitionArn(const char* value) { m_flowDefinitionArnHasBeenSet = true; m_flowDefinitionArn.assign(value); }
/**
* <p>The Amazon Resource Name (ARN) of the flow definition. You can create a flow
* definition by using the Amazon SageMaker <a
* href="https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateFlowDefinition.html">CreateFlowDefinition</a>
* operation.</p>
*/
inline HumanLoopConfig& WithFlowDefinitionArn(const Aws::String& value) { SetFlowDefinitionArn(value); return *this;}
/**
* <p>The Amazon Resource Name (ARN) of the flow definition. You can create a flow
* definition by using the Amazon SageMaker <a
* href="https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateFlowDefinition.html">CreateFlowDefinition</a>
* operation.</p>
*/
inline HumanLoopConfig& WithFlowDefinitionArn(Aws::String&& value) { SetFlowDefinitionArn(std::move(value)); return *this;}
/**
* <p>The Amazon Resource Name (ARN) of the flow definition. You can create a flow
* definition by using the Amazon SageMaker <a
* href="https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateFlowDefinition.html">CreateFlowDefinition</a>
* operation.</p>
*/
inline HumanLoopConfig& WithFlowDefinitionArn(const char* value) { SetFlowDefinitionArn(value); return *this;}
/**
* <p>Sets attributes of the input data.</p>
*/
inline const HumanLoopDataAttributes& GetDataAttributes() const{ return m_dataAttributes; }
/**
* <p>Sets attributes of the input data.</p>
*/
inline bool DataAttributesHasBeenSet() const { return m_dataAttributesHasBeenSet; }
/**
* <p>Sets attributes of the input data.</p>
*/
inline void SetDataAttributes(const HumanLoopDataAttributes& value) { m_dataAttributesHasBeenSet = true; m_dataAttributes = value; }
/**
* <p>Sets attributes of the input data.</p>
*/
inline void SetDataAttributes(HumanLoopDataAttributes&& value) { m_dataAttributesHasBeenSet = true; m_dataAttributes = std::move(value); }
/**
* <p>Sets attributes of the input data.</p>
*/
inline HumanLoopConfig& WithDataAttributes(const HumanLoopDataAttributes& value) { SetDataAttributes(value); return *this;}
/**
* <p>Sets attributes of the input data.</p>
*/
inline HumanLoopConfig& WithDataAttributes(HumanLoopDataAttributes&& value) { SetDataAttributes(std::move(value)); return *this;}
private:
Aws::String m_humanLoopName;
bool m_humanLoopNameHasBeenSet;
Aws::String m_flowDefinitionArn;
bool m_flowDefinitionArnHasBeenSet;
HumanLoopDataAttributes m_dataAttributes;
bool m_dataAttributesHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
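Since every setter above returns the object itself, a HumanLoopConfig can be assembled fluently. A minimal sketch, assuming a placeholder loop name and a flow-definition ARN from your own account (the include path mirrors the sibling headers in this diff):

#include <aws/rekognition/model/HumanLoopConfig.h>
#include <aws/rekognition/model/HumanLoopDataAttributes.h>

using namespace Aws::Rekognition::Model;

// Sketch only: the loop name and ARN below are placeholders.
HumanLoopConfig MakeHumanLoopConfig()
{
    HumanLoopDataAttributes attributes; // populated as in HumanLoopDataAttributes below
    return HumanLoopConfig()
        .WithHumanLoopName("my-review-loop") // keep unique within a region
        .WithFlowDefinitionArn(
            "arn:aws:sagemaker:us-east-1:111122223333:flow-definition/my-flow")
        .WithDataAttributes(attributes);
}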


@@ -0,0 +1,99 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/rekognition/model/ContentClassifier.h>
#include <utility>
namespace Aws
{
namespace Utils
{
namespace Json
{
class JsonValue;
class JsonView;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
/**
* <p>Allows you to set attributes of the image. Currently, you can declare an
* image as free of personally identifiable information.</p><p><h3>See Also:</h3>
* <a
* href="http://docs.aws.amazon.com/goto/WebAPI/rekognition-2016-06-27/HumanLoopDataAttributes">AWS
* API Reference</a></p>
*/
class AWS_REKOGNITION_API HumanLoopDataAttributes
{
public:
HumanLoopDataAttributes();
HumanLoopDataAttributes(Aws::Utils::Json::JsonView jsonValue);
HumanLoopDataAttributes& operator=(Aws::Utils::Json::JsonView jsonValue);
Aws::Utils::Json::JsonValue Jsonize() const;
/**
* <p>Sets whether the input image is free of personally identifiable
* information.</p>
*/
inline const Aws::Vector<ContentClassifier>& GetContentClassifiers() const{ return m_contentClassifiers; }
/**
* <p>Sets whether the input image is free of personally identifiable
* information.</p>
*/
inline bool ContentClassifiersHasBeenSet() const { return m_contentClassifiersHasBeenSet; }
/**
* <p>Sets whether the input image is free of personally identifiable
* information.</p>
*/
inline void SetContentClassifiers(const Aws::Vector<ContentClassifier>& value) { m_contentClassifiersHasBeenSet = true; m_contentClassifiers = value; }
/**
* <p>Sets whether the input image is free of personally identifiable
* information.</p>
*/
inline void SetContentClassifiers(Aws::Vector<ContentClassifier>&& value) { m_contentClassifiersHasBeenSet = true; m_contentClassifiers = std::move(value); }
/**
* <p>Sets whether the input image is free of personally identifiable
* information.</p>
*/
inline HumanLoopDataAttributes& WithContentClassifiers(const Aws::Vector<ContentClassifier>& value) { SetContentClassifiers(value); return *this;}
/**
* <p>Sets whether the input image is free of personally identifiable
* information.</p>
*/
inline HumanLoopDataAttributes& WithContentClassifiers(Aws::Vector<ContentClassifier>&& value) { SetContentClassifiers(std::move(value)); return *this;}
/**
* <p>Sets whether the input image is free of personally identifiable
* information.</p>
*/
inline HumanLoopDataAttributes& AddContentClassifiers(const ContentClassifier& value) { m_contentClassifiersHasBeenSet = true; m_contentClassifiers.push_back(value); return *this; }
/**
* <p>Sets whether the input image is free of personally identifiable
* information.</p>
*/
inline HumanLoopDataAttributes& AddContentClassifiers(ContentClassifier&& value) { m_contentClassifiersHasBeenSet = true; m_contentClassifiers.push_back(std::move(value)); return *this; }
private:
Aws::Vector<ContentClassifier> m_contentClassifiers;
bool m_contentClassifiersHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
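A matching sketch for the class above. ContentClassifier is included but not shown in this diff, so the FreeOfPersonallyIdentifiableInformation enumerator used here is an assumption drawn from the documentation text:

#include <aws/rekognition/model/ContentClassifier.h>
#include <aws/rekognition/model/HumanLoopDataAttributes.h>

using namespace Aws::Rekognition::Model;

// Declare the input image free of personally identifiable information.
HumanLoopDataAttributes MakePiiFreeAttributes()
{
    HumanLoopDataAttributes attributes;
    attributes.AddContentClassifiers(
        ContentClassifier::FreeOfPersonallyIdentifiableInformation); // assumed enumerator
    return attributes;
}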


@@ -0,0 +1,177 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <utility>
namespace Aws
{
namespace Utils
{
namespace Json
{
class JsonValue;
class JsonView;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
/**
* <p>The number of in-progress human reviews you have has exceeded the number
* allowed.</p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/rekognition-2016-06-27/HumanLoopQuotaExceededException">AWS
* API Reference</a></p>
*/
class AWS_REKOGNITION_API HumanLoopQuotaExceededException
{
public:
HumanLoopQuotaExceededException();
HumanLoopQuotaExceededException(Aws::Utils::Json::JsonView jsonValue);
HumanLoopQuotaExceededException& operator=(Aws::Utils::Json::JsonView jsonValue);
Aws::Utils::Json::JsonValue Jsonize() const;
/**
* <p>The resource type.</p>
*/
inline const Aws::String& GetResourceType() const{ return m_resourceType; }
/**
* <p>The resource type.</p>
*/
inline bool ResourceTypeHasBeenSet() const { return m_resourceTypeHasBeenSet; }
/**
* <p>The resource type.</p>
*/
inline void SetResourceType(const Aws::String& value) { m_resourceTypeHasBeenSet = true; m_resourceType = value; }
/**
* <p>The resource type.</p>
*/
inline void SetResourceType(Aws::String&& value) { m_resourceTypeHasBeenSet = true; m_resourceType = std::move(value); }
/**
* <p>The resource type.</p>
*/
inline void SetResourceType(const char* value) { m_resourceTypeHasBeenSet = true; m_resourceType.assign(value); }
/**
* <p>The resource type.</p>
*/
inline HumanLoopQuotaExceededException& WithResourceType(const Aws::String& value) { SetResourceType(value); return *this;}
/**
* <p>The resource type.</p>
*/
inline HumanLoopQuotaExceededException& WithResourceType(Aws::String&& value) { SetResourceType(std::move(value)); return *this;}
/**
* <p>The resource type.</p>
*/
inline HumanLoopQuotaExceededException& WithResourceType(const char* value) { SetResourceType(value); return *this;}
/**
* <p>The quota code.</p>
*/
inline const Aws::String& GetQuotaCode() const{ return m_quotaCode; }
/**
* <p>The quota code.</p>
*/
inline bool QuotaCodeHasBeenSet() const { return m_quotaCodeHasBeenSet; }
/**
* <p>The quota code.</p>
*/
inline void SetQuotaCode(const Aws::String& value) { m_quotaCodeHasBeenSet = true; m_quotaCode = value; }
/**
* <p>The quota code.</p>
*/
inline void SetQuotaCode(Aws::String&& value) { m_quotaCodeHasBeenSet = true; m_quotaCode = std::move(value); }
/**
* <p>The quota code.</p>
*/
inline void SetQuotaCode(const char* value) { m_quotaCodeHasBeenSet = true; m_quotaCode.assign(value); }
/**
* <p>The quota code.</p>
*/
inline HumanLoopQuotaExceededException& WithQuotaCode(const Aws::String& value) { SetQuotaCode(value); return *this;}
/**
* <p>The quota code.</p>
*/
inline HumanLoopQuotaExceededException& WithQuotaCode(Aws::String&& value) { SetQuotaCode(std::move(value)); return *this;}
/**
* <p>The quota code.</p>
*/
inline HumanLoopQuotaExceededException& WithQuotaCode(const char* value) { SetQuotaCode(value); return *this;}
/**
* <p>The service code.</p>
*/
inline const Aws::String& GetServiceCode() const{ return m_serviceCode; }
/**
* <p>The service code.</p>
*/
inline bool ServiceCodeHasBeenSet() const { return m_serviceCodeHasBeenSet; }
/**
* <p>The service code.</p>
*/
inline void SetServiceCode(const Aws::String& value) { m_serviceCodeHasBeenSet = true; m_serviceCode = value; }
/**
* <p>The service code.</p>
*/
inline void SetServiceCode(Aws::String&& value) { m_serviceCodeHasBeenSet = true; m_serviceCode = std::move(value); }
/**
* <p>The service code.</p>
*/
inline void SetServiceCode(const char* value) { m_serviceCodeHasBeenSet = true; m_serviceCode.assign(value); }
/**
* <p>The service code.</p>
*/
inline HumanLoopQuotaExceededException& WithServiceCode(const Aws::String& value) { SetServiceCode(value); return *this;}
/**
* <p>The service code.</p>
*/
inline HumanLoopQuotaExceededException& WithServiceCode(Aws::String&& value) { SetServiceCode(std::move(value)); return *this;}
/**
* <p>The service code.</p>
*/
inline HumanLoopQuotaExceededException& WithServiceCode(const char* value) { SetServiceCode(value); return *this;}
private:
Aws::String m_resourceType;
bool m_resourceTypeHasBeenSet;
Aws::String m_quotaCode;
bool m_quotaCodeHasBeenSet;
Aws::String m_serviceCode;
bool m_serviceCodeHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
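The accessors above are plain getters, so surfacing the exception's details is mechanical. A small sketch, leaving open how the modeled exception is obtained from the SDK's error machinery:

#include <aws/rekognition/model/HumanLoopQuotaExceededException.h>
#include <iostream>

using namespace Aws::Rekognition::Model;

// Sketch: log why the human-loop quota was exceeded.
void ReportQuotaError(const HumanLoopQuotaExceededException& e)
{
    std::cout << "Human-loop quota exceeded: resource type " << e.GetResourceType()
              << ", quota code " << e.GetQuotaCode()
              << ", service code " << e.GetServiceCode() << '\n';
}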


@@ -0,0 +1,131 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/core/utils/Array.h>
#include <aws/rekognition/model/S3Object.h>
#include <utility>
namespace Aws
{
namespace Utils
{
namespace Json
{
class JsonValue;
class JsonView;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
/**
* <p>Provides the input image either as bytes or an S3 object.</p> <p>You pass
* image bytes to an Amazon Rekognition API operation by using the
* <code>Bytes</code> property. For example, you would use the <code>Bytes</code>
* property to pass an image loaded from a local file system. Image bytes passed by
* using the <code>Bytes</code> property must be base64-encoded. Your code may not
* need to encode image bytes if you are using an AWS SDK to call Amazon
* Rekognition API operations. </p> <p>For more information, see Analyzing an Image
* Loaded from a Local File System in the Amazon Rekognition Developer Guide.</p>
* <p> You pass images stored in an S3 bucket to an Amazon Rekognition API
* operation by using the <code>S3Object</code> property. Images stored in an S3
* bucket do not need to be base64-encoded.</p> <p>The region for the S3 bucket
* containing the S3 object must match the region you use for Amazon Rekognition
* operations.</p> <p>If you use the AWS CLI to call Amazon Rekognition operations,
* passing image bytes using the Bytes property is not supported. You must first
* upload the image to an Amazon S3 bucket and then call the operation using the
* S3Object property.</p> <p>For Amazon Rekognition to process an S3 object, the
* user must have permission to access the S3 object. For more information, see
* Resource Based Policies in the Amazon Rekognition Developer Guide.
* </p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/rekognition-2016-06-27/Image">AWS
* API Reference</a></p>
*/
class AWS_REKOGNITION_API Image
{
public:
Image();
Image(Aws::Utils::Json::JsonView jsonValue);
Image& operator=(Aws::Utils::Json::JsonView jsonValue);
Aws::Utils::Json::JsonValue Jsonize() const;
/**
* <p>Blob of image bytes up to 5 MB.</p>
*/
inline const Aws::Utils::ByteBuffer& GetBytes() const{ return m_bytes; }
/**
* <p>Blob of image bytes up to 5 MB.</p>
*/
inline bool BytesHasBeenSet() const { return m_bytesHasBeenSet; }
/**
* <p>Blob of image bytes up to 5 MB.</p>
*/
inline void SetBytes(const Aws::Utils::ByteBuffer& value) { m_bytesHasBeenSet = true; m_bytes = value; }
/**
* <p>Blob of image bytes up to 5 MB.</p>
*/
inline void SetBytes(Aws::Utils::ByteBuffer&& value) { m_bytesHasBeenSet = true; m_bytes = std::move(value); }
/**
* <p>Blob of image bytes up to 5 MB.</p>
*/
inline Image& WithBytes(const Aws::Utils::ByteBuffer& value) { SetBytes(value); return *this;}
/**
* <p>Blob of image bytes up to 5 MB.</p>
*/
inline Image& WithBytes(Aws::Utils::ByteBuffer&& value) { SetBytes(std::move(value)); return *this;}
/**
* <p>Identifies an S3 object as the image source.</p>
*/
inline const S3Object& GetS3Object() const{ return m_s3Object; }
/**
* <p>Identifies an S3 object as the image source.</p>
*/
inline bool S3ObjectHasBeenSet() const { return m_s3ObjectHasBeenSet; }
/**
* <p>Identifies an S3 object as the image source.</p>
*/
inline void SetS3Object(const S3Object& value) { m_s3ObjectHasBeenSet = true; m_s3Object = value; }
/**
* <p>Identifies an S3 object as the image source.</p>
*/
inline void SetS3Object(S3Object&& value) { m_s3ObjectHasBeenSet = true; m_s3Object = std::move(value); }
/**
* <p>Identifies an S3 object as the image source.</p>
*/
inline Image& WithS3Object(const S3Object& value) { SetS3Object(value); return *this;}
/**
* <p>Identifies an S3 object as the image source.</p>
*/
inline Image& WithS3Object(S3Object&& value) { SetS3Object(std::move(value)); return *this;}
private:
Aws::Utils::ByteBuffer m_bytes;
bool m_bytesHasBeenSet;
S3Object m_s3Object;
bool m_s3ObjectHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
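The class above supports two alternative input paths, bytes or S3. A minimal sketch of each; S3Object's SetBucket and SetName are assumptions, since its header is not part of this excerpt, and the bucket and key values are placeholders:

#include <cstddef>
#include <aws/core/utils/Array.h>
#include <aws/rekognition/model/Image.h>
#include <aws/rekognition/model/S3Object.h>

using namespace Aws::Rekognition::Model;

// Sketch: reference an image stored in S3 (no base64 encoding needed). The
// bucket must be in the same region as the Rekognition endpoint you call.
Image MakeS3Image()
{
    S3Object object;
    object.SetBucket("my-bucket");       // placeholder, assumed accessor
    object.SetName("photos/face.jpg");   // placeholder, assumed accessor
    return Image().WithS3Object(object);
}

// Sketch: pass raw image bytes instead, as when loading from a local file.
Image MakeBytesImage(const unsigned char* data, size_t length)
{
    return Image().WithBytes(Aws::Utils::ByteBuffer(data, length));
}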


@@ -0,0 +1,103 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
namespace Aws
{
namespace Utils
{
namespace Json
{
class JsonValue;
class JsonView;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
/**
* <p>Identifies face image brightness and sharpness. </p><p><h3>See Also:</h3>
* <a
* href="http://docs.aws.amazon.com/goto/WebAPI/rekognition-2016-06-27/ImageQuality">AWS
* API Reference</a></p>
*/
class AWS_REKOGNITION_API ImageQuality
{
public:
ImageQuality();
ImageQuality(Aws::Utils::Json::JsonView jsonValue);
ImageQuality& operator=(Aws::Utils::Json::JsonView jsonValue);
Aws::Utils::Json::JsonValue Jsonize() const;
/**
* <p>Value representing brightness of the face. The service returns a value
* between 0 and 100 (inclusive). A higher value indicates a brighter face
* image.</p>
*/
inline double GetBrightness() const{ return m_brightness; }
/**
* <p>Value representing brightness of the face. The service returns a value
* between 0 and 100 (inclusive). A higher value indicates a brighter face
* image.</p>
*/
inline bool BrightnessHasBeenSet() const { return m_brightnessHasBeenSet; }
/**
* <p>Value representing brightness of the face. The service returns a value
* between 0 and 100 (inclusive). A higher value indicates a brighter face
* image.</p>
*/
inline void SetBrightness(double value) { m_brightnessHasBeenSet = true; m_brightness = value; }
/**
* <p>Value representing brightness of the face. The service returns a value
* between 0 and 100 (inclusive). A higher value indicates a brighter face
* image.</p>
*/
inline ImageQuality& WithBrightness(double value) { SetBrightness(value); return *this;}
/**
* <p>Value representing sharpness of the face. The service returns a value between
* 0 and 100 (inclusive). A higher value indicates a sharper face image.</p>
*/
inline double GetSharpness() const{ return m_sharpness; }
/**
* <p>Value representing sharpness of the face. The service returns a value between
* 0 and 100 (inclusive). A higher value indicates a sharper face image.</p>
*/
inline bool SharpnessHasBeenSet() const { return m_sharpnessHasBeenSet; }
/**
* <p>Value representing sharpness of the face. The service returns a value between
* 0 and 100 (inclusive). A higher value indicates a sharper face image.</p>
*/
inline void SetSharpness(double value) { m_sharpnessHasBeenSet = true; m_sharpness = value; }
/**
* <p>Value representing sharpness of the face. The service returns a value between
* 0 and 100 (inclusive). A higher value indicates a sharper face image.</p>
*/
inline ImageQuality& WithSharpness(double value) { SetSharpness(value); return *this;}
private:
double m_brightness;
bool m_brightnessHasBeenSet;
double m_sharpness;
bool m_sharpnessHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
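Because both scores above live on a 0-100 scale, a caller can gate downstream processing with simple thresholds. A sketch; the 40.0 cut-offs are illustrative, not service-defined:

#include <aws/rekognition/model/ImageQuality.h>

using namespace Aws::Rekognition::Model;

// Sketch: accept a face image only if it is reasonably bright and sharp.
bool IsUsableFaceImage(const ImageQuality& quality)
{
    return quality.GetBrightness() >= 40.0   // illustrative threshold
        && quality.GetSharpness() >= 40.0;   // illustrative threshold
}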


@@ -0,0 +1,487 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/RekognitionRequest.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/rekognition/model/Image.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/rekognition/model/QualityFilter.h>
#include <aws/rekognition/model/Attribute.h>
#include <utility>
namespace Aws
{
namespace Rekognition
{
namespace Model
{
/**
*/
class AWS_REKOGNITION_API IndexFacesRequest : public RekognitionRequest
{
public:
IndexFacesRequest();
// The service request name is the operation name that sends this request out;
// each operation should have a unique request name so that the operation's
// name can be recovered from its request.
// Note: this does not hold for responses; multiple operations may share the
// same response name, so the operation's name cannot be recovered from a response.
inline virtual const char* GetServiceRequestName() const override { return "IndexFaces"; }
Aws::String SerializePayload() const override;
Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
/**
* <p>The ID of an existing collection to which you want to add the faces that are
* detected in the input images.</p>
*/
inline const Aws::String& GetCollectionId() const{ return m_collectionId; }
/**
* <p>The ID of an existing collection to which you want to add the faces that are
* detected in the input images.</p>
*/
inline bool CollectionIdHasBeenSet() const { return m_collectionIdHasBeenSet; }
/**
* <p>The ID of an existing collection to which you want to add the faces that are
* detected in the input images.</p>
*/
inline void SetCollectionId(const Aws::String& value) { m_collectionIdHasBeenSet = true; m_collectionId = value; }
/**
* <p>The ID of an existing collection to which you want to add the faces that are
* detected in the input images.</p>
*/
inline void SetCollectionId(Aws::String&& value) { m_collectionIdHasBeenSet = true; m_collectionId = std::move(value); }
/**
* <p>The ID of an existing collection to which you want to add the faces that are
* detected in the input images.</p>
*/
inline void SetCollectionId(const char* value) { m_collectionIdHasBeenSet = true; m_collectionId.assign(value); }
/**
* <p>The ID of an existing collection to which you want to add the faces that are
* detected in the input images.</p>
*/
inline IndexFacesRequest& WithCollectionId(const Aws::String& value) { SetCollectionId(value); return *this;}
/**
* <p>The ID of an existing collection to which you want to add the faces that are
* detected in the input images.</p>
*/
inline IndexFacesRequest& WithCollectionId(Aws::String&& value) { SetCollectionId(std::move(value)); return *this;}
/**
* <p>The ID of an existing collection to which you want to add the faces that are
* detected in the input images.</p>
*/
inline IndexFacesRequest& WithCollectionId(const char* value) { SetCollectionId(value); return *this;}
/**
* <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS
* CLI to call Amazon Rekognition operations, passing base64-encoded image bytes
* isn't supported. </p> <p>If you are using an AWS SDK to call Amazon Rekognition,
* you might not need to base64-encode image bytes passed using the
* <code>Bytes</code> field. For more information, see Images in the Amazon
* Rekognition developer guide.</p>
*/
inline const Image& GetImage() const{ return m_image; }
/**
* <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS
* CLI to call Amazon Rekognition operations, passing base64-encoded image bytes
* isn't supported. </p> <p>If you are using an AWS SDK to call Amazon Rekognition,
* you might not need to base64-encode image bytes passed using the
* <code>Bytes</code> field. For more information, see Images in the Amazon
* Rekognition developer guide.</p>
*/
inline bool ImageHasBeenSet() const { return m_imageHasBeenSet; }
/**
* <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS
* CLI to call Amazon Rekognition operations, passing base64-encoded image bytes
* isn't supported. </p> <p>If you are using an AWS SDK to call Amazon Rekognition,
* you might not need to base64-encode image bytes passed using the
* <code>Bytes</code> field. For more information, see Images in the Amazon
* Rekognition developer guide.</p>
*/
inline void SetImage(const Image& value) { m_imageHasBeenSet = true; m_image = value; }
/**
* <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS
* CLI to call Amazon Rekognition operations, passing base64-encoded image bytes
* isn't supported. </p> <p>If you are using an AWS SDK to call Amazon Rekognition,
* you might not need to base64-encode image bytes passed using the
* <code>Bytes</code> field. For more information, see Images in the Amazon
* Rekognition developer guide.</p>
*/
inline void SetImage(Image&& value) { m_imageHasBeenSet = true; m_image = std::move(value); }
/**
* <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS
* CLI to call Amazon Rekognition operations, passing base64-encoded image bytes
* isn't supported. </p> <p>If you are using an AWS SDK to call Amazon Rekognition,
* you might not need to base64-encode image bytes passed using the
* <code>Bytes</code> field. For more information, see Images in the Amazon
* Rekognition developer guide.</p>
*/
inline IndexFacesRequest& WithImage(const Image& value) { SetImage(value); return *this;}
/**
* <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS
* CLI to call Amazon Rekognition operations, passing base64-encoded image bytes
* isn't supported. </p> <p>If you are using an AWS SDK to call Amazon Rekognition,
* you might not need to base64-encode image bytes passed using the
* <code>Bytes</code> field. For more information, see Images in the Amazon
* Rekognition developer guide.</p>
*/
inline IndexFacesRequest& WithImage(Image&& value) { SetImage(std::move(value)); return *this;}
/**
* <p>The ID you want to assign to all the faces detected in the image.</p>
*/
inline const Aws::String& GetExternalImageId() const{ return m_externalImageId; }
/**
* <p>The ID you want to assign to all the faces detected in the image.</p>
*/
inline bool ExternalImageIdHasBeenSet() const { return m_externalImageIdHasBeenSet; }
/**
* <p>The ID you want to assign to all the faces detected in the image.</p>
*/
inline void SetExternalImageId(const Aws::String& value) { m_externalImageIdHasBeenSet = true; m_externalImageId = value; }
/**
* <p>The ID you want to assign to all the faces detected in the image.</p>
*/
inline void SetExternalImageId(Aws::String&& value) { m_externalImageIdHasBeenSet = true; m_externalImageId = std::move(value); }
/**
* <p>The ID you want to assign to all the faces detected in the image.</p>
*/
inline void SetExternalImageId(const char* value) { m_externalImageIdHasBeenSet = true; m_externalImageId.assign(value); }
/**
* <p>The ID you want to assign to all the faces detected in the image.</p>
*/
inline IndexFacesRequest& WithExternalImageId(const Aws::String& value) { SetExternalImageId(value); return *this;}
/**
* <p>The ID you want to assign to all the faces detected in the image.</p>
*/
inline IndexFacesRequest& WithExternalImageId(Aws::String&& value) { SetExternalImageId(std::move(value)); return *this;}
/**
* <p>The ID you want to assign to all the faces detected in the image.</p>
*/
inline IndexFacesRequest& WithExternalImageId(const char* value) { SetExternalImageId(value); return *this;}
/**
* <p>An array of facial attributes that you want to be returned. This can be the
* default list of attributes or all attributes. If you don't specify a value for
* <code>Attributes</code> or if you specify <code>["DEFAULT"]</code>, the API
* returns the following subset of facial attributes: <code>BoundingBox</code>,
* <code>Confidence</code>, <code>Pose</code>, <code>Quality</code>, and
* <code>Landmarks</code>. If you provide <code>["ALL"]</code>, all facial
* attributes are returned, but the operation takes longer to complete.</p> <p>If
* you provide both, <code>["ALL", "DEFAULT"]</code>, the service uses a logical
* AND operator to determine which attributes to return (in this case, all
* attributes). </p>
*/
inline const Aws::Vector<Attribute>& GetDetectionAttributes() const{ return m_detectionAttributes; }
/**
* <p>An array of facial attributes that you want to be returned. This can be the
* default list of attributes or all attributes. If you don't specify a value for
* <code>Attributes</code> or if you specify <code>["DEFAULT"]</code>, the API
* returns the following subset of facial attributes: <code>BoundingBox</code>,
* <code>Confidence</code>, <code>Pose</code>, <code>Quality</code>, and
* <code>Landmarks</code>. If you provide <code>["ALL"]</code>, all facial
* attributes are returned, but the operation takes longer to complete.</p> <p>If
* you provide both, <code>["ALL", "DEFAULT"]</code>, the service uses a logical
* AND operator to determine which attributes to return (in this case, all
* attributes). </p>
*/
inline bool DetectionAttributesHasBeenSet() const { return m_detectionAttributesHasBeenSet; }
/**
* <p>An array of facial attributes that you want to be returned. This can be the
* default list of attributes or all attributes. If you don't specify a value for
* <code>Attributes</code> or if you specify <code>["DEFAULT"]</code>, the API
* returns the following subset of facial attributes: <code>BoundingBox</code>,
* <code>Confidence</code>, <code>Pose</code>, <code>Quality</code>, and
* <code>Landmarks</code>. If you provide <code>["ALL"]</code>, all facial
* attributes are returned, but the operation takes longer to complete.</p> <p>If
* you provide both, <code>["ALL", "DEFAULT"]</code>, the service uses a logical
* AND operator to determine which attributes to return (in this case, all
* attributes). </p>
*/
inline void SetDetectionAttributes(const Aws::Vector<Attribute>& value) { m_detectionAttributesHasBeenSet = true; m_detectionAttributes = value; }
/**
* <p>An array of facial attributes that you want to be returned. This can be the
* default list of attributes or all attributes. If you don't specify a value for
* <code>Attributes</code> or if you specify <code>["DEFAULT"]</code>, the API
* returns the following subset of facial attributes: <code>BoundingBox</code>,
* <code>Confidence</code>, <code>Pose</code>, <code>Quality</code>, and
* <code>Landmarks</code>. If you provide <code>["ALL"]</code>, all facial
* attributes are returned, but the operation takes longer to complete.</p> <p>If
* you provide both, <code>["ALL", "DEFAULT"]</code>, the service uses a logical
* AND operator to determine which attributes to return (in this case, all
* attributes). </p>
*/
inline void SetDetectionAttributes(Aws::Vector<Attribute>&& value) { m_detectionAttributesHasBeenSet = true; m_detectionAttributes = std::move(value); }
/**
* <p>An array of facial attributes that you want to be returned. This can be the
* default list of attributes or all attributes. If you don't specify a value for
* <code>Attributes</code> or if you specify <code>["DEFAULT"]</code>, the API
* returns the following subset of facial attributes: <code>BoundingBox</code>,
* <code>Confidence</code>, <code>Pose</code>, <code>Quality</code>, and
* <code>Landmarks</code>. If you provide <code>["ALL"]</code>, all facial
* attributes are returned, but the operation takes longer to complete.</p> <p>If
* you provide both, <code>["ALL", "DEFAULT"]</code>, the service uses a logical
* AND operator to determine which attributes to return (in this case, all
* attributes). </p>
*/
inline IndexFacesRequest& WithDetectionAttributes(const Aws::Vector<Attribute>& value) { SetDetectionAttributes(value); return *this;}
/**
* <p>An array of facial attributes that you want to be returned. This can be the
* default list of attributes or all attributes. If you don't specify a value for
* <code>Attributes</code> or if you specify <code>["DEFAULT"]</code>, the API
* returns the following subset of facial attributes: <code>BoundingBox</code>,
* <code>Confidence</code>, <code>Pose</code>, <code>Quality</code>, and
* <code>Landmarks</code>. If you provide <code>["ALL"]</code>, all facial
* attributes are returned, but the operation takes longer to complete.</p> <p>If
* you provide both, <code>["ALL", "DEFAULT"]</code>, the service uses a logical
* AND operator to determine which attributes to return (in this case, all
* attributes). </p>
*/
inline IndexFacesRequest& WithDetectionAttributes(Aws::Vector<Attribute>&& value) { SetDetectionAttributes(std::move(value)); return *this;}
/**
* <p>An array of facial attributes that you want to be returned. This can be the
* default list of attributes or all attributes. If you don't specify a value for
* <code>Attributes</code> or if you specify <code>["DEFAULT"]</code>, the API
* returns the following subset of facial attributes: <code>BoundingBox</code>,
* <code>Confidence</code>, <code>Pose</code>, <code>Quality</code>, and
* <code>Landmarks</code>. If you provide <code>["ALL"]</code>, all facial
* attributes are returned, but the operation takes longer to complete.</p> <p>If
* you provide both, <code>["ALL", "DEFAULT"]</code>, the service uses a logical
* AND operator to determine which attributes to return (in this case, all
* attributes). </p>
*/
inline IndexFacesRequest& AddDetectionAttributes(const Attribute& value) { m_detectionAttributesHasBeenSet = true; m_detectionAttributes.push_back(value); return *this; }
/**
* <p>An array of facial attributes that you want to be returned. This can be the
* default list of attributes or all attributes. If you don't specify a value for
* <code>Attributes</code> or if you specify <code>["DEFAULT"]</code>, the API
* returns the following subset of facial attributes: <code>BoundingBox</code>,
* <code>Confidence</code>, <code>Pose</code>, <code>Quality</code>, and
* <code>Landmarks</code>. If you provide <code>["ALL"]</code>, all facial
* attributes are returned, but the operation takes longer to complete.</p> <p>If
* you provide both, <code>["ALL", "DEFAULT"]</code>, the service uses a logical
* AND operator to determine which attributes to return (in this case, all
* attributes). </p>
*/
inline IndexFacesRequest& AddDetectionAttributes(Attribute&& value) { m_detectionAttributesHasBeenSet = true; m_detectionAttributes.push_back(std::move(value)); return *this; }
/**
* <p>The maximum number of faces to index. The value of <code>MaxFaces</code> must
* be greater than or equal to 1. <code>IndexFaces</code> returns no more than 100
* detected faces in an image, even if you specify a larger value for
* <code>MaxFaces</code>.</p> <p>If <code>IndexFaces</code> detects more faces than
* the value of <code>MaxFaces</code>, the faces with the lowest quality are
* filtered out first. If there are still more faces than the value of
* <code>MaxFaces</code>, the faces with the smallest bounding boxes are filtered
* out (up to the number that's needed to satisfy the value of
* <code>MaxFaces</code>). Information about the unindexed faces is available in
* the <code>UnindexedFaces</code> array. </p> <p>The faces that are returned by
* <code>IndexFaces</code> are sorted by face bounding box size, from the
* largest to the smallest.</p> <p> <code>MaxFaces</code> can be used
* with a collection associated with any version of the face model.</p>
*/
inline int GetMaxFaces() const{ return m_maxFaces; }
/**
* <p>The maximum number of faces to index. The value of <code>MaxFaces</code> must
* be greater than or equal to 1. <code>IndexFaces</code> returns no more than 100
* detected faces in an image, even if you specify a larger value for
* <code>MaxFaces</code>.</p> <p>If <code>IndexFaces</code> detects more faces than
* the value of <code>MaxFaces</code>, the faces with the lowest quality are
* filtered out first. If there are still more faces than the value of
* <code>MaxFaces</code>, the faces with the smallest bounding boxes are filtered
* out (up to the number that's needed to satisfy the value of
* <code>MaxFaces</code>). Information about the unindexed faces is available in
* the <code>UnindexedFaces</code> array. </p> <p>The faces that are returned by
* <code>IndexFaces</code> are sorted by face bounding box size, from the
* largest to the smallest.</p> <p> <code>MaxFaces</code> can be used
* with a collection associated with any version of the face model.</p>
*/
inline bool MaxFacesHasBeenSet() const { return m_maxFacesHasBeenSet; }
/**
* <p>The maximum number of faces to index. The value of <code>MaxFaces</code> must
* be greater than or equal to 1. <code>IndexFaces</code> returns no more than 100
* detected faces in an image, even if you specify a larger value for
* <code>MaxFaces</code>.</p> <p>If <code>IndexFaces</code> detects more faces than
* the value of <code>MaxFaces</code>, the faces with the lowest quality are
* filtered out first. If there are still more faces than the value of
* <code>MaxFaces</code>, the faces with the smallest bounding boxes are filtered
* out (up to the number that's needed to satisfy the value of
* <code>MaxFaces</code>). Information about the unindexed faces is available in
* the <code>UnindexedFaces</code> array. </p> <p>The faces that are returned by
* <code>IndexFaces</code> are sorted by face bounding box size, from the
* largest to the smallest.</p> <p> <code>MaxFaces</code> can be used
* with a collection associated with any version of the face model.</p>
*/
inline void SetMaxFaces(int value) { m_maxFacesHasBeenSet = true; m_maxFaces = value; }
/**
* <p>The maximum number of faces to index. The value of <code>MaxFaces</code> must
* be greater than or equal to 1. <code>IndexFaces</code> returns no more than 100
* detected faces in an image, even if you specify a larger value for
* <code>MaxFaces</code>.</p> <p>If <code>IndexFaces</code> detects more faces than
* the value of <code>MaxFaces</code>, the faces with the lowest quality are
* filtered out first. If there are still more faces than the value of
* <code>MaxFaces</code>, the faces with the smallest bounding boxes are filtered
* out (up to the number that's needed to satisfy the value of
* <code>MaxFaces</code>). Information about the unindexed faces is available in
* the <code>UnindexedFaces</code> array. </p> <p>The faces that are returned by
* <code>IndexFaces</code> are sorted by face bounding box size, from the
* largest to the smallest.</p> <p> <code>MaxFaces</code> can be used
* with a collection associated with any version of the face model.</p>
*/
inline IndexFacesRequest& WithMaxFaces(int value) { SetMaxFaces(value); return *this;}
/**
* <p>A filter that specifies a quality bar for how much filtering is done to
* identify faces. Filtered faces aren't indexed. If you specify <code>AUTO</code>,
* Amazon Rekognition chooses the quality bar. If you specify <code>LOW</code>,
* <code>MEDIUM</code>, or <code>HIGH</code>, filtering removes all faces that
* don't meet the chosen quality bar. The default value is <code>AUTO</code>. The
* quality bar is based on a variety of common use cases. Low-quality detections
* can occur for a number of reasons. Some examples are an object that's
* misidentified as a face, a face that's too blurry, or a face with a pose that's
* too extreme to use. If you specify <code>NONE</code>, no filtering is performed.
* </p> <p>To use quality filtering, the collection you are using must be
* associated with version 3 of the face model or higher.</p>
*/
inline const QualityFilter& GetQualityFilter() const{ return m_qualityFilter; }
/**
* <p>A filter that specifies a quality bar for how much filtering is done to
* identify faces. Filtered faces aren't indexed. If you specify <code>AUTO</code>,
* Amazon Rekognition chooses the quality bar. If you specify <code>LOW</code>,
* <code>MEDIUM</code>, or <code>HIGH</code>, filtering removes all faces that
* don't meet the chosen quality bar. The default value is <code>AUTO</code>. The
* quality bar is based on a variety of common use cases. Low-quality detections
* can occur for a number of reasons. Some examples are an object that's
* misidentified as a face, a face that's too blurry, or a face with a pose that's
* too extreme to use. If you specify <code>NONE</code>, no filtering is performed.
* </p> <p>To use quality filtering, the collection you are using must be
* associated with version 3 of the face model or higher.</p>
*/
inline bool QualityFilterHasBeenSet() const { return m_qualityFilterHasBeenSet; }
/**
* <p>A filter that specifies a quality bar for how much filtering is done to
* identify faces. Filtered faces aren't indexed. If you specify <code>AUTO</code>,
* Amazon Rekognition chooses the quality bar. If you specify <code>LOW</code>,
* <code>MEDIUM</code>, or <code>HIGH</code>, filtering removes all faces that
* don't meet the chosen quality bar. The default value is <code>AUTO</code>. The
* quality bar is based on a variety of common use cases. Low-quality detections
* can occur for a number of reasons. Some examples are an object that's
* misidentified as a face, a face that's too blurry, or a face with a pose that's
* too extreme to use. If you specify <code>NONE</code>, no filtering is performed.
* </p> <p>To use quality filtering, the collection you are using must be
* associated with version 3 of the face model or higher.</p>
*/
inline void SetQualityFilter(const QualityFilter& value) { m_qualityFilterHasBeenSet = true; m_qualityFilter = value; }
/**
* <p>A filter that specifies a quality bar for how much filtering is done to
* identify faces. Filtered faces aren't indexed. If you specify <code>AUTO</code>,
* Amazon Rekognition chooses the quality bar. If you specify <code>LOW</code>,
* <code>MEDIUM</code>, or <code>HIGH</code>, filtering removes all faces that
* don't meet the chosen quality bar. The default value is <code>AUTO</code>. The
* quality bar is based on a variety of common use cases. Low-quality detections
* can occur for a number of reasons. Some examples are an object that's
* misidentified as a face, a face that's too blurry, or a face with a pose that's
* too extreme to use. If you specify <code>NONE</code>, no filtering is performed.
* </p> <p>To use quality filtering, the collection you are using must be
* associated with version 3 of the face model or higher.</p>
*/
inline void SetQualityFilter(QualityFilter&& value) { m_qualityFilterHasBeenSet = true; m_qualityFilter = std::move(value); }
/**
* <p>A filter that specifies a quality bar for how much filtering is done to
* identify faces. Filtered faces aren't indexed. If you specify <code>AUTO</code>,
* Amazon Rekognition chooses the quality bar. If you specify <code>LOW</code>,
* <code>MEDIUM</code>, or <code>HIGH</code>, filtering removes all faces that
* don't meet the chosen quality bar. The default value is <code>AUTO</code>. The
* quality bar is based on a variety of common use cases. Low-quality detections
* can occur for a number of reasons. Some examples are an object that's
* misidentified as a face, a face that's too blurry, or a face with a pose that's
* too extreme to use. If you specify <code>NONE</code>, no filtering is performed.
* </p> <p>To use quality filtering, the collection you are using must be
* associated with version 3 of the face model or higher.</p>
*/
inline IndexFacesRequest& WithQualityFilter(const QualityFilter& value) { SetQualityFilter(value); return *this;}
/**
* <p>A filter that specifies a quality bar for how much filtering is done to
* identify faces. Filtered faces aren't indexed. If you specify <code>AUTO</code>,
* Amazon Rekognition chooses the quality bar. If you specify <code>LOW</code>,
* <code>MEDIUM</code>, or <code>HIGH</code>, filtering removes all faces that
* don't meet the chosen quality bar. The default value is <code>AUTO</code>. The
* quality bar is based on a variety of common use cases. Low-quality detections
* can occur for a number of reasons. Some examples are an object that's
* misidentified as a face, a face that's too blurry, or a face with a pose that's
* too extreme to use. If you specify <code>NONE</code>, no filtering is performed.
* </p> <p>To use quality filtering, the collection you are using must be
* associated with version 3 of the face model or higher.</p>
*/
inline IndexFacesRequest& WithQualityFilter(QualityFilter&& value) { SetQualityFilter(std::move(value)); return *this;}
private:
Aws::String m_collectionId;
bool m_collectionIdHasBeenSet;
Image m_image;
bool m_imageHasBeenSet;
Aws::String m_externalImageId;
bool m_externalImageIdHasBeenSet;
Aws::Vector<Attribute> m_detectionAttributes;
bool m_detectionAttributesHasBeenSet;
int m_maxFaces;
bool m_maxFacesHasBeenSet;
QualityFilter m_qualityFilter;
bool m_qualityFilterHasBeenSet;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
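Putting the request's fields together: a sketch that indexes at most five of the best faces from an image into an existing collection. QualityFilter::AUTO and Attribute::DEFAULT are assumed enumerators from headers not shown here, and the collection ID is a placeholder:

#include <aws/rekognition/model/Attribute.h>
#include <aws/rekognition/model/Image.h>
#include <aws/rekognition/model/IndexFacesRequest.h>
#include <aws/rekognition/model/QualityFilter.h>

using namespace Aws::Rekognition::Model;

// Sketch: faces beyond MaxFaces (lowest quality, then smallest bounding box)
// are reported in the response's UnindexedFaces array rather than indexed.
IndexFacesRequest MakeIndexRequest(const Image& image)
{
    return IndexFacesRequest()
        .WithCollectionId("my-collection")           // placeholder collection ID
        .WithImage(image)
        .WithExternalImageId("photo-0001")           // applied to every indexed face
        .WithMaxFaces(5)
        .WithQualityFilter(QualityFilter::AUTO)      // assumed enumerator
        .AddDetectionAttributes(Attribute::DEFAULT); // assumed enumerator
}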


@@ -0,0 +1,324 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/rekognition/model/OrientationCorrection.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/rekognition/model/FaceRecord.h>
#include <aws/rekognition/model/UnindexedFace.h>
#include <utility>
namespace Aws
{
template<typename RESULT_TYPE>
class AmazonWebServiceResult;
namespace Utils
{
namespace Json
{
class JsonValue;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
class AWS_REKOGNITION_API IndexFacesResult
{
public:
IndexFacesResult();
IndexFacesResult(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
IndexFacesResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
/**
* <p>An array of faces detected and added to the collection. For more information,
* see Searching Faces in a Collection in the Amazon Rekognition Developer Guide.
* </p>
*/
inline const Aws::Vector<FaceRecord>& GetFaceRecords() const{ return m_faceRecords; }
/**
* <p>An array of faces detected and added to the collection. For more information,
* see Searching Faces in a Collection in the Amazon Rekognition Developer Guide.
* </p>
*/
inline void SetFaceRecords(const Aws::Vector<FaceRecord>& value) { m_faceRecords = value; }
/**
* <p>An array of faces detected and added to the collection. For more information,
* see Searching Faces in a Collection in the Amazon Rekognition Developer Guide.
* </p>
*/
inline void SetFaceRecords(Aws::Vector<FaceRecord>&& value) { m_faceRecords = std::move(value); }
/**
* <p>An array of faces detected and added to the collection. For more information,
* see Searching Faces in a Collection in the Amazon Rekognition Developer Guide.
* </p>
*/
inline IndexFacesResult& WithFaceRecords(const Aws::Vector<FaceRecord>& value) { SetFaceRecords(value); return *this;}
/**
* <p>An array of faces detected and added to the collection. For more information,
* see Searching Faces in a Collection in the Amazon Rekognition Developer Guide.
* </p>
*/
inline IndexFacesResult& WithFaceRecords(Aws::Vector<FaceRecord>&& value) { SetFaceRecords(std::move(value)); return *this;}
/**
* <p>An array of faces detected and added to the collection. For more information,
* see Searching Faces in a Collection in the Amazon Rekognition Developer Guide.
* </p>
*/
inline IndexFacesResult& AddFaceRecords(const FaceRecord& value) { m_faceRecords.push_back(value); return *this; }
/**
* <p>An array of faces detected and added to the collection. For more information,
* see Searching Faces in a Collection in the Amazon Rekognition Developer Guide.
* </p>
*/
inline IndexFacesResult& AddFaceRecords(FaceRecord&& value) { m_faceRecords.push_back(std::move(value)); return *this; }
/**
* <p>If your collection is associated with a face detection model that's later
* than version 3.0, the value of <code>OrientationCorrection</code> is always null
* and no orientation information is returned.</p> <p>If your collection is
* associated with a face detection model that's version 3.0 or earlier, the
* following applies:</p> <ul> <li> <p>If the input image is in .jpeg format, it
* might contain exchangeable image file format (Exif) metadata that includes the
* image's orientation. Amazon Rekognition uses this orientation information to
* perform image correction - the bounding box coordinates are translated to
* represent object locations after the orientation information in the Exif
* metadata is used to correct the image orientation. Images in .png format don't
* contain Exif metadata. The value of <code>OrientationCorrection</code> is
* null.</p> </li> <li> <p>If the image doesn't contain orientation information in
* its Exif metadata, Amazon Rekognition returns an estimated orientation
* (ROTATE_0, ROTATE_90, ROTATE_180, ROTATE_270). Amazon Rekognition doesn't
* perform image correction for images. The bounding box coordinates aren't
* translated and represent the object locations before the image is rotated.</p>
* </li> </ul> <p>Bounding box information is returned in the
* <code>FaceRecords</code> array. You can get the version of the face detection
* model by calling <a>DescribeCollection</a>. </p>
*/
inline const OrientationCorrection& GetOrientationCorrection() const{ return m_orientationCorrection; }
/**
* <p>If your collection is associated with a face detection model that's later
* than version 3.0, the value of <code>OrientationCorrection</code> is always null
* and no orientation information is returned.</p> <p>If your collection is
* associated with a face detection model that's version 3.0 or earlier, the
* following applies:</p> <ul> <li> <p>If the input image is in .jpeg format, it
* might contain exchangeable image file format (Exif) metadata that includes the
* image's orientation. Amazon Rekognition uses this orientation information to
* perform image correction - the bounding box coordinates are translated to
* represent object locations after the orientation information in the Exif
* metadata is used to correct the image orientation. Images in .png format don't
* contain Exif metadata. The value of <code>OrientationCorrection</code> is
* null.</p> </li> <li> <p>If the image doesn't contain orientation information in
* its Exif metadata, Amazon Rekognition returns an estimated orientation
* (ROTATE_0, ROTATE_90, ROTATE_180, ROTATE_270). Amazon Rekognition doesn't
* perform image correction for images. The bounding box coordinates aren't
* translated and represent the object locations before the image is rotated.</p>
* </li> </ul> <p>Bounding box information is returned in the
* <code>FaceRecords</code> array. You can get the version of the face detection
* model by calling <a>DescribeCollection</a>. </p>
*/
inline void SetOrientationCorrection(const OrientationCorrection& value) { m_orientationCorrection = value; }
/**
* <p>If your collection is associated with a face detection model that's later
* than version 3.0, the value of <code>OrientationCorrection</code> is always null
* and no orientation information is returned.</p> <p>If your collection is
* associated with a face detection model that's version 3.0 or earlier, the
* following applies:</p> <ul> <li> <p>If the input image is in .jpeg format, it
* might contain exchangeable image file format (Exif) metadata that includes the
* image's orientation. Amazon Rekognition uses this orientation information to
* perform image correction - the bounding box coordinates are translated to
* represent object locations after the orientation information in the Exif
* metadata is used to correct the image orientation. Images in .png format don't
* contain Exif metadata. The value of <code>OrientationCorrection</code> is
* null.</p> </li> <li> <p>If the image doesn't contain orientation information in
* its Exif metadata, Amazon Rekognition returns an estimated orientation
* (ROTATE_0, ROTATE_90, ROTATE_180, ROTATE_270). Amazon Rekognition doesn't
* perform image correction for images. The bounding box coordinates aren't
* translated and represent the object locations before the image is rotated.</p>
* </li> </ul> <p>Bounding box information is returned in the
* <code>FaceRecords</code> array. You can get the version of the face detection
* model by calling <a>DescribeCollection</a>. </p>
*/
inline void SetOrientationCorrection(OrientationCorrection&& value) { m_orientationCorrection = std::move(value); }
/**
* <p>If your collection is associated with a face detection model that's later
* than version 3.0, the value of <code>OrientationCorrection</code> is always null
* and no orientation information is returned.</p> <p>If your collection is
* associated with a face detection model that's version 3.0 or earlier, the
* following applies:</p> <ul> <li> <p>If the input image is in .jpeg format, it
* might contain exchangeable image file format (Exif) metadata that includes the
* image's orientation. Amazon Rekognition uses this orientation information to
* perform image correction - the bounding box coordinates are translated to
* represent object locations after the orientation information in the Exif
* metadata is used to correct the image orientation. Images in .png format don't
* contain Exif metadata. The value of <code>OrientationCorrection</code> is
* null.</p> </li> <li> <p>If the image doesn't contain orientation information in
* its Exif metadata, Amazon Rekognition returns an estimated orientation
* (ROTATE_0, ROTATE_90, ROTATE_180, ROTATE_270). Amazon Rekognition doesn't
* perform image correction for images. The bounding box coordinates aren't
* translated and represent the object locations before the image is rotated.</p>
* </li> </ul> <p>Bounding box information is returned in the
* <code>FaceRecords</code> array. You can get the version of the face detection
* model by calling <a>DescribeCollection</a>. </p>
*/
inline IndexFacesResult& WithOrientationCorrection(const OrientationCorrection& value) { SetOrientationCorrection(value); return *this;}
/**
* <p>If your collection is associated with a face detection model that's later
* than version 3.0, the value of <code>OrientationCorrection</code> is always null
* and no orientation information is returned.</p> <p>If your collection is
* associated with a face detection model that's version 3.0 or earlier, the
* following applies:</p> <ul> <li> <p>If the input image is in .jpeg format, it
* might contain exchangeable image file format (Exif) metadata that includes the
* image's orientation. Amazon Rekognition uses this orientation information to
* perform image correction - the bounding box coordinates are translated to
* represent object locations after the orientation information in the Exif
* metadata is used to correct the image orientation. Images in .png format don't
* contain Exif metadata. The value of <code>OrientationCorrection</code> is
* null.</p> </li> <li> <p>If the image doesn't contain orientation information in
* its Exif metadata, Amazon Rekognition returns an estimated orientation
* (ROTATE_0, ROTATE_90, ROTATE_180, ROTATE_270). Amazon Rekognition doesn't
* perform image correction for images. The bounding box coordinates aren't
* translated and represent the object locations before the image is rotated.</p>
* </li> </ul> <p>Bounding box information is returned in the
* <code>FaceRecords</code> array. You can get the version of the face detection
* model by calling <a>DescribeCollection</a>. </p>
*/
inline IndexFacesResult& WithOrientationCorrection(OrientationCorrection&& value) { SetOrientationCorrection(std::move(value)); return *this;}
/**
* <p>The version number of the face detection model that's associated with the
* input collection (<code>CollectionId</code>).</p>
*/
inline const Aws::String& GetFaceModelVersion() const{ return m_faceModelVersion; }
/**
* <p>The version number of the face detection model that's associated with the
* input collection (<code>CollectionId</code>).</p>
*/
inline void SetFaceModelVersion(const Aws::String& value) { m_faceModelVersion = value; }
/**
* <p>The version number of the face detection model that's associated with the
* input collection (<code>CollectionId</code>).</p>
*/
inline void SetFaceModelVersion(Aws::String&& value) { m_faceModelVersion = std::move(value); }
/**
* <p>The version number of the face detection model that's associated with the
* input collection (<code>CollectionId</code>).</p>
*/
inline void SetFaceModelVersion(const char* value) { m_faceModelVersion.assign(value); }
/**
* <p>The version number of the face detection model that's associated with the
* input collection (<code>CollectionId</code>).</p>
*/
inline IndexFacesResult& WithFaceModelVersion(const Aws::String& value) { SetFaceModelVersion(value); return *this;}
/**
* <p>The version number of the face detection model that's associated with the
* input collection (<code>CollectionId</code>).</p>
*/
inline IndexFacesResult& WithFaceModelVersion(Aws::String&& value) { SetFaceModelVersion(std::move(value)); return *this;}
/**
* <p>The version number of the face detection model that's associated with the
* input collection (<code>CollectionId</code>).</p>
*/
inline IndexFacesResult& WithFaceModelVersion(const char* value) { SetFaceModelVersion(value); return *this;}
/**
* <p>An array of faces that were detected in the image but weren't indexed. They
* weren't indexed because the quality filter identified them as low quality, or
* the <code>MaxFaces</code> request parameter filtered them out. To use the
* quality filter, you specify the <code>QualityFilter</code> request
* parameter.</p>
*/
inline const Aws::Vector<UnindexedFace>& GetUnindexedFaces() const{ return m_unindexedFaces; }
/**
* <p>An array of faces that were detected in the image but weren't indexed. They
* weren't indexed because the quality filter identified them as low quality, or
* the <code>MaxFaces</code> request parameter filtered them out. To use the
* quality filter, you specify the <code>QualityFilter</code> request
* parameter.</p>
*/
inline void SetUnindexedFaces(const Aws::Vector<UnindexedFace>& value) { m_unindexedFaces = value; }
/**
* <p>An array of faces that were detected in the image but weren't indexed. They
* weren't indexed because the quality filter identified them as low quality, or
* the <code>MaxFaces</code> request parameter filtered them out. To use the
* quality filter, you specify the <code>QualityFilter</code> request
* parameter.</p>
*/
inline void SetUnindexedFaces(Aws::Vector<UnindexedFace>&& value) { m_unindexedFaces = std::move(value); }
/**
* <p>An array of faces that were detected in the image but weren't indexed. They
* weren't indexed because the quality filter identified them as low quality, or
* the <code>MaxFaces</code> request parameter filtered them out. To use the
* quality filter, you specify the <code>QualityFilter</code> request
* parameter.</p>
*/
inline IndexFacesResult& WithUnindexedFaces(const Aws::Vector<UnindexedFace>& value) { SetUnindexedFaces(value); return *this;}
/**
* <p>An array of faces that were detected in the image but weren't indexed. They
* weren't indexed because the quality filter identified them as low quality, or
* the <code>MaxFaces</code> request parameter filtered them out. To use the
* quality filter, you specify the <code>QualityFilter</code> request
* parameter.</p>
*/
inline IndexFacesResult& WithUnindexedFaces(Aws::Vector<UnindexedFace>&& value) { SetUnindexedFaces(std::move(value)); return *this;}
/**
* <p>An array of faces that were detected in the image but weren't indexed. They
* weren't indexed because the quality filter identified them as low quality, or
* the <code>MaxFaces</code> request parameter filtered them out. To use the
* quality filter, you specify the <code>QualityFilter</code> request
* parameter.</p>
*/
inline IndexFacesResult& AddUnindexedFaces(const UnindexedFace& value) { m_unindexedFaces.push_back(value); return *this; }
/**
* <p>An array of faces that were detected in the image but weren't indexed. They
* weren't indexed because the quality filter identified them as low quality, or
* the <code>MaxFaces</code> request parameter filtered them out. To use the
* quality filter, you specify the <code>QualityFilter</code> request
* parameter.</p>
*/
inline IndexFacesResult& AddUnindexedFaces(UnindexedFace&& value) { m_unindexedFaces.push_back(std::move(value)); return *this; }
private:
Aws::Vector<FaceRecord> m_faceRecords;
OrientationCorrection m_orientationCorrection;
Aws::String m_faceModelVersion;
Aws::Vector<UnindexedFace> m_unindexedFaces;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
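Finally, a sketch that reads back the result type above, using only accessors declared in this header:

#include <aws/rekognition/model/IndexFacesResult.h>
#include <iostream>

using namespace Aws::Rekognition::Model;

// Sketch: summarize an IndexFaces call without inspecting individual records.
void SummarizeIndexing(const IndexFacesResult& result)
{
    std::cout << "Indexed " << result.GetFaceRecords().size() << " face(s); "
              << "skipped " << result.GetUnindexedFaces().size() << " face(s); "
              << "face model version " << result.GetFaceModelVersion() << '\n';
}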

Some files were not shown because too many files have changed in this diff.