/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/rekognition/Rekognition_EXPORTS.h>
#include <aws/rekognition/model/VideoJobStatus.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/rekognition/model/VideoMetadata.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/rekognition/model/FaceDetection.h>
#include <utility>
namespace Aws
{
template<typename RESULT_TYPE>
class AmazonWebServiceResult;
namespace Utils
{
namespace Json
{
class JsonValue;
} // namespace Json
} // namespace Utils
namespace Rekognition
{
namespace Model
{
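/**
 * <p>Result of the Amazon Rekognition Video <code>GetFaceDetection</code>
 * operation: the status of the face detection job, an optional status message,
 * metadata about the analyzed video, a pagination token, and the detected
 * faces with their timestamps. An illustrative usage sketch appears in the
 * comment at the end of this header.</p>
 */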
class AWS_REKOGNITION_API GetFaceDetectionResult
{
public:
GetFaceDetectionResult();
GetFaceDetectionResult(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
GetFaceDetectionResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
/**
* <p>The current status of the face detection job.</p>
*/
inline const VideoJobStatus& GetJobStatus() const{ return m_jobStatus; }
/**
* <p>The current status of the face detection job.</p>
*/
inline void SetJobStatus(const VideoJobStatus& value) { m_jobStatus = value; }
/**
* <p>The current status of the face detection job.</p>
*/
inline void SetJobStatus(VideoJobStatus&& value) { m_jobStatus = std::move(value); }
/**
* <p>The current status of the face detection job.</p>
*/
inline GetFaceDetectionResult& WithJobStatus(const VideoJobStatus& value) { SetJobStatus(value); return *this;}
/**
* <p>The current status of the face detection job.</p>
*/
inline GetFaceDetectionResult& WithJobStatus(VideoJobStatus&& value) { SetJobStatus(std::move(value)); return *this;}
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline const Aws::String& GetStatusMessage() const{ return m_statusMessage; }
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline void SetStatusMessage(const Aws::String& value) { m_statusMessage = value; }
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline void SetStatusMessage(Aws::String&& value) { m_statusMessage = std::move(value); }
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline void SetStatusMessage(const char* value) { m_statusMessage.assign(value); }
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline GetFaceDetectionResult& WithStatusMessage(const Aws::String& value) { SetStatusMessage(value); return *this;}
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline GetFaceDetectionResult& WithStatusMessage(Aws::String&& value) { SetStatusMessage(std::move(value)); return *this;}
/**
* <p>If the job fails, <code>StatusMessage</code> provides a descriptive error
* message.</p>
*/
inline GetFaceDetectionResult& WithStatusMessage(const char* value) { SetStatusMessage(value); return *this;}
/**
* <p>Information about a video that Amazon Rekognition Video analyzed.
* <code>Videometadata</code> is returned in every page of paginated responses from
 * an Amazon Rekognition video operation.</p>
*/
inline const VideoMetadata& GetVideoMetadata() const{ return m_videoMetadata; }
/**
* <p>Information about a video that Amazon Rekognition Video analyzed.
* <code>Videometadata</code> is returned in every page of paginated responses from
 * an Amazon Rekognition video operation.</p>
*/
inline void SetVideoMetadata(const VideoMetadata& value) { m_videoMetadata = value; }
/**
* <p>Information about a video that Amazon Rekognition Video analyzed.
* <code>Videometadata</code> is returned in every page of paginated responses from
 * an Amazon Rekognition video operation.</p>
*/
inline void SetVideoMetadata(VideoMetadata&& value) { m_videoMetadata = std::move(value); }
/**
* <p>Information about a video that Amazon Rekognition Video analyzed.
* <code>Videometadata</code> is returned in every page of paginated responses from
 * an Amazon Rekognition video operation.</p>
*/
inline GetFaceDetectionResult& WithVideoMetadata(const VideoMetadata& value) { SetVideoMetadata(value); return *this;}
/**
* <p>Information about a video that Amazon Rekognition Video analyzed.
* <code>Videometadata</code> is returned in every page of paginated responses from
 * an Amazon Rekognition video operation.</p>
*/
inline GetFaceDetectionResult& WithVideoMetadata(VideoMetadata&& value) { SetVideoMetadata(std::move(value)); return *this;}
/**
* <p>If the response is truncated, Amazon Rekognition returns this token that you
* can use in the subsequent request to retrieve the next set of faces. </p>
*/
inline const Aws::String& GetNextToken() const{ return m_nextToken; }
/**
* <p>If the response is truncated, Amazon Rekognition returns this token that you
* can use in the subsequent request to retrieve the next set of faces. </p>
*/
inline void SetNextToken(const Aws::String& value) { m_nextToken = value; }
/**
* <p>If the response is truncated, Amazon Rekognition returns this token that you
* can use in the subsequent request to retrieve the next set of faces. </p>
*/
inline void SetNextToken(Aws::String&& value) { m_nextToken = std::move(value); }
/**
* <p>If the response is truncated, Amazon Rekognition returns this token that you
* can use in the subsequent request to retrieve the next set of faces. </p>
*/
inline void SetNextToken(const char* value) { m_nextToken.assign(value); }
/**
* <p>If the response is truncated, Amazon Rekognition returns this token that you
* can use in the subsequent request to retrieve the next set of faces. </p>
*/
inline GetFaceDetectionResult& WithNextToken(const Aws::String& value) { SetNextToken(value); return *this;}
/**
* <p>If the response is truncated, Amazon Rekognition returns this token that you
* can use in the subsequent request to retrieve the next set of faces. </p>
*/
inline GetFaceDetectionResult& WithNextToken(Aws::String&& value) { SetNextToken(std::move(value)); return *this;}
/**
* <p>If the response is truncated, Amazon Rekognition returns this token that you
* can use in the subsequent request to retrieve the next set of faces. </p>
*/
inline GetFaceDetectionResult& WithNextToken(const char* value) { SetNextToken(value); return *this;}
/**
* <p>An array of faces detected in the video. Each element contains a detected
 * face's details and the time, in milliseconds from the start of the video, at
 * which the face was detected.</p>
*/
inline const Aws::Vector<FaceDetection>& GetFaces() const{ return m_faces; }
/**
* <p>An array of faces detected in the video. Each element contains a detected
 * face's details and the time, in milliseconds from the start of the video, at
 * which the face was detected.</p>
*/
inline void SetFaces(const Aws::Vector<FaceDetection>& value) { m_faces = value; }
/**
* <p>An array of faces detected in the video. Each element contains a detected
 * face's details and the time, in milliseconds from the start of the video, at
 * which the face was detected.</p>
*/
inline void SetFaces(Aws::Vector<FaceDetection>&& value) { m_faces = std::move(value); }
/**
* <p>An array of faces detected in the video. Each element contains a detected
 * face's details and the time, in milliseconds from the start of the video, at
 * which the face was detected.</p>
*/
inline GetFaceDetectionResult& WithFaces(const Aws::Vector<FaceDetection>& value) { SetFaces(value); return *this;}
/**
* <p>An array of faces detected in the video. Each element contains a detected
 * face's details and the time, in milliseconds from the start of the video, at
 * which the face was detected.</p>
*/
inline GetFaceDetectionResult& WithFaces(Aws::Vector<FaceDetection>&& value) { SetFaces(std::move(value)); return *this;}
/**
* <p>An array of faces detected in the video. Each element contains a detected
 * face's details and the time, in milliseconds from the start of the video, at
 * which the face was detected.</p>
*/
inline GetFaceDetectionResult& AddFaces(const FaceDetection& value) { m_faces.push_back(value); return *this; }
/**
* <p>An array of faces detected in the video. Each element contains a detected
 * face's details and the time, in milliseconds from the start of the video, at
 * which the face was detected.</p>
*/
inline GetFaceDetectionResult& AddFaces(FaceDetection&& value) { m_faces.push_back(std::move(value)); return *this; }
private:
VideoJobStatus m_jobStatus;
Aws::String m_statusMessage;
VideoMetadata m_videoMetadata;
Aws::String m_nextToken;
Aws::Vector<FaceDetection> m_faces;
};
} // namespace Model
} // namespace Rekognition
} // namespace Aws
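/*
 * Illustrative usage sketch (not part of the generated header): one way a
 * caller might page through GetFaceDetection results with the accessors
 * declared above. The job ID value, the default client configuration, and the
 * printed fields are assumptions made for this example; the sketch only relies
 * on SDK types and calls that exist in aws-sdk-cpp.
 *
 *   #include <aws/core/Aws.h>
 *   #include <aws/rekognition/RekognitionClient.h>
 *   #include <aws/rekognition/model/GetFaceDetectionRequest.h>
 *   #include <iostream>
 *
 *   int main()
 *   {
 *       Aws::SDKOptions options;
 *       Aws::InitAPI(options);
 *       {
 *           Aws::Rekognition::RekognitionClient client;           // default credential chain and region
 *           Aws::String jobId = "job-id-from-StartFaceDetection"; // assumption: obtained from StartFaceDetection
 *           Aws::String nextToken;
 *           do
 *           {
 *               Aws::Rekognition::Model::GetFaceDetectionRequest request;
 *               request.SetJobId(jobId);
 *               if (!nextToken.empty())
 *               {
 *                   request.SetNextToken(nextToken);              // fetch the next page of faces
 *               }
 *               auto outcome = client.GetFaceDetection(request);
 *               if (!outcome.IsSuccess())
 *               {
 *                   std::cerr << outcome.GetError().GetMessage() << std::endl;
 *                   break;
 *               }
 *               const auto& result = outcome.GetResult();
 *               // A production caller would also poll while the status is IN_PROGRESS.
 *               if (result.GetJobStatus() == Aws::Rekognition::Model::VideoJobStatus::FAILED)
 *               {
 *                   std::cerr << "Job failed: " << result.GetStatusMessage() << std::endl;
 *                   break;
 *               }
 *               // Each FaceDetection pairs the detected face's details with the
 *               // time, in milliseconds from the start of the video, at which it
 *               // was detected.
 *               for (const auto& face : result.GetFaces())
 *               {
 *                   std::cout << face.GetTimestamp() << " ms: confidence "
 *                             << face.GetFace().GetConfidence() << std::endl;
 *               }
 *               nextToken = result.GetNextToken();
 *           } while (!nextToken.empty());
 *       }
 *       Aws::ShutdownAPI(options);
 *       return 0;
 *   }
 */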