/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */

#include <aws/core/utils/Outcome.h>
#include <aws/core/auth/AWSAuthSigner.h>
#include <aws/core/client/CoreErrors.h>
#include <aws/core/client/RetryStrategy.h>
#include <aws/core/http/HttpClient.h>
#include <aws/core/http/HttpResponse.h>
#include <aws/core/http/HttpClientFactory.h>
#include <aws/core/auth/AWSCredentialsProviderChain.h>
#include <aws/core/utils/json/JsonSerializer.h>
#include <aws/core/utils/memory/stl/AWSStringStream.h>
#include <aws/core/utils/threading/Executor.h>
#include <aws/core/utils/DNS.h>
#include <aws/core/utils/logging/LogMacros.h>

#include <aws/elastic-inference/ElasticInferenceClient.h>
#include <aws/elastic-inference/ElasticInferenceEndpoint.h>
#include <aws/elastic-inference/ElasticInferenceErrorMarshaller.h>
#include <aws/elastic-inference/model/DescribeAcceleratorOfferingsRequest.h>
#include <aws/elastic-inference/model/DescribeAcceleratorTypesRequest.h>
#include <aws/elastic-inference/model/DescribeAcceleratorsRequest.h>
#include <aws/elastic-inference/model/ListTagsForResourceRequest.h>
#include <aws/elastic-inference/model/TagResourceRequest.h>
#include <aws/elastic-inference/model/UntagResourceRequest.h>

using namespace Aws;
using namespace Aws::Auth;
using namespace Aws::Client;
using namespace Aws::ElasticInference;
using namespace Aws::ElasticInference::Model;
using namespace Aws::Http;
using namespace Aws::Utils::Json;

static const char* SERVICE_NAME = "elastic-inference";
static const char* ALLOCATION_TAG = "ElasticInferenceClient";

ElasticInferenceClient::ElasticInferenceClient(const Client::ClientConfiguration& clientConfiguration) :
  BASECLASS(clientConfiguration,
    Aws::MakeShared<AWSAuthV4Signer>(ALLOCATION_TAG, Aws::MakeShared<DefaultAWSCredentialsProviderChain>(ALLOCATION_TAG),
        SERVICE_NAME, Aws::Region::ComputeSignerRegion(clientConfiguration.region)),
    Aws::MakeShared<ElasticInferenceErrorMarshaller>(ALLOCATION_TAG)),
  m_executor(clientConfiguration.executor)
{
  init(clientConfiguration);
}

ElasticInferenceClient::ElasticInferenceClient(const AWSCredentials& credentials, const Client::ClientConfiguration& clientConfiguration) :
  BASECLASS(clientConfiguration,
    Aws::MakeShared<AWSAuthV4Signer>(ALLOCATION_TAG, Aws::MakeShared<SimpleAWSCredentialsProvider>(ALLOCATION_TAG, credentials),
        SERVICE_NAME, Aws::Region::ComputeSignerRegion(clientConfiguration.region)),
    Aws::MakeShared<ElasticInferenceErrorMarshaller>(ALLOCATION_TAG)),
  m_executor(clientConfiguration.executor)
{
  init(clientConfiguration);
}

ElasticInferenceClient::ElasticInferenceClient(const std::shared_ptr<AWSCredentialsProvider>& credentialsProvider,
  const Client::ClientConfiguration& clientConfiguration) :
  BASECLASS(clientConfiguration,
    Aws::MakeShared<AWSAuthV4Signer>(ALLOCATION_TAG, credentialsProvider,
        SERVICE_NAME, Aws::Region::ComputeSignerRegion(clientConfiguration.region)),
    Aws::MakeShared<ElasticInferenceErrorMarshaller>(ALLOCATION_TAG)),
  m_executor(clientConfiguration.executor)
{
  init(clientConfiguration);
}

ElasticInferenceClient::~ElasticInferenceClient()
{
}

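// Usage sketch (illustrative, not part of the generated client): constructing this
// client with the default credentials provider chain. The region value is a
// placeholder.
//
//   #include <aws/core/Aws.h>
//   #include <aws/elastic-inference/ElasticInferenceClient.h>
//
//   Aws::SDKOptions options;
//   Aws::InitAPI(options);
//   {
//     Aws::Client::ClientConfiguration config;
//     config.region = "us-west-2";  // placeholder region
//     Aws::ElasticInference::ElasticInferenceClient client(config);
//     // ... issue requests with `client` ...
//   }
//   Aws::ShutdownAPI(options);
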
void ElasticInferenceClient::init(const ClientConfiguration& config)
{
  SetServiceClientName("Elastic Inference");
  m_configScheme = SchemeMapper::ToString(config.scheme);
  if (config.endpointOverride.empty())
  {
      m_uri = m_configScheme + "://" + ElasticInferenceEndpoint::ForRegion(config.region, config.useDualStack);
  }
  else
  {
      OverrideEndpoint(config.endpointOverride);
  }
}

void ElasticInferenceClient::OverrideEndpoint(const Aws::String& endpoint)
{
  if (endpoint.compare(0, 7, "http://") == 0 || endpoint.compare(0, 8, "https://") == 0)
  {
      m_uri = endpoint;
  }
  else
  {
      m_uri = m_configScheme + "://" + endpoint;
  }
}

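// Usage sketch (illustrative, not part of the generated client): overriding the
// resolved endpoint, e.g. to target a VPC endpoint or a local test stub. The URLs
// below are placeholders.
//
//   Aws::Client::ClientConfiguration config;
//   config.endpointOverride = "https://my-vpc-endpoint.example.com";
//   Aws::ElasticInference::ElasticInferenceClient client(config);
//
//   // Equivalently, after construction; a bare host picks up the configured scheme:
//   client.OverrideEndpoint("my-test-host.example.com");
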
DescribeAcceleratorOfferingsOutcome ElasticInferenceClient::DescribeAcceleratorOfferings(const DescribeAcceleratorOfferingsRequest& request) const
{
  Aws::Http::URI uri = m_uri;
  Aws::StringStream ss;
  ss << "/describe-accelerator-offerings";
  uri.SetPath(uri.GetPath() + ss.str());
  return DescribeAcceleratorOfferingsOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_POST, Aws::Auth::SIGV4_SIGNER));
}

DescribeAcceleratorOfferingsOutcomeCallable ElasticInferenceClient::DescribeAcceleratorOfferingsCallable(const DescribeAcceleratorOfferingsRequest& request) const
{
  auto task = Aws::MakeShared< std::packaged_task< DescribeAcceleratorOfferingsOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->DescribeAcceleratorOfferings(request); } );
  auto packagedFunction = [task]() { (*task)(); };
  m_executor->Submit(packagedFunction);
  return task->get_future();
}

void ElasticInferenceClient::DescribeAcceleratorOfferingsAsync(const DescribeAcceleratorOfferingsRequest& request, const DescribeAcceleratorOfferingsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
  m_executor->Submit( [this, request, handler, context](){ this->DescribeAcceleratorOfferingsAsyncHelper( request, handler, context ); } );
}

void ElasticInferenceClient::DescribeAcceleratorOfferingsAsyncHelper(const DescribeAcceleratorOfferingsRequest& request, const DescribeAcceleratorOfferingsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
  handler(this, request, DescribeAcceleratorOfferings(request), context);
}

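// Usage sketch (illustrative, not part of the generated client) of the three
// invocation styles generated for each operation: blocking, std::future-based
// (Callable), and callback-based (Async). The request setter name below assumes the
// standard codegen pattern for the required locationType member.
//
//   using namespace Aws::ElasticInference::Model;
//
//   DescribeAcceleratorOfferingsRequest req;
//   req.SetLocationType(LocationType::region);  // assumed setter for the required field
//
//   // Blocking call:
//   auto outcome = client.DescribeAcceleratorOfferings(req);
//   if (outcome.IsSuccess()) { /* use outcome.GetResult() */ }
//
//   // Future-based call:
//   auto future = client.DescribeAcceleratorOfferingsCallable(req);
//   auto futureOutcome = future.get();
//
//   // Callback-based call:
//   client.DescribeAcceleratorOfferingsAsync(req,
//     [](const Aws::ElasticInference::ElasticInferenceClient*,
//        const DescribeAcceleratorOfferingsRequest&,
//        const DescribeAcceleratorOfferingsOutcome& asyncOutcome,
//        const std::shared_ptr<const Aws::Client::AsyncCallerContext>&)
//     {
//       // Handle asyncOutcome.IsSuccess() / asyncOutcome.GetError() here.
//     });
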
DescribeAcceleratorTypesOutcome ElasticInferenceClient::DescribeAcceleratorTypes(const DescribeAcceleratorTypesRequest& request) const
{
  Aws::Http::URI uri = m_uri;
  Aws::StringStream ss;
  ss << "/describe-accelerator-types";
  uri.SetPath(uri.GetPath() + ss.str());
  return DescribeAcceleratorTypesOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER));
}

DescribeAcceleratorTypesOutcomeCallable ElasticInferenceClient::DescribeAcceleratorTypesCallable(const DescribeAcceleratorTypesRequest& request) const
{
  auto task = Aws::MakeShared< std::packaged_task< DescribeAcceleratorTypesOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->DescribeAcceleratorTypes(request); } );
  auto packagedFunction = [task]() { (*task)(); };
  m_executor->Submit(packagedFunction);
  return task->get_future();
}

void ElasticInferenceClient::DescribeAcceleratorTypesAsync(const DescribeAcceleratorTypesRequest& request, const DescribeAcceleratorTypesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
  m_executor->Submit( [this, request, handler, context](){ this->DescribeAcceleratorTypesAsyncHelper( request, handler, context ); } );
}

void ElasticInferenceClient::DescribeAcceleratorTypesAsyncHelper(const DescribeAcceleratorTypesRequest& request, const DescribeAcceleratorTypesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
  handler(this, request, DescribeAcceleratorTypes(request), context);
}

DescribeAcceleratorsOutcome ElasticInferenceClient::DescribeAccelerators(const DescribeAcceleratorsRequest& request) const
{
  Aws::Http::URI uri = m_uri;
  Aws::StringStream ss;
  ss << "/describe-accelerators";
  uri.SetPath(uri.GetPath() + ss.str());
  return DescribeAcceleratorsOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_POST, Aws::Auth::SIGV4_SIGNER));
}

DescribeAcceleratorsOutcomeCallable ElasticInferenceClient::DescribeAcceleratorsCallable(const DescribeAcceleratorsRequest& request) const
{
  auto task = Aws::MakeShared< std::packaged_task< DescribeAcceleratorsOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->DescribeAccelerators(request); } );
  auto packagedFunction = [task]() { (*task)(); };
  m_executor->Submit(packagedFunction);
  return task->get_future();
}

void ElasticInferenceClient::DescribeAcceleratorsAsync(const DescribeAcceleratorsRequest& request, const DescribeAcceleratorsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
  m_executor->Submit( [this, request, handler, context](){ this->DescribeAcceleratorsAsyncHelper( request, handler, context ); } );
}

void ElasticInferenceClient::DescribeAcceleratorsAsyncHelper(const DescribeAcceleratorsRequest& request, const DescribeAcceleratorsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
  handler(this, request, DescribeAccelerators(request), context);
}

ListTagsForResourceOutcome ElasticInferenceClient::ListTagsForResource(const ListTagsForResourceRequest& request) const
{
  if (!request.ResourceArnHasBeenSet())
  {
    AWS_LOGSTREAM_ERROR("ListTagsForResource", "Required field: ResourceArn, is not set");
    return ListTagsForResourceOutcome(Aws::Client::AWSError<ElasticInferenceErrors>(ElasticInferenceErrors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [ResourceArn]", false));
  }
  Aws::Http::URI uri = m_uri;
  Aws::StringStream ss;
  ss << "/tags/";
  ss << request.GetResourceArn();
  uri.SetPath(uri.GetPath() + ss.str());
  return ListTagsForResourceOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER));
}

ListTagsForResourceOutcomeCallable ElasticInferenceClient::ListTagsForResourceCallable(const ListTagsForResourceRequest& request) const
{
  auto task = Aws::MakeShared< std::packaged_task< ListTagsForResourceOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->ListTagsForResource(request); } );
  auto packagedFunction = [task]() { (*task)(); };
  m_executor->Submit(packagedFunction);
  return task->get_future();
}

void ElasticInferenceClient::ListTagsForResourceAsync(const ListTagsForResourceRequest& request, const ListTagsForResourceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
  m_executor->Submit( [this, request, handler, context](){ this->ListTagsForResourceAsyncHelper( request, handler, context ); } );
}

void ElasticInferenceClient::ListTagsForResourceAsyncHelper(const ListTagsForResourceRequest& request, const ListTagsForResourceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
  handler(this, request, ListTagsForResource(request), context);
}

TagResourceOutcome ElasticInferenceClient::TagResource(const TagResourceRequest& request) const
{
  if (!request.ResourceArnHasBeenSet())
  {
    AWS_LOGSTREAM_ERROR("TagResource", "Required field: ResourceArn, is not set");
    return TagResourceOutcome(Aws::Client::AWSError<ElasticInferenceErrors>(ElasticInferenceErrors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [ResourceArn]", false));
  }
  Aws::Http::URI uri = m_uri;
  Aws::StringStream ss;
  ss << "/tags/";
  ss << request.GetResourceArn();
  uri.SetPath(uri.GetPath() + ss.str());
  return TagResourceOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_POST, Aws::Auth::SIGV4_SIGNER));
}

TagResourceOutcomeCallable ElasticInferenceClient::TagResourceCallable(const TagResourceRequest& request) const
{
  auto task = Aws::MakeShared< std::packaged_task< TagResourceOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->TagResource(request); } );
  auto packagedFunction = [task]() { (*task)(); };
  m_executor->Submit(packagedFunction);
  return task->get_future();
}

void ElasticInferenceClient::TagResourceAsync(const TagResourceRequest& request, const TagResourceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
  m_executor->Submit( [this, request, handler, context](){ this->TagResourceAsyncHelper( request, handler, context ); } );
}

void ElasticInferenceClient::TagResourceAsyncHelper(const TagResourceRequest& request, const TagResourceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
  handler(this, request, TagResource(request), context);
}

UntagResourceOutcome ElasticInferenceClient::UntagResource(const UntagResourceRequest& request) const
{
  if (!request.ResourceArnHasBeenSet())
  {
    AWS_LOGSTREAM_ERROR("UntagResource", "Required field: ResourceArn, is not set");
    return UntagResourceOutcome(Aws::Client::AWSError<ElasticInferenceErrors>(ElasticInferenceErrors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [ResourceArn]", false));
  }
  if (!request.TagKeysHasBeenSet())
  {
    AWS_LOGSTREAM_ERROR("UntagResource", "Required field: TagKeys, is not set");
    return UntagResourceOutcome(Aws::Client::AWSError<ElasticInferenceErrors>(ElasticInferenceErrors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [TagKeys]", false));
  }
  Aws::Http::URI uri = m_uri;
  Aws::StringStream ss;
  ss << "/tags/";
  ss << request.GetResourceArn();
  uri.SetPath(uri.GetPath() + ss.str());
  return UntagResourceOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_DELETE, Aws::Auth::SIGV4_SIGNER));
}

UntagResourceOutcomeCallable ElasticInferenceClient::UntagResourceCallable(const UntagResourceRequest& request) const
{
  auto task = Aws::MakeShared< std::packaged_task< UntagResourceOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->UntagResource(request); } );
  auto packagedFunction = [task]() { (*task)(); };
  m_executor->Submit(packagedFunction);
  return task->get_future();
}

void ElasticInferenceClient::UntagResourceAsync(const UntagResourceRequest& request, const UntagResourceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
  m_executor->Submit( [this, request, handler, context](){ this->UntagResourceAsyncHelper( request, handler, context ); } );
}

void ElasticInferenceClient::UntagResourceAsyncHelper(const UntagResourceRequest& request, const UntagResourceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
  handler(this, request, UntagResource(request), context);
}