/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #pragma once #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace Aws { namespace Http { class HttpClient; class HttpClientFactory; } // namespace Http namespace Utils { template< typename R, typename E> class Outcome; namespace Threading { class Executor; } // namespace Threading } // namespace Utils namespace Auth { class AWSCredentials; class AWSCredentialsProvider; } // namespace Auth namespace Client { class RetryStrategy; } // namespace Client namespace SageMaker { namespace Model { class AddTagsRequest; class AssociateTrialComponentRequest; class CreateAlgorithmRequest; class CreateAppRequest; class CreateAutoMLJobRequest; class CreateCodeRepositoryRequest; class CreateCompilationJobRequest; class CreateDomainRequest; class CreateEndpointRequest; class CreateEndpointConfigRequest; class CreateExperimentRequest; class CreateFlowDefinitionRequest; class CreateHumanTaskUiRequest; class CreateHyperParameterTuningJobRequest; class CreateLabelingJobRequest; class CreateModelRequest; class CreateModelPackageRequest; class CreateMonitoringScheduleRequest; class CreateNotebookInstanceRequest; class CreateNotebookInstanceLifecycleConfigRequest; class CreatePresignedDomainUrlRequest; class CreatePresignedNotebookInstanceUrlRequest; class CreateProcessingJobRequest; class CreateTrainingJobRequest; class CreateTransformJobRequest; class CreateTrialRequest; class CreateTrialComponentRequest; class CreateUserProfileRequest; class CreateWorkforceRequest; class CreateWorkteamRequest; class DeleteAlgorithmRequest; class DeleteAppRequest; class DeleteCodeRepositoryRequest; class DeleteDomainRequest; class DeleteEndpointRequest; class DeleteEndpointConfigRequest; class DeleteExperimentRequest; class DeleteFlowDefinitionRequest; class DeleteHumanTaskUiRequest; class DeleteModelRequest; class DeleteModelPackageRequest; class DeleteMonitoringScheduleRequest; class DeleteNotebookInstanceRequest; class DeleteNotebookInstanceLifecycleConfigRequest; class DeleteTagsRequest; class DeleteTrialRequest; class DeleteTrialComponentRequest; class DeleteUserProfileRequest; class DeleteWorkforceRequest; class DeleteWorkteamRequest; class DescribeAlgorithmRequest; class DescribeAppRequest; class DescribeAutoMLJobRequest; class DescribeCodeRepositoryRequest; class DescribeCompilationJobRequest; class DescribeDomainRequest; class 
DescribeEndpointRequest; class DescribeEndpointConfigRequest; class DescribeExperimentRequest; class DescribeFlowDefinitionRequest; class DescribeHumanTaskUiRequest; class DescribeHyperParameterTuningJobRequest; class DescribeLabelingJobRequest; class DescribeModelRequest; class DescribeModelPackageRequest; class DescribeMonitoringScheduleRequest; class DescribeNotebookInstanceRequest; class DescribeNotebookInstanceLifecycleConfigRequest; class DescribeProcessingJobRequest; class DescribeSubscribedWorkteamRequest; class DescribeTrainingJobRequest; class DescribeTransformJobRequest; class DescribeTrialRequest; class DescribeTrialComponentRequest; class DescribeUserProfileRequest; class DescribeWorkforceRequest; class DescribeWorkteamRequest; class DisassociateTrialComponentRequest; class GetSearchSuggestionsRequest; class ListAlgorithmsRequest; class ListAppsRequest; class ListAutoMLJobsRequest; class ListCandidatesForAutoMLJobRequest; class ListCodeRepositoriesRequest; class ListCompilationJobsRequest; class ListDomainsRequest; class ListEndpointConfigsRequest; class ListEndpointsRequest; class ListExperimentsRequest; class ListFlowDefinitionsRequest; class ListHumanTaskUisRequest; class ListHyperParameterTuningJobsRequest; class ListLabelingJobsRequest; class ListLabelingJobsForWorkteamRequest; class ListModelPackagesRequest; class ListModelsRequest; class ListMonitoringExecutionsRequest; class ListMonitoringSchedulesRequest; class ListNotebookInstanceLifecycleConfigsRequest; class ListNotebookInstancesRequest; class ListProcessingJobsRequest; class ListSubscribedWorkteamsRequest; class ListTagsRequest; class ListTrainingJobsRequest; class ListTrainingJobsForHyperParameterTuningJobRequest; class ListTransformJobsRequest; class ListTrialComponentsRequest; class ListTrialsRequest; class ListUserProfilesRequest; class ListWorkforcesRequest; class ListWorkteamsRequest; class RenderUiTemplateRequest; class SearchRequest; class StartMonitoringScheduleRequest; class StartNotebookInstanceRequest; class StopAutoMLJobRequest; class StopCompilationJobRequest; class StopHyperParameterTuningJobRequest; class StopLabelingJobRequest; class StopMonitoringScheduleRequest; class StopNotebookInstanceRequest; class StopProcessingJobRequest; class StopTrainingJobRequest; class StopTransformJobRequest; class UpdateCodeRepositoryRequest; class UpdateDomainRequest; class UpdateEndpointRequest; class UpdateEndpointWeightsAndCapacitiesRequest; class UpdateExperimentRequest; class UpdateMonitoringScheduleRequest; class UpdateNotebookInstanceRequest; class UpdateNotebookInstanceLifecycleConfigRequest; class UpdateTrialRequest; class UpdateTrialComponentRequest; class UpdateUserProfileRequest; class UpdateWorkforceRequest; class UpdateWorkteamRequest; typedef Aws::Utils::Outcome AddTagsOutcome; typedef Aws::Utils::Outcome AssociateTrialComponentOutcome; typedef Aws::Utils::Outcome CreateAlgorithmOutcome; typedef Aws::Utils::Outcome CreateAppOutcome; typedef Aws::Utils::Outcome CreateAutoMLJobOutcome; typedef Aws::Utils::Outcome CreateCodeRepositoryOutcome; typedef Aws::Utils::Outcome CreateCompilationJobOutcome; typedef Aws::Utils::Outcome CreateDomainOutcome; typedef Aws::Utils::Outcome CreateEndpointOutcome; typedef Aws::Utils::Outcome CreateEndpointConfigOutcome; typedef Aws::Utils::Outcome CreateExperimentOutcome; typedef Aws::Utils::Outcome CreateFlowDefinitionOutcome; typedef Aws::Utils::Outcome CreateHumanTaskUiOutcome; typedef Aws::Utils::Outcome CreateHyperParameterTuningJobOutcome; typedef 
Aws::Utils::Outcome CreateLabelingJobOutcome; typedef Aws::Utils::Outcome CreateModelOutcome; typedef Aws::Utils::Outcome CreateModelPackageOutcome; typedef Aws::Utils::Outcome CreateMonitoringScheduleOutcome; typedef Aws::Utils::Outcome CreateNotebookInstanceOutcome; typedef Aws::Utils::Outcome CreateNotebookInstanceLifecycleConfigOutcome; typedef Aws::Utils::Outcome CreatePresignedDomainUrlOutcome; typedef Aws::Utils::Outcome CreatePresignedNotebookInstanceUrlOutcome; typedef Aws::Utils::Outcome CreateProcessingJobOutcome; typedef Aws::Utils::Outcome CreateTrainingJobOutcome; typedef Aws::Utils::Outcome CreateTransformJobOutcome; typedef Aws::Utils::Outcome CreateTrialOutcome; typedef Aws::Utils::Outcome CreateTrialComponentOutcome; typedef Aws::Utils::Outcome CreateUserProfileOutcome; typedef Aws::Utils::Outcome CreateWorkforceOutcome; typedef Aws::Utils::Outcome CreateWorkteamOutcome; typedef Aws::Utils::Outcome DeleteAlgorithmOutcome; typedef Aws::Utils::Outcome DeleteAppOutcome; typedef Aws::Utils::Outcome DeleteCodeRepositoryOutcome; typedef Aws::Utils::Outcome DeleteDomainOutcome; typedef Aws::Utils::Outcome DeleteEndpointOutcome; typedef Aws::Utils::Outcome DeleteEndpointConfigOutcome; typedef Aws::Utils::Outcome DeleteExperimentOutcome; typedef Aws::Utils::Outcome DeleteFlowDefinitionOutcome; typedef Aws::Utils::Outcome DeleteHumanTaskUiOutcome; typedef Aws::Utils::Outcome DeleteModelOutcome; typedef Aws::Utils::Outcome DeleteModelPackageOutcome; typedef Aws::Utils::Outcome DeleteMonitoringScheduleOutcome; typedef Aws::Utils::Outcome DeleteNotebookInstanceOutcome; typedef Aws::Utils::Outcome DeleteNotebookInstanceLifecycleConfigOutcome; typedef Aws::Utils::Outcome DeleteTagsOutcome; typedef Aws::Utils::Outcome DeleteTrialOutcome; typedef Aws::Utils::Outcome DeleteTrialComponentOutcome; typedef Aws::Utils::Outcome DeleteUserProfileOutcome; typedef Aws::Utils::Outcome DeleteWorkforceOutcome; typedef Aws::Utils::Outcome DeleteWorkteamOutcome; typedef Aws::Utils::Outcome DescribeAlgorithmOutcome; typedef Aws::Utils::Outcome DescribeAppOutcome; typedef Aws::Utils::Outcome DescribeAutoMLJobOutcome; typedef Aws::Utils::Outcome DescribeCodeRepositoryOutcome; typedef Aws::Utils::Outcome DescribeCompilationJobOutcome; typedef Aws::Utils::Outcome DescribeDomainOutcome; typedef Aws::Utils::Outcome DescribeEndpointOutcome; typedef Aws::Utils::Outcome DescribeEndpointConfigOutcome; typedef Aws::Utils::Outcome DescribeExperimentOutcome; typedef Aws::Utils::Outcome DescribeFlowDefinitionOutcome; typedef Aws::Utils::Outcome DescribeHumanTaskUiOutcome; typedef Aws::Utils::Outcome DescribeHyperParameterTuningJobOutcome; typedef Aws::Utils::Outcome DescribeLabelingJobOutcome; typedef Aws::Utils::Outcome DescribeModelOutcome; typedef Aws::Utils::Outcome DescribeModelPackageOutcome; typedef Aws::Utils::Outcome DescribeMonitoringScheduleOutcome; typedef Aws::Utils::Outcome DescribeNotebookInstanceOutcome; typedef Aws::Utils::Outcome DescribeNotebookInstanceLifecycleConfigOutcome; typedef Aws::Utils::Outcome DescribeProcessingJobOutcome; typedef Aws::Utils::Outcome DescribeSubscribedWorkteamOutcome; typedef Aws::Utils::Outcome DescribeTrainingJobOutcome; typedef Aws::Utils::Outcome DescribeTransformJobOutcome; typedef Aws::Utils::Outcome DescribeTrialOutcome; typedef Aws::Utils::Outcome DescribeTrialComponentOutcome; typedef Aws::Utils::Outcome DescribeUserProfileOutcome; typedef Aws::Utils::Outcome DescribeWorkforceOutcome; typedef Aws::Utils::Outcome DescribeWorkteamOutcome; typedef 
Aws::Utils::Outcome DisassociateTrialComponentOutcome; typedef Aws::Utils::Outcome GetSearchSuggestionsOutcome; typedef Aws::Utils::Outcome ListAlgorithmsOutcome; typedef Aws::Utils::Outcome ListAppsOutcome; typedef Aws::Utils::Outcome ListAutoMLJobsOutcome; typedef Aws::Utils::Outcome ListCandidatesForAutoMLJobOutcome; typedef Aws::Utils::Outcome ListCodeRepositoriesOutcome; typedef Aws::Utils::Outcome ListCompilationJobsOutcome; typedef Aws::Utils::Outcome ListDomainsOutcome; typedef Aws::Utils::Outcome ListEndpointConfigsOutcome; typedef Aws::Utils::Outcome ListEndpointsOutcome; typedef Aws::Utils::Outcome ListExperimentsOutcome; typedef Aws::Utils::Outcome ListFlowDefinitionsOutcome; typedef Aws::Utils::Outcome ListHumanTaskUisOutcome; typedef Aws::Utils::Outcome ListHyperParameterTuningJobsOutcome; typedef Aws::Utils::Outcome ListLabelingJobsOutcome; typedef Aws::Utils::Outcome ListLabelingJobsForWorkteamOutcome; typedef Aws::Utils::Outcome ListModelPackagesOutcome; typedef Aws::Utils::Outcome ListModelsOutcome; typedef Aws::Utils::Outcome ListMonitoringExecutionsOutcome; typedef Aws::Utils::Outcome ListMonitoringSchedulesOutcome; typedef Aws::Utils::Outcome ListNotebookInstanceLifecycleConfigsOutcome; typedef Aws::Utils::Outcome ListNotebookInstancesOutcome; typedef Aws::Utils::Outcome ListProcessingJobsOutcome; typedef Aws::Utils::Outcome ListSubscribedWorkteamsOutcome; typedef Aws::Utils::Outcome ListTagsOutcome; typedef Aws::Utils::Outcome ListTrainingJobsOutcome; typedef Aws::Utils::Outcome ListTrainingJobsForHyperParameterTuningJobOutcome; typedef Aws::Utils::Outcome ListTransformJobsOutcome; typedef Aws::Utils::Outcome ListTrialComponentsOutcome; typedef Aws::Utils::Outcome ListTrialsOutcome; typedef Aws::Utils::Outcome ListUserProfilesOutcome; typedef Aws::Utils::Outcome ListWorkforcesOutcome; typedef Aws::Utils::Outcome ListWorkteamsOutcome; typedef Aws::Utils::Outcome RenderUiTemplateOutcome; typedef Aws::Utils::Outcome SearchOutcome; typedef Aws::Utils::Outcome StartMonitoringScheduleOutcome; typedef Aws::Utils::Outcome StartNotebookInstanceOutcome; typedef Aws::Utils::Outcome StopAutoMLJobOutcome; typedef Aws::Utils::Outcome StopCompilationJobOutcome; typedef Aws::Utils::Outcome StopHyperParameterTuningJobOutcome; typedef Aws::Utils::Outcome StopLabelingJobOutcome; typedef Aws::Utils::Outcome StopMonitoringScheduleOutcome; typedef Aws::Utils::Outcome StopNotebookInstanceOutcome; typedef Aws::Utils::Outcome StopProcessingJobOutcome; typedef Aws::Utils::Outcome StopTrainingJobOutcome; typedef Aws::Utils::Outcome StopTransformJobOutcome; typedef Aws::Utils::Outcome UpdateCodeRepositoryOutcome; typedef Aws::Utils::Outcome UpdateDomainOutcome; typedef Aws::Utils::Outcome UpdateEndpointOutcome; typedef Aws::Utils::Outcome UpdateEndpointWeightsAndCapacitiesOutcome; typedef Aws::Utils::Outcome UpdateExperimentOutcome; typedef Aws::Utils::Outcome UpdateMonitoringScheduleOutcome; typedef Aws::Utils::Outcome UpdateNotebookInstanceOutcome; typedef Aws::Utils::Outcome UpdateNotebookInstanceLifecycleConfigOutcome; typedef Aws::Utils::Outcome UpdateTrialOutcome; typedef Aws::Utils::Outcome UpdateTrialComponentOutcome; typedef Aws::Utils::Outcome UpdateUserProfileOutcome; typedef Aws::Utils::Outcome UpdateWorkforceOutcome; typedef Aws::Utils::Outcome UpdateWorkteamOutcome; typedef std::future AddTagsOutcomeCallable; typedef std::future AssociateTrialComponentOutcomeCallable; typedef std::future CreateAlgorithmOutcomeCallable; typedef std::future CreateAppOutcomeCallable; typedef 
std::future CreateAutoMLJobOutcomeCallable; typedef std::future CreateCodeRepositoryOutcomeCallable; typedef std::future CreateCompilationJobOutcomeCallable; typedef std::future CreateDomainOutcomeCallable; typedef std::future CreateEndpointOutcomeCallable; typedef std::future CreateEndpointConfigOutcomeCallable; typedef std::future CreateExperimentOutcomeCallable; typedef std::future CreateFlowDefinitionOutcomeCallable; typedef std::future CreateHumanTaskUiOutcomeCallable; typedef std::future CreateHyperParameterTuningJobOutcomeCallable; typedef std::future CreateLabelingJobOutcomeCallable; typedef std::future CreateModelOutcomeCallable; typedef std::future CreateModelPackageOutcomeCallable; typedef std::future CreateMonitoringScheduleOutcomeCallable; typedef std::future CreateNotebookInstanceOutcomeCallable; typedef std::future CreateNotebookInstanceLifecycleConfigOutcomeCallable; typedef std::future CreatePresignedDomainUrlOutcomeCallable; typedef std::future CreatePresignedNotebookInstanceUrlOutcomeCallable; typedef std::future CreateProcessingJobOutcomeCallable; typedef std::future CreateTrainingJobOutcomeCallable; typedef std::future CreateTransformJobOutcomeCallable; typedef std::future CreateTrialOutcomeCallable; typedef std::future CreateTrialComponentOutcomeCallable; typedef std::future CreateUserProfileOutcomeCallable; typedef std::future CreateWorkforceOutcomeCallable; typedef std::future CreateWorkteamOutcomeCallable; typedef std::future DeleteAlgorithmOutcomeCallable; typedef std::future DeleteAppOutcomeCallable; typedef std::future DeleteCodeRepositoryOutcomeCallable; typedef std::future DeleteDomainOutcomeCallable; typedef std::future DeleteEndpointOutcomeCallable; typedef std::future DeleteEndpointConfigOutcomeCallable; typedef std::future DeleteExperimentOutcomeCallable; typedef std::future DeleteFlowDefinitionOutcomeCallable; typedef std::future DeleteHumanTaskUiOutcomeCallable; typedef std::future DeleteModelOutcomeCallable; typedef std::future DeleteModelPackageOutcomeCallable; typedef std::future DeleteMonitoringScheduleOutcomeCallable; typedef std::future DeleteNotebookInstanceOutcomeCallable; typedef std::future DeleteNotebookInstanceLifecycleConfigOutcomeCallable; typedef std::future DeleteTagsOutcomeCallable; typedef std::future DeleteTrialOutcomeCallable; typedef std::future DeleteTrialComponentOutcomeCallable; typedef std::future DeleteUserProfileOutcomeCallable; typedef std::future DeleteWorkforceOutcomeCallable; typedef std::future DeleteWorkteamOutcomeCallable; typedef std::future DescribeAlgorithmOutcomeCallable; typedef std::future DescribeAppOutcomeCallable; typedef std::future DescribeAutoMLJobOutcomeCallable; typedef std::future DescribeCodeRepositoryOutcomeCallable; typedef std::future DescribeCompilationJobOutcomeCallable; typedef std::future DescribeDomainOutcomeCallable; typedef std::future DescribeEndpointOutcomeCallable; typedef std::future DescribeEndpointConfigOutcomeCallable; typedef std::future DescribeExperimentOutcomeCallable; typedef std::future DescribeFlowDefinitionOutcomeCallable; typedef std::future DescribeHumanTaskUiOutcomeCallable; typedef std::future DescribeHyperParameterTuningJobOutcomeCallable; typedef std::future DescribeLabelingJobOutcomeCallable; typedef std::future DescribeModelOutcomeCallable; typedef std::future DescribeModelPackageOutcomeCallable; typedef std::future DescribeMonitoringScheduleOutcomeCallable; typedef std::future DescribeNotebookInstanceOutcomeCallable; typedef std::future 
DescribeNotebookInstanceLifecycleConfigOutcomeCallable; typedef std::future DescribeProcessingJobOutcomeCallable; typedef std::future DescribeSubscribedWorkteamOutcomeCallable; typedef std::future DescribeTrainingJobOutcomeCallable; typedef std::future DescribeTransformJobOutcomeCallable; typedef std::future DescribeTrialOutcomeCallable; typedef std::future DescribeTrialComponentOutcomeCallable; typedef std::future DescribeUserProfileOutcomeCallable; typedef std::future DescribeWorkforceOutcomeCallable; typedef std::future DescribeWorkteamOutcomeCallable; typedef std::future DisassociateTrialComponentOutcomeCallable; typedef std::future GetSearchSuggestionsOutcomeCallable; typedef std::future ListAlgorithmsOutcomeCallable; typedef std::future ListAppsOutcomeCallable; typedef std::future ListAutoMLJobsOutcomeCallable; typedef std::future ListCandidatesForAutoMLJobOutcomeCallable; typedef std::future ListCodeRepositoriesOutcomeCallable; typedef std::future ListCompilationJobsOutcomeCallable; typedef std::future ListDomainsOutcomeCallable; typedef std::future ListEndpointConfigsOutcomeCallable; typedef std::future ListEndpointsOutcomeCallable; typedef std::future ListExperimentsOutcomeCallable; typedef std::future ListFlowDefinitionsOutcomeCallable; typedef std::future ListHumanTaskUisOutcomeCallable; typedef std::future ListHyperParameterTuningJobsOutcomeCallable; typedef std::future ListLabelingJobsOutcomeCallable; typedef std::future ListLabelingJobsForWorkteamOutcomeCallable; typedef std::future ListModelPackagesOutcomeCallable; typedef std::future ListModelsOutcomeCallable; typedef std::future ListMonitoringExecutionsOutcomeCallable; typedef std::future ListMonitoringSchedulesOutcomeCallable; typedef std::future ListNotebookInstanceLifecycleConfigsOutcomeCallable; typedef std::future ListNotebookInstancesOutcomeCallable; typedef std::future ListProcessingJobsOutcomeCallable; typedef std::future ListSubscribedWorkteamsOutcomeCallable; typedef std::future ListTagsOutcomeCallable; typedef std::future ListTrainingJobsOutcomeCallable; typedef std::future ListTrainingJobsForHyperParameterTuningJobOutcomeCallable; typedef std::future ListTransformJobsOutcomeCallable; typedef std::future ListTrialComponentsOutcomeCallable; typedef std::future ListTrialsOutcomeCallable; typedef std::future ListUserProfilesOutcomeCallable; typedef std::future ListWorkforcesOutcomeCallable; typedef std::future ListWorkteamsOutcomeCallable; typedef std::future RenderUiTemplateOutcomeCallable; typedef std::future SearchOutcomeCallable; typedef std::future StartMonitoringScheduleOutcomeCallable; typedef std::future StartNotebookInstanceOutcomeCallable; typedef std::future StopAutoMLJobOutcomeCallable; typedef std::future StopCompilationJobOutcomeCallable; typedef std::future StopHyperParameterTuningJobOutcomeCallable; typedef std::future StopLabelingJobOutcomeCallable; typedef std::future StopMonitoringScheduleOutcomeCallable; typedef std::future StopNotebookInstanceOutcomeCallable; typedef std::future StopProcessingJobOutcomeCallable; typedef std::future StopTrainingJobOutcomeCallable; typedef std::future StopTransformJobOutcomeCallable; typedef std::future UpdateCodeRepositoryOutcomeCallable; typedef std::future UpdateDomainOutcomeCallable; typedef std::future UpdateEndpointOutcomeCallable; typedef std::future UpdateEndpointWeightsAndCapacitiesOutcomeCallable; typedef std::future UpdateExperimentOutcomeCallable; typedef std::future UpdateMonitoringScheduleOutcomeCallable; typedef std::future 
UpdateNotebookInstanceOutcomeCallable; typedef std::future UpdateNotebookInstanceLifecycleConfigOutcomeCallable; typedef std::future UpdateTrialOutcomeCallable; typedef std::future UpdateTrialComponentOutcomeCallable; typedef std::future UpdateUserProfileOutcomeCallable; typedef std::future UpdateWorkforceOutcomeCallable; typedef std::future UpdateWorkteamOutcomeCallable; } // namespace Model class SageMakerClient; typedef std::function&) > AddTagsResponseReceivedHandler; typedef std::function&) > AssociateTrialComponentResponseReceivedHandler; typedef std::function&) > CreateAlgorithmResponseReceivedHandler; typedef std::function&) > CreateAppResponseReceivedHandler; typedef std::function&) > CreateAutoMLJobResponseReceivedHandler; typedef std::function&) > CreateCodeRepositoryResponseReceivedHandler; typedef std::function&) > CreateCompilationJobResponseReceivedHandler; typedef std::function&) > CreateDomainResponseReceivedHandler; typedef std::function&) > CreateEndpointResponseReceivedHandler; typedef std::function&) > CreateEndpointConfigResponseReceivedHandler; typedef std::function&) > CreateExperimentResponseReceivedHandler; typedef std::function&) > CreateFlowDefinitionResponseReceivedHandler; typedef std::function&) > CreateHumanTaskUiResponseReceivedHandler; typedef std::function&) > CreateHyperParameterTuningJobResponseReceivedHandler; typedef std::function&) > CreateLabelingJobResponseReceivedHandler; typedef std::function&) > CreateModelResponseReceivedHandler; typedef std::function&) > CreateModelPackageResponseReceivedHandler; typedef std::function&) > CreateMonitoringScheduleResponseReceivedHandler; typedef std::function&) > CreateNotebookInstanceResponseReceivedHandler; typedef std::function&) > CreateNotebookInstanceLifecycleConfigResponseReceivedHandler; typedef std::function&) > CreatePresignedDomainUrlResponseReceivedHandler; typedef std::function&) > CreatePresignedNotebookInstanceUrlResponseReceivedHandler; typedef std::function&) > CreateProcessingJobResponseReceivedHandler; typedef std::function&) > CreateTrainingJobResponseReceivedHandler; typedef std::function&) > CreateTransformJobResponseReceivedHandler; typedef std::function&) > CreateTrialResponseReceivedHandler; typedef std::function&) > CreateTrialComponentResponseReceivedHandler; typedef std::function&) > CreateUserProfileResponseReceivedHandler; typedef std::function&) > CreateWorkforceResponseReceivedHandler; typedef std::function&) > CreateWorkteamResponseReceivedHandler; typedef std::function&) > DeleteAlgorithmResponseReceivedHandler; typedef std::function&) > DeleteAppResponseReceivedHandler; typedef std::function&) > DeleteCodeRepositoryResponseReceivedHandler; typedef std::function&) > DeleteDomainResponseReceivedHandler; typedef std::function&) > DeleteEndpointResponseReceivedHandler; typedef std::function&) > DeleteEndpointConfigResponseReceivedHandler; typedef std::function&) > DeleteExperimentResponseReceivedHandler; typedef std::function&) > DeleteFlowDefinitionResponseReceivedHandler; typedef std::function&) > DeleteHumanTaskUiResponseReceivedHandler; typedef std::function&) > DeleteModelResponseReceivedHandler; typedef std::function&) > DeleteModelPackageResponseReceivedHandler; typedef std::function&) > DeleteMonitoringScheduleResponseReceivedHandler; typedef std::function&) > DeleteNotebookInstanceResponseReceivedHandler; typedef std::function&) > DeleteNotebookInstanceLifecycleConfigResponseReceivedHandler; typedef std::function&) > DeleteTagsResponseReceivedHandler; typedef 
std::function&) > DeleteTrialResponseReceivedHandler; typedef std::function&) > DeleteTrialComponentResponseReceivedHandler; typedef std::function&) > DeleteUserProfileResponseReceivedHandler; typedef std::function&) > DeleteWorkforceResponseReceivedHandler; typedef std::function&) > DeleteWorkteamResponseReceivedHandler; typedef std::function&) > DescribeAlgorithmResponseReceivedHandler; typedef std::function&) > DescribeAppResponseReceivedHandler; typedef std::function&) > DescribeAutoMLJobResponseReceivedHandler; typedef std::function&) > DescribeCodeRepositoryResponseReceivedHandler; typedef std::function&) > DescribeCompilationJobResponseReceivedHandler; typedef std::function&) > DescribeDomainResponseReceivedHandler; typedef std::function&) > DescribeEndpointResponseReceivedHandler; typedef std::function&) > DescribeEndpointConfigResponseReceivedHandler; typedef std::function&) > DescribeExperimentResponseReceivedHandler; typedef std::function&) > DescribeFlowDefinitionResponseReceivedHandler; typedef std::function&) > DescribeHumanTaskUiResponseReceivedHandler; typedef std::function&) > DescribeHyperParameterTuningJobResponseReceivedHandler; typedef std::function&) > DescribeLabelingJobResponseReceivedHandler; typedef std::function&) > DescribeModelResponseReceivedHandler; typedef std::function&) > DescribeModelPackageResponseReceivedHandler; typedef std::function&) > DescribeMonitoringScheduleResponseReceivedHandler; typedef std::function&) > DescribeNotebookInstanceResponseReceivedHandler; typedef std::function&) > DescribeNotebookInstanceLifecycleConfigResponseReceivedHandler; typedef std::function&) > DescribeProcessingJobResponseReceivedHandler; typedef std::function&) > DescribeSubscribedWorkteamResponseReceivedHandler; typedef std::function&) > DescribeTrainingJobResponseReceivedHandler; typedef std::function&) > DescribeTransformJobResponseReceivedHandler; typedef std::function&) > DescribeTrialResponseReceivedHandler; typedef std::function&) > DescribeTrialComponentResponseReceivedHandler; typedef std::function&) > DescribeUserProfileResponseReceivedHandler; typedef std::function&) > DescribeWorkforceResponseReceivedHandler; typedef std::function&) > DescribeWorkteamResponseReceivedHandler; typedef std::function&) > DisassociateTrialComponentResponseReceivedHandler; typedef std::function&) > GetSearchSuggestionsResponseReceivedHandler; typedef std::function&) > ListAlgorithmsResponseReceivedHandler; typedef std::function&) > ListAppsResponseReceivedHandler; typedef std::function&) > ListAutoMLJobsResponseReceivedHandler; typedef std::function&) > ListCandidatesForAutoMLJobResponseReceivedHandler; typedef std::function&) > ListCodeRepositoriesResponseReceivedHandler; typedef std::function&) > ListCompilationJobsResponseReceivedHandler; typedef std::function&) > ListDomainsResponseReceivedHandler; typedef std::function&) > ListEndpointConfigsResponseReceivedHandler; typedef std::function&) > ListEndpointsResponseReceivedHandler; typedef std::function&) > ListExperimentsResponseReceivedHandler; typedef std::function&) > ListFlowDefinitionsResponseReceivedHandler; typedef std::function&) > ListHumanTaskUisResponseReceivedHandler; typedef std::function&) > ListHyperParameterTuningJobsResponseReceivedHandler; typedef std::function&) > ListLabelingJobsResponseReceivedHandler; typedef std::function&) > ListLabelingJobsForWorkteamResponseReceivedHandler; typedef std::function&) > ListModelPackagesResponseReceivedHandler; typedef std::function&) > ListModelsResponseReceivedHandler; 
typedef std::function&) > ListMonitoringExecutionsResponseReceivedHandler; typedef std::function&) > ListMonitoringSchedulesResponseReceivedHandler; typedef std::function&) > ListNotebookInstanceLifecycleConfigsResponseReceivedHandler; typedef std::function&) > ListNotebookInstancesResponseReceivedHandler; typedef std::function&) > ListProcessingJobsResponseReceivedHandler; typedef std::function&) > ListSubscribedWorkteamsResponseReceivedHandler; typedef std::function&) > ListTagsResponseReceivedHandler; typedef std::function&) > ListTrainingJobsResponseReceivedHandler; typedef std::function&) > ListTrainingJobsForHyperParameterTuningJobResponseReceivedHandler; typedef std::function&) > ListTransformJobsResponseReceivedHandler; typedef std::function&) > ListTrialComponentsResponseReceivedHandler; typedef std::function&) > ListTrialsResponseReceivedHandler; typedef std::function&) > ListUserProfilesResponseReceivedHandler; typedef std::function&) > ListWorkforcesResponseReceivedHandler; typedef std::function&) > ListWorkteamsResponseReceivedHandler; typedef std::function&) > RenderUiTemplateResponseReceivedHandler; typedef std::function&) > SearchResponseReceivedHandler; typedef std::function&) > StartMonitoringScheduleResponseReceivedHandler; typedef std::function&) > StartNotebookInstanceResponseReceivedHandler; typedef std::function&) > StopAutoMLJobResponseReceivedHandler; typedef std::function&) > StopCompilationJobResponseReceivedHandler; typedef std::function&) > StopHyperParameterTuningJobResponseReceivedHandler; typedef std::function&) > StopLabelingJobResponseReceivedHandler; typedef std::function&) > StopMonitoringScheduleResponseReceivedHandler; typedef std::function&) > StopNotebookInstanceResponseReceivedHandler; typedef std::function&) > StopProcessingJobResponseReceivedHandler; typedef std::function&) > StopTrainingJobResponseReceivedHandler; typedef std::function&) > StopTransformJobResponseReceivedHandler; typedef std::function&) > UpdateCodeRepositoryResponseReceivedHandler; typedef std::function&) > UpdateDomainResponseReceivedHandler; typedef std::function&) > UpdateEndpointResponseReceivedHandler; typedef std::function&) > UpdateEndpointWeightsAndCapacitiesResponseReceivedHandler; typedef std::function&) > UpdateExperimentResponseReceivedHandler; typedef std::function&) > UpdateMonitoringScheduleResponseReceivedHandler; typedef std::function&) > UpdateNotebookInstanceResponseReceivedHandler; typedef std::function&) > UpdateNotebookInstanceLifecycleConfigResponseReceivedHandler; typedef std::function&) > UpdateTrialResponseReceivedHandler; typedef std::function&) > UpdateTrialComponentResponseReceivedHandler; typedef std::function&) > UpdateUserProfileResponseReceivedHandler; typedef std::function&) > UpdateWorkforceResponseReceivedHandler; typedef std::function&) > UpdateWorkteamResponseReceivedHandler; /** *

Provides APIs for creating and managing Amazon SageMaker resources.

*

Other Resources:

*/
  class AWS_SAGEMAKER_API SageMakerClient : public Aws::Client::AWSJsonClient
  {
   public:
    typedef Aws::Client::AWSJsonClient BASECLASS;

    /**
     * Initializes client to use DefaultCredentialProviderChain, with default http client factory,
     * and optional client config. If client config is not specified, it will be initialized to default values.
     */
    SageMakerClient(const Aws::Client::ClientConfiguration& clientConfiguration = Aws::Client::ClientConfiguration());

    /**
     * Initializes client to use SimpleAWSCredentialsProvider, with default http client factory,
     * and optional client config. If client config is not specified, it will be initialized to default values.
     */
    SageMakerClient(const Aws::Auth::AWSCredentials& credentials, const Aws::Client::ClientConfiguration& clientConfiguration = Aws::Client::ClientConfiguration());

    /**
     * Initializes client to use the specified credentials provider with the specified client config.
     * If an http client factory is not supplied, the default http client factory will be used.
     */
    SageMakerClient(const std::shared_ptr<Aws::Auth::AWSCredentialsProvider>& credentialsProvider,
                    const Aws::Client::ClientConfiguration& clientConfiguration = Aws::Client::ClientConfiguration());

    virtual ~SageMakerClient();

    /**

Adds or overwrites one or more tags for the specified Amazon SageMaker resource. You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints.

Each tag consists of a key and an optional value. Tag keys must be unique per resource. For more information about tags, see AWS Tagging Strategies.

Tags that you add to a hyperparameter tuning job by calling this API are also added to any training jobs that the hyperparameter tuning job launches after you call this API, but not to training jobs that the hyperparameter tuning job launched before you called this API. To make sure that the tags associated with a hyperparameter tuning job are also added to all training jobs that the hyperparameter tuning job launches, add the tags when you first create the tuning job by specifying them in the Tags parameter of CreateHyperParameterTuningJob.

*

See Also:

AWS * API Reference
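*
* A minimal usage sketch (not from the official documentation): it assumes the SDK
* has been initialized with Aws::InitAPI, the relevant model headers are included,
* and credentials/region are configured; the ARN and tag values are placeholders.
* @code
* Aws::SageMaker::SageMakerClient client;                    // default credential chain and config
* Aws::SageMaker::Model::AddTagsRequest request;
* request.SetResourceArn("arn:aws:sagemaker:us-west-2:111122223333:training-job/my-job"); // placeholder
* Aws::SageMaker::Model::Tag tag;
* tag.SetKey("project");
* tag.SetValue("demo");
* request.AddTags(tag);                                      // appends one Tag to the request
* auto outcome = client.AddTags(request);
* if (!outcome.IsSuccess())
* {
*   // inspect outcome.GetError() for the error type and message
* }
* @endcode
*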

*/ virtual Model::AddTagsOutcome AddTags(const Model::AddTagsRequest& request) const; /** *

Adds or overwrites one or more tags for the specified Amazon SageMaker resource. You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints.

Each tag consists of a key and an optional value. Tag keys must be unique per resource. For more information about tags, see AWS Tagging Strategies.

Tags that you add to a hyperparameter tuning job by calling this API are also added to any training jobs that the hyperparameter tuning job launches after you call this API, but not to training jobs that the hyperparameter tuning job launched before you called this API. To make sure that the tags associated with a hyperparameter tuning job are also added to all training jobs that the hyperparameter tuning job launches, add the tags when you first create the tuning job by specifying them in the Tags parameter of CreateHyperParameterTuningJob.

*

See Also:

AWS * API Reference
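*
* A sketch of the Callable form (assumes the same client and request as the
* synchronous example above). The Callable returns a std::future for the outcome,
* so the call returns immediately and the result is collected later with get().
* @code
* auto futureOutcome = client.AddTagsCallable(request);
* // ... do other work while the request is in flight ...
* auto outcome = futureOutcome.get();                        // blocks until the operation completes
* @endcode
*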

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::AddTagsOutcomeCallable AddTagsCallable(const Model::AddTagsRequest& request) const; /** *

Adds or overwrites one or more tags for the specified Amazon SageMaker resource. You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints.

Each tag consists of a key and an optional value. Tag keys must be unique per resource. For more information about tags, see AWS Tagging Strategies.

Tags that you add to a hyperparameter tuning job by calling this API are also added to any training jobs that the hyperparameter tuning job launches after you call this API, but not to training jobs that the hyperparameter tuning job launched before you called this API. To make sure that the tags associated with a hyperparameter tuning job are also added to all training jobs that the hyperparameter tuning job launches, add the tags when you first create the tuning job by specifying them in the Tags parameter of CreateHyperParameterTuningJob.

*

See Also:

AWS * API Reference
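*
* A sketch of the Async form (assumes the same client and request as above). The
* lambda below follows the usual aws-sdk-cpp response-handler shape; the exact
* handler typedef is declared earlier in this header.
* @code
* client.AddTagsAsync(request,
*     [](const Aws::SageMaker::SageMakerClient*,
*        const Aws::SageMaker::Model::AddTagsRequest&,
*        const Aws::SageMaker::Model::AddTagsOutcome& outcome,
*        const std::shared_ptr<const Aws::Client::AsyncCallerContext>&)
*     {
*       // runs on the client's executor thread once the call finishes
*       if (!outcome.IsSuccess()) { return; }   // handle outcome.GetError() here
*     });
* @endcode
*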

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void AddTagsAsync(const Model::AddTagsRequest& request, const AddTagsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Associates a trial component with a trial. A trial component can be * associated with multiple trials. To disassociate a trial component from a trial, * call the DisassociateTrialComponent API.

See Also:

AWS * API Reference
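*
* A minimal sketch (assumes an initialized SageMakerClient named client; the trial
* and trial component names are placeholders):
* @code
* Aws::SageMaker::Model::AssociateTrialComponentRequest request;
* request.SetTrialComponentName("my-trial-component");   // placeholder
* request.SetTrialName("my-trial");                       // placeholder
* auto outcome = client.AssociateTrialComponent(request);
* @endcode
*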

*/ virtual Model::AssociateTrialComponentOutcome AssociateTrialComponent(const Model::AssociateTrialComponentRequest& request) const; /** *

Associates a trial component with a trial. A trial component can be * associated with multiple trials. To disassociate a trial component from a trial, * call the DisassociateTrialComponent API.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::AssociateTrialComponentOutcomeCallable AssociateTrialComponentCallable(const Model::AssociateTrialComponentRequest& request) const; /** *

Associates a trial component with a trial. A trial component can be * associated with multiple trials. To disassociate a trial component from a trial, * call the DisassociateTrialComponent API.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void AssociateTrialComponentAsync(const Model::AssociateTrialComponentRequest& request, const AssociateTrialComponentResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Create a machine learning algorithm that you can use in Amazon SageMaker and * list in the AWS Marketplace.

See Also:

AWS * API Reference
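*
* A minimal sketch (assumes an initialized SageMakerClient named client; the
* algorithm name is a placeholder, and the TrainingSpecification that the service
* requires is omitted here):
* @code
* Aws::SageMaker::Model::CreateAlgorithmRequest request;
* request.SetAlgorithmName("my-algorithm");               // placeholder
* request.SetAlgorithmDescription("Example algorithm");
* // request.SetTrainingSpecification(...);               // required by the service, omitted in this sketch
* auto outcome = client.CreateAlgorithm(request);
* @endcode
*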

*/ virtual Model::CreateAlgorithmOutcome CreateAlgorithm(const Model::CreateAlgorithmRequest& request) const; /** *

Create a machine learning algorithm that you can use in Amazon SageMaker and * list in the AWS Marketplace.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::CreateAlgorithmOutcomeCallable CreateAlgorithmCallable(const Model::CreateAlgorithmRequest& request) const; /** *

Create a machine learning algorithm that you can use in Amazon SageMaker and * list in the AWS Marketplace.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void CreateAlgorithmAsync(const Model::CreateAlgorithmRequest& request, const CreateAlgorithmResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Creates a running App for the specified UserProfile. Supported Apps are * JupyterServer and KernelGateway. This operation is automatically invoked by * Amazon SageMaker Studio upon access to the associated Domain, and when new * kernel configurations are selected by the user. A user may have multiple Apps * active simultaneously.

See Also:

AWS * API Reference
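*
* A minimal sketch (assumes an initialized SageMakerClient named client; the
* domain ID, user profile name, and app name are placeholders):
* @code
* Aws::SageMaker::Model::CreateAppRequest request;
* request.SetDomainId("d-xxxxxxxxxxxx");                  // placeholder domain ID
* request.SetUserProfileName("my-user-profile");          // placeholder
* request.SetAppType(Aws::SageMaker::Model::AppType::JupyterServer);
* request.SetAppName("default");                          // placeholder
* auto outcome = client.CreateApp(request);
* @endcode
*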

*/ virtual Model::CreateAppOutcome CreateApp(const Model::CreateAppRequest& request) const; /** *

Creates a running App for the specified UserProfile. Supported Apps are * JupyterServer and KernelGateway. This operation is automatically invoked by * Amazon SageMaker Studio upon access to the associated Domain, and when new * kernel configurations are selected by the user. A user may have multiple Apps * active simultaneously.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::CreateAppOutcomeCallable CreateAppCallable(const Model::CreateAppRequest& request) const; /** *

Creates a running App for the specified UserProfile. Supported Apps are * JupyterServer and KernelGateway. This operation is automatically invoked by * Amazon SageMaker Studio upon access to the associated Domain, and when new * kernel configurations are selected by the user. A user may have multiple Apps * active simultaneously.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void CreateAppAsync(const Model::CreateAppRequest& request, const CreateAppResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Creates an Autopilot job.

Find the best performing model after you run * an Autopilot job by calling . Deploy that model by following the steps described * in Step * 6.1: Deploy the Model to Amazon SageMaker Hosting Services.

For * information about how to use Autopilot, see * Automate Model Development with Amazon SageMaker Autopilot.

See * Also:

AWS * API Reference
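*
* A minimal sketch (assumes an initialized SageMakerClient named client; the job
* name and role ARN are placeholders, and the input/output data configuration that
* the service requires is omitted):
* @code
* Aws::SageMaker::Model::CreateAutoMLJobRequest request;
* request.SetAutoMLJobName("my-automl-job");                                   // placeholder
* request.SetRoleArn("arn:aws:iam::111122223333:role/SageMakerExecutionRole"); // placeholder
* // request.AddInputDataConfig(...); request.SetOutputDataConfig(...);        // required, omitted here
* auto outcome = client.CreateAutoMLJob(request);
* @endcode
*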

*/ virtual Model::CreateAutoMLJobOutcome CreateAutoMLJob(const Model::CreateAutoMLJobRequest& request) const; /** *

Creates an Autopilot job.

Find the best performing model after you run * an Autopilot job by calling . Deploy that model by following the steps described * in Step * 6.1: Deploy the Model to Amazon SageMaker Hosting Services.

For * information about how to use Autopilot, see * Automate Model Development with Amazon SageMaker Autopilot.

See * Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::CreateAutoMLJobOutcomeCallable CreateAutoMLJobCallable(const Model::CreateAutoMLJobRequest& request) const; /** *

Creates an Autopilot job.

Find the best performing model after you run * an Autopilot job by calling . Deploy that model by following the steps described * in Step * 6.1: Deploy the Model to Amazon SageMaker Hosting Services.

For * information about how to use Autopilot, see * Automate Model Development with Amazon SageMaker Autopilot.

See * Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void CreateAutoMLJobAsync(const Model::CreateAutoMLJobRequest& request, const CreateAutoMLJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Creates a Git repository as a resource in your Amazon SageMaker account. You * can associate the repository with notebook instances so that you can use Git * source control for the notebooks you create. The Git repository is a resource in * your Amazon SageMaker account, so it can be associated with more than one * notebook instance, and it persists independently from the lifecycle of any * notebook instances it is associated with.

The repository can be hosted * either in AWS * CodeCommit or in any other Git repository.

See Also:

AWS * API Reference
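*
* A minimal sketch (assumes an initialized SageMakerClient named client; the
* repository name, URL, and branch are placeholders):
* @code
* Aws::SageMaker::Model::GitConfig gitConfig;
* gitConfig.SetRepositoryUrl("https://github.com/example/my-notebooks.git");  // placeholder
* gitConfig.SetBranch("main");                                                // placeholder
* Aws::SageMaker::Model::CreateCodeRepositoryRequest request;
* request.SetCodeRepositoryName("my-notebooks");                              // placeholder
* request.SetGitConfig(gitConfig);
* auto outcome = client.CreateCodeRepository(request);
* @endcode
*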

*/ virtual Model::CreateCodeRepositoryOutcome CreateCodeRepository(const Model::CreateCodeRepositoryRequest& request) const; /** *

Creates a Git repository as a resource in your Amazon SageMaker account. You * can associate the repository with notebook instances so that you can use Git * source control for the notebooks you create. The Git repository is a resource in * your Amazon SageMaker account, so it can be associated with more than one * notebook instance, and it persists independently from the lifecycle of any * notebook instances it is associated with.

The repository can be hosted * either in AWS * CodeCommit or in any other Git repository.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::CreateCodeRepositoryOutcomeCallable CreateCodeRepositoryCallable(const Model::CreateCodeRepositoryRequest& request) const; /** *

Creates a Git repository as a resource in your Amazon SageMaker account. You * can associate the repository with notebook instances so that you can use Git * source control for the notebooks you create. The Git repository is a resource in * your Amazon SageMaker account, so it can be associated with more than one * notebook instance, and it persists independently from the lifecycle of any * notebook instances it is associated with.

The repository can be hosted * either in AWS * CodeCommit or in any other Git repository.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void CreateCodeRepositoryAsync(const Model::CreateCodeRepositoryRequest& request, const CreateCodeRepositoryResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Starts a model compilation job. After the model has been compiled, Amazon * SageMaker saves the resulting model artifacts to an Amazon Simple Storage * Service (Amazon S3) bucket that you specify.

If you choose to host your * model using Amazon SageMaker hosting services, you can use the resulting model * artifacts as part of the model. You can also use the artifacts with AWS IoT * Greengrass. In that case, deploy them as an ML resource.

In the request * body, you provide the following:

  • A name for the compilation job

  • Information about the input model artifacts

  • The output location for the compiled model and the device (target) that the model runs on

  • The Amazon Resource Name (ARN) of the IAM role that Amazon SageMaker assumes to perform the model compilation job.

You can also provide a Tag to track the model * compilation job's resource use and costs. The response body contains the * CompilationJobArn for the compiled job.

To stop a model * compilation job, use StopCompilationJob. To get information about a * particular model compilation job, use DescribeCompilationJob. To get * information about multiple model compilation jobs, use * ListCompilationJobs.

See Also:

AWS * API Reference
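*
* A minimal sketch (assumes an initialized SageMakerClient named client; the job
* name and role ARN are placeholders, and the InputConfig, OutputConfig, and
* StoppingCondition that the service requires are omitted):
* @code
* Aws::SageMaker::Model::CreateCompilationJobRequest request;
* request.SetCompilationJobName("my-compilation-job");                          // placeholder
* request.SetRoleArn("arn:aws:iam::111122223333:role/SageMakerExecutionRole");  // placeholder
* // request.SetInputConfig(...); request.SetOutputConfig(...); request.SetStoppingCondition(...);
* auto outcome = client.CreateCompilationJob(request);
* @endcode
*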

*/ virtual Model::CreateCompilationJobOutcome CreateCompilationJob(const Model::CreateCompilationJobRequest& request) const; /** *

Starts a model compilation job. After the model has been compiled, Amazon * SageMaker saves the resulting model artifacts to an Amazon Simple Storage * Service (Amazon S3) bucket that you specify.

If you choose to host your * model using Amazon SageMaker hosting services, you can use the resulting model * artifacts as part of the model. You can also use the artifacts with AWS IoT * Greengrass. In that case, deploy them as an ML resource.

In the request * body, you provide the following:

  • A name for the compilation job

  • Information about the input model artifacts

  • The output location for the compiled model and the device (target) that the model runs on

  • The Amazon Resource Name (ARN) of the IAM role that Amazon SageMaker assumes to perform the model compilation job.

You can also provide a Tag to track the model * compilation job's resource use and costs. The response body contains the * CompilationJobArn for the compiled job.

To stop a model * compilation job, use StopCompilationJob. To get information about a * particular model compilation job, use DescribeCompilationJob. To get * information about multiple model compilation jobs, use * ListCompilationJobs.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::CreateCompilationJobOutcomeCallable CreateCompilationJobCallable(const Model::CreateCompilationJobRequest& request) const; /** *

Starts a model compilation job. After the model has been compiled, Amazon * SageMaker saves the resulting model artifacts to an Amazon Simple Storage * Service (Amazon S3) bucket that you specify.

If you choose to host your * model using Amazon SageMaker hosting services, you can use the resulting model * artifacts as part of the model. You can also use the artifacts with AWS IoT * Greengrass. In that case, deploy them as an ML resource.

In the request * body, you provide the following:

  • A name for the compilation job

  • Information about the input model artifacts

  • The output location for the compiled model and the device (target) that the model runs on

  • The Amazon Resource Name (ARN) of the IAM role that Amazon SageMaker assumes to perform the model compilation job.

You can also provide a Tag to track the model * compilation job's resource use and costs. The response body contains the * CompilationJobArn for the compiled job.

To stop a model * compilation job, use StopCompilationJob. To get information about a * particular model compilation job, use DescribeCompilationJob. To get * information about multiple model compilation jobs, use * ListCompilationJobs.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void CreateCompilationJobAsync(const Model::CreateCompilationJobRequest& request, const CreateCompilationJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Creates a Domain used by SageMaker Studio. A domain consists of * an associated directory, a list of authorized users, and a variety of security, * application, policy, and Amazon Virtual Private Cloud (VPC) configurations. An * AWS account is limited to one domain per region. Users within a domain can share * notebook files and other artifacts with each other.

When a domain is * created, an Amazon Elastic File System (EFS) volume is also created for use by * all of the users within the domain. Each user receives a private home directory * within the EFS for notebooks, Git repositories, and data files.

All * traffic between the domain and the EFS volume is communicated through the * specified subnet IDs. All other traffic goes over the Internet through an Amazon * SageMaker system VPC. The EFS traffic uses the NFS/TCP protocol over port * 2049.

NFS traffic over TCP on port 2049 needs to be allowed * in both inbound and outbound rules in order to launch a SageMaker Studio app * successfully.

See Also:

AWS * API Reference
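*
* A minimal sketch (assumes an initialized SageMakerClient named client; the
* domain name, execution role ARN, VPC ID, and subnet ID are placeholders):
* @code
* Aws::SageMaker::Model::UserSettings userSettings;
* userSettings.SetExecutionRole("arn:aws:iam::111122223333:role/SageMakerExecutionRole"); // placeholder
* Aws::SageMaker::Model::CreateDomainRequest request;
* request.SetDomainName("my-domain");                      // placeholder
* request.SetAuthMode(Aws::SageMaker::Model::AuthMode::IAM);
* request.SetDefaultUserSettings(userSettings);
* request.SetVpcId("vpc-0abc1234def567890");               // placeholder
* request.AddSubnetIds("subnet-0abc1234def567890");        // placeholder
* auto outcome = client.CreateDomain(request);
* @endcode
*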

*/ virtual Model::CreateDomainOutcome CreateDomain(const Model::CreateDomainRequest& request) const; /** *

Creates a Domain used by SageMaker Studio. A domain consists of * an associated directory, a list of authorized users, and a variety of security, * application, policy, and Amazon Virtual Private Cloud (VPC) configurations. An * AWS account is limited to one domain per region. Users within a domain can share * notebook files and other artifacts with each other.

When a domain is * created, an Amazon Elastic File System (EFS) volume is also created for use by * all of the users within the domain. Each user receives a private home directory * within the EFS for notebooks, Git repositories, and data files.

All * traffic between the domain and the EFS volume is communicated through the * specified subnet IDs. All other traffic goes over the Internet through an Amazon * SageMaker system VPC. The EFS traffic uses the NFS/TCP protocol over port * 2049.

NFS traffic over TCP on port 2049 needs to be allowed * in both inbound and outbound rules in order to launch a SageMaker Studio app * successfully.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::CreateDomainOutcomeCallable CreateDomainCallable(const Model::CreateDomainRequest& request) const; /** *

Creates a Domain used by SageMaker Studio. A domain consists of * an associated directory, a list of authorized users, and a variety of security, * application, policy, and Amazon Virtual Private Cloud (VPC) configurations. An * AWS account is limited to one domain per region. Users within a domain can share * notebook files and other artifacts with each other.

When a domain is * created, an Amazon Elastic File System (EFS) volume is also created for use by * all of the users within the domain. Each user receives a private home directory * within the EFS for notebooks, Git repositories, and data files.

All * traffic between the domain and the EFS volume is communicated through the * specified subnet IDs. All other traffic goes over the Internet through an Amazon * SageMaker system VPC. The EFS traffic uses the NFS/TCP protocol over port * 2049.

NFS traffic over TCP on port 2049 needs to be allowed * in both inbound and outbound rules in order to launch a SageMaker Studio app * successfully.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void CreateDomainAsync(const Model::CreateDomainRequest& request, const CreateDomainResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Creates an endpoint using the endpoint configuration specified in the * request. Amazon SageMaker uses the endpoint to provision resources and deploy * models. You create the endpoint configuration with the * CreateEndpointConfig API.

Use this API to deploy models using * Amazon SageMaker hosting services.

For an example that calls this method * when deploying a model to Amazon SageMaker hosting services, see Deploy * the Model to Amazon SageMaker Hosting Services (AWS SDK for Python (Boto * 3)).

You must not delete an EndpointConfig that * is in use by an endpoint that is live or while the UpdateEndpoint * or CreateEndpoint operations are being performed on the endpoint. * To update an endpoint, you must create a new EndpointConfig.

*

The endpoint name must be unique within an AWS Region in your AWS * account.

When it receives the request, Amazon SageMaker creates the * endpoint, launches the resources (ML compute instances), and deploys the * model(s) on them.

When you call CreateEndpoint, a load * call is made to DynamoDB to verify that your endpoint configuration exists. When * you read data from a DynamoDB table supporting * Eventually Consistent Reads , the response might not reflect * the results of a recently completed write operation. The response might include * some stale data. If the dependent entities are not yet in DynamoDB, this causes * a validation error. If you repeat your read request after a short time, the * response should return the latest data. So retry logic is recommended to handle * these possible issues. We also recommend that customers call * DescribeEndpointConfig before calling CreateEndpoint to minimize * the potential impact of a DynamoDB eventually consistent read.

*

When Amazon SageMaker receives the request, it sets the endpoint status to * Creating. After it creates the endpoint, it sets the status to * InService. Amazon SageMaker can then process incoming requests for * inferences. To check the status of an endpoint, use the DescribeEndpoint * API.

If any of the models hosted at this endpoint get model data from an * Amazon S3 location, Amazon SageMaker uses AWS Security Token Service to download * model artifacts from the S3 path you provided. AWS STS is activated in your IAM * user account by default. If you previously deactivated AWS STS for a region, you * need to reactivate AWS STS for that region. For more information, see Activating * and Deactivating AWS STS in an AWS Region in the AWS Identity and Access * Management User Guide.

See Also:

AWS * API Reference
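*
* A minimal sketch (assumes an initialized SageMakerClient named client and an
* existing endpoint configuration; both names are placeholders):
* @code
* Aws::SageMaker::Model::CreateEndpointRequest request;
* request.SetEndpointName("my-endpoint");                  // placeholder
* request.SetEndpointConfigName("my-endpoint-config");     // name of an existing EndpointConfig, placeholder
* auto outcome = client.CreateEndpoint(request);
* if (outcome.IsSuccess())
* {
*   const Aws::String endpointArn = outcome.GetResult().GetEndpointArn();
*   // poll DescribeEndpoint until the status is InService before sending inference requests
* }
* @endcode
*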

*/ virtual Model::CreateEndpointOutcome CreateEndpoint(const Model::CreateEndpointRequest& request) const; /** *

Creates an endpoint using the endpoint configuration specified in the * request. Amazon SageMaker uses the endpoint to provision resources and deploy * models. You create the endpoint configuration with the * CreateEndpointConfig API.

Use this API to deploy models using * Amazon SageMaker hosting services.

For an example that calls this method * when deploying a model to Amazon SageMaker hosting services, see Deploy * the Model to Amazon SageMaker Hosting Services (AWS SDK for Python (Boto * 3)).

You must not delete an EndpointConfig that * is in use by an endpoint that is live or while the UpdateEndpoint * or CreateEndpoint operations are being performed on the endpoint. * To update an endpoint, you must create a new EndpointConfig.

*

The endpoint name must be unique within an AWS Region in your AWS * account.

When it receives the request, Amazon SageMaker creates the * endpoint, launches the resources (ML compute instances), and deploys the * model(s) on them.

When you call CreateEndpoint, a load * call is made to DynamoDB to verify that your endpoint configuration exists. When * you read data from a DynamoDB table supporting * Eventually Consistent Reads , the response might not reflect * the results of a recently completed write operation. The response might include * some stale data. If the dependent entities are not yet in DynamoDB, this causes * a validation error. If you repeat your read request after a short time, the * response should return the latest data. So retry logic is recommended to handle * these possible issues. We also recommend that customers call * DescribeEndpointConfig before calling CreateEndpoint to minimize * the potential impact of a DynamoDB eventually consistent read.

*

When Amazon SageMaker receives the request, it sets the endpoint status to * Creating. After it creates the endpoint, it sets the status to * InService. Amazon SageMaker can then process incoming requests for * inferences. To check the status of an endpoint, use the DescribeEndpoint * API.

If any of the models hosted at this endpoint get model data from an * Amazon S3 location, Amazon SageMaker uses AWS Security Token Service to download * model artifacts from the S3 path you provided. AWS STS is activated in your IAM * user account by default. If you previously deactivated AWS STS for a region, you * need to reactivate AWS STS for that region. For more information, see Activating * and Deactivating AWS STS in an AWS Region in the AWS Identity and Access * Management User Guide.

See Also:

AWS * API Reference
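
* A brief sketch of the callable variant, reusing the populated request
* object from the synchronous example above; the returned
* CreateEndpointOutcomeCallable behaves like a std::future and blocks only
* when get() is called.
*
* @code
* // Launch the request without blocking the calling thread.
* auto callable = client.CreateEndpointCallable(request);
*
* // ... do other work in parallel ...
*
* // Block only when the result is actually needed.
* auto outcome = callable.get();
* if (outcome.IsSuccess())
* {
*     Aws::String endpointArn = outcome.GetResult().GetEndpointArn();
* }
* @endcode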

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::CreateEndpointOutcomeCallable CreateEndpointCallable(const Model::CreateEndpointRequest& request) const; /** *

Creates an endpoint using the endpoint configuration specified in the * request. Amazon SageMaker uses the endpoint to provision resources and deploy * models. You create the endpoint configuration with the * CreateEndpointConfig API.

Use this API to deploy models using * Amazon SageMaker hosting services.

For an example that calls this method * when deploying a model to Amazon SageMaker hosting services, see Deploy * the Model to Amazon SageMaker Hosting Services (AWS SDK for Python (Boto * 3)).

You must not delete an EndpointConfig that * is in use by an endpoint that is live or while the UpdateEndpoint * or CreateEndpoint operations are being performed on the endpoint. * To update an endpoint, you must create a new EndpointConfig.

*

The endpoint name must be unique within an AWS Region in your AWS * account.

When it receives the request, Amazon SageMaker creates the * endpoint, launches the resources (ML compute instances), and deploys the * model(s) on them.

When you call CreateEndpoint, a load * call is made to DynamoDB to verify that your endpoint configuration exists. When * you read data from a DynamoDB table supporting * Eventually Consistent Reads, the response might not reflect * the results of a recently completed write operation. The response might include * some stale data. If the dependent entities are not yet in DynamoDB, this causes * a validation error. If you repeat your read request after a short time, the * response should return the latest data. So retry logic is recommended to handle * these possible issues. We also recommend that customers call * DescribeEndpointConfig before calling CreateEndpoint to minimize * the potential impact of a DynamoDB eventually consistent read.

*

When Amazon SageMaker receives the request, it sets the endpoint status to * Creating. After it creates the endpoint, it sets the status to * InService. Amazon SageMaker can then process incoming requests for * inferences. To check the status of an endpoint, use the DescribeEndpoint * API.

If any of the models hosted at this endpoint get model data from an * Amazon S3 location, Amazon SageMaker uses AWS Security Token Service to download * model artifacts from the S3 path you provided. AWS STS is activated in your IAM * user account by default. If you previously deactivated AWS STS for a region, you * need to reactivate AWS STS for that region. For more information, see Activating * and Deactivating AWS STS in an AWS Region in the AWS Identity and Access * Management User Guide.

See Also:

AWS * API Reference
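
* A brief sketch of the asynchronous variant; the lambda below follows the
* SDK's usual ResponseReceivedHandler shape (client, request, outcome,
* caller context), and the request object is assumed to be populated as in
* the synchronous example.
*
* @code
* #include <aws/core/client/AsyncCallerContext.h>
*
* client.CreateEndpointAsync(request,
*     [](const Aws::SageMaker::SageMakerClient*,
*        const Aws::SageMaker::Model::CreateEndpointRequest&,
*        const Aws::SageMaker::Model::CreateEndpointOutcome& outcome,
*        const std::shared_ptr<const Aws::Client::AsyncCallerContext>&)
*     {
*         // Runs on the client's executor thread when the call completes.
*         if (outcome.IsSuccess())
*         {
*             Aws::String endpointArn = outcome.GetResult().GetEndpointArn();
*         }
*     });
* @endcode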

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void CreateEndpointAsync(const Model::CreateEndpointRequest& request, const CreateEndpointResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Creates an endpoint configuration that Amazon SageMaker hosting services uses * to deploy models. In the configuration, you identify one or more models, created * using the CreateModel API, to deploy and the resources that you * want Amazon SageMaker to provision. Then you call the CreateEndpoint * API.

Use this API if you want to use Amazon SageMaker hosting * services to deploy models into production.

In the request, you * define a ProductionVariant for each model that you want to deploy. * Each ProductionVariant parameter also describes the resources that * you want Amazon SageMaker to provision. This includes the number and type of ML * compute instances to deploy.

If you are hosting multiple models, you * also assign a VariantWeight to specify how much traffic you want to * allocate to each model. For example, suppose that you want to host two models, A * and B, and you assign traffic weight 2 for model A and 1 for model B. Amazon * SageMaker distributes two-thirds of the traffic to Model A, and one-third to * model B.

For an example that calls this method when deploying a model to * Amazon SageMaker hosting services, see Deploy * the Model to Amazon SageMaker Hosting Services (AWS SDK for Python (Boto * 3)).

When you call CreateEndpoint, a load call is made * to DynamoDB to verify that your endpoint configuration exists. When you read * data from a DynamoDB table supporting * Eventually Consistent Reads, the response might not reflect * the results of a recently completed write operation. The response might include * some stale data. If the dependent entities are not yet in DynamoDB, this causes * a validation error. If you repeat your read request after a short time, the * response should return the latest data. So retry logic is recommended to handle * these possible issues. We also recommend that customers call * DescribeEndpointConfig before calling CreateEndpoint to minimize * the potential impact of a DynamoDB eventually consistent read.

*

See Also:

AWS * API Reference
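
* A hedged sketch of a single-variant configuration; the model name, config
* name, and the instance-type enum value are illustrative assumptions, and
* client is an already-constructed Aws::SageMaker::SageMakerClient.
*
* @code
* #include <aws/sagemaker/model/CreateEndpointConfigRequest.h>
* #include <aws/sagemaker/model/ProductionVariant.h>
*
* Aws::SageMaker::Model::ProductionVariant variant;
* variant.SetVariantName("AllTraffic");
* variant.SetModelName("my-model");               // created with CreateModel
* variant.SetInitialInstanceCount(1);
* variant.SetInstanceType(
*     Aws::SageMaker::Model::ProductionVariantInstanceType::ml_m5_xlarge);
* variant.SetInitialVariantWeight(1.0);
*
* Aws::SageMaker::Model::CreateEndpointConfigRequest configRequest;
* configRequest.SetEndpointConfigName("my-endpoint-config");
* configRequest.AddProductionVariants(variant);
*
* auto outcome = client.CreateEndpointConfig(configRequest);
* if (outcome.IsSuccess())
* {
*     Aws::String configArn = outcome.GetResult().GetEndpointConfigArn();
* }
* @endcode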

*/ virtual Model::CreateEndpointConfigOutcome CreateEndpointConfig(const Model::CreateEndpointConfigRequest& request) const; /** *

Creates an endpoint configuration that Amazon SageMaker hosting services uses * to deploy models. In the configuration, you identify one or more models, created * using the CreateModel API, to deploy and the resources that you * want Amazon SageMaker to provision. Then you call the CreateEndpoint * API.

Use this API if you want to use Amazon SageMaker hosting * services to deploy models into production.

In the request, you * define a ProductionVariant for each model that you want to deploy. * Each ProductionVariant parameter also describes the resources that * you want Amazon SageMaker to provision. This includes the number and type of ML * compute instances to deploy.

If you are hosting multiple models, you * also assign a VariantWeight to specify how much traffic you want to * allocate to each model. For example, suppose that you want to host two models, A * and B, and you assign traffic weight 2 for model A and 1 for model B. Amazon * SageMaker distributes two-thirds of the traffic to Model A, and one-third to * model B.

For an example that calls this method when deploying a model to * Amazon SageMaker hosting services, see Deploy * the Model to Amazon SageMaker Hosting Services (AWS SDK for Python (Boto * 3)).

When you call CreateEndpoint, a load call is made * to DynamoDB to verify that your endpoint configuration exists. When you read * data from a DynamoDB table supporting * Eventually Consistent Reads, the response might not reflect * the results of a recently completed write operation. The response might include * some stale data. If the dependent entities are not yet in DynamoDB, this causes * a validation error. If you repeat your read request after a short time, the * response should return the latest data. So retry logic is recommended to handle * these possible issues. We also recommend that customers call * DescribeEndpointConfig before calling CreateEndpoint to minimize * the potential impact of a DynamoDB eventually consistent read.

*

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::CreateEndpointConfigOutcomeCallable CreateEndpointConfigCallable(const Model::CreateEndpointConfigRequest& request) const; /** *

Creates an endpoint configuration that Amazon SageMaker hosting services uses * to deploy models. In the configuration, you identify one or more models, created * using the CreateModel API, to deploy and the resources that you * want Amazon SageMaker to provision. Then you call the CreateEndpoint * API.

Use this API if you want to use Amazon SageMaker hosting * services to deploy models into production.

In the request, you * define a ProductionVariant for each model that you want to deploy. * Each ProductionVariant parameter also describes the resources that * you want Amazon SageMaker to provision. This includes the number and type of ML * compute instances to deploy.

If you are hosting multiple models, you * also assign a VariantWeight to specify how much traffic you want to * allocate to each model. For example, suppose that you want to host two models, A * and B, and you assign traffic weight 2 for model A and 1 for model B. Amazon * SageMaker distributes two-thirds of the traffic to Model A, and one-third to * model B.

For an example that calls this method when deploying a model to * Amazon SageMaker hosting services, see Deploy * the Model to Amazon SageMaker Hosting Services (AWS SDK for Python (Boto * 3)).

When you call CreateEndpoint, a load call is made * to DynamoDB to verify that your endpoint configuration exists. When you read * data from a DynamoDB table supporting * Eventually Consistent Reads, the response might not reflect * the results of a recently completed write operation. The response might include * some stale data. If the dependent entities are not yet in DynamoDB, this causes * a validation error. If you repeat your read request after a short time, the * response should return the latest data. So retry logic is recommended to handle * these possible issues. We also recommend that customers call * DescribeEndpointConfig before calling CreateEndpoint to minimize * the potential impact of a DynamoDB eventually consistent read.

*

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void CreateEndpointConfigAsync(const Model::CreateEndpointConfigRequest& request, const CreateEndpointConfigResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Creates an Amazon SageMaker experiment. An experiment is a collection of * trials that are observed, compared, and evaluated as a group. A trial is a * set of steps, called trial components, that produce a machine learning * model.

The goal of an experiment is to determine the components that * produce the best model. Multiple trials are performed, each one isolating and * measuring the impact of a change to one or more inputs, while keeping the * remaining inputs constant.

When you use Amazon SageMaker Studio or the * Amazon SageMaker Python SDK, all experiments, trials, and trial components are * automatically tracked, logged, and indexed. When you use the AWS SDK for Python * (Boto), you must use the logging APIs provided by the SDK.

You can add * tags to experiments, trials, trial components and then use the Search API * to search for the tags.

To add a description to an experiment, specify * the optional Description parameter. To add a description later, or * to change the description, call the UpdateExperiment API.

To get a * list of all your experiments, call the ListExperiments API. To view an * experiment's properties, call the DescribeExperiment API. To get a list * of all the trials associated with an experiment, call the ListTrials API. * To create a trial, call the CreateTrial API.

See Also:

AWS * API Reference
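
* A short illustrative sketch; the experiment name and description are
* placeholders, and client is an already-constructed
* Aws::SageMaker::SageMakerClient.
*
* @code
* #include <aws/sagemaker/model/CreateExperimentRequest.h>
*
* Aws::SageMaker::Model::CreateExperimentRequest request;
* request.SetExperimentName("my-experiment");
* request.SetDescription("Compares two training configurations");
*
* auto outcome = client.CreateExperiment(request);
* if (outcome.IsSuccess())
* {
*     Aws::String experimentArn = outcome.GetResult().GetExperimentArn();
* }
* @endcode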

*/ virtual Model::CreateExperimentOutcome CreateExperiment(const Model::CreateExperimentRequest& request) const; /** *

Creates an Amazon SageMaker experiment. An experiment is a collection of * trials that are observed, compared, and evaluated as a group. A trial is a * set of steps, called trial components, that produce a machine learning * model.

The goal of an experiment is to determine the components that * produce the best model. Multiple trials are performed, each one isolating and * measuring the impact of a change to one or more inputs, while keeping the * remaining inputs constant.

When you use Amazon SageMaker Studio or the * Amazon SageMaker Python SDK, all experiments, trials, and trial components are * automatically tracked, logged, and indexed. When you use the AWS SDK for Python * (Boto), you must use the logging APIs provided by the SDK.

You can add * tags to experiments, trials, trial components and then use the Search API * to search for the tags.

To add a description to an experiment, specify * the optional Description parameter. To add a description later, or * to change the description, call the UpdateExperiment API.

To get a * list of all your experiments, call the ListExperiments API. To view an * experiment's properties, call the DescribeExperiment API. To get a list * of all the trials associated with an experiment, call the ListTrials API. * To create a trial, call the CreateTrial API.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::CreateExperimentOutcomeCallable CreateExperimentCallable(const Model::CreateExperimentRequest& request) const; /** *

Creates an Amazon SageMaker experiment. An experiment is a collection of * trials that are observed, compared, and evaluated as a group. A trial is a * set of steps, called trial components, that produce a machine learning * model.

The goal of an experiment is to determine the components that * produce the best model. Multiple trials are performed, each one isolating and * measuring the impact of a change to one or more inputs, while keeping the * remaining inputs constant.

When you use Amazon SageMaker Studio or the * Amazon SageMaker Python SDK, all experiments, trials, and trial components are * automatically tracked, logged, and indexed. When you use the AWS SDK for Python * (Boto), you must use the logging APIs provided by the SDK.

You can add * tags to experiments, trials, trial components and then use the Search API * to search for the tags.

To add a description to an experiment, specify * the optional Description parameter. To add a description later, or * to change the description, call the UpdateExperiment API.

To get a * list of all your experiments, call the ListExperiments API. To view an * experiment's properties, call the DescribeExperiment API. To get a list * of all the trials associated with an experiment, call the ListTrials API. * To create a trial, call the CreateTrial API.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void CreateExperimentAsync(const Model::CreateExperimentRequest& request, const CreateExperimentResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Creates a flow definition.

See Also:

AWS * API Reference

*/ virtual Model::CreateFlowDefinitionOutcome CreateFlowDefinition(const Model::CreateFlowDefinitionRequest& request) const; /** *

Creates a flow definition.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::CreateFlowDefinitionOutcomeCallable CreateFlowDefinitionCallable(const Model::CreateFlowDefinitionRequest& request) const; /** *

Creates a flow definition.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void CreateFlowDefinitionAsync(const Model::CreateFlowDefinitionRequest& request, const CreateFlowDefinitionResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Defines the settings you will use for the human review workflow user * interface. Reviewers will see a three-panel interface with an instruction area, * the item to review, and an input area.

See Also:

AWS * API Reference

*/ virtual Model::CreateHumanTaskUiOutcome CreateHumanTaskUi(const Model::CreateHumanTaskUiRequest& request) const; /** *

Defines the settings you will use for the human review workflow user * interface. Reviewers will see a three-panel interface with an instruction area, * the item to review, and an input area.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::CreateHumanTaskUiOutcomeCallable CreateHumanTaskUiCallable(const Model::CreateHumanTaskUiRequest& request) const; /** *

Defines the settings you will use for the human review workflow user * interface. Reviewers will see a three-panel interface with an instruction area, * the item to review, and an input area.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void CreateHumanTaskUiAsync(const Model::CreateHumanTaskUiRequest& request, const CreateHumanTaskUiResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Starts a hyperparameter tuning job. A hyperparameter tuning job finds the * best version of a model by running many training jobs on your dataset using the * algorithm you choose and values for hyperparameters within ranges that you * specify. It then chooses the hyperparameter values that result in a model that * performs the best, as measured by an objective metric that you * choose.

See Also:

AWS * API Reference

*/ virtual Model::CreateHyperParameterTuningJobOutcome CreateHyperParameterTuningJob(const Model::CreateHyperParameterTuningJobRequest& request) const; /** *

Starts a hyperparameter tuning job. A hyperparameter tuning job finds the * best version of a model by running many training jobs on your dataset using the * algorithm you choose and values for hyperparameters within ranges that you * specify. It then chooses the hyperparameter values that result in a model that * performs the best, as measured by an objective metric that you * choose.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::CreateHyperParameterTuningJobOutcomeCallable CreateHyperParameterTuningJobCallable(const Model::CreateHyperParameterTuningJobRequest& request) const; /** *

Starts a hyperparameter tuning job. A hyperparameter tuning job finds the * best version of a model by running many training jobs on your dataset using the * algorithm you choose and values for hyperparameters within ranges that you * specify. It then chooses the hyperparameter values that result in a model that * performs the best, as measured by an objective metric that you * choose.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void CreateHyperParameterTuningJobAsync(const Model::CreateHyperParameterTuningJobRequest& request, const CreateHyperParameterTuningJobResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Creates a job that uses workers to label the data objects in your input * dataset. You can use the labeled data to train machine learning models.

*

You can select your workforce from one of three providers:

  • A * private workforce that you create. It can include employees, contractors, and * outside experts. Use a private workforce when you want the data to stay within your * organization or when a specific set of skills is required.

  • One * or more vendors that you select from the AWS Marketplace. Vendors provide * expertise in specific areas.

  • The Amazon Mechanical Turk * workforce. This is the largest workforce, but it should only be used for public * data or data that has been stripped of any personally identifiable * information.

You can also use automated data labeling * to reduce the number of data objects that need to be labeled by a human. * Automated data labeling uses active learning to determine if a data * object can be labeled by machine or if it needs to be sent to a human worker. * For more information, see Using * Automated Data Labeling.

The data objects to be labeled are contained * in an Amazon S3 bucket. You create a manifest file that describes the * location of each object. For more information, see Using Input * and Output Data.

The output can be used as the manifest file for * another labeling job or as training data for your machine learning * models.

See Also:

AWS * API Reference

*/ virtual Model::CreateLabelingJobOutcome CreateLabelingJob(const Model::CreateLabelingJobRequest& request) const; /** *

Creates a job that uses workers to label the data objects in your input * dataset. You can use the labeled data to train machine learning models.

*

You can select your workforce from one of three providers:

  • A * private workforce that you create. It can include employees, contractors, and * outside experts. Use a private workforce when you want the data to stay within your * organization or when a specific set of skills is required.

  • One * or more vendors that you select from the AWS Marketplace. Vendors provide * expertise in specific areas.

  • The Amazon Mechanical Turk * workforce. This is the largest workforce, but it should only be used for public * data or data that has been stripped of any personally identifiable * information.

You can also use automated data labeling * to reduce the number of data objects that need to be labeled by a human. * Automated data labeling uses active learning to determine if a data * object can be labeled by machine or if it needs to be sent to a human worker. * For more information, see Using * Automated Data Labeling.

The data objects to be labeled are contained * in an Amazon S3 bucket. You create a manifest file that describes the * location of each object. For more information, see Using Input * and Output Data.

The output can be used as the manifest file for * another labeling job or as training data for your machine learning * models.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::CreateLabelingJobOutcomeCallable CreateLabelingJobCallable(const Model::CreateLabelingJobRequest& request) const; /** *

Creates a job that uses workers to label the data objects in your input * dataset. You can use the labeled data to train machine learning models.

*

You can select your workforce from one of three providers:

  • A * private workforce that you create. It can include employees, contractors, and * outside experts. Use a private workforce when you want the data to stay within your * organization or when a specific set of skills is required.

  • One * or more vendors that you select from the AWS Marketplace. Vendors provide * expertise in specific areas.

  • The Amazon Mechanical Turk * workforce. This is the largest workforce, but it should only be used for public * data or data that has been stripped of any personally identifiable * information.

You can also use automated data labeling * to reduce the number of data objects that need to be labeled by a human. * Automated data labeling uses active learning to determine if a data * object can be labeled by machine or if it needs to be sent to a human worker. * For more information, see Using * Automated Data Labeling.

The data objects to be labeled are contained * in an Amazon S3 bucket. You create a manifest file that describes the * location of each object. For more information, see Using Input * and Output Data.

The output can be used as the manifest file for * another labeling job or as training data for your machine learning * models.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void CreateLabelingJobAsync(const Model::CreateLabelingJobRequest& request, const CreateLabelingJobResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Creates a model in Amazon SageMaker. In the request, you name the model and * describe a primary container. For the primary container, you specify the Docker * image that contains inference code, artifacts (from prior training), and a * custom environment map that the inference code uses when you deploy the model * for predictions.

Use this API to create a model if you want to use Amazon * SageMaker hosting services or run a batch transform job.

To host your * model, you create an endpoint configuration with the * CreateEndpointConfig API, and then create an endpoint with the * CreateEndpoint API. Amazon SageMaker then deploys all of the * containers that you defined for the model in the hosting environment.

*

For an example that calls this method when deploying a model to Amazon * SageMaker hosting services, see Deploy * the Model to Amazon SageMaker Hosting Services (AWS SDK for Python (Boto * 3)).

To run a batch transform using your model, you start a job with * the CreateTransformJob API. Amazon SageMaker uses your model and * your dataset to get inferences which are then saved to a specified S3 * location.

In the CreateModel request, you must define a * container with the PrimaryContainer parameter.

In the * request, you also provide an IAM role that Amazon SageMaker can assume to access * model artifacts and the Docker image for deployment on ML compute hosting instances * or for batch transform jobs. In addition, you use the IAM role to manage * the permissions that the inference code needs. For example, if the inference code accesses * any other AWS resources, you grant the necessary permissions via this * role.

See Also:

AWS * API Reference
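
* A hedged sketch of a CreateModel request with a single primary container;
* the ECR image URI, S3 model path, and IAM role ARN are placeholders, not
* real resources, and client is an already-constructed SageMakerClient.
*
* @code
* #include <aws/sagemaker/model/CreateModelRequest.h>
* #include <aws/sagemaker/model/ContainerDefinition.h>
*
* Aws::SageMaker::Model::ContainerDefinition primary;
* primary.SetImage("123456789012.dkr.ecr.us-east-1.amazonaws.com/my-inference:latest");
* primary.SetModelDataUrl("s3://my-bucket/model/model.tar.gz");
*
* Aws::SageMaker::Model::CreateModelRequest request;
* request.SetModelName("my-model");
* request.SetPrimaryContainer(primary);
* request.SetExecutionRoleArn("arn:aws:iam::123456789012:role/MySageMakerRole");
*
* auto outcome = client.CreateModel(request);
* if (outcome.IsSuccess())
* {
*     Aws::String modelArn = outcome.GetResult().GetModelArn();
* }
* @endcode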

*/ virtual Model::CreateModelOutcome CreateModel(const Model::CreateModelRequest& request) const; /** *

Creates a model in Amazon SageMaker. In the request, you name the model and * describe a primary container. For the primary container, you specify the Docker * image that contains inference code, artifacts (from prior training), and a * custom environment map that the inference code uses when you deploy the model * for predictions.

Use this API to create a model if you want to use Amazon * SageMaker hosting services or run a batch transform job.

To host your * model, you create an endpoint configuration with the * CreateEndpointConfig API, and then create an endpoint with the * CreateEndpoint API. Amazon SageMaker then deploys all of the * containers that you defined for the model in the hosting environment.

*

For an example that calls this method when deploying a model to Amazon * SageMaker hosting services, see Deploy * the Model to Amazon SageMaker Hosting Services (AWS SDK for Python (Boto * 3)).

To run a batch transform using your model, you start a job with * the CreateTransformJob API. Amazon SageMaker uses your model and * your dataset to get inferences which are then saved to a specified S3 * location.

In the CreateModel request, you must define a * container with the PrimaryContainer parameter.

In the * request, you also provide an IAM role that Amazon SageMaker can assume to access * model artifacts and the Docker image for deployment on ML compute hosting instances * or for batch transform jobs. In addition, you use the IAM role to manage * the permissions that the inference code needs. For example, if the inference code accesses * any other AWS resources, you grant the necessary permissions via this * role.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::CreateModelOutcomeCallable CreateModelCallable(const Model::CreateModelRequest& request) const; /** *

Creates a model in Amazon SageMaker. In the request, you name the model and * describe a primary container. For the primary container, you specify the Docker * image that contains inference code, artifacts (from prior training), and a * custom environment map that the inference code uses when you deploy the model * for predictions.

Use this API to create a model if you want to use Amazon * SageMaker hosting services or run a batch transform job.

To host your * model, you create an endpoint configuration with the * CreateEndpointConfig API, and then create an endpoint with the * CreateEndpoint API. Amazon SageMaker then deploys all of the * containers that you defined for the model in the hosting environment.

*

For an example that calls this method when deploying a model to Amazon * SageMaker hosting services, see Deploy * the Model to Amazon SageMaker Hosting Services (AWS SDK for Python (Boto * 3)).

To run a batch transform using your model, you start a job with * the CreateTransformJob API. Amazon SageMaker uses your model and * your dataset to get inferences which are then saved to a specified S3 * location.

In the CreateModel request, you must define a * container with the PrimaryContainer parameter.

In the * request, you also provide an IAM role that Amazon SageMaker can assume to access * model artifacts and the Docker image for deployment on ML compute hosting instances * or for batch transform jobs. In addition, you use the IAM role to manage * the permissions that the inference code needs. For example, if the inference code accesses * any other AWS resources, you grant the necessary permissions via this * role.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void CreateModelAsync(const Model::CreateModelRequest& request, const CreateModelResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Creates a model package that you can use to create Amazon SageMaker models or * list on AWS Marketplace. Buyers can subscribe to model packages listed on AWS * Marketplace to create models in Amazon SageMaker.

To create a model * package by specifying a Docker container that contains your inference code and * the Amazon S3 location of your model artifacts, provide values for * InferenceSpecification. To create a model from an algorithm * resource that you created or subscribed to in AWS Marketplace, provide a value * for SourceAlgorithmSpecification.

See Also:

AWS * API Reference

*/ virtual Model::CreateModelPackageOutcome CreateModelPackage(const Model::CreateModelPackageRequest& request) const; /** *

Creates a model package that you can use to create Amazon SageMaker models or * list on AWS Marketplace. Buyers can subscribe to model packages listed on AWS * Marketplace to create models in Amazon SageMaker.

To create a model * package by specifying a Docker container that contains your inference code and * the Amazon S3 location of your model artifacts, provide values for * InferenceSpecification. To create a model from an algorithm * resource that you created or subscribed to in AWS Marketplace, provide a value * for SourceAlgorithmSpecification.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::CreateModelPackageOutcomeCallable CreateModelPackageCallable(const Model::CreateModelPackageRequest& request) const; /** *

Creates a model package that you can use to create Amazon SageMaker models or * list on AWS Marketplace. Buyers can subscribe to model packages listed on AWS * Marketplace to create models in Amazon SageMaker.

To create a model * package by specifying a Docker container that contains your inference code and * the Amazon S3 location of your model artifacts, provide values for * InferenceSpecification. To create a model from an algorithm * resource that you created or subscribed to in AWS Marketplace, provide a value * for SourceAlgorithmSpecification.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void CreateModelPackageAsync(const Model::CreateModelPackageRequest& request, const CreateModelPackageResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Creates a schedule that regularly starts Amazon SageMaker Processing Jobs to * monitor the data captured for an Amazon SageMaker Endpoint.

See * Also:

AWS * API Reference

*/ virtual Model::CreateMonitoringScheduleOutcome CreateMonitoringSchedule(const Model::CreateMonitoringScheduleRequest& request) const; /** *

Creates a schedule that regularly starts Amazon SageMaker Processing Jobs to * monitor the data captured for an Amazon SageMaker Endpoint.

See * Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::CreateMonitoringScheduleOutcomeCallable CreateMonitoringScheduleCallable(const Model::CreateMonitoringScheduleRequest& request) const; /** *

Creates a schedule that regularly starts Amazon SageMaker Processing Jobs to * monitor the data captured for an Amazon SageMaker Endpoint.

See * Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void CreateMonitoringScheduleAsync(const Model::CreateMonitoringScheduleRequest& request, const CreateMonitoringScheduleResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Creates an Amazon SageMaker notebook instance. A notebook instance is a * machine learning (ML) compute instance that runs the Jupyter Notebook App.

In * a CreateNotebookInstance request, specify the type of ML compute * instance that you want to run. Amazon SageMaker launches the instance, installs * common libraries that you can use to explore datasets for model training, and * attaches an ML storage volume to the notebook instance.

Amazon SageMaker * also provides a set of example notebooks. Each notebook demonstrates how to use * Amazon SageMaker with a specific algorithm or with a machine learning framework. *

After receiving the request, Amazon SageMaker does the following:

*
  1. Creates a network interface in the Amazon SageMaker VPC.

  2. (Option) If you specified SubnetId, Amazon SageMaker * creates a network interface in your own VPC, which is inferred from the subnet * ID that you provide in the input. When creating this network interface, Amazon * SageMaker attaches the security group that you specified in the request to the * network interface that it creates in your VPC.

  3. Launches an EC2 * instance of the type specified in the request in the Amazon SageMaker VPC. If * you specified SubnetId of your VPC, Amazon SageMaker specifies both * network interfaces when launching this instance. This enables inbound traffic * from your own VPC to the notebook instance, assuming that the security groups * allow it.

After creating the notebook instance, Amazon * SageMaker returns its Amazon Resource Name (ARN). You can't change the name of a * notebook instance after you create it.

After Amazon SageMaker creates the * notebook instance, you can connect to the Jupyter server and work in Jupyter * notebooks. For example, you can write code to explore a dataset that you can use * for model training, train a model, host models by creating Amazon SageMaker * endpoints, and validate hosted models.

For more information, see How It * Works.

See Also:

AWS * API Reference
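
* A minimal illustrative sketch; the notebook name, role ARN, and the
* instance-type enum value are assumptions, and client is an
* already-constructed Aws::SageMaker::SageMakerClient.
*
* @code
* #include <aws/sagemaker/model/CreateNotebookInstanceRequest.h>
*
* Aws::SageMaker::Model::CreateNotebookInstanceRequest request;
* request.SetNotebookInstanceName("my-notebook");
* request.SetInstanceType(Aws::SageMaker::Model::InstanceType::ml_t2_medium);
* request.SetRoleArn("arn:aws:iam::123456789012:role/MySageMakerRole");
*
* auto outcome = client.CreateNotebookInstance(request);
* if (outcome.IsSuccess())
* {
*     // The instance starts in the Pending state; poll
*     // DescribeNotebookInstance until it reaches InService.
*     Aws::String notebookArn = outcome.GetResult().GetNotebookInstanceArn();
* }
* @endcode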

*/ virtual Model::CreateNotebookInstanceOutcome CreateNotebookInstance(const Model::CreateNotebookInstanceRequest& request) const; /** *

Creates an Amazon SageMaker notebook instance. A notebook instance is a * machine learning (ML) compute instance that runs the Jupyter Notebook App.

In * a CreateNotebookInstance request, specify the type of ML compute * instance that you want to run. Amazon SageMaker launches the instance, installs * common libraries that you can use to explore datasets for model training, and * attaches an ML storage volume to the notebook instance.

Amazon SageMaker * also provides a set of example notebooks. Each notebook demonstrates how to use * Amazon SageMaker with a specific algorithm or with a machine learning framework. *

After receiving the request, Amazon SageMaker does the following:

*
  1. Creates a network interface in the Amazon SageMaker VPC.

  2. (Option) If you specified SubnetId, Amazon SageMaker * creates a network interface in your own VPC, which is inferred from the subnet * ID that you provide in the input. When creating this network interface, Amazon * SageMaker attaches the security group that you specified in the request to the * network interface that it creates in your VPC.

  3. Launches an EC2 * instance of the type specified in the request in the Amazon SageMaker VPC. If * you specified SubnetId of your VPC, Amazon SageMaker specifies both * network interfaces when launching this instance. This enables inbound traffic * from your own VPC to the notebook instance, assuming that the security groups * allow it.

After creating the notebook instance, Amazon * SageMaker returns its Amazon Resource Name (ARN). You can't change the name of a * notebook instance after you create it.

After Amazon SageMaker creates the * notebook instance, you can connect to the Jupyter server and work in Jupyter * notebooks. For example, you can write code to explore a dataset that you can use * for model training, train a model, host models by creating Amazon SageMaker * endpoints, and validate hosted models.

For more information, see How It * Works.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::CreateNotebookInstanceOutcomeCallable CreateNotebookInstanceCallable(const Model::CreateNotebookInstanceRequest& request) const; /** *

Creates an Amazon SageMaker notebook instance. A notebook instance is a * machine learning (ML) compute instance that runs the Jupyter Notebook App.

In * a CreateNotebookInstance request, specify the type of ML compute * instance that you want to run. Amazon SageMaker launches the instance, installs * common libraries that you can use to explore datasets for model training, and * attaches an ML storage volume to the notebook instance.

Amazon SageMaker * also provides a set of example notebooks. Each notebook demonstrates how to use * Amazon SageMaker with a specific algorithm or with a machine learning framework. *

After receiving the request, Amazon SageMaker does the following:

*
  1. Creates a network interface in the Amazon SageMaker VPC.

  2. (Option) If you specified SubnetId, Amazon SageMaker * creates a network interface in your own VPC, which is inferred from the subnet * ID that you provide in the input. When creating this network interface, Amazon * SageMaker attaches the security group that you specified in the request to the * network interface that it creates in your VPC.

  3. Launches an EC2 * instance of the type specified in the request in the Amazon SageMaker VPC. If * you specified SubnetId of your VPC, Amazon SageMaker specifies both * network interfaces when launching this instance. This enables inbound traffic * from your own VPC to the notebook instance, assuming that the security groups * allow it.

After creating the notebook instance, Amazon * SageMaker returns its Amazon Resource Name (ARN). You can't change the name of a * notebook instance after you create it.

After Amazon SageMaker creates the * notebook instance, you can connect to the Jupyter server and work in Jupyter * notebooks. For example, you can write code to explore a dataset that you can use * for model training, train a model, host models by creating Amazon SageMaker * endpoints, and validate hosted models.

For more information, see How It * Works.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void CreateNotebookInstanceAsync(const Model::CreateNotebookInstanceRequest& request, const CreateNotebookInstanceResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Creates a lifecycle configuration that you can associate with a notebook * instance. A lifecycle configuration is a collection of shell scripts that * run when you create or start a notebook instance.

Each lifecycle * configuration script has a limit of 16384 characters.

The value of the * $PATH environment variable that is available to both scripts is * /sbin:/bin:/usr/sbin:/usr/bin.

View CloudWatch Logs for * notebook instance lifecycle configurations in log group * /aws/sagemaker/NotebookInstances in log stream * [notebook-instance-name]/[LifecycleConfigHook].

Lifecycle * configuration scripts cannot run for longer than 5 minutes. If a script runs for * longer than 5 minutes, it fails and the notebook instance is not created or * started.

For information about notebook instance lifecycle * configurations, see Step * 2.1: (Optional) Customize a Notebook Instance.

See Also:

AWS * API Reference
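
* A hedged sketch of attaching a single OnStart hook; the config name is an
* assumption and the base64 string is only a placeholder for an encoded
* shell script, with client an already-constructed SageMakerClient.
*
* @code
* #include <aws/sagemaker/model/CreateNotebookInstanceLifecycleConfigRequest.h>
* #include <aws/sagemaker/model/NotebookInstanceLifecycleHook.h>
*
* // The hook content must be a base64-encoded shell script.
* Aws::SageMaker::Model::NotebookInstanceLifecycleHook onStartHook;
* onStartHook.SetContent("IyEvYmluL2Jhc2gKZWNobyBzdGFydGVkCg==");
*
* Aws::SageMaker::Model::CreateNotebookInstanceLifecycleConfigRequest request;
* request.SetNotebookInstanceLifecycleConfigName("my-lifecycle-config");
* request.AddOnStart(onStartHook);
*
* auto outcome = client.CreateNotebookInstanceLifecycleConfig(request);
* @endcode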

*/ virtual Model::CreateNotebookInstanceLifecycleConfigOutcome CreateNotebookInstanceLifecycleConfig(const Model::CreateNotebookInstanceLifecycleConfigRequest& request) const; /** *

Creates a lifecycle configuration that you can associate with a notebook * instance. A lifecycle configuration is a collection of shell scripts that * run when you create or start a notebook instance.

Each lifecycle * configuration script has a limit of 16384 characters.

The value of the * $PATH environment variable that is available to both scripts is * /sbin:/bin:/usr/sbin:/usr/bin.

View CloudWatch Logs for * notebook instance lifecycle configurations in log group * /aws/sagemaker/NotebookInstances in log stream * [notebook-instance-name]/[LifecycleConfigHook].

Lifecycle * configuration scripts cannot run for longer than 5 minutes. If a script runs for * longer than 5 minutes, it fails and the notebook instance is not created or * started.

For information about notebook instance lifecycle * configurations, see Step * 2.1: (Optional) Customize a Notebook Instance.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::CreateNotebookInstanceLifecycleConfigOutcomeCallable CreateNotebookInstanceLifecycleConfigCallable(const Model::CreateNotebookInstanceLifecycleConfigRequest& request) const; /** *

Creates a lifecycle configuration that you can associate with a notebook * instance. A lifecycle configuration is a collection of shell scripts that * run when you create or start a notebook instance.

Each lifecycle * configuration script has a limit of 16384 characters.

The value of the * $PATH environment variable that is available to both scripts is * /sbin:/bin:/usr/sbin:/usr/bin.

View CloudWatch Logs for * notebook instance lifecycle configurations in log group * /aws/sagemaker/NotebookInstances in log stream * [notebook-instance-name]/[LifecycleConfigHook].

Lifecycle * configuration scripts cannot run for longer than 5 minutes. If a script runs for * longer than 5 minutes, it fails and the notebook instance is not created or * started.

For information about notebook instance lifecycle * configurations, see Step * 2.1: (Optional) Customize a Notebook Instance.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void CreateNotebookInstanceLifecycleConfigAsync(const Model::CreateNotebookInstanceLifecycleConfigRequest& request, const CreateNotebookInstanceLifecycleConfigResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Creates a URL for a specified UserProfile in a Domain. When accessed in a web * browser, the user will be automatically signed in to Amazon SageMaker Studio, * and granted access to all of the Apps and files associated with the Domain's * Amazon Elastic File System (EFS) volume. This operation can only be called when * the authentication mode equals IAM.

See Also:

AWS * API Reference
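
* A short illustrative sketch; the Domain ID and user profile name are
* placeholders, and client is an already-constructed SageMakerClient.
*
* @code
* #include <aws/sagemaker/model/CreatePresignedDomainUrlRequest.h>
*
* Aws::SageMaker::Model::CreatePresignedDomainUrlRequest request;
* request.SetDomainId("d-xxxxxxxxxxxx");
* request.SetUserProfileName("my-user-profile");
*
* auto outcome = client.CreatePresignedDomainUrl(request);
* if (outcome.IsSuccess())
* {
*     // Time-limited URL that signs the user in to Amazon SageMaker Studio.
*     Aws::String url = outcome.GetResult().GetAuthorizedUrl();
* }
* @endcode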

*/ virtual Model::CreatePresignedDomainUrlOutcome CreatePresignedDomainUrl(const Model::CreatePresignedDomainUrlRequest& request) const; /** *

Creates a URL for a specified UserProfile in a Domain. When accessed in a web * browser, the user will be automatically signed in to Amazon SageMaker Studio, * and granted access to all of the Apps and files associated with the Domain's * Amazon Elastic File System (EFS) volume. This operation can only be called when * the authentication mode equals IAM.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::CreatePresignedDomainUrlOutcomeCallable CreatePresignedDomainUrlCallable(const Model::CreatePresignedDomainUrlRequest& request) const; /** *

Creates a URL for a specified UserProfile in a Domain. When accessed in a web * browser, the user will be automatically signed in to Amazon SageMaker Studio, * and granted access to all of the Apps and files associated with the Domain's * Amazon Elastic File System (EFS) volume. This operation can only be called when * the authentication mode equals IAM.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void CreatePresignedDomainUrlAsync(const Model::CreatePresignedDomainUrlRequest& request, const CreatePresignedDomainUrlResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Returns a URL that you can use to connect to the Jupyter server from a * notebook instance. In the Amazon SageMaker console, when you choose * Open next to a notebook instance, Amazon SageMaker opens a new tab * showing the Jupyter server home page from the notebook instance. The console * uses this API to get the URL and show the page.

The IAM role or user * used to call this API defines the permissions to access the notebook instance. * Once the presigned URL is created, no additional permission is required to * access this URL. IAM authorization policies for this API are also enforced for * every HTTP request and WebSocket frame that attempts to connect to the notebook * instance.

You can restrict access to this API and to the URL that it * returns to a list of IP addresses that you specify. Use the * NotIpAddress condition operator and the aws:SourceIP * condition context key to specify the list of IP addresses that you want to have * access to the notebook instance. For more information, see Limit * Access to a Notebook Instance by IP Address.

The URL that you * get from a call to CreatePresignedNotebookInstanceUrl is valid only for 5 * minutes. If you try to use the URL after the 5-minute limit expires, you are * directed to the AWS console sign-in page.

See Also:

AWS * API Reference
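
* A short illustrative sketch; the notebook instance name is a placeholder,
* and client is an already-constructed SageMakerClient.
*
* @code
* #include <aws/sagemaker/model/CreatePresignedNotebookInstanceUrlRequest.h>
*
* Aws::SageMaker::Model::CreatePresignedNotebookInstanceUrlRequest request;
* request.SetNotebookInstanceName("my-notebook");
*
* auto outcome = client.CreatePresignedNotebookInstanceUrl(request);
* if (outcome.IsSuccess())
* {
*     // Valid for only 5 minutes, as noted above.
*     Aws::String url = outcome.GetResult().GetAuthorizedUrl();
* }
* @endcode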

*/ virtual Model::CreatePresignedNotebookInstanceUrlOutcome CreatePresignedNotebookInstanceUrl(const Model::CreatePresignedNotebookInstanceUrlRequest& request) const; /** *

Returns a URL that you can use to connect to the Jupyter server from a * notebook instance. In the Amazon SageMaker console, when you choose * Open next to a notebook instance, Amazon SageMaker opens a new tab * showing the Jupyter server home page from the notebook instance. The console * uses this API to get the URL and show the page.

The IAM role or user * used to call this API defines the permissions to access the notebook instance. * Once the presigned URL is created, no additional permission is required to * access this URL. IAM authorization policies for this API are also enforced for * every HTTP request and WebSocket frame that attempts to connect to the notebook * instance.

You can restrict access to this API and to the URL that it * returns to a list of IP addresses that you specify. Use the * NotIpAddress condition operator and the aws:SourceIP * condition context key to specify the list of IP addresses that you want to have * access to the notebook instance. For more information, see Limit * Access to a Notebook Instance by IP Address.

The URL that you * get from a call to CreatePresignedNotebookInstanceUrl is valid only for 5 * minutes. If you try to use the URL after the 5-minute limit expires, you are * directed to the AWS console sign-in page.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::CreatePresignedNotebookInstanceUrlOutcomeCallable CreatePresignedNotebookInstanceUrlCallable(const Model::CreatePresignedNotebookInstanceUrlRequest& request) const; /** *

Returns a URL that you can use to connect to the Jupyter server from a * notebook instance. In the Amazon SageMaker console, when you choose * Open next to a notebook instance, Amazon SageMaker opens a new tab * showing the Jupyter server home page from the notebook instance. The console * uses this API to get the URL and show the page.

The IAM role or user * used to call this API defines the permissions to access the notebook instance. * Once the presigned URL is created, no additional permission is required to * access this URL. IAM authorization policies for this API are also enforced for * every HTTP request and WebSocket frame that attempts to connect to the notebook * instance.

You can restrict access to this API and to the URL that it * returns to a list of IP addresses that you specify. Use the * NotIpAddress condition operator and the aws:SourceIP * condition context key to specify the list of IP addresses that you want to have * access to the notebook instance. For more information, see Limit * Access to a Notebook Instance by IP Address.

The URL that you * get from a call to CreatePresignedNotebookInstanceUrl is valid only for 5 * minutes. If you try to use the URL after the 5-minute limit expires, you are * directed to the AWS console sign-in page.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void CreatePresignedNotebookInstanceUrlAsync(const Model::CreatePresignedNotebookInstanceUrlRequest& request, const CreatePresignedNotebookInstanceUrlResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Creates a processing job.

See Also:

AWS * API Reference

*/ virtual Model::CreateProcessingJobOutcome CreateProcessingJob(const Model::CreateProcessingJobRequest& request) const; /** *

Creates a processing job.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::CreateProcessingJobOutcomeCallable CreateProcessingJobCallable(const Model::CreateProcessingJobRequest& request) const; /** *

Creates a processing job.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void CreateProcessingJobAsync(const Model::CreateProcessingJobRequest& request, const CreateProcessingJobResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Starts a model training job. After training completes, Amazon SageMaker saves * the resulting model artifacts to an Amazon S3 location that you specify.

*

If you choose to host your model using Amazon SageMaker hosting services, you * can use the resulting model artifacts as part of the model. You can also use the * artifacts in a machine learning service other than Amazon SageMaker, provided * that you know how to use them for inferences.

In the request body, you * provide the following:

  • AlgorithmSpecification - * Identifies the training algorithm to use.

  • * HyperParameters - Specify these algorithm-specific parameters to * enable the estimation of model parameters during training. Hyperparameters can * be tuned to optimize this learning process. For a list of hyperparameters for * each training algorithm provided by Amazon SageMaker, see Algorithms. *

  • InputDataConfig - Describes the training * dataset and the Amazon S3, EFS, or FSx location where it is stored.

  • OutputDataConfig - Identifies the Amazon S3 bucket where * you want Amazon SageMaker to save the results of model training.

  • ResourceConfig - Identifies the resources, ML compute * instances, and ML storage volumes to deploy for model training. In distributed * training, you specify more than one instance.

  • * EnableManagedSpotTraining - Optimize the cost of training machine * learning models by up to 80% by using Amazon EC2 Spot instances. For more * information, see Managed * Spot Training.

  • RoleARN - The Amazon * Resource Name (ARN) of the IAM role that Amazon SageMaker assumes to perform tasks on your * behalf during model training. You must grant this role the necessary permissions * so that Amazon SageMaker can successfully complete model training.

  • StoppingCondition - To help cap training costs, use * MaxRuntimeInSeconds to set a time limit for training. Use * MaxWaitTimeInSeconds to specify how long you are willing to wait * for a managed spot training job to complete.

For more * information about Amazon SageMaker, see How It * Works.

See Also:

AWS * API Reference
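
* A hedged end-to-end sketch that populates the request elements listed
* above; every name, S3 path, ARN, and enum value is an illustrative
* assumption, and client is an already-constructed SageMakerClient.
*
* @code
* #include <aws/sagemaker/model/CreateTrainingJobRequest.h>
* #include <aws/sagemaker/model/AlgorithmSpecification.h>
* #include <aws/sagemaker/model/Channel.h>
* #include <aws/sagemaker/model/DataSource.h>
* #include <aws/sagemaker/model/S3DataSource.h>
* #include <aws/sagemaker/model/OutputDataConfig.h>
* #include <aws/sagemaker/model/ResourceConfig.h>
* #include <aws/sagemaker/model/StoppingCondition.h>
*
* Aws::SageMaker::Model::AlgorithmSpecification algoSpec;
* algoSpec.SetTrainingImage("123456789012.dkr.ecr.us-east-1.amazonaws.com/my-training:latest");
* algoSpec.SetTrainingInputMode(Aws::SageMaker::Model::TrainingInputMode::File);
*
* Aws::SageMaker::Model::S3DataSource s3Source;
* s3Source.SetS3DataType(Aws::SageMaker::Model::S3DataType::S3Prefix);
* s3Source.SetS3Uri("s3://my-bucket/training-data/");
* Aws::SageMaker::Model::DataSource dataSource;
* dataSource.SetS3DataSource(s3Source);
* Aws::SageMaker::Model::Channel trainChannel;
* trainChannel.SetChannelName("train");
* trainChannel.SetDataSource(dataSource);
*
* Aws::SageMaker::Model::OutputDataConfig outputConfig;
* outputConfig.SetS3OutputPath("s3://my-bucket/output/");
*
* Aws::SageMaker::Model::ResourceConfig resources;
* resources.SetInstanceType(Aws::SageMaker::Model::TrainingInstanceType::ml_m5_xlarge);
* resources.SetInstanceCount(1);
* resources.SetVolumeSizeInGB(50);
*
* Aws::SageMaker::Model::StoppingCondition stop;
* stop.SetMaxRuntimeInSeconds(3600);
*
* Aws::SageMaker::Model::CreateTrainingJobRequest request;
* request.SetTrainingJobName("my-training-job");
* request.SetAlgorithmSpecification(algoSpec);
* request.SetRoleArn("arn:aws:iam::123456789012:role/MySageMakerRole");
* request.AddInputDataConfig(trainChannel);
* request.SetOutputDataConfig(outputConfig);
* request.SetResourceConfig(resources);
* request.SetStoppingCondition(stop);
*
* auto outcome = client.CreateTrainingJob(request);
* if (outcome.IsSuccess())
* {
*     Aws::String trainingJobArn = outcome.GetResult().GetTrainingJobArn();
* }
* @endcode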

*/ virtual Model::CreateTrainingJobOutcome CreateTrainingJob(const Model::CreateTrainingJobRequest& request) const; /** *

Starts a model training job. After training completes, Amazon SageMaker saves * the resulting model artifacts to an Amazon S3 location that you specify.

*

If you choose to host your model using Amazon SageMaker hosting services, you * can use the resulting model artifacts as part of the model. You can also use the * artifacts in a machine learning service other than Amazon SageMaker, provided * that you know how to use them for inferences.

In the request body, you * provide the following:

  • AlgorithmSpecification - * Identifies the training algorithm to use.

  • * HyperParameters - Specify these algorithm-specific parameters to * enable the estimation of model parameters during training. Hyperparameters can * be tuned to optimize this learning process. For a list of hyperparameters for * each training algorithm provided by Amazon SageMaker, see Algorithms. *

  • InputDataConfig - Describes the training * dataset and the Amazon S3, EFS, or FSx location where it is stored.

  • OutputDataConfig - Identifies the Amazon S3 bucket where * you want Amazon SageMaker to save the results of model training.

  • ResourceConfig - Identifies the resources, ML compute * instances, and ML storage volumes to deploy for model training. In distributed * training, you specify more than one instance.

  • * EnableManagedSpotTraining - Optimize the cost of training machine * learning models by up to 80% by using Amazon EC2 Spot instances. For more * information, see Managed * Spot Training.

  • RoleARN - The Amazon * Resource Name (ARN) of the IAM role that Amazon SageMaker assumes to perform tasks on your * behalf during model training. You must grant this role the necessary permissions * so that Amazon SageMaker can successfully complete model training.

  • StoppingCondition - To help cap training costs, use * MaxRuntimeInSeconds to set a time limit for training. Use * MaxWaitTimeInSeconds to specify how long you are willing to wait * for a managed spot training job to complete.

For more * information about Amazon SageMaker, see How It * Works.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::CreateTrainingJobOutcomeCallable CreateTrainingJobCallable(const Model::CreateTrainingJobRequest& request) const; /** *

Starts a model training job. After training completes, Amazon SageMaker saves the resulting model artifacts to an Amazon S3 location that you specify.

If you choose to host your model using Amazon SageMaker hosting services, you can use the resulting model artifacts as part of the model. You can also use the artifacts in a machine learning service other than Amazon SageMaker, provided that you know how to use them for inferences.

In the request body, you provide the following:

  • AlgorithmSpecification - Identifies the training algorithm to use.

  • HyperParameters - Specify these algorithm-specific parameters to enable the estimation of model parameters during training. Hyperparameters can be tuned to optimize this learning process. For a list of hyperparameters for each training algorithm provided by Amazon SageMaker, see Algorithms.

  • InputDataConfig - Describes the training dataset and the Amazon S3, EFS, or FSx location where it is stored.

  • OutputDataConfig - Identifies the Amazon S3 bucket where you want Amazon SageMaker to save the results of model training.

  • ResourceConfig - Identifies the resources, ML compute instances, and ML storage volumes to deploy for model training. In distributed training, you specify more than one instance.

  • EnableManagedSpotTraining - Optimize the cost of training machine learning models by up to 80% by using Amazon EC2 Spot instances. For more information, see Managed Spot Training.

  • RoleARN - The Amazon Resource Name (ARN) that Amazon SageMaker assumes to perform tasks on your behalf during model training. You must grant this role the necessary permissions so that Amazon SageMaker can successfully complete model training.

  • StoppingCondition - To help cap training costs, use MaxRuntimeInSeconds to set a time limit for training. Use MaxWaitTimeInSeconds to specify how long you are willing to wait for a managed spot training job to complete.

For more information about Amazon SageMaker, see How It Works.

See Also:

AWS API Reference
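
Example (editor's sketch): the Async form invokes a callback when the operation completes. The handler parameter types below follow the usual pattern of the generated ResponseReceivedHandler typedefs and should be treated as an assumption rather than a verbatim quote of this header.

    client.CreateTrainingJobAsync(
        request,
        [](const Aws::SageMaker::SageMakerClient*,
           const Aws::SageMaker::Model::CreateTrainingJobRequest&,
           const Aws::SageMaker::Model::CreateTrainingJobOutcome& outcome,
           const std::shared_ptr<const Aws::Client::AsyncCallerContext>&)
        {
            if (outcome.IsSuccess()) {
                // training job accepted; ARN available via outcome.GetResult().GetTrainingJobArn()
            }
        });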

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void CreateTrainingJobAsync(const Model::CreateTrainingJobRequest& request, const CreateTrainingJobResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Starts a transform job. A transform job uses a trained model to get inferences on a dataset and saves these results to an Amazon S3 location that you specify.

To perform batch transformations, you create a transform job and use the data that you have readily available.

In the request body, you provide the following:

  • TransformJobName - Identifies the transform job. The name must be unique within an AWS Region in an AWS account.

  • ModelName - Identifies the model to use. ModelName must be the name of an existing Amazon SageMaker model in the same AWS Region and AWS account. For information on creating a model, see CreateModel.

  • TransformInput - Describes the dataset to be transformed and the Amazon S3 location where it is stored.

  • TransformOutput - Identifies the Amazon S3 location where you want Amazon SageMaker to save the results from the transform job.

  • TransformResources - Identifies the ML compute instances for the transform job.

For more information about how batch transformation works, see Batch Transform.

See Also:

AWS API Reference
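
Example (editor's illustrative sketch, assuming an initialized client as in the earlier CreateTrainingJob sketch): a minimal batch transform request. The model name, S3 URIs, and instance type are hypothetical placeholders.

    Aws::SageMaker::Model::TransformS3DataSource s3Source;
    s3Source.SetS3DataType(Aws::SageMaker::Model::S3DataType::S3Prefix);
    s3Source.SetS3Uri("s3://my-bucket/batch-input/");                 // hypothetical

    Aws::SageMaker::Model::TransformDataSource dataSource;
    dataSource.SetS3DataSource(s3Source);

    Aws::SageMaker::Model::TransformInput input;
    input.SetDataSource(dataSource);

    Aws::SageMaker::Model::TransformOutput output;
    output.SetS3OutputPath("s3://my-bucket/batch-output/");           // hypothetical

    Aws::SageMaker::Model::TransformResources resources;
    resources.SetInstanceType(Aws::SageMaker::Model::TransformInstanceType::ml_m5_xlarge);
    resources.SetInstanceCount(1);

    Aws::SageMaker::Model::CreateTransformJobRequest request;
    request.SetTransformJobName("my-transform-job");                  // must be unique per Region/account
    request.SetModelName("my-existing-model");                        // created earlier with CreateModel
    request.SetTransformInput(input);
    request.SetTransformOutput(output);
    request.SetTransformResources(resources);

    auto outcome = client.CreateTransformJob(request);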

*/ virtual Model::CreateTransformJobOutcome CreateTransformJob(const Model::CreateTransformJobRequest& request) const; /** *

Starts a transform job. A transform job uses a trained model to get inferences on a dataset and saves these results to an Amazon S3 location that you specify.

To perform batch transformations, you create a transform job and use the data that you have readily available.

In the request body, you provide the following:

  • TransformJobName - Identifies the transform job. The name must be unique within an AWS Region in an AWS account.

  • ModelName - Identifies the model to use. ModelName must be the name of an existing Amazon SageMaker model in the same AWS Region and AWS account. For information on creating a model, see CreateModel.

  • TransformInput - Describes the dataset to be transformed and the Amazon S3 location where it is stored.

  • TransformOutput - Identifies the Amazon S3 location where you want Amazon SageMaker to save the results from the transform job.

  • TransformResources - Identifies the ML compute instances for the transform job.

For more information about how batch transformation works, see Batch Transform.

See Also:

AWS API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::CreateTransformJobOutcomeCallable CreateTransformJobCallable(const Model::CreateTransformJobRequest& request) const; /** *

Starts a transform job. A transform job uses a trained model to get inferences on a dataset and saves these results to an Amazon S3 location that you specify.

To perform batch transformations, you create a transform job and use the data that you have readily available.

In the request body, you provide the following:

  • TransformJobName - Identifies the transform job. The name must be unique within an AWS Region in an AWS account.

  • ModelName - Identifies the model to use. ModelName must be the name of an existing Amazon SageMaker model in the same AWS Region and AWS account. For information on creating a model, see CreateModel.

  • TransformInput - Describes the dataset to be transformed and the Amazon S3 location where it is stored.

  • TransformOutput - Identifies the Amazon S3 location where you want Amazon SageMaker to save the results from the transform job.

  • TransformResources - Identifies the ML compute instances for the transform job.

For more information about how batch transformation works, see Batch Transform.

See Also:

AWS API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void CreateTransformJobAsync(const Model::CreateTransformJobRequest& request, const CreateTransformJobResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Creates an Amazon SageMaker trial. A trial is a set of steps called trial components that produce a machine learning model. A trial is part of a single Amazon SageMaker experiment.

When you use Amazon SageMaker Studio or the Amazon SageMaker Python SDK, all experiments, trials, and trial components are automatically tracked, logged, and indexed. When you use the AWS SDK for Python (Boto), you must use the logging APIs provided by the SDK.

You can add tags to a trial and then use the Search API to search for the tags.

To get a list of all your trials, call the ListTrials API. To view a trial's properties, call the DescribeTrial API. To create a trial component, call the CreateTrialComponent API.

See Also:

AWS API Reference
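
Example (editor's sketch, assuming the experiment named below already exists; both names are hypothetical):

    Aws::SageMaker::Model::CreateTrialRequest request;
    request.SetExperimentName("my-experiment");   // the parent experiment
    request.SetTrialName("my-trial");             // must be unique within the account and Region
    auto outcome = client.CreateTrial(request);
    if (outcome.IsSuccess()) {
        // outcome.GetResult().GetTrialArn() identifies the new trial
    }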

*/ virtual Model::CreateTrialOutcome CreateTrial(const Model::CreateTrialRequest& request) const; /** *

Creates an Amazon SageMaker trial. A trial is a set of steps called trial components that produce a machine learning model. A trial is part of a single Amazon SageMaker experiment.

When you use Amazon SageMaker Studio or the Amazon SageMaker Python SDK, all experiments, trials, and trial components are automatically tracked, logged, and indexed. When you use the AWS SDK for Python (Boto), you must use the logging APIs provided by the SDK.

You can add tags to a trial and then use the Search API to search for the tags.

To get a list of all your trials, call the ListTrials API. To view a trial's properties, call the DescribeTrial API. To create a trial component, call the CreateTrialComponent API.

See Also:

AWS API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::CreateTrialOutcomeCallable CreateTrialCallable(const Model::CreateTrialRequest& request) const; /** *

Creates an Amazon SageMaker trial. A trial is a set of steps called trial components that produce a machine learning model. A trial is part of a single Amazon SageMaker experiment.

When you use Amazon SageMaker Studio or the Amazon SageMaker Python SDK, all experiments, trials, and trial components are automatically tracked, logged, and indexed. When you use the AWS SDK for Python (Boto), you must use the logging APIs provided by the SDK.

You can add tags to a trial and then use the Search API to search for the tags.

To get a list of all your trials, call the ListTrials API. To view a trial's properties, call the DescribeTrial API. To create a trial component, call the CreateTrialComponent API.

See Also:

AWS API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void CreateTrialAsync(const Model::CreateTrialRequest& request, const CreateTrialResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Creates a trial component, which is a stage of a machine learning trial. A trial is composed of one or more trial components. A trial component can be used in multiple trials.

Trial components include pre-processing jobs, training jobs, and batch transform jobs.

When you use Amazon SageMaker Studio or the Amazon SageMaker Python SDK, all experiments, trials, and trial components are automatically tracked, logged, and indexed. When you use the AWS SDK for Python (Boto), you must use the logging APIs provided by the SDK.

You can add tags to a trial component and then use the Search API to search for the tags.

CreateTrialComponent can only be invoked from within an Amazon SageMaker managed environment. This includes Amazon SageMaker training jobs, processing jobs, transform jobs, and Amazon SageMaker notebooks. A call to CreateTrialComponent from outside one of these environments results in an error.

See Also:

AWS API Reference
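
Example (editor's sketch; remember that, per the note above, the call must originate from an Amazon SageMaker managed environment; the names are hypothetical):

    Aws::SageMaker::Model::CreateTrialComponentRequest request;
    request.SetTrialComponentName("my-preprocessing-step");  // hypothetical
    request.SetDisplayName("Preprocessing");
    auto outcome = client.CreateTrialComponent(request);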

*/ virtual Model::CreateTrialComponentOutcome CreateTrialComponent(const Model::CreateTrialComponentRequest& request) const; /** *

Creates a trial component, which is a stage of a machine learning trial. A trial is composed of one or more trial components. A trial component can be used in multiple trials.

Trial components include pre-processing jobs, training jobs, and batch transform jobs.

When you use Amazon SageMaker Studio or the Amazon SageMaker Python SDK, all experiments, trials, and trial components are automatically tracked, logged, and indexed. When you use the AWS SDK for Python (Boto), you must use the logging APIs provided by the SDK.

You can add tags to a trial component and then use the Search API to search for the tags.

CreateTrialComponent can only be invoked from within an Amazon SageMaker managed environment. This includes Amazon SageMaker training jobs, processing jobs, transform jobs, and Amazon SageMaker notebooks. A call to CreateTrialComponent from outside one of these environments results in an error.

See Also:

AWS API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::CreateTrialComponentOutcomeCallable CreateTrialComponentCallable(const Model::CreateTrialComponentRequest& request) const; /** *

Creates a trial component, which is a stage of a machine learning trial. A trial is composed of one or more trial components. A trial component can be used in multiple trials.

Trial components include pre-processing jobs, training jobs, and batch transform jobs.

When you use Amazon SageMaker Studio or the Amazon SageMaker Python SDK, all experiments, trials, and trial components are automatically tracked, logged, and indexed. When you use the AWS SDK for Python (Boto), you must use the logging APIs provided by the SDK.

You can add tags to a trial component and then use the Search API to search for the tags.

CreateTrialComponent can only be invoked from within an Amazon SageMaker managed environment. This includes Amazon SageMaker training jobs, processing jobs, transform jobs, and Amazon SageMaker notebooks. A call to CreateTrialComponent from outside one of these environments results in an error.

See Also:

AWS API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void CreateTrialComponentAsync(const Model::CreateTrialComponentRequest& request, const CreateTrialComponentResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Creates a user profile. A user profile represents a single user within a domain, and is the main way to reference a "person" for the purposes of sharing, reporting, and other user-oriented features. This entity is created when a user onboards to Amazon SageMaker Studio. If an administrator invites a person by email or imports them from SSO, a user profile is automatically created. A user profile is the primary holder of settings for an individual user and has a reference to the user's private Amazon Elastic File System (EFS) home directory.

See Also:

AWS API Reference
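
Example (editor's sketch, assuming an existing Studio domain; the domain ID and profile name are hypothetical placeholders):

    Aws::SageMaker::Model::CreateUserProfileRequest request;
    request.SetDomainId("d-xxxxxxxxxxxx");        // hypothetical domain ID
    request.SetUserProfileName("jane-doe");
    auto outcome = client.CreateUserProfile(request);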

*/ virtual Model::CreateUserProfileOutcome CreateUserProfile(const Model::CreateUserProfileRequest& request) const; /** *

Creates a user profile. A user profile represents a single user within a domain, and is the main way to reference a "person" for the purposes of sharing, reporting, and other user-oriented features. This entity is created when a user onboards to Amazon SageMaker Studio. If an administrator invites a person by email or imports them from SSO, a user profile is automatically created. A user profile is the primary holder of settings for an individual user and has a reference to the user's private Amazon Elastic File System (EFS) home directory.

See Also:

AWS API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::CreateUserProfileOutcomeCallable CreateUserProfileCallable(const Model::CreateUserProfileRequest& request) const; /** *

Creates a user profile. A user profile represents a single user within a domain, and is the main way to reference a "person" for the purposes of sharing, reporting, and other user-oriented features. This entity is created when a user onboards to Amazon SageMaker Studio. If an administrator invites a person by email or imports them from SSO, a user profile is automatically created. A user profile is the primary holder of settings for an individual user and has a reference to the user's private Amazon Elastic File System (EFS) home directory.

See Also:

AWS API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void CreateUserProfileAsync(const Model::CreateUserProfileRequest& request, const CreateUserProfileResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Use this operation to create a workforce. This operation will return an error if a workforce already exists in the AWS Region that you specify. You can only create one workforce in each AWS Region per AWS account.

If you want to create a new workforce in an AWS Region where a workforce already exists, use the DeleteWorkforce API operation to delete the existing workforce and then use CreateWorkforce to create a new workforce.

To create a private workforce using Amazon Cognito, you must specify a Cognito user pool in CognitoConfig. You can also create an Amazon Cognito workforce using the Amazon SageMaker console. For more information, see Create a Private Workforce (Amazon Cognito).

To create a private workforce using your own OIDC Identity Provider (IdP), specify your IdP configuration in OidcConfig. Your OIDC IdP must support groups because groups are used by Ground Truth and Amazon A2I to create work teams. For more information, see Create a Private Workforce (OIDC IdP).

See Also:

AWS API Reference
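
Example (editor's sketch of the Amazon Cognito path; the user pool ID and app client ID are hypothetical placeholders):

    Aws::SageMaker::Model::CognitoConfig cognito;
    cognito.SetUserPool("us-east-1_ExamplePool");     // hypothetical Cognito user pool
    cognito.SetClientId("exampleclientid123");        // hypothetical app client

    Aws::SageMaker::Model::CreateWorkforceRequest request;
    request.SetWorkforceName("my-private-workforce");
    request.SetCognitoConfig(cognito);
    auto outcome = client.CreateWorkforce(request);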

*/ virtual Model::CreateWorkforceOutcome CreateWorkforce(const Model::CreateWorkforceRequest& request) const; /** *

Use this operation to create a workforce. This operation will return an error if a workforce already exists in the AWS Region that you specify. You can only create one workforce in each AWS Region per AWS account.

If you want to create a new workforce in an AWS Region where a workforce already exists, use the DeleteWorkforce API operation to delete the existing workforce and then use CreateWorkforce to create a new workforce.

To create a private workforce using Amazon Cognito, you must specify a Cognito user pool in CognitoConfig. You can also create an Amazon Cognito workforce using the Amazon SageMaker console. For more information, see Create a Private Workforce (Amazon Cognito).

To create a private workforce using your own OIDC Identity Provider (IdP), specify your IdP configuration in OidcConfig. Your OIDC IdP must support groups because groups are used by Ground Truth and Amazon A2I to create work teams. For more information, see Create a Private Workforce (OIDC IdP).

See Also:

AWS API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::CreateWorkforceOutcomeCallable CreateWorkforceCallable(const Model::CreateWorkforceRequest& request) const; /** *

Use this operation to create a workforce. This operation will return an error if a workforce already exists in the AWS Region that you specify. You can only create one workforce in each AWS Region per AWS account.

If you want to create a new workforce in an AWS Region where a workforce already exists, use the DeleteWorkforce API operation to delete the existing workforce and then use CreateWorkforce to create a new workforce.

To create a private workforce using Amazon Cognito, you must specify a Cognito user pool in CognitoConfig. You can also create an Amazon Cognito workforce using the Amazon SageMaker console. For more information, see Create a Private Workforce (Amazon Cognito).

To create a private workforce using your own OIDC Identity Provider (IdP), specify your IdP configuration in OidcConfig. Your OIDC IdP must support groups because groups are used by Ground Truth and Amazon A2I to create work teams. For more information, see Create a Private Workforce (OIDC IdP).

See Also:

AWS API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void CreateWorkforceAsync(const Model::CreateWorkforceRequest& request, const CreateWorkforceResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Creates a new work team for labeling your data. A work team is defined by one or more Amazon Cognito user pools. You must first create the user pools before you can create a work team.

You cannot create more than 25 work teams in an account and region.

See Also:

AWS API Reference
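
Example (editor's sketch using a Cognito member definition; all identifiers are hypothetical):

    Aws::SageMaker::Model::CognitoMemberDefinition cognitoMembers;
    cognitoMembers.SetUserPool("us-east-1_ExamplePool");
    cognitoMembers.SetUserGroup("labelers");
    cognitoMembers.SetClientId("exampleclientid123");

    Aws::SageMaker::Model::MemberDefinition member;
    member.SetCognitoMemberDefinition(cognitoMembers);

    Aws::SageMaker::Model::CreateWorkteamRequest request;
    request.SetWorkteamName("my-workteam");
    request.SetDescription("Private labeling team");
    request.AddMemberDefinitions(member);             // generated appender for the list member
    auto outcome = client.CreateWorkteam(request);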

*/ virtual Model::CreateWorkteamOutcome CreateWorkteam(const Model::CreateWorkteamRequest& request) const; /** *

Creates a new work team for labeling your data. A work team is defined by one or more Amazon Cognito user pools. You must first create the user pools before you can create a work team.

You cannot create more than 25 work teams in an account and region.

See Also:

AWS API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::CreateWorkteamOutcomeCallable CreateWorkteamCallable(const Model::CreateWorkteamRequest& request) const; /** *

Creates a new work team for labeling your data. A work team is defined by one or more Amazon Cognito user pools. You must first create the user pools before you can create a work team.

You cannot create more than 25 work teams in an account and region.

See Also:

AWS API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void CreateWorkteamAsync(const Model::CreateWorkteamRequest& request, const CreateWorkteamResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Removes the specified algorithm from your account.

See Also:

* AWS * API Reference

*/ virtual Model::DeleteAlgorithmOutcome DeleteAlgorithm(const Model::DeleteAlgorithmRequest& request) const; /** *

Removes the specified algorithm from your account.

See Also:

* AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DeleteAlgorithmOutcomeCallable DeleteAlgorithmCallable(const Model::DeleteAlgorithmRequest& request) const; /** *

Removes the specified algorithm from your account.

See Also:

* AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DeleteAlgorithmAsync(const Model::DeleteAlgorithmRequest& request, const DeleteAlgorithmResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Used to stop and delete an app.

See Also:

AWS * API Reference

*/ virtual Model::DeleteAppOutcome DeleteApp(const Model::DeleteAppRequest& request) const; /** *

Used to stop and delete an app.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DeleteAppOutcomeCallable DeleteAppCallable(const Model::DeleteAppRequest& request) const; /** *

Used to stop and delete an app.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DeleteAppAsync(const Model::DeleteAppRequest& request, const DeleteAppResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Deletes the specified Git repository from your account.

See * Also:

AWS * API Reference

*/ virtual Model::DeleteCodeRepositoryOutcome DeleteCodeRepository(const Model::DeleteCodeRepositoryRequest& request) const; /** *

Deletes the specified Git repository from your account.

See * Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DeleteCodeRepositoryOutcomeCallable DeleteCodeRepositoryCallable(const Model::DeleteCodeRepositoryRequest& request) const; /** *

Deletes the specified Git repository from your account.

See * Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DeleteCodeRepositoryAsync(const Model::DeleteCodeRepositoryRequest& request, const DeleteCodeRepositoryResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Used to delete a domain. If you onboarded with IAM mode, you will need to * delete your domain to onboard again using SSO. Use with caution. All of the * members of the domain will lose access to their EFS volume, including data, * notebooks, and other artifacts.

See Also:

AWS * API Reference
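
Example (editor's sketch; the domain ID is hypothetical, and the retention policy shown assumes the generated RetentionPolicy/RetentionType model types):

    Aws::SageMaker::Model::RetentionPolicy retention;
    retention.SetHomeEfsFileSystem(Aws::SageMaker::Model::RetentionType::Retain);  // keep the EFS volume

    Aws::SageMaker::Model::DeleteDomainRequest request;
    request.SetDomainId("d-xxxxxxxxxxxx");   // hypothetical
    request.SetRetentionPolicy(retention);
    auto outcome = client.DeleteDomain(request);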

*/ virtual Model::DeleteDomainOutcome DeleteDomain(const Model::DeleteDomainRequest& request) const; /** *

Used to delete a domain. If you onboarded with IAM mode, you will need to * delete your domain to onboard again using SSO. Use with caution. All of the * members of the domain will lose access to their EFS volume, including data, * notebooks, and other artifacts.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DeleteDomainOutcomeCallable DeleteDomainCallable(const Model::DeleteDomainRequest& request) const; /** *

Used to delete a domain. If you onboarded with IAM mode, you will need to * delete your domain to onboard again using SSO. Use with caution. All of the * members of the domain will lose access to their EFS volume, including data, * notebooks, and other artifacts.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DeleteDomainAsync(const Model::DeleteDomainRequest& request, const DeleteDomainResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Deletes an endpoint. Amazon SageMaker frees up all of the resources that were * deployed when the endpoint was created.

Amazon SageMaker retires any * custom KMS key grants associated with the endpoint, meaning you don't need to * use the RevokeGrant * API call.

See Also:

AWS * API Reference
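
Example (editor's sketch; the endpoint name is hypothetical):

    Aws::SageMaker::Model::DeleteEndpointRequest request;
    request.SetEndpointName("my-endpoint");
    auto outcome = client.DeleteEndpoint(request);
    if (!outcome.IsSuccess()) {
        // e.g. the endpoint does not exist; see outcome.GetError().GetMessage()
    }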

*/ virtual Model::DeleteEndpointOutcome DeleteEndpoint(const Model::DeleteEndpointRequest& request) const; /** *

Deletes an endpoint. Amazon SageMaker frees up all of the resources that were * deployed when the endpoint was created.

Amazon SageMaker retires any * custom KMS key grants associated with the endpoint, meaning you don't need to * use the RevokeGrant * API call.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DeleteEndpointOutcomeCallable DeleteEndpointCallable(const Model::DeleteEndpointRequest& request) const; /** *

Deletes an endpoint. Amazon SageMaker frees up all of the resources that were * deployed when the endpoint was created.

Amazon SageMaker retires any * custom KMS key grants associated with the endpoint, meaning you don't need to * use the RevokeGrant * API call.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DeleteEndpointAsync(const Model::DeleteEndpointRequest& request, const DeleteEndpointResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Deletes an endpoint configuration. The DeleteEndpointConfig API * deletes only the specified configuration. It does not delete endpoints created * using the configuration.

You must not delete an * EndpointConfig in use by an endpoint that is live or while the * UpdateEndpoint or CreateEndpoint operations are being * performed on the endpoint. If you delete the EndpointConfig of an * endpoint that is active or being created or updated you may lose visibility into * the instance type the endpoint is using. The endpoint must be deleted in order * to stop incurring charges.

See Also:

AWS * API Reference

*/ virtual Model::DeleteEndpointConfigOutcome DeleteEndpointConfig(const Model::DeleteEndpointConfigRequest& request) const; /** *

Deletes an endpoint configuration. The DeleteEndpointConfig API * deletes only the specified configuration. It does not delete endpoints created * using the configuration.

You must not delete an * EndpointConfig in use by an endpoint that is live or while the * UpdateEndpoint or CreateEndpoint operations are being * performed on the endpoint. If you delete the EndpointConfig of an * endpoint that is active or being created or updated you may lose visibility into * the instance type the endpoint is using. The endpoint must be deleted in order * to stop incurring charges.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DeleteEndpointConfigOutcomeCallable DeleteEndpointConfigCallable(const Model::DeleteEndpointConfigRequest& request) const; /** *

Deletes an endpoint configuration. The DeleteEndpointConfig API * deletes only the specified configuration. It does not delete endpoints created * using the configuration.

You must not delete an * EndpointConfig in use by an endpoint that is live or while the * UpdateEndpoint or CreateEndpoint operations are being * performed on the endpoint. If you delete the EndpointConfig of an * endpoint that is active or being created or updated you may lose visibility into * the instance type the endpoint is using. The endpoint must be deleted in order * to stop incurring charges.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DeleteEndpointConfigAsync(const Model::DeleteEndpointConfigRequest& request, const DeleteEndpointConfigResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Deletes an Amazon SageMaker experiment. All trials associated with the * experiment must be deleted first. Use the ListTrials API to get a list of * the trials associated with the experiment.

See Also:

AWS * API Reference

*/ virtual Model::DeleteExperimentOutcome DeleteExperiment(const Model::DeleteExperimentRequest& request) const; /** *

Deletes an Amazon SageMaker experiment. All trials associated with the * experiment must be deleted first. Use the ListTrials API to get a list of * the trials associated with the experiment.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DeleteExperimentOutcomeCallable DeleteExperimentCallable(const Model::DeleteExperimentRequest& request) const; /** *

Deletes an Amazon SageMaker experiment. All trials associated with the * experiment must be deleted first. Use the ListTrials API to get a list of * the trials associated with the experiment.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DeleteExperimentAsync(const Model::DeleteExperimentRequest& request, const DeleteExperimentResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Deletes the specified flow definition.

See Also:

AWS * API Reference

*/ virtual Model::DeleteFlowDefinitionOutcome DeleteFlowDefinition(const Model::DeleteFlowDefinitionRequest& request) const; /** *

Deletes the specified flow definition.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DeleteFlowDefinitionOutcomeCallable DeleteFlowDefinitionCallable(const Model::DeleteFlowDefinitionRequest& request) const; /** *

Deletes the specified flow definition.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DeleteFlowDefinitionAsync(const Model::DeleteFlowDefinitionRequest& request, const DeleteFlowDefinitionResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Use this operation to delete a human task user interface (worker task * template).

To see a list of human task user interfaces (worker task templates) in your account, use the ListHumanTaskUis operation. When you delete a worker task template, it no longer appears when you call ListHumanTaskUis.

See * Also:

AWS * API Reference

*/ virtual Model::DeleteHumanTaskUiOutcome DeleteHumanTaskUi(const Model::DeleteHumanTaskUiRequest& request) const; /** *

Use this operation to delete a human task user interface (worker task * template).

To see a list of human task user interfaces (worker task templates) in your account, use the ListHumanTaskUis operation. When you delete a worker task template, it no longer appears when you call ListHumanTaskUis.

See * Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DeleteHumanTaskUiOutcomeCallable DeleteHumanTaskUiCallable(const Model::DeleteHumanTaskUiRequest& request) const; /** *

Use this operation to delete a human task user interface (worker task * template).

To see a list of human task user interfaces (worker task templates) in your account, use the ListHumanTaskUis operation. When you delete a worker task template, it no longer appears when you call ListHumanTaskUis.

See * Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DeleteHumanTaskUiAsync(const Model::DeleteHumanTaskUiRequest& request, const DeleteHumanTaskUiResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Deletes a model. The DeleteModel API deletes only the model * entry that was created in Amazon SageMaker when you called the * CreateModel API. It does not delete model artifacts, inference code, or * the IAM role that you specified when creating the model.

See * Also:

AWS * API Reference

*/ virtual Model::DeleteModelOutcome DeleteModel(const Model::DeleteModelRequest& request) const; /** *

Deletes a model. The DeleteModel API deletes only the model * entry that was created in Amazon SageMaker when you called the * CreateModel API. It does not delete model artifacts, inference code, or * the IAM role that you specified when creating the model.

See * Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DeleteModelOutcomeCallable DeleteModelCallable(const Model::DeleteModelRequest& request) const; /** *

Deletes a model. The DeleteModel API deletes only the model * entry that was created in Amazon SageMaker when you called the * CreateModel API. It does not delete model artifacts, inference code, or * the IAM role that you specified when creating the model.

See * Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DeleteModelAsync(const Model::DeleteModelRequest& request, const DeleteModelResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Deletes a model package.

A model package is used to create Amazon * SageMaker models or list on AWS Marketplace. Buyers can subscribe to model * packages listed on AWS Marketplace to create models in Amazon * SageMaker.

See Also:

AWS * API Reference

*/ virtual Model::DeleteModelPackageOutcome DeleteModelPackage(const Model::DeleteModelPackageRequest& request) const; /** *

Deletes a model package.

A model package is used to create Amazon * SageMaker models or list on AWS Marketplace. Buyers can subscribe to model * packages listed on AWS Marketplace to create models in Amazon * SageMaker.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DeleteModelPackageOutcomeCallable DeleteModelPackageCallable(const Model::DeleteModelPackageRequest& request) const; /** *

Deletes a model package.

A model package is used to create Amazon * SageMaker models or list on AWS Marketplace. Buyers can subscribe to model * packages listed on AWS Marketplace to create models in Amazon * SageMaker.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DeleteModelPackageAsync(const Model::DeleteModelPackageRequest& request, const DeleteModelPackageResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Deletes a monitoring schedule. Also stops the schedule if it had not already been stopped. This does not delete the job execution history of the monitoring schedule.

See Also:

AWS * API Reference

*/ virtual Model::DeleteMonitoringScheduleOutcome DeleteMonitoringSchedule(const Model::DeleteMonitoringScheduleRequest& request) const; /** *

Deletes a monitoring schedule. Also stops the schedule if it had not already been stopped. This does not delete the job execution history of the monitoring schedule.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DeleteMonitoringScheduleOutcomeCallable DeleteMonitoringScheduleCallable(const Model::DeleteMonitoringScheduleRequest& request) const; /** *

Deletes a monitoring schedule. Also stops the schedule if it had not already been stopped. This does not delete the job execution history of the monitoring schedule.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DeleteMonitoringScheduleAsync(const Model::DeleteMonitoringScheduleRequest& request, const DeleteMonitoringScheduleResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Deletes an Amazon SageMaker notebook instance. Before you can delete a notebook instance, you must call the StopNotebookInstance API.

When you delete a notebook instance, you lose all of your data. Amazon SageMaker removes the ML compute instance, and deletes the ML storage volume and the network interface associated with the notebook instance.

See Also:

AWS * API Reference

*/ virtual Model::DeleteNotebookInstanceOutcome DeleteNotebookInstance(const Model::DeleteNotebookInstanceRequest& request) const; /** *

Deletes an Amazon SageMaker notebook instance. Before you can delete a notebook instance, you must call the StopNotebookInstance API.

When you delete a notebook instance, you lose all of your data. Amazon SageMaker removes the ML compute instance, and deletes the ML storage volume and the network interface associated with the notebook instance.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DeleteNotebookInstanceOutcomeCallable DeleteNotebookInstanceCallable(const Model::DeleteNotebookInstanceRequest& request) const; /** *

Deletes an Amazon SageMaker notebook instance. Before you can delete a notebook instance, you must call the StopNotebookInstance API.

When you delete a notebook instance, you lose all of your data. Amazon SageMaker removes the ML compute instance, and deletes the ML storage volume and the network interface associated with the notebook instance.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DeleteNotebookInstanceAsync(const Model::DeleteNotebookInstanceRequest& request, const DeleteNotebookInstanceResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Deletes a notebook instance lifecycle configuration.

See Also:

* AWS * API Reference

*/ virtual Model::DeleteNotebookInstanceLifecycleConfigOutcome DeleteNotebookInstanceLifecycleConfig(const Model::DeleteNotebookInstanceLifecycleConfigRequest& request) const; /** *

Deletes a notebook instance lifecycle configuration.

See Also:

* AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DeleteNotebookInstanceLifecycleConfigOutcomeCallable DeleteNotebookInstanceLifecycleConfigCallable(const Model::DeleteNotebookInstanceLifecycleConfigRequest& request) const; /** *

Deletes a notebook instance lifecycle configuration.

See Also:

* AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DeleteNotebookInstanceLifecycleConfigAsync(const Model::DeleteNotebookInstanceLifecycleConfigRequest& request, const DeleteNotebookInstanceLifecycleConfigResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Deletes the specified tags from an Amazon SageMaker resource.

To list * a resource's tags, use the ListTags API.

When you * call this API to delete tags from a hyperparameter tuning job, the deleted tags * are not removed from training jobs that the hyperparameter tuning job launched * before you called this API.

See Also:

AWS * API Reference
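
Example (editor's sketch; the resource ARN and tag key are hypothetical):

    Aws::SageMaker::Model::DeleteTagsRequest request;
    request.SetResourceArn("arn:aws:sagemaker:us-east-1:123456789012:training-job/my-training-job"); // hypothetical
    request.AddTagKeys("project");                    // remove the tag with key "project"
    auto outcome = client.DeleteTags(request);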

*/ virtual Model::DeleteTagsOutcome DeleteTags(const Model::DeleteTagsRequest& request) const; /** *

Deletes the specified tags from an Amazon SageMaker resource.

To list * a resource's tags, use the ListTags API.

When you * call this API to delete tags from a hyperparameter tuning job, the deleted tags * are not removed from training jobs that the hyperparameter tuning job launched * before you called this API.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DeleteTagsOutcomeCallable DeleteTagsCallable(const Model::DeleteTagsRequest& request) const; /** *

Deletes the specified tags from an Amazon SageMaker resource.

To list * a resource's tags, use the ListTags API.

When you * call this API to delete tags from a hyperparameter tuning job, the deleted tags * are not removed from training jobs that the hyperparameter tuning job launched * before you called this API.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DeleteTagsAsync(const Model::DeleteTagsRequest& request, const DeleteTagsResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Deletes the specified trial. All trial components that make up the trial must * be deleted first. Use the DescribeTrialComponent API to get the list of * trial components.

See Also:

AWS * API Reference

*/ virtual Model::DeleteTrialOutcome DeleteTrial(const Model::DeleteTrialRequest& request) const; /** *

Deletes the specified trial. All trial components that make up the trial must * be deleted first. Use the DescribeTrialComponent API to get the list of * trial components.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DeleteTrialOutcomeCallable DeleteTrialCallable(const Model::DeleteTrialRequest& request) const; /** *

Deletes the specified trial. All trial components that make up the trial must * be deleted first. Use the DescribeTrialComponent API to get the list of * trial components.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DeleteTrialAsync(const Model::DeleteTrialRequest& request, const DeleteTrialResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Deletes the specified trial component. A trial component must be * disassociated from all trials before the trial component can be deleted. To * disassociate a trial component from a trial, call the * DisassociateTrialComponent API.

See Also:

AWS * API Reference

*/ virtual Model::DeleteTrialComponentOutcome DeleteTrialComponent(const Model::DeleteTrialComponentRequest& request) const; /** *

Deletes the specified trial component. A trial component must be * disassociated from all trials before the trial component can be deleted. To * disassociate a trial component from a trial, call the * DisassociateTrialComponent API.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DeleteTrialComponentOutcomeCallable DeleteTrialComponentCallable(const Model::DeleteTrialComponentRequest& request) const; /** *

Deletes the specified trial component. A trial component must be * disassociated from all trials before the trial component can be deleted. To * disassociate a trial component from a trial, call the * DisassociateTrialComponent API.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DeleteTrialComponentAsync(const Model::DeleteTrialComponentRequest& request, const DeleteTrialComponentResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Deletes a user profile. When a user profile is deleted, the user loses access * to their EFS volume, including data, notebooks, and other * artifacts.

See Also:

AWS * API Reference

*/ virtual Model::DeleteUserProfileOutcome DeleteUserProfile(const Model::DeleteUserProfileRequest& request) const; /** *

Deletes a user profile. When a user profile is deleted, the user loses access * to their EFS volume, including data, notebooks, and other * artifacts.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DeleteUserProfileOutcomeCallable DeleteUserProfileCallable(const Model::DeleteUserProfileRequest& request) const; /** *

Deletes a user profile. When a user profile is deleted, the user loses access * to their EFS volume, including data, notebooks, and other * artifacts.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DeleteUserProfileAsync(const Model::DeleteUserProfileRequest& request, const DeleteUserProfileResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Use this operation to delete a workforce.

If you want to create a new workforce in an AWS Region where a workforce already exists, use this operation to delete the existing workforce and then use CreateWorkforce to create a new workforce.

If a private workforce contains one or more work teams, you must use the DeleteWorkteam operation to delete all work teams before you delete the workforce. If you try to delete a workforce that contains one or more work teams, you will receive a ResourceInUse error.

See * Also:

AWS * API Reference

*/ virtual Model::DeleteWorkforceOutcome DeleteWorkforce(const Model::DeleteWorkforceRequest& request) const; /** *

Use this operation to delete a workforce.

If you want to create a new workforce in an AWS Region where a workforce already exists, use this operation to delete the existing workforce and then use CreateWorkforce to create a new workforce.

If a private workforce contains one or more work teams, you must use the DeleteWorkteam operation to delete all work teams before you delete the workforce. If you try to delete a workforce that contains one or more work teams, you will receive a ResourceInUse error.

See * Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DeleteWorkforceOutcomeCallable DeleteWorkforceCallable(const Model::DeleteWorkforceRequest& request) const; /** *

Use this operation to delete a workforce.

If you want to create a new workforce in an AWS Region where a workforce already exists, use this operation to delete the existing workforce and then use CreateWorkforce to create a new workforce.

If a private workforce contains one or more work teams, you must use the DeleteWorkteam operation to delete all work teams before you delete the workforce. If you try to delete a workforce that contains one or more work teams, you will receive a ResourceInUse error.

See * Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DeleteWorkforceAsync(const Model::DeleteWorkforceRequest& request, const DeleteWorkforceResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Deletes an existing work team. This operation can't be undone.

See * Also:

AWS * API Reference

*/ virtual Model::DeleteWorkteamOutcome DeleteWorkteam(const Model::DeleteWorkteamRequest& request) const; /** *

Deletes an existing work team. This operation can't be undone.

See * Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DeleteWorkteamOutcomeCallable DeleteWorkteamCallable(const Model::DeleteWorkteamRequest& request) const; /** *

Deletes an existing work team. This operation can't be undone.

See * Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DeleteWorkteamAsync(const Model::DeleteWorkteamRequest& request, const DeleteWorkteamResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Returns a description of the specified algorithm that is in your * account.

See Also:

AWS * API Reference

*/ virtual Model::DescribeAlgorithmOutcome DescribeAlgorithm(const Model::DescribeAlgorithmRequest& request) const; /** *

Returns a description of the specified algorithm that is in your * account.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DescribeAlgorithmOutcomeCallable DescribeAlgorithmCallable(const Model::DescribeAlgorithmRequest& request) const; /** *

Returns a description of the specified algorithm that is in your * account.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DescribeAlgorithmAsync(const Model::DescribeAlgorithmRequest& request, const DescribeAlgorithmResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Describes the app.

See Also:

AWS * API Reference

*/ virtual Model::DescribeAppOutcome DescribeApp(const Model::DescribeAppRequest& request) const; /** *

Describes the app.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DescribeAppOutcomeCallable DescribeAppCallable(const Model::DescribeAppRequest& request) const; /** *

Describes the app.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DescribeAppAsync(const Model::DescribeAppRequest& request, const DescribeAppResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Returns information about an Amazon SageMaker job.

See Also:

* AWS * API Reference

*/ virtual Model::DescribeAutoMLJobOutcome DescribeAutoMLJob(const Model::DescribeAutoMLJobRequest& request) const; /** *

Returns information about an Amazon SageMaker job.

See Also:

* AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DescribeAutoMLJobOutcomeCallable DescribeAutoMLJobCallable(const Model::DescribeAutoMLJobRequest& request) const; /** *

Returns information about an Amazon SageMaker job.

See Also:

* AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DescribeAutoMLJobAsync(const Model::DescribeAutoMLJobRequest& request, const DescribeAutoMLJobResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Gets details about the specified Git repository.

See Also:

* AWS * API Reference

*/ virtual Model::DescribeCodeRepositoryOutcome DescribeCodeRepository(const Model::DescribeCodeRepositoryRequest& request) const; /** *

Gets details about the specified Git repository.

See Also:

* AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DescribeCodeRepositoryOutcomeCallable DescribeCodeRepositoryCallable(const Model::DescribeCodeRepositoryRequest& request) const; /** *

Gets details about the specified Git repository.

See Also:

* AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DescribeCodeRepositoryAsync(const Model::DescribeCodeRepositoryRequest& request, const DescribeCodeRepositoryResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Returns information about a model compilation job.

To create a model * compilation job, use CreateCompilationJob. To get information about * multiple model compilation jobs, use ListCompilationJobs.

See * Also:

AWS * API Reference

*/ virtual Model::DescribeCompilationJobOutcome DescribeCompilationJob(const Model::DescribeCompilationJobRequest& request) const; /** *

Returns information about a model compilation job.

To create a model * compilation job, use CreateCompilationJob. To get information about * multiple model compilation jobs, use ListCompilationJobs.

See * Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DescribeCompilationJobOutcomeCallable DescribeCompilationJobCallable(const Model::DescribeCompilationJobRequest& request) const; /** *

Returns information about a model compilation job.

To create a model * compilation job, use CreateCompilationJob. To get information about * multiple model compilation jobs, use ListCompilationJobs.

See * Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DescribeCompilationJobAsync(const Model::DescribeCompilationJobRequest& request, const DescribeCompilationJobResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

The description of the domain.

See Also:

AWS * API Reference

*/ virtual Model::DescribeDomainOutcome DescribeDomain(const Model::DescribeDomainRequest& request) const; /** *

The description of the domain.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DescribeDomainOutcomeCallable DescribeDomainCallable(const Model::DescribeDomainRequest& request) const; /** *

The description of the domain.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DescribeDomainAsync(const Model::DescribeDomainRequest& request, const DescribeDomainResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Returns the description of an endpoint.

See Also:

AWS * API Reference
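
Example (editor's sketch; the endpoint name is hypothetical):

    Aws::SageMaker::Model::DescribeEndpointRequest request;
    request.SetEndpointName("my-endpoint");
    auto outcome = client.DescribeEndpoint(request);
    if (outcome.IsSuccess() &&
        outcome.GetResult().GetEndpointStatus() == Aws::SageMaker::Model::EndpointStatus::InService) {
        // the endpoint is ready to serve invocations
    }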

*/ virtual Model::DescribeEndpointOutcome DescribeEndpoint(const Model::DescribeEndpointRequest& request) const; /** *

Returns the description of an endpoint.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DescribeEndpointOutcomeCallable DescribeEndpointCallable(const Model::DescribeEndpointRequest& request) const; /** *

Returns the description of an endpoint.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DescribeEndpointAsync(const Model::DescribeEndpointRequest& request, const DescribeEndpointResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Returns the description of an endpoint configuration created using the * CreateEndpointConfig API.

See Also:

AWS * API Reference

*/ virtual Model::DescribeEndpointConfigOutcome DescribeEndpointConfig(const Model::DescribeEndpointConfigRequest& request) const; /** *

Returns the description of an endpoint configuration created using the * CreateEndpointConfig API.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DescribeEndpointConfigOutcomeCallable DescribeEndpointConfigCallable(const Model::DescribeEndpointConfigRequest& request) const; /** *

Returns the description of an endpoint configuration created using the * CreateEndpointConfig API.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DescribeEndpointConfigAsync(const Model::DescribeEndpointConfigRequest& request, const DescribeEndpointConfigResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Provides a list of an experiment's properties.

See Also:

AWS * API Reference

*/ virtual Model::DescribeExperimentOutcome DescribeExperiment(const Model::DescribeExperimentRequest& request) const; /** *

Provides a list of an experiment's properties.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DescribeExperimentOutcomeCallable DescribeExperimentCallable(const Model::DescribeExperimentRequest& request) const; /** *

Provides a list of an experiment's properties.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DescribeExperimentAsync(const Model::DescribeExperimentRequest& request, const DescribeExperimentResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

        /** Returns information about the specified flow definition. See Also: AWS API Reference. */
        virtual Model::DescribeFlowDefinitionOutcome DescribeFlowDefinition(const Model::DescribeFlowDefinitionRequest& request) const;

        /** Returns information about the specified flow definition. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::DescribeFlowDefinitionOutcomeCallable DescribeFlowDefinitionCallable(const Model::DescribeFlowDefinitionRequest& request) const;

        /** Returns information about the specified flow definition. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void DescribeFlowDefinitionAsync(const Model::DescribeFlowDefinitionRequest& request, const DescribeFlowDefinitionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /** Returns information about the requested human task user interface (worker task template). See Also: AWS API Reference. */
        virtual Model::DescribeHumanTaskUiOutcome DescribeHumanTaskUi(const Model::DescribeHumanTaskUiRequest& request) const;

        /** Returns information about the requested human task user interface (worker task template). Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::DescribeHumanTaskUiOutcomeCallable DescribeHumanTaskUiCallable(const Model::DescribeHumanTaskUiRequest& request) const;

        /** Returns information about the requested human task user interface (worker task template). Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void DescribeHumanTaskUiAsync(const Model::DescribeHumanTaskUiRequest& request, const DescribeHumanTaskUiResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /** Gets a description of a hyperparameter tuning job. See Also: AWS API Reference. */
        virtual Model::DescribeHyperParameterTuningJobOutcome DescribeHyperParameterTuningJob(const Model::DescribeHyperParameterTuningJobRequest& request) const;

        /** Gets a description of a hyperparameter tuning job. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::DescribeHyperParameterTuningJobOutcomeCallable DescribeHyperParameterTuningJobCallable(const Model::DescribeHyperParameterTuningJobRequest& request) const;

        /** Gets a description of a hyperparameter tuning job. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void DescribeHyperParameterTuningJobAsync(const Model::DescribeHyperParameterTuningJobRequest& request, const DescribeHyperParameterTuningJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
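
        /*
         * Usage sketch (illustrative): the Callable variant returns a future, so the request can overlap
         * with other work. Assumes a configured SageMakerClient named client (see the DescribeEndpoint
         * sketch above); the tuning-job name and the setter name are assumptions of this example.
         *
         *   Aws::SageMaker::Model::DescribeHyperParameterTuningJobRequest request;
         *   request.SetHyperParameterTuningJobName("my-tuning-job");   // hypothetical name
         *   auto futureOutcome = client.DescribeHyperParameterTuningJobCallable(request);
         *   // ... do other work while the request is in flight ...
         *   auto outcome = futureOutcome.get();                        // blocks until the response arrives
         *   if (outcome.IsSuccess())
         *   {
         *       // outcome.GetResult() describes the tuning job
         *   }
         */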

        /** Gets information about a labeling job. See Also: AWS API Reference. */
        virtual Model::DescribeLabelingJobOutcome DescribeLabelingJob(const Model::DescribeLabelingJobRequest& request) const;

        /** Gets information about a labeling job. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::DescribeLabelingJobOutcomeCallable DescribeLabelingJobCallable(const Model::DescribeLabelingJobRequest& request) const;

        /** Gets information about a labeling job. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void DescribeLabelingJobAsync(const Model::DescribeLabelingJobRequest& request, const DescribeLabelingJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /** Describes a model that you created using the CreateModel API. See Also: AWS API Reference. */
        virtual Model::DescribeModelOutcome DescribeModel(const Model::DescribeModelRequest& request) const;

        /** Describes a model that you created using the CreateModel API. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::DescribeModelOutcomeCallable DescribeModelCallable(const Model::DescribeModelRequest& request) const;

        /** Describes a model that you created using the CreateModel API. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void DescribeModelAsync(const Model::DescribeModelRequest& request, const DescribeModelResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
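
        /*
         * Usage sketch (illustrative): checking the outcome of a call. Outcome objects hold either a
         * result or an error. Assumes a configured SageMakerClient named client; the model name is a
         * hypothetical value.
         *
         *   Aws::SageMaker::Model::DescribeModelRequest request;
         *   request.SetModelName("my-model");                          // hypothetical model name
         *   auto outcome = client.DescribeModel(request);
         *   if (!outcome.IsSuccess())
         *   {
         *       // GetError() exposes the error type and message returned by the service
         *       std::cerr << outcome.GetError().GetMessage() << std::endl;
         *   }
         */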

        /**
         * Returns a description of the specified model package, which is used to create Amazon SageMaker
         * models or list them on AWS Marketplace. To create models in Amazon SageMaker, buyers can
         * subscribe to model packages listed on AWS Marketplace. See Also: AWS API Reference.
         */
        virtual Model::DescribeModelPackageOutcome DescribeModelPackage(const Model::DescribeModelPackageRequest& request) const;

        /** Returns a description of the specified model package. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::DescribeModelPackageOutcomeCallable DescribeModelPackageCallable(const Model::DescribeModelPackageRequest& request) const;

        /** Returns a description of the specified model package. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void DescribeModelPackageAsync(const Model::DescribeModelPackageRequest& request, const DescribeModelPackageResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /** Describes the schedule for a monitoring job. See Also: AWS API Reference. */
        virtual Model::DescribeMonitoringScheduleOutcome DescribeMonitoringSchedule(const Model::DescribeMonitoringScheduleRequest& request) const;

        /** Describes the schedule for a monitoring job. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::DescribeMonitoringScheduleOutcomeCallable DescribeMonitoringScheduleCallable(const Model::DescribeMonitoringScheduleRequest& request) const;

        /** Describes the schedule for a monitoring job. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void DescribeMonitoringScheduleAsync(const Model::DescribeMonitoringScheduleRequest& request, const DescribeMonitoringScheduleResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /** Returns information about a notebook instance. See Also: AWS API Reference. */
        virtual Model::DescribeNotebookInstanceOutcome DescribeNotebookInstance(const Model::DescribeNotebookInstanceRequest& request) const;

        /** Returns information about a notebook instance. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::DescribeNotebookInstanceOutcomeCallable DescribeNotebookInstanceCallable(const Model::DescribeNotebookInstanceRequest& request) const;

        /** Returns information about a notebook instance. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void DescribeNotebookInstanceAsync(const Model::DescribeNotebookInstanceRequest& request, const DescribeNotebookInstanceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Returns a description of a notebook instance lifecycle configuration. For information about
         * notebook instance lifecycle configurations, see Step 2.1: (Optional) Customize a Notebook
         * Instance. See Also: AWS API Reference.
         */
        virtual Model::DescribeNotebookInstanceLifecycleConfigOutcome DescribeNotebookInstanceLifecycleConfig(const Model::DescribeNotebookInstanceLifecycleConfigRequest& request) const;

        /** Returns a description of a notebook instance lifecycle configuration. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::DescribeNotebookInstanceLifecycleConfigOutcomeCallable DescribeNotebookInstanceLifecycleConfigCallable(const Model::DescribeNotebookInstanceLifecycleConfigRequest& request) const;

        /** Returns a description of a notebook instance lifecycle configuration. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void DescribeNotebookInstanceLifecycleConfigAsync(const Model::DescribeNotebookInstanceLifecycleConfigRequest& request, const DescribeNotebookInstanceLifecycleConfigResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /** Returns a description of a processing job. See Also: AWS API Reference. */
        virtual Model::DescribeProcessingJobOutcome DescribeProcessingJob(const Model::DescribeProcessingJobRequest& request) const;

        /** Returns a description of a processing job. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::DescribeProcessingJobOutcomeCallable DescribeProcessingJobCallable(const Model::DescribeProcessingJobRequest& request) const;

        /** Returns a description of a processing job. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void DescribeProcessingJobAsync(const Model::DescribeProcessingJobRequest& request, const DescribeProcessingJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Gets information about a work team provided by a vendor. It returns details about the
         * subscription with a vendor in the AWS Marketplace. See Also: AWS API Reference.
         */
        virtual Model::DescribeSubscribedWorkteamOutcome DescribeSubscribedWorkteam(const Model::DescribeSubscribedWorkteamRequest& request) const;

        /** Gets information about a work team provided by a vendor. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::DescribeSubscribedWorkteamOutcomeCallable DescribeSubscribedWorkteamCallable(const Model::DescribeSubscribedWorkteamRequest& request) const;

        /** Gets information about a work team provided by a vendor. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void DescribeSubscribedWorkteamAsync(const Model::DescribeSubscribedWorkteamRequest& request, const DescribeSubscribedWorkteamResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /** Returns information about a training job. See Also: AWS API Reference. */
        virtual Model::DescribeTrainingJobOutcome DescribeTrainingJob(const Model::DescribeTrainingJobRequest& request) const;

        /** Returns information about a training job. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::DescribeTrainingJobOutcomeCallable DescribeTrainingJobCallable(const Model::DescribeTrainingJobRequest& request) const;

        /** Returns information about a training job. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void DescribeTrainingJobAsync(const Model::DescribeTrainingJobRequest& request, const DescribeTrainingJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
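
        /*
         * Usage sketch (illustrative): the Async variant takes a handler that is invoked on an executor
         * thread once the call completes. The handler shape shown here follows the usual generated
         * ResponseReceivedHandler pattern and should be treated as an assumption; the job name is
         * hypothetical, and a configured SageMakerClient named client is assumed.
         *
         *   Aws::SageMaker::Model::DescribeTrainingJobRequest request;
         *   request.SetTrainingJobName("my-training-job");             // hypothetical job name
         *   client.DescribeTrainingJobAsync(request,
         *       [](const Aws::SageMaker::SageMakerClient*,
         *          const Aws::SageMaker::Model::DescribeTrainingJobRequest&,
         *          const Aws::SageMaker::Model::DescribeTrainingJobOutcome& outcome,
         *          const std::shared_ptr<const Aws::Client::AsyncCallerContext>&)
         *       {
         *           if (outcome.IsSuccess())
         *           {
         *               // outcome.GetResult() reports the training job status
         *           }
         *       });
         */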

        /** Returns information about a transform job. See Also: AWS API Reference. */
        virtual Model::DescribeTransformJobOutcome DescribeTransformJob(const Model::DescribeTransformJobRequest& request) const;

        /** Returns information about a transform job. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::DescribeTransformJobOutcomeCallable DescribeTransformJobCallable(const Model::DescribeTransformJobRequest& request) const;

        /** Returns information about a transform job. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void DescribeTransformJobAsync(const Model::DescribeTransformJobRequest& request, const DescribeTransformJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /** Provides a list of a trial's properties. See Also: AWS API Reference. */
        virtual Model::DescribeTrialOutcome DescribeTrial(const Model::DescribeTrialRequest& request) const;

        /** Provides a list of a trial's properties. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::DescribeTrialOutcomeCallable DescribeTrialCallable(const Model::DescribeTrialRequest& request) const;

        /** Provides a list of a trial's properties. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void DescribeTrialAsync(const Model::DescribeTrialRequest& request, const DescribeTrialResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /** Provides a list of a trial component's properties. See Also: AWS API Reference. */
        virtual Model::DescribeTrialComponentOutcome DescribeTrialComponent(const Model::DescribeTrialComponentRequest& request) const;

        /** Provides a list of a trial component's properties. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::DescribeTrialComponentOutcomeCallable DescribeTrialComponentCallable(const Model::DescribeTrialComponentRequest& request) const;

        /** Provides a list of a trial component's properties. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void DescribeTrialComponentAsync(const Model::DescribeTrialComponentRequest& request, const DescribeTrialComponentResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /** Describes a user profile. For more information, see CreateUserProfile. See Also: AWS API Reference. */
        virtual Model::DescribeUserProfileOutcome DescribeUserProfile(const Model::DescribeUserProfileRequest& request) const;

        /** Describes a user profile. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::DescribeUserProfileOutcomeCallable DescribeUserProfileCallable(const Model::DescribeUserProfileRequest& request) const;

        /** Describes a user profile. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void DescribeUserProfileAsync(const Model::DescribeUserProfileRequest& request, const DescribeUserProfileResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Lists private workforce information, including workforce name, Amazon Resource Name (ARN), and,
         * if applicable, allowed IP address ranges (CIDRs). Allowable IP address ranges are the IP
         * addresses that workers can use to access tasks. This operation applies only to private
         * workforces. See Also: AWS API Reference.
         */
        virtual Model::DescribeWorkforceOutcome DescribeWorkforce(const Model::DescribeWorkforceRequest& request) const;

        /** Lists private workforce information. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::DescribeWorkforceOutcomeCallable DescribeWorkforceCallable(const Model::DescribeWorkforceRequest& request) const;

        /** Lists private workforce information. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void DescribeWorkforceAsync(const Model::DescribeWorkforceRequest& request, const DescribeWorkforceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Gets information about a specific work team. You can see information such as the creation date,
         * the last updated date, membership information, and the work team's Amazon Resource Name (ARN).
         * See Also: AWS API Reference.
         */
        virtual Model::DescribeWorkteamOutcome DescribeWorkteam(const Model::DescribeWorkteamRequest& request) const;

        /** Gets information about a specific work team. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::DescribeWorkteamOutcomeCallable DescribeWorkteamCallable(const Model::DescribeWorkteamRequest& request) const;

        /** Gets information about a specific work team. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void DescribeWorkteamAsync(const Model::DescribeWorkteamRequest& request, const DescribeWorkteamResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Disassociates a trial component from a trial. This doesn't affect other trials the component is
         * associated with. Before you can delete a component, you must disassociate the component from all
         * trials it is associated with. To associate a trial component with a trial, call the
         * AssociateTrialComponent API. To get a list of the trials a component is associated with, use the
         * Search API. Specify ExperimentTrialComponent for the Resource parameter. The list appears in the
         * response under Results.TrialComponent.Parents. See Also: AWS API Reference.
         */
        virtual Model::DisassociateTrialComponentOutcome DisassociateTrialComponent(const Model::DisassociateTrialComponentRequest& request) const;

        /** Disassociates a trial component from a trial. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::DisassociateTrialComponentOutcomeCallable DisassociateTrialComponentCallable(const Model::DisassociateTrialComponentRequest& request) const;

        /** Disassociates a trial component from a trial. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void DisassociateTrialComponentAsync(const Model::DisassociateTrialComponentRequest& request, const DisassociateTrialComponentResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
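
        /*
         * Usage sketch (illustrative): disassociating a component from a trial before deleting it. Assumes
         * a configured SageMakerClient named client; both names are hypothetical, and the setters are
         * assumed to follow the usual generated naming for the TrialComponentName and TrialName parameters.
         *
         *   Aws::SageMaker::Model::DisassociateTrialComponentRequest request;
         *   request.SetTrialComponentName("my-trial-component");       // hypothetical component name
         *   request.SetTrialName("my-trial");                          // hypothetical trial name
         *   auto outcome = client.DisassociateTrialComponent(request);
         */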

        /**
         * An auto-complete API for the search functionality in the Amazon SageMaker console. It returns
         * suggestions of possible matches for the property name to use in Search queries. Provides
         * suggestions for HyperParameters, Tags, and Metrics. See Also: AWS API Reference.
         */
        virtual Model::GetSearchSuggestionsOutcome GetSearchSuggestions(const Model::GetSearchSuggestionsRequest& request) const;

        /** An auto-complete API for the search functionality in the Amazon SageMaker console. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::GetSearchSuggestionsOutcomeCallable GetSearchSuggestionsCallable(const Model::GetSearchSuggestionsRequest& request) const;

        /** An auto-complete API for the search functionality in the Amazon SageMaker console. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void GetSearchSuggestionsAsync(const Model::GetSearchSuggestionsRequest& request, const GetSearchSuggestionsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /** Lists the machine learning algorithms that have been created. See Also: AWS API Reference. */
        virtual Model::ListAlgorithmsOutcome ListAlgorithms(const Model::ListAlgorithmsRequest& request) const;

        /** Lists the machine learning algorithms that have been created. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::ListAlgorithmsOutcomeCallable ListAlgorithmsCallable(const Model::ListAlgorithmsRequest& request) const;

        /** Lists the machine learning algorithms that have been created. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void ListAlgorithmsAsync(const Model::ListAlgorithmsRequest& request, const ListAlgorithmsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /** Lists apps. See Also: AWS API Reference. */
        virtual Model::ListAppsOutcome ListApps(const Model::ListAppsRequest& request) const;

        /** Lists apps. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::ListAppsOutcomeCallable ListAppsCallable(const Model::ListAppsRequest& request) const;

        /** Lists apps. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void ListAppsAsync(const Model::ListAppsRequest& request, const ListAppsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /** Request a list of jobs. See Also: AWS API Reference. */
        virtual Model::ListAutoMLJobsOutcome ListAutoMLJobs(const Model::ListAutoMLJobsRequest& request) const;

        /** Request a list of jobs. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::ListAutoMLJobsOutcomeCallable ListAutoMLJobsCallable(const Model::ListAutoMLJobsRequest& request) const;

        /** Request a list of jobs. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void ListAutoMLJobsAsync(const Model::ListAutoMLJobsRequest& request, const ListAutoMLJobsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /** List the candidates created for the job. See Also: AWS API Reference. */
        virtual Model::ListCandidatesForAutoMLJobOutcome ListCandidatesForAutoMLJob(const Model::ListCandidatesForAutoMLJobRequest& request) const;

        /** List the candidates created for the job. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::ListCandidatesForAutoMLJobOutcomeCallable ListCandidatesForAutoMLJobCallable(const Model::ListCandidatesForAutoMLJobRequest& request) const;

        /** List the candidates created for the job. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void ListCandidatesForAutoMLJobAsync(const Model::ListCandidatesForAutoMLJobRequest& request, const ListCandidatesForAutoMLJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /** Gets a list of the Git repositories in your account. See Also: AWS API Reference. */
        virtual Model::ListCodeRepositoriesOutcome ListCodeRepositories(const Model::ListCodeRepositoriesRequest& request) const;

        /** Gets a list of the Git repositories in your account. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::ListCodeRepositoriesOutcomeCallable ListCodeRepositoriesCallable(const Model::ListCodeRepositoriesRequest& request) const;

        /** Gets a list of the Git repositories in your account. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void ListCodeRepositoriesAsync(const Model::ListCodeRepositoriesRequest& request, const ListCodeRepositoriesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Lists model compilation jobs that satisfy various filters. To create a model compilation job,
         * use CreateCompilationJob. To get information about a particular model compilation job you have
         * created, use DescribeCompilationJob. See Also: AWS API Reference.
         */
        virtual Model::ListCompilationJobsOutcome ListCompilationJobs(const Model::ListCompilationJobsRequest& request) const;

        /** Lists model compilation jobs that satisfy various filters. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::ListCompilationJobsOutcomeCallable ListCompilationJobsCallable(const Model::ListCompilationJobsRequest& request) const;

        /** Lists model compilation jobs that satisfy various filters. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void ListCompilationJobsAsync(const Model::ListCompilationJobsRequest& request, const ListCompilationJobsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
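
        /*
         * Usage sketch (illustrative): listing compilation jobs and walking the returned summaries.
         * Assumes a configured SageMakerClient named client; the accessor name
         * GetCompilationJobSummaries() mirrors the API's CompilationJobSummaries member and is an
         * assumption of this sketch.
         *
         *   Aws::SageMaker::Model::ListCompilationJobsRequest request;
         *   auto outcome = client.ListCompilationJobs(request);
         *   if (outcome.IsSuccess())
         *   {
         *       for (const auto& summary : outcome.GetResult().GetCompilationJobSummaries())
         *       {
         *           // inspect each compilation job summary
         *       }
         *   }
         */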

        /** Lists the domains. See Also: AWS API Reference. */
        virtual Model::ListDomainsOutcome ListDomains(const Model::ListDomainsRequest& request) const;

        /** Lists the domains. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::ListDomainsOutcomeCallable ListDomainsCallable(const Model::ListDomainsRequest& request) const;

        /** Lists the domains. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void ListDomainsAsync(const Model::ListDomainsRequest& request, const ListDomainsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /** Lists endpoint configurations. See Also: AWS API Reference. */
        virtual Model::ListEndpointConfigsOutcome ListEndpointConfigs(const Model::ListEndpointConfigsRequest& request) const;

        /** Lists endpoint configurations. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::ListEndpointConfigsOutcomeCallable ListEndpointConfigsCallable(const Model::ListEndpointConfigsRequest& request) const;

        /** Lists endpoint configurations. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void ListEndpointConfigsAsync(const Model::ListEndpointConfigsRequest& request, const ListEndpointConfigsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /** Lists endpoints. See Also: AWS API Reference. */
        virtual Model::ListEndpointsOutcome ListEndpoints(const Model::ListEndpointsRequest& request) const;

        /** Lists endpoints. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::ListEndpointsOutcomeCallable ListEndpointsCallable(const Model::ListEndpointsRequest& request) const;

        /** Lists endpoints. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void ListEndpointsAsync(const Model::ListEndpointsRequest& request, const ListEndpointsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Lists all the experiments in your account. The list can be filtered to show only experiments
         * that were created in a specific time range. The list can be sorted by experiment name or
         * creation time. See Also: AWS API Reference.
         */
        virtual Model::ListExperimentsOutcome ListExperiments(const Model::ListExperimentsRequest& request) const;

        /** Lists all the experiments in your account. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::ListExperimentsOutcomeCallable ListExperimentsCallable(const Model::ListExperimentsRequest& request) const;

        /** Lists all the experiments in your account. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void ListExperimentsAsync(const Model::ListExperimentsRequest& request, const ListExperimentsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
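
        /*
         * Usage sketch (illustrative): paginating through experiments. Assumes a configured
         * SageMakerClient named client; GetNextToken()/SetNextToken() and GetExperimentSummaries() follow
         * the usual generated pagination pattern and are assumptions of this sketch.
         *
         *   Aws::SageMaker::Model::ListExperimentsRequest request;
         *   Aws::String nextToken;
         *   do
         *   {
         *       if (!nextToken.empty())
         *       {
         *           request.SetNextToken(nextToken);
         *       }
         *       auto outcome = client.ListExperiments(request);
         *       if (!outcome.IsSuccess())
         *       {
         *           break;                                             // handle the error as appropriate
         *       }
         *       // process outcome.GetResult().GetExperimentSummaries() here
         *       nextToken = outcome.GetResult().GetNextToken();
         *   } while (!nextToken.empty());
         */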

        /** Returns information about the flow definitions in your account. See Also: AWS API Reference. */
        virtual Model::ListFlowDefinitionsOutcome ListFlowDefinitions(const Model::ListFlowDefinitionsRequest& request) const;

        /** Returns information about the flow definitions in your account. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::ListFlowDefinitionsOutcomeCallable ListFlowDefinitionsCallable(const Model::ListFlowDefinitionsRequest& request) const;

        /** Returns information about the flow definitions in your account. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void ListFlowDefinitionsAsync(const Model::ListFlowDefinitionsRequest& request, const ListFlowDefinitionsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /** Returns information about the human task user interfaces in your account. See Also: AWS API Reference. */
        virtual Model::ListHumanTaskUisOutcome ListHumanTaskUis(const Model::ListHumanTaskUisRequest& request) const;

        /** Returns information about the human task user interfaces in your account. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::ListHumanTaskUisOutcomeCallable ListHumanTaskUisCallable(const Model::ListHumanTaskUisRequest& request) const;

        /** Returns information about the human task user interfaces in your account. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void ListHumanTaskUisAsync(const Model::ListHumanTaskUisRequest& request, const ListHumanTaskUisResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Gets a list of HyperParameterTuningJobSummary objects that describe the hyperparameter tuning
         * jobs launched in your account. See Also: AWS API Reference.
         */
        virtual Model::ListHyperParameterTuningJobsOutcome ListHyperParameterTuningJobs(const Model::ListHyperParameterTuningJobsRequest& request) const;

        /** Gets a list of HyperParameterTuningJobSummary objects that describe the hyperparameter tuning jobs launched in your account. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::ListHyperParameterTuningJobsOutcomeCallable ListHyperParameterTuningJobsCallable(const Model::ListHyperParameterTuningJobsRequest& request) const;

        /** Gets a list of HyperParameterTuningJobSummary objects that describe the hyperparameter tuning jobs launched in your account. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void ListHyperParameterTuningJobsAsync(const Model::ListHyperParameterTuningJobsRequest& request, const ListHyperParameterTuningJobsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /** Gets a list of labeling jobs. See Also: AWS API Reference. */
        virtual Model::ListLabelingJobsOutcome ListLabelingJobs(const Model::ListLabelingJobsRequest& request) const;

        /** Gets a list of labeling jobs. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::ListLabelingJobsOutcomeCallable ListLabelingJobsCallable(const Model::ListLabelingJobsRequest& request) const;

        /** Gets a list of labeling jobs. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void ListLabelingJobsAsync(const Model::ListLabelingJobsRequest& request, const ListLabelingJobsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /** Gets a list of labeling jobs assigned to a specified work team. See Also: AWS API Reference. */
        virtual Model::ListLabelingJobsForWorkteamOutcome ListLabelingJobsForWorkteam(const Model::ListLabelingJobsForWorkteamRequest& request) const;

        /** Gets a list of labeling jobs assigned to a specified work team. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::ListLabelingJobsForWorkteamOutcomeCallable ListLabelingJobsForWorkteamCallable(const Model::ListLabelingJobsForWorkteamRequest& request) const;

        /** Gets a list of labeling jobs assigned to a specified work team. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void ListLabelingJobsForWorkteamAsync(const Model::ListLabelingJobsForWorkteamRequest& request, const ListLabelingJobsForWorkteamResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /** Lists the model packages that have been created. See Also: AWS API Reference. */
        virtual Model::ListModelPackagesOutcome ListModelPackages(const Model::ListModelPackagesRequest& request) const;

        /** Lists the model packages that have been created. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::ListModelPackagesOutcomeCallable ListModelPackagesCallable(const Model::ListModelPackagesRequest& request) const;

        /** Lists the model packages that have been created. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void ListModelPackagesAsync(const Model::ListModelPackagesRequest& request, const ListModelPackagesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /** Lists models created with the CreateModel API. See Also: AWS API Reference. */
        virtual Model::ListModelsOutcome ListModels(const Model::ListModelsRequest& request) const;

        /** Lists models created with the CreateModel API. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::ListModelsOutcomeCallable ListModelsCallable(const Model::ListModelsRequest& request) const;

        /** Lists models created with the CreateModel API. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void ListModelsAsync(const Model::ListModelsRequest& request, const ListModelsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /** Returns a list of all monitoring job executions. See Also: AWS API Reference. */
        virtual Model::ListMonitoringExecutionsOutcome ListMonitoringExecutions(const Model::ListMonitoringExecutionsRequest& request) const;

        /** Returns a list of all monitoring job executions. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::ListMonitoringExecutionsOutcomeCallable ListMonitoringExecutionsCallable(const Model::ListMonitoringExecutionsRequest& request) const;

        /** Returns a list of all monitoring job executions. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void ListMonitoringExecutionsAsync(const Model::ListMonitoringExecutionsRequest& request, const ListMonitoringExecutionsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /** Returns a list of all monitoring schedules. See Also: AWS API Reference. */
        virtual Model::ListMonitoringSchedulesOutcome ListMonitoringSchedules(const Model::ListMonitoringSchedulesRequest& request) const;

        /** Returns a list of all monitoring schedules. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::ListMonitoringSchedulesOutcomeCallable ListMonitoringSchedulesCallable(const Model::ListMonitoringSchedulesRequest& request) const;

        /** Returns a list of all monitoring schedules. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void ListMonitoringSchedulesAsync(const Model::ListMonitoringSchedulesRequest& request, const ListMonitoringSchedulesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /** Lists notebook instance lifecycle configurations created with the CreateNotebookInstanceLifecycleConfig API. See Also: AWS API Reference. */
        virtual Model::ListNotebookInstanceLifecycleConfigsOutcome ListNotebookInstanceLifecycleConfigs(const Model::ListNotebookInstanceLifecycleConfigsRequest& request) const;

        /** Lists notebook instance lifecycle configurations created with the CreateNotebookInstanceLifecycleConfig API. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::ListNotebookInstanceLifecycleConfigsOutcomeCallable ListNotebookInstanceLifecycleConfigsCallable(const Model::ListNotebookInstanceLifecycleConfigsRequest& request) const;

        /** Lists notebook instance lifecycle configurations created with the CreateNotebookInstanceLifecycleConfig API. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void ListNotebookInstanceLifecycleConfigsAsync(const Model::ListNotebookInstanceLifecycleConfigsRequest& request, const ListNotebookInstanceLifecycleConfigsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /** Returns a list of the Amazon SageMaker notebook instances in the requester's account in an AWS Region. See Also: AWS API Reference. */
        virtual Model::ListNotebookInstancesOutcome ListNotebookInstances(const Model::ListNotebookInstancesRequest& request) const;

        /** Returns a list of the Amazon SageMaker notebook instances in the requester's account in an AWS Region. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::ListNotebookInstancesOutcomeCallable ListNotebookInstancesCallable(const Model::ListNotebookInstancesRequest& request) const;

        /** Returns a list of the Amazon SageMaker notebook instances in the requester's account in an AWS Region. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void ListNotebookInstancesAsync(const Model::ListNotebookInstancesRequest& request, const ListNotebookInstancesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /** Lists processing jobs that satisfy various filters. See Also: AWS API Reference. */
        virtual Model::ListProcessingJobsOutcome ListProcessingJobs(const Model::ListProcessingJobsRequest& request) const;

        /** Lists processing jobs that satisfy various filters. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::ListProcessingJobsOutcomeCallable ListProcessingJobsCallable(const Model::ListProcessingJobsRequest& request) const;

        /** Lists processing jobs that satisfy various filters. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void ListProcessingJobsAsync(const Model::ListProcessingJobsRequest& request, const ListProcessingJobsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Gets a list of the work teams that you are subscribed to in the AWS Marketplace. The list may be
         * empty if no work team satisfies the filter specified in the NameContains parameter.
         * See Also: AWS API Reference.
         */
        virtual Model::ListSubscribedWorkteamsOutcome ListSubscribedWorkteams(const Model::ListSubscribedWorkteamsRequest& request) const;

        /** Gets a list of the work teams that you are subscribed to in the AWS Marketplace. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::ListSubscribedWorkteamsOutcomeCallable ListSubscribedWorkteamsCallable(const Model::ListSubscribedWorkteamsRequest& request) const;

        /** Gets a list of the work teams that you are subscribed to in the AWS Marketplace. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void ListSubscribedWorkteamsAsync(const Model::ListSubscribedWorkteamsRequest& request, const ListSubscribedWorkteamsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /** Returns the tags for the specified Amazon SageMaker resource. See Also: AWS API Reference. */
        virtual Model::ListTagsOutcome ListTags(const Model::ListTagsRequest& request) const;

        /** Returns the tags for the specified Amazon SageMaker resource. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::ListTagsOutcomeCallable ListTagsCallable(const Model::ListTagsRequest& request) const;

        /** Returns the tags for the specified Amazon SageMaker resource. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void ListTagsAsync(const Model::ListTagsRequest& request, const ListTagsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /** Lists training jobs. See Also: AWS API Reference. */
        virtual Model::ListTrainingJobsOutcome ListTrainingJobs(const Model::ListTrainingJobsRequest& request) const;

        /** Lists training jobs. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::ListTrainingJobsOutcomeCallable ListTrainingJobsCallable(const Model::ListTrainingJobsRequest& request) const;

        /** Lists training jobs. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void ListTrainingJobsAsync(const Model::ListTrainingJobsRequest& request, const ListTrainingJobsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
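
        /*
         * Usage sketch (illustrative): limiting a training-job listing. Assumes a configured
         * SageMakerClient named client; SetMaxResults() and GetTrainingJobSummaries() mirror the
         * MaxResults parameter and the TrainingJobSummaries response member and are assumptions here.
         *
         *   Aws::SageMaker::Model::ListTrainingJobsRequest request;
         *   request.SetMaxResults(10);                                 // first page of at most 10 jobs
         *   auto outcome = client.ListTrainingJobs(request);
         *   if (outcome.IsSuccess())
         *   {
         *       for (const auto& summary : outcome.GetResult().GetTrainingJobSummaries())
         *       {
         *           // each summary carries the job name, status, and timestamps
         *       }
         *   }
         */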

        /**
         * Gets a list of TrainingJobSummary objects that describe the training jobs that a hyperparameter
         * tuning job launched. See Also: AWS API Reference.
         */
        virtual Model::ListTrainingJobsForHyperParameterTuningJobOutcome ListTrainingJobsForHyperParameterTuningJob(const Model::ListTrainingJobsForHyperParameterTuningJobRequest& request) const;

        /** Gets a list of TrainingJobSummary objects that describe the training jobs that a hyperparameter tuning job launched. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::ListTrainingJobsForHyperParameterTuningJobOutcomeCallable ListTrainingJobsForHyperParameterTuningJobCallable(const Model::ListTrainingJobsForHyperParameterTuningJobRequest& request) const;

        /** Gets a list of TrainingJobSummary objects that describe the training jobs that a hyperparameter tuning job launched. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void ListTrainingJobsForHyperParameterTuningJobAsync(const Model::ListTrainingJobsForHyperParameterTuningJobRequest& request, const ListTrainingJobsForHyperParameterTuningJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /** Lists transform jobs. See Also: AWS API Reference. */
        virtual Model::ListTransformJobsOutcome ListTransformJobs(const Model::ListTransformJobsRequest& request) const;

        /** Lists transform jobs. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::ListTransformJobsOutcomeCallable ListTransformJobsCallable(const Model::ListTransformJobsRequest& request) const;

        /** Lists transform jobs. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void ListTransformJobsAsync(const Model::ListTransformJobsRequest& request, const ListTransformJobsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Lists the trial components in your account. You can sort the list by trial component name or
         * creation time. You can filter the list to show only components that were created in a specific
         * time range. You can also filter on one of the following:
         *   - ExperimentName
         *   - SourceArn
         *   - TrialName
         * See Also: AWS API Reference.
         */
        virtual Model::ListTrialComponentsOutcome ListTrialComponents(const Model::ListTrialComponentsRequest& request) const;

        /** Lists the trial components in your account. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::ListTrialComponentsOutcomeCallable ListTrialComponentsCallable(const Model::ListTrialComponentsRequest& request) const;

        /** Lists the trial components in your account. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void ListTrialComponentsAsync(const Model::ListTrialComponentsRequest& request, const ListTrialComponentsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
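
        /*
         * Usage sketch (illustrative): restricting the listing to one experiment, per the ExperimentName
         * filter described above. Assumes a configured SageMakerClient named client; SetExperimentName()
         * is assumed to be the generated setter for that filter, and the experiment name is hypothetical.
         *
         *   Aws::SageMaker::Model::ListTrialComponentsRequest request;
         *   request.SetExperimentName("my-experiment");                // hypothetical experiment name
         *   auto outcome = client.ListTrialComponents(request);
         */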

        /**
         * Lists the trials in your account. Specify an experiment name to limit the list to the trials
         * that are part of that experiment. Specify a trial component name to limit the list to the trials
         * that are associated with that trial component. The list can be filtered to show only trials that
         * were created in a specific time range. The list can be sorted by trial name or creation time.
         * See Also: AWS API Reference.
         */
        virtual Model::ListTrialsOutcome ListTrials(const Model::ListTrialsRequest& request) const;

        /** Lists the trials in your account. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::ListTrialsOutcomeCallable ListTrialsCallable(const Model::ListTrialsRequest& request) const;

        /** Lists the trials in your account. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void ListTrialsAsync(const Model::ListTrialsRequest& request, const ListTrialsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /** Lists user profiles. See Also: AWS API Reference. */
        virtual Model::ListUserProfilesOutcome ListUserProfiles(const Model::ListUserProfilesRequest& request) const;

        /** Lists user profiles. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::ListUserProfilesOutcomeCallable ListUserProfilesCallable(const Model::ListUserProfilesRequest& request) const;

        /** Lists user profiles. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void ListUserProfilesAsync(const Model::ListUserProfilesRequest& request, const ListUserProfilesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Use this operation to list all private and vendor workforces in an AWS Region. Note that you can
         * only have one private workforce per AWS Region. See Also: AWS API Reference.
         */
        virtual Model::ListWorkforcesOutcome ListWorkforces(const Model::ListWorkforcesRequest& request) const;

        /** Use this operation to list all private and vendor workforces in an AWS Region. Returns a future to the operation so that it can be executed in parallel to other requests. */
        virtual Model::ListWorkforcesOutcomeCallable ListWorkforcesCallable(const Model::ListWorkforcesRequest& request) const;

        /** Use this operation to list all private and vendor workforces in an AWS Region. Queues the request into a thread executor and triggers the associated callback when the operation has finished. */
        virtual void ListWorkforcesAsync(const Model::ListWorkforcesRequest& request, const ListWorkforcesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Gets a list of private work teams that you have defined in a region. The list may be empty if no
         * work team satisfies the filter specified in the NameContains parameter.
         *
         * See Also: AWS API Reference
         */
        virtual Model::ListWorkteamsOutcome ListWorkteams(const Model::ListWorkteamsRequest& request) const;

        /**
         * Callable variant of ListWorkteams. Returns a future to the operation so that it can be executed in
         * parallel to other requests.
         */
        virtual Model::ListWorkteamsOutcomeCallable ListWorkteamsCallable(const Model::ListWorkteamsRequest& request) const;

        /**
         * Async variant of ListWorkteams. Queues the request into a thread executor and triggers the
         * associated callback when the operation has finished.
         */
        virtual void ListWorkteamsAsync(const Model::ListWorkteamsRequest& request, const ListWorkteamsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Renders the UI template so that you can preview the worker's experience.
         *
         * See Also: AWS API Reference
         */
        virtual Model::RenderUiTemplateOutcome RenderUiTemplate(const Model::RenderUiTemplateRequest& request) const;

        /**
         * Callable variant of RenderUiTemplate. Returns a future to the operation so that it can be executed
         * in parallel to other requests.
         */
        virtual Model::RenderUiTemplateOutcomeCallable RenderUiTemplateCallable(const Model::RenderUiTemplateRequest& request) const;

        /**
         * Async variant of RenderUiTemplate. Queues the request into a thread executor and triggers the
         * associated callback when the operation has finished.
         */
        virtual void RenderUiTemplateAsync(const Model::RenderUiTemplateRequest& request, const RenderUiTemplateResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Finds Amazon SageMaker resources that match a search query. Matching resources are returned as a
         * list of SearchRecord objects in the response. You can sort the search results by any resource
         * property in ascending or descending order.
         *
         * You can query against the following value types: numeric, text, Boolean, and timestamp.
         *
         * See Also: AWS API Reference
         */
        virtual Model::SearchOutcome Search(const Model::SearchRequest& request) const;

        /**
         * Callable variant of Search. Returns a future to the operation so that it can be executed in
         * parallel to other requests.
         */
        virtual Model::SearchOutcomeCallable SearchCallable(const Model::SearchRequest& request) const;

        /**
         * Async variant of Search. Queues the request into a thread executor and triggers the associated
         * callback when the operation has finished.
         */
        virtual void SearchAsync(const Model::SearchRequest& request, const SearchResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
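        /**
         * Usage sketch (illustrative only, not part of the generated API): searching for training jobs.
         * Assumes Aws::InitAPI() has already been called; the ResourceType value and the GetResults()/
         * GetTrainingJob() accessors follow the SDK's usual generated patterns and should be verified against
         * your SDK version.
         *
         * @code
         * #include <aws/sagemaker/SageMakerClient.h>
         * #include <aws/sagemaker/model/SearchRequest.h>
         *
         * void SearchExample(const Aws::SageMaker::SageMakerClient& client)
         * {
         *     Aws::SageMaker::Model::SearchRequest request;
         *     request.SetResource(Aws::SageMaker::Model::ResourceType::TrainingJob);  // resource type to search
         *     request.SetMaxResults(10);
         *     auto outcome = client.Search(request);
         *     if (outcome.IsSuccess())
         *     {
         *         for (const auto& record : outcome.GetResult().GetResults())
         *         {
         *             // each SearchRecord wraps the matched resource, e.g. record.GetTrainingJob()
         *         }
         *     }
         * }
         * @endcode
         */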

        /**
         * Starts a previously stopped monitoring schedule.
         *
         * New monitoring schedules are immediately started after creation.
         *
         * See Also: AWS API Reference
         */
        virtual Model::StartMonitoringScheduleOutcome StartMonitoringSchedule(const Model::StartMonitoringScheduleRequest& request) const;

        /**
         * Callable variant of StartMonitoringSchedule. Returns a future to the operation so that it can be
         * executed in parallel to other requests.
         */
        virtual Model::StartMonitoringScheduleOutcomeCallable StartMonitoringScheduleCallable(const Model::StartMonitoringScheduleRequest& request) const;

        /**
         * Async variant of StartMonitoringSchedule. Queues the request into a thread executor and triggers
         * the associated callback when the operation has finished.
         */
        virtual void StartMonitoringScheduleAsync(const Model::StartMonitoringScheduleRequest& request, const StartMonitoringScheduleResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Launches an ML compute instance with the latest version of the libraries and attaches your ML
         * storage volume. After configuring the notebook instance, Amazon SageMaker sets the notebook instance
         * status to InService. A notebook instance's status must be InService before you can connect to your
         * Jupyter notebook.
         *
         * See Also: AWS API Reference
         */
        virtual Model::StartNotebookInstanceOutcome StartNotebookInstance(const Model::StartNotebookInstanceRequest& request) const;

        /**
         * Callable variant of StartNotebookInstance. Returns a future to the operation so that it can be
         * executed in parallel to other requests.
         */
        virtual Model::StartNotebookInstanceOutcomeCallable StartNotebookInstanceCallable(const Model::StartNotebookInstanceRequest& request) const;

        /**
         * Async variant of StartNotebookInstance. Queues the request into a thread executor and triggers the
         * associated callback when the operation has finished.
         */
        virtual void StartNotebookInstanceAsync(const Model::StartNotebookInstanceRequest& request, const StartNotebookInstanceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
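        /**
         * Usage sketch (illustrative only, not part of the generated API): starting a notebook instance and
         * polling DescribeNotebookInstance until the status reaches InService, as described above. Assumes
         * Aws::InitAPI() has already been called; the instance name is hypothetical.
         *
         * @code
         * #include <aws/sagemaker/SageMakerClient.h>
         * #include <aws/sagemaker/model/StartNotebookInstanceRequest.h>
         * #include <aws/sagemaker/model/DescribeNotebookInstanceRequest.h>
         * #include <chrono>
         * #include <thread>
         *
         * bool StartAndWaitForInService(const Aws::SageMaker::SageMakerClient& client, const Aws::String& name)
         * {
         *     Aws::SageMaker::Model::StartNotebookInstanceRequest start;
         *     start.SetNotebookInstanceName(name);
         *     if (!client.StartNotebookInstance(start).IsSuccess()) return false;
         *
         *     Aws::SageMaker::Model::DescribeNotebookInstanceRequest describe;
         *     describe.SetNotebookInstanceName(name);
         *     for (int attempt = 0; attempt < 60; ++attempt)   // up to ~30 minutes
         *     {
         *         auto outcome = client.DescribeNotebookInstance(describe);
         *         if (outcome.IsSuccess() &&
         *             outcome.GetResult().GetNotebookInstanceStatus() == Aws::SageMaker::Model::NotebookInstanceStatus::InService)
         *         {
         *             return true;   // safe to connect to the Jupyter notebook
         *         }
         *         std::this_thread::sleep_for(std::chrono::seconds(30));
         *     }
         *     return false;
         * }
         * @endcode
         */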

        /**
         * A method for forcing the termination of a running job.
         *
         * See Also: AWS API Reference
         */
        virtual Model::StopAutoMLJobOutcome StopAutoMLJob(const Model::StopAutoMLJobRequest& request) const;

        /**
         * Callable variant of StopAutoMLJob. Returns a future to the operation so that it can be executed in
         * parallel to other requests.
         */
        virtual Model::StopAutoMLJobOutcomeCallable StopAutoMLJobCallable(const Model::StopAutoMLJobRequest& request) const;

        /**
         * Async variant of StopAutoMLJob. Queues the request into a thread executor and triggers the
         * associated callback when the operation has finished.
         */
        virtual void StopAutoMLJobAsync(const Model::StopAutoMLJobRequest& request, const StopAutoMLJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Stops a model compilation job.
         *
         * To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal. This gracefully shuts the
         * job down. If the job hasn't stopped, it sends the SIGKILL signal.
         *
         * When it receives a StopCompilationJob request, Amazon SageMaker changes the
         * CompilationJobSummary$CompilationJobStatus of the job to Stopping. After Amazon SageMaker stops the
         * job, it sets the CompilationJobSummary$CompilationJobStatus to Stopped.
         *
         * See Also: AWS API Reference
         */
        virtual Model::StopCompilationJobOutcome StopCompilationJob(const Model::StopCompilationJobRequest& request) const;

        /**
         * Callable variant of StopCompilationJob. Returns a future to the operation so that it can be
         * executed in parallel to other requests.
         */
        virtual Model::StopCompilationJobOutcomeCallable StopCompilationJobCallable(const Model::StopCompilationJobRequest& request) const;

        /**
         * Async variant of StopCompilationJob. Queues the request into a thread executor and triggers the
         * associated callback when the operation has finished.
         */
        virtual void StopCompilationJobAsync(const Model::StopCompilationJobRequest& request, const StopCompilationJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Stops a running hyperparameter tuning job and all running training jobs that the tuning job
         * launched.
         *
         * All model artifacts output from the training jobs are stored in Amazon Simple Storage Service
         * (Amazon S3). All data that the training jobs write to Amazon CloudWatch Logs are still available in
         * CloudWatch. After the tuning job moves to the Stopped state, it releases all reserved resources for
         * the tuning job.
         *
         * See Also: AWS API Reference
         */
        virtual Model::StopHyperParameterTuningJobOutcome StopHyperParameterTuningJob(const Model::StopHyperParameterTuningJobRequest& request) const;

        /**
         * Callable variant of StopHyperParameterTuningJob. Returns a future to the operation so that it can
         * be executed in parallel to other requests.
         */
        virtual Model::StopHyperParameterTuningJobOutcomeCallable StopHyperParameterTuningJobCallable(const Model::StopHyperParameterTuningJobRequest& request) const;

        /**
         * Async variant of StopHyperParameterTuningJob. Queues the request into a thread executor and
         * triggers the associated callback when the operation has finished.
         */
        virtual void StopHyperParameterTuningJobAsync(const Model::StopHyperParameterTuningJobRequest& request, const StopHyperParameterTuningJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Stops a running labeling job. A job that is stopped cannot be restarted. Any results obtained before
         * the job is stopped are placed in the Amazon S3 output bucket.
         *
         * See Also: AWS API Reference
         */
        virtual Model::StopLabelingJobOutcome StopLabelingJob(const Model::StopLabelingJobRequest& request) const;

        /**
         * Callable variant of StopLabelingJob. Returns a future to the operation so that it can be executed
         * in parallel to other requests.
         */
        virtual Model::StopLabelingJobOutcomeCallable StopLabelingJobCallable(const Model::StopLabelingJobRequest& request) const;

        /**
         * Async variant of StopLabelingJob. Queues the request into a thread executor and triggers the
         * associated callback when the operation has finished.
         */
        virtual void StopLabelingJobAsync(const Model::StopLabelingJobRequest& request, const StopLabelingJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Stops a previously started monitoring schedule.
         *
         * See Also: AWS API Reference
         */
        virtual Model::StopMonitoringScheduleOutcome StopMonitoringSchedule(const Model::StopMonitoringScheduleRequest& request) const;

        /**
         * Callable variant of StopMonitoringSchedule. Returns a future to the operation so that it can be
         * executed in parallel to other requests.
         */
        virtual Model::StopMonitoringScheduleOutcomeCallable StopMonitoringScheduleCallable(const Model::StopMonitoringScheduleRequest& request) const;

        /**
         * Async variant of StopMonitoringSchedule. Queues the request into a thread executor and triggers the
         * associated callback when the operation has finished.
         */
        virtual void StopMonitoringScheduleAsync(const Model::StopMonitoringScheduleRequest& request, const StopMonitoringScheduleResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Terminates the ML compute instance. Before terminating the instance, Amazon SageMaker disconnects
         * the ML storage volume from it. Amazon SageMaker preserves the ML storage volume. Amazon SageMaker
         * stops charging you for the ML compute instance when you call StopNotebookInstance.
         *
         * To access data on the ML storage volume for a notebook instance that has been terminated, call the
         * StartNotebookInstance API. StartNotebookInstance launches another ML compute instance, configures
         * it, and attaches the preserved ML storage volume so you can continue your work.
         *
         * See Also: AWS API Reference
         */
        virtual Model::StopNotebookInstanceOutcome StopNotebookInstance(const Model::StopNotebookInstanceRequest& request) const;

        /**
         * Callable variant of StopNotebookInstance. Returns a future to the operation so that it can be
         * executed in parallel to other requests.
         */
        virtual Model::StopNotebookInstanceOutcomeCallable StopNotebookInstanceCallable(const Model::StopNotebookInstanceRequest& request) const;

        /**
         * Async variant of StopNotebookInstance. Queues the request into a thread executor and triggers the
         * associated callback when the operation has finished.
         */
        virtual void StopNotebookInstanceAsync(const Model::StopNotebookInstanceRequest& request, const StopNotebookInstanceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Stops a processing job.
         *
         * See Also: AWS API Reference
         */
        virtual Model::StopProcessingJobOutcome StopProcessingJob(const Model::StopProcessingJobRequest& request) const;

        /**
         * Callable variant of StopProcessingJob. Returns a future to the operation so that it can be executed
         * in parallel to other requests.
         */
        virtual Model::StopProcessingJobOutcomeCallable StopProcessingJobCallable(const Model::StopProcessingJobRequest& request) const;

        /**
         * Async variant of StopProcessingJob. Queues the request into a thread executor and triggers the
         * associated callback when the operation has finished.
         */
        virtual void StopProcessingJobAsync(const Model::StopProcessingJobRequest& request, const StopProcessingJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Stops a training job. To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal, which
         * delays job termination for 120 seconds. Algorithms might use this 120-second window to save the
         * model artifacts, so the results of the training are not lost.
         *
         * When it receives a StopTrainingJob request, Amazon SageMaker changes the status of the job to
         * Stopping. After Amazon SageMaker stops the job, it sets the status to Stopped.
         *
         * See Also: AWS API Reference
         */
        virtual Model::StopTrainingJobOutcome StopTrainingJob(const Model::StopTrainingJobRequest& request) const;

        /**
         * Callable variant of StopTrainingJob. Returns a future to the operation so that it can be executed
         * in parallel to other requests.
         */
        virtual Model::StopTrainingJobOutcomeCallable StopTrainingJobCallable(const Model::StopTrainingJobRequest& request) const;

        /**
         * Async variant of StopTrainingJob. Queues the request into a thread executor and triggers the
         * associated callback when the operation has finished.
         */
        virtual void StopTrainingJobAsync(const Model::StopTrainingJobRequest& request, const StopTrainingJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
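        /**
         * Usage sketch (illustrative only, not part of the generated API): issuing the stop asynchronously and
         * inspecting the outcome in the completion callback. Assumes Aws::InitAPI() has already been called;
         * the training job name is hypothetical.
         *
         * @code
         * #include <aws/core/client/AsyncCallerContext.h>
         * #include <aws/sagemaker/SageMakerClient.h>
         * #include <aws/sagemaker/model/StopTrainingJobRequest.h>
         * #include <iostream>
         *
         * void StopTrainingJobExample(const Aws::SageMaker::SageMakerClient& client)
         * {
         *     Aws::SageMaker::Model::StopTrainingJobRequest request;
         *     request.SetTrainingJobName("my-training-job");   // hypothetical job name
         *
         *     client.StopTrainingJobAsync(request,
         *         [](const Aws::SageMaker::SageMakerClient*,
         *            const Aws::SageMaker::Model::StopTrainingJobRequest&,
         *            const Aws::SageMaker::Model::StopTrainingJobOutcome& outcome,
         *            const std::shared_ptr<const Aws::Client::AsyncCallerContext>&)
         *         {
         *             // Runs on the client's executor thread once the call completes.
         *             if (!outcome.IsSuccess())
         *             {
         *                 std::cerr << outcome.GetError().GetMessage() << std::endl;
         *             }
         *         });
         * }
         * @endcode
         */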

        /**
         * Stops a transform job.
         *
         * When Amazon SageMaker receives a StopTransformJob request, the status of the job changes to
         * Stopping. After Amazon SageMaker stops the job, the status is set to Stopped. When you stop a
         * transform job before it is completed, Amazon SageMaker doesn't store the job's output in Amazon S3.
         *
         * See Also: AWS API Reference
         */
        virtual Model::StopTransformJobOutcome StopTransformJob(const Model::StopTransformJobRequest& request) const;

        /**
         * Callable variant of StopTransformJob. Returns a future to the operation so that it can be executed
         * in parallel to other requests.
         */
        virtual Model::StopTransformJobOutcomeCallable StopTransformJobCallable(const Model::StopTransformJobRequest& request) const;

        /**
         * Async variant of StopTransformJob. Queues the request into a thread executor and triggers the
         * associated callback when the operation has finished.
         */
        virtual void StopTransformJobAsync(const Model::StopTransformJobRequest& request, const StopTransformJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Updates the specified Git repository with the specified values.
         *
         * See Also: AWS API Reference
         */
        virtual Model::UpdateCodeRepositoryOutcome UpdateCodeRepository(const Model::UpdateCodeRepositoryRequest& request) const;

        /**
         * Callable variant of UpdateCodeRepository. Returns a future to the operation so that it can be
         * executed in parallel to other requests.
         */
        virtual Model::UpdateCodeRepositoryOutcomeCallable UpdateCodeRepositoryCallable(const Model::UpdateCodeRepositoryRequest& request) const;

        /**
         * Async variant of UpdateCodeRepository. Queues the request into a thread executor and triggers the
         * associated callback when the operation has finished.
         */
        virtual void UpdateCodeRepositoryAsync(const Model::UpdateCodeRepositoryRequest& request, const UpdateCodeRepositoryResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Updates the default settings for new user profiles in the domain.
         *
         * See Also: AWS API Reference
         */
        virtual Model::UpdateDomainOutcome UpdateDomain(const Model::UpdateDomainRequest& request) const;

        /**
         * Callable variant of UpdateDomain. Returns a future to the operation so that it can be executed in
         * parallel to other requests.
         */
        virtual Model::UpdateDomainOutcomeCallable UpdateDomainCallable(const Model::UpdateDomainRequest& request) const;

        /**
         * Async variant of UpdateDomain. Queues the request into a thread executor and triggers the
         * associated callback when the operation has finished.
         */
        virtual void UpdateDomainAsync(const Model::UpdateDomainRequest& request, const UpdateDomainResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Deploys the new EndpointConfig specified in the request, switches to using the newly created
         * endpoint, and then deletes resources provisioned for the endpoint using the previous EndpointConfig
         * (there is no availability loss).
         *
         * When Amazon SageMaker receives the request, it sets the endpoint status to Updating. After updating
         * the endpoint, it sets the status to InService. To check the status of an endpoint, use the
         * DescribeEndpoint API.
         *
         * You must not delete an EndpointConfig in use by an endpoint that is live or while the UpdateEndpoint
         * or CreateEndpoint operations are being performed on the endpoint. To update an endpoint, you must
         * create a new EndpointConfig.
         *
         * If you delete the EndpointConfig of an endpoint that is active or being created or updated, you may
         * lose visibility into the instance type the endpoint is using. The endpoint must be deleted in order
         * to stop incurring charges.
         *
         * See Also: AWS API Reference
         */
        virtual Model::UpdateEndpointOutcome UpdateEndpoint(const Model::UpdateEndpointRequest& request) const;

        /**
         * Callable variant of UpdateEndpoint. Returns a future to the operation so that it can be executed in
         * parallel to other requests.
         */
        virtual Model::UpdateEndpointOutcomeCallable UpdateEndpointCallable(const Model::UpdateEndpointRequest& request) const;

        /**
         * Async variant of UpdateEndpoint. Queues the request into a thread executor and triggers the
         * associated callback when the operation has finished.
         */
        virtual void UpdateEndpointAsync(const Model::UpdateEndpointRequest& request, const UpdateEndpointResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
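        /**
         * Usage sketch (illustrative only, not part of the generated API): switching an endpoint to a new
         * EndpointConfig and checking its status afterwards, per the Updating/InService flow described above.
         * Assumes Aws::InitAPI() has already been called; the endpoint and config names are hypothetical.
         *
         * @code
         * #include <aws/sagemaker/SageMakerClient.h>
         * #include <aws/sagemaker/model/UpdateEndpointRequest.h>
         * #include <aws/sagemaker/model/DescribeEndpointRequest.h>
         *
         * void UpdateEndpointExample(const Aws::SageMaker::SageMakerClient& client)
         * {
         *     Aws::SageMaker::Model::UpdateEndpointRequest update;
         *     update.SetEndpointName("my-endpoint");                 // hypothetical endpoint name
         *     update.SetEndpointConfigName("my-endpoint-config-v2"); // hypothetical new EndpointConfig
         *     if (!client.UpdateEndpoint(update).IsSuccess()) return;
         *
         *     // The endpoint status moves to Updating; poll DescribeEndpoint until it is InService again.
         *     Aws::SageMaker::Model::DescribeEndpointRequest describe;
         *     describe.SetEndpointName("my-endpoint");
         *     auto status = client.DescribeEndpoint(describe);
         *     if (status.IsSuccess() &&
         *         status.GetResult().GetEndpointStatus() == Aws::SageMaker::Model::EndpointStatus::InService)
         *     {
         *         // update has completed
         *     }
         * }
         * @endcode
         */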

        /**
         * Updates variant weight of one or more variants associated with an existing endpoint, or capacity of
         * one variant associated with an existing endpoint. When it receives the request, Amazon SageMaker
         * sets the endpoint status to Updating. After updating the endpoint, it sets the status to InService.
         * To check the status of an endpoint, use the DescribeEndpoint API.
         *
         * See Also: AWS API Reference
         */
        virtual Model::UpdateEndpointWeightsAndCapacitiesOutcome UpdateEndpointWeightsAndCapacities(const Model::UpdateEndpointWeightsAndCapacitiesRequest& request) const;

        /**
         * Callable variant of UpdateEndpointWeightsAndCapacities. Returns a future to the operation so that
         * it can be executed in parallel to other requests.
         */
        virtual Model::UpdateEndpointWeightsAndCapacitiesOutcomeCallable UpdateEndpointWeightsAndCapacitiesCallable(const Model::UpdateEndpointWeightsAndCapacitiesRequest& request) const;

        /**
         * Async variant of UpdateEndpointWeightsAndCapacities. Queues the request into a thread executor and
         * triggers the associated callback when the operation has finished.
         */
        virtual void UpdateEndpointWeightsAndCapacitiesAsync(const Model::UpdateEndpointWeightsAndCapacitiesRequest& request, const UpdateEndpointWeightsAndCapacitiesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
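        /**
         * Usage sketch (illustrative only, not part of the generated API): adjusting one variant's weight and
         * instance count via the Callable form. Assumes Aws::InitAPI() has already been called; the endpoint
         * and variant names are hypothetical, and the Set/Add accessor names assume the SDK's usual generated
         * patterns.
         *
         * @code
         * #include <aws/sagemaker/SageMakerClient.h>
         * #include <aws/sagemaker/model/UpdateEndpointWeightsAndCapacitiesRequest.h>
         * #include <aws/sagemaker/model/DesiredWeightAndCapacity.h>
         *
         * void UpdateWeightsExample(const Aws::SageMaker::SageMakerClient& client)
         * {
         *     Aws::SageMaker::Model::DesiredWeightAndCapacity variant;
         *     variant.SetVariantName("variant-a");        // hypothetical production variant
         *     variant.SetDesiredWeight(0.5);
         *     variant.SetDesiredInstanceCount(2);
         *
         *     Aws::SageMaker::Model::UpdateEndpointWeightsAndCapacitiesRequest request;
         *     request.SetEndpointName("my-endpoint");     // hypothetical endpoint name
         *     request.AddDesiredWeightsAndCapacities(variant);
         *
         *     auto future = client.UpdateEndpointWeightsAndCapacitiesCallable(request);
         *     auto outcome = future.get();                // block until the update request is accepted
         * }
         * @endcode
         */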

        /**
         * Adds, updates, or removes the description of an experiment. Updates the display name of an
         * experiment.
         *
         * See Also: AWS API Reference
         */
        virtual Model::UpdateExperimentOutcome UpdateExperiment(const Model::UpdateExperimentRequest& request) const;

        /**
         * Callable variant of UpdateExperiment. Returns a future to the operation so that it can be executed
         * in parallel to other requests.
         */
        virtual Model::UpdateExperimentOutcomeCallable UpdateExperimentCallable(const Model::UpdateExperimentRequest& request) const;

        /**
         * Async variant of UpdateExperiment. Queues the request into a thread executor and triggers the
         * associated callback when the operation has finished.
         */
        virtual void UpdateExperimentAsync(const Model::UpdateExperimentRequest& request, const UpdateExperimentResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Updates a previously created schedule.
         *
         * See Also: AWS API Reference
         */
        virtual Model::UpdateMonitoringScheduleOutcome UpdateMonitoringSchedule(const Model::UpdateMonitoringScheduleRequest& request) const;

        /**
         * Callable variant of UpdateMonitoringSchedule. Returns a future to the operation so that it can be
         * executed in parallel to other requests.
         */
        virtual Model::UpdateMonitoringScheduleOutcomeCallable UpdateMonitoringScheduleCallable(const Model::UpdateMonitoringScheduleRequest& request) const;

        /**
         * Async variant of UpdateMonitoringSchedule. Queues the request into a thread executor and triggers
         * the associated callback when the operation has finished.
         */
        virtual void UpdateMonitoringScheduleAsync(const Model::UpdateMonitoringScheduleRequest& request, const UpdateMonitoringScheduleResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Updates a notebook instance. Notebook instance updates include upgrading or downgrading the ML
         * compute instance used for your notebook instance to accommodate changes in your workload
         * requirements.
         *
         * See Also: AWS API Reference
         */
        virtual Model::UpdateNotebookInstanceOutcome UpdateNotebookInstance(const Model::UpdateNotebookInstanceRequest& request) const;

        /**
         * Callable variant of UpdateNotebookInstance. Returns a future to the operation so that it can be
         * executed in parallel to other requests.
         */
        virtual Model::UpdateNotebookInstanceOutcomeCallable UpdateNotebookInstanceCallable(const Model::UpdateNotebookInstanceRequest& request) const;

        /**
         * Async variant of UpdateNotebookInstance. Queues the request into a thread executor and triggers the
         * associated callback when the operation has finished.
         */
        virtual void UpdateNotebookInstanceAsync(const Model::UpdateNotebookInstanceRequest& request, const UpdateNotebookInstanceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Updates a notebook instance lifecycle configuration created with the
         * CreateNotebookInstanceLifecycleConfig API.
         *
         * See Also: AWS API Reference
         */
        virtual Model::UpdateNotebookInstanceLifecycleConfigOutcome UpdateNotebookInstanceLifecycleConfig(const Model::UpdateNotebookInstanceLifecycleConfigRequest& request) const;

        /**
         * Callable variant of UpdateNotebookInstanceLifecycleConfig. Returns a future to the operation so
         * that it can be executed in parallel to other requests.
         */
        virtual Model::UpdateNotebookInstanceLifecycleConfigOutcomeCallable UpdateNotebookInstanceLifecycleConfigCallable(const Model::UpdateNotebookInstanceLifecycleConfigRequest& request) const;

        /**
         * Async variant of UpdateNotebookInstanceLifecycleConfig. Queues the request into a thread executor
         * and triggers the associated callback when the operation has finished.
         */
        virtual void UpdateNotebookInstanceLifecycleConfigAsync(const Model::UpdateNotebookInstanceLifecycleConfigRequest& request, const UpdateNotebookInstanceLifecycleConfigResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Updates the display name of a trial.
         *
         * See Also: AWS API Reference
         */
        virtual Model::UpdateTrialOutcome UpdateTrial(const Model::UpdateTrialRequest& request) const;

        /**
         * Callable variant of UpdateTrial. Returns a future to the operation so that it can be executed in
         * parallel to other requests.
         */
        virtual Model::UpdateTrialOutcomeCallable UpdateTrialCallable(const Model::UpdateTrialRequest& request) const;

        /**
         * Async variant of UpdateTrial. Queues the request into a thread executor and triggers the associated
         * callback when the operation has finished.
         */
        virtual void UpdateTrialAsync(const Model::UpdateTrialRequest& request, const UpdateTrialResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Updates one or more properties of a trial component.
         *
         * See Also: AWS API Reference
         */
        virtual Model::UpdateTrialComponentOutcome UpdateTrialComponent(const Model::UpdateTrialComponentRequest& request) const;

        /**
         * Callable variant of UpdateTrialComponent. Returns a future to the operation so that it can be
         * executed in parallel to other requests.
         */
        virtual Model::UpdateTrialComponentOutcomeCallable UpdateTrialComponentCallable(const Model::UpdateTrialComponentRequest& request) const;

        /**
         * Async variant of UpdateTrialComponent. Queues the request into a thread executor and triggers the
         * associated callback when the operation has finished.
         */
        virtual void UpdateTrialComponentAsync(const Model::UpdateTrialComponentRequest& request, const UpdateTrialComponentResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Updates a user profile.
         *
         * See Also: AWS API Reference
         */
        virtual Model::UpdateUserProfileOutcome UpdateUserProfile(const Model::UpdateUserProfileRequest& request) const;

        /**
         * Callable variant of UpdateUserProfile. Returns a future to the operation so that it can be executed
         * in parallel to other requests.
         */
        virtual Model::UpdateUserProfileOutcomeCallable UpdateUserProfileCallable(const Model::UpdateUserProfileRequest& request) const;

        /**
         * Async variant of UpdateUserProfile. Queues the request into a thread executor and triggers the
         * associated callback when the operation has finished.
         */
        virtual void UpdateUserProfileAsync(const Model::UpdateUserProfileRequest& request, const UpdateUserProfileResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Use this operation to update your workforce. You can use this operation to require that workers use
         * specific IP addresses to work on tasks and to update your OpenID Connect (OIDC) Identity Provider
         * (IdP) workforce configuration.
         *
         * Use SourceIpConfig to restrict worker access to tasks to a specific range of IP addresses. You
         * specify allowed IP addresses by creating a list of up to ten CIDRs. By default, a workforce isn't
         * restricted to specific IP addresses. If you specify a range of IP addresses, workers who attempt to
         * access tasks using any IP address outside the specified range are denied and get a Not Found error
         * message on the worker portal.
         *
         * Use OidcConfig to update the configuration of a workforce created using your own OIDC IdP.
         *
         * You can only update your OIDC IdP configuration when there are no work teams associated with your
         * workforce. You can delete work teams using the DeleteWorkteam operation.
         *
         * After restricting access to a range of IP addresses or updating your OIDC IdP configuration with
         * this operation, you can view details about your updated workforce using the DescribeWorkforce
         * operation.
         *
         * This operation only applies to private workforces.
         *
         * See Also: AWS API Reference
         */
        virtual Model::UpdateWorkforceOutcome UpdateWorkforce(const Model::UpdateWorkforceRequest& request) const;

        /**
         * Callable variant of UpdateWorkforce. Returns a future to the operation so that it can be executed
         * in parallel to other requests.
         */
        virtual Model::UpdateWorkforceOutcomeCallable UpdateWorkforceCallable(const Model::UpdateWorkforceRequest& request) const;

        /**
         * Async variant of UpdateWorkforce. Queues the request into a thread executor and triggers the
         * associated callback when the operation has finished.
         */
        virtual void UpdateWorkforceAsync(const Model::UpdateWorkforceRequest& request, const UpdateWorkforceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
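        /**
         * Usage sketch (illustrative only, not part of the generated API): restricting a private workforce to
         * an allowed CIDR range via SourceIpConfig, as described above. Assumes Aws::InitAPI() has already
         * been called; the workforce name is hypothetical, the CIDR is a documentation-reserved range, and the
         * Set/Add accessor names assume the SDK's usual generated patterns.
         *
         * @code
         * #include <aws/sagemaker/SageMakerClient.h>
         * #include <aws/sagemaker/model/UpdateWorkforceRequest.h>
         * #include <aws/sagemaker/model/SourceIpConfig.h>
         *
         * void RestrictWorkforceExample(const Aws::SageMaker::SageMakerClient& client)
         * {
         *     Aws::SageMaker::Model::SourceIpConfig ipConfig;
         *     ipConfig.AddCidrs("203.0.113.0/24");     // up to ten CIDRs may be specified
         *
         *     Aws::SageMaker::Model::UpdateWorkforceRequest request;
         *     request.SetWorkforceName("default");     // hypothetical workforce name
         *     request.SetSourceIpConfig(ipConfig);
         *
         *     auto outcome = client.UpdateWorkforce(request);
         *     // Workers outside 203.0.113.0/24 will now receive a Not Found error on the worker portal.
         * }
         * @endcode
         */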

        /**
         * Updates an existing work team with new member definitions or description.
         *
         * See Also: AWS API Reference
         */
        virtual Model::UpdateWorkteamOutcome UpdateWorkteam(const Model::UpdateWorkteamRequest& request) const;

        /**
         * Callable variant of UpdateWorkteam. Returns a future to the operation so that it can be executed in
         * parallel to other requests.
         */
        virtual Model::UpdateWorkteamOutcomeCallable UpdateWorkteamCallable(const Model::UpdateWorkteamRequest& request) const;

        /**
         * Async variant of UpdateWorkteam: updates an existing work team with new member definitions or
         * description.
         *
         * See Also: AWS API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void UpdateWorkteamAsync(const Model::UpdateWorkteamRequest& request, const UpdateWorkteamResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; void OverrideEndpoint(const Aws::String& endpoint); private: void init(const Aws::Client::ClientConfiguration& clientConfiguration); void AddTagsAsyncHelper(const Model::AddTagsRequest& request, const AddTagsResponseReceivedHandler& handler, const std::shared_ptr& context) const; void AssociateTrialComponentAsyncHelper(const Model::AssociateTrialComponentRequest& request, const AssociateTrialComponentResponseReceivedHandler& handler, const std::shared_ptr& context) const; void CreateAlgorithmAsyncHelper(const Model::CreateAlgorithmRequest& request, const CreateAlgorithmResponseReceivedHandler& handler, const std::shared_ptr& context) const; void CreateAppAsyncHelper(const Model::CreateAppRequest& request, const CreateAppResponseReceivedHandler& handler, const std::shared_ptr& context) const; void CreateAutoMLJobAsyncHelper(const Model::CreateAutoMLJobRequest& request, const CreateAutoMLJobResponseReceivedHandler& handler, const std::shared_ptr& context) const; void CreateCodeRepositoryAsyncHelper(const Model::CreateCodeRepositoryRequest& request, const CreateCodeRepositoryResponseReceivedHandler& handler, const std::shared_ptr& context) const; void CreateCompilationJobAsyncHelper(const Model::CreateCompilationJobRequest& request, const CreateCompilationJobResponseReceivedHandler& handler, const std::shared_ptr& context) const; void CreateDomainAsyncHelper(const Model::CreateDomainRequest& request, const CreateDomainResponseReceivedHandler& handler, const std::shared_ptr& context) const; void CreateEndpointAsyncHelper(const Model::CreateEndpointRequest& request, const CreateEndpointResponseReceivedHandler& handler, const std::shared_ptr& context) const; void CreateEndpointConfigAsyncHelper(const Model::CreateEndpointConfigRequest& request, const CreateEndpointConfigResponseReceivedHandler& handler, const std::shared_ptr& context) const; void CreateExperimentAsyncHelper(const Model::CreateExperimentRequest& request, const CreateExperimentResponseReceivedHandler& handler, const std::shared_ptr& context) const; void CreateFlowDefinitionAsyncHelper(const Model::CreateFlowDefinitionRequest& request, const CreateFlowDefinitionResponseReceivedHandler& handler, const std::shared_ptr& context) const; void CreateHumanTaskUiAsyncHelper(const Model::CreateHumanTaskUiRequest& request, const CreateHumanTaskUiResponseReceivedHandler& handler, const std::shared_ptr& context) const; void CreateHyperParameterTuningJobAsyncHelper(const Model::CreateHyperParameterTuningJobRequest& request, const CreateHyperParameterTuningJobResponseReceivedHandler& handler, const std::shared_ptr& context) const; void CreateLabelingJobAsyncHelper(const Model::CreateLabelingJobRequest& request, const CreateLabelingJobResponseReceivedHandler& handler, const std::shared_ptr& context) const; void CreateModelAsyncHelper(const Model::CreateModelRequest& request, const CreateModelResponseReceivedHandler& handler, const std::shared_ptr& context) const; void CreateModelPackageAsyncHelper(const Model::CreateModelPackageRequest& request, const CreateModelPackageResponseReceivedHandler& handler, const std::shared_ptr& context) const; void CreateMonitoringScheduleAsyncHelper(const Model::CreateMonitoringScheduleRequest& request, const 
CreateMonitoringScheduleResponseReceivedHandler& handler, const std::shared_ptr& context) const; void CreateNotebookInstanceAsyncHelper(const Model::CreateNotebookInstanceRequest& request, const CreateNotebookInstanceResponseReceivedHandler& handler, const std::shared_ptr& context) const; void CreateNotebookInstanceLifecycleConfigAsyncHelper(const Model::CreateNotebookInstanceLifecycleConfigRequest& request, const CreateNotebookInstanceLifecycleConfigResponseReceivedHandler& handler, const std::shared_ptr& context) const; void CreatePresignedDomainUrlAsyncHelper(const Model::CreatePresignedDomainUrlRequest& request, const CreatePresignedDomainUrlResponseReceivedHandler& handler, const std::shared_ptr& context) const; void CreatePresignedNotebookInstanceUrlAsyncHelper(const Model::CreatePresignedNotebookInstanceUrlRequest& request, const CreatePresignedNotebookInstanceUrlResponseReceivedHandler& handler, const std::shared_ptr& context) const; void CreateProcessingJobAsyncHelper(const Model::CreateProcessingJobRequest& request, const CreateProcessingJobResponseReceivedHandler& handler, const std::shared_ptr& context) const; void CreateTrainingJobAsyncHelper(const Model::CreateTrainingJobRequest& request, const CreateTrainingJobResponseReceivedHandler& handler, const std::shared_ptr& context) const; void CreateTransformJobAsyncHelper(const Model::CreateTransformJobRequest& request, const CreateTransformJobResponseReceivedHandler& handler, const std::shared_ptr& context) const; void CreateTrialAsyncHelper(const Model::CreateTrialRequest& request, const CreateTrialResponseReceivedHandler& handler, const std::shared_ptr& context) const; void CreateTrialComponentAsyncHelper(const Model::CreateTrialComponentRequest& request, const CreateTrialComponentResponseReceivedHandler& handler, const std::shared_ptr& context) const; void CreateUserProfileAsyncHelper(const Model::CreateUserProfileRequest& request, const CreateUserProfileResponseReceivedHandler& handler, const std::shared_ptr& context) const; void CreateWorkforceAsyncHelper(const Model::CreateWorkforceRequest& request, const CreateWorkforceResponseReceivedHandler& handler, const std::shared_ptr& context) const; void CreateWorkteamAsyncHelper(const Model::CreateWorkteamRequest& request, const CreateWorkteamResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DeleteAlgorithmAsyncHelper(const Model::DeleteAlgorithmRequest& request, const DeleteAlgorithmResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DeleteAppAsyncHelper(const Model::DeleteAppRequest& request, const DeleteAppResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DeleteCodeRepositoryAsyncHelper(const Model::DeleteCodeRepositoryRequest& request, const DeleteCodeRepositoryResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DeleteDomainAsyncHelper(const Model::DeleteDomainRequest& request, const DeleteDomainResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DeleteEndpointAsyncHelper(const Model::DeleteEndpointRequest& request, const DeleteEndpointResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DeleteEndpointConfigAsyncHelper(const Model::DeleteEndpointConfigRequest& request, const DeleteEndpointConfigResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DeleteExperimentAsyncHelper(const Model::DeleteExperimentRequest& request, const DeleteExperimentResponseReceivedHandler& handler, const 
std::shared_ptr& context) const; void DeleteFlowDefinitionAsyncHelper(const Model::DeleteFlowDefinitionRequest& request, const DeleteFlowDefinitionResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DeleteHumanTaskUiAsyncHelper(const Model::DeleteHumanTaskUiRequest& request, const DeleteHumanTaskUiResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DeleteModelAsyncHelper(const Model::DeleteModelRequest& request, const DeleteModelResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DeleteModelPackageAsyncHelper(const Model::DeleteModelPackageRequest& request, const DeleteModelPackageResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DeleteMonitoringScheduleAsyncHelper(const Model::DeleteMonitoringScheduleRequest& request, const DeleteMonitoringScheduleResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DeleteNotebookInstanceAsyncHelper(const Model::DeleteNotebookInstanceRequest& request, const DeleteNotebookInstanceResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DeleteNotebookInstanceLifecycleConfigAsyncHelper(const Model::DeleteNotebookInstanceLifecycleConfigRequest& request, const DeleteNotebookInstanceLifecycleConfigResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DeleteTagsAsyncHelper(const Model::DeleteTagsRequest& request, const DeleteTagsResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DeleteTrialAsyncHelper(const Model::DeleteTrialRequest& request, const DeleteTrialResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DeleteTrialComponentAsyncHelper(const Model::DeleteTrialComponentRequest& request, const DeleteTrialComponentResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DeleteUserProfileAsyncHelper(const Model::DeleteUserProfileRequest& request, const DeleteUserProfileResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DeleteWorkforceAsyncHelper(const Model::DeleteWorkforceRequest& request, const DeleteWorkforceResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DeleteWorkteamAsyncHelper(const Model::DeleteWorkteamRequest& request, const DeleteWorkteamResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DescribeAlgorithmAsyncHelper(const Model::DescribeAlgorithmRequest& request, const DescribeAlgorithmResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DescribeAppAsyncHelper(const Model::DescribeAppRequest& request, const DescribeAppResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DescribeAutoMLJobAsyncHelper(const Model::DescribeAutoMLJobRequest& request, const DescribeAutoMLJobResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DescribeCodeRepositoryAsyncHelper(const Model::DescribeCodeRepositoryRequest& request, const DescribeCodeRepositoryResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DescribeCompilationJobAsyncHelper(const Model::DescribeCompilationJobRequest& request, const DescribeCompilationJobResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DescribeDomainAsyncHelper(const Model::DescribeDomainRequest& request, const DescribeDomainResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DescribeEndpointAsyncHelper(const Model::DescribeEndpointRequest& request, const 
DescribeEndpointResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void DescribeEndpointConfigAsyncHelper(const Model::DescribeEndpointConfigRequest& request, const DescribeEndpointConfigResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void DescribeExperimentAsyncHelper(const Model::DescribeExperimentRequest& request, const DescribeExperimentResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void DescribeFlowDefinitionAsyncHelper(const Model::DescribeFlowDefinitionRequest& request, const DescribeFlowDefinitionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void DescribeHumanTaskUiAsyncHelper(const Model::DescribeHumanTaskUiRequest& request, const DescribeHumanTaskUiResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void DescribeHyperParameterTuningJobAsyncHelper(const Model::DescribeHyperParameterTuningJobRequest& request, const DescribeHyperParameterTuningJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void DescribeLabelingJobAsyncHelper(const Model::DescribeLabelingJobRequest& request, const DescribeLabelingJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void DescribeModelAsyncHelper(const Model::DescribeModelRequest& request, const DescribeModelResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void DescribeModelPackageAsyncHelper(const Model::DescribeModelPackageRequest& request, const DescribeModelPackageResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void DescribeMonitoringScheduleAsyncHelper(const Model::DescribeMonitoringScheduleRequest& request, const DescribeMonitoringScheduleResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void DescribeNotebookInstanceAsyncHelper(const Model::DescribeNotebookInstanceRequest& request, const DescribeNotebookInstanceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void DescribeNotebookInstanceLifecycleConfigAsyncHelper(const Model::DescribeNotebookInstanceLifecycleConfigRequest& request, const DescribeNotebookInstanceLifecycleConfigResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void DescribeProcessingJobAsyncHelper(const Model::DescribeProcessingJobRequest& request, const DescribeProcessingJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void DescribeSubscribedWorkteamAsyncHelper(const Model::DescribeSubscribedWorkteamRequest& request, const DescribeSubscribedWorkteamResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void DescribeTrainingJobAsyncHelper(const Model::DescribeTrainingJobRequest& request, const DescribeTrainingJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void DescribeTransformJobAsyncHelper(const Model::DescribeTransformJobRequest& request, const DescribeTransformJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void DescribeTrialAsyncHelper(const Model::DescribeTrialRequest& request, const DescribeTrialResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void DescribeTrialComponentAsyncHelper(const Model::DescribeTrialComponentRequest& request, const DescribeTrialComponentResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void DescribeUserProfileAsyncHelper(const Model::DescribeUserProfileRequest& request, const DescribeUserProfileResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void DescribeWorkforceAsyncHelper(const Model::DescribeWorkforceRequest& request, const DescribeWorkforceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void DescribeWorkteamAsyncHelper(const Model::DescribeWorkteamRequest& request, const DescribeWorkteamResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void DisassociateTrialComponentAsyncHelper(const Model::DisassociateTrialComponentRequest& request, const DisassociateTrialComponentResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void GetSearchSuggestionsAsyncHelper(const Model::GetSearchSuggestionsRequest& request, const GetSearchSuggestionsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void ListAlgorithmsAsyncHelper(const Model::ListAlgorithmsRequest& request, const ListAlgorithmsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void ListAppsAsyncHelper(const Model::ListAppsRequest& request, const ListAppsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void ListAutoMLJobsAsyncHelper(const Model::ListAutoMLJobsRequest& request, const ListAutoMLJobsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void ListCandidatesForAutoMLJobAsyncHelper(const Model::ListCandidatesForAutoMLJobRequest& request, const ListCandidatesForAutoMLJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void ListCodeRepositoriesAsyncHelper(const Model::ListCodeRepositoriesRequest& request, const ListCodeRepositoriesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void ListCompilationJobsAsyncHelper(const Model::ListCompilationJobsRequest& request, const ListCompilationJobsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void ListDomainsAsyncHelper(const Model::ListDomainsRequest& request, const ListDomainsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void ListEndpointConfigsAsyncHelper(const Model::ListEndpointConfigsRequest& request, const ListEndpointConfigsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void ListEndpointsAsyncHelper(const Model::ListEndpointsRequest& request, const ListEndpointsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void ListExperimentsAsyncHelper(const Model::ListExperimentsRequest& request, const ListExperimentsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void ListFlowDefinitionsAsyncHelper(const Model::ListFlowDefinitionsRequest& request, const ListFlowDefinitionsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void ListHumanTaskUisAsyncHelper(const Model::ListHumanTaskUisRequest& request, const ListHumanTaskUisResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void ListHyperParameterTuningJobsAsyncHelper(const Model::ListHyperParameterTuningJobsRequest& request, const ListHyperParameterTuningJobsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void ListLabelingJobsAsyncHelper(const Model::ListLabelingJobsRequest& request, const ListLabelingJobsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void ListLabelingJobsForWorkteamAsyncHelper(const Model::ListLabelingJobsForWorkteamRequest& request, const ListLabelingJobsForWorkteamResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void ListModelPackagesAsyncHelper(const Model::ListModelPackagesRequest& request, const ListModelPackagesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void ListModelsAsyncHelper(const Model::ListModelsRequest& request, const ListModelsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void ListMonitoringExecutionsAsyncHelper(const Model::ListMonitoringExecutionsRequest& request, const ListMonitoringExecutionsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void ListMonitoringSchedulesAsyncHelper(const Model::ListMonitoringSchedulesRequest& request, const ListMonitoringSchedulesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void ListNotebookInstanceLifecycleConfigsAsyncHelper(const Model::ListNotebookInstanceLifecycleConfigsRequest& request, const ListNotebookInstanceLifecycleConfigsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void ListNotebookInstancesAsyncHelper(const Model::ListNotebookInstancesRequest& request, const ListNotebookInstancesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void ListProcessingJobsAsyncHelper(const Model::ListProcessingJobsRequest& request, const ListProcessingJobsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void ListSubscribedWorkteamsAsyncHelper(const Model::ListSubscribedWorkteamsRequest& request, const ListSubscribedWorkteamsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void ListTagsAsyncHelper(const Model::ListTagsRequest& request, const ListTagsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void ListTrainingJobsAsyncHelper(const Model::ListTrainingJobsRequest& request, const ListTrainingJobsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void ListTrainingJobsForHyperParameterTuningJobAsyncHelper(const Model::ListTrainingJobsForHyperParameterTuningJobRequest& request, const ListTrainingJobsForHyperParameterTuningJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void ListTransformJobsAsyncHelper(const Model::ListTransformJobsRequest& request, const ListTransformJobsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void ListTrialComponentsAsyncHelper(const Model::ListTrialComponentsRequest& request, const ListTrialComponentsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void ListTrialsAsyncHelper(const Model::ListTrialsRequest& request, const ListTrialsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void ListUserProfilesAsyncHelper(const Model::ListUserProfilesRequest& request, const ListUserProfilesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void ListWorkforcesAsyncHelper(const Model::ListWorkforcesRequest& request, const ListWorkforcesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void ListWorkteamsAsyncHelper(const Model::ListWorkteamsRequest& request, const ListWorkteamsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void RenderUiTemplateAsyncHelper(const Model::RenderUiTemplateRequest& request, const RenderUiTemplateResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void SearchAsyncHelper(const Model::SearchRequest& request, const SearchResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void StartMonitoringScheduleAsyncHelper(const Model::StartMonitoringScheduleRequest& request, const StartMonitoringScheduleResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void StartNotebookInstanceAsyncHelper(const Model::StartNotebookInstanceRequest& request, const StartNotebookInstanceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void StopAutoMLJobAsyncHelper(const Model::StopAutoMLJobRequest& request, const StopAutoMLJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void StopCompilationJobAsyncHelper(const Model::StopCompilationJobRequest& request, const StopCompilationJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void StopHyperParameterTuningJobAsyncHelper(const Model::StopHyperParameterTuningJobRequest& request, const StopHyperParameterTuningJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void StopLabelingJobAsyncHelper(const Model::StopLabelingJobRequest& request, const StopLabelingJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void StopMonitoringScheduleAsyncHelper(const Model::StopMonitoringScheduleRequest& request, const StopMonitoringScheduleResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void StopNotebookInstanceAsyncHelper(const Model::StopNotebookInstanceRequest& request, const StopNotebookInstanceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void StopProcessingJobAsyncHelper(const Model::StopProcessingJobRequest& request, const StopProcessingJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void StopTrainingJobAsyncHelper(const Model::StopTrainingJobRequest& request, const StopTrainingJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void StopTransformJobAsyncHelper(const Model::StopTransformJobRequest& request, const StopTransformJobResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void UpdateCodeRepositoryAsyncHelper(const Model::UpdateCodeRepositoryRequest& request, const UpdateCodeRepositoryResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void UpdateDomainAsyncHelper(const Model::UpdateDomainRequest& request, const UpdateDomainResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void UpdateEndpointAsyncHelper(const Model::UpdateEndpointRequest& request, const UpdateEndpointResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void UpdateEndpointWeightsAndCapacitiesAsyncHelper(const Model::UpdateEndpointWeightsAndCapacitiesRequest& request, const UpdateEndpointWeightsAndCapacitiesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void UpdateExperimentAsyncHelper(const Model::UpdateExperimentRequest& request, const UpdateExperimentResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void UpdateMonitoringScheduleAsyncHelper(const Model::UpdateMonitoringScheduleRequest& request, const UpdateMonitoringScheduleResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void UpdateNotebookInstanceAsyncHelper(const Model::UpdateNotebookInstanceRequest& request, const UpdateNotebookInstanceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void UpdateNotebookInstanceLifecycleConfigAsyncHelper(const Model::UpdateNotebookInstanceLifecycleConfigRequest& request, const UpdateNotebookInstanceLifecycleConfigResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void UpdateTrialAsyncHelper(const Model::UpdateTrialRequest& request, const UpdateTrialResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void UpdateTrialComponentAsyncHelper(const Model::UpdateTrialComponentRequest& request, const UpdateTrialComponentResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void UpdateUserProfileAsyncHelper(const Model::UpdateUserProfileRequest& request, const UpdateUserProfileResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void UpdateWorkforceAsyncHelper(const Model::UpdateWorkforceRequest& request, const UpdateWorkforceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
        void UpdateWorkteamAsyncHelper(const Model::UpdateWorkteamRequest& request, const UpdateWorkteamResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;

        Aws::String m_uri;
        Aws::String m_configScheme;
        std::shared_ptr<Aws::Utils::Threading::Executor> m_executor;
  };

} // namespace SageMaker
} // namespace Aws
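// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the generated interface). It shows,
// under the usual AWS SDK for C++ core conventions, how the public *Async
// operations backed by the private *AsyncHelper declarations above are driven,
// and how a caller-supplied executor ends up in m_executor. The endpoint name,
// pool size, function name, and the AWS_SAGEMAKER_CLIENT_USAGE_EXAMPLE guard
// are placeholders chosen for this sketch, not identifiers defined by the SDK.
// ---------------------------------------------------------------------------
#ifdef AWS_SAGEMAKER_CLIENT_USAGE_EXAMPLE
#include <aws/core/Aws.h>
#include <aws/core/client/AsyncCallerContext.h>
#include <aws/core/client/ClientConfiguration.h>
#include <aws/core/utils/memory/stl/AWSAllocator.h>
#include <aws/core/utils/threading/Executor.h>
#include <aws/sagemaker/model/DescribeEndpointRequest.h>
#include <iostream>

static void SageMakerClientUsageSketch()
{
  Aws::SDKOptions options;
  Aws::InitAPI(options);
  {
    // Route asynchronous work through a fixed-size thread pool; the client
    // keeps this executor (m_executor) and uses it for every *Async call.
    Aws::Client::ClientConfiguration config;
    config.executor = Aws::MakeShared<Aws::Utils::Threading::PooledThreadExecutor>("sagemaker-sketch", 4);

    Aws::SageMaker::SageMakerClient client(config);

    Aws::SageMaker::Model::DescribeEndpointRequest request;
    request.SetEndpointName("my-endpoint"); // placeholder endpoint name

    // The handler runs on the executor once the corresponding
    // DescribeEndpointAsyncHelper has completed the underlying request.
    client.DescribeEndpointAsync(request,
        [](const Aws::SageMaker::SageMakerClient* /*sender*/,
           const Aws::SageMaker::Model::DescribeEndpointRequest& /*req*/,
           const Aws::SageMaker::Model::DescribeEndpointOutcome& outcome,
           const std::shared_ptr<const Aws::Client::AsyncCallerContext>& /*context*/)
        {
          if (outcome.IsSuccess())
          {
            std::cout << "DescribeEndpoint succeeded" << std::endl;
          }
          else
          {
            std::cout << "DescribeEndpoint failed: " << outcome.GetError().GetMessage() << std::endl;
          }
        });
  }
  Aws::ShutdownAPI(options);
}
#endif // AWS_SAGEMAKER_CLIENT_USAGE_EXAMPLE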