feat(hos_client_create, hos_client_destory): 多次调用destory不会导致重复释放

This commit is contained in:
彭宣正
2020-12-14 17:24:58 +08:00
parent 505d529c32
commit 10b370e486
55976 changed files with 8544395 additions and 2 deletions

View File

@@ -0,0 +1,406 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/transfer/Transfer_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/core/utils/memory/stl/AWSSet.h>
#include <aws/core/utils/memory/stl/AWSMap.h>
#include <aws/core/utils/UUID.h>
#include <aws/core/client/AWSError.h>
#include <aws/core/client/AsyncCallerContext.h>
#include <aws/s3/S3Errors.h>
#include <iostream>
#include <atomic>
#include <mutex>
#include <condition_variable>
namespace Aws
{
namespace Utils
{
template < typename T > class Array;
}
namespace Transfer
{
class TransferHandle;

// Factory invoked to produce the stream that downloaded object data is written to.
typedef std::function<Aws::IOStream*(void)> CreateDownloadStreamCallback;

// Logging tag for this module.
// NOTE(review): declared `static` in a header, so every translation unit that
// includes this file gets its own copy of the array.
static const char CLASS_TAG[] = "TransferManager";
/**
 * Per-download options passed to the TransferManager download operations.
 */
struct DownloadConfiguration
{
    // Aws::String default-constructs to empty, so the previous explicit
    // versionId("") initialization was redundant; a defaulted ctor is enough.
    DownloadConfiguration() = default;

    /**
     * Version of the object to retrieve; empty means the latest version is used.
     */
    Aws::String versionId;

    // TBI: controls for in-memory parts vs. resumable file-based parts with state serialization to/from file
};
/**
 * State of a single part (chunk) of a transfer: its id, progress high-water
 * mark, size, byte range, ETag, and the stream/buffer used while the part is
 * in flight.
 */
class PartState
{
public:
    PartState();
    PartState(int partId, size_t bestProgressInBytes, size_t sizeInBytes, bool lastPart = false);

    // Identifier of this part within its transfer.
    int GetPartId() const { return m_partId; }

    // High-water mark of bytes successfully transferred for this part.
    size_t GetBestProgressInBytes() const { return m_bestProgressInBytes; }
    void SetBestProgressInBytes(size_t progressInBytes) { m_bestProgressInBytes = progressInBytes; }

    size_t GetSizeInBytes() const { return m_sizeInBytes; }
    void SetSizeInBytes(size_t sizeInBytes) { m_sizeInBytes = sizeInBytes; }

    void Reset();
    void OnDataTransferred(long long amount, const std::shared_ptr<TransferHandle> &transferHandle);

    void SetETag(const Aws::String& eTag) { m_eTag = eTag; }
    const Aws::String& GetETag() const { return m_eTag; }

    // Stream and scratch buffer used while the part is in flight. The pointers
    // are stored atomically; PartState itself never deletes them (default dtor).
    Aws::IOStream *GetDownloadPartStream() const { return m_downloadPartStream; }
    void SetDownloadPartStream(Aws::IOStream *downloadPartStream) { m_downloadPartStream = downloadPartStream; }

    unsigned char* GetDownloadBuffer() const { return m_downloadBuffer; }
    void SetDownloadBuffer(unsigned char* downloadBuffer) { m_downloadBuffer = downloadBuffer; }

    void SetRangeBegin(size_t rangeBegin) { m_rangeBegin = rangeBegin; }
    size_t GetRangeBegin() const { return m_rangeBegin; }

    // Fixed: pure accessor is now const so it can be called on const objects.
    bool IsLastPart() const { return m_lastPart; }
    void SetLastPart() { m_lastPart = true; }

private:
    int m_partId;
    Aws::String m_eTag;
    size_t m_currentProgressInBytes;
    size_t m_bestProgressInBytes;
    size_t m_sizeInBytes;
    size_t m_rangeBegin;
    std::atomic<Aws::IOStream *> m_downloadPartStream;
    std::atomic<unsigned char*> m_downloadBuffer;
    bool m_lastPart;
};
using PartPointer = std::shared_ptr< PartState >;
using PartStateMap = Aws::Map< int, PartPointer >;
// Lifecycle states of a transfer operation.
enum class TransferStatus
{
    // This value is only used for directory synchronization.
    EXACT_OBJECT_ALREADY_EXISTS,
    // Operation is still queued and has not begun processing.
    NOT_STARTED,
    // Operation is now running.
    IN_PROGRESS,
    // Operation was canceled. A canceled operation can still be retried.
    CANCELED,
    // Operation failed. A failed operation can still be retried.
    FAILED,
    // Operation was successful.
    COMPLETED,
    // Operation either failed or was canceled and a user deleted the multi-part upload from S3.
    ABORTED
};
// Direction of a transfer relative to Amazon S3.
enum class TransferDirection
{
    UPLOAD,
    DOWNLOAD
};
/**
 * This is the interface for interacting with an in-process transfer. All operations from TransferManager return an instance of this class.
 * In addition to the status of the transfer and details about what operation is being performed, this class also has the Cancel() operation which is
 * used to cancel a transfer, and WaitUntilFinished() which will cause the calling thread to block until the transfer is finished.
 * (Fixed: the previous doc referenced a nonexistent WaitUntilCompleted().)
 *
 * Instances are safe to use from multiple threads: mutable state is guarded
 * by the internal mutexes below or stored in std::atomic members.
 */
class AWS_TRANSFER_API TransferHandle
{
public:
    /**
     * Initialize with required information for an UPLOAD
     */
    TransferHandle(const Aws::String& bucketName, const Aws::String& keyName, uint64_t totalSize, const Aws::String& targetFilePath = "");
    /**
     * Initialize with required information for a DOWNLOAD
     */
    TransferHandle(const Aws::String& bucketName, const Aws::String& keyName, const Aws::String& targetFilePath = "");
    /**
     * Alternate DOWNLOAD constructor
     */
    TransferHandle(const Aws::String& bucketName, const Aws::String& keyName, CreateDownloadStreamCallback createDownloadStreamFn, const Aws::String& targetFilePath = "");
    /**
     * Alternate DOWNLOAD constructor for a byte-range download.
     */
    TransferHandle(const Aws::String& bucketName, const Aws::String& keyName,
                   const uint64_t fileOffset, const uint64_t downloadBytes,
                   CreateDownloadStreamCallback createDownloadStreamFn, const Aws::String& targetFilePath = "");
    ~TransferHandle();
    /**
     * Whether or not this transfer is being performed using parallel parts via a multi-part s3 api.
     */
    inline bool IsMultipart() const { return m_isMultipart.load(); }
    /**
     * Whether or not this transfer is being performed using parallel parts via a multi-part s3 api.
     */
    inline void SetIsMultipart(bool value) { m_isMultipart.store(value); }
    /**
     * If this is a multi-part transfer, this is the ID of it. e.g. UploadId for UploadPart
     * (Fixed: dropped the useless top-level const on the by-value return; it
     * blocked move construction at call sites.)
     */
    inline Aws::String GetMultiPartId() const { std::lock_guard<std::mutex> locker(m_getterSetterLock); return m_multipartId; }
    /**
     * If this is a multi-part transfer, this is the ID of it. e.g. UploadId for UploadPart
     */
    inline void SetMultipartId(const Aws::String& value) { std::lock_guard<std::mutex> locker(m_getterSetterLock); m_multipartId = value; }
    /**
     * Returns a copy of the completed parts, in the structure of <partId, ETag>. Used for all transfers.
     */
    PartStateMap GetCompletedParts() const;
    /**
     * Set a pending part to completed along with its etag. Used for all transfers.
     */
    void ChangePartToCompleted(const PartPointer& partState, const Aws::String &eTag);
    /**
     * Returns a copy of the pending parts. Used for all transfers.
     */
    PartStateMap GetPendingParts() const;
    /**
     * Returns true or false if there are currently any pending parts.
     */
    bool HasPendingParts() const;
    /**
     * Set a part to pending. Used for all transfers.
     */
    void AddPendingPart(const PartPointer& partState);
    /**
     * Returns a copy of the queued parts. Used for all transfers.
     */
    PartStateMap GetQueuedParts() const;
    /**
     * Returns true or false if there are currently any queued parts.
     */
    bool HasQueuedParts() const;
    /**
     * Set a part to queued. Used for all transfers.
     */
    void AddQueuedPart(const PartPointer& partState);
    /**
     * Returns a copy of the failed parts. Used for all transfers.
     */
    PartStateMap GetFailedParts() const;
    /**
     * Returns true or false if there are currently any failed parts.
     */
    bool HasFailedParts() const;
    /**
     * Set a pending part to failed. Used for all transfers.
     */
    void ChangePartToFailed(const PartPointer& partState);
    /**
     * Get the parts transactionally, mostly for internal purposes.
     */
    void GetAllPartsTransactional(PartStateMap& queuedParts, PartStateMap& pendingParts,
                                  PartStateMap& failedParts, PartStateMap& completedParts);
    /**
     * Returns true or false if any parts have been created for this transfer
     */
    bool HasParts() const;
    /**
     * Returns false if Cancel has been called. Largely for internal use.
     */
    bool ShouldContinue() const;
    /**
     * Cancel the transfer. This will happen asynchronously, so if you need to wait for it to be canceled, either handle the callbacks,
     * or call WaitUntilFinished.
     */
    void Cancel();
    /**
     * Reset the cancellation status for a retry. This will be done automatically by Transfermanager.
     */
    void Restart();
    /**
     * Total bytes transferred successfully on this transfer operation.
     * We implement transfer progress with two invariants:
     * (1) Never lock; given a callback that can happen hundreds of times a second or more on a solid connection, it isn't acceptable to lock each time
     * (2) Never go backwards, in spite of part upload/download failures. Negative progress (canceling a highly concurrent transfer can
     *     lead to an enormous step backwards if many parts are aborted at once) is a confusing and undesirable user experience.
     * In this sense, progress represents a high-water mark, and in the presence of heavy failures or cancellation, it may appear to pause until the
     * necessary retries exceed the previous high-water mark.
     */
    inline uint64_t GetBytesTransferred() const { return m_bytesTransferred.load(); }
    /**
     * Total bytes transferred successfully on this transfer operation.
     */
    void UpdateBytesTransferred(uint64_t amount) { m_bytesTransferred.fetch_add(amount); }
    /**
     * The offset from which to start downloading.
     * NOTE(review): m_offset is not atomic; this presumes it is only written at
     * construction time — confirm against the .cpp.
     */
    inline uint64_t GetBytesOffset() const { return m_offset; }
    /**
     * The calculated total size of the object being transferred.
     */
    inline uint64_t GetBytesTotalSize() const { return m_bytesTotalSize.load(); }
    /**
     * Sets the total size of the object being transferred.
     */
    inline void SetBytesTotalSize(uint64_t value) { m_bytesTotalSize.store(value); }
    /**
     * Bucket portion of the object location in Amazon S3.
     */
    inline const Aws::String& GetBucketName() const { return m_bucket; }
    /**
     * Key of the object location in Amazon S3.
     */
    inline const Aws::String& GetKey() const { return m_key; }
    /**
     * If known, this is the location of the local file being uploaded from, or downloaded to. If you use the stream api however, this will
     * always be blank.
     */
    inline const Aws::String& GetTargetFilePath() const { return m_fileName; }
    /**
     * (Download only) version id of the object to retrieve; if not specified in constructor, then latest is used
     */
    Aws::String GetVersionId() const { std::lock_guard<std::mutex> locker(m_getterSetterLock); return m_versionId; }
    void SetVersionId(const Aws::String& versionId) { std::lock_guard<std::mutex> locker(m_getterSetterLock); m_versionId = versionId; }
    /**
     * Upload or Download?
     */
    inline TransferDirection GetTransferDirection() const { return m_direction; }
    /**
     * Content type of the object being transferred
     */
    inline Aws::String GetContentType() const { std::lock_guard<std::mutex> locker(m_getterSetterLock); return m_contentType; }
    /**
     * Content type of the object being transferred
     */
    inline void SetContentType(const Aws::String& value) { std::lock_guard<std::mutex> locker(m_getterSetterLock); m_contentType = value; }
    /**
     * In case of an upload, this is the metadata that was placed on the object when it was uploaded.
     * In the case of a download, this is the object metadata from the GetObject operation.
     */
    inline Aws::Map<Aws::String, Aws::String> GetMetadata() const { std::lock_guard<std::mutex> locker(m_getterSetterLock); return m_metadata; }
    /**
     * In case of an upload, this is the metadata that was placed on the object when it was uploaded.
     * In the case of a download, this is the object metadata from the GetObject operation.
     */
    inline void SetMetadata(const Aws::Map<Aws::String, Aws::String>& value) { std::lock_guard<std::mutex> locker(m_getterSetterLock); m_metadata = value; }
    /**
     * Add a new entry to or update an existed entry of m_metadata, useful when users want to get ETag directly from metadata.
     */
    inline void AddMetadataEntry(const Aws::String& key, const Aws::String& value) { std::lock_guard<std::mutex> locker(m_getterSetterLock); m_metadata[key] = value; }
    /**
     * Arbitrary user context that can be accessed from the callbacks
     */
    inline void SetContext(const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) { std::lock_guard<std::mutex> locker(m_getterSetterLock); m_context = context; }
    /**
     * Returns arbitrary user context or nullptr if it's not set.
     */
    inline std::shared_ptr<const Aws::Client::AsyncCallerContext> GetContext() const { std::lock_guard<std::mutex> locker(m_getterSetterLock); return m_context; }
    /**
     * The current status of the operation
     */
    TransferStatus GetStatus() const;
    /**
     * The current status of the operation
     */
    void UpdateStatus(TransferStatus value);
    /**
     * The last error that was encountered by the transfer. You can handle each error individually via the errorCallback callback function
     * in the TransferConfiguration.
     */
    inline Aws::Client::AWSError<Aws::S3::S3Errors> GetLastError() const { std::lock_guard<std::mutex> locker(m_getterSetterLock); return m_lastError; }
    /**
     * The last error that was encountered by the transfer. You can handle each error individually via the errorCallback callback function
     * in the TransferConfiguration.
     */
    inline void SetError(const Aws::Client::AWSError<Aws::S3::S3Errors>& error) { std::lock_guard<std::mutex> locker(m_getterSetterLock); m_lastError = error; }
    /**
     * Blocks the calling thread until the operation has finished. This function does not busy wait. It is safe for your CPU.
     */
    void WaitUntilFinished() const;

    const CreateDownloadStreamCallback& GetCreateDownloadStreamFunction() const { return m_createDownloadStreamFn; }
    void WritePartToDownloadStream(Aws::IOStream* partStream, std::size_t writeOffset);
    void ApplyDownloadConfiguration(const DownloadConfiguration& downloadConfig);
    /**
     * Atomically claims the right to perform completion work exactly once;
     * returns true only for the first caller.
     * NOTE(review): implemented on m_lastPart — presumably that flag doubles
     * as the completion latch; confirm against the .cpp before renaming.
     */
    bool LockForCompletion()
    {
        bool expected = false;
        return m_lastPart.compare_exchange_strong(expected, true/*desired*/);
    }
    /*
     * Returns a unique identifier tied to this particular transfer handle.
     */
    Aws::String GetId() const;

private:
    void CleanupDownloadStream();

    std::atomic<bool> m_isMultipart;
    Aws::String m_multipartId;
    TransferDirection m_direction;
    PartStateMap m_completedParts;
    PartStateMap m_pendingParts;
    PartStateMap m_queuedParts;
    PartStateMap m_failedParts;
    std::atomic<uint64_t> m_bytesTransferred;
    std::atomic<bool> m_lastPart;
    std::atomic<uint64_t> m_bytesTotalSize;
    uint64_t m_offset;
    Aws::String m_bucket;
    Aws::String m_key;
    Aws::String m_fileName;
    Aws::String m_contentType;
    Aws::String m_versionId;
    Aws::Map<Aws::String, Aws::String> m_metadata;
    TransferStatus m_status;
    Aws::Client::AWSError<Aws::S3::S3Errors> m_lastError;
    std::atomic<bool> m_cancel;
    std::shared_ptr<const Aws::Client::AsyncCallerContext> m_context;
    const Utils::UUID m_handleId;
    CreateDownloadStreamCallback m_createDownloadStreamFn;
    Aws::IOStream* m_downloadStream;
    // Lock ordering/ownership: each mutex guards the state named after it.
    mutable std::mutex m_downloadStreamLock;
    mutable std::mutex m_partsLock;
    mutable std::mutex m_statusLock;
    mutable std::condition_variable m_waitUntilFinishedSignal;
    mutable std::mutex m_getterSetterLock;
};
// Streams a human-readable name for a TransferStatus value (defined in the .cpp).
AWS_TRANSFER_API Aws::OStream& operator << (Aws::OStream& s, TransferStatus status);
}
}

View File

@@ -0,0 +1,316 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/transfer/TransferHandle.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/PutObjectRequest.h>
#include <aws/s3/model/GetObjectRequest.h>
#include <aws/s3/model/CreateMultipartUploadRequest.h>
#include <aws/s3/model/UploadPartRequest.h>
#include <aws/core/utils/threading/Executor.h>
#include <aws/core/utils/memory/stl/AWSStreamFwd.h>
#include <aws/core/utils/ResourceManager.h>
#include <aws/core/client/AsyncCallerContext.h>
#include <memory>
namespace Aws
{
namespace Transfer
{
class TransferManager;

// Callback signatures invoked by TransferManager as transfers progress,
// change status, start (directory operations), or fail.
typedef std::function<void(const TransferManager*, const std::shared_ptr<const TransferHandle>&)> UploadProgressCallback;
typedef std::function<void(const TransferManager*, const std::shared_ptr<const TransferHandle>&)> DownloadProgressCallback;
typedef std::function<void(const TransferManager*, const std::shared_ptr<const TransferHandle>&)> TransferStatusUpdatedCallback;
typedef std::function<void(const TransferManager*, const std::shared_ptr<const TransferHandle>&, const Aws::Client::AWSError<Aws::S3::S3Errors>&)> ErrorCallback;
typedef std::function<void(const TransferManager*, const std::shared_ptr<const TransferHandle>&)> TransferInitiatedCallback;

// 5 MiB: the default transfer buffer/part size used below.
const uint64_t MB5 = 5 * 1024 * 1024;
/**
* Configuration for use with TransferManager. The data here will be copied directly to TransferManager.
*/
struct TransferManagerConfiguration
{
    // Defaults: no client, MD5 off, 50 MiB total buffer heap (10 * 5 MiB), 5 MiB per buffer.
    TransferManagerConfiguration(Aws::Utils::Threading::Executor* executor) : s3Client(nullptr), transferExecutor(executor), computeContentMD5(false), transferBufferMaxHeapSize(10 * MB5), bufferSize(MB5)
    {
    }
    /**
     * S3 Client to use for transfers. You are responsible for setting this.
     */
    std::shared_ptr<Aws::S3::S3Client> s3Client;
    /**
     * Executor to use for the transfer manager threads. This probably shouldn't be the same executor
     * you are using for your client configuration. This executor will be used in a different context than the s3 client is used.
     * It is not a bug to use the same executor, but at least be aware that this is how the manager will be used.
     */
    Aws::Utils::Threading::Executor* transferExecutor;
    /**
     * When true, TransferManager will calculate the MD5 digest of the content being uploaded.
     * The digest is sent to S3 via an HTTP header enabling the service to perform integrity checks.
     * This option is disabled by default.
     */
    bool computeContentMD5;
    /**
     * If you have special arguments you want passed to our put object calls, put them here. We will copy the template for each put object call
     * overriding the body stream, bucket, and key. If object metadata is passed through, we will override that as well.
     */
    Aws::S3::Model::PutObjectRequest putObjectTemplate;
    /**
     * If you have special arguments you want passed to our get object calls, put them here. We will copy the template for each get object call
     * overriding the body stream, bucket, and key. If object metadata is passed through, we will override that as well.
     */
    Aws::S3::Model::GetObjectRequest getObjectTemplate;
    /**
     * If you have special arguments you want passed to our create multipart upload calls, put them here. We will copy the template for each call
     * overriding the body stream, bucket, and key. If object metadata is passed through, we will override that as well.
     */
    Aws::S3::Model::CreateMultipartUploadRequest createMultipartUploadTemplate;
    /**
     * If you have special arguments you want passed to our upload part calls, put them here. We will copy the template for each call
     * overriding the body stream, bucket, and key. If object metadata is passed through, we will override that as well.
     */
    Aws::S3::Model::UploadPartRequest uploadPartTemplate;
    /**
     * Maximum size of the working buffers to use. This is not the same thing as max heap size for your process. This is the maximum amount of memory we will
     * allocate for all transfer buffers. default is 50MB.
     * If you are using Aws::Utils::Threading::PooledThreadExecutor for transferExecutor, this size should be greater than bufferSize * poolSize.
     */
    uint64_t transferBufferMaxHeapSize;
    /**
     * Defaults to 5MB. If you are uploading large files, (larger than 50GB, this needs to be specified to be something larger than 5MB. Also keep in mind that you may need
     * to increase your max heap size if this is something you plan on increasing.
     */
    uint64_t bufferSize;
    /**
     * Callback to receive progress updates for uploads.
     */
    UploadProgressCallback uploadProgressCallback;
    /**
     * Callback to receive progress updates for downloads.
     */
    DownloadProgressCallback downloadProgressCallback;
    /**
     * Callback to receive updates on the status of the transfer.
     */
    TransferStatusUpdatedCallback transferStatusUpdatedCallback;
    /**
     * Callback to receive initiated transfers for the directory operations.
     */
    TransferInitiatedCallback transferInitiatedCallback;
    /**
     * Callback to receive all errors that are thrown over the course of a transfer.
     */
    ErrorCallback errorCallback;
    /**
     * To support Customer Access Log Information when access S3.
     * https://docs.aws.amazon.com/AmazonS3/latest/dev/LogFormat.html
     * Note: query string key not started with "x-" will be filtered out.
     * key/val of map entries will be key/val of query strings.
     */
    Aws::Map<Aws::String, Aws::String> customizedAccessLogTag;
};
/**
 * This is a utility around Amazon Simple Storage Service. It can Upload large files via parts in parallel, Upload files less than 5MB in single PutObject, and download files via GetObject,
 * If a transfer fails, it can be retried for an upload. For a download, there is nothing to retry in case of failure. Just download it again. You can also abort any in progress transfers.
 * The key interface for controlling and knowing the status of your upload is the TransferHandle. An instance of TransferHandle is returned from each of the public functions in this interface.
 * Keep a reference to the pointer. Each of the callbacks will also pass the handle that has received an update. None of the public methods in this interface block.
 */
class AWS_TRANSFER_API TransferManager : public std::enable_shared_from_this<TransferManager>
{
public:
    /**
     * Create a new TransferManager instance initialized with config.
     */
    static std::shared_ptr<TransferManager> Create(const TransferManagerConfiguration& config);
    ~TransferManager();
    /**
     * Uploads a file via filename, to bucketName/keyName in S3. contentType and metadata will be added to the object. If the object is larger than the configured bufferSize,
     * then a multi-part upload will be performed.
     */
    std::shared_ptr<TransferHandle> UploadFile(const Aws::String& fileName,
                                               const Aws::String& bucketName,
                                               const Aws::String& keyName,
                                               const Aws::String& contentType,
                                               const Aws::Map<Aws::String, Aws::String>& metadata,
                                               const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr);
    /**
     * Uploads the contents of stream, to bucketName/keyName in S3. contentType and metadata will be added to the object. If the object is larger than the configured bufferSize,
     * then a multi-part upload will be performed.
     */
    std::shared_ptr<TransferHandle> UploadFile(const std::shared_ptr<Aws::IOStream>& stream,
                                               const Aws::String& bucketName,
                                               const Aws::String& keyName,
                                               const Aws::String& contentType,
                                               const Aws::Map<Aws::String, Aws::String>& metadata,
                                               const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr);
    /**
     * Downloads the contents of bucketName/keyName in S3 to the file specified by writeToFile. This will perform a GetObject operation.
     */
    std::shared_ptr<TransferHandle> DownloadFile(const Aws::String& bucketName,
                                                 const Aws::String& keyName,
                                                 const Aws::String& writeToFile,
                                                 const DownloadConfiguration& downloadConfig = DownloadConfiguration(),
                                                 const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr);
    /**
     * Downloads the contents of bucketName/keyName in S3 and writes it to writeToStream. This will perform a GetObject operation.
     */
    std::shared_ptr<TransferHandle> DownloadFile(const Aws::String& bucketName,
                                                 const Aws::String& keyName,
                                                 CreateDownloadStreamCallback writeToStreamfn,
                                                 const DownloadConfiguration& downloadConfig = DownloadConfiguration(),
                                                 const Aws::String& writeToFile = "",
                                                 const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr);
    /**
     * Downloads the contents of bucketName/keyName in S3 and writes it to writeToStream. This will perform a GetObject operation for the given range.
     */
    std::shared_ptr<TransferHandle> DownloadFile(const Aws::String& bucketName,
                                                 const Aws::String& keyName,
                                                 uint64_t fileOffset,
                                                 uint64_t downloadBytes,
                                                 CreateDownloadStreamCallback writeToStreamfn,
                                                 const DownloadConfiguration& downloadConfig = DownloadConfiguration(),
                                                 const Aws::String& writeToFile = "",
                                                 const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr);
    /**
     * Retry a download that failed from a previous DownloadFile operation. If a multi-part download was used, only the failed parts will be re-fetched.
     */
    std::shared_ptr<TransferHandle> RetryDownload(const std::shared_ptr<TransferHandle>& retryHandle);
    /**
     * Retry an upload that failed from a previous UploadFile operation. If a multi-part upload was used, only the failed parts will be re-sent.
     */
    std::shared_ptr<TransferHandle> RetryUpload(const Aws::String& fileName, const std::shared_ptr<TransferHandle>& retryHandle);
    /**
     * Retry an upload that failed from a previous UploadFile operation. If a multi-part upload was used, only the failed parts will be re-sent.
     */
    std::shared_ptr<TransferHandle> RetryUpload(const std::shared_ptr<Aws::IOStream>& stream, const std::shared_ptr<TransferHandle>& retryHandle);
    /**
     * By default, multi-part uploads will remain in a FAILED state if they fail, or a CANCELED state if they were canceled. Leaving failed uploads around
     * still costs the owner of the bucket money. If you know you will not be retrying the request, abort the request after canceling it or if it fails and you don't
     * intend to retry it.
     */
    void AbortMultipartUpload(const std::shared_ptr<TransferHandle>& inProgressHandle);
    /**
     * Uploads entire contents of directory to Amazon S3 bucket and stores them in a directory starting at prefix. This is an asynchronous method. You will receive notifications
     * that an upload has started via the transferInitiatedCallback callback function in your configuration. If you do not set this callback, then you will not be able to handle
     * the file transfers.
     *
     * directory: the absolute directory on disk to upload
     * bucketName: the name of the S3 bucket to upload to
     * prefix: the prefix to put on all objects uploaded (e.g. put them in x directory in the bucket).
     */
    void UploadDirectory(const Aws::String& directory, const Aws::String& bucketName, const Aws::String& prefix, const Aws::Map<Aws::String, Aws::String>& metadata);
    /**
     * Downloads entire contents of an Amazon S3 bucket starting at prefix stores them in a directory (not including the prefix). This is an asynchronous method. You will receive notifications
     * that a download has started via the transferInitiatedCallback callback function in your configuration. If you do not set this callback, then you will not be able to handle
     * the file transfers. If an error occurs prior to the transfer being initiated (e.g. list objects fails, then an error will be passed through the errorCallback).
     *
     * directory: the absolute directory on disk to download to
     * bucketName: the name of the S3 bucket to download from
     * prefix: the prefix in the bucket to use as the root directory (e.g. download all objects at x prefix in S3 and then store them starting in directory with the prefix stripped out).
     */
    void DownloadToDirectory(const Aws::String& directory, const Aws::String& bucketName, const Aws::String& prefix = Aws::String());

private:
    /**
     * To ensure TransferManager is always created as a shared_ptr, since it inherits enable_shared_from_this.
     */
    TransferManager(const TransferManagerConfiguration& config);
    /**
     * Creates TransferHandle.
     * fileName is not necessary if this handle will upload data from an IOStream
     */
    std::shared_ptr<TransferHandle> CreateUploadFileHandle(Aws::IOStream* fileStream,
                                                           const Aws::String& bucketName,
                                                           const Aws::String& keyName,
                                                           const Aws::String& contentType,
                                                           const Aws::Map<Aws::String, Aws::String>& metadata,
                                                           const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context,
                                                           const Aws::String& fileName = "");
    /**
     * Submits the actual task to the task scheduler.
     */
    std::shared_ptr<TransferHandle> SubmitUpload(const std::shared_ptr<TransferHandle>& handle, const std::shared_ptr<Aws::IOStream>& fileStream = nullptr);
    /**
     * Uploads the contents of stream, to bucketName/keyName in S3. contentType and metadata will be added to the object. If the object is larger than the configured bufferSize,
     * then a multi-part upload will be performed.
     */
    std::shared_ptr<TransferHandle> DoUploadFile(const std::shared_ptr<Aws::IOStream>& fileStream,
                                                 const Aws::String& bucketName,
                                                 const Aws::String& keyName,
                                                 const Aws::String& contentType,
                                                 const Aws::Map<Aws::String, Aws::String>& metadata,
                                                 const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context);
    /**
     * Uploads the contents of file, to bucketName/keyName in S3. contentType and metadata will be added to the object. If the object is larger than the configured bufferSize,
     * then a multi-part upload will be performed.
     * Keeps file unopened until doing the actual upload; this is useful for uploading directories with many small files (avoid having too many open files, which may exceed the system limit).
     */
    std::shared_ptr<TransferHandle> DoUploadFile(const Aws::String& fileName,
                                                 const Aws::String& bucketName,
                                                 const Aws::String& keyName,
                                                 const Aws::String& contentType,
                                                 const Aws::Map<Aws::String, Aws::String>& metadata,
                                                 const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context);
    // True when the object is large enough to be sent as a multi-part upload.
    bool MultipartUploadSupported(uint64_t length) const;
    // Populates the handle's part map before a download begins.
    bool InitializePartsForDownload(const std::shared_ptr<TransferHandle>& handle);
    // Internal upload/download drivers.
    void DoMultiPartUpload(const std::shared_ptr<Aws::IOStream>& streamToPut, const std::shared_ptr<TransferHandle>& handle);
    void DoSinglePartUpload(const std::shared_ptr<Aws::IOStream>& streamToPut, const std::shared_ptr<TransferHandle>& handle);
    void DoMultiPartUpload(const std::shared_ptr<TransferHandle>& handle);
    void DoSinglePartUpload(const std::shared_ptr<TransferHandle>& handle);
    void DoDownload(const std::shared_ptr<TransferHandle>& handle);
    void DoSinglePartDownload(const std::shared_ptr<TransferHandle>& handle);
    // Async response handlers wired into the S3 client callbacks.
    void HandleGetObjectResponse(const Aws::S3::S3Client* client,
                                 const Aws::S3::Model::GetObjectRequest& request,
                                 const Aws::S3::Model::GetObjectOutcome& outcome,
                                 const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context);
    void WaitForCancellationAndAbortUpload(const std::shared_ptr<TransferHandle>& canceledHandle);
    void HandleUploadPartResponse(const Aws::S3::S3Client*, const Aws::S3::Model::UploadPartRequest&, const Aws::S3::Model::UploadPartOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&);
    void HandlePutObjectResponse(const Aws::S3::S3Client*, const Aws::S3::Model::PutObjectRequest&, const Aws::S3::Model::PutObjectOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&);
    void HandleListObjectsResponse(const Aws::S3::S3Client*, const Aws::S3::Model::ListObjectsV2Request&, const Aws::S3::Model::ListObjectsV2Outcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&);
    TransferStatus DetermineIfFailedOrCanceled(const TransferHandle&) const;
    // Callback dispatch helpers (no-ops when the corresponding callback is unset — TODO confirm against the .cpp).
    void TriggerUploadProgressCallback(const std::shared_ptr<const TransferHandle>&) const;
    void TriggerDownloadProgressCallback(const std::shared_ptr<const TransferHandle>&) const;
    void TriggerTransferStatusUpdatedCallback(const std::shared_ptr<const TransferHandle>&) const;
    void TriggerErrorCallback(const std::shared_ptr<const TransferHandle>&, const Aws::Client::AWSError<Aws::S3::S3Errors>& error)const;
    static Aws::String DetermineFilePath(const Aws::String& directory, const Aws::String& prefix, const Aws::String& keyName);

    // Pool of reusable transfer buffers, bounded by transferBufferMaxHeapSize.
    Aws::Utils::ExclusiveOwnershipResourceManager<unsigned char*> m_bufferManager;
    // Copy of the configuration supplied at Create() time.
    TransferManagerConfiguration m_transferConfig;
};
}
}

View File

@@ -0,0 +1,28 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#ifdef _MSC_VER
// Disable MSVC complaining about max template (decorated name) size.
#pragma warning (disable : 4503)
#endif

#if defined (USE_WINDOWS_DLL_SEMANTICS) || defined (_WIN32)
#ifdef _MSC_VER
// C4251: exported class member uses a type without a DLL interface (noisy for STL members).
#pragma warning(disable : 4251)
#endif // _MSC_VER
#ifdef USE_IMPORT_EXPORT
#ifdef AWS_TRANSFER_EXPORTS
// Building the transfer library itself: export its symbols.
#define AWS_TRANSFER_API __declspec(dllexport)
#else
// Consuming the transfer library as a DLL: import its symbols.
#define AWS_TRANSFER_API __declspec(dllimport)
#endif // AWS_TRANSFER_EXPORTS
#else // USE_IMPORT_EXPORT
// Static linkage: the macro expands to nothing.
#define AWS_TRANSFER_API
#endif // USE_IMPORT_EXPORT
#else // defined (USE_WINDOWS_DLL_SEMANTICS) || defined (_WIN32)
// Non-Windows builds: no import/export decoration needed.
#define AWS_TRANSFER_API
#endif // defined (USE_WINDOWS_DLL_SEMANTICS) || defined (_WIN32)