From 8faeb25076c29b0c65dd2f4125414df6c865123c Mon Sep 17 00:00:00 2001
From: Joey Hess
Date: Mon, 3 Nov 2014 15:53:22 -0400
Subject: [PATCH] finish multipart support using unreleased update to aws lib
 to yield etags

Untested and not even compiled yet.

Testing should include checks that file content streams through without
buffering in memory.

Note that CL.consume causes all the etags to be buffered in memory.
This is probably nearly unavoidable, since a request has to be constructed
that contains the list of etags in its body. (While it might be possible
to stream generation of the body, that would entail making an HTTP request
that dribbles out parts of the body as the multipart uploads complete,
which is not likely to work well.)

To keep this from being a problem, it's best for partsize to be set to some
suitably large value, like 1GiB. Then a full terabyte file will need only
1024 etags to be stored, which will probably use around 1 MB of memory.
---
 Remote/S3.hs                                | 19 +++++++++++--------
 doc/bugs/S3_upload_not_using_multipart.mdwn |  8 ++++++++
 2 files changed, 19 insertions(+), 8 deletions(-)

diff --git a/Remote/S3.hs b/Remote/S3.hs
index 9a618329a5..9c90d4b2c0 100644
--- a/Remote/S3.hs
+++ b/Remote/S3.hs
@@ -13,6 +13,10 @@ module Remote.S3 (remote, iaHost, configIA, iaItemUrl) where
 import qualified Aws as AWS
 import qualified Aws.Core as AWS
 import qualified Aws.S3 as S3
+#if MIN_VERSION_aws(0,10,4)
+import qualified Aws.S3.Commands.Multipart as Multipart
+import qualified Data.Conduit.List as CL
+#endif
 import qualified Data.Text as T
 import qualified Data.Text.Encoding as T
 import qualified Data.ByteString.Lazy as L
@@ -170,7 +174,7 @@ store r h = fileStorer $ \k f p -> do
 	multipartupload sz k f p = do
 #if MIN_VERSION_aws(0,10,4)
 		let info = hinfo h
-		let objects = bucketObject info h
+		let object = bucketObject info h
 
 		uploadid <- S3.imurUploadId <$> sendS3Handle' h $
 			(S3.postInitiateMultipartUpload (bucket info) object)
@@ -180,14 +184,13 @@ store r h = fileStorer $ \k f p -> do
 				, S3.imuExpires = Nothing -- TODO set some reasonable expiry
 				}
 
-		-- TODO open file, read each part of size sz (streaming
-		-- it); send part to S3, and get a list of etags of all
-		-- the parts
-
+		etags <- sourceFile f
+			$= Multipart.chunkedConduit sz
+			$= Multipart.putConduit (hawscfg h) (hs3cfg h) (hmanager h) (bucket info) object uploadid
+			$$ CL.consume
 
-		void $ sendS3Handle' h $
-			S3.postCompleteMultipartUpload (bucket info) object uploadid $
-				zip [1..] (map T.pack etags)
+		void $ sendS3Handle' h $ S3.postCompleteMultipartUpload
+			(bucket info) object uploadid (zip [1..] etags)
 #else
 		warning $ "Cannot do multipart upload (partsize " ++ show sz ++ "); built with too old a version of the aws library."
 		singlepartupload k f p
diff --git a/doc/bugs/S3_upload_not_using_multipart.mdwn b/doc/bugs/S3_upload_not_using_multipart.mdwn
index 5e5d97c6a3..cd40e9d2ba 100644
--- a/doc/bugs/S3_upload_not_using_multipart.mdwn
+++ b/doc/bugs/S3_upload_not_using_multipart.mdwn
@@ -52,3 +52,11 @@ Please provide any additional information below.
 upgrade supported from repository versions: 0 1 2
 
 [[!tag confirmed]]
+
+> [[fixed|done]] This is now supported, when git-annex is built with a new
+> enough version of the aws library. You need to configure the remote to
+> use an appropriate value for multipart, eg:
+>
+>     git annex enableremote cloud multipart=1GiB
+>
+> --[[Joey]]
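
For reference, below is a rough, untested standalone sketch of the streaming
pipeline the patch adds. It is not the git-annex code: the multipartPut helper
and the Aws.baseConfiguration/defServiceConfig/manager setup are illustrative
assumptions, and it assumes an aws library new enough to ship
Aws.S3.Commands.Multipart. The point is that the file is chunked and uploaded
part by part, so only the list of etags (one short string per part) is
accumulated in memory for the completion request.

    -- Hypothetical standalone helper, not the git-annex implementation;
    -- git-annex threads its own S3Handle and configuration through instead.
    import qualified Aws
    import qualified Aws.Core as AWS
    import qualified Aws.S3 as S3
    import qualified Aws.S3.Commands.Multipart as Multipart
    import qualified Data.Conduit.List as CL
    import qualified Data.Text as T
    import Control.Monad (void)
    import Control.Monad.Trans.Resource (runResourceT)
    import Data.Conduit (($=), ($$))
    import Data.Conduit.Binary (sourceFile)
    import Network.HTTP.Conduit (newManager, conduitManagerSettings)

    multipartPut :: FilePath -> S3.Bucket -> T.Text -> Integer -> IO ()
    multipartPut file bucket object partsize = do
        cfg <- Aws.baseConfiguration
        let s3cfg = AWS.defServiceConfig :: S3.S3Configuration AWS.NormalQuery
        mgr <- newManager conduitManagerSettings
        runResourceT $ do
            -- Start the multipart upload and remember its id.
            r <- Aws.pureAws cfg s3cfg mgr $
                S3.postInitiateMultipartUpload bucket object
            let uploadid = S3.imurUploadId r
            -- Stream the file in partsize chunks; each part upload yields
            -- an etag, and only that etag list is collected in memory.
            etags <- sourceFile file
                $= Multipart.chunkedConduit partsize
                $= Multipart.putConduit cfg s3cfg mgr bucket object uploadid
                $$ CL.consume
            -- Complete the upload from the ordered part numbers and etags.
            void $ Aws.pureAws cfg s3cfg mgr $
                S3.postCompleteMultipartUpload bucket object uploadid
                    (zip [1..] etags)

With a 1GiB partsize that might be invoked as, say,
multipartPut "big.iso" "mybucket" "big.iso" (1024*1024*1024), so a 1 TB file
produces about 1024 etags while the content itself streams through.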