refuse unsafe store to unversioned exporttree with old aws version
I've developed a patch to aws; once it gets merged, the real version number of aws can be filled in.
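As a rough sketch of the version gate this change relies on: MIN_VERSION_aws is supplied by Cabal when the aws package is a build dependency, 0.99.0 is only a placeholder until the aws patch (https://github.com/aristidb/aws/issues/258) is merged, and the module name, S3Etag alias, and function below are illustrative assumptions, not code from this commit.

    {-# LANGUAGE CPP #-}
    -- Sketch only: gate ETag use on a new-enough aws library.
    -- 0.99.0 is a placeholder version number.
    module EtagGateSketch (etagForStore) where

    -- Stand-in for git-annex's S3Etag; assumed here for illustration.
    type S3Etag = String

    -- With a new enough aws, pass the ETag through; otherwise report
    -- that none is available, so the caller can refuse an unsafe store
    -- to an unversioned exporttree remote.
    etagForStore :: S3Etag -> Maybe S3Etag
    #if MIN_VERSION_aws(0,99,0)
    etagForStore etag = Just etag
    #else
    etagForStore _ = Nothing
    #endif

The real change below threads the ETag (or Nothing) up from storeHelper so that storeExportWithContentIdentifierS3 can decide whether a store to an unversioned bucket is safe.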
parent 15bd7d57ca
commit a42e7a012a

2 changed files with 38 additions and 23 deletions

Remote/S3.hs (58 changed lines)
@@ -222,7 +222,7 @@ store mh r info magic = fileStorer $ \k f p -> withS3HandleOrFail (uuid r) mh $
 		setUrlPresent k (iaPublicUrl info (bucketObject info k))
 	return True
 
-storeHelper :: S3Info -> S3Handle -> Maybe Magic -> FilePath -> S3.Object -> MeterUpdate -> Annex (Maybe S3VersionID)
+storeHelper :: S3Info -> S3Handle -> Maybe Magic -> FilePath -> S3.Object -> MeterUpdate -> Annex (Maybe S3Etag, Maybe S3VersionID)
 storeHelper info h magic f object p = liftIO $ case partSize info of
 	Just partsz | partsz > 0 -> do
 		fsz <- getFileSize f
@@ -236,8 +236,16 @@ storeHelper info h magic f object p = liftIO $ case partSize info of
 		rbody <- liftIO $ httpBodyStorer f p
 		let req = (putObject info object rbody)
 			{ S3.poContentType = encodeBS <$> contenttype }
-		vid <- S3.porVersionId <$> sendS3Handle h req
-		return (mkS3VersionID object vid)
+		resp <- sendS3Handle h req
+		let vid = mkS3VersionID object (S3.porVersionId resp)
+		-- FIXME Actual aws version that supports this is not known,
+		-- patch not merged yet.
+		-- https://github.com/aristidb/aws/issues/258
+#if MIN_VERSION_aws(0,99,0)
+		return (Just (S3.porETag resp), vid)
+#else
+		return (Nothing, vid)
+#endif
 	multipartupload fsz partsz = runResourceT $ do
 #if MIN_VERSION_aws(0,16,0)
 		contenttype <- liftIO getcontenttype
@@ -276,9 +284,9 @@ storeHelper info h magic f object p = liftIO $ case partSize info of
 						sendparts (offsetMeterUpdate meter (toBytesProcessed sz)) (etag:etags) (partnum + 1)
 			sendparts p [] 1
 
-		r <- sendS3Handle h $ S3.postCompleteMultipartUpload
+		resp <- sendS3Handle h $ S3.postCompleteMultipartUpload
 			(bucket info) object uploadid (zip [1..] etags)
-		return (mkS3VersionID object (S3.cmurVersionId r))
+		return (Just (S3.cmurETag resp), mkS3VersionID object (S3.cmurVersionId resp))
 #else
 	warningIO $ "Cannot do multipart upload (partsize " ++ show partsz ++ ") of large file (" ++ show fsz ++ "); built with too old a version of the aws library."
 	singlepartupload
@@ -365,18 +373,18 @@ storeExportS3 :: S3HandleVar -> Remote -> S3Info -> Maybe Magic -> FilePath -> K
 storeExportS3 hv r info magic f k loc p = fst
 	<$> storeExportS3' hv r info magic f k loc p
 
-storeExportS3' :: S3HandleVar -> Remote -> S3Info -> Maybe Magic -> FilePath -> Key -> ExportLocation -> MeterUpdate -> Annex (Bool, Maybe S3VersionID)
+storeExportS3' :: S3HandleVar -> Remote -> S3Info -> Maybe Magic -> FilePath -> Key -> ExportLocation -> MeterUpdate -> Annex (Bool, (Maybe S3Etag, Maybe S3VersionID))
 storeExportS3' hv r info magic f k loc p = withS3Handle hv $ \case
-	Just h -> catchNonAsync (go h) (\e -> warning (show e) >> return (False, Nothing))
+	Just h -> catchNonAsync (go h) (\e -> warning (show e) >> return (False, (Nothing, Nothing)))
 	Nothing -> do
 		warning $ needS3Creds (uuid r)
-		return (False, Nothing)
+		return (False, (Nothing, Nothing))
   where
 	go h = do
 		let o = T.pack $ bucketExportLocation info loc
-		mvid <- storeHelper info h magic f o p
+		(metag, mvid) <- storeHelper info h magic f o p
 		setS3VersionID info (uuid r) k mvid
-		return (True, mvid)
+		return (True, (metag, mvid))
 
 retrieveExportS3 :: S3HandleVar -> Remote -> S3Info -> Key -> ExportLocation -> FilePath -> MeterUpdate -> Annex Bool
 retrieveExportS3 hv r info _k loc f p =
@@ -568,18 +576,28 @@ rewritePreconditionException a = catchJust (Url.matchStatusCodeException want) a
 -- When the bucket is not versioned, data loss can result.
 -- This is why that configuration requires --force to enable.
 storeExportWithContentIdentifierS3 :: S3HandleVar -> Remote -> S3Info -> Maybe Magic -> FilePath -> Key -> ExportLocation -> [ContentIdentifier] -> MeterUpdate -> Annex (Maybe ContentIdentifier)
-storeExportWithContentIdentifierS3 hv r info magic src k loc _overwritablecids p =
-	storeExportS3' hv r info magic src k loc p >>= \case
+storeExportWithContentIdentifierS3 hv r info magic src k loc _overwritablecids p
+	| versioning info = go
+	-- FIXME Actual aws version that supports getting Etag for a store
+	-- is not known; patch not merged yet.
+	-- https://github.com/aristidb/aws/issues/258
+#if MIN_VERSION_aws(0,99,0)
+	| otherwise = go
+#else
+	| otherwise = do
+		warning "git-annex is built with too old a version of the aws library to support this operation"
+		return Nothing
+#endif
+  where
+	go = storeExportS3' hv r info magic src k loc p >>= \case
 		(False, _) -> return Nothing
-		(True, Just vid) -> return $ Just $
+		(True, (_, Just vid)) -> return $ Just $
 			mkS3VersionedContentIdentifier vid
-		(True, Nothing) -> return $ Just $
-			-- FIXME for an unversioned bucket, should use the etag
-			-- of the file, which is its md5sum, as the ContentIdentifier
-			-- NOT mempty!
-			-- This is blocked by
-			-- https://github.com/aristidb/aws/issues/258
-			mkS3UnversionedContentIdentifier mempty
+		(True, (Just etag, Nothing)) -> return $ Just $
+			mkS3UnversionedContentIdentifier etag
+		(True, (Nothing, Nothing)) -> do
+			warning "did not get ETag for store to S3 bucket"
+			return Nothing
 
 -- Does not guarantee that the removed object has the content identifier,
 -- but when the bucket is versioned, the removed object content can still
@@ -18,9 +18,6 @@ and `git annex sync --content` can be configured to use it.
   when importing from a versioned S3 remote,
   although the head of it does reflect the current state of the remote.
 
-* storeExportWithContentIdentifierS3 has a FIXME that needs a change to
-  aws.
-
 * Need to test S3 import from unversioned bucket.
 
 * Write a tip or tips to document using this new feature.