S3: Deal with AWS ACL configurations that do not allow creating or checking the location of a bucket, but only reading and writing content to it.
parent 50b1cd917a
commit 604740b720
2 changed files with 47 additions and 18 deletions
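In short, the probe order is reversed: instead of asking S3 for the bucket's location up front, git-annex now tries to read the bucket's annex-uuid file first, and only falls back to a location query when that read fails. An ACL of the kind described above can permit reading and writing the bucket's contents while denying GetBucketLocation and bucket creation, and with this ordering such a bucket still initializes cleanly. The following is a minimal standalone sketch of that decision logic, not git-annex code: probeBucket, BucketStatus, and the two IO actions passed in are hypothetical stand-ins for the Annex/hS3 plumbing in the diff below.

import Control.Exception (SomeException, toException)

-- Hypothetical summary of the three outcomes the new code distinguishes.
data BucketStatus
    = AlreadyInitialized   -- annex-uuid object present and matches this remote's UUID
    | NeedsInit            -- bucket usable but not yet initialized for this remote
    | Inaccessible String  -- owned by another special remote, or no probe allowed
    deriving Show

-- Probe order from the commit: read the UUID file first, query the
-- bucket location only as a fallback.
probeBucket
    :: IO (Either SomeException String)  -- hypothetical: GET the annex-uuid object
    -> IO (Either SomeException ())      -- hypothetical: bucket location request
    -> String                            -- UUID this special remote expects
    -> IO BucketStatus
probeBucket readUUIDFile queryLocation expected = do
    r <- readUUIDFile
    case r of
        Right got
            | got == expected -> return AlreadyInitialized
            | otherwise -> return $ Inaccessible $
                "bucket already in use by a different special remote: " ++ got
        -- The UUID file could not be read; only now is the location
        -- query needed, which the old code issued unconditionally.
        Left _ -> either (Inaccessible . show) (const NeedsInit) <$> queryLocation

-- Example: an ACL that denies the location query but allows object reads
-- never reaches that query when the UUID file already matches.
main :: IO ()
main = print =<< probeBucket
    (return (Right "example-uuid"))
    (return (Left (toException (userError "AccessDenied"))))
    "example-uuid"

In the actual change the old writeUUIDFile error text is kept, but the check moves into a new checkUUIDFile helper so that both genBucket and writeUUIDFile can share it.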
40 Remote/S3.hs
@@ -255,10 +255,18 @@ iaMunge = (>>= munge)
     | isSpace c = []
     | otherwise = "&" ++ show (ord c) ++ ";"
 
+{- Generate the bucket if it does not already exist, including creating the
+ - UUID file within the bucket.
+ -
+ - To check if the bucket exists, ask for its location. However, some ACLs
+ - can allow read/write to buckets, but not querying location, so first
+ - check if the UUID file already exists and we can skip doing anything.
+ -}
 genBucket :: RemoteConfig -> UUID -> Annex ()
 genBucket c u = do
     conn <- s3ConnectionRequired c u
     showAction "checking bucket"
-    loc <- liftIO $ getBucketLocation conn bucket
-    case loc of
-        Right _ -> writeUUIDFile c u
+    unlessM ((== Right True) <$> checkUUIDFile c u conn) $ do
+        loc <- liftIO $ getBucketLocation conn bucket
+        case loc of
+            Right _ -> writeUUIDFile c u
@@ -284,20 +292,38 @@ genBucket c u = do
 writeUUIDFile :: RemoteConfig -> UUID -> Annex ()
 writeUUIDFile c u = do
     conn <- s3ConnectionRequired c u
-    go conn =<< liftIO (tryNonAsync $ getObject conn $ mkobject L.empty)
-  where
-    go _conn (Right (Right o)) = unless (obj_data o == uuidb) $
-        error $ "This bucket is already in use by a different S3 special remote, with UUID: " ++ show (obj_data o)
-    go conn _ = do
-        let object = setStorageClass (getStorageClass c) (mkobject uuidb)
-        either s3Error return =<< liftIO (sendObject conn object)
-
-    file = filePrefix c ++ "annex-uuid"
+    v <- checkUUIDFile c u conn
+    case v of
+        Left e -> error e
+        Right True -> return ()
+        Right False -> do
+            let object = setStorageClass (getStorageClass c) (mkobject uuidb)
+            either s3Error return =<< liftIO (sendObject conn object)
+  where
+    file = uuidFile c
     uuidb = L.fromChunks [T.encodeUtf8 $ T.pack $ fromUUID u]
     bucket = fromJust $ getBucket c
 
     mkobject = S3Object bucket file "" (getXheaders c)
 
+{- Checks if the UUID file exists in the bucket and has the specified UUID already. -}
+checkUUIDFile :: RemoteConfig -> UUID -> AWSConnection -> Annex (Either String Bool)
+checkUUIDFile c u conn = check <$> liftIO (tryNonAsync $ getObject conn $ mkobject L.empty)
+  where
+    check (Right (Right o))
+        | obj_data o == uuidb = Right True
+        | otherwise = Left $ "This bucket is already in use by a different S3 special remote, with UUID: " ++ show (obj_data o)
+    check _ = Right False
+
+    uuidb = L.fromChunks [T.encodeUtf8 $ T.pack $ fromUUID u]
+    bucket = fromJust $ getBucket c
+    file = uuidFile c
+
+    mkobject = S3Object bucket file "" (getXheaders c)
+
+uuidFile :: RemoteConfig -> FilePath
+uuidFile c = filePrefix c ++ "annex-uuid"
+
 s3ConnectionRequired :: RemoteConfig -> UUID -> Annex AWSConnection
 s3ConnectionRequired c u =
     maybe (error "Cannot connect to S3") return =<< s3Connection c u
3 debian/changelog (vendored)
@@ -6,6 +6,9 @@ git-annex (5.20140710) UNRELEASED; urgency=medium
   * migrate: Avoid re-checksumming when migrating from hashE to hash backend.
   * uninit: Avoid failing final removal in some direct mode repositories
     due to file modes.
+  * S3: Deal with AWS ACL configurations that do not allow creating or
+    checking the location of a bucket, but only reading and writing content to
+    it.
 
  -- Joey Hess <joeyh@debian.org>  Wed, 09 Jul 2014 23:29:21 -0400