{- S3 remotes
-
- Copyright 2011-2019 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU AGPL version 3 or higher.
-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE BangPatterns #-}
{-# LANGUAGE CPP #-}
module Remote.S3 (remote, iaHost, configIA, iaItemUrl) where
import qualified Aws as AWS
import qualified Aws.Core as AWS
import qualified Aws.S3 as S3
import qualified Data.Text as T
import qualified Data.Text.Encoding as T
import qualified Data.ByteString.Lazy as L
import qualified Data.ByteString as BS
import qualified Data.ByteString.Char8 as B8
import qualified Data.Map as M
import qualified Data.Set as S
import qualified System.FilePath.Posix as Posix
import Data.Char
import Data.String
import Network.Socket (HostName)
import Network.HTTP.Conduit (Manager)
import Network.HTTP.Client (responseStatus, responseBody, RequestBody(..))
import Network.HTTP.Types
import Control.Monad.Trans.Resource
import Control.Monad.Catch
import Data.IORef
import System.Log.Logger
import Control.Concurrent.STM (atomically)
import Control.Concurrent.STM.TVar
import Annex.Common
import Types.Remote
import Types.Export
import qualified Git
import Config
import Config.Cost
import Remote.Helper.Special
import Remote.Helper.Http
import Remote.Helper.Messages
import Remote.Helper.ExportImport
import qualified Remote.Helper.AWS as AWS
import Creds
import Annex.UUID
import Annex.Magic
import Logs.Web
import Logs.MetaData
import Types.MetaData
import Utility.Metered
import qualified Annex.Url as Url
import Utility.DataUnits
import Annex.Content
import Annex.Url (getUrlOptions, withUrlOptions)
import Utility.Url (checkBoth, UrlOptions(..))
import Utility.Env
type BucketName = String
type BucketObject = String
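-- A remote of this type is typically set up with initremote, e.g.:
--
--   git annex initremote mys3 type=S3 encryption=shared bucket=mybucket
--
-- ("mys3" and "mybucket" are placeholder names; the other config
-- settings handled below, such as datacenter, partsize, fileprefix,
-- and versioning, can be passed the same way.)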
remote :: RemoteType
remote = RemoteType
{ typename = "S3"
, enumerate = const (findSpecialRemotes "s3")
, generate = gen
, setup = s3Setup
, exportSupported = exportIsSupported
, importSupported = importUnsupported
}
gen :: Git.Repo -> UUID -> RemoteConfig -> RemoteGitConfig -> Annex (Maybe Remote)
gen r u c gc = do
cst <- remoteCost gc expensiveRemoteCost
info <- extractS3Info c
hdl <- mkS3HandleVar c gc u
magic <- liftIO initMagicMimeType
return $ new cst info hdl magic
where
new cst info hdl magic = Just $ specialRemote c
(simplyPrepare $ store hdl this info magic)
(simplyPrepare $ retrieve hdl this c info)
(simplyPrepare $ remove hdl this info)
(simplyPrepare $ checkKey hdl this c info)
this
where
this = Remote
{ uuid = u
, cost = cst
, name = Git.repoDescribe r
, storeKey = storeKeyDummy
, retrieveKeyFile = retreiveKeyFileDummy
, retrieveKeyFileCheap = retrieveCheap
-- HttpManagerRestricted is used here, so this is
-- secure.
, retrievalSecurityPolicy = RetrievalAllKeysSecure
, removeKey = removeKeyDummy
, lockContent = Nothing
, checkPresent = checkPresentDummy
, checkPresentCheap = False
, exportActions = ExportActions
{ storeExport = storeExportS3 hdl this info magic
, retrieveExport = retrieveExportS3 hdl this info
, removeExport = removeExportS3 hdl this info
, checkPresentExport = checkPresentExportS3 hdl this info
-- S3 does not have directories.
, removeExportDirectory = Nothing
, renameExport = renameExportS3 hdl this info
}
, importActions = importUnsupported
, whereisKey = Just (getPublicWebUrls u info c)
, remoteFsck = Nothing
, repairRepo = Nothing
, config = c
, getRepo = return r
, gitconfig = gc
, localpath = Nothing
, readonly = False
, appendonly = versioning info
, availability = GloballyAvailable
, remotetype = remote
, mkUnavailable = gen r u (M.insert "host" "!dne!" c) gc
, getInfo = includeCredsInfo c (AWS.creds u) (s3Info c info)
, claimUrl = Nothing
, checkUrl = Nothing
}
s3Setup :: SetupStage -> Maybe UUID -> Maybe CredPair -> RemoteConfig -> RemoteGitConfig -> Annex (RemoteConfig, UUID)
s3Setup ss mu mcreds c gc = do
u <- maybe (liftIO genUUID) return mu
s3Setup' ss u mcreds c gc
s3Setup' :: SetupStage -> UUID -> Maybe CredPair -> RemoteConfig -> RemoteGitConfig -> Annex (RemoteConfig, UUID)
s3Setup' ss u mcreds c gc
| configIA c = archiveorg
| otherwise = defaulthost
where
remotename = fromJust (M.lookup "name" c)
defbucket = remotename ++ "-" ++ fromUUID u
defaults = M.fromList
[ ("datacenter", T.unpack $ AWS.defaultRegion AWS.S3)
, ("storageclass", "STANDARD")
, ("host", AWS.s3DefaultHost)
, ("port", "80")
, ("bucket", defbucket)
]
use fullconfig = do
enableBucketVersioning ss fullconfig gc u
gitConfigSpecialRemote u fullconfig [("s3", "true")]
return (fullconfig, u)
defaulthost = do
(c', encsetup) <- encryptionSetup c gc
c'' <- setRemoteCredPair encsetup c' gc (AWS.creds u) mcreds
let fullconfig = c'' `M.union` defaults
case ss of
Init -> genBucket fullconfig gc u
_ -> return ()
use fullconfig
archiveorg = do
showNote "Internet Archive mode"
c' <- setRemoteCredPair noEncryptionUsed c gc (AWS.creds u) mcreds
-- Ensure user enters a valid bucket name, since
-- this determines the name of the archive.org item.
let validbucket = replace " " "-" $
fromMaybe (giveup "specify bucket=") $
getBucketName c'
let archiveconfig =
-- IA accepts x-amz-* as an alias for x-archive-*
M.mapKeys (replace "x-archive-" "x-amz-") $
-- encryption does not make sense here
M.insert "encryption" "none" $
M.insert "bucket" validbucket $
M.union c' $
-- special constraints on key names
M.insert "mungekeys" "ia" defaults
info <- extractS3Info archiveconfig
hdl <- mkS3HandleVar archiveconfig gc u
withS3HandleOrFail u hdl $
writeUUIDFile archiveconfig u info
use archiveconfig
store :: S3HandleVar -> Remote -> S3Info -> Maybe Magic -> Storer
store mh r info magic = fileStorer $ \k f p -> withS3HandleOrFail (uuid r) mh $ \h -> do
void $ storeHelper info h magic f (T.pack $ bucketObject info k) p
-- Store public URL to item in Internet Archive.
when (isIA info && not (isChunkKey k)) $
setUrlPresent k (iaPublicUrl info (bucketObject info k))
return True
storeHelper :: S3Info -> S3Handle -> Maybe Magic -> FilePath -> S3.Object -> MeterUpdate -> Annex (Maybe S3VersionID)
storeHelper info h magic f object p = liftIO $ case partSize info of
Just partsz | partsz > 0 -> do
fsz <- getFileSize f
if fsz > partsz
then multipartupload fsz partsz
else singlepartupload
_ -> singlepartupload
where
singlepartupload = runResourceT $ do
contenttype <- liftIO getcontenttype
rbody <- liftIO $ httpBodyStorer f p
let req = (putObject info object rbody)
{ S3.poContentType = encodeBS <$> contenttype }
vid <- S3.porVersionId <$> sendS3Handle h req
return (mkS3VersionID object vid)
multipartupload fsz partsz = runResourceT $ do
#if MIN_VERSION_aws(0,16,0)
contenttype <- liftIO getcontenttype
let startreq = (S3.postInitiateMultipartUpload (bucket info) object)
{ S3.imuStorageClass = Just (storageClass info)
, S3.imuMetadata = metaHeaders info
, S3.imuAutoMakeBucket = isIA info
, S3.imuExpires = Nothing -- TODO set some reasonable expiry
, S3.imuContentType = fromString <$> contenttype
}
uploadid <- S3.imurUploadId <$> sendS3Handle h startreq
-- The actual part size will be an even multiple of the
-- 32k chunk size that lazy ByteStrings use.
let partsz' = (partsz `div` toInteger defaultChunkSize) * toInteger defaultChunkSize
-- Send parts of the file, taking care to stream each part
-- w/o buffering in memory, since the parts can be large.
etags <- bracketIO (openBinaryFile f ReadMode) hClose $ \fh -> do
let sendparts meter etags partnum = do
pos <- liftIO $ hTell fh
if pos >= fsz
then return (reverse etags)
else do
-- Calculate size of part that will
-- be read.
let sz = if fsz - pos < partsz'
then fsz - pos
else partsz'
let p' = offsetMeterUpdate p (toBytesProcessed pos)
let numchunks = ceiling (fromIntegral sz / fromIntegral defaultChunkSize :: Double)
let popper = handlePopper numchunks defaultChunkSize p' fh
let req = S3.uploadPart (bucket info) object partnum uploadid $
RequestBodyStream (fromIntegral sz) popper
S3.UploadPartResponse { S3.uprETag = etag } <- sendS3Handle h req
sendparts (offsetMeterUpdate meter (toBytesProcessed sz)) (etag:etags) (partnum + 1)
sendparts p [] 1
r <- sendS3Handle h $ S3.postCompleteMultipartUpload
(bucket info) object uploadid (zip [1..] etags)
return (mkS3VersionID object (S3.cmurVersionId r))
#else
warningIO $ "Cannot do multipart upload (partsize " ++ show partsz ++ ") of large file (" ++ show fsz ++ "); built with too old a version of the aws library."
singlepartupload
#endif
getcontenttype = maybe (pure Nothing) (flip getMagicMimeType f) magic
{- Implemented as a fileRetriever that uses conduit to stream the chunks
- out to the file. Would be better to implement a byteRetriever, but
- that is difficult. -}
retrieve :: S3HandleVar -> Remote -> RemoteConfig -> S3Info -> Retriever
retrieve hv r c info = fileRetriever $ \f k p -> withS3Handle hv $ \case
(Just h) ->
eitherS3VersionID info (uuid r) c k (T.pack $ bucketObject info k) >>= \case
Left failreason -> do
warning failreason
giveup "cannot download content"
Right loc -> retrieveHelper info h loc f p
Nothing ->
getPublicWebUrls' (uuid r) info c k >>= \case
Left failreason -> do
warning failreason
giveup "cannot download content"
Right us -> unlessM (downloadUrl k p us f) $
giveup "failed to download content"
retrieveHelper :: S3Info -> S3Handle -> (Either S3.Object S3VersionID) -> FilePath -> MeterUpdate -> Annex ()
retrieveHelper info h loc f p = liftIO $ runResourceT $ do
let req = case loc of
Left o -> S3.getObject (bucket info) o
Right (S3VersionID o vid) -> (S3.getObject (bucket info) o)
{ S3.goVersionId = Just vid }
S3.GetObjectResponse { S3.gorResponse = rsp } <- sendS3Handle h req
Url.sinkResponseFile p zeroBytesProcessed f WriteMode rsp
retrieveCheap :: Key -> AssociatedFile -> FilePath -> Annex Bool
retrieveCheap _ _ _ = return False
remove :: S3HandleVar -> Remote -> S3Info -> Remover
remove hv r info k = withS3HandleOrFail (uuid r) hv $ \h -> liftIO $ runResourceT $ do
res <- tryNonAsync $ sendS3Handle h $
S3.DeleteObject (T.pack $ bucketObject info k) (bucket info)
return $ either (const False) (const True) res
checkKey :: S3HandleVar -> Remote -> RemoteConfig -> S3Info -> CheckPresent
checkKey hv r c info k = withS3Handle hv $ \case
Just h -> do
showChecking r
eitherS3VersionID info (uuid r) c k (T.pack $ bucketObject info k) >>= \case
Left failreason -> do
warning failreason
giveup "cannot check content"
Right loc -> checkKeyHelper info h loc
Nothing ->
getPublicWebUrls' (uuid r) info c k >>= \case
Left failreason -> do
warning failreason
giveup "cannot check content"
Right us -> do
showChecking r
let check u = withUrlOptions $
liftIO . checkBoth u (keySize k)
anyM check us
checkKeyHelper :: S3Info -> S3Handle -> (Either S3.Object S3VersionID) -> Annex Bool
checkKeyHelper info h loc = liftIO $ runResourceT $ do
#if MIN_VERSION_aws(0,10,0)
rsp <- go
return (isJust $ S3.horMetadata rsp)
#else
catchMissingException $ do
void go
return True
#endif
where
go = sendS3Handle h req
req = case loc of
Left o -> S3.headObject (bucket info) o
Right (S3VersionID o vid) -> (S3.headObject (bucket info) o)
{ S3.hoVersionId = Just vid }
#if ! MIN_VERSION_aws(0,10,0)
{- Catches the exception headObject throws when an object is not present
- in the bucket, and returns False. All other exceptions indicate a
- check error and are let through. -}
catchMissingException :: Annex Bool -> Annex Bool
catchMissingException a = catchJust missing a (const $ return False)
where
missing :: AWS.HeaderException -> Maybe ()
missing e
| AWS.headerErrorMessage e == "ETag missing" = Just ()
| otherwise = Nothing
#endif
storeExportS3 :: S3HandleVar -> Remote -> S3Info -> Maybe Magic -> FilePath -> Key -> ExportLocation -> MeterUpdate -> Annex Bool
storeExportS3 hv r info magic f k loc p = withS3Handle hv $ \case
Just h -> catchNonAsync (go h) (\e -> warning (show e) >> return False)
Nothing -> do
warning $ needS3Creds (uuid r)
return False
where
go h = do
let o = T.pack $ bucketExportLocation info loc
storeHelper info h magic f o p
>>= setS3VersionID info (uuid r) k
return True
retrieveExportS3 :: S3HandleVar -> Remote -> S3Info -> Key -> ExportLocation -> FilePath -> MeterUpdate -> Annex Bool
retrieveExportS3 hv r info _k loc f p =
catchNonAsync go (\e -> warning (show e) >> return False)
where
go = withS3Handle hv $ \case
Just h -> do
retrieveHelper info h (Left (T.pack exporturl)) f p
return True
Nothing -> case getPublicUrlMaker info of
Nothing -> do
warning $ needS3Creds (uuid r)
return False
Just geturl -> Url.withUrlOptions $
liftIO . Url.download p (geturl exporturl) f
exporturl = bucketExportLocation info loc
removeExportS3 :: S3HandleVar -> Remote -> S3Info -> Key -> ExportLocation -> Annex Bool
removeExportS3 hv r info k loc = withS3Handle hv $ \case
Just h -> checkVersioning info (uuid r) k $
catchNonAsync (go h) (\e -> warning (show e) >> return False)
Nothing -> do
warning $ needS3Creds (uuid r)
return False
where
go h = liftIO $ runResourceT $ do
res <- tryNonAsync $ sendS3Handle h $
S3.DeleteObject (T.pack $ bucketExportLocation info loc) (bucket info)
return $ either (const False) (const True) res
checkPresentExportS3 :: S3HandleVar -> Remote -> S3Info -> Key -> ExportLocation -> Annex Bool
checkPresentExportS3 hv r info k loc = withS3Handle hv $ \case
Just h -> checkKeyHelper info h (Left (T.pack $ bucketExportLocation info loc))
Nothing -> case getPublicUrlMaker info of
Just geturl -> withUrlOptions $ liftIO .
checkBoth (geturl $ bucketExportLocation info loc) (keySize k)
Nothing -> do
warning $ needS3Creds (uuid r)
giveup "No S3 credentials configured"
-- S3 has no move primitive; copy and delete.
renameExportS3 :: S3HandleVar -> Remote -> S3Info -> Key -> ExportLocation -> ExportLocation -> Annex (Maybe Bool)
renameExportS3 hv r info k src dest = Just <$> go
where
go = withS3Handle hv $ \case
Just h -> checkVersioning info (uuid r) k $
catchNonAsync (go' h) (\_ -> return False)
Nothing -> do
warning $ needS3Creds (uuid r)
return False
go' h = liftIO $ runResourceT $ do
let co = S3.copyObject (bucket info) dstobject
(S3.ObjectId (bucket info) srcobject Nothing)
S3.CopyMetadata
-- ACL is not preserved by copy.
void $ sendS3Handle h $ co { S3.coAcl = acl info }
void $ sendS3Handle h $ S3.DeleteObject srcobject (bucket info)
return True
srcobject = T.pack $ bucketExportLocation info src
dstobject = T.pack $ bucketExportLocation info dest
{- Generate the bucket if it does not already exist, including creating the
- UUID file within the bucket.
-
- Some ACLs can allow read/write to buckets, but not querying them,
- so first check if the UUID file already exists; if it does, creating
- the bucket can be skipped.
-}
genBucket :: RemoteConfig -> RemoteGitConfig -> UUID -> Annex ()
genBucket c gc u = do
showAction "checking bucket"
info <- extractS3Info c
hdl <- mkS3HandleVar c gc u
withS3HandleOrFail u hdl $ \h ->
go info h =<< checkUUIDFile c u info h
where
go _ _ (Right True) = noop
go info h _ = do
v <- liftIO $ tryNonAsync $ runResourceT $
sendS3Handle h (S3.getBucket $ bucket info)
case v of
Right _ -> noop
Left _ -> do
showAction $ "creating bucket in " ++ datacenter
void $ liftIO $ runResourceT $ sendS3Handle h $
(S3.putBucket (bucket info))
{ S3.pbCannedAcl = acl info
, S3.pbLocationConstraint = locconstraint
#if MIN_VERSION_aws(0,13,0)
, S3.pbXStorageClass = storageclass
#endif
}
writeUUIDFile c u info h
locconstraint = mkLocationConstraint $ T.pack datacenter
datacenter = fromJust $ M.lookup "datacenter" c
#if MIN_VERSION_aws(0,13,0)
-- "NEARLINE" as a storage class when creating a bucket is a
-- nonstandard extension of Google Cloud Storage.
storageclass = case getStorageClass c of
sc@(S3.OtherStorageClass "NEARLINE") -> Just sc
_ -> Nothing
#endif
{- Writes the UUID to an annex-uuid file within the bucket.
-
- If the file already exists in the bucket, it must match,
- or this fails.
-
- Note that IA buckets can only be created by having a file
- stored in them. So this also takes care of that.
-}
writeUUIDFile :: RemoteConfig -> UUID -> S3Info -> S3Handle -> Annex ()
writeUUIDFile c u info h = do
v <- checkUUIDFile c u info h
case v of
Right True -> noop
Right False -> do
warning "The bucket already exists, and its annex-uuid file indicates it is used by a different special remote."
giveup "Cannot reuse this bucket."
_ -> void $ liftIO $ runResourceT $ sendS3Handle h mkobject
where
file = T.pack $ uuidFile c
uuidb = L.fromChunks [T.encodeUtf8 $ T.pack $ fromUUID u]
mkobject = putObject info file (RequestBodyLBS uuidb)
{- Checks if the UUID file exists in the bucket
- and has the specified UUID already. -}
checkUUIDFile :: RemoteConfig -> UUID -> S3Info -> S3Handle -> Annex (Either SomeException Bool)
checkUUIDFile c u info h = tryNonAsync $ liftIO $ runResourceT $ do
resp <- tryS3 $ sendS3Handle h (S3.getObject (bucket info) file)
case resp of
Left _ -> return False
Right r -> do
v <- AWS.loadToMemory r
let !ok = check v
return ok
where
check (S3.GetObjectMemoryResponse _meta rsp) =
responseStatus rsp == ok200 && responseBody rsp == uuidb
file = T.pack $ uuidFile c
uuidb = L.fromChunks [T.encodeUtf8 $ T.pack $ fromUUID u]
uuidFile :: RemoteConfig -> FilePath
uuidFile c = getFilePrefix c ++ "annex-uuid"
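-- For example, with fileprefix=foo/ the uuidFile is "foo/annex-uuid";
-- with no fileprefix configured it is simply "annex-uuid".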
tryS3 :: ResourceT IO a -> ResourceT IO (Either S3.S3Error a)
tryS3 a = (Right <$> a) `catch` (pure . Left)
data S3Handle = S3Handle
{ hmanager :: Manager
, hawscfg :: AWS.Configuration
, hs3cfg :: S3.S3Configuration AWS.NormalQuery
}
{- Sends a request to S3 and gets back the response. -}
sendS3Handle
:: (AWS.Transaction r a, AWS.ServiceConfiguration r ~ S3.S3Configuration)
=> S3Handle
-> r
-> ResourceT IO a
sendS3Handle h r = AWS.pureAws (hawscfg h) (hs3cfg h) (hmanager h) r
type S3HandleVar = TVar (Either (Annex (Maybe S3Handle)) (Maybe S3Handle))
{- Prepares an S3Handle for later use. Does not connect to S3 or do anything
- else expensive. -}
mkS3HandleVar :: RemoteConfig -> RemoteGitConfig -> UUID -> Annex S3HandleVar
mkS3HandleVar c gc u = liftIO $ newTVarIO $ Left $ do
mcreds <- getRemoteCredPair c gc (AWS.creds u)
case mcreds of
Just creds -> do
awscreds <- liftIO $ genCredentials creds
let awscfg = AWS.Configuration AWS.Timestamp awscreds debugMapper
#if MIN_VERSION_aws(0,17,0)
Nothing
#endif
ou <- getUrlOptions
return $ Just $ S3Handle (httpManager ou) awscfg s3cfg
Nothing -> return Nothing
where
s3cfg = s3Configuration c
withS3Handle :: S3HandleVar -> (Maybe S3Handle -> Annex a) -> Annex a
withS3Handle hv a = liftIO (readTVarIO hv) >>= \case
Right hdl -> a hdl
Left mkhdl -> do
hdl <- mkhdl
liftIO $ atomically $ writeTVar hv (Right hdl)
a hdl
withS3HandleOrFail :: UUID -> S3HandleVar -> (S3Handle -> Annex a) -> Annex a
withS3HandleOrFail u hv a = withS3Handle hv $ \case
Just hdl -> a hdl
Nothing -> do
warning $ needS3Creds u
giveup "No S3 credentials configured"
needS3Creds :: UUID -> String
needS3Creds u = missingCredPairFor "S3" (AWS.creds u)
s3Configuration :: RemoteConfig -> S3.S3Configuration AWS.NormalQuery
s3Configuration c = cfg
{ S3.s3Port = port
, S3.s3RequestStyle = case M.lookup "requeststyle" c of
Just "path" -> S3.PathStyle
Just s -> giveup $ "bad S3 requeststyle value: " ++ s
Nothing -> S3.s3RequestStyle cfg
}
where
proto
| port == 443 = AWS.HTTPS
| otherwise = AWS.HTTP
h = fromJust $ M.lookup "host" c
datacenter = fromJust $ M.lookup "datacenter" c
-- When the default S3 host is configured, connect directly to
-- the S3 endpoint for the configured datacenter.
-- When another host is configured, it's used as-is.
endpoint
| h == AWS.s3DefaultHost = AWS.s3HostName $ T.pack datacenter
| otherwise = T.encodeUtf8 $ T.pack h
port = let s = fromJust $ M.lookup "port" c in
case reads s of
[(p, _)] -> p
_ -> giveup $ "bad S3 port value: " ++ s
cfg = S3.s3 proto endpoint False
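-- For example, a config of host=localhost port=443 requeststyle=path
-- (as might be used with a self-hosted S3-compatible server) makes
-- s3Configuration connect to localhost over HTTPS using path-style
-- bucket access.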
data S3Info = S3Info
{ bucket :: S3.Bucket
, storageClass :: S3.StorageClass
, bucketObject :: Key -> BucketObject
, bucketExportLocation :: ExportLocation -> BucketObject
, metaHeaders :: [(T.Text, T.Text)]
, partSize :: Maybe Integer
, isIA :: Bool
, versioning :: Bool
, public :: Bool
, publicurl :: Maybe URLString
, host :: Maybe String
}
extractS3Info :: RemoteConfig -> Annex S3Info
extractS3Info c = do
b <- maybe
(giveup "S3 bucket not configured")
(return . T.pack)
(getBucketName c)
return $ S3Info
{ bucket = b
, storageClass = getStorageClass c
, bucketObject = getBucketObject c
, bucketExportLocation = getBucketExportLocation c
, metaHeaders = getMetaHeaders c
, partSize = getPartSize c
, isIA = configIA c
, versioning = boolcfg "versioning"
, public = boolcfg "public"
, publicurl = M.lookup "publicurl" c
, host = M.lookup "host" c
}
where
boolcfg k = fromMaybe False $ yesNo =<< M.lookup k c
putObject :: S3Info -> T.Text -> RequestBody -> S3.PutObject
putObject info file rbody = (S3.putObject (bucket info) file rbody)
{ S3.poStorageClass = Just (storageClass info)
, S3.poMetadata = metaHeaders info
, S3.poAutoMakeBucket = isIA info
, S3.poAcl = acl info
}
acl :: S3Info -> Maybe S3.CannedAcl
acl info
| public info = Just S3.AclPublicRead
| otherwise = Nothing
getBucketName :: RemoteConfig -> Maybe BucketName
getBucketName = map toLower <$$> M.lookup "bucket"
getStorageClass :: RemoteConfig -> S3.StorageClass
getStorageClass c = case M.lookup "storageclass" c of
Just "REDUCED_REDUNDANCY" -> S3.ReducedRedundancy
#if MIN_VERSION_aws(0,13,0)
Just s -> S3.OtherStorageClass (T.pack s)
#endif
_ -> S3.Standard
getPartSize :: RemoteConfig -> Maybe Integer
getPartSize c = readSize dataUnits =<< M.lookup "partsize" c
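-- For example, partsize=1GiB parses to Just 1073741824, while leaving
-- partsize unset yields Nothing, which disables multipart uploads.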
getMetaHeaders :: RemoteConfig -> [(T.Text, T.Text)]
getMetaHeaders = map munge . filter ismetaheader . M.assocs
where
ismetaheader (h, _) = metaprefix `isPrefixOf` h
metaprefix = "x-amz-meta-"
metaprefixlen = length metaprefix
munge (k, v) = (T.pack $ drop metaprefixlen k, T.pack v)
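-- For example, a hypothetical config item x-amz-meta-project=demo
-- becomes the metadata header ("project", "demo").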
getFilePrefix :: RemoteConfig -> String
getFilePrefix = M.findWithDefault "" "fileprefix"
getBucketObject :: RemoteConfig -> Key -> BucketObject
getBucketObject c = munge . serializeKey
where
munge s = case M.lookup "mungekeys" c of
Just "ia" -> iaMunge $ getFilePrefix c ++ s
_ -> getFilePrefix c ++ s
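-- For example, with fileprefix=foo/ and mungekeys unset, a key that
-- serializes to "SHA256E-s100--aaa" is stored at the bucket object
-- "foo/SHA256E-s100--aaa"; with mungekeys=ia the prefixed name is
-- additionally run through iaMunge.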
getBucketExportLocation :: RemoteConfig -> ExportLocation -> BucketObject
getBucketExportLocation c loc = getFilePrefix c ++ fromExportLocation loc
{- Internet Archive documentation limits filenames to a subset of ascii.
- While other characters seem to work now, this entity-encodes everything
- else to avoid problems. -}
iaMunge :: String -> String
iaMunge = (>>= munge)
where
munge c
| isAsciiUpper c || isAsciiLower c || isNumber c = [c]
| c `elem` ("_-.\"" :: String) = [c]
| isSpace c = []
| otherwise = "&" ++ show (ord c) ++ ";"
configIA :: RemoteConfig -> Bool
configIA = maybe False isIAHost . M.lookup "host"
{- Hostname to use for archive.org S3. -}
iaHost :: HostName
iaHost = "s3.us.archive.org"
isIAHost :: HostName -> Bool
isIAHost h = ".archive.org" `isSuffixOf` map toLower h
iaItemUrl :: BucketName -> URLString
iaItemUrl b = "http://archive.org/details/" ++ b
iaPublicUrl :: S3Info -> BucketObject -> URLString
iaPublicUrl info = genericPublicUrl $
"http://archive.org/download/" ++ T.unpack (bucket info) ++ "/"
awsPublicUrl :: S3Info -> BucketObject -> URLString
awsPublicUrl info = genericPublicUrl $
"https://" ++ T.unpack (bucket info) ++ ".s3.amazonaws.com/"
genericPublicUrl :: URLString -> BucketObject -> URLString
genericPublicUrl baseurl p = baseurl Posix.</> p
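-- For example, for a bucket named "mybucket" and a bucket object
-- "foo/bar", awsPublicUrl produces
-- "https://mybucket.s3.amazonaws.com/foo/bar" and iaPublicUrl produces
-- "http://archive.org/download/mybucket/foo/bar".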
genCredentials :: CredPair -> IO AWS.Credentials
genCredentials (keyid, secret) = AWS.Credentials
<$> pure (tobs keyid)
<*> pure (tobs secret)
<*> newIORef []
<*> (fmap tobs <$> getEnv "AWS_SESSION_TOKEN")
where
tobs = T.encodeUtf8 . T.pack
mkLocationConstraint :: AWS.Region -> S3.LocationConstraint
mkLocationConstraint "US" = S3.locationUsClassic
mkLocationConstraint r = r
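-- For example, mkLocationConstraint "US" is S3.locationUsClassic,
-- while any other region name, such as "EU", is passed through
-- unchanged.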
debugMapper :: AWS.Logger
debugMapper level t = forward "S3" (T.unpack t)
where
forward = case level of
AWS.Debug -> debugM
AWS.Info -> infoM
AWS.Warning -> warningM
AWS.Error -> errorM
s3Info :: RemoteConfig -> S3Info -> [(String, String)]
s3Info c info = catMaybes
[ Just ("bucket", fromMaybe "unknown" (getBucketName c))
, Just ("endpoint", w82s (BS.unpack (S3.s3Endpoint s3c)))
, Just ("port", show (S3.s3Port s3c))
, Just ("storage class", showstorageclass (getStorageClass c))
, if configIA c
then Just ("internet archive item", iaItemUrl $ fromMaybe "unknown" $ getBucketName c)
else Nothing
, Just ("partsize", maybe "unlimited" (roughSize storageUnits False) (getPartSize c))
, Just ("public", if public info then "yes" else "no")
, Just ("versioning", if versioning info then "yes" else "no")
]
where
s3c = s3Configuration c
#if MIN_VERSION_aws(0,13,0)
showstorageclass (S3.OtherStorageClass t) = T.unpack t
#endif
showstorageclass sc = show sc
getPublicWebUrls :: UUID -> S3Info -> RemoteConfig -> Key -> Annex [URLString]
getPublicWebUrls u info c k = either (const []) id <$> getPublicWebUrls' u info c k
getPublicWebUrls' :: UUID -> S3Info -> RemoteConfig -> Key -> Annex (Either String [URLString])
getPublicWebUrls' u info c k
| not (public info) = return $ Left $
"S3 bucket does not allow public access; " ++ needS3Creds u
| exportTree c = if versioning info
then case publicurl info of
Just url -> getversionid (const $ genericPublicUrl url)
Nothing -> case host info of
Just h | h == AWS.s3DefaultHost ->
getversionid awsPublicUrl
_ -> return nopublicurl
else return (Left "exporttree used without versioning")
| otherwise = case getPublicUrlMaker info of
Just geturl -> return (Right [geturl $ bucketObject info k])
Nothing -> return nopublicurl
where
nopublicurl = Left "No publicurl is configured for this remote"
getversionid url = getS3VersionIDPublicUrls url info u k >>= \case
[] -> return (Left "Remote is configured to use versioning, but no S3 version ID is recorded for this key")
l -> return (Right l)
getPublicUrlMaker :: S3Info -> Maybe (BucketObject -> URLString)
getPublicUrlMaker info = case publicurl info of
Just url -> Just (genericPublicUrl url)
Nothing -> case host info of
Just h
| h == AWS.s3DefaultHost ->
Just (awsPublicUrl info)
| isIAHost h ->
Just (iaPublicUrl info)
_ -> Nothing
data S3VersionID = S3VersionID S3.Object T.Text
deriving (Show)
-- smart constructor
mkS3VersionID :: S3.Object -> Maybe T.Text -> Maybe S3VersionID
mkS3VersionID o (Just t)
| T.null t = Nothing
-- AWS documentation says a version ID is at most 1024 bytes long.
-- Since they are stored in the git-annex branch, prevent them from
-- being very much larger than that.
| T.length t < 2048 = Just (S3VersionID o t)
| otherwise = Nothing
mkS3VersionID _ Nothing = Nothing
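-- For example, mkS3VersionID "foo" (Just "") and
-- mkS3VersionID "foo" Nothing are both Nothing; only a non-empty
-- version ID of reasonable length yields a Just.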
-- Format for storage in per-remote metadata.
--
-- An S3 version ID is "url ready" and so does not contain '#'; that
-- character is used to separate it from the object id. (A space would
-- work too, but spaces in metadata values lead to an inefficient
-- encoding.)
formatS3VersionID :: S3VersionID -> BS.ByteString
formatS3VersionID (S3VersionID o v) = T.encodeUtf8 v <> "#" <> T.encodeUtf8 o
-- Parse from value stored in per-remote metadata.
parseS3VersionID :: BS.ByteString -> Maybe S3VersionID
parseS3VersionID b = do
let (v, rest) = B8.break (== '#') b
o <- eitherToMaybe $ T.decodeUtf8' $ BS.drop 1 rest
mkS3VersionID o (eitherToMaybe $ T.decodeUtf8' v)
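-- For example, with hypothetical values:
--
--   formatS3VersionID (S3VersionID "foo" "xyzzy") == "xyzzy#foo"
--   parseS3VersionID "xyzzy#foo" == Just (S3VersionID "foo" "xyzzy")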
setS3VersionID :: S3Info -> UUID -> Key -> Maybe S3VersionID -> Annex ()
setS3VersionID info u k vid
| versioning info = maybe noop (setS3VersionID' u k) vid
| otherwise = noop
setS3VersionID' :: UUID -> Key -> S3VersionID -> Annex ()
setS3VersionID' u k vid = addRemoteMetaData k $
RemoteMetaData u (updateMetaData s3VersionField v emptyMetaData)
where
v = mkMetaValue (CurrentlySet True) (formatS3VersionID vid)
getS3VersionID :: UUID -> Key -> Annex [S3VersionID]
getS3VersionID u k = do
(RemoteMetaData _ m) <- getCurrentRemoteMetaData u k
return $ mapMaybe parseS3VersionID $ map unwrap $ S.toList $
metaDataValues s3VersionField m
where
unwrap (MetaValue _ v) = v
s3VersionField :: MetaField
s3VersionField = mkMetaFieldUnchecked "V"
eitherS3VersionID :: S3Info -> UUID -> RemoteConfig -> Key -> S3.Object -> Annex (Either String (Either S3.Object S3VersionID))
eitherS3VersionID info u c k fallback
| versioning info = getS3VersionID u k >>= return . \case
[] -> if exportTree c
then Left "Remote is configured to use versioning, but no S3 version ID is recorded for this key"
else Right (Left fallback)
-- It's possible for a key to be stored multiple times in
-- a bucket with different version IDs; only use one of them.
(v:_) -> Right (Right v)
| otherwise = return (Right (Left fallback))
s3VersionIDPublicUrl :: (S3Info -> BucketObject -> URLString) -> S3Info -> S3VersionID -> URLString
s3VersionIDPublicUrl mk info (S3VersionID obj vid) = mk info $ concat
[ T.unpack obj
, "?versionId="
, T.unpack vid -- version ID is "url ready" so no escaping needed
]
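-- For example, an object "foo" with version ID "xyzzy" in a bucket
-- "mybucket" on AWS gets the URL
-- "https://mybucket.s3.amazonaws.com/foo?versionId=xyzzy".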
getS3VersionIDPublicUrls :: (S3Info -> BucketObject -> URLString) -> S3Info -> UUID -> Key -> Annex [URLString]
getS3VersionIDPublicUrls mk info u k =
map (s3VersionIDPublicUrl mk info) <$> getS3VersionID u k
-- Enabling versioning on the bucket can only be done at init time;
-- setting versioning in a bucket that git-annex has already exported
-- files to risks losing the content of those un-versioned files.
enableBucketVersioning :: SetupStage -> RemoteConfig -> RemoteGitConfig -> UUID -> Annex ()
#if MIN_VERSION_aws(0,21,1)
enableBucketVersioning ss c gc u = do
#else
enableBucketVersioning ss c _ _ = do
#endif
info <- extractS3Info c
case ss of
Init -> when (versioning info) $
enableversioning (bucket info)
Enable oldc -> do
oldinfo <- extractS3Info oldc
when (versioning info /= versioning oldinfo) $
giveup "Cannot change versioning= of existing S3 remote."
where
enableversioning b = do
#if MIN_VERSION_aws(0,21,1)
showAction "enabling bucket versioning"
hdl <- mkS3HandleVar c gc u
withS3HandleOrFail u hdl $ \h ->
void $ liftIO $ runResourceT $ sendS3Handle h $
S3.putBucketVersioning b S3.VersioningEnabled
#else
showLongNote $ unlines
[ "This version of git-annex cannot auto-enable S3 bucket versioning."
, "You need to manually enable versioning in the S3 console"
, "for the bucket \"" ++ T.unpack b ++ "\""
, "https://docs.aws.amazon.com/AmazonS3/latest/user-guide/enable-versioning.html"
, "It's important you enable versioning before storing anything in the bucket!"
]
#endif
-- If the remote has versioning enabled, but the version ID is for some
-- reason not being recorded, it's not safe to perform an action that
-- will remove the unversioned file. The file may be the only copy of an
-- annex object.
--
-- This code could be removed eventually, since enableBucketVersioning
-- will avoid this situation. Before that was added, some remotes
-- were created without versioning, some unversioned files exported to
-- them, and then versioning enabled, and this is to avoid data loss in
-- those cases.
checkVersioning :: S3Info -> UUID -> Key -> Annex Bool -> Annex Bool
checkVersioning info u k a
| versioning info = getS3VersionID u k >>= \case
[] -> do
warning $ "Remote is configured to use versioning, but no S3 version ID is recorded for this key, so it cannot safely be modified."
return False
_ -> a
| otherwise = a