{- S3 remotes
 -
 - Copyright 2011-2020 Joey Hess <id@joeyh.name>
 -
 - Licensed under the GNU AGPL version 3 or higher.
 -}

{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE BangPatterns #-}
{-# LANGUAGE CPP #-}

module Remote.S3 (remote, iaHost, configIA, iaItemUrl) where

import qualified Aws as AWS
import qualified Aws.Core as AWS
import qualified Aws.S3 as S3
import qualified Data.Text as T
import qualified Data.Text.Encoding as T
import qualified Data.ByteString.Lazy as L
import qualified Data.ByteString as BS
import qualified Data.ByteString.Char8 as B8
import qualified Data.Map as M
import qualified Data.Set as S
import qualified System.FilePath.Posix as Posix
import Data.Char
import Data.String
import Network.Socket (HostName)
import Network.HTTP.Conduit (Manager)
import Network.HTTP.Client (responseStatus, responseBody, RequestBody(..))
import Network.HTTP.Types
import Network.URI
import Control.Monad.Trans.Resource
import Control.Monad.Catch
import Data.IORef
import System.Log.Logger
import Control.Concurrent.STM (atomically)
import Control.Concurrent.STM.TVar
import Data.Maybe

import Annex.Common
import Types.Remote
import Types.Export
import qualified Git
import qualified Annex
import Config
import Config.Cost
import Annex.SpecialRemote.Config
import Remote.Helper.Special
import Remote.Helper.Http
import Remote.Helper.Messages
import Remote.Helper.ExportImport
import Types.Import
import qualified Remote.Helper.AWS as AWS
import Creds
import Annex.UUID
import Annex.Magic
import Logs.Web
import Logs.MetaData
import Types.MetaData
import Types.ProposedAccepted
import Types.NumCopies
import Utility.Metered
import Utility.DataUnits
import Annex.Content
import qualified Annex.Url as Url
import Utility.Url (extractFromResourceT)
import Annex.Url (getUrlOptions, withUrlOptions, UrlOptions(..))
import Utility.Env

type BucketName = String
type BucketObject = String

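{- The S3 remote type. The field parsers below define the settings
 - accepted by initremote/enableremote; for example (a sketch, with
 - hypothetical remote and bucket names):
 -
 -   git annex initremote mys3 type=S3 encryption=shared \
 -           datacenter=EU bucket=my-bucket partsize=1GiB
 -}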
remote :: RemoteType
remote = specialRemoteType $ RemoteType
	{ typename = "S3"
	, enumerate = const (findSpecialRemotes "s3")
	, generate = gen
	, configParser = const $ pure $ RemoteConfigParser
		{ remoteConfigFieldParsers =
			[ optionalStringParser bucketField
				(FieldDesc "name of bucket to store content in")
			, optionalStringParser hostField
				(FieldDesc "S3 server hostname (default is Amazon S3)")
			, optionalStringParser datacenterField
				(FieldDesc "S3 datacenter to use (US, EU, us-west-1, ..)")
			, optionalStringParser partsizeField
				(FieldDesc "part size for multipart upload (eg 1GiB)")
			, optionalStringParser storageclassField
				(FieldDesc "storage class, eg STANDARD or STANDARD_IA or ONEZONE_IA")
			, optionalStringParser fileprefixField
				(FieldDesc "prefix to add to filenames in the bucket")
			, yesNoParser versioningField (Just False)
				(FieldDesc "enable versioning of bucket content")
			, yesNoParser publicField (Just False)
				(FieldDesc "allow public read access to the bucket")
			, optionalStringParser publicurlField
				(FieldDesc "url that can be used by public to download files")
			, optionalStringParser protocolField
				(FieldDesc "http or https")
			, optionalStringParser portField
				(FieldDesc "port to connect to")
			, optionalStringParser requeststyleField
				(FieldDesc "for path-style requests, set to \"path\"")
			, signatureVersionParser signatureField
				(FieldDesc "S3 signature version")
			, optionalStringParser mungekeysField HiddenField
			, optionalStringParser AWS.s3credsField HiddenField
			]
		, remoteConfigRestPassthrough = Just
			( \f -> isMetaHeader f || isArchiveMetaHeader f
			,
			  [ ("x-amz-meta-*", FieldDesc "http headers to add when storing on S3")
			  , ("x-archive-meta-*", FieldDesc "http headers to add when storing on Internet Archive")
			  ]
			)
		}
	, setup = s3Setup
	, exportSupported = exportIsSupported
	, importSupported = importIsSupported
	}

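{- Names of the configuration fields, as stored in the RemoteConfig map. -}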
bucketField :: RemoteConfigField
bucketField = Accepted "bucket"

hostField :: RemoteConfigField
hostField = Accepted "host"

datacenterField :: RemoteConfigField
datacenterField = Accepted "datacenter"

partsizeField :: RemoteConfigField
partsizeField = Accepted "partsize"

storageclassField :: RemoteConfigField
storageclassField = Accepted "storageclass"

fileprefixField :: RemoteConfigField
fileprefixField = Accepted "fileprefix"

versioningField :: RemoteConfigField
versioningField = Accepted "versioning"

publicField :: RemoteConfigField
publicField = Accepted "public"

publicurlField :: RemoteConfigField
publicurlField = Accepted "publicurl"

protocolField :: RemoteConfigField
protocolField = Accepted "protocol"

requeststyleField :: RemoteConfigField
requeststyleField = Accepted "requeststyle"

signatureField :: RemoteConfigField
signatureField = Accepted "signature"

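{- The signature= setting selects between S3 signature versions 2 and 4
 - (eg signature=v4); only "v2" and "v4" parse, and v2 is the default. -}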
newtype SignatureVersion = SignatureVersion Int

signatureVersionParser :: RemoteConfigField -> FieldDesc -> RemoteConfigFieldParser
signatureVersionParser f fd =
	genParser go f (Just defver) fd
		(Just (ValueDesc "v2 or v4"))
  where
	go "v2" = Just (SignatureVersion 2)
	go "v4" = Just (SignatureVersion 4)
	go _ = Nothing

	defver = SignatureVersion 2

portField :: RemoteConfigField
portField = Accepted "port"

mungekeysField :: RemoteConfigField
mungekeysField = Accepted "mungekeys"
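{- Generates a Remote from an already stored RemoteConfig. The S3
 - handle is created on demand via mkS3HandleVar, so generating the
 - remote does not itself need creds or network access. -}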
gen :: Git.Repo -> UUID -> RemoteConfig -> RemoteGitConfig -> RemoteStateHandle -> Annex (Maybe Remote)
gen r u rc gc rs = do
	c <- parsedRemoteConfig remote rc
	cst <- remoteCost gc expensiveRemoteCost
	info <- extractS3Info c
	hdl <- mkS3HandleVar c gc u
	magic <- liftIO initMagicMime
	return $ new c cst info hdl magic
  where
	new c cst info hdl magic = Just $ specialRemote c
		(store hdl this info magic)
		(retrieve hdl this rs c info)
		(remove hdl this info)
		(checkKey hdl this rs c info)
		this
	  where
		this = Remote
			{ uuid = u
			, cost = cst
			, name = Git.repoDescribe r
			, storeKey = storeKeyDummy
			, retrieveKeyFile = retrieveKeyFileDummy
			, retrieveKeyFileCheap = Nothing
			-- HttpManagerRestricted is used here, so this is
			-- secure.
			, retrievalSecurityPolicy = RetrievalAllKeysSecure
			, removeKey = removeKeyDummy
			, lockContent = lockContentS3 hdl this rs c info
			, checkPresent = checkPresentDummy
			, checkPresentCheap = False
			, exportActions = ExportActions
				{ storeExport = storeExportS3 hdl this rs info magic
				, retrieveExport = retrieveExportS3 hdl this info
				, removeExport = removeExportS3 hdl this rs info
				, checkPresentExport = checkPresentExportS3 hdl this info
				-- S3 does not have directories.
				, removeExportDirectory = Nothing
				, renameExport = renameExportS3 hdl this rs info
				}
			, importActions = ImportActions
				{ listImportableContents = listImportableContentsS3 hdl this info
				, importKey = Nothing
				, retrieveExportWithContentIdentifier = retrieveExportWithContentIdentifierS3 hdl this rs info
				, storeExportWithContentIdentifier = storeExportWithContentIdentifierS3 hdl this rs info magic
				, removeExportWithContentIdentifier = removeExportWithContentIdentifierS3 hdl this rs info
				, removeExportDirectoryWhenEmpty = Nothing
				, checkPresentExportWithContentIdentifier = checkPresentExportWithContentIdentifierS3 hdl this info
				}
			, whereisKey = Just (getPublicWebUrls u rs info c)
			, remoteFsck = Nothing
			, repairRepo = Nothing
			, config = c
			, getRepo = return r
			, gitconfig = gc
			, localpath = Nothing
			, readonly = False
			, appendonly = versioning info
			, availability = GloballyAvailable
			, remotetype = remote
			, mkUnavailable = gen r u (M.insert hostField (Proposed "!dne!") rc) gc rs
			, getInfo = includeCredsInfo c (AWS.creds u) (s3Info c info)
			, claimUrl = Nothing
			, checkUrl = Nothing
			, remoteStateHandle = rs
			}
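{- Sets up an S3 special remote, generating a UUID when one was not
 - provided (as when enabling an existing remote). Internet Archive
 - hosts get dedicated handling in s3Setup'. -}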
s3Setup :: SetupStage -> Maybe UUID -> Maybe CredPair -> RemoteConfig -> RemoteGitConfig -> Annex (RemoteConfig, UUID)
s3Setup ss mu mcreds c gc = do
	u <- maybe (liftIO genUUID) return mu
	s3Setup' ss u mcreds c gc

s3Setup' :: SetupStage -> UUID -> Maybe CredPair -> RemoteConfig -> RemoteGitConfig -> Annex (RemoteConfig, UUID)
s3Setup' ss u mcreds c gc
	| maybe False (isIAHost . fromProposedAccepted) (M.lookup hostField c) = archiveorg
	| otherwise = defaulthost
  where
	remotename = fromJust (lookupName c)
	defbucket = remotename ++ "-" ++ fromUUID u
	defaults = M.fromList
		[ (datacenterField, Proposed $ T.unpack $ AWS.defaultRegion AWS.S3)
		, (storageclassField, Proposed "STANDARD")
		, (hostField, Proposed AWS.s3DefaultHost)
		, (portField, Proposed "80")
		, (bucketField, Proposed defbucket)
		]

	use fullconfig pc info = do
		enableBucketVersioning ss info pc gc u
		gitConfigSpecialRemote u fullconfig [("s3", "true")]
		return (fullconfig, u)

	defaulthost = do
		(c', encsetup) <- encryptionSetup (c `M.union` defaults) gc
		pc <- either giveup return . parseRemoteConfig c'
			=<< configParser remote c'
		c'' <- setRemoteCredPair encsetup pc gc (AWS.creds u) mcreds
		pc' <- either giveup return . parseRemoteConfig c''
			=<< configParser remote c''
		info <- extractS3Info pc'
		checkexportimportsafe pc' info
		case ss of
			Init -> genBucket pc' gc u
			_ -> return ()
		use c'' pc' info

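	-- The Internet Archive's S3 shim is special-cased: the bucket
	-- name doubles as the archive.org item name, encryption is not
	-- useful there, and x-archive-* headers (aliased to x-amz-*
	-- below) control item metadata.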
	archiveorg = do
		showNote "Internet Archive mode"
		pc <- either giveup return . parseRemoteConfig c
			=<< configParser remote c
		c' <- setRemoteCredPair noEncryptionUsed pc gc (AWS.creds u) mcreds
		-- Ensure user enters a valid bucket name, since
		-- this determines the name of the archive.org item.
		let validbucket = replace " " "-" $ map toLower $
			maybe (giveup "specify bucket=") fromProposedAccepted
				(M.lookup bucketField c')
		let archiveconfig =
			-- IA accepts x-amz-* as an alias for x-archive-*
			M.mapKeys (Proposed . replace "x-archive-" "x-amz-" . fromProposedAccepted) $
			-- encryption does not make sense here
			M.insert encryptionField (Proposed "none") $
			M.insert bucketField (Proposed validbucket) $
			M.union c' $
			-- special constraints on key names
			M.insert mungekeysField (Proposed "ia") defaults
		pc' <- either giveup return . parseRemoteConfig archiveconfig
			=<< configParser remote archiveconfig
		info <- extractS3Info pc'
		checkexportimportsafe pc' info
		hdl <- mkS3HandleVar pc' gc u
		withS3HandleOrFail u hdl $
			writeUUIDFile pc' u info
		use archiveconfig pc' info

	checkexportimportsafe c' info =
		unlessM (Annex.getState Annex.force) $
			checkexportimportsafe' c' info
	checkexportimportsafe' c' info
		| versioning info = return ()
		| otherwise = when (exportTree c' && importTree c') $
			giveup $ unwords
				[ "Combining exporttree=yes and importtree=yes"
				, "with an unversioned S3 bucket is not safe;"
				, "exporting can overwrite other modifications"
				, "to files in the bucket."
				, "Recommend you add versioning=yes."
				, "(Or use --force if you don't mind possibly losing data.)"
				]
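{- Stores a key's content in the bucket. For Internet Archive items,
 - the public URL of the uploaded object is also recorded, so it can
 - later be retrieved without creds. -}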
store :: S3HandleVar -> Remote -> S3Info -> Maybe Magic -> Storer
store mh r info magic = fileStorer $ \k f p -> withS3HandleOrFail (uuid r) mh $ \h -> do
	void $ storeHelper info h magic f (T.pack $ bucketObject info k) p
	-- Store public URL to item in Internet Archive.
	when (isIA info && not (isChunkKey k)) $
		setUrlPresent k (iaPublicUrl info (bucketObject info k))
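{- Uploads with a single request, unless the file is larger than the
 - configured partsize, in which case a multipart upload is done,
 - with each part rounded down to a whole multiple of the ~32k lazy
 - ByteString chunk size so parts stream in whole chunks. Returns the
 - ETag (only available from the aws library from version 0.22 on,
 - hence the CPP) and, on versioned buckets, the S3 version ID. -}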
storeHelper :: S3Info -> S3Handle -> Maybe Magic -> FilePath -> S3.Object -> MeterUpdate -> Annex (Maybe S3Etag, Maybe S3VersionID)
storeHelper info h magic f object p = liftIO $ case partSize info of
	Just partsz | partsz > 0 -> do
		fsz <- getFileSize f
		if fsz > partsz
			then multipartupload fsz partsz
			else singlepartupload
	_ -> singlepartupload
  where
	singlepartupload = runResourceT $ do
		contenttype <- liftIO getcontenttype
		rbody <- liftIO $ httpBodyStorer f p
		let req = (putObject info object rbody)
			{ S3.poContentType = encodeBS <$> contenttype }
		resp <- sendS3Handle h req
		vid <- mkS3VersionID object
			<$> extractFromResourceT (S3.porVersionId resp)
#if MIN_VERSION_aws(0,22,0)
		etag <- extractFromResourceT (Just (S3.porETag resp))
		return (etag, vid)
#else
		return (Nothing, vid)
#endif
	multipartupload fsz partsz = runResourceT $ do
		contenttype <- liftIO getcontenttype
		let startreq = (S3.postInitiateMultipartUpload (bucket info) object)
			{ S3.imuStorageClass = Just (storageClass info)
			, S3.imuMetadata = metaHeaders info
			, S3.imuAutoMakeBucket = isIA info
			, S3.imuExpires = Nothing -- TODO set some reasonable expiry
			, S3.imuContentType = fromString <$> contenttype
			}
		uploadid <- S3.imurUploadId <$> sendS3Handle h startreq

		-- The actual part size will be an even multiple of the
		-- 32k chunk size that lazy ByteStrings use.
		let partsz' = (partsz `div` toInteger defaultChunkSize) * toInteger defaultChunkSize

		-- Send parts of the file, taking care to stream each part
		-- w/o buffering in memory, since the parts can be large.
		etags <- bracketIO (openBinaryFile f ReadMode) hClose $ \fh -> do
			let sendparts meter etags partnum = do
				pos <- liftIO $ hTell fh
				if pos >= fsz
					then return (reverse etags)
					else do
						-- Calculate size of part that will
						-- be read.
						let sz = min (fsz - pos) partsz'
						let p' = offsetMeterUpdate p (toBytesProcessed pos)
						let numchunks = ceiling (fromIntegral sz / fromIntegral defaultChunkSize :: Double)
						let popper = handlePopper numchunks defaultChunkSize p' fh
						let req = S3.uploadPart (bucket info) object partnum uploadid $
							RequestBodyStream (fromIntegral sz) popper
						S3.UploadPartResponse { S3.uprETag = etag } <- sendS3Handle h req
						sendparts (offsetMeterUpdate meter (toBytesProcessed sz)) (etag:etags) (partnum + 1)
			sendparts p [] 1

		resp <- sendS3Handle h $ S3.postCompleteMultipartUpload
			(bucket info) object uploadid (zip [1..] etags)
		etag <- extractFromResourceT (Just (S3.cmurETag resp))
		vid <- extractFromResourceT (S3.cmurVersionId resp)
		return (etag, mkS3VersionID object vid)
	getcontenttype = maybe (pure Nothing) (flip getMagicMimeType f) magic
{- Implemented as a fileRetriever that uses conduit to stream the chunks
 - out to the file. Would be better to implement a byteRetriever, but
 - that is difficult. -}
retrieve :: S3HandleVar -> Remote -> RemoteStateHandle -> ParsedRemoteConfig -> S3Info -> Retriever
retrieve hv r rs c info = fileRetriever $ \f k p -> withS3Handle hv $ \case
	(Just h) ->
		eitherS3VersionID info rs c k (T.pack $ bucketObject info k) >>= \case
			Left failreason -> do
				warning failreason
				giveup "cannot download content"
			Right loc -> retrieveHelper info h loc f p
	Nothing ->
		-- No S3 creds are available, so fall back to downloading
		-- from a public url, if the remote is configured with one.
		getPublicWebUrls' (uuid r) rs info c k >>= \case
			Left failreason -> do
				warning failreason
				giveup "cannot download content"
			Right us -> unlessM (withUrlOptions $ downloadUrl k p us f) $
				giveup "failed to download content"

retrieveHelper :: S3Info -> S3Handle -> (Either S3.Object S3VersionID) -> FilePath -> MeterUpdate -> Annex ()
retrieveHelper info h loc f p = retrieveHelper' h f p $
    case loc of
        Left o -> S3.getObject (bucket info) o
        Right (S3VersionID o vid) -> (S3.getObject (bucket info) o)
            { S3.goVersionId = Just vid }

retrieveHelper' :: S3Handle -> FilePath -> MeterUpdate -> S3.GetObject -> Annex ()
retrieveHelper' h f p req = liftIO $ runResourceT $ do
    S3.GetObjectResponse { S3.gorResponse = rsp } <- sendS3Handle h req
    Url.sinkResponseFile p zeroBytesProcessed f WriteMode rsp
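
-- A minimal sketch (not called elsewhere; the object name, version ID,
-- and use of nullMeterUpdate are hypothetical) of how a specific version
-- of an object is requested: setting S3.goVersionId turns a plain GET
-- into a versioned GET.
--
-- > retrieveHelper' h "/tmp/out" nullMeterUpdate $
-- >     (S3.getObject "mybucket" "foo")
-- >         { S3.goVersionId = Just "3HL4kqCxf3vjVBH40Nrjfkd" }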

remove :: S3HandleVar -> Remote -> S3Info -> Remover
remove hv r info k = withS3HandleOrFail (uuid r) hv $ \h -> do
    S3.DeleteObjectResponse <- liftIO $ runResourceT $ sendS3Handle h $
        S3.DeleteObject (T.pack $ bucketObject info k) (bucket info)
    return ()

lockContentS3 :: S3HandleVar -> Remote -> RemoteStateHandle -> ParsedRemoteConfig -> S3Info -> Maybe (Key -> (VerifiedCopy -> Annex a) -> Annex a)
lockContentS3 hv r rs c info
    -- When versioning is enabled, content is never removed from the
    -- remote, so nothing needs to be done to lock the content there,
    -- beyond a sanity check that the content is in fact present.
    | versioning info = Just $ \k callback -> do
        checkVersioning info rs k
        ifM (checkKey hv r rs c info k)
            ( withVerifiedCopy LockedCopy (uuid r) (return True) callback
            , giveup $ "content seems to be missing from " ++ name r ++ " despite S3 versioning being enabled"
            )
    | otherwise = Nothing

checkKey :: S3HandleVar -> Remote -> RemoteStateHandle -> ParsedRemoteConfig -> S3Info -> CheckPresent
checkKey hv r rs c info k = withS3Handle hv $ \case
    Just h -> do
        showChecking r
        eitherS3VersionID info rs c k (T.pack $ bucketObject info k) >>= \case
            Left failreason -> do
                warning failreason
                giveup "cannot check content"
            Right loc -> checkKeyHelper info h loc
    Nothing ->
        getPublicWebUrls' (uuid r) rs info c k >>= \case
            Left failreason -> do
                warning failreason
                giveup "cannot check content"
            Right us -> do
                showChecking r
                let check u = withUrlOptions $
                        Url.checkBoth u (fromKey keySize k)
                anyM check us

checkKeyHelper :: S3Info -> S3Handle -> (Either S3.Object S3VersionID) -> Annex Bool
checkKeyHelper info h loc = checkKeyHelper' info h o limit
  where
    (o, limit) = case loc of
        Left obj -> (obj, id)
        Right (S3VersionID obj vid) ->
            (obj, \ho -> ho { S3.hoVersionId = Just vid })

checkKeyHelper' :: S3Info -> S3Handle -> S3.Object -> (S3.HeadObject -> S3.HeadObject) -> Annex Bool
checkKeyHelper' info h o limit = liftIO $ runResourceT $ do
    rsp <- sendS3Handle h req
    extractFromResourceT (isJust $ S3.horMetadata rsp)
  where
    req = limit $ S3.headObject (bucket info) o
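
    -- An illustrative sketch (hypothetical version ID) of the kind of
    -- limit function passed in when checking a versioned object: it
    -- narrows the HEAD request to a single version, e.g.
    --
    -- > \ho -> ho { S3.hoVersionId = Just "someversionid" }
    --
    -- With limit = id, the HEAD request checks the current version.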

storeExportS3 :: S3HandleVar -> Remote -> RemoteStateHandle -> S3Info -> Maybe Magic -> FilePath -> Key -> ExportLocation -> MeterUpdate -> Annex ()
storeExportS3 hv r rs info magic f k loc p = void $ storeExportS3' hv r rs info magic f k loc p

storeExportS3' :: S3HandleVar -> Remote -> RemoteStateHandle -> S3Info -> Maybe Magic -> FilePath -> Key -> ExportLocation -> MeterUpdate -> Annex (Maybe S3Etag, Maybe S3VersionID)
storeExportS3' hv r rs info magic f k loc p = withS3Handle hv $ \case
    Just h -> go h
    Nothing -> giveup $ needS3Creds (uuid r)
  where
    go h = do
        let o = T.pack $ bucketExportLocation info loc
        (metag, mvid) <- storeHelper info h magic f o p
        setS3VersionID info rs k mvid
        return (metag, mvid)

retrieveExportS3 :: S3HandleVar -> Remote -> S3Info -> Key -> ExportLocation -> FilePath -> MeterUpdate -> Annex ()
retrieveExportS3 hv r info _k loc f p = do
    withS3Handle hv $ \case
        Just h -> retrieveHelper info h (Left (T.pack exportloc)) f p
        Nothing -> case getPublicUrlMaker info of
            Just geturl -> either giveup return =<<
                Url.withUrlOptions
                    (Url.download' p (geturl exportloc) f)
            Nothing -> giveup $ needS3Creds (uuid r)
  where
    exportloc = bucketExportLocation info loc

removeExportS3 :: S3HandleVar -> Remote -> RemoteStateHandle -> S3Info -> Key -> ExportLocation -> Annex ()
removeExportS3 hv r rs info k loc = withS3Handle hv $ \case
    Just h -> do
        checkVersioning info rs k
        liftIO $ runResourceT $ do
            S3.DeleteObjectResponse <- sendS3Handle h $
                S3.DeleteObject (T.pack $ bucketExportLocation info loc) (bucket info)
            return ()
    Nothing -> giveup $ needS3Creds (uuid r)

checkPresentExportS3 :: S3HandleVar -> Remote -> S3Info -> Key -> ExportLocation -> Annex Bool
checkPresentExportS3 hv r info k loc = withS3Handle hv $ \case
    Just h -> checkKeyHelper info h (Left (T.pack $ bucketExportLocation info loc))
    Nothing -> case getPublicUrlMaker info of
        Just geturl -> withUrlOptions $
            Url.checkBoth (geturl $ bucketExportLocation info loc) (fromKey keySize k)
        Nothing -> do
            warning $ needS3Creds (uuid r)
            giveup "No S3 credentials configured"

-- S3 has no move primitive; copy and delete.
renameExportS3 :: S3HandleVar -> Remote -> RemoteStateHandle -> S3Info -> Key -> ExportLocation -> ExportLocation -> Annex (Maybe ())
renameExportS3 hv r rs info k src dest = Just <$> go
  where
    go = withS3Handle hv $ \case
        Just h -> do
            checkVersioning info rs k
            go' h
        Nothing -> giveup $ needS3Creds (uuid r)

    go' h = liftIO $ runResourceT $ do
        let co = S3.copyObject (bucket info) dstobject
                (S3.ObjectId (bucket info) srcobject Nothing)
                S3.CopyMetadata
        -- ACL is not preserved by copy.
        void $ sendS3Handle h $ co { S3.coAcl = acl info }
        void $ sendS3Handle h $ S3.DeleteObject srcobject (bucket info)

    srcobject = T.pack $ bucketExportLocation info src
    dstobject = T.pack $ bucketExportLocation info dest

listImportableContentsS3 :: S3HandleVar -> Remote -> S3Info -> Annex (Maybe (ImportableContents (ContentIdentifier, ByteSize)))
listImportableContentsS3 hv r info =
    withS3Handle hv $ \case
        Nothing -> do
            warning $ needS3Creds (uuid r)
            return Nothing
        Just h -> catchMaybeIO $ liftIO $ runResourceT $
            extractFromResourceT =<< startlist h
  where
    startlist h
        | versioning info = do
            rsp <- sendS3Handle h $
                S3.getBucketObjectVersions (bucket info)
            continuelistversioned h [] rsp
        | otherwise = do
            rsp <- sendS3Handle h $
                S3.getBucket (bucket info)
            continuelistunversioned h [] rsp

    continuelistunversioned h l rsp
        | S3.gbrIsTruncated rsp = do
            rsp' <- sendS3Handle h $
                (S3.getBucket (bucket info))
                    { S3.gbMarker = S3.gbrNextMarker rsp
                    }
            continuelistunversioned h (rsp:l) rsp'
        | otherwise = return $
            mkImportableContentsUnversioned info (reverse (rsp:l))

    continuelistversioned h l rsp
        | S3.gbovrIsTruncated rsp = do
            rsp' <- sendS3Handle h $
                (S3.getBucketObjectVersions (bucket info))
                    { S3.gbovKeyMarker = S3.gbovrNextKeyMarker rsp
                    , S3.gbovVersionIdMarker = S3.gbovrNextVersionIdMarker rsp
                    }
            continuelistversioned h (rsp:l) rsp'
        | otherwise = return $
            mkImportableContentsVersioned info (reverse (rsp:l))

mkImportableContentsUnversioned :: S3Info -> [S3.GetBucketResponse] -> ImportableContents (ContentIdentifier, ByteSize)
mkImportableContentsUnversioned info l = ImportableContents
    { importableContents = concatMap (mapMaybe extract . S3.gbrContents) l
    , importableHistory = []
    }
  where
    extract oi = do
        loc <- bucketImportLocation info $
            T.unpack $ S3.objectKey oi
        let sz = S3.objectSize oi
        let cid = mkS3UnversionedContentIdentifier $ S3.objectETag oi
        return (loc, (cid, sz))

mkImportableContentsVersioned :: S3Info -> [S3.GetBucketObjectVersionsResponse] -> ImportableContents (ContentIdentifier, ByteSize)
mkImportableContentsVersioned info = build . groupfiles
  where
    build [] = ImportableContents [] []
    build l =
        let (l', v) = latestversion l
        in ImportableContents
            { importableContents = mapMaybe extract v
            , importableHistory = case build l' of
                ImportableContents [] [] -> []
                h -> [h]
            }

    extract ovi@(S3.ObjectVersion {}) = do
        loc <- bucketImportLocation info $
            T.unpack $ S3.oviKey ovi
        let sz = S3.oviSize ovi
        let cid = mkS3VersionedContentIdentifier' ovi
        return (loc, (cid, sz))
    extract (S3.DeleteMarker {}) = Nothing

    -- group files so all versions of a file are in a sublist,
    -- with the newest first. S3 uses such an order, so it's just a
    -- matter of breaking up the response list into sublists.
    groupfiles = groupBy (\a b -> S3.oviKey a == S3.oviKey b)
        . concatMap S3.gbovrContents

    latestversion [] = ([], [])
    latestversion ([]:rest) = latestversion rest
    latestversion l@((first:_old):remainder) =
        go (S3.oviLastModified first) [first] remainder
      where
        go mtime c [] = (removemostrecent mtime l, reverse c)
        go mtime c ([]:rest) = go mtime c rest
        go mtime c ((latest:_old):rest) =
            let !mtime' = max mtime (S3.oviLastModified latest)
            in go mtime' (latest:c) rest

    removemostrecent _ [] = []
    removemostrecent mtime ([]:rest) = removemostrecent mtime rest
    removemostrecent mtime (i@(curr:old):rest)
        | S3.oviLastModified curr == mtime =
            old : removemostrecent mtime rest
        | otherwise =
            i : removemostrecent mtime rest
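
-- A small worked example of the grouping above (hypothetical object
-- names and times): suppose the listing yields versions a3, a2, a1 of
-- object "a" (modified at t3 > t2 > t1) and b1 of object "b" (modified
-- at t1). groupfiles produces [[a3,a2,a1],[b1]]. latestversion takes the
-- head of each group as the current tree, [a3,b1], and removemostrecent
-- peels off only the head with the newest modification time (a3), so
-- build recurses on [[a2,a1],[b1]], yielding history layers [a2,b1] and
-- then [a1,b1].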

retrieveExportWithContentIdentifierS3 :: S3HandleVar -> Remote -> RemoteStateHandle -> S3Info -> ExportLocation -> ContentIdentifier -> FilePath -> Annex Key -> MeterUpdate -> Annex Key
retrieveExportWithContentIdentifierS3 hv r rs info loc cid dest mkkey p = withS3Handle hv $ \case
    Just h -> do
        rewritePreconditionException $ retrieveHelper' h dest p $
            limitGetToContentIdentifier cid $
                S3.getObject (bucket info) o
        k <- mkkey
        case extractContentIdentifier cid o of
            Right vid -> setS3VersionID info rs k vid
            Left _ -> noop
        return k
    Nothing -> giveup $ needS3Creds (uuid r)
  where
    o = T.pack $ bucketExportLocation info loc

{- Catch the exception getObject returns when a precondition is not met,
 - and replace it with a more understandable message for the user. -}
rewritePreconditionException :: Annex a -> Annex a
rewritePreconditionException a = catchJust (Url.matchStatusCodeException want) a $
    const $ giveup "requested version of object is not available in S3 bucket"
  where
    want st = statusCode st == 412 &&
        statusMessage st == "Precondition Failed"

-- Does not check if content on S3 is safe to overwrite, because there
-- is no atomic way to do so. When the bucket is versioned, this is
-- acceptable because listImportableContentsS3 will find versions
-- of files that were overwritten by this and no data is lost.
--
-- When the bucket is not versioned, data loss can result.
-- This is why that configuration requires --force to enable.
storeExportWithContentIdentifierS3 :: S3HandleVar -> Remote -> RemoteStateHandle -> S3Info -> Maybe Magic -> FilePath -> Key -> ExportLocation -> [ContentIdentifier] -> MeterUpdate -> Annex ContentIdentifier
storeExportWithContentIdentifierS3 hv r rs info magic src k loc _overwritablecids p
    | versioning info = go
#if MIN_VERSION_aws(0,22,0)
    | otherwise = go
#else
    | otherwise = giveup "git-annex is built with too old a version of the aws library to support this operation"
#endif
  where
    go = storeExportS3' hv r rs info magic src k loc p >>= \case
        (_, Just vid) -> return $
            mkS3VersionedContentIdentifier vid
        (Just etag, Nothing) -> return $
            mkS3UnversionedContentIdentifier etag
        (Nothing, Nothing) ->
            giveup "did not get ETag for store to S3 bucket"

-- Does not guarantee that the removed object has the content identifier,
-- but when the bucket is versioned, the removed object content can still
-- be recovered (and listImportableContentsS3 will find it).
--
-- When the bucket is not versioned, data loss can result.
-- This is why that configuration requires --force to enable.
removeExportWithContentIdentifierS3 :: S3HandleVar -> Remote -> RemoteStateHandle -> S3Info -> Key -> ExportLocation -> [ContentIdentifier] -> Annex ()
removeExportWithContentIdentifierS3 hv r rs info k loc _removeablecids =
    removeExportS3 hv r rs info k loc

checkPresentExportWithContentIdentifierS3 :: S3HandleVar -> Remote -> S3Info -> Key -> ExportLocation -> [ContentIdentifier] -> Annex Bool
checkPresentExportWithContentIdentifierS3 hv r info _k loc knowncids =
    withS3Handle hv $ \case
        Just h -> flip anyM knowncids $
            checkKeyHelper' info h o . limitHeadToContentIdentifier
        Nothing -> do
            warning $ needS3Creds (uuid r)
            giveup "No S3 credentials configured"
  where
    o = T.pack $ bucketExportLocation info loc

{- Generate the bucket if it does not already exist, including creating the
 - UUID file within the bucket.
 -
 - Some ACLs can allow read/write to buckets, but not querying them,
 - so first check if the UUID file already exists, in which case
 - creating it can be skipped.
 -}
genBucket :: ParsedRemoteConfig -> RemoteGitConfig -> UUID -> Annex ()
genBucket c gc u = do
    showAction "checking bucket"
    info <- extractS3Info c
    hdl <- mkS3HandleVar c gc u
    withS3HandleOrFail u hdl $ \h ->
        go info h =<< checkUUIDFile c u info h
  where
    go _ _ (Right True) = noop
    go info h _ = do
        r <- liftIO $ tryNonAsync $ runResourceT $ do
            void $ sendS3Handle h (S3.getBucket $ bucket info)
            return True
        case r of
            Right True -> noop
            _ -> do
                showAction $ "creating bucket in " ++ datacenter
                void $ liftIO $ runResourceT $ sendS3Handle h $
                    (S3.putBucket (bucket info))
                        { S3.pbCannedAcl = acl info
                        , S3.pbLocationConstraint = locconstraint
                        , S3.pbXStorageClass = storageclass
                        }
        writeUUIDFile c u info h

    locconstraint = mkLocationConstraint $ T.pack datacenter
    datacenter = fromJust $ getRemoteConfigValue datacenterField c
    -- "NEARLINE" as a storage class when creating a bucket is a
    -- nonstandard extension of Google Cloud Storage.
    storageclass = case getStorageClass c of
        sc@(S3.OtherStorageClass "NEARLINE") -> Just sc
        _ -> Nothing

{- Writes the UUID to an annex-uuid file within the bucket.
 -
 - If the file already exists in the bucket, it must match,
 - or this fails.
 -
 - Note that IA buckets can only be created by having a file
 - stored in them. So this also takes care of that.
 -}
writeUUIDFile :: ParsedRemoteConfig -> UUID -> S3Info -> S3Handle -> Annex ()
writeUUIDFile c u info h = do
    v <- checkUUIDFile c u info h
    case v of
        Right True -> noop
        Right False -> do
            warning "The bucket already exists, and its annex-uuid file indicates it is used by a different special remote."
            giveup "Cannot reuse this bucket."
        _ -> void $ liftIO $ runResourceT $ sendS3Handle h mkobject
  where
    file = T.pack $ uuidFile c
    uuidb = L.fromChunks [T.encodeUtf8 $ T.pack $ fromUUID u]

    mkobject = putObject info file (RequestBodyLBS uuidb)

{- Checks if the UUID file exists in the bucket
 - and has the specified UUID already. -}
checkUUIDFile :: ParsedRemoteConfig -> UUID -> S3Info -> S3Handle -> Annex (Either SomeException Bool)
checkUUIDFile c u info h = tryNonAsync $ liftIO $ runResourceT $ do
    resp <- tryS3 $ sendS3Handle h (S3.getObject (bucket info) file)
    case resp of
        Left _ -> return False
        Right r -> do
            v <- AWS.loadToMemory r
            extractFromResourceT (check v)
  where
    check (S3.GetObjectMemoryResponse _meta rsp) =
        responseStatus rsp == ok200 && responseBody rsp == uuidb

    file = T.pack $ uuidFile c
    uuidb = L.fromChunks [T.encodeUtf8 $ T.pack $ fromUUID u]

uuidFile :: ParsedRemoteConfig -> FilePath
uuidFile c = getFilePrefix c ++ "annex-uuid"

tryS3 :: ResourceT IO a -> ResourceT IO (Either S3.S3Error a)
tryS3 a = (Right <$> a) `catch` (pure . Left)

data S3Handle = S3Handle
    { hmanager :: Manager
    , hawscfg :: AWS.Configuration
    , hs3cfg :: S3.S3Configuration AWS.NormalQuery
    }

{- Sends a request to S3 and gets back the response. -}
sendS3Handle
    :: (AWS.Transaction r a, AWS.ServiceConfiguration r ~ S3.S3Configuration)
    => S3Handle
    -> r
    -> ResourceT IO a
sendS3Handle h r = AWS.pureAws (hawscfg h) (hs3cfg h) (hmanager h) r

type S3HandleVar = TVar (Either (Annex (Maybe S3Handle)) (Maybe S3Handle))

{- Prepares a S3Handle for later use. Does not connect to S3 or do anything
 - else expensive. -}
mkS3HandleVar :: ParsedRemoteConfig -> RemoteGitConfig -> UUID -> Annex S3HandleVar
mkS3HandleVar c gc u = liftIO $ newTVarIO $ Left $ do
    mcreds <- getRemoteCredPair c gc (AWS.creds u)
    case mcreds of
        Just creds -> do
            awscreds <- liftIO $ genCredentials creds
            let awscfg = AWS.Configuration AWS.Timestamp awscreds debugMapper Nothing
            ou <- getUrlOptions
            return $ Just $ S3Handle (httpManager ou) awscfg s3cfg
        Nothing -> return Nothing
  where
    s3cfg = s3Configuration c

withS3Handle :: S3HandleVar -> (Maybe S3Handle -> Annex a) -> Annex a
withS3Handle hv a = liftIO (readTVarIO hv) >>= \case
    Right hdl -> a hdl
    Left mkhdl -> do
        hdl <- mkhdl
        liftIO $ atomically $ writeTVar hv (Right hdl)
        a hdl

withS3HandleOrFail :: UUID -> S3HandleVar -> (S3Handle -> Annex a) -> Annex a
withS3HandleOrFail u hv a = withS3Handle hv $ \case
    Just hdl -> a hdl
    Nothing -> do
        warning $ needS3Creds u
        giveup "No S3 credentials configured"
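
-- A minimal usage sketch of the handle workflow (it mirrors how genBucket
-- uses these functions; "mybucket" is a hypothetical bucket name). The
-- handle is constructed lazily on first use and cached in the TVar, so
-- later withS3Handle calls hit the Right branch.
--
-- > hdl <- mkS3HandleVar c gc u
-- > withS3HandleOrFail u hdl $ \h ->
-- >     liftIO $ runResourceT $ void $ sendS3Handle h (S3.getBucket "mybucket")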

needS3Creds :: UUID -> String
needS3Creds u = missingCredPairFor "S3" (AWS.creds u)

s3Configuration :: ParsedRemoteConfig -> S3.S3Configuration AWS.NormalQuery
s3Configuration c = cfg
    { S3.s3Port = port
    , S3.s3RequestStyle = case getRemoteConfigValue requeststyleField c of
        Just "path" -> S3.PathStyle
        Just s -> giveup $ "bad S3 requeststyle value: " ++ s
        Nothing -> S3.s3RequestStyle cfg
    }
  where
    h = fromJust $ getRemoteConfigValue hostField c
    datacenter = fromJust $ getRemoteConfigValue datacenterField c
    -- When the default S3 host is configured, connect directly to
    -- the S3 endpoint for the configured datacenter.
    -- When another host is configured, it's used as-is.
    endpoint
        | h == AWS.s3DefaultHost = AWS.s3HostName $ T.pack datacenter
        | otherwise = T.encodeUtf8 $ T.pack h
    port = case getRemoteConfigValue portField c of
        Just s ->
            case reads s of
                [(p, _)]
                    -- Let protocol setting override
                    -- default port 80.
                    | p == 80 -> case cfgproto of
                        Just AWS.HTTPS -> 443
                        _ -> p
                    | otherwise -> p
                _ -> giveup $ "bad S3 port value: " ++ s
        Nothing -> case cfgproto of
            Just AWS.HTTPS -> 443
            Just AWS.HTTP -> 80
            Nothing -> 80
    cfgproto = case getRemoteConfigValue protocolField c of
        Just "https" -> Just AWS.HTTPS
        Just "http" -> Just AWS.HTTP
        Just s -> giveup $ "bad S3 protocol value: " ++ s
        Nothing -> Nothing
    proto = case cfgproto of
        Just v -> v
        Nothing
            | port == 443 -> AWS.HTTPS
            | otherwise -> AWS.HTTP
    cfg = case getRemoteConfigValue signatureField c of
        Just (SignatureVersion 4) ->
            S3.s3v4 proto endpoint False S3.SignWithEffort
        _ -> S3.s3 proto endpoint False
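
-- How port and protocol settings interact, as a quick reference derived
-- from the case analysis above:
--
--   port unset, protocol unset    -> port 80,   HTTP
--   port unset, protocol=https    -> port 443,  HTTPS
--   port=80,    protocol=https    -> port 443,  HTTPS (protocol wins)
--   port=8080,  protocol unset    -> port 8080, HTTP
--   port=443,   protocol unset    -> port 443,  HTTPS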

data S3Info = S3Info
    { bucket :: S3.Bucket
    , storageClass :: S3.StorageClass
    , bucketObject :: Key -> BucketObject
    , bucketExportLocation :: ExportLocation -> BucketObject
    , bucketImportLocation :: BucketObject -> Maybe ImportLocation
    , metaHeaders :: [(T.Text, T.Text)]
    , partSize :: Maybe Integer
    , isIA :: Bool
    , versioning :: Bool
    , public :: Bool
    , publicurl :: Maybe URLString
    , host :: Maybe String
    }

extractS3Info :: ParsedRemoteConfig -> Annex S3Info
extractS3Info c = do
    b <- maybe
        (giveup "S3 bucket not configured")
        (return . T.pack)
        (getBucketName c)
    return $ S3Info
        { bucket = b
        , storageClass = getStorageClass c
        , bucketObject = getBucketObject c
        , bucketExportLocation = getBucketExportLocation c
        , bucketImportLocation = getBucketImportLocation c
        , metaHeaders = getMetaHeaders c
        , partSize = getPartSize c
        , isIA = configIA c
        , versioning = fromMaybe False $
            getRemoteConfigValue versioningField c
        , public = fromMaybe False $
            getRemoteConfigValue publicField c
        , publicurl = getRemoteConfigValue publicurlField c
        , host = getRemoteConfigValue hostField c
        }

putObject :: S3Info -> T.Text -> RequestBody -> S3.PutObject
putObject info file rbody = (S3.putObject (bucket info) file rbody)
    { S3.poStorageClass = Just (storageClass info)
    , S3.poMetadata = metaHeaders info
    , S3.poAutoMakeBucket = isIA info
    , S3.poAcl = acl info
    }

acl :: S3Info -> Maybe S3.CannedAcl
acl info
    | public info = Just S3.AclPublicRead
    | otherwise = Nothing

getBucketName :: ParsedRemoteConfig -> Maybe BucketName
getBucketName = map toLower <$$> getRemoteConfigValue bucketField

getStorageClass :: ParsedRemoteConfig -> S3.StorageClass
getStorageClass c = case getRemoteConfigValue storageclassField c of
    Just s -> S3.OtherStorageClass (T.pack s)
    _ -> S3.Standard

getPartSize :: ParsedRemoteConfig -> Maybe Integer
getPartSize c = readSize dataUnits =<< getRemoteConfigValue partsizeField c
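
-- For example (assuming readSize accepts the usual git-annex size
-- notation), a remote configured with partsize=1GiB yields
-- Just 1073741824, while an unset partsize yields Nothing, which
-- disables multipart uploads.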

getMetaHeaders :: ParsedRemoteConfig -> [(T.Text, T.Text)]
getMetaHeaders = map munge
    . filter (isMetaHeader . fst)
    . M.assocs
    . getRemoteConfigPassedThrough
  where
    metaprefixlen = length metaPrefix
    munge (k, v) = (T.pack $ drop metaprefixlen (fromProposedAccepted k), T.pack v)

isMetaHeader :: RemoteConfigField -> Bool
isMetaHeader h = metaPrefix `isPrefixOf` fromProposedAccepted h

isArchiveMetaHeader :: RemoteConfigField -> Bool
isArchiveMetaHeader h = "x-archive-" `isPrefixOf` fromProposedAccepted h

metaPrefix :: String
metaPrefix = "x-amz-meta-"
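
-- An illustrative example (hypothetical config field): a remote
-- initialized with x-amz-meta-project=demo passes the isMetaHeader
-- filter and is munged to the pair ("project", "demo"), which ends up
-- in S3.poMetadata on each stored object.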

getFilePrefix :: ParsedRemoteConfig -> String
getFilePrefix = fromMaybe "" . getRemoteConfigValue fileprefixField

getBucketObject :: ParsedRemoteConfig -> Key -> BucketObject
getBucketObject c = munge . serializeKey
  where
    munge s = case getRemoteConfigValue mungekeysField c :: Maybe String of
        Just "ia" -> iaMunge $ getFilePrefix c ++ s
        _ -> getFilePrefix c ++ s

getBucketExportLocation :: ParsedRemoteConfig -> ExportLocation -> BucketObject
getBucketExportLocation c loc =
    getFilePrefix c ++ fromRawFilePath (fromExportLocation loc)

getBucketImportLocation :: ParsedRemoteConfig -> BucketObject -> Maybe ImportLocation
getBucketImportLocation c obj
    -- The uuidFile should not be imported.
    | obj == uuidfile = Nothing
    -- Only import files that are under the fileprefix, when
    -- one is configured.
    | prefix `isPrefixOf` obj = Just $ mkImportLocation $
        toRawFilePath $ drop prefixlen obj
    | otherwise = Nothing
  where
    prefix = getFilePrefix c
    prefixlen = length prefix
    uuidfile = uuidFile c
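
-- A worked example (hypothetical fileprefix "foo/"): the bucket object
-- "foo/bar" maps to the import location "bar"; "foo/annex-uuid" is
-- skipped because it is the uuidFile; and "unrelated" falls outside the
-- prefix, so it maps to Nothing.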

{- Internet Archive documentation limits filenames to a subset of ascii.
 - While other characters seem to work now, this entity encodes everything
 - else to avoid problems. -}
iaMunge :: String -> String
iaMunge = (>>= munge)
  where
    munge c
        | isAsciiUpper c || isAsciiLower c || isNumber c = [c]
        | c `elem` ("_-.\"" :: String) = [c]
        | isSpace c = []
        | otherwise = "&" ++ show (ord c) ++ ";"
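
-- An illustrative example of the entity encoding (a sketch; not used by
-- the module): ascii alphanumerics and "_-.\"" pass through, spaces are
-- dropped, and anything else becomes a decimal entity.
--
-- > iaMunge "my file+1.txt" == "myfile&43;1.txt"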

configIA :: ParsedRemoteConfig -> Bool
configIA = maybe False isIAHost . getRemoteConfigValue hostField

{- Hostname to use for archive.org S3. -}
iaHost :: HostName
iaHost = "s3.us.archive.org"

isIAHost :: HostName -> Bool
isIAHost h = ".archive.org" `isSuffixOf` map toLower h
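
-- For example, isIAHost "S3.US.Archive.ORG" is True (the match is
-- case-insensitive on the ".archive.org" suffix), while
-- isIAHost "s3.amazonaws.com" is False.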

iaItemUrl :: BucketName -> URLString
iaItemUrl b = "http://archive.org/details/" ++ b

iaPublicUrl :: S3Info -> BucketObject -> URLString
iaPublicUrl info = genericPublicUrl $
    "http://archive.org/download/" ++ T.unpack (bucket info) ++ "/"

awsPublicUrl :: S3Info -> BucketObject -> URLString
awsPublicUrl info = genericPublicUrl $
    "https://" ++ T.unpack (bucket info) ++ ".s3.amazonaws.com/"

genericPublicUrl :: URLString -> BucketObject -> URLString
genericPublicUrl baseurl p =
    baseurl Posix.</> escapeURIString skipescape p
  where
    -- Don't need to escape '/' because the bucket object
    -- is not necessarily a single url component.
    -- But do want to escape eg '+' and ' '
    skipescape '/' = True
    skipescape c = isUnescapedInURIComponent c
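
-- An illustrative example (hypothetical bucket and object):
--
-- > genericPublicUrl "https://mybucket.s3.amazonaws.com/" "dir/a b+c"
-- >     == "https://mybucket.s3.amazonaws.com/dir/a%20b%2Bc"
--
-- The '/' in "dir/a b+c" is kept as a path separator, while the space
-- and '+' are percent-encoded.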

genCredentials :: CredPair -> IO AWS.Credentials
genCredentials (keyid, secret) = AWS.Credentials
    <$> pure (tobs keyid)
    <*> pure (tobs secret)
    <*> newIORef []
    <*> (fmap tobs <$> getEnv "AWS_SESSION_TOKEN")
  where
    tobs = T.encodeUtf8 . T.pack

mkLocationConstraint :: AWS.Region -> S3.LocationConstraint
mkLocationConstraint "US" = S3.locationUsClassic
mkLocationConstraint r = r

debugMapper :: AWS.Logger
debugMapper level t = forward "S3" (T.unpack t)
  where
    forward = case level of
        AWS.Debug -> debugM
        AWS.Info -> infoM
        AWS.Warning -> warningM
        AWS.Error -> errorM

s3Info :: ParsedRemoteConfig -> S3Info -> [(String, String)]
s3Info c info = catMaybes
    [ Just ("bucket", fromMaybe "unknown" (getBucketName c))
    , Just ("endpoint", w82s (BS.unpack (S3.s3Endpoint s3c)))
    , Just ("port", show (S3.s3Port s3c))
    , Just ("protocol", map toLower (show (S3.s3Protocol s3c)))
    , Just ("storage class", showstorageclass (getStorageClass c))
    , if configIA c
        then Just ("internet archive item", iaItemUrl $ fromMaybe "unknown" $ getBucketName c)
        else Nothing
    , Just ("partsize", maybe "unlimited" (roughSize storageUnits False) (getPartSize c))
    , Just ("public", if public info then "yes" else "no")
    , Just ("versioning", if versioning info then "yes" else "no")
    ]
  where
    s3c = s3Configuration c
    showstorageclass (S3.OtherStorageClass t) = T.unpack t
    showstorageclass sc = show sc
|
2015-06-05 20:52:38 +00:00
|
|
|
|
2020-01-14 19:41:34 +00:00
|
|
|
getPublicWebUrls :: UUID -> RemoteStateHandle -> S3Info -> ParsedRemoteConfig -> Key -> Annex [URLString]
getPublicWebUrls u rs info c k = either (const []) id <$> getPublicWebUrls' u rs info c k

getPublicWebUrls' :: UUID -> RemoteStateHandle -> S3Info -> ParsedRemoteConfig -> Key -> Annex (Either String [URLString])
getPublicWebUrls' u rs info c k
    | not (public info) = return $ Left $
        "S3 bucket does not allow public access; " ++ needS3Creds u
    | exportTree c = if versioning info
        then case publicurl info of
            Just url -> getversionid (const $ genericPublicUrl url)
            Nothing -> case host info of
                Just h | h == AWS.s3DefaultHost ->
                    getversionid awsPublicUrl
                _ -> return nopublicurl
        else return (Left "exporttree used without versioning")
    | otherwise = case getPublicUrlMaker info of
        Just geturl -> return (Right [geturl $ bucketObject info k])
        Nothing -> return nopublicurl
  where
    nopublicurl = Left "No publicurl is configured for this remote"
    getversionid url = getS3VersionIDPublicUrls url info rs k >>= \case
        [] -> return (Left "Remote is configured to use versioning, but no S3 version ID is recorded for this key")
        l -> return (Right l)

getPublicUrlMaker :: S3Info -> Maybe (BucketObject -> URLString)
getPublicUrlMaker info = case publicurl info of
    Just url -> Just (genericPublicUrl url)
    Nothing -> case host info of
        Just h
            | h == AWS.s3DefaultHost ->
                Just (awsPublicUrl info)
            | isIAHost h ->
                Just (iaPublicUrl info)
        _ -> Nothing

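-- Selection sketch: an explicit publicurl= configuration wins; failing
-- that, the default AWS host gets awsPublicUrl and an archive.org host
-- gets iaPublicUrl; for any other host no public URL form is known, so
-- this is Nothing.
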
-- S3 uses a unique version id for each object stored on it.
--
-- The Object is included in this because retrieving a particular
-- version id involves a request for an object, so this keeps track
-- of what the object is.
data S3VersionID = S3VersionID S3.Object T.Text
    deriving (Show)

-- smart constructor
mkS3VersionID :: S3.Object -> Maybe T.Text -> Maybe S3VersionID
mkS3VersionID o (Just t)
    | T.null t = Nothing
    -- AWS documentation says a version ID is at most 1024 bytes long.
    -- Since they are stored in the git-annex branch, prevent them from
    -- being very much larger than that.
    | T.length t < 2048 = Just (S3VersionID o t)
    | otherwise = Nothing
mkS3VersionID _ Nothing = Nothing

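-- Boundary behavior (hypothetical values):
--
-- > mkS3VersionID "obj" (Just "")      -- Nothing, an empty version id
-- > mkS3VersionID "obj" (Just "v123")  -- Just (S3VersionID "obj" "v123")
-- > mkS3VersionID "obj" Nothing        -- Nothing
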
-- Format for storage in per-remote metadata.
--
-- An S3 version ID is "url ready" so does not contain '#', and so we'll
-- use that to separate it from the object id. (Could use a space, but
-- spaces in metadata values lead to an inefficient encoding.)
formatS3VersionID :: S3VersionID -> BS.ByteString
formatS3VersionID (S3VersionID o v) = T.encodeUtf8 v <> "#" <> T.encodeUtf8 o

-- Parse from value stored in per-remote metadata.
parseS3VersionID :: BS.ByteString -> Maybe S3VersionID
parseS3VersionID b = do
    let (v, rest) = B8.break (== '#') b
    o <- eitherToMaybe $ T.decodeUtf8' $ BS.drop 1 rest
    mkS3VersionID o (eitherToMaybe $ T.decodeUtf8' v)

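-- Round-trip sketch (hypothetical object name and version id):
--
-- > formatS3VersionID (S3VersionID "path/to/obj" "v123")
-- >     == "v123#path/to/obj"
-- > parseS3VersionID "v123#path/to/obj"
-- >     -- Just (S3VersionID "path/to/obj" "v123")
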
-- For a versioned bucket, the S3VersionID is used as the
-- ContentIdentifier.
mkS3VersionedContentIdentifier :: S3VersionID -> ContentIdentifier
mkS3VersionedContentIdentifier (S3VersionID _ v) =
    ContentIdentifier $ T.encodeUtf8 v

mkS3VersionedContentIdentifier' :: S3.ObjectVersionInfo -> ContentIdentifier
mkS3VersionedContentIdentifier' =
    ContentIdentifier . T.encodeUtf8 . S3.oviVersionId

-- S3 returns etags surrounded by double quotes, and the quotes may
-- be included here.
type S3Etag = T.Text

-- For an unversioned bucket, the S3Etag is instead used as the
-- ContentIdentifier. Prefixed by '#' since that cannot appear in an S3
-- version id.
mkS3UnversionedContentIdentifier :: S3Etag -> ContentIdentifier
mkS3UnversionedContentIdentifier t =
    ContentIdentifier $ T.encodeUtf8 $ "#" <> T.filter (/= '"') t

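-- For example (hypothetical etag, quoted as S3 returns it):
--
-- > mkS3UnversionedContentIdentifier "\"abc123\""
-- >     -- ContentIdentifier "#abc123"
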
-- Makes a GetObject request that is guaranteed to get the object
-- version matching the ContentIdentifier, or fail.
limitGetToContentIdentifier :: ContentIdentifier -> S3.GetObject -> S3.GetObject
limitGetToContentIdentifier cid req =
    limitToContentIdentifier cid
        (\etag -> req { S3.goIfMatch = etag })
        (\versionid -> req { S3.goVersionId = versionid })

limitHeadToContentIdentifier :: ContentIdentifier -> S3.HeadObject -> S3.HeadObject
limitHeadToContentIdentifier cid req =
    limitToContentIdentifier cid
        (\etag -> req { S3.hoIfMatch = etag })
        (\versionid -> req { S3.hoVersionId = versionid })

limitToContentIdentifier :: ContentIdentifier -> (Maybe S3Etag -> a) -> (Maybe T.Text -> a) -> a
limitToContentIdentifier (ContentIdentifier v) limitetag limitversionid =
    let t = either mempty id (T.decodeUtf8' v)
    in case T.take 1 t of
        "#" ->
            let etag = T.drop 1 t
            in limitetag (Just etag)
        _ -> limitversionid (Just t)

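-- Dispatch sketch (hypothetical identifiers): a ContentIdentifier of
-- "#abc123" calls the etag continuation with Just "abc123", while
-- "v123" calls the version id continuation with Just "v123".
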
-- A ContentIdentifier contains either an etag or an S3 version id.
extractContentIdentifier :: ContentIdentifier -> S3.Object -> Either S3Etag (Maybe S3VersionID)
extractContentIdentifier (ContentIdentifier v) o =
    let t = either mempty id (T.decodeUtf8' v)
    in case T.take 1 t of
        "#" -> Left (T.drop 1 t)
        _ -> Right (mkS3VersionID o (Just t))

setS3VersionID :: S3Info -> RemoteStateHandle -> Key -> Maybe S3VersionID -> Annex ()
setS3VersionID info rs k vid
    | versioning info = maybe noop (setS3VersionID' rs k) vid
    | otherwise = noop

setS3VersionID' :: RemoteStateHandle -> Key -> S3VersionID -> Annex ()
setS3VersionID' rs k vid = addRemoteMetaData k rs $
    updateMetaData s3VersionField v emptyMetaData
  where
    v = mkMetaValue (CurrentlySet True) (formatS3VersionID vid)

getS3VersionID :: RemoteStateHandle -> Key -> Annex [S3VersionID]
getS3VersionID rs k = do
    (RemoteMetaData _ m) <- getCurrentRemoteMetaData rs k
    return $ mapMaybe parseS3VersionID $ map unwrap $ S.toList $
        metaDataValues s3VersionField m
  where
    unwrap (MetaValue _ v) = v

s3VersionField :: MetaField
s3VersionField = mkMetaFieldUnchecked "V"

eitherS3VersionID :: S3Info -> RemoteStateHandle -> ParsedRemoteConfig -> Key -> S3.Object -> Annex (Either String (Either S3.Object S3VersionID))
eitherS3VersionID info rs c k fallback
    | versioning info = getS3VersionID rs k >>= return . \case
        [] -> if exportTree c
            then Left "Remote is configured to use versioning, but no S3 version ID is recorded for this key"
            else Right (Left fallback)
        -- It's possible for a key to be stored multiple times in
        -- a bucket with different version IDs; only use one of them.
        (v:_) -> Right (Right v)
    | otherwise = return (Right (Left fallback))

s3VersionIDPublicUrl :: (S3Info -> BucketObject -> URLString) -> S3Info -> S3VersionID -> URLString
s3VersionIDPublicUrl mk info (S3VersionID obj vid) = concat
    [ mk info (T.unpack obj)
    , "?versionId="
    , T.unpack vid -- version ID is "url ready" so no escaping needed
    ]

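-- For example, with awsPublicUrl and a bucket named "mybucket"
-- (hypothetical object name and version id):
--
-- > s3VersionIDPublicUrl awsPublicUrl info (S3VersionID "foo" "v123")
-- >     == "https://mybucket.s3.amazonaws.com/foo?versionId=v123"
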
getS3VersionIDPublicUrls :: (S3Info -> BucketObject -> URLString) -> S3Info -> RemoteStateHandle -> Key -> Annex [URLString]
getS3VersionIDPublicUrls mk info rs k =
    map (s3VersionIDPublicUrl mk info) <$> getS3VersionID rs k

-- Enabling versioning on the bucket can only be done at init time;
-- setting versioning in a bucket that git-annex has already exported
-- files to risks losing the content of those un-versioned files.
enableBucketVersioning :: SetupStage -> S3Info -> ParsedRemoteConfig -> RemoteGitConfig -> UUID -> Annex ()
#if MIN_VERSION_aws(0,21,1)
enableBucketVersioning ss info c gc u = do
#else
enableBucketVersioning ss info _ _ _ = do
#endif
    case ss of
        Init -> when (versioning info) $
            enableversioning (bucket info)
        Enable oldc -> do
            oldpc <- parsedRemoteConfig remote oldc
            oldinfo <- extractS3Info oldpc
            when (versioning info /= versioning oldinfo) $
                giveup "Cannot change versioning= of existing S3 remote."
  where
    enableversioning b = do
#if MIN_VERSION_aws(0,21,1)
        showAction "enabling bucket versioning"
        hdl <- mkS3HandleVar c gc u
        withS3HandleOrFail u hdl $ \h ->
            void $ liftIO $ runResourceT $ sendS3Handle h $
                S3.putBucketVersioning b S3.VersioningEnabled
#else
        showLongNote $ unlines
            [ "This version of git-annex cannot auto-enable S3 bucket versioning."
            , "You need to manually enable versioning in the S3 console"
            , "for the bucket \"" ++ T.unpack b ++ "\""
            , "https://docs.aws.amazon.com/AmazonS3/latest/user-guide/enable-versioning.html"
            , "It's important you enable versioning before storing anything in the bucket!"
            ]
#endif

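-- Setup sketch from the command line (remote and bucket names are
-- placeholders):
--
-- > git annex initremote mys3 type=S3 encryption=none \
-- >     bucket=mybucket exporttree=yes versioning=yes
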
-- If the remote has versioning enabled, but the version ID is for some
-- reason not being recorded, it's not safe to perform an action that
-- will remove the unversioned file. The file may be the only copy of an
-- annex object.
--
-- This code could be removed eventually, since enableBucketVersioning
-- will avoid this situation. Before that was added, some remotes
-- were created without versioning, some unversioned files exported to
-- them, and then versioning enabled, and this is to avoid data loss in
-- those cases.
checkVersioning :: S3Info -> RemoteStateHandle -> Key -> Annex ()
checkVersioning info rs k
    | versioning info = getS3VersionID rs k >>= \case
        [] -> giveup "Remote is configured to use versioning, but no S3 version ID is recorded for this key, so it cannot safely be modified."
        _ -> return ()
    | otherwise = return ()