{- git remotes encrypted using git-remote-gcrypt
 -
 - Copyright 2013 Joey Hess <id@joeyh.name>
 -
 - Licensed under the GNU GPL version 3 or higher.
 -}

module Remote.GCrypt (
	remote,
	chainGen,
	getGCryptUUID,
	coreGCryptId,
	setupRepo,
	accessShellConfig,
) where

import qualified Data.Map as M
import qualified Data.ByteString.Lazy as L
import Control.Exception
import Data.Default

import Annex.Common
import qualified Annex
import Types.Remote
import Types.GitConfig
import Types.Crypto
import Types.Creds
import Types.Transfer
import qualified Git
import qualified Git.Command
import qualified Git.Config
import qualified Git.GCrypt
import qualified Git.Construct
import qualified Annex.Branch
import Config
import Config.Cost
import Remote.Helper.Git
import Remote.Helper.Encryptable
import Remote.Helper.Special
import Remote.Helper.Messages
import qualified Remote.Helper.Ssh as Ssh
import Utility.Metered
import Annex.UUID
import Annex.Ssh
import qualified Remote.Rsync
import qualified Remote.Directory
import Utility.Rsync
import Utility.Tmp
import Logs.Remote
import Utility.Gpg

remote :: RemoteType
remote = RemoteType {
	typename = "gcrypt",
	-- Remote.Git takes care of enumerating gcrypt remotes too,
	-- and will call our gen on them.
	enumerate = const (return []),
	generate = gen,
	setup = gCryptSetup
}
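
-- A gcrypt special remote appears in .git/config as an ordinary git
-- remote whose url uses git-remote-gcrypt's "gcrypt::" prefix; for
-- example (remote name, host and path are hypothetical):
--
--   [remote "securebackup"]
--           url = gcrypt::ssh://example.com/~/encrypted.git
--
-- Remote.Git notices such remotes and hands them to this module
-- (see chainGen below).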

chainGen :: Git.Repo -> UUID -> RemoteConfig -> RemoteGitConfig -> Annex (Maybe Remote)
chainGen gcryptr u c gc = do
	g <- gitRepo
	-- get underlying git repo with real path, not gcrypt path
	r <- liftIO $ Git.GCrypt.encryptedRemote g gcryptr
	let r' = r { Git.remoteName = Git.remoteName gcryptr }
	gen r' u c gc

gen :: Git.Repo -> UUID -> RemoteConfig -> RemoteGitConfig -> Annex (Maybe Remote)
gen baser u c gc = do
	-- double-check that the cache matches the underlying repo's
	-- gcrypt-id (which might not be set); only done for local repos
	(mgcryptid, r) <- getGCryptId True baser gc
	g <- gitRepo
	case (mgcryptid, Git.GCrypt.remoteRepoId g (Git.remoteName baser)) of
		(Just gcryptid, Just cachedgcryptid)
			| gcryptid /= cachedgcryptid -> resetup gcryptid r
		_ -> gen' r u c gc
  where
	-- A different drive may have been mounted, making a different
	-- gcrypt remote available. So need to set the cached
	-- gcrypt-id and annex-uuid of the remote to match the remote
	-- that is now available. Also need to set the gcrypt participants
	-- correctly.
	resetup gcryptid r = do
		let u' = genUUIDInNameSpace gCryptNameSpace gcryptid
		v <- M.lookup u' <$> readRemoteLog
		case (Git.remoteName baser, v) of
			(Just remotename, Just c') -> do
				setGcryptEncryption c' remotename
				setConfig (remoteConfig baser "uuid") (fromUUID u')
				setConfig (ConfigKey $ Git.GCrypt.remoteConfigKey "gcrypt-id" remotename) gcryptid
				gen' r u' c' gc
			_ -> do
				warning $ "not using unknown gcrypt repository pointed to by remote " ++ Git.repoDescribe r
				return Nothing

gen' :: Git.Repo -> UUID -> RemoteConfig -> RemoteGitConfig -> Annex (Maybe Remote)
gen' r u c gc = do
	cst <- remoteCost gc $
		if repoCheap r then nearlyCheapRemoteCost else expensiveRemoteCost
	(rsynctransport, rsyncurl) <- rsyncTransportToObjects r gc
	let rsyncopts = Remote.Rsync.genRsyncOpts c gc rsynctransport rsyncurl
	let this = Remote
		{ uuid = u
		, cost = cst
		, name = Git.repoDescribe r
		, storeKey = storeKeyDummy
		, retrieveKeyFile = retreiveKeyFileDummy
		, retrieveKeyFileCheap = \_ _ _ -> return False
		, removeKey = removeKeyDummy
		, lockContent = Nothing
		, checkPresent = checkPresentDummy
		, checkPresentCheap = repoCheap r
		, whereisKey = Nothing
		, remoteFsck = Nothing
		, repairRepo = Nothing
		, config = c
		, localpath = localpathCalc r
		, repo = r
		, gitconfig = gc { remoteGitConfig = extractGitConfig r }
		, readonly = Git.repoIsHttp r
		, availability = availabilityCalc r
		, remotetype = remote
		, mkUnavailable = return Nothing
		, getInfo = gitRepoInfo this
		, claimUrl = Nothing
		, checkUrl = Nothing
		}
	return $ Just $ specialRemote' specialcfg c
		(simplyPrepare $ store this rsyncopts)
		(simplyPrepare $ retrieve this rsyncopts)
		(simplyPrepare $ remove this rsyncopts)
		(simplyPrepare $ checkKey this rsyncopts)
		this
  where
	specialcfg
		| Git.repoIsUrl r = (specialRemoteCfg c)
			-- Rsync displays its own progress.
			{ displayProgress = False }
		| otherwise = specialRemoteCfg c

rsyncTransportToObjects :: Git.Repo -> RemoteGitConfig -> Annex ([CommandParam], String)
rsyncTransportToObjects r gc = do
	(rsynctransport, rsyncurl, _) <- rsyncTransport r gc
	return (rsynctransport, rsyncurl ++ "/annex/objects")

rsyncTransport :: Git.Repo -> RemoteGitConfig -> Annex ([CommandParam], String, AccessMethod)
rsyncTransport r gc
	| "ssh://" `isPrefixOf` loc = sshtransport $ break (== '/') $ drop (length "ssh://") loc
	| "//:" `isInfixOf` loc = othertransport
	| ":" `isInfixOf` loc = sshtransport $ separate (== ':') loc
	| otherwise = othertransport
  where
	loc = Git.repoLocation r
	sshtransport (host, path) = do
		let rsyncpath = if "/~/" `isPrefixOf` path
			then drop 3 path
			else path
		opts <- sshOptions ConsumeStdin (host, Nothing) gc []
		return (rsyncShell $ Param "ssh" : opts, host ++ ":" ++ rsyncpath, AccessShell)
	othertransport = return ([], loc, AccessDirect)
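
-- A rough illustration of how the guards above classify repo locations
-- (hostnames and paths are hypothetical):
--
--   "ssh://example.com/srv/repo" -> sshtransport, rsyncs to "example.com:/srv/repo"
--   "example.com:repo"           -> sshtransport, rsyncs to "example.com:repo"
--   "/mnt/usb/repo"              -> othertransport, direct local/rsync access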

noCrypto :: Annex a
noCrypto = giveup "cannot use gcrypt remote without encryption enabled"

unsupportedUrl :: a
unsupportedUrl = giveup "using non-ssh remote repo url with gcrypt is not supported"

gCryptSetup :: SetupStage -> Maybe UUID -> Maybe CredPair -> RemoteConfig -> RemoteGitConfig -> Annex (RemoteConfig, UUID)
gCryptSetup _ mu _ c gc = go $ M.lookup "gitrepo" c
  where
	remotename = fromJust (M.lookup "name" c)
	go Nothing = giveup "Specify gitrepo="
	go (Just gitrepo) = do
		(c', _encsetup) <- encryptionSetup c gc

		let url = Git.GCrypt.urlPrefix ++ gitrepo
		rs <- fromRepo Git.remotes
		case filter (\r -> Git.remoteName r == Just remotename) rs of
			[] -> inRepo $ Git.Command.run
				[ Param "remote", Param "add"
				, Param remotename
				, Param url
				]
			(r:_)
				| Git.repoLocation r == url -> noop
				| otherwise -> error "Another remote with the same name already exists."

		setGcryptEncryption c' remotename

		{- Run a git fetch and a push to the git repo in order to get
		 - its gcrypt-id set up, so that later git annex commands
		 - will use the remote as a gcrypt remote. The fetch is
		 - needed if the repo already exists; the push is needed
		 - if the repo has not yet been initialized by gcrypt. -}
		void $ inRepo $ Git.Command.runBool
			[ Param "fetch"
			, Param remotename
			]
		void $ inRepo $ Git.Command.runBool
			[ Param "push"
			, Param remotename
			, Param $ Git.fromRef Annex.Branch.fullname
			]
		g <- inRepo Git.Config.reRead
		case Git.GCrypt.remoteRepoId g (Just remotename) of
			Nothing -> giveup "unable to determine gcrypt-id of remote"
			Just gcryptid -> do
				let u = genUUIDInNameSpace gCryptNameSpace gcryptid
				if Just u == mu || isNothing mu
					then do
						method <- setupRepo gcryptid =<< inRepo (Git.Construct.fromRemoteLocation gitrepo)
						gitConfigSpecialRemote u c' "gcrypt" (fromAccessMethod method)
						return (c', u)
					else giveup $ "uuid mismatch; expected " ++ show mu ++ " but remote gitrepo has " ++ show u ++ " (" ++ show gcryptid ++ ")"
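
-- A sketch of how such a remote is typically created from the command
-- line (the remote name, repository location, and keyid are examples):
--
--   git annex initremote securebackup type=gcrypt \
--           gitrepo=ssh://example.com/~/encrypted.git \
--           encryption=hybrid keyid=2512E3C7
--
-- The gitrepo= parameter is required (see "Specify gitrepo=" above);
-- the encryption-related parameters are handled by encryptionSetup.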

{- Sets up the gcrypt repository. The repository is either a local
 - repo, or it is accessed via rsync directly, or it is accessed over ssh
 - and git-annex-shell is available to manage it.
 -
 - The GCryptID is recorded in the repository's git config for later use.
 - Also, if the git config has receive.denyNonFastForwards set, disable
 - it; gcrypt relies on being able to fast-forward branches.
 -}
setupRepo :: Git.GCrypt.GCryptId -> Git.Repo -> Annex AccessMethod
setupRepo gcryptid r
	| Git.repoIsUrl r = do
		(_, _, accessmethod) <- rsyncTransport r def
		case accessmethod of
			AccessDirect -> rsyncsetup
			AccessShell -> ifM gitannexshellsetup
				( return AccessShell
				, rsyncsetup
				)
	| Git.repoIsLocalUnknown r = localsetup =<< liftIO (Git.Config.read r)
	| otherwise = localsetup r
  where
	localsetup r' = do
		let setconfig k v = liftIO $ Git.Command.run [Param "config", Param k, Param v] r'
		setconfig coreGCryptId gcryptid
		setconfig denyNonFastForwards (Git.Config.boolConfig False)
		return AccessDirect

	{- As well as modifying the remote's git config,
	 - create the objectDir on the remote,
	 - which is needed for direct rsync of objects to work.
	 -}
	rsyncsetup = Remote.Rsync.withRsyncScratchDir $ \tmp -> do
		liftIO $ createDirectoryIfMissing True $ tmp </> objectDir
		(rsynctransport, rsyncurl, _) <- rsyncTransport r def
		let tmpconfig = tmp </> "config"
		void $ liftIO $ rsync $ rsynctransport ++
			[ Param $ rsyncurl ++ "/config"
			, Param tmpconfig
			]
		liftIO $ do
			void $ Git.Config.changeFile tmpconfig coreGCryptId gcryptid
			void $ Git.Config.changeFile tmpconfig denyNonFastForwards (Git.Config.boolConfig False)
		ok <- liftIO $ rsync $ rsynctransport ++
			[ Param "--recursive"
			, Param $ tmp ++ "/"
			, Param rsyncurl
			]
		unless ok $
			giveup "Failed to connect to remote to set it up."
		return AccessDirect

	{- Ask git-annex-shell to configure the repository as a gcrypt
	 - repository. May fail if it is too old. -}
	gitannexshellsetup = Ssh.onRemote NoConsumeStdin r
		(boolSystem, return False)
		"gcryptsetup" [ Param gcryptid ] []

	denyNonFastForwards = "receive.denyNonFastForwards"
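
-- After setup, the gcrypt repository's own git config contains roughly:
--
--   [core]
--           gcrypt-id = ...   (the GCryptId passed to setupRepo)
--   [receive]
--           denyNonFastForwards = false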

accessShell :: Remote -> Bool
accessShell = accessShellConfig . gitconfig

accessShellConfig :: RemoteGitConfig -> Bool
accessShellConfig c = case method of
	AccessShell -> True
	_ -> False
  where
	method = toAccessMethod $ fromMaybe "" $ remoteAnnexGCrypt c
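
-- The remoteAnnexGCrypt setting corresponds to the remote's annex-gcrypt
-- git config, recorded by gCryptSetup via gitConfigSpecialRemote. So,
-- roughly, remote.<name>.annex-gcrypt = "shell" selects access through
-- git-annex-shell, while any other value falls back to rsync/direct
-- access.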

shellOrRsync :: Remote -> Annex a -> Annex a -> Annex a
shellOrRsync r ashell arsync
	| accessShell r = ashell
	| otherwise = arsync

{- Configure gcrypt to use the same list of keyids that
 - were passed to initremote as its participants.
 - Also, configure it to use a signing key that is in the list of
 - participants, which gcrypt requires to be the case, and which may
 - not be, depending on system configuration.
 -
 - (For shared encryption, gcrypt's default behavior is used.)
 -
 - Also, sets gcrypt-publish-participants to avoid unnecessary gpg
 - passphrase prompts.
 -}
setGcryptEncryption :: RemoteConfig -> String -> Annex ()
setGcryptEncryption c remotename = do
	let participants = remoteconfig Git.GCrypt.remoteParticipantConfigKey
	case cipherKeyIds =<< extractCipher c of
		Nothing -> noCrypto
		Just (KeyIds { keyIds = ks }) -> do
			setConfig participants (unwords ks)
			let signingkey = ConfigKey $ Git.GCrypt.remoteSigningKey remotename
			cmd <- gpgCmd <$> Annex.getGitConfig
			skeys <- M.keys <$> liftIO (secretKeys cmd)
			case filter (`elem` ks) skeys of
				[] -> noop
				(k:_) -> setConfig signingkey k
	setConfig (remoteconfig Git.GCrypt.remotePublishParticipantConfigKey)
		(Git.Config.boolConfig True)
  where
	remoteconfig n = ConfigKey $ n remotename
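
-- As a rough illustration, for a remote named "securebackup" whose cipher
-- lists participants KEYA and KEYB (names hypothetical; the exact config
-- key names come from Git.GCrypt), this ends up setting something like:
--
--   remote.securebackup.gcrypt-participants = "KEYA KEYB"
--   remote.securebackup.gcrypt-signingkey = "KEYA"   (when KEYA has a secret key)
--   remote.securebackup.gcrypt-publish-participants = true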

store :: Remote -> Remote.Rsync.RsyncOpts -> Storer
store r rsyncopts
	| not $ Git.repoIsUrl (repo r) =
		byteStorer $ \k b p -> guardUsable (repo r) (return False) $ liftIO $ do
			let tmpdir = Git.repoLocation (repo r) </> "tmp" </> keyFile k
			void $ tryIO $ createDirectoryIfMissing True tmpdir
			let tmpf = tmpdir </> keyFile k
			meteredWriteFile p tmpf b
			let destdir = parentDir $ gCryptLocation r k
			Remote.Directory.finalizeStoreGeneric tmpdir destdir
			return True
	| Git.repoIsSsh (repo r) = if accessShell r
		then fileStorer $ \k f p -> Ssh.rsyncHelper (Just p)
			=<< Ssh.rsyncParamsRemote False r Upload k f
				(AssociatedFile Nothing)
		else fileStorer $ Remote.Rsync.store rsyncopts
	| otherwise = unsupportedUrl

retrieve :: Remote -> Remote.Rsync.RsyncOpts -> Retriever
retrieve r rsyncopts
	| not $ Git.repoIsUrl (repo r) = byteRetriever $ \k sink ->
		guardUsable (repo r) (return False) $
			sink =<< liftIO (L.readFile $ gCryptLocation r k)
	| Git.repoIsSsh (repo r) = if accessShell r
		then fileRetriever $ \f k p -> do
			ps <- Ssh.rsyncParamsRemote False r Download k f
				(AssociatedFile Nothing)
			unlessM (Ssh.rsyncHelper (Just p) ps) $
				giveup "rsync failed"
		else fileRetriever $ Remote.Rsync.retrieve rsyncopts
	| otherwise = unsupportedUrl

remove :: Remote -> Remote.Rsync.RsyncOpts -> Remover
remove r rsyncopts k
	| not $ Git.repoIsUrl (repo r) = guardUsable (repo r) (return False) $
		liftIO $ Remote.Directory.removeDirGeneric (Git.repoLocation (repo r)) (parentDir (gCryptLocation r k))
	| Git.repoIsSsh (repo r) = shellOrRsync r removeshell removersync
	| otherwise = unsupportedUrl
  where
	removersync = Remote.Rsync.remove rsyncopts k
	removeshell = Ssh.dropKey (repo r) k

checkKey :: Remote -> Remote.Rsync.RsyncOpts -> CheckPresent
checkKey r rsyncopts k
	| not $ Git.repoIsUrl (repo r) =
		guardUsable (repo r) (cantCheck $ repo r) $
			liftIO $ doesFileExist (gCryptLocation r k)
	| Git.repoIsSsh (repo r) = shellOrRsync r checkshell checkrsync
	| otherwise = unsupportedUrl
  where
	checkrsync = Remote.Rsync.checkKey (repo r) rsyncopts k
	checkshell = Ssh.inAnnex (repo r) k

{- Annexed objects are hashed using lower-case directories for max
 - portability. -}
gCryptLocation :: Remote -> Key -> FilePath
gCryptLocation r key = Git.repoLocation (repo r) </> objectDir </> keyPath key (hashDirLower def)
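
-- For example, for a local gcrypt repository at /mnt/usb/repo (path
-- hypothetical), a key's object ends up under roughly
-- /mnt/usb/repo/annex/objects/<hashdir>/<key>/<key>, with the two-level
-- <hashdir> coming from hashDirLower.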

data AccessMethod = AccessDirect | AccessShell

fromAccessMethod :: AccessMethod -> String
fromAccessMethod AccessShell = "shell"
fromAccessMethod AccessDirect = "true"

toAccessMethod :: String -> AccessMethod
toAccessMethod "shell" = AccessShell
toAccessMethod _ = AccessDirect
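
-- Any string other than "shell" maps to AccessDirect, so a quick
-- GHCi-style sanity check of the round trip looks like:
--
--   >>> fromAccessMethod (toAccessMethod "shell")
--   "shell"
--   >>> fromAccessMethod (toAccessMethod "anything else")
--   "true"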

getGCryptUUID :: Bool -> Git.Repo -> Annex (Maybe UUID)
getGCryptUUID fast r = (genUUIDInNameSpace gCryptNameSpace <$>) . fst
	<$> getGCryptId fast r def

coreGCryptId :: String
coreGCryptId = "core.gcrypt-id"

{- gcrypt repos set up by git-annex as special remotes have a
 - core.gcrypt-id setting in their config, which can be mapped back to
 - the remote's UUID.
 -
 - In fast mode, only checks local repos. To check a remote repo,
 - tries git-annex-shell and direct rsync of the git config file.
 -
 - (Also returns a version of the input repo with its config read.) -}
getGCryptId :: Bool -> Git.Repo -> RemoteGitConfig -> Annex (Maybe Git.GCrypt.GCryptId, Git.Repo)
getGCryptId fast r gc
	| Git.repoIsLocal r || Git.repoIsLocalUnknown r = extract <$>
		liftIO (catchMaybeIO $ Git.Config.read r)
	| not fast = extract . liftM fst <$> getM (eitherToMaybe <$>)
		[ Ssh.onRemote NoConsumeStdin r (Git.Config.fromPipe r, return (Left $ error "configlist failed")) "configlist" [] []
		, getConfigViaRsync r gc
		]
	| otherwise = return (Nothing, r)
  where
	extract Nothing = (Nothing, r)
	extract (Just r') = (Git.Config.getMaybe coreGCryptId r', r')

getConfigViaRsync :: Git.Repo -> RemoteGitConfig -> Annex (Either SomeException (Git.Repo, String))
getConfigViaRsync r gc = do
	(rsynctransport, rsyncurl, _) <- rsyncTransport r gc
	liftIO $ do
		withTmpFile "tmpconfig" $ \tmpconfig _ -> do
			void $ rsync $ rsynctransport ++
				[ Param $ rsyncurl ++ "/config"
				, Param tmpconfig
				]
			Git.Config.fromFile r tmpconfig