{- git remotes encrypted using git-remote-gcrypt
 -
 - Copyright 2013 Joey Hess <id@joeyh.name>
 -
 - Licensed under the GNU GPL version 3 or higher.
 -}

module Remote.GCrypt (
	remote,
	chainGen,
	getGCryptUUID,
	coreGCryptId,
	setupRepo
) where

import qualified Data.Map as M
import qualified Data.ByteString.Lazy as L
import Control.Exception
import Data.Default

import Common.Annex
import Types.Remote
import Types.GitConfig
import Types.Crypto
import Types.Creds
import qualified Git
import qualified Git.Command
import qualified Git.Config
import qualified Git.GCrypt
import qualified Git.Construct
import qualified Git.Types as Git ()
import qualified Annex.Branch
import Config
import Config.Cost
import Remote.Helper.Git
import Remote.Helper.Encryptable
import Remote.Helper.Special
import Remote.Helper.Messages
import qualified Remote.Helper.Ssh as Ssh
import Utility.Metered
import Annex.UUID
import Annex.Ssh
import qualified Remote.Rsync
import qualified Remote.Directory
import Utility.Rsync
import Utility.Tmp
import Logs.Remote
import Logs.Transfer
import Utility.Gpg

remote :: RemoteType
remote = RemoteType {
	typename = "gcrypt",
	-- Remote.Git takes care of enumerating gcrypt remotes too,
	-- and will call our gen on them.
	enumerate = return [],
	generate = gen,
	setup = gCryptSetup
}

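{- Remote.Git enumerates all git remotes, including gcrypt:: ones, and
 - dispatches the gcrypt ones here. chainGen swaps in the underlying
 - repository's real location in place of the gcrypt:: url before
 - handing off to gen. -}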
chainGen :: Git.Repo -> UUID -> RemoteConfig -> RemoteGitConfig -> Annex (Maybe Remote)
chainGen gcryptr u c gc = do
	g <- gitRepo
	-- get underlying git repo with real path, not gcrypt path
	r <- liftIO $ Git.GCrypt.encryptedRemote g gcryptr
	let r' = r { Git.remoteName = Git.remoteName gcryptr }
	gen r' u c gc

gen :: Git.Repo -> UUID -> RemoteConfig -> RemoteGitConfig -> Annex (Maybe Remote)
gen baser u c gc = do
	-- doublecheck that cache matches underlying repo's gcrypt-id
	-- (which might not be set), only for local repos
	(mgcryptid, r) <- getGCryptId True baser
	g <- gitRepo
	case (mgcryptid, Git.GCrypt.remoteRepoId g (Git.remoteName baser)) of
		(Just gcryptid, Just cachedgcryptid)
			| gcryptid /= cachedgcryptid -> resetup gcryptid r
		_ -> gen' r u c gc
  where
	-- A different drive may have been mounted, making a different
	-- gcrypt remote available. So the cached gcrypt-id and
	-- annex-uuid of the remote need to be set to match the remote
	-- that is now available. The gcrypt participants also need
	-- to be set correctly.
	resetup gcryptid r = do
		let u' = genUUIDInNameSpace gCryptNameSpace gcryptid
		v <- M.lookup u' <$> readRemoteLog
		case (Git.remoteName baser, v) of
			(Just remotename, Just c') -> do
				setGcryptEncryption c' remotename
				setConfig (remoteConfig baser "uuid") (fromUUID u')
				setConfig (ConfigKey $ Git.GCrypt.remoteConfigKey "gcrypt-id" remotename) gcryptid
				gen' r u' c' gc
			_ -> do
				warning $ "not using unknown gcrypt repository pointed to by remote " ++ Git.repoDescribe r
				return Nothing

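{- Builds the Remote. The key action fields are filled in with
 - placeholder (Dummy) values here; specialRemote' replaces them with
 - the store/retrieve/remove/checkKey actions defined below, wrapped in
 - the standard special remote handling, which is where the encryption
 - is applied. -}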
gen' :: Git.Repo -> UUID -> RemoteConfig -> RemoteGitConfig -> Annex (Maybe Remote)
gen' r u c gc = do
	cst <- remoteCost gc $
		if repoCheap r then nearlyCheapRemoteCost else expensiveRemoteCost
	(rsynctransport, rsyncurl) <- rsyncTransportToObjects r
	let rsyncopts = Remote.Rsync.genRsyncOpts c gc rsynctransport rsyncurl
	let this = Remote
		{ uuid = u
		, cost = cst
		, name = Git.repoDescribe r
		, storeKey = storeKeyDummy
		, retrieveKeyFile = retreiveKeyFileDummy
		, retrieveKeyFileCheap = \_ _ -> return False
		, removeKey = removeKeyDummy
		, checkPresent = checkPresentDummy
		, checkPresentCheap = repoCheap r
		, whereisKey = Nothing
		, remoteFsck = Nothing
		, repairRepo = Nothing
		, config = c
		, localpath = localpathCalc r
		, repo = r
		, gitconfig = gc { remoteGitConfig = Just $ extractGitConfig r }
		, readonly = Git.repoIsHttp r
		, availability = availabilityCalc r
		, remotetype = remote
		, mkUnavailable = return Nothing
		, getInfo = gitRepoInfo this
		, claimUrl = Nothing
		, checkUrl = Nothing
		}
	return $ Just $ specialRemote' specialcfg c
		(simplyPrepare $ store this rsyncopts)
		(simplyPrepare $ retrieve this rsyncopts)
		(simplyPrepare $ remove this rsyncopts)
		(simplyPrepare $ checkKey this rsyncopts)
		this
  where
	specialcfg
		| Git.repoIsUrl r = (specialRemoteCfg c)
			-- Rsync displays its own progress.
			{ displayProgress = False }
		| otherwise = specialRemoteCfg c

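{- An rsync transport to the annex objects directory of the repository. -}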
rsyncTransportToObjects :: Git.Repo -> Annex ([CommandParam], String)
rsyncTransportToObjects r = do
	(rsynctransport, rsyncurl, _) <- rsyncTransport r
	return (rsynctransport, rsyncurl ++ "/annex/objects")

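{- Determines how to rsync to a repository: ssh:// urls and scp-style
 - host:path locations go over ssh (AccessShell), while anything else,
 - such as a local path, is accessed directly (AccessDirect). -}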
rsyncTransport :: Git.Repo -> Annex ([CommandParam], String, AccessMethod)
rsyncTransport r
	| "ssh://" `isPrefixOf` loc = sshtransport $ break (== '/') $ drop (length "ssh://") loc
	| "//:" `isInfixOf` loc = othertransport
	| ":" `isInfixOf` loc = sshtransport $ separate (== ':') loc
	| otherwise = othertransport
  where
	loc = Git.repoLocation r
	sshtransport (host, path) = do
		let rsyncpath = if "/~/" `isPrefixOf` path
			then drop 3 path
			else path
		opts <- sshCachingOptions (host, Nothing) []
		return (rsyncShell $ Param "ssh" : opts, host ++ ":" ++ rsyncpath, AccessShell)
	othertransport = return ([], loc, AccessDirect)

noCrypto :: Annex a
noCrypto = error "cannot use gcrypt remote without encryption enabled"

unsupportedUrl :: a
unsupportedUrl = error "using non-ssh remote repo url with gcrypt is not supported"

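{- Handles setup (initremote/enableremote) of a gcrypt special remote.
 - The config must include a gitrepo= setting pointing at the repository
 - to use; for example (roughly):
 -
 -   git annex initremote mygcrypt type=gcrypt gitrepo=ssh://example.com/encrypted.git encryption=hybrid keyid=ID
 -
 - The encryption parameters themselves are handled by encryptionSetup. -}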
gCryptSetup :: Maybe UUID -> Maybe CredPair -> RemoteConfig -> Annex (RemoteConfig, UUID)
gCryptSetup mu _ c = go $ M.lookup "gitrepo" c
  where
	remotename = fromJust (M.lookup "name" c)
	go Nothing = error "Specify gitrepo="
	go (Just gitrepo) = do
		(c', _encsetup) <- encryptionSetup c
		inRepo $ Git.Command.run
			[ Params "remote add"
			, Param remotename
			, Param $ Git.GCrypt.urlPrefix ++ gitrepo
			]

		setGcryptEncryption c' remotename

		{- Run a git fetch and a push to the git repo in order to get
		 - its gcrypt-id set up, so that later git annex commands
		 - will use the remote as a gcrypt remote. The fetch is
		 - needed if the repo already exists; the push is needed
		 - if the repo has not yet been initialized by gcrypt. -}
		void $ inRepo $ Git.Command.runBool
			[ Param "fetch"
			, Param remotename
			]
		void $ inRepo $ Git.Command.runBool
			[ Param "push"
			, Param remotename
			, Param $ Git.fromRef Annex.Branch.fullname
			]
		g <- inRepo Git.Config.reRead
		case Git.GCrypt.remoteRepoId g (Just remotename) of
			Nothing -> error "unable to determine gcrypt-id of remote"
			Just gcryptid -> do
				let u = genUUIDInNameSpace gCryptNameSpace gcryptid
				if Just u == mu || isNothing mu
					then do
						method <- setupRepo gcryptid =<< inRepo (Git.Construct.fromRemoteLocation gitrepo)
						gitConfigSpecialRemote u c' "gcrypt" (fromAccessMethod method)
						return (c', u)
					else error $ "uuid mismatch; expected " ++ show mu ++ " but remote gitrepo has " ++ show u ++ " (" ++ show gcryptid ++ ")"

{- Sets up the gcrypt repository. The repository is either a local
 - repo, or it is accessed via rsync directly, or it is accessed over ssh
 - and git-annex-shell is available to manage it.
 -
 - The GCryptID is recorded in the repository's git config for later use.
 - Also, if the git config has receive.denyNonFastForwards set, disable
 - it; gcrypt relies on being able to fast-forward branches.
 -}
setupRepo :: Git.GCrypt.GCryptId -> Git.Repo -> Annex AccessMethod
setupRepo gcryptid r
	| Git.repoIsUrl r = do
		(_, _, accessmethod) <- rsyncTransport r
		case accessmethod of
			AccessDirect -> rsyncsetup
			AccessShell -> ifM gitannexshellsetup
				( return AccessShell
				, rsyncsetup
				)
	| Git.repoIsLocalUnknown r = localsetup =<< liftIO (Git.Config.read r)
	| otherwise = localsetup r
  where
	localsetup r' = do
		let setconfig k v = liftIO $ Git.Command.run [Param "config", Param k, Param v] r'
		setconfig coreGCryptId gcryptid
		setconfig denyNonFastForwards (Git.Config.boolConfig False)
		return AccessDirect

	{- As well as modifying the remote's git config,
	 - create the objectDir on the remote,
	 - which is needed for direct rsync of objects to work.
	 -}
	rsyncsetup = Remote.Rsync.withRsyncScratchDir $ \tmp -> do
		liftIO $ createDirectoryIfMissing True $ tmp </> objectDir
		(rsynctransport, rsyncurl, _) <- rsyncTransport r
		let tmpconfig = tmp </> "config"
		void $ liftIO $ rsync $ rsynctransport ++
			[ Param $ rsyncurl ++ "/config"
			, Param tmpconfig
			]
		liftIO $ do
			void $ Git.Config.changeFile tmpconfig coreGCryptId gcryptid
			void $ Git.Config.changeFile tmpconfig denyNonFastForwards (Git.Config.boolConfig False)
		ok <- liftIO $ rsync $ rsynctransport ++
			[ Params "--recursive"
			, Param $ tmp ++ "/"
			, Param rsyncurl
			]
		unless ok $
			error "Failed to connect to remote to set it up."
		return AccessDirect

	{- Ask git-annex-shell to configure the repository as a gcrypt
	 - repository. May fail if it is too old. -}
	gitannexshellsetup = Ssh.onRemote r (boolSystem, return False)
		"gcryptsetup" [ Param gcryptid ] []

	denyNonFastForwards = "receive.denyNonFastForwards"

isShell :: Remote -> Bool
isShell r = case method of
	AccessShell -> True
	_ -> False
  where
	method = toAccessMethod $ fromMaybe "" $
		remoteAnnexGCrypt $ gitconfig r

shellOrRsync :: Remote -> Annex a -> Annex a -> Annex a
shellOrRsync r ashell arsync
	| isShell r = ashell
	| otherwise = arsync

{- Configure gcrypt to use the same list of keyids that
 - were passed to initremote as its participants.
 - Also, configure it to use a signing key that is in the list of
 - participants, which gcrypt requires, and which may not otherwise
 - be the case depending on system configuration.
 -
 - (For shared encryption, gcrypt's default behavior is used.)
 -
 - Also, sets gcrypt-publish-participants to avoid unnecessary gpg
 - passphrase prompts.
 -}
setGcryptEncryption :: RemoteConfig -> String -> Annex ()
setGcryptEncryption c remotename = do
	let participants = remoteconfig Git.GCrypt.remoteParticipantConfigKey
	case extractCipher c of
		Nothing -> noCrypto
		Just (EncryptedCipher _ _ (KeyIds { keyIds = ks})) -> do
			setConfig participants (unwords ks)
			let signingkey = ConfigKey $ Git.GCrypt.remoteSigningKey remotename
			skeys <- M.keys <$> liftIO secretKeys
			case filter (`elem` ks) skeys of
				[] -> noop
				(k:_) -> setConfig signingkey k
		Just (SharedCipher _) ->
			unsetConfig participants
	setConfig (remoteconfig Git.GCrypt.remotePublishParticipantConfigKey)
		(Git.Config.boolConfig True)
  where
	remoteconfig n = ConfigKey $ n remotename

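{- Stores a key in the gcrypt repository. For a local repository, the
 - content is first written to a temporary directory and then moved
 - into place, so a partially transferred object is never left at the
 - final location. Over ssh, transfers go through git-annex-shell when
 - the remote was set up with shell access, and otherwise fall back to
 - plain rsync. -}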
store :: Remote -> Remote.Rsync.RsyncOpts -> Storer
store r rsyncopts
	| not $ Git.repoIsUrl (repo r) =
		byteStorer $ \k b p -> guardUsable (repo r) (return False) $ liftIO $ do
			let tmpdir = Git.repoLocation (repo r) </> "tmp" </> keyFile k
			void $ tryIO $ createDirectoryIfMissing True tmpdir
			let tmpf = tmpdir </> keyFile k
			meteredWriteFile p tmpf b
			let destdir = parentDir $ gCryptLocation r k
			Remote.Directory.finalizeStoreGeneric tmpdir destdir
			return True
	| Git.repoIsSsh (repo r) = if isShell r
		then fileStorer $ \k f p -> Ssh.rsyncHelper (Just p)
			=<< Ssh.rsyncParamsRemote False r Upload k f Nothing
		else fileStorer $ Remote.Rsync.store rsyncopts
	| otherwise = unsupportedUrl

retrieve :: Remote -> Remote.Rsync.RsyncOpts -> Retriever
retrieve r rsyncopts
	| not $ Git.repoIsUrl (repo r) = byteRetriever $ \k sink ->
		guardUsable (repo r) (return False) $
			sink =<< liftIO (L.readFile $ gCryptLocation r k)
	| Git.repoIsSsh (repo r) = if isShell r
		then fileRetriever $ \f k p ->
			unlessM (Ssh.rsyncHelper (Just p) =<< Ssh.rsyncParamsRemote False r Download k f Nothing) $
				error "rsync failed"
		else fileRetriever $ Remote.Rsync.retrieve rsyncopts
	| otherwise = unsupportedUrl

remove :: Remote -> Remote.Rsync.RsyncOpts -> Remover
remove r rsyncopts k
	| not $ Git.repoIsUrl (repo r) = guardUsable (repo r) (return False) $
		liftIO $ Remote.Directory.removeDirGeneric (Git.repoLocation (repo r)) (parentDir (gCryptLocation r k))
	| Git.repoIsSsh (repo r) = shellOrRsync r removeshell removersync
	| otherwise = unsupportedUrl
  where
	removersync = Remote.Rsync.remove rsyncopts k
	removeshell = Ssh.dropKey (repo r) k

checkKey :: Remote -> Remote.Rsync.RsyncOpts -> CheckPresent
checkKey r rsyncopts k
	| not $ Git.repoIsUrl (repo r) =
		guardUsable (repo r) (cantCheck $ repo r) $
			liftIO $ doesFileExist (gCryptLocation r k)
	| Git.repoIsSsh (repo r) = shellOrRsync r checkshell checkrsync
	| otherwise = unsupportedUrl
  where
	checkrsync = Remote.Rsync.checkKey (repo r) rsyncopts k
	checkshell = Ssh.inAnnex (repo r) k

{- Annexed objects are hashed using lower-case directories for max
 - portability. -}
gCryptLocation :: Remote -> Key -> FilePath
gCryptLocation r key = Git.repoLocation (repo r) </> objectDir </> keyPath key (hashDirLower def)

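{- How the gcrypt repository is accessed: either directly (eg, a local
 - path or plain rsync), or over ssh with git-annex-shell available on
 - the remote. The method is recorded in the remote's annex-gcrypt git
 - config ("shell" or "true") by gitConfigSpecialRemote, and read back
 - by isShell. -}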
data AccessMethod = AccessDirect | AccessShell

fromAccessMethod :: AccessMethod -> String
fromAccessMethod AccessShell = "shell"
fromAccessMethod AccessDirect = "true"

toAccessMethod :: String -> AccessMethod
toAccessMethod "shell" = AccessShell
toAccessMethod _ = AccessDirect

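{- The UUID of a gcrypt special remote is derived deterministically from
 - the repository's gcrypt-id, as a name-based UUID in the fixed
 - gCryptNameSpace, so every repository that sees the same gcrypt-id
 - maps it to the same UUID. -}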
getGCryptUUID :: Bool -> Git.Repo -> Annex (Maybe UUID)
getGCryptUUID fast r = (genUUIDInNameSpace gCryptNameSpace <$>) . fst
	<$> getGCryptId fast r

coreGCryptId :: String
coreGCryptId = "core.gcrypt-id"

{- gcrypt repos set up by git-annex as special remotes have a
 - core.gcrypt-id setting in their config, which can be mapped back to
 - the remote's UUID.
 -
 - In fast mode, only checks local repos. To check a remote repo,
 - tries git-annex-shell and direct rsync of the git config file.
 -
 - (Also returns a version of input repo with its config read.) -}
getGCryptId :: Bool -> Git.Repo -> Annex (Maybe Git.GCrypt.GCryptId, Git.Repo)
getGCryptId fast r
	| Git.repoIsLocal r || Git.repoIsLocalUnknown r = extract <$>
		liftIO (catchMaybeIO $ Git.Config.read r)
	| not fast = extract . liftM fst <$> getM (eitherToMaybe <$>)
		[ Ssh.onRemote r (Git.Config.fromPipe r, return (Left undefined)) "configlist" [] []
		, getConfigViaRsync r
		]
	| otherwise = return (Nothing, r)
  where
	extract Nothing = (Nothing, r)
	extract (Just r') = (Git.Config.getMaybe coreGCryptId r', r')

getConfigViaRsync :: Git.Repo -> Annex (Either SomeException (Git.Repo, String))
getConfigViaRsync r = do
	(rsynctransport, rsyncurl, _) <- rsyncTransport r
	liftIO $ do
		withTmpFile "tmpconfig" $ \tmpconfig _ -> do
			void $ rsync $ rsynctransport ++
				[ Param $ rsyncurl ++ "/config"
				, Param tmpconfig
				]
			Git.Config.fromFile r tmpconfig