{- git remotes encrypted using git-remote-gcrypt
 -
 - Copyright 2013-2020 Joey Hess <id@joeyh.name>
 -
 - Licensed under the GNU AGPL version 3 or higher.
 -}

{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RankNTypes #-}

module Remote.GCrypt (
    remote,
    chainGen,
    getGCryptUUID,
    coreGCryptId,
    setupRepo,
    accessShellConfig,
    setGcryptEncryption,
) where

import qualified Data.Map as M
import qualified Data.ByteString as S
import qualified Data.ByteString.Lazy as L
import qualified System.FilePath.ByteString as P
import Control.Exception
import Data.Default

import Annex.Common
import qualified Annex
import Types.Remote
import Types.GitConfig
import Types.Crypto
import Types.Creds
import Types.Transfer
import Git.Types (ConfigKey(..), fromConfigKey, fromConfigValue)
import qualified Git
import qualified Git.Command
import qualified Git.Config
import qualified Git.GCrypt
import qualified Git.Construct
import qualified Annex.Branch
import Config
import Config.Cost
import Annex.SpecialRemote.Config
import Remote.Helper.Git
import Remote.Helper.Encryptable
import Remote.Helper.Special
import Remote.Helper.Messages
import Remote.Helper.ExportImport
import qualified Remote.Helper.Ssh as Ssh
import Utility.Metered
import Annex.UUID
import Annex.Ssh
import Annex.Perms
import qualified Remote.Rsync
import qualified Remote.Directory
import Utility.Rsync
import Utility.Tmp
import Logs.Remote
import Utility.Gpg
import Utility.SshHost
import Utility.Tuple
import Utility.Directory.Create
import Messages.Progress
import Types.ProposedAccepted

remote :: RemoteType
remote = specialRemoteType $ RemoteType
    { typename = "gcrypt"
    -- Remote.Git takes care of enumerating gcrypt remotes too,
    -- and will call our chainGen on them.
    , enumerate = const (return [])
    , generate = gen
    , configParser = mkRemoteConfigParser $
        Remote.Rsync.rsyncRemoteConfigs ++
        [ optionalStringParser gitRepoField
            (FieldDesc "(required) path or url to gcrypt repository")
        ]
    , setup = gCryptSetup
    , exportSupported = exportUnsupported
    , importSupported = importUnsupported
    , thirdPartyPopulated = False
    }

gitRepoField :: RemoteConfigField
gitRepoField = Accepted "gitrepo"

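-- A gcrypt special remote is typically created with something along the
-- lines of (illustrative example; the remote name, host, path, and keyid
-- are assumptions, not taken from this module):
--
--   git annex initremote myremote type=gcrypt gitrepo=ssh://example.com/~/encrypted-repo keyid=$mykey
--
-- gitrepo= is the required field declared above; encryption settings such
-- as keyid= are handled by encryptionSetup when gCryptSetup runs.
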
chainGen :: Git.Repo -> UUID -> RemoteConfig -> RemoteGitConfig -> RemoteStateHandle -> Annex (Maybe Remote)
chainGen gcryptr u rc gc rs = do
    g <- gitRepo
    -- get underlying git repo with real path, not gcrypt path
    r <- liftIO $ Git.GCrypt.encryptedRemote g gcryptr
    let r' = r { Git.remoteName = Git.remoteName gcryptr }
    gen r' u rc gc rs

gen :: Git.Repo -> UUID -> RemoteConfig -> RemoteGitConfig -> RemoteStateHandle -> Annex (Maybe Remote)
gen baser u rc gc rs = do
    -- doublecheck that cache matches underlying repo's gcrypt-id
    -- (which might not be set), only for local repos
    (mgcryptid, r) <- getGCryptId True baser gc
    g <- gitRepo
    case (mgcryptid, Git.GCrypt.remoteRepoId g (Git.remoteName baser)) of
        (Just gcryptid, Just cachedgcryptid)
            | gcryptid /= cachedgcryptid -> resetup gcryptid r
        _ -> do
            c <- parsedRemoteConfig remote rc
            gen' r u c gc rs
  where
    -- A different drive may have been mounted, making a different
    -- gcrypt remote available. So the cached gcrypt-id and annex-uuid
    -- of the remote need to be set to match the remote that is now
    -- available. The gcrypt participants also need to be set correctly.
    resetup gcryptid r = do
        let u' = genUUIDInNameSpace gCryptNameSpace (encodeBS gcryptid)
        v <- M.lookup u' <$> remoteConfigMap
        case (Git.remoteName baser, v) of
            (Just remotename, Just rc') -> do
                pc <- parsedRemoteConfig remote rc'
                setGcryptEncryption pc remotename
                storeUUIDIn (remoteAnnexConfig baser "uuid") u'
                setConfig (Git.GCrypt.remoteConfigKey "gcrypt-id" remotename) gcryptid
                gen' r u' pc gc rs
            _ -> do
                warning $ UnquotedString $ "not using unknown gcrypt repository pointed to by remote " ++ Git.repoDescribe r
                return Nothing

gen' :: Git.Repo -> UUID -> ParsedRemoteConfig -> RemoteGitConfig -> RemoteStateHandle -> Annex (Maybe Remote)
gen' r u c gc rs = do
    cst <- remoteCost gc c $
        if repoCheap r
            then nearlyCheapRemoteCost
            else expensiveRemoteCost
    let (rsynctransport, rsyncurl, accessmethod) = rsyncTransportToObjects r gc
    protectsargs <- liftIO Remote.Rsync.probeRsyncProtectsArgs
    let rsyncopts = Remote.Rsync.genRsyncOpts protectsargs c gc rsynctransport rsyncurl
    let this = Remote
            { uuid = u
            , cost = cst
            , name = Git.repoDescribe r
            , storeKey = storeKeyDummy
            , retrieveKeyFile = retrieveKeyFileDummy
            , retrieveKeyFileCheap = Nothing
            , retrievalSecurityPolicy = RetrievalAllKeysSecure
            , removeKey = removeKeyDummy
            , lockContent = Nothing
            , checkPresent = checkPresentDummy
            , checkPresentCheap = repoCheap r
            , exportActions = exportUnsupported
            , importActions = importUnsupported
            , whereisKey = Nothing
            , remoteFsck = Nothing
            , repairRepo = Nothing
            , config = c
            , localpath = localpathCalc r
            , getRepo = return r
            , gitconfig = gc
            , readonly = Git.repoIsHttp r
            , appendonly = False
            , untrustworthy = False
            , availability = repoAvail r
            , remotetype = remote
            , mkUnavailable = return Nothing
            , getInfo = gitRepoInfo this
            , claimUrl = Nothing
            , checkUrl = Nothing
            , remoteStateHandle = rs
            }
    return $ Just $ specialRemote' specialcfg c
        (store this rsyncopts accessmethod)
        (retrieve this rsyncopts accessmethod)
        (remove this rsyncopts accessmethod)
        (checkKey this rsyncopts accessmethod)
        this
  where
    specialcfg
        | Git.repoIsUrl r = (specialRemoteCfg c)
            -- Rsync displays its own progress.
            { displayProgress = False }
        | otherwise = specialRemoteCfg c

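-- Note: the *Dummy fields in the Remote record above are placeholders.
-- specialRemote' (from Remote.Helper.Special) fills in the real
-- store/retrieve/remove/checkPresent implementations from the functions
-- defined below, layering on chunking and encryption as configured.
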
rsyncTransportToObjects :: Git.Repo -> RemoteGitConfig -> (Annex [CommandParam], String, AccessMethod)
rsyncTransportToObjects r gc =
    let (rsynctransport, rsyncurl, m) = rsyncTransport r gc
    in (rsynctransport, rsyncurl ++ "/annex/objects", m)

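-- For illustration (an assumed example, not taken from the source): for a
-- remote whose location is "user@example.com:/srv/repo", this yields the
-- rsync url "user@example.com:/srv/repo/annex/objects", the directory
-- under which the annexed objects live in the gcrypt repository.
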
rsyncTransport :: Git.Repo -> RemoteGitConfig -> (Annex [CommandParam], String, AccessMethod)
rsyncTransport r gc
    | sshprefix `isPrefixOf` loc = sshtransport $ break (== '/') $ drop (length sshprefix) loc
    | "rsync://" `isPrefixOf` loc = rsyncoversshtransport
    | ":" `isInfixOf` loc = sshtransport $ separate (== ':') loc
    | otherwise = rsyncoversshtransport
  where
    sshprefix = "ssh://" :: String
    loc = Git.repoLocation r
    sshtransport (host, path) =
        let rsyncpath = if "/~/" `isPrefixOf` path
                then drop 3 path
                else path
            sshhost = either giveup id (mkSshHost host)
            mkopts = rsyncShell . (Param "ssh" :)
                <$> sshOptions ConsumeStdin (sshhost, Nothing) gc []
        in (mkopts, fromSshHost sshhost ++ ":" ++ rsyncpath, AccessGitAnnexShell)
    rsyncoversshtransport =
        -- git-remote-gcrypt uses a rsync:// url to mean
        -- rsync over ssh. But to rsync, that's the rsync protocol,
        -- so it must be converted to a form that rsync will treat
        -- as rsync over ssh.
        --
        -- There are two url forms that git-remote-gcrypt supports,
        -- rsync://userhost/path and rsync://userhost:path,
        -- which are changed to userhost:/path and userhost:path.
        let loc' = replace "rsync://" "" loc
            loc'' = if ':' `elem` loc'
                then loc'
                else let (a, b) = break (== '/') loc' in a ++ ":" ++ b
        in (pure [], loc'', AccessRsyncOverSsh)

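-- Rough summary of the dispatch above (the concrete urls are assumed
-- examples):
--   ssh://user@host/dir and user@host:dir are handled by sshtransport,
--     using ssh options from sshOptions, and marked AccessGitAnnexShell.
--   rsync://user@host/dir is rewritten to user@host:/dir and marked
--     AccessRsyncOverSsh, as is anything else that does not look like ssh.
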
noCrypto :: Annex a
noCrypto = giveup "cannot use gcrypt remote without encryption enabled"

unsupportedUrl :: a
unsupportedUrl = giveup "unsupported repo url for gcrypt"

gCryptSetup :: SetupStage -> Maybe UUID -> Maybe CredPair -> RemoteConfig -> RemoteGitConfig -> Annex (RemoteConfig, UUID)
gCryptSetup _ mu _ c gc = go $ fromProposedAccepted <$> M.lookup gitRepoField c
  where
    remotename = fromJust (lookupName c)
    go Nothing = giveup "Specify gitrepo="
    go (Just gitrepo) = do
        (c', _encsetup) <- encryptionSetup c gc

        let url = Git.GCrypt.urlPrefix ++ gitrepo
        rs <- Annex.getGitRemotes
        case filter (\r -> Git.remoteName r == Just remotename) rs of
            [] -> inRepo $ Git.Command.run
                [ Param "remote", Param "add"
                , Param remotename
                , Param url
                ]
            (r:_)
                | Git.repoLocation r == url -> noop
                | otherwise -> giveup "Another remote with the same name already exists."

        pc <- either giveup return . parseRemoteConfig c'
            =<< configParser remote c'
        setGcryptEncryption pc remotename

        {- Run a git fetch and a push to the git repo in order to get
         - its gcrypt-id set up, so that later git annex commands
         - will use the remote as a gcrypt remote. The fetch is
         - needed if the repo already exists; the push is needed
         - if the repo has not yet been initialized by gcrypt. -}
        void $ inRepo $ Git.Command.runBool
            [ Param "fetch"
            , Param remotename
            ]
        void $ inRepo $ Git.Command.runBool
            [ Param "push"
            , Param remotename
            , Param $ Git.fromRef Annex.Branch.fullname
            ]
        g <- inRepo Git.Config.reRead
        case Git.GCrypt.remoteRepoId g (Just remotename) of
            Nothing -> giveup "unable to determine gcrypt-id of remote"
            Just gcryptid -> do
                let u = genUUIDInNameSpace gCryptNameSpace (encodeBS gcryptid)
                if Just u == mu || isNothing mu
                    then do
                        method <- setupRepo gcryptid =<< inRepo (Git.Construct.fromRemoteLocation gitrepo False)
                        gitConfigSpecialRemote u c' [("gcrypt", fromAccessMethod method)]
                        return (c', u)
                    else giveup $ "uuid mismatch; expected " ++ show mu ++ " but remote gitrepo has " ++ show u ++ " (" ++ show gcryptid ++ ")"

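-- After a successful setup, the remote is recorded in git config like any
-- other git-annex special remote; roughly (key names and values assumed,
-- see gitConfigSpecialRemote and fromAccessMethod):
--   remote.<name>.annex-uuid    derived from the repository's gcrypt-id
--   remote.<name>.annex-gcrypt  "shell" or "true", depending on the AccessMethod
-- which is what accessShellConfig below later inspects.
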
{- Sets up the gcrypt repository. The repository is either a local
 - repo, or it is accessed via rsync over ssh (without using
 - git-annex-shell), or it is accessed over ssh and git-annex-shell
 - is available to manage it.
 -
 - The GCryptID is recorded in the repository's git config for later use.
 - Also, if the git config has receive.denyNonFastForwards set, disable
 - it; gcrypt relies on being able to fast-forward branches.
 -}
setupRepo :: Git.GCrypt.GCryptId -> Git.Repo -> Annex AccessMethod
setupRepo gcryptid r
    | Git.repoIsUrl r = do
        dummycfg <- liftIO dummyRemoteGitConfig
        let (_, _, accessmethod) = rsyncTransport r dummycfg
        case accessmethod of
            AccessRsyncOverSsh -> rsyncsetup
            AccessGitAnnexShell -> ifM gitannexshellsetup
                ( return AccessGitAnnexShell
                , rsyncsetup
                )
    | Git.repoIsLocalUnknown r = localsetup =<< liftIO (Git.Config.read r)
    | otherwise = localsetup r
  where
    localsetup r' = do
        let setconfig k v = liftIO $ Git.Command.run [Param "config", Param (fromConfigKey k), Param v] r'
        setconfig coreGCryptId gcryptid
        setconfig denyNonFastForwards (Git.Config.boolConfig False)
        return AccessRsyncOverSsh

    {- As well as modifying the remote's git config,
     - create the objectDir on the remote,
     - which is needed for rsync of objects to it to work.
     -}
    rsyncsetup = Remote.Rsync.withRsyncScratchDir $ \tmp -> do
        createAnnexDirectory (toRawFilePath tmp P.</> objectDir)
        dummycfg <- liftIO dummyRemoteGitConfig
        let (rsynctransport, rsyncurl, _) = rsyncTransport r dummycfg
        let tmpconfig = tmp </> "config"
        opts <- rsynctransport
        void $ liftIO $ rsync $ opts ++
            [ Param $ rsyncurl ++ "/config"
            , Param tmpconfig
            ]
        liftIO $ do
            void $ Git.Config.changeFile tmpconfig coreGCryptId (encodeBS gcryptid)
            void $ Git.Config.changeFile tmpconfig denyNonFastForwards (Git.Config.boolConfig' False)
        ok <- liftIO $ rsync $ opts ++
            [ Param "--recursive"
            , Param $ tmp ++ "/"
            , Param rsyncurl
            ]
        unless ok $
            giveup "Failed to connect to remote to set it up."
        return AccessRsyncOverSsh

    {- Ask git-annex-shell to configure the repository as a gcrypt
     - repository. May fail if the git-annex-shell on the remote is too old. -}
    gitannexshellsetup = Ssh.onRemote NoConsumeStdin r
        (\f p -> liftIO (boolSystem f p), return False)
        "gcryptsetup" [ Param gcryptid ] []

    denyNonFastForwards = ConfigKey "receive.denyNonFastForwards"

accessShell :: Remote -> Bool
accessShell = accessShellConfig . gitconfig

accessShellConfig :: RemoteGitConfig -> Bool
accessShellConfig c = case method of
    AccessGitAnnexShell -> True
    _ -> False
  where
    method = toAccessMethod $ fromMaybe "" $ remoteAnnexGCrypt c

shellOrRsync :: Remote -> Annex a -> Annex a -> Annex a
shellOrRsync r ashell arsync
    | accessShell r = ashell
    | otherwise = arsync

{- Configure gcrypt to use the same list of keyids that
 - were passed to initremote as its participants.
 - Also, configure it to use a signing key that is in the list of
 - participants, which gcrypt requires is the case, and may not
 - otherwise be, depending on system configuration.
 -
 - (For shared encryption, gcrypt's default behavior is used.)
 -
 - Also, sets gcrypt-publish-participants to avoid unnecessary gpg
 - passphrase prompts.
 -}
setGcryptEncryption :: ParsedRemoteConfig -> String -> Annex ()
setGcryptEncryption c remotename = do
    let participants = remoteconfig Git.GCrypt.remoteParticipantConfigKey
    case extractCipher c of
        Nothing -> noCrypto
        Just cip -> case cipherKeyIds cip of
            Nothing -> noop
            Just (KeyIds { keyIds = ks}) -> do
                setConfig participants (unwords ks)
                let signingkey = Git.GCrypt.remoteSigningKey remotename
                cmd <- gpgCmd <$> Annex.getGitConfig
                skeys <- M.keys <$> liftIO (secretKeys cmd)
                case filter (`elem` ks) skeys of
                    [] -> noop
                    (k:_) -> setConfig signingkey k
    setConfig (remoteconfig Git.GCrypt.remotePublishParticipantConfigKey)
        (Git.Config.boolConfig True)
  where
    remoteconfig n = n remotename

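-- The net effect is gcrypt configuration for the remote along these lines
-- (key names as used by git-remote-gcrypt; keyids are illustrative):
--   remote.<name>.gcrypt-participants "AAAAAAAA BBBBBBBB"
--   remote.<name>.gcrypt-signingkey "AAAAAAAA"
--   remote.<name>.gcrypt-publish-participants true
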
store :: Remote -> Remote.Rsync.RsyncOpts -> AccessMethod -> Storer
store r rsyncopts accessmethod k s p = do
    repo <- getRepo r
    store' repo r rsyncopts accessmethod k s p

store' :: Git.Repo -> Remote -> Remote.Rsync.RsyncOpts -> AccessMethod -> Storer
store' repo r rsyncopts accessmethod
    | not $ Git.repoIsUrl repo =
        byteStorer $ \k b p -> guardUsable repo (giveup "cannot access remote") $ liftIO $ do
            let tmpdir = Git.repoPath repo P.</> "tmp" P.</> keyFile k
            void $ tryIO $ createDirectoryUnder [Git.repoPath repo] tmpdir
            let tmpf = tmpdir P.</> keyFile k
            meteredWriteFile p (fromRawFilePath tmpf) b
            let destdir = parentDir $ toRawFilePath $ gCryptLocation repo k
            Remote.Directory.finalizeStoreGeneric (Git.repoPath repo) tmpdir destdir
    | Git.repoIsSsh repo = if accessShell r
        then fileStorer $ \k f p -> do
            oh <- mkOutputHandler
            ok <- Ssh.rsyncHelper oh (Just p)
                =<< Ssh.rsyncParamsRemote r Upload k f
            unless ok $
                giveup "rsync failed"
        else storersync
    | accessmethod == AccessRsyncOverSsh = storersync
    | otherwise = unsupportedUrl
  where
    storersync = fileStorer $ Remote.Rsync.store rsyncopts

retrieve :: Remote -> Remote.Rsync.RsyncOpts -> AccessMethod -> Retriever
retrieve r rsyncopts accessmethod k p miv sink = do
    repo <- getRepo r
    retrieve' repo r rsyncopts accessmethod k p miv sink

retrieve' :: Git.Repo -> Remote -> Remote.Rsync.RsyncOpts -> AccessMethod -> Retriever
retrieve' repo r rsyncopts accessmethod
    | not $ Git.repoIsUrl repo = byteRetriever $ \k sink ->
        guardUsable repo (giveup "cannot access remote") $
            sink =<< liftIO (L.readFile $ gCryptLocation repo k)
    | Git.repoIsSsh repo = if accessShell r
        then fileRetriever $ \f k p -> do
            ps <- Ssh.rsyncParamsRemote r Download k
                (fromRawFilePath f)
            oh <- mkOutputHandler
            unlessM (Ssh.rsyncHelper oh (Just p) ps) $
                giveup "rsync failed"
        else retrieversync
    | accessmethod == AccessRsyncOverSsh = retrieversync
    | otherwise = unsupportedUrl
  where
    retrieversync = fileRetriever $ Remote.Rsync.retrieve rsyncopts

remove :: Remote -> Remote.Rsync.RsyncOpts -> AccessMethod -> Remover
remove r rsyncopts accessmethod proof k = do
    repo <- getRepo r
    remove' repo r rsyncopts accessmethod proof k

remove' :: Git.Repo -> Remote -> Remote.Rsync.RsyncOpts -> AccessMethod -> Remover
|
toward SafeDropProof expiry checking
Added Maybe POSIXTime to SafeDropProof, which gets set when the proof is
based on a LockedCopy. If there are several LockedCopies, it uses the
closest expiry time. That is not optimal, it may be that the proof
expires based on one LockedCopy but another one has not expired. But
that seems unlikely to really happen, and anyway the user can just
re-run a drop if it fails due to expiry.
Pass the SafeDropProof to removeKey, which is responsible for checking
it for expiry in situations where that could be a problem. Which really
only means in Remote.Git.
Made Remote.Git check expiry when dropping from a local remote.
Checking expiry when dropping from a P2P remote is not yet implemented.
P2P.Protocol.remove has SafeDropProof plumbed through to it for that
purpose.
Fixing the remaining 2 build warnings should complete this work.
Note that the use of a POSIXTime here means that if the clock gets set
forward while git-annex is in the middle of a drop, it may say that
dropping took too long. That seems ok. Less ok is that if the clock gets
turned back a sufficient amount (eg 5 minutes), proof expiry won't be
noticed. It might be better to use the Monotonic clock, but that doesn't
advance when a laptop is suspended, and while there is the linux
Boottime clock, that is not available on other systems. Perhaps a
combination of POSIXTime and the Monotonic clock could detect laptop
suspension and also detect clock being turned back?
There is a potential future flag day where
p2pDefaultLockContentRetentionDuration is not assumed, but is probed
using the P2P protocol, and peers that don't support it can no longer
produce a LockedCopy. Until that happens, when git-annex is
communicating with older peers there is a risk of data loss when
a ssh connection closes during LOCKCONTENT.
2024-07-04 16:23:46 +00:00
|
|
|
|
remove' repo r rsyncopts accessmethod proof k
|
2020-05-14 18:08:09 +00:00
|
|
|
|
| not $ Git.repoIsUrl repo = guardUsable repo (giveup "cannot access remote") $
|
2023-07-21 20:04:11 +00:00
|
|
|
|
liftIO $ Remote.Directory.removeDirGeneric True
|
|
|
|
|
(gCryptTopDir repo)
|
2020-10-30 17:00:12 +00:00
|
|
|
|
(fromRawFilePath (parentDir (toRawFilePath (gCryptLocation repo k))))
|
2018-06-04 18:31:55 +00:00
|
|
|
|
| Git.repoIsSsh repo = shellOrRsync r removeshell removersync
|
2021-03-09 19:58:09 +00:00
|
|
|
|
| accessmethod == AccessRsyncOverSsh = removersync
|
2013-09-08 17:00:48 +00:00
|
|
|
|
| otherwise = unsupportedUrl
|
|
|
|
|
where
|
	removersync = Remote.Rsync.remove rsyncopts proof k
	removeshell = Ssh.dropKey repo proof k
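
{- An illustrative sketch, not part of this module: the kind of expiry
 - check that the SafeDropProof work described above asks a Remover to
 - perform before acting on a proof. The real SafeDropProof type lives
 - elsewhere in git-annex; a bare Maybe POSIXTime stands in for it here,
 - and proofExpired is a made-up name. -}
import Data.Time.Clock.POSIX (POSIXTime, getPOSIXTime)

-- True when a proof carrying an expiry time has already expired,
-- in which case the drop should be refused rather than performed.
proofExpired :: Maybe POSIXTime -> IO Bool
proofExpired Nothing = return False
proofExpired (Just expiry) = do
	now <- getPOSIXTime
	return (now >= expiry)
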
checkKey :: Remote -> Remote.Rsync.RsyncOpts -> AccessMethod -> CheckPresent
checkKey r rsyncopts accessmethod k = do
	repo <- getRepo r
	checkKey' repo r rsyncopts accessmethod k

checkKey' :: Git.Repo -> Remote -> Remote.Rsync.RsyncOpts -> AccessMethod -> CheckPresent
checkKey' repo r rsyncopts accessmethod k
	| not $ Git.repoIsUrl repo =
		guardUsable repo (cantCheck repo) $
			liftIO $ doesFileExist (gCryptLocation repo k)
	| Git.repoIsSsh repo = shellOrRsync r checkshell checkrsync
	| accessmethod == AccessRsyncOverSsh = checkrsync
	| otherwise = unsupportedUrl
  where

remove "checking remotename" message

This fixes fsck of a remote that uses chunking displaying
"(checking remotename) (checking remotename)" for every chunk.

Also, some remotes displayed the message, and others did not, with no
consistency. It was originally displayed only when accessing remotes
that were expensive or might involve a password prompt, I think, but
nothing in the API said when to do it, so it became an inconsistent mess.

Originally I thought fsck should always display it. But it only displays
in fsck --from remote, so the user knows the remote is being accessed,
so there is no reason to tell them it's accessing it over and over.

It was also possible for git-annex move to sometimes display it twice,
due to checking if content is present twice. But the user of move
specifies --from/--to, so it does not need to display when it's
accessing the remote, as the user expects it to access the remote.

git-annex get might display it, but only if the remote also supports
hasKeyCheap, which is really only local git remotes, which didn't
display it always; and in any case nothing displayed it before hasKeyCheap,
which is checked first, so I don't think this needs to display it ever.

mirror is like move. And that's all the main places it would have been
displayed.

This commit was sponsored by Jochen Bartl on Patreon.

	checkrsync = Remote.Rsync.checkKey rsyncopts k
	checkshell = Ssh.inAnnex repo k
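
{- An illustrative sketch, assuming only base (this is not the module's
 - real shellOrRsync): remove' and checkKey' above both pick between a
 - git-annex-shell action and an rsync fallback. The Bool below stands in
 - for the "does the remote support git-annex-shell" test. -}
shellOrRsyncSketch :: Bool -> IO a -> IO a -> IO a
shellOrRsyncSketch shellok shellaction rsyncaction
	| shellok = shellaction
	| otherwise = rsyncaction
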
gCryptTopDir :: Git.Repo -> FilePath
gCryptTopDir repo = Git.repoLocation repo </> fromRawFilePath objectDir

{- Annexed objects are hashed using lower-case directories for max
 - portability. -}
gCryptLocation :: Git.Repo -> Key -> FilePath
gCryptLocation repo key = gCryptTopDir repo
	</> fromRawFilePath (keyPath key (hashDirLower def))
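
{- An illustrative sketch, assuming only base: how a hashed object path of
 - the shape produced above could be assembled. The directory names and the
 - key name below are invented for illustration; the real code derives them
 - with keyPath and hashDirLower. -}
import System.FilePath ((</>))

exampleGCryptLocation :: FilePath
exampleGCryptLocation = topdir </> hashdir </> keyname </> keyname
  where
	topdir = "/mnt/encrypted.git/annex/objects"  -- what gCryptTopDir might return
	hashdir = "f87" </> "4d5"                    -- two lower-case hash levels
	keyname = "SHA256E-s1024--exampledigest"     -- a made-up key name
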
data AccessMethod = AccessRsyncOverSsh | AccessGitAnnexShell
	deriving (Eq)

fromAccessMethod :: AccessMethod -> String
fromAccessMethod AccessGitAnnexShell = "shell"
fromAccessMethod AccessRsyncOverSsh = "true"

toAccessMethod :: String -> AccessMethod
toAccessMethod "shell" = AccessGitAnnexShell
toAccessMethod _ = AccessRsyncOverSsh
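
{- An illustrative usage sketch of the two conversions above: "shell" and
 - "true" both round-trip, and any unrecognized string falls back to rsync
 - over ssh. The otherwise surprising string "true" appears to be kept so
 - that existing remote configurations keep parsing. -}
accessMethodRoundTrips :: Bool
accessMethodRoundTrips = and
	[ toAccessMethod (fromAccessMethod AccessGitAnnexShell) == AccessGitAnnexShell
	, toAccessMethod (fromAccessMethod AccessRsyncOverSsh) == AccessRsyncOverSsh
	, toAccessMethod "unrecognized" == AccessRsyncOverSsh
	]
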
getGCryptUUID :: Bool -> Git.Repo -> Annex (Maybe UUID)
getGCryptUUID fast r = do
	dummycfg <- liftIO dummyRemoteGitConfig
	(genUUIDInNameSpace gCryptNameSpace . encodeBS <$>) . fst
		<$> getGCryptId fast r dummycfg
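
{- An illustrative sketch, assuming the uuid package: the idea behind the
 - mapping above is a deterministic, name-based UUID, so the same gcrypt-id
 - always yields the same git-annex UUID. The namespace below is only a
 - placeholder; git-annex uses its own gCryptNameSpace. -}
import qualified Data.ByteString as B
import qualified Data.ByteString.Char8 as B8
import qualified Data.UUID as UUID
import qualified Data.UUID.V5 as V5

gcryptIdToUUIDSketch :: String -> UUID.UUID
gcryptIdToUUIDSketch gcryptid =
	V5.generateNamed V5.namespaceURL (B.unpack (B8.pack gcryptid))
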
coreGCryptId :: ConfigKey
coreGCryptId = "core.gcrypt-id"

{- gcrypt repos set up by git-annex as special remotes have a
 - core.gcrypt-id setting in their config, which can be mapped back to
 - the remote's UUID.
 -
 - In fast mode, only checks local repos. To check a remote repo,
 - tries git-annex-shell and direct rsync of the git config file.
 -
 - (Also returns a version of input repo with its config read.) -}
getGCryptId :: Bool -> Git.Repo -> RemoteGitConfig -> Annex (Maybe Git.GCrypt.GCryptId, Git.Repo)
getGCryptId fast r gc
	| Git.repoIsLocal r || Git.repoIsLocalUnknown r = extract <$>
		liftIO (catchMaybeIO $ Git.Config.read r)
	| not fast = extract . liftM fst3 <$> getM (eitherToMaybe <$>)
		[ Ssh.onRemote NoConsumeStdin r (\f p -> liftIO (Git.Config.fromPipe r f p Git.Config.ConfigList), return (Left $ giveup "configlist failed")) "configlist" [] []
		, getConfigViaRsync r gc
		]
	| otherwise = return (Nothing, r)
  where
	extract Nothing = (Nothing, r)
	extract (Just r') = (fromConfigValue <$> Git.Config.getMaybe coreGCryptId r', r')
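
{- An illustrative sketch, base only: the "try each way in turn, take the
 - first that succeeds" pattern that getGCryptId uses via getM above, with
 - eitherToMaybe collapsing failed attempts to Nothing. -}
firstSuccess :: Monad m => [m (Maybe a)] -> m (Maybe a)
firstSuccess [] = return Nothing
firstSuccess (a:as) = a >>= maybe (firstSuccess as) (return . Just)
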
getConfigViaRsync :: Git.Repo -> RemoteGitConfig -> Annex (Either SomeException (Git.Repo, S.ByteString, String))
getConfigViaRsync r gc = do
	let (rsynctransport, rsyncurl, _) = rsyncTransport r gc
	opts <- rsynctransport
	liftIO $ do
		withTmpFile "tmpconfig" $ \tmpconfig _ -> do
			void $ rsync $ opts ++
				[ Param $ rsyncurl ++ "/config"
				, Param tmpconfig
				]
			Git.Config.fromFile r tmpconfig
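
{- An illustrative sketch, assuming the process and temporary packages
 - (not the module's real code): the shape of getConfigViaRsync above,
 - fetching a remote "config" file into a temporary file and reading it
 - back. Error handling and the real Git.Config parser are omitted, and
 - fetchRemoteConfigSketch is a made-up name. -}
import System.IO (hClose)
import System.IO.Temp (withSystemTempFile)
import System.Process (callProcess)
import qualified Data.ByteString.Char8 as B8

fetchRemoteConfigSketch :: String -> IO String
fetchRemoteConfigSketch rsyncurl =
	withSystemTempFile "tmpconfig" $ \tmpconfig h -> do
		hClose h
		callProcess "rsync" [rsyncurl ++ "/config", tmpconfig]
		B8.unpack <$> B8.readFile tmpconfig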