{- Standard git remotes.
 -
 - Copyright 2011-2020 Joey Hess <id@joeyh.name>
 -
 - Licensed under the GNU AGPL version 3 or higher.
 -}

{-# LANGUAGE CPP #-}
{-# LANGUAGE OverloadedStrings #-}

module Remote.Git (
	remote,
	configRead,
	repoAvail,
	onLocalRepo,
) where

import Annex.Common
import Annex.Ssh
import Types.Remote
import Types.GitConfig
import qualified Git
import qualified Git.Config
import qualified Git.Construct
import qualified Git.Command
import qualified Git.GCrypt
import qualified Git.Types as Git
import qualified Annex
import Logs.Presence
import Annex.Transfer
import Annex.UUID
import qualified Annex.Content
import qualified Annex.BranchState
import qualified Annex.Branch
import qualified Annex.Url as Url
import qualified Annex.SpecialRemote.Config as SpecialRemote
import Utility.Tmp
import Config
import Config.Cost
import Annex.SpecialRemote.Config
import Config.DynamicConfig
import Annex.Init
import Types.CleanupActions
import qualified CmdLine.GitAnnexShell.Fields as Fields
import Logs.Location
import Utility.Metered
import Utility.CopyFile
import Utility.Env
import Utility.Batch
import Utility.SimpleProtocol
import Remote.Helper.Git
import Remote.Helper.Messages
import Remote.Helper.ExportImport
import qualified Remote.Helper.Ssh as Ssh
import qualified Remote.GCrypt
import qualified Remote.GitLFS
import qualified Remote.P2P
import qualified Remote.Helper.P2P as P2PHelper
import P2P.Address
import Annex.Path
import Creds
import Types.NumCopies
import Types.ProposedAccepted
import Annex.Action
import Messages.Progress

#ifndef mingw32_HOST_OS
import qualified Utility.RawFilePath as R
import Utility.FileMode
#endif

import Control.Concurrent
import Control.Concurrent.MSampleVar
import qualified Data.Map as M
import qualified Data.ByteString as S
import Network.URI

remote :: RemoteType
remote = RemoteType
	{ typename = "git"
	, enumerate = list
	, generate = gen
	, configParser = mkRemoteConfigParser
		[ optionalStringParser locationField
			(FieldDesc "url of git remote to remember with special remote")
		]
	, setup = gitSetup
	, exportSupported = exportUnsupported
	, importSupported = importUnsupported
	}

locationField :: RemoteConfigField
locationField = Accepted "location"

list :: Bool -> Annex [Git.Repo]
list autoinit = do
	c <- fromRepo Git.config
	rs <- mapM (tweakurl c) =<< Annex.getGitRemotes
	mapM (configRead autoinit) rs
  where
	annexurl r = remoteConfig r "annexurl"
	tweakurl c r = do
		let n = fromJust $ Git.remoteName r
		case M.lookup (annexurl r) c of
			Nothing -> return r
			Just url -> inRepo $ \g ->
				Git.Construct.remoteNamed n $
					Git.Construct.fromRemoteLocation (Git.fromConfigValue url) g

{- Git remotes are normally set up using standard git commands, not
 - git-annex initremote and enableremote.
 -
 - For initremote, the git remote must already be set up, and have a uuid.
 - Initremote simply remembers its location.
 -
 - enableremote simply sets up a git remote using the stored location.
 - No attempt is made to make the remote accessible via ssh key setup,
 - etc.
 -}
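{- For example (a sketch of the workflow described above; the remote
 - name and url are hypothetical, not taken from the documentation):
 -
 -   git remote add server ssh://example.com/annex.git
 -   git annex initremote server type=git location=ssh://example.com/annex.git
 -
 - and later, in another clone that knows the special remote:
 -
 -   git annex enableremote server
 -}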
gitSetup :: SetupStage -> Maybe UUID -> Maybe CredPair -> RemoteConfig -> RemoteGitConfig -> Annex (RemoteConfig, UUID)
gitSetup Init mu _ c _ = do
	let location = fromMaybe (giveup "Specify location=url") $
		Url.parseURIRelaxed . fromProposedAccepted
			=<< M.lookup locationField c
	rs <- Annex.getGitRemotes
	u <- case filter (\r -> Git.location r == Git.Url location) rs of
		[r] -> getRepoUUID r
		[] -> giveup "could not find existing git remote with specified location"
		_ -> giveup "found multiple git remotes with specified location"
	if isNothing mu || mu == Just u
		then return (c, u)
		else error "git remote did not have specified uuid"
gitSetup (Enable _) (Just u) _ c _ = do
	inRepo $ Git.Command.run
		[ Param "remote"
		, Param "add"
		, Param $ fromMaybe (giveup "no name") (SpecialRemote.lookupName c)
		, Param $ maybe (giveup "no location") fromProposedAccepted (M.lookup locationField c)
		]
	return (c, u)
gitSetup (Enable _) Nothing _ _ _ = error "unable to enable git remote with no specified uuid"

{- It's assumed to be cheap to read the config of non-URL remotes, so this is
 - done each time git-annex is run in a way that uses remotes, unless
 - annex-checkuuid is false.
 -
 - Conversely, the config of a URL remote is only read when there is no
 - cached UUID value. -}
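{- For example, re-reading the config of a local remote named "foo" on
 - every run can be disabled with (a sketch; "foo" is a hypothetical
 - remote name):
 -
 -   git config remote.foo.annex-checkuuid false
 -}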
configRead :: Bool -> Git.Repo -> Annex Git.Repo
configRead autoinit r = do
	gc <- Annex.getRemoteGitConfig r
	hasuuid <- (/= NoUUID) <$> getRepoUUID r
	annexignore <- liftIO $ getDynamicConfig (remoteAnnexIgnore gc)
	case (repoCheap r, annexignore, hasuuid) of
		(_, True, _) -> return r
		(True, _, _)
			| remoteAnnexCheckUUID gc -> tryGitConfigRead autoinit r hasuuid
			| otherwise -> return r
		(False, _, False) -> configSpecialGitRemotes r >>= \case
			Nothing -> tryGitConfigRead autoinit r False
			Just r' -> return r'
		_ -> return r

gen :: Git.Repo -> UUID -> RemoteConfig -> RemoteGitConfig -> RemoteStateHandle -> Annex (Maybe Remote)
gen r u rc gc rs
	-- Remote.GitLFS may be used with a repo that is also encrypted
	-- with gcrypt so is checked first.
	| remoteAnnexGitLFS gc = Remote.GitLFS.gen r u rc gc rs
	| Git.GCrypt.isEncrypted r = Remote.GCrypt.chainGen r u rc gc rs
	| otherwise = case repoP2PAddress r of
		Nothing -> do
			st <- mkState r u gc
			c <- parsedRemoteConfig remote rc
			go st c <$> remoteCost gc defcst
		Just addr -> Remote.P2P.chainGen addr r u rc gc rs
  where
	defcst = if repoCheap r then cheapRemoteCost else expensiveRemoteCost
	go st c cst = Just new
	  where
		new = Remote
			{ uuid = u
			, cost = cst
			, name = Git.repoDescribe r
			, storeKey = copyToRemote new st
			, retrieveKeyFile = copyFromRemote new st
			, retrieveKeyFileCheap = copyFromRemoteCheap new st r
			, retrievalSecurityPolicy = RetrievalAllKeysSecure
			, removeKey = dropKey new st
			, lockContent = Just (lockKey new st)
			, checkPresent = inAnnex new st
			, checkPresentCheap = repoCheap r
			, exportActions = exportUnsupported
			, importActions = importUnsupported
			, whereisKey = Nothing
			, remoteFsck = if Git.repoIsUrl r
				then Nothing
				else Just $ fsckOnRemote r
			, repairRepo = if Git.repoIsUrl r
				then Nothing
				else Just $ repairRemote r
			, config = c
			, localpath = localpathCalc r
			, getRepo = getRepoFromState st
			, gitconfig = gc
			, readonly = Git.repoIsHttp r
			, appendonly = False
			, availability = availabilityCalc r
			, remotetype = remote
			, mkUnavailable = unavailable r u rc gc rs
			, getInfo = gitRepoInfo new
			, claimUrl = Nothing
			, checkUrl = Nothing
			, remoteStateHandle = rs
			}

unavailable :: Git.Repo -> UUID -> RemoteConfig -> RemoteGitConfig -> RemoteStateHandle -> Annex (Maybe Remote)
unavailable r = gen r'
  where
	r' = case Git.location r of
		Git.Local { Git.gitdir = d } ->
			r { Git.location = Git.LocalUnknown d }
		Git.Url url -> case uriAuthority url of
			Just auth ->
				let auth' = auth { uriRegName = "!dne!" }
				in r { Git.location = Git.Url (url { uriAuthority = Just auth' }) }
			Nothing -> r { Git.location = Git.Unknown }
		_ -> r -- already unavailable

{- Checks relatively inexpensively if a repository is available for use. -}
repoAvail :: Git.Repo -> Annex Bool
repoAvail r
	| Git.repoIsHttp r = return True
	| Git.GCrypt.isEncrypted r = do
		g <- gitRepo
		liftIO $ do
			er <- Git.GCrypt.encryptedRemote g r
			if Git.repoIsLocal er || Git.repoIsLocalUnknown er
				then catchBoolIO $
					void (Git.Config.read er) >> return True
				else return True
	| Git.repoIsUrl r = return True
	| Git.repoIsLocalUnknown r = return False
	| otherwise = liftIO $ isJust <$> catchMaybeIO (Git.Config.read r)

{- Tries to read the config for a specified remote, updates state, and
 - returns the updated repo. -}
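{- Summarizing the dispatch below: ssh remotes are queried with
 - git-annex-shell configlist; http remotes have their config file
 - downloaded; gcrypt remotes get a UUID derived from their gcrypt-id;
 - other url protocols are unsupported, so annex-ignore is set; and
 - local repositories have their git config read directly, initializing
 - them first when that is allowed. -}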
tryGitConfigRead :: Bool -> Git.Repo -> Bool -> Annex Git.Repo
tryGitConfigRead autoinit r hasuuid
	| haveconfig r = return r -- already read
	| Git.repoIsSsh r = storeUpdatedRemote $ do
		v <- Ssh.onRemote NoConsumeStdin r
			(pipedconfig Git.Config.ConfigList autoinit (Git.repoDescribe r), return (Left $ giveup "configlist failed"))
			"configlist" [] configlistfields
		case v of
			Right r'
				| haveconfig r' -> return r'
				| otherwise -> configlist_failed
			Left _ -> configlist_failed
	| Git.repoIsHttp r = storeUpdatedRemote geturlconfig
	| Git.GCrypt.isEncrypted r = handlegcrypt =<< getConfigMaybe (remoteAnnexConfig r "uuid")
	| Git.repoIsUrl r = do
		set_ignore "uses a protocol not supported by git-annex" False
		return r
	| otherwise = storeUpdatedRemote $
		liftIO readlocalannexconfig
			`catchNonAsync` const failedreadlocalconfig
  where
	haveconfig = not . M.null . Git.config

	pipedconfig st mustincludeuuuid configloc cmd params = do
		v <- liftIO $ Git.Config.fromPipe r cmd params st
		case v of
			Right (r', val, _err) -> do
				unless (isUUIDConfigured r' || S.null val || not mustincludeuuuid) $ do
					warning $ "Failed to get annex.uuid configuration of repository " ++ Git.repoDescribe r
					warning $ "Instead, got: " ++ show val
					warning $ "This is unexpected; please check the network transport!"
				return $ Right r'
			Left l -> do
				warning $ "Unable to parse git config from " ++ configloc
				return $ Left l

	geturlconfig = Url.withUrlOptionsPromptingCreds $ \uo -> do
		v <- withTmpFile "git-annex.tmp" $ \tmpfile h -> do
			liftIO $ hClose h
			let url = Git.repoLocation r ++ "/config"
			ifM (liftIO $ Url.downloadQuiet nullMeterUpdate url tmpfile uo)
				( Just <$> pipedconfig Git.Config.ConfigNullList False url "git" [Param "config", Param "--null", Param "--list", Param "--file", File tmpfile]
				, return Nothing
				)
		case v of
			Just (Right r') -> do
				-- Cache when http remote is not bare for
				-- optimisation.
				unless (Git.Config.isBare r') $
					setremote setRemoteBare False
				return r'
			_ -> do
				set_ignore "not usable by git-annex" False
				return r

	{- Is this remote just not available, or does
	 - it not have git-annex-shell?
	 - Find out by trying to fetch from the remote. -}
	configlist_failed = case Git.remoteName r of
		Nothing -> return r
		Just n -> do
			whenM (inRepo $ Git.Command.runBool [Param "fetch", Param "--quiet", Param n]) $ do
				set_ignore "does not have git-annex installed" True
			return r

	set_ignore msg longmessage = do
		case Git.remoteName r of
			Nothing -> noop
			Just n -> do
				warning $ "Remote " ++ n ++ " " ++ msg ++ "; setting annex-ignore"
				when longmessage $
					warning $ "This could be a problem with the git-annex installation on the remote. Please make sure that git-annex-shell is available in PATH when you ssh into the remote. Once you have fixed the git-annex installation, run: git annex enableremote " ++ n
		setremote setRemoteIgnore True

	setremote setter v = case Git.remoteName r of
		Nothing -> noop
		Just _ -> setter r v

	handlegcrypt Nothing = return r
	handlegcrypt (Just _cacheduuid) = do
		-- Generate UUID from the gcrypt-id
		g <- gitRepo
		case Git.GCrypt.remoteRepoId g (Git.remoteName r) of
			Nothing -> return r
			Just v -> storeUpdatedRemote $ liftIO $ setUUID r $
				genUUIDInNameSpace gCryptNameSpace v

	{- The local repo may not yet be initialized, so try to initialize
	 - it if allowed. However, if that fails, still return the read
	 - git config. -}
	readlocalannexconfig = do
		let check = do
			Annex.BranchState.disableUpdate
			catchNonAsync autoInitialize $ \e ->
				warning $ "remote " ++ Git.repoDescribe r ++
					": " ++ show e
			Annex.getState Annex.repo
		s <- Annex.new r
		Annex.eval s $ check `finally` stopCoProcesses

	failedreadlocalconfig = do
		unless hasuuid $ case Git.remoteName r of
			Nothing -> noop
			Just n -> do
				warning $ "Remote " ++ n ++ " cannot currently be accessed."
		return r

	configlistfields = if autoinit
		then [(Fields.autoInit, "1")]
		else []

{- Handles special remotes that can be enabled by the presence of
 - regular git remotes.
 -
 - When a remote repo is found to be such a special remote, its
 - UUID is cached in the git config, and the repo returned with
 - the UUID set.
 -}
configSpecialGitRemotes :: Git.Repo -> Annex (Maybe Git.Repo)
configSpecialGitRemotes r = Remote.GitLFS.configKnownUrl r >>= \case
	Nothing -> return Nothing
	Just r' -> Just <$> storeUpdatedRemote (return r')

storeUpdatedRemote :: Annex Git.Repo -> Annex Git.Repo
storeUpdatedRemote = observe $ \r' -> do
	l <- Annex.getGitRemotes
	let rs = exchange l r'
	Annex.changeState $ \s -> s { Annex.gitremotes = Just rs }
  where
	exchange [] _ = []
	exchange (old:ls) new
		| Git.remoteName old == Git.remoteName new =
			new : exchange ls new
		| otherwise =
			old : exchange ls new

{- Checks if a given remote has the content for a key in its annex. -}
inAnnex :: Remote -> State -> Key -> Annex Bool
inAnnex rmt st key = do
	repo <- getRepo rmt
	inAnnex' repo rmt st key

inAnnex' :: Git.Repo -> Remote -> State -> Key -> Annex Bool
inAnnex' repo rmt st@(State connpool duc _ _ _) key
	| Git.repoIsHttp repo = checkhttp
	| Git.repoIsUrl repo = checkremote
	| otherwise = checklocal
  where
	checkhttp = do
		showChecking repo
		gc <- Annex.getGitConfig
		ifM (Url.withUrlOptionsPromptingCreds $ \uo -> anyM (\u -> Url.checkBoth u (fromKey keySize key) uo) (keyUrls gc repo rmt key))
			( return True
			, giveup "not found"
			)
	checkremote =
		let fallback = Ssh.inAnnex repo key
		in P2PHelper.checkpresent (Ssh.runProto rmt connpool (cantCheck rmt) fallback) key
	checklocal = ifM duc
		( guardUsable repo (cantCheck repo) $
			maybe (cantCheck repo) return
				=<< onLocalFast st (Annex.Content.inAnnexSafe key)
		, cantCheck repo
		)

keyUrls :: GitConfig -> Git.Repo -> Remote -> Key -> [String]
keyUrls gc repo r key = map tourl locs'
  where
	tourl l = Git.repoLocation repo ++ "/" ++ l
	-- If the remote is known to not be bare, try the hash locations
	-- used for non-bare repos first, as an optimisation.
	locs
		| remoteAnnexBare remoteconfig == Just False = reverse (annexLocations gc key)
		| otherwise = annexLocations gc key
#ifndef mingw32_HOST_OS
	locs' = map fromRawFilePath locs
#else
	locs' = map (replace "\\" "/" . fromRawFilePath) locs
#endif
	remoteconfig = gitconfig r

dropKey :: Remote -> State -> Key -> Annex ()
dropKey r st key = do
	repo <- getRepo r
	dropKey' repo r st key

dropKey' :: Git.Repo -> Remote -> State -> Key -> Annex ()
dropKey' repo r st@(State connpool duc _ _ _) key
	| not $ Git.repoIsUrl repo = ifM duc
		( guardUsable repo (giveup "cannot access remote") $
			commitOnCleanup repo r st $ onLocalFast st $ do
				whenM (Annex.Content.inAnnex key) $ do
					let cleanup = logStatus key InfoMissing
					Annex.Content.lockContentForRemoval key cleanup $ \lock -> do
						Annex.Content.removeAnnex lock
						cleanup
				Annex.Content.saveState True
		, giveup "remote does not have expected annex.uuid value"
		)
	| Git.repoIsHttp repo = giveup "dropping from http remote not supported"
	| otherwise = commitOnCleanup repo r st $ do
		let fallback = Ssh.dropKey' repo key
		P2PHelper.remove (Ssh.runProto r connpool (return False) fallback) key

lockKey :: Remote -> State -> Key -> (VerifiedCopy -> Annex r) -> Annex r
lockKey r st key callback = do
	repo <- getRepo r
	lockKey' repo r st key callback

lockKey' :: Git.Repo -> Remote -> State -> Key -> (VerifiedCopy -> Annex r) -> Annex r
lockKey' repo r st@(State connpool duc _ _ _) key callback
	| not $ Git.repoIsUrl repo = ifM duc
		( guardUsable repo failedlock $ do
			inorigrepo <- Annex.makeRunner
			-- Lock content from perspective of remote,
			-- and then run the callback in the original
			-- annex monad, not the remote's.
			onLocalFast st $
				Annex.Content.lockContentShared key $
					liftIO . inorigrepo . callback
		, failedlock
		)
	| Git.repoIsSsh repo = do
		showLocking r
		let withconn = Ssh.withP2PSshConnection r connpool fallback
		P2PHelper.lock withconn Ssh.runProtoConn (uuid r) key callback
	| otherwise = failedlock
  where
	fallback = withNullHandle $ \nullh -> do
		Just (cmd, params) <- Ssh.git_annex_shell ConsumeStdin
			repo "lockcontent"
			[Param $ serializeKey key] []
		let p = (proc cmd (toCommand params))
			{ std_in = CreatePipe
			, std_out = CreatePipe
			, std_err = UseHandle nullh
			}
		bracketIO (createProcess p) cleanupProcess fallback'
	fallback' (Just hin, Just hout, Nothing, p) = do
		v <- liftIO $ tryIO $ getProtocolLine hout
		let signaldone = void $ tryNonAsync $ liftIO $ mapM_ tryNonAsync
			[ hPutStrLn hout ""
			, hFlush hout
			, hClose hin
			, hClose hout
			, void $ waitForProcess p
			]
		let checkexited = not . isJust <$> getProcessExitCode p
		case v of
			Left _exited -> do
				showNote "lockcontent failed"
				liftIO $ do
					hClose hin
					hClose hout
					void $ waitForProcess p
				failedlock
			Right l
				| l == Just Ssh.contentLockedMarker -> bracket_
					noop
					signaldone
					(withVerifiedCopy LockedCopy r checkexited callback)
				| otherwise -> do
					showNote "lockcontent failed"
					signaldone
					failedlock
	fallback' _ = error "internal"
	failedlock = giveup "can't lock content"

{- Tries to copy a key's content from a remote's annex to a file. -}
copyFromRemote :: Remote -> State -> Key -> AssociatedFile -> FilePath -> MeterUpdate -> Annex Verification
copyFromRemote = copyFromRemote' False

copyFromRemote' :: Bool -> Remote -> State -> Key -> AssociatedFile -> FilePath -> MeterUpdate -> Annex Verification
copyFromRemote' forcersync r st key file dest meterupdate = do
	repo <- getRepo r
	copyFromRemote'' repo forcersync r st key file dest meterupdate

copyFromRemote'' :: Git . Repo -> Bool -> Remote -> State -> Key -> AssociatedFile -> FilePath -> MeterUpdate -> Annex Verification
2020-04-17 21:09:29 +00:00
copyFromRemote'' repo forcersync r st @ ( State connpool _ _ _ _ ) key file dest meterupdate
2020-05-13 21:05:56 +00:00
| Git . repoIsHttp repo = do
2018-06-05 18:23:34 +00:00
gc <- Annex.getGitConfig
2020-05-13 21:05:56 +00:00
ok <- Url.withUrlOptionsPromptingCreds $
2020-01-22 20:13:48 +00:00
Annex.Content.downloadUrl key meterupdate (keyUrls gc repo r key) dest
2020-05-13 21:05:56 +00:00
unless ok $
giveup "failed to download content"
return UnVerified
| not $ Git.repoIsUrl repo = guardUsable repo (giveup "cannot access remote") $ do
2014-04-17 18:31:42 +00:00
params <- Ssh.rsyncParams r Download
2012-07-01 20:59:54 +00:00
u <- getUUID
2015-09-14 16:13:38 +00:00
hardlink <- wantHardLink
2012-07-01 20:59:54 +00:00
-- run copy from perspective of remote
2020-05-13 21:05:56 +00:00
onLocalFast st $ Annex.Content.prepSendAnnex key >>= \case
Just (object, checksuccess) -> do
copier <- mkCopier hardlink st params
(ok, v) <- runTransfer (Transfer Download u (fromKey id key))
file stdRetry $ \p ->
metered (Just (combineMeterUpdate p meterupdate)) key $ \_ p' ->
copier object dest p' checksuccess
if ok
then return v
else giveup "failed to retrieve content from remote"
Nothing -> giveup "content is not present in remote"
2018-06-04 18:31:55 +00:00
| Git.repoIsSsh repo = if forcersync
2020-05-13 21:05:56 +00:00
then do
(ok, v) <- fallback meterupdate
if ok
then return v
else giveup "failed to retrieve content from remote"
2018-03-09 16:57:32 +00:00
else P2PHelper.retrieve
2018-07-03 17:09:04 +00:00
(\p -> Ssh.runProto r connpool (return (False, UnVerified)) (fallback p))
2018-03-09 16:57:32 +00:00
key file dest meterupdate
2020-05-13 21:05:56 +00:00
| otherwise = giveup "copying from non-ssh, non-http remote not supported"
2012-10-29 01:27:15 +00:00
where
2018-03-13 18:18:30 +00:00
fallback p = unVerified $ feedprogressback $ \p' -> do
2018-03-12 23:18:47 +00:00
oh <- mkOutputHandlerQuiet
Ssh.rsyncHelper oh (Just (combineMeterUpdate p' p))
=<< Ssh.rsyncParamsRemote False r Download key dest file
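{- A standalone sketch of what combining the two progress meters above
 - amounts to: every byte-count update is fed both to the local display and
 - to whatever is tracking the transfer on the other side. The names here
 - are illustrative, not git-annex's own (combineMeterUpdate lives in
 - Utility.Metered).
 -}
import Control.Monad (forM_)

type ProgressCallback = Integer -> IO ()

combineProgress :: ProgressCallback -> ProgressCallback -> ProgressCallback
combineProgress a b = \n -> a n >> b n

main :: IO ()
main = do
    let display n = putStrLn ("local display: " ++ show n ++ " bytes")
    let feedback n = putStrLn ("fed back to remote: " ++ show n ++ " bytes")
    forM_ [4096, 8192, 12288] (combineProgress display feedback)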
2020-05-13 21:05:56 +00:00
2012-10-29 01:27:15 +00:00
{- Feed local rsync's progress info back to the remote,
- by forking a feeder thread that runs
- git-annex-shell transferinfo at the same time
- git-annex-shell sendkey is running.
-
2013-05-14 17:51:14 +00:00
- To avoid extra password prompts, this is only done when ssh
- connection caching is supported.
2012-10-29 01:27:15 +00:00
- Note that it actually waits for rsync to indicate
- progress before starting transferinfo, in order
- to ensure ssh connection caching works and reuses
- the connection set up for the sendkey.
-
- Also note that older git-annex-shell does not support
- transferinfo, so stderr is dropped and failure ignored.
-}
2013-05-14 17:51:14 +00:00
feedprogressback a = ifM (isJust <$> sshCacheDir)
( feedprogressback' a
2013-05-19 22:15:29 +00:00
, a $ const noop
2013-05-14 17:52:30 +00:00
)
2013-05-14 17:51:14 +00:00
feedprogressback' a = do
2012-10-29 01:27:15 +00:00
u <- getUUID
2017-03-10 17:12:24 +00:00
let AssociatedFile afile = file
2012-10-29 01:27:15 +00:00
let fields = (Fields.remoteUUID, fromUUID u)
2019-11-26 19:27:22 +00:00
: maybe [] (\f -> [(Fields.associatedFile, fromRawFilePath f)]) afile
2017-02-15 19:08:46 +00:00
Just (cmd, params) <- Ssh.git_annex_shell ConsumeStdin
2018-06-04 18:31:55 +00:00
repo "transferinfo"
2019-01-14 17:03:35 +00:00
[Param $ serializeKey key] fields
2013-09-26 03:19:01 +00:00
v <- liftIO (newEmptySV :: IO (MSampleVar Integer))
2020-06-03 19:23:23 +00:00
pv <- liftIO $ newEmptyMVar
2012-10-29 01:27:15 +00:00
tid <- liftIO $ forkIO $ void $ tryIO $ do
bytes <- readSV v
p <- createProcess $
(proc cmd (toCommand params))
{ std_in = CreatePipe
, std_err = CreatePipe
}
2020-06-03 19:23:23 +00:00
putMVar pv p
2012-10-29 01:27:15 +00:00
hClose $ stderrHandle p
let h = stdinHandle p
let send b = do
2013-09-26 03:19:01 +00:00
hPrint h b
2012-10-29 01:27:15 +00:00
hFlush h
send bytes
forever $
send =<< readSV v
2015-04-10 19:15:01 +00:00
let feeder = \n -> do
meterupdate n
writeSV v (fromBytesProcessed n)
2015-08-13 18:20:28 +00:00
-- It can easily take 0.3 seconds to clean up after
-- the transferinfo, and all that's involved is shutting
-- down the process and associated thread cleanly. So,
-- do it in the background.
let cleanup = forkIO $ do
2014-08-04 00:14:20 +00:00
void $ tryIO $ killThread tid
2020-06-03 19:23:23 +00:00
void $ tryNonAsync $
maybe noop (void . waitForProcess . processHandle)
=<< tryTakeMVar pv
let forcestop = do
void $ tryIO $ killThread tid
void $ tryNonAsync $
maybe noop cleanupProcess
=<< tryTakeMVar pv
2014-08-04 00:14:20 +00:00
bracketIO noop (const cleanup) (const $ a feeder)
2020-06-03 19:23:23 +00:00
`onException` liftIO forcestop
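{- A standalone sketch of the progress-feedback pattern used by
 - feedprogressback' above: progress samples go into a "latest value wins"
 - variable, and a feeder thread waits for the first sample before spawning
 - the reporting process, then forwards each later sample to its stdin.
 - Here an MVar approximates the MSampleVar, and "cat" stands in for
 - git-annex-shell transferinfo (POSIX only); both are assumptions made to
 - keep the example runnable on its own.
 -}
import Control.Concurrent
import Control.Exception (SomeException, try)
import Control.Monad (forM_, forever, void)
import System.IO
import System.Process

newtype Sample a = Sample (MVar a)

newSample :: IO (Sample a)
newSample = Sample <$> newEmptyMVar

-- Writers overwrite any pending value; readers block for the next one.
writeSample :: Sample a -> a -> IO ()
writeSample (Sample v) x = void (tryTakeMVar v) >> putMVar v x

readSample :: Sample a -> IO a
readSample (Sample v) = takeMVar v

main :: IO ()
main = do
    samples <- newSample
    tid <- forkIO $ void $ (try :: IO () -> IO (Either SomeException ())) $ do
        -- Wait for the first sample before starting the reporter, so the
        -- connection set up for the main transfer can be reused.
        firstbytes <- readSample samples
        (Just hin, _, _, _) <- createProcess (proc "cat" [])
            { std_in = CreatePipe }
        let send n = hPrint hin n >> hFlush hin
        send firstbytes
        forever $ send =<< readSample samples
    -- The main thread plays the role of rsync's progress callback.
    forM_ [1024, 2048, 4096 :: Integer] $ \n -> do
        writeSample samples n
        threadDelay 100000
    killThread tid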
2011-08-17 01:04:23 +00:00
2020-05-13 21:05:56 +00:00
copyFromRemoteCheap :: Remote -> State -> Git.Repo -> Maybe (Key -> AssociatedFile -> FilePath -> Annex ())
2013-08-02 16:27:32 +00:00
#ifndef mingw32_HOST_OS
2020-07-02 15:46:26 +00:00
copyFromRemoteCheap r st repo
2020-05-13 21:05:56 +00:00
| not $ Git.repoIsUrl repo = Just $ \key _af file -> guardUsable repo (giveup "cannot access remote") $ do
2018-06-05 18:23:34 +00:00
gc <- getGitConfigFromState st
loc <- liftIO $ gitAnnexLocation key repo gc
2019-12-11 18:12:22 +00:00
liftIO $ ifM (R.doesPathExist loc)
2015-04-18 17:36:12 +00:00
( do
2020-10-30 19:55:59 +00:00
absloc <- absPath loc
R.createSymbolicLink absloc (toRawFilePath file)
2020-05-13 21:05:56 +00:00
, giveup "remote does not contain key"
2015-04-18 17:36:12 +00:00
)
2020-05-13 21:05:56 +00:00
| Git.repoIsSsh repo = Just $ \key af file ->
2012-03-16 00:39:25 +00:00
ifM (Annex.Content.preseedTmp key file)
2020-05-13 21:05:56 +00:00
( void $ copyFromRemote' True r st key af file nullMeterUpdate
, giveup "cannot preseed rsync with existing content"
2012-03-16 00:39:25 +00:00
)
2020-05-13 21:05:56 +00:00
| otherwise = Nothing
2013-08-04 17:12:18 +00:00
#else
2020-07-02 16:01:09 +00:00
copyFromRemoteCheap _ _ _ = Nothing
2013-08-04 17:12:18 +00:00
#endif
2012-01-20 17:23:11 +00:00
2011-03-27 19:56:43 +00:00
{- Tries to copy a key's content to a remote's annex. -}
2020-05-13 18:03:00 +00:00
copyToRemote :: Remote -> State -> Key -> AssociatedFile -> MeterUpdate -> Annex ()
2018-06-04 18:31:55 +00:00
copyToRemote r st key file meterupdate = do
repo <- getRepo r
copyToRemote' repo r st key file meterupdate
2020-05-13 18:03:00 +00:00
copyToRemote' :: Git.Repo -> Remote -> State -> Key -> AssociatedFile -> MeterUpdate -> Annex ()
2020-04-17 21:09:29 +00:00
copyToRemote' repo r st@(State connpool duc _ _ _) key file meterupdate
2018-06-04 18:31:55 +00:00
| not $ Git.repoIsUrl repo = ifM duc
2020-05-13 18:03:00 +00:00
( guardUsable repo (giveup "cannot access remote") $ commitOnCleanup repo r st $
2013-01-10 15:45:44 +00:00
copylocal =<< Annex.Content.prepSendAnnex key
2020-05-13 18:03:00 +00:00
, giveup "remote does not have expected annex.uuid value"
2018-01-10 18:21:18 +00:00
)
2020-04-17 21:09:29 +00:00
| Git.repoIsSsh repo = commitOnCleanup repo r st $
2018-03-09 16:57:32 +00:00
P2PHelper.store
2020-05-13 18:03:00 +00:00
(Ssh.runProto r connpool (return False) . copyremotefallback)
2018-03-09 16:57:32 +00:00
key file meterupdate
2020-05-13 18:03:00 +00:00
| otherwise = giveup "copying to non-ssh repo not supported"
2012-12-08 21:03:39 +00:00
where
2020-05-13 18:03:00 +00:00
copylocal Nothing = giveup "content not available"
2018-03-09 16:57:32 +00:00
copylocal (Just (object, checksuccess)) = do
2013-03-12 20:41:54 +00:00
-- The checksuccess action is going to be run in
2015-12-26 18:05:07 +00:00
-- the remote's Annex, but it needs access to the local
2013-03-12 20:41:54 +00:00
-- Annex monad's state.
checksuccessio <- Annex.withCurrentState checksuccess
2014-04-17 18:31:42 +00:00
params <- Ssh.rsyncParams r Upload
2012-07-01 20:59:54 +00:00
u <- getUUID
2015-09-14 16:13:38 +00:00
hardlink <- wantHardLink
2011-03-27 19:56:43 +00:00
-- run copy from perspective of remote
2020-05-13 18:03:00 +00:00
res <- onLocalFast st $ ifM (Annex.Content.inAnnex key)
2013-03-10 21:54:27 +00:00
( return True
2019-11-22 20:24:04 +00:00
, runTransfer (Transfer Download u (fromKey id key)) file stdRetry $ \p -> do
2019-07-17 18:19:00 +00:00
copier <- mkCopier hardlink st params
2015-10-02 17:56:42 +00:00
let verify = Annex.Content.RemoteVerify r
2018-06-21 17:34:11 +00:00
let rsp = RetrievalAllKeysSecure
2020-11-16 18:09:55 +00:00
res <- Annex.Content.getViaTmp rsp verify key file $ \dest ->
2019-11-15 16:01:30 +00:00
metered (Just (combineMeterUpdate meterupdate p)) key $ \_ p' ->
2020-10-30 19:55:59 +00:00
copier object (fromRawFilePath dest) p' (liftIO checksuccessio)
2019-11-15 16:01:30 +00:00
Annex.Content.saveState True
return res
2012-09-18 17:59:03 +00:00
)
2020-05-13 18:03:00 +00:00
unless res $
giveup "failed to send content to remote"
copyremotefallback p = either (const False) id
<$> tryNonAsync (copyremotefallback' p)
copyremotefallback' p = Annex.Content.sendAnnex key noop $ \object -> do
2018-03-12 23:18:47 +00:00
-- This is too broad really, but recvkey normally
-- verifies content anyway, so avoid complicating
-- it with a local sendAnnex check and rollback.
2019-08-30 17:54:57 +00:00
let unlocked = True
2018-03-12 23:18:47 +00:00
oh <- mkOutputHandlerQuiet
Ssh.rsyncHelper oh (Just p)
=<< Ssh.rsyncParamsRemote unlocked r Upload key object file
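{- A standalone sketch of the idea behind Annex.withCurrentState as used by
 - copylocal above: capture the current monadic context so that a check can
 - later be run as plain IO from inside another repository's context. A
 - ReaderT over a FilePath stands in for the Annex monad; that substitution
 - is an assumption made only to keep the example self-contained.
 -}
import Control.Monad.IO.Class (liftIO)
import Control.Monad.Trans.Reader (ReaderT, ask, runReaderT)

type App a = ReaderT FilePath IO a

-- Capture the current environment so an App action can run later as IO.
captureCurrent :: App a -> App (IO a)
captureCurrent action = do
    env <- ask
    return (runReaderT action env)

checkStillPresent :: App Bool
checkStillPresent = do
    repo <- ask
    liftIO $ putStrLn ("re-checking content in " ++ repo)
    return True

main :: IO ()
main = do
    checkio <- runReaderT (captureCurrent checkStillPresent) "/local/repo"
    -- Later, possibly while operating on a different repository:
    ok <- checkio
    print ok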
2011-03-27 19:56:43 +00:00
2013-10-11 20:03:18 +00:00
fsckOnRemote :: Git.Repo -> [CommandParam] -> Annex (IO Bool)
fsckOnRemote r params
2013-10-14 16:23:38 +00:00
| Git.repoIsUrl r = do
2017-02-15 19:08:46 +00:00
s <- Ssh.git_annex_shell NoConsumeStdin r "fsck" params []
2013-10-11 20:03:18 +00:00
return $ case s of
Nothing -> return False
Just (c, ps) -> batchCommand c ps
2013-10-14 16:23:38 +00:00
| otherwise = return $ do
2015-02-28 21:23:13 +00:00
program <- programPath
2013-10-14 19:05:10 +00:00
r' <- Git.Config.read r
2014-06-10 23:20:14 +00:00
environ <- getEnvironment
let environ' = addEntries
2019-12-09 17:49:05 +00:00
[ ("GIT_WORK_TREE", fromRawFilePath $ Git.repoPath r')
, ("GIT_DIR", fromRawFilePath $ Git.localGitDir r')
2014-06-10 23:20:14 +00:00
] environ
2015-02-09 18:16:42 +00:00
batchCommandEnv program (Param "fsck" : params) (Just environ')
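{- A standalone sketch of how fsckOnRemote points a locally-run command at
 - another repository: the child process inherits the current environment
 - with GIT_WORK_TREE and GIT_DIR overridden. The paths and the plain
 - "git fsck" invocation are illustrative assumptions, not what git-annex
 - itself runs.
 -}
import System.Environment (getEnvironment)
import System.Exit (ExitCode)
import System.Process

runAgainstRepo :: FilePath -> FilePath -> String -> [String] -> IO ExitCode
runAgainstRepo worktree gitdir cmd args = do
    environ <- getEnvironment
    let keep = filter (\(k, _) -> k `notElem` ["GIT_WORK_TREE", "GIT_DIR"]) environ
    let environ' = ("GIT_WORK_TREE", worktree) : ("GIT_DIR", gitdir) : keep
    (_, _, _, pid) <- createProcess (proc cmd args) { env = Just environ' }
    waitForProcess pid

main :: IO ()
main = runAgainstRepo "/srv/repo" "/srv/repo/.git" "git" ["fsck"] >>= print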
2013-10-11 20:03:18 +00:00
2013-10-27 19:38:59 +00:00
{- The passed repair action is run in the Annex monad of the remote. -}
repairRemote :: Git.Repo -> Annex Bool -> Annex (IO Bool)
2014-03-06 21:12:50 +00:00
repairRemote r a = return $ do
2013-09-07 22:38:00 +00:00
s <- Annex.new r
Annex.eval s $ do
Annex.BranchState.disableUpdate
2014-03-06 21:12:50 +00:00
ensureInitialized
2017-09-30 02:36:08 +00:00
a `finally` stopCoProcesses
2011-03-27 19:56:43 +00:00
2020-04-17 21:09:29 +00:00
data LocalRemoteAnnex = LocalRemoteAnnex Git.Repo (MVar (Maybe Annex.AnnexState))
{- This can safely be called on a Repo that is not local, but of course
- onLocal will not work if used with the result. -}
mkLocalRemoteAnnex :: Git.Repo -> Annex LocalRemoteAnnex
mkLocalRemoteAnnex repo = LocalRemoteAnnex repo <$> liftIO (newMVar Nothing)
2014-03-06 21:12:50 +00:00
{- Runs an action from the perspective of a local remote.
-
- The AnnexState is cached for speed and to avoid resource leaks.
2017-02-17 19:21:39 +00:00
- However, coprocesses are stopped after each call to avoid git
- processes hanging around on removable media.
2019-08-27 16:05:20 +00:00
-
- The remote will be automatically initialized/upgraded first,
- when possible.
2014-03-06 21:12:50 +00:00
-}
2020-04-17 21:09:29 +00:00
onLocal :: State -> Annex a -> Annex a
onLocal (State _ _ _ _ lra) = onLocal' lra
onLocalRepo :: Git.Repo -> Annex a -> Annex a
onLocalRepo repo a = do
lra <- mkLocalRemoteAnnex repo
onLocal' lra a
onLocal' :: LocalRemoteAnnex -> Annex a -> Annex a
onLocal' (LocalRemoteAnnex repo v) a = liftIO (takeMVar v) >>= \case
Nothing -> do
st <- liftIO $ Annex.new repo
go (st, ensureInitialized >> a)
Just st -> go (st, a)
2014-03-06 21:12:50 +00:00
where
2019-08-27 16:05:20 +00:00
go (st, a') = do
2015-04-04 00:08:38 +00:00
curro <- Annex.getState Annex.output
2020-04-17 21:09:29 +00:00
let act = Annex.run (st { Annex.output = curro }) $
2019-08-27 16:05:20 +00:00
a' `finally` stopCoProcesses
2020-04-17 21:09:29 +00:00
(ret, st') <- liftIO $ act `onException` cache st
liftIO $ cache st'
2014-03-06 21:12:50 +00:00
return ret
2020-04-17 21:09:29 +00:00
cache st = putMVar v (Just st)
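{- A standalone sketch of the caching done by onLocal' above: per-remote
 - state lives in an MVar, is created and initialized only on first use,
 - and is put back even when the action throws. It is simplified in that
 - the real code also threads the updated state back into the cache; the
 - types here are placeholders.
 -}
import Control.Concurrent.MVar
import Control.Exception (onException)

newtype Cached s = Cached (MVar (Maybe s))

newCached :: IO (Cached s)
newCached = Cached <$> newMVar Nothing

withCached :: IO s -> (s -> IO ()) -> Cached s -> (s -> IO a) -> IO a
withCached create initialize (Cached v) action = do
    m <- takeMVar v
    st <- case m of
        Nothing -> do
            st <- create
            initialize st  -- runs once, like ensureInitialized above
            return st
        Just st -> return st
    r <- action st `onException` putMVar v (Just st)
    putMVar v (Just st)
    return r

main :: IO ()
main = do
    c <- newCached
    let create = putStrLn "building state" >> return (0 :: Int)
    let initialize _ = putStrLn "one-time initialization"
    r1 <- withCached create initialize c (\st -> return (st + 1))
    r2 <- withCached create initialize c (\st -> return (st + 2))
    print (r1, r2)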
2014-03-06 21:12:50 +00:00
2017-02-17 19:21:39 +00:00
{- Faster variant of onLocal.
-
- The repository's git-annex branch is not updated, as an optimisation.
- No caller of onLocalFast can query data from the branch and be ensured
- it gets the most current value. Caller of onLocalFast can make changes
- to the branch, however.
-}
2020-04-17 21:09:29 +00:00
onLocalFast :: State -> Annex a -> Annex a
onLocalFast st a = onLocal st $ Annex.BranchState.disableUpdate >> a
2017-02-17 19:21:39 +00:00
2019-07-17 18:19:00 +00:00
-- To avoid the overhead of trying copy-on-write every time, it's tried
-- once and if it fails, is not tried again.
newtype CopyCoWTried = CopyCoWTried (MVar Bool)
newCopyCoWTried :: IO CopyCoWTried
newCopyCoWTried = CopyCoWTried <$> newEmptyMVar
{- Copies a file. Uses copy-on-write if it is supported. Otherwise,
- uses rsync, so that interrupted copies can be resumed. -}
rsyncOrCopyFile :: State -> [CommandParam] -> FilePath -> FilePath -> MeterUpdate -> Annex Bool
2013-08-02 16:27:32 +00:00
#ifdef mingw32_HOST_OS
2019-09-12 18:11:19 +00:00
rsyncOrCopyFile _st _rsyncparams src dest p =
2019-07-17 18:19:00 +00:00
-- rsync is only available on Windows in some installation methods,
2019-02-18 21:27:34 +00:00
-- and is not strictly needed here, so don't use it.
2019-07-17 18:19:00 +00:00
docopywith copyFileExternal
2013-05-11 20:03:00 +00:00
where
#else
2019-09-12 18:11:19 +00:00
rsyncOrCopyFile st rsyncparams src dest p =
2019-07-17 18:19:00 +00:00
-- If multiple threads reach this at the same time, they
-- will both try CoW, which is acceptable.
ifM (liftIO $ isEmptyMVar copycowtried)
( do
ok <- docopycow
void $ liftIO $ tryPutMVar copycowtried ok
pure ok <||> dorsync
, ifM (liftIO $ readMVar copycowtried)
( docopycow <||> dorsync
, dorsync
)
)
2012-10-29 01:27:15 +00:00
where
2019-07-17 18:19:00 +00:00
copycowtried = case st of
2020-04-17 21:09:29 +00:00
State _ _ (CopyCoWTried v) _ _ -> v
2018-03-12 22:36:07 +00:00
dorsync = do
2019-08-09 19:26:58 +00:00
-- dest may already exist, so make sure rsync can write to it
2020-11-06 18:10:58 +00:00
void $ liftIO $ tryIO $ allowWrite (toRawFilePath dest)
2019-11-15 16:01:30 +00:00
oh <- mkOutputHandlerQuiet
2018-03-12 22:36:07 +00:00
Ssh.rsyncHelper oh (Just p) $
rsyncparams ++ [File src, File dest]
2019-07-17 18:19:00 +00:00
docopycow = docopywith copyCoW
2019-02-18 21:38:21 +00:00
#endif
2019-07-17 18:19:00 +00:00
docopywith a = liftIO $ watchFileSize dest p $
a CopyTimeStamps src dest
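{- A standalone sketch of the "try copy-on-write once, remember the result"
 - logic in rsyncOrCopyFile above. The same benign race exists: two threads
 - hitting an untried MVar may both attempt the fast path, which is
 - acceptable. The fast and slow copy actions are placeholders.
 -}
import Control.Concurrent.MVar

newtype FastPathTried = FastPathTried (MVar Bool)

newFastPathTried :: IO FastPathTried
newFastPathTried = FastPathTried <$> newEmptyMVar

copyWith :: FastPathTried -> IO Bool -> IO Bool -> IO Bool
copyWith (FastPathTried v) fastcopy slowcopy = do
    nevertried <- isEmptyMVar v
    if nevertried
        then do
            ok <- fastcopy
            _ <- tryPutMVar v ok
            if ok then return True else slowcopy
        else do
            worked <- readMVar v
            if worked
                then do
                    ok <- fastcopy
                    if ok then return True else slowcopy
                else slowcopy

main :: IO ()
main = do
    t <- newFastPathTried
    let fast = putStrLn "trying reflink copy" >> return False
    let slow = putStrLn "falling back to resumable copy" >> return True
    _ <- copyWith t fast slow
    _ <- copyWith t fast slow  -- the fast path is skipped from now on
    return ()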
2011-06-14 00:23:47 +00:00
2020-04-17 21:09:29 +00:00
commitOnCleanup :: Git.Repo -> Remote -> State -> Annex a -> Annex a
commitOnCleanup repo r st a = go `after` a
2012-10-29 01:27:15 +00:00
where
2014-03-13 23:06:26 +00:00
go = Annex.addCleanup (RemoteCleanup $ uuid r) cleanup
2012-10-29 01:27:15 +00:00
cleanup
2020-04-17 21:09:29 +00:00
| not $ Git.repoIsUrl repo = onLocalFast st $
2012-10-29 01:27:15 +00:00
doQuietSideAction $
2018-08-02 18:06:06 +00:00
Annex.Branch.commit =<< Annex.Branch.commitMessage
2020-06-04 19:36:34 +00:00
| otherwise = do
2012-10-29 01:27:15 +00:00
Just (shellcmd, shellparams) <-
2017-02-15 19:08:46 +00:00
Ssh.git_annex_shell NoConsumeStdin
2018-06-04 18:31:55 +00:00
repo "commit" [] []
2012-10-29 01:27:15 +00:00
-- Throw away stderr, since the remote may not
-- have a new enough git-annex shell to
-- support committing.
2020-06-04 19:36:34 +00:00
liftIO $ void $ catchMaybeIO $ withNullHandle $ \nullh ->
let p = (proc shellcmd (toCommand shellparams))
{ std_out = UseHandle nullh
, std_err = UseHandle nullh
}
in withCreateProcess p $ \_ _ _ ->
forceSuccessProcess p
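{- A standalone sketch of the best-effort remote commit above: run a
 - command, discard its output, and swallow any failure, since the other
 - end may be too old to support the request. Writing to /dev/null makes
 - this POSIX-only, and the "true" command is just a placeholder.
 -}
import Control.Exception (SomeException, try)
import Control.Monad (void)
import System.IO
import System.Process

bestEffort :: String -> [String] -> IO ()
bestEffort cmd args = void $ (try :: IO () -> IO (Either SomeException ())) $
    withFile "/dev/null" WriteMode $ \nullh -> do
        let p = (proc cmd args)
                { std_out = UseHandle nullh
                , std_err = UseHandle nullh
                }
        withCreateProcess p $ \_ _ _ pid ->
            void $ waitForProcess pid

main :: IO ()
main = bestEffort "true" []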
2015-09-14 16:13:38 +00:00
wantHardLink :: Annex Bool
2016-01-13 18:19:31 +00:00
wantHardLink = (annexHardLink <$> Annex.getGitConfig)
-- Not unlocked files that are hard linked in the work tree,
-- because they can be modified at any time.
<&&> (not <$> annexThin <$> Annex.getGitConfig)
2015-09-14 16:13:38 +00:00
2015-10-02 17:56:42 +00:00
-- Copies from src to dest, updating a meter. If the copy finishes
2015-12-10 18:29:34 +00:00
-- successfully, calls a final check action, which must also succeed, or
2015-10-02 17:56:42 +00:00
-- returns false.
--
2015-09-14 16:13:38 +00:00
-- If either the remote or local repository wants to use hard links,
2015-10-02 17:56:42 +00:00
-- the copier will do so (falling back to copying if a hard link cannot be
-- made).
--
-- When a hard link is created, returns Verified; the repo being linked
-- from is implicitly trusted, so no expensive verification needs to be
-- done.
type Copier = FilePath -> FilePath -> MeterUpdate -> Annex Bool -> Annex (Bool, Verification)
2019-07-17 18:19:00 +00:00
mkCopier :: Bool -> State -> [CommandParam] -> Annex Copier
mkCopier remotewanthardlink st rsyncparams = do
2015-10-02 17:56:42 +00:00
let copier = \src dest p check -> unVerified $
2019-07-17 18:19:00 +00:00
rsyncOrCopyFile st rsyncparams src dest p <&&> check
2015-09-14 16:13:38 +00:00
localwanthardlink <- wantHardLink
2015-10-02 17:56:42 +00:00
let linker = \src dest -> createLink src dest >> return True
2019-08-28 15:53:10 +00:00
if remotewanthardlink || localwanthardlink
then return $ \src dest p check ->
2015-10-02 17:56:42 +00:00
ifM (liftIO (catchBoolIO (linker src dest)))
( return (True, Verified)
, copier src dest p check
)
2019-08-28 15:53:10 +00:00
else return copier
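{- A standalone sketch of the hard-link-first Copier built above: try to
 - hard link the object into place and fall back to a real copy, reporting
 - whether a link was made (the case that can skip verification). Uses the
 - unix package's createLink, so this is POSIX-only; the file names in main
 - are illustrative.
 -}
import Control.Exception (SomeException, try)
import System.Directory (copyFile)
import System.Posix.Files (createLink)

linkOrCopy :: FilePath -> FilePath -> IO Bool
linkOrCopy src dest = do
    r <- try (createLink src dest) :: IO (Either SomeException ())
    case r of
        Right () -> return True
        Left _ -> do
            copyFile src dest
            return False

main :: IO ()
main = do
    writeFile "example-object" "content"
    linked <- linkOrCopy "example-object" "example-copy"
    putStrLn $ if linked then "hard linked" else "copied"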
2018-01-10 18:21:18 +00:00
2018-03-08 18:02:18 +00:00
{- Normally the UUID of a local repository is checked at startup,
- but annex-checkuuid config can prevent that. To avoid getting
- confused, a deferred check is done just before the repository
- is used.
- This returns False when the repository UUID is not as expected. -}
2018-01-10 18:21:18 +00:00
type DeferredUUIDCheck = Annex Bool
2020-04-17 21:09:29 +00:00
data State = State Ssh.P2PSshConnectionPool DeferredUUIDCheck CopyCoWTried (Annex (Git.Repo, GitConfig)) LocalRemoteAnnex
2018-06-04 20:48:26 +00:00
getRepoFromState :: State -> Annex Git.Repo
2020-04-17 21:09:29 +00:00
getRepoFromState (State _ _ _ a _) = fst <$> a
2018-06-05 18:23:34 +00:00
2019-08-13 17:10:33 +00:00
#ifndef mingw32_HOST_OS
2018-06-05 18:23:34 +00:00
{- The config of the remote git repository, cached for speed. -}
getGitConfigFromState :: State -> Annex GitConfig
2020-04-17 21:09:29 +00:00
getGitConfigFromState (State _ _ _ a _) = snd <$> a
2019-08-13 17:10:33 +00:00
#endif
2018-06-04 20:48:26 +00:00
mkState :: Git.Repo -> UUID -> RemoteGitConfig -> Annex State
mkState r u gc = do
pool <- Ssh.mkP2PSshConnectionPool
2019-07-17 18:19:00 +00:00
copycowtried <- liftIO newCopyCoWTried
2020-04-17 21:09:29 +00:00
lra <- mkLocalRemoteAnnex r
2018-06-04 20:48:26 +00:00
(duc, getrepo) <- go
2020-04-17 21:09:29 +00:00
return $ State pool duc copycowtried getrepo lra
2018-06-04 20:48:26 +00:00
where
go
2018-06-05 18:23:34 +00:00
| remoteAnnexCheckUUID gc = return
2019-12-20 16:12:31 +00:00
( return True, return (r, extractGitConfig FromGitConfig r) )
2018-06-04 20:48:26 +00:00
| otherwise = do
rv <- liftIO newEmptyMVar
let getrepo = ifM (liftIO $ isEmptyMVar rv)
( do
2020-05-04 17:01:11 +00:00
r' <- tryGitConfigRead False r True
2019-12-20 16:12:31 +00:00
let t = (r', extractGitConfig FromGitConfig r')
2018-06-05 18:23:34 +00:00
void $ liftIO $ tryPutMVar rv t
return t
2018-06-04 20:48:26 +00:00
, liftIO $ readMVar rv
)
cv <- liftIO newEmptyMVar
let duc = ifM (liftIO $ isEmptyMVar cv)
( do
2018-06-05 18:23:34 +00:00
r' <- fst <$> getrepo
2018-06-04 20:48:26 +00:00
u' <- getRepoUUID r'
let ok = u' == u
void $ liftIO $ tryPutMVar cv ok
unless ok $
warning $ Git.repoDescribe r ++ " is not the expected repository. The remote's annex-checkuuid configuration prevented noticing the change until now."
return ok
, liftIO $ readMVar cv
)
return (duc, getrepo)
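{- A standalone sketch of the DeferredUUIDCheck pattern set up above: an
 - expensive or configuration-dependent check is postponed until the remote
 - is first used, and its result is cached in an MVar so it only runs once.
 - The check body is a placeholder.
 -}
import Control.Concurrent.MVar

newDeferredCheck :: IO Bool -> IO (IO Bool)
newDeferredCheck check = do
    cv <- newEmptyMVar
    return $ do
        notyet <- isEmptyMVar cv
        if notyet
            then do
                ok <- check
                _ <- tryPutMVar cv ok
                return ok
            else readMVar cv

main :: IO ()
main = do
    duc <- newDeferredCheck $ do
        putStrLn "verifying repository uuid (runs once)"
        return True
    _ <- duc
    ok <- duc
    print ok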