2011-04-28 00:06:07 +00:00
|
|
|
{- A remote that is only accessible by rsync.
|
|
|
|
-
|
2020-01-14 16:35:08 +00:00
|
|
|
- Copyright 2011-2020 Joey Hess <id@joeyh.name>
|
2011-04-28 00:06:07 +00:00
|
|
|
-
|
2019-03-13 19:48:14 +00:00
|
|
|
- Licensed under the GNU AGPL version 3 or higher.
|
2011-04-28 00:06:07 +00:00
|
|
|
-}
|
|
|
|
|
2013-05-10 21:29:59 +00:00
|
|
|
{-# LANGUAGE CPP #-}
|
|
|
|
|
2013-09-08 18:54:28 +00:00
|
|
|
module Remote.Rsync (
|
|
|
|
remote,
|
2014-08-03 21:31:10 +00:00
|
|
|
store,
|
|
|
|
retrieve,
|
2013-09-08 18:54:28 +00:00
|
|
|
remove,
|
2014-08-06 17:45:19 +00:00
|
|
|
checkKey,
|
2013-09-08 18:54:28 +00:00
|
|
|
withRsyncScratchDir,
|
2020-01-20 19:13:49 +00:00
|
|
|
rsyncRemoteConfigs,
|
2013-09-08 18:54:28 +00:00
|
|
|
genRsyncOpts,
|
2022-05-03 16:12:25 +00:00
|
|
|
RsyncOpts,
|
|
|
|
probeRsyncProtectsArgs,
|
2013-09-08 18:54:28 +00:00
|
|
|
) where
|
2011-04-28 00:06:07 +00:00
|
|
|
|
2016-01-20 20:36:33 +00:00
|
|
|
import Annex.Common
|
2011-06-02 01:56:04 +00:00
|
|
|
import Types.Remote
|
2011-06-30 17:16:57 +00:00
|
|
|
import qualified Git
|
2011-04-28 00:06:07 +00:00
|
|
|
import Config
|
2013-03-13 20:16:01 +00:00
|
|
|
import Config.Cost
|
2011-10-04 04:40:47 +00:00
|
|
|
import Annex.Content
|
2013-09-07 22:38:00 +00:00
|
|
|
import Annex.UUID
|
2013-04-13 22:10:49 +00:00
|
|
|
import Annex.Ssh
|
2020-03-06 15:57:15 +00:00
|
|
|
import Annex.Perms
|
2011-08-17 00:49:54 +00:00
|
|
|
import Remote.Helper.Special
|
2019-02-20 19:55:01 +00:00
|
|
|
import Remote.Helper.ExportImport
|
2018-02-28 16:09:03 +00:00
|
|
|
import Types.Export
|
2020-01-10 18:10:20 +00:00
|
|
|
import Types.ProposedAccepted
|
2014-03-18 16:55:08 +00:00
|
|
|
import Remote.Rsync.RsyncUrl
|
2011-04-28 00:06:07 +00:00
|
|
|
import Crypto
|
2012-09-19 18:28:32 +00:00
|
|
|
import Utility.Rsync
|
2013-02-15 17:33:36 +00:00
|
|
|
import Utility.CopyFile
|
2022-05-03 16:12:25 +00:00
|
|
|
import Utility.Process.Transcript
|
2015-04-03 20:48:30 +00:00
|
|
|
import Messages.Progress
|
2013-03-28 21:03:04 +00:00
|
|
|
import Utility.Metered
|
2016-08-03 16:37:12 +00:00
|
|
|
import Types.Transfer
|
2014-02-11 18:06:50 +00:00
|
|
|
import Types.Creds
|
2015-01-28 20:51:40 +00:00
|
|
|
import Annex.DirHashes
|
2017-12-31 20:08:31 +00:00
|
|
|
import Utility.Tmp.Dir
|
2017-08-18 02:11:31 +00:00
|
|
|
import Utility.SshHost
|
2020-01-14 16:35:08 +00:00
|
|
|
import Annex.SpecialRemote.Config
|
2022-05-09 17:18:47 +00:00
|
|
|
import Annex.Verify
|
2022-07-12 18:53:32 +00:00
|
|
|
import qualified Utility.RawFilePath as R
|
2011-04-28 00:06:07 +00:00
|
|
|
|
2014-02-11 19:29:56 +00:00
|
|
|
import qualified Data.Map as M
|
|
|
|
|
2011-12-31 08:11:39 +00:00
|
|
|
-- | The rsync special remote type.
--
-- It is enumerated from git config by looking for remotes with a
-- "rsyncurl" setting, and supports export (but not import or
-- third-party-populated use).
remote :: RemoteType
remote = specialRemoteType $ RemoteType
	{ typename = "rsync"
	, enumerate = const (findSpecialRemotes "rsyncurl")
	, generate = gen
	, configParser = mkRemoteConfigParser $ rsyncRemoteConfigs ++
		[ optionalStringParser rsyncUrlField
			(FieldDesc "(required) url or hostname:/directory for rsync to use")
		]
	, setup = rsyncSetup
	, exportSupported = exportIsSupported
	, importSupported = importUnsupported
	, thirdPartyPopulated = False
	}
|
2011-04-28 00:06:07 +00:00
|
|
|
|
2020-01-14 16:35:08 +00:00
|
|
|
-- | Remote config field; set to "no" to disable the usual shell escaping
-- of rsync filenames (see 'rsyncRemoteConfigs').
shellEscapeField :: RemoteConfigField
shellEscapeField = Accepted "shellescape"
|
|
|
|
|
|
|
|
-- | Remote config field holding the url or hostname:/directory that
-- rsync transfers to/from. Required at initremote time (see 'rsyncSetup').
rsyncUrlField :: RemoteConfigField
rsyncUrlField = Accepted "rsyncurl"
|
|
|
|
|
fix encryption of content to gcrypt and git-lfs
Fix serious regression in gcrypt and encrypted git-lfs remotes.
Since version 7.20200202.7, git-annex incorrectly stored content
on those remotes without encrypting it.
Problem was, Remote.Git enumerates all git remotes, including git-lfs
and gcrypt. It then dispatches to those. So, Remote.List used the
RemoteConfigParser from Remote.Git, instead of from git-lfs or gcrypt,
and that parser does not know about encryption fields, so did not
include them in the ParsedRemoteConfig. (Also didn't include other
fields specific to those remotes, perhaps chunking etc also didn't
get through.)
To fix, had to move RemoteConfig parsing down into the generate methods
of each remote, rather than doing it in Remote.List.
And a consequence of that was that ParsedRemoteConfig had to change to
include the RemoteConfig that got parsed, so that testremote can
generate a new remote based on an existing remote.
(I would have rather fixed this just inside Remote.Git, but that was not
practical, at least not w/o re-doing work that Remote.List already did.
Big ugly mostly mechanical patch seemed preferable to making git-annex
slower.)
2020-02-26 21:20:56 +00:00
|
|
|
-- | Generate a rsync Remote from its git config.
--
-- Parses the RemoteConfig, probes the local rsync for arg protection
-- support, and builds the RsyncOpts used by all the operations below.
-- The remote is wrapped with specialRemote, which layers on
-- encryption/chunking; storeKey etc are the dummy values that
-- specialRemote replaces.
gen :: Git.Repo -> UUID -> RemoteConfig -> RemoteGitConfig -> RemoteStateHandle -> Annex (Maybe Remote)
gen r u rc gc rs = do
	c <- parsedRemoteConfig remote rc
	cst <- remoteCost gc c expensiveRemoteCost
	(transport, url) <- rsyncTransport gc $
		fromMaybe (giveup "missing rsyncurl") $ remoteAnnexRsyncUrl gc
	protectsargs <- liftIO probeRsyncProtectsArgs
	let o = genRsyncOpts protectsargs c gc transport url
	-- A filesystem path as the url means the remote is local.
	let islocal = rsyncUrlIsPath $ rsyncUrl o
	return $ Just $ specialRemote c
		(fileStorer $ store o)
		(fileRetriever $ retrieve o)
		(remove o)
		(checkKey o)
		Remote
			{ uuid = u
			, cost = cst
			, name = Git.repoDescribe r
			, storeKey = storeKeyDummy
			, retrieveKeyFile = retrieveKeyFileDummy
			, retrieveKeyFileCheap = Just (retrieveCheap o)
			, retrievalSecurityPolicy = RetrievalAllKeysSecure
			, removeKey = removeKeyDummy
			, lockContent = Nothing
			, checkPresent = checkPresentDummy
			, checkPresentCheap = False
			, exportActions = ExportActions
				{ storeExport = storeExportM o
				, retrieveExport = retrieveExportM o
				, removeExport = removeExportM o
				, versionedExport = False
				, checkPresentExport = checkPresentExportM o
				, removeExportDirectory = Just (removeExportDirectoryM o)
				, renameExport = renameExportM o
				}
			, importActions = importUnsupported
			, whereisKey = Nothing
			, remoteFsck = Nothing
			, repairRepo = Nothing
			, config = c
			, getRepo = return r
			, gitconfig = gc
			, localpath = if islocal
				then Just $ rsyncUrl o
				else Nothing
			, readonly = False
			, appendonly = False
			, untrustworthy = False
			, availability = if islocal then LocallyAvailable else GloballyAvailable
			, remotetype = remote
			, mkUnavailable = return Nothing
			, getInfo = return [("url", url)]
			, claimUrl = Nothing
			, checkUrl = Nothing
			, remoteStateHandle = rs
			}
|
2013-09-08 18:54:28 +00:00
|
|
|
|
2022-05-03 16:12:25 +00:00
|
|
|
-- | Since 3.2.4, rsync protects filenames from being exposed to the shell.
-- True when the local rsync has that protection (detected by
-- 'probeRsyncProtectsArgs'); it makes manual shell escaping unnecessary.
newtype RsyncProtectsArgs = RsyncProtectsArgs Bool
|
|
|
|
|
|
|
|
-- | Probe whether the locally installed rsync protects filenames from
-- being exposed to the remote shell (rsync >= 3.2.4).
--
-- The --old-args option was added to disable the new arg protection,
-- so its presence in --help output is used to detect when the feature
-- is supported by rsync, rather than parsing version numbers.
probeRsyncProtectsArgs :: IO RsyncProtectsArgs
probeRsyncProtectsArgs = do
	(helpoutput, _exited) <- processTranscript "rsync" ["--help"] Nothing
	let supported = "--old-args" `isInfixOf` helpoutput
	return (RsyncProtectsArgs supported)
|
|
|
|
|
|
|
|
|
2020-01-20 19:13:49 +00:00
|
|
|
-- Things used by genRsyncOpts
-- | Config field parsers shared by this remote and by other remotes
-- that transport over rsync. shellescape defaults to yes.
rsyncRemoteConfigs :: [RemoteConfigFieldParser]
rsyncRemoteConfigs =
	[ yesNoParser shellEscapeField (Just True)
		(FieldDesc "set to no to avoid usual shell escaping (not recommended)")
	]
|
|
|
|
|
2022-05-03 16:12:25 +00:00
|
|
|
-- | Build the RsyncOpts used for all transfers to/from the remote.
--
-- The transport action (eg ssh parameters) is prepended to the
-- configured rsync options. Shell escaping is skipped entirely when
-- rsync itself protects args (see 'RsyncProtectsArgs').
genRsyncOpts :: RsyncProtectsArgs -> ParsedRemoteConfig -> RemoteGitConfig -> Annex [CommandParam] -> RsyncUrl -> RsyncOpts
genRsyncOpts (RsyncProtectsArgs protectsargs) c gc transport url = RsyncOpts
	{ rsyncUrl = url
	, rsyncOptions = appendtransport $ opts []
	, rsyncUploadOptions = appendtransport $
		opts (remoteAnnexRsyncUploadOptions gc)
	, rsyncDownloadOptions = appendtransport $
		opts (remoteAnnexRsyncDownloadOptions gc)
	, rsyncShellEscape = if protectsargs
		then False
		else fromMaybe True (getRemoteConfigValue shellEscapeField c)
	}
  where
	appendtransport l = (++ l) <$> transport
	opts specificopts = map Param $ filter safe $
		remoteAnnexRsyncOptions gc ++ specificopts
	safe opt
		-- Don't allow user to pass --delete to rsync;
		-- that could cause it to delete other keys
		-- in the same hash bucket as a key it sends.
		| opt == "--delete" = False
		| opt == "--delete-excluded" = False
		| otherwise = True
|
2013-09-08 18:54:28 +00:00
|
|
|
|
2019-06-13 15:09:55 +00:00
|
|
|
-- | Work out how to run rsync to talk to the given url.
--
-- Returns an action producing the transport command params (an action,
-- because ssh option generation is deferred), along with the url.
-- Shell urls (user@host:/dir) are dispatched over the configured
-- annex-rsync-transport ("ssh" with options, or "rsh"); plain paths
-- need no transport at all.
rsyncTransport :: RemoteGitConfig -> RsyncUrl -> Annex (Annex [CommandParam], RsyncUrl)
rsyncTransport gc url
	| rsyncUrlIsShell url =
		(\transport -> return (rsyncShell <$> transport, url)) =<<
			case fromNull ["ssh"] (remoteAnnexRsyncTransport gc) of
				"ssh":sshopts -> do
					let (port, sshopts') = sshReadPort sshopts
					    -- user@host part, before the colon
					    userhost = either giveup id $ mkSshHost $
						takeWhile (/= ':') url
					return $ (Param "ssh":) <$> sshOptions ConsumeStdin
						(userhost, port) gc
						(map Param $ loginopt ++ sshopts')
				"rsh":rshopts -> return $ pure $ map Param $ "rsh" :
					loginopt ++ rshopts
				rsh -> giveup $ "Unknown Rsync transport: "
					++ unwords rsh
	| otherwise = return (pure [], url)
  where
	-- Login name, when the url has a user@ prefix.
	login = case separate (=='@') url of
		(_h, "") -> Nothing
		(l, _) -> Just l
	loginopt = maybe [] (\l -> ["-l",l]) login
	fromNull as xs = if null xs then as else xs
|
2011-04-28 00:30:43 +00:00
|
|
|
|
2017-02-07 18:35:58 +00:00
|
|
|
-- | Set up (or re-set up) the rsync special remote.
--
-- Requires a rsyncurl= parameter; runs encryption setup, and records
-- the url in git config.
rsyncSetup :: SetupStage -> Maybe UUID -> Maybe CredPair -> RemoteConfig -> RemoteGitConfig -> Annex (RemoteConfig, UUID)
rsyncSetup _ mu _ c gc = do
	u <- maybe (liftIO genUUID) return mu
	-- verify configuration is sane
	let url = maybe (giveup "Specify rsyncurl=") fromProposedAccepted $
		M.lookup rsyncUrlField c
	(c', _encsetup) <- encryptionSetup c gc

	-- The rsyncurl is stored in git config, not only in this remote's
	-- persistent state, so it can vary between hosts.
	gitConfigSpecialRemote u c' [("rsyncurl", url)]
	return (c', u)
|
2011-04-28 00:06:07 +00:00
|
|
|
|
2014-08-03 21:31:10 +00:00
|
|
|
{- To send a single key is slightly tricky; need to build up a temporary
 - directory structure to pass to rsync so it can create the hash
 - directories.
 -
 - This would not be necessary if the hash directory structure used locally
 - was always the same as that used on the rsync remote. So if that's ever
 - unified, this gets nicer.
 - (When we have the right hash directory structure, we can just
 - pass --include=X --include=X/Y --include=X/Y/file --exclude=*)
 -}
store :: RsyncOpts -> Key -> FilePath -> MeterUpdate -> Annex ()
store o k src meterupdate = storeGeneric o meterupdate basedest populatedest
  where
	-- Destination path within the scratch dir, using the remote's
	-- hash directory layout.
	basedest = fromRawFilePath $ Prelude.head (keyPaths k)
	populatedest dest = liftIO $ if canrename
		then do
			R.rename (toRawFilePath src) (toRawFilePath dest)
			return True
		else createLinkOrCopy (toRawFilePath src) (toRawFilePath dest)
	{- If the key being sent is encrypted or chunked, the file
	 - containing its content is a temp file, and so can be
	 - renamed into place. Otherwise, the file is the annexed
	 - object file, and has to be copied or hard linked into place. -}
	canrename = isEncKey k || isChunkKey k
|
|
|
|
|
2020-05-13 18:03:00 +00:00
|
|
|
-- | Like 'storeGeneric'', but gives up with an error when the rsync
-- transfer does not succeed.
storeGeneric :: RsyncOpts -> MeterUpdate -> FilePath -> (FilePath -> Annex Bool) -> Annex ()
storeGeneric o meterupdate basedest populatedest = do
	stored <- storeGeneric' o meterupdate basedest populatedest
	unless stored $
		giveup "failed to rsync content"
|
|
|
|
|
|
|
|
-- | Populate a file under a scratch directory (via the provided action)
-- and rsync the whole scratch tree to the remote, so rsync creates any
-- needed parent directories. Returns False if populating or the
-- transfer failed.
storeGeneric' :: RsyncOpts -> MeterUpdate -> FilePath -> (FilePath -> Annex Bool) -> Annex Bool
storeGeneric' o meterupdate basedest populatedest = withRsyncScratchDir $ \tmp -> do
	let dest = tmp </> basedest
	createAnnexDirectory (parentDir (toRawFilePath dest))
	ok <- populatedest dest
	ps <- sendParams
	if ok
		then showResumable $ rsyncRemote Upload o (Just meterupdate) $ ps ++
			Param "--recursive" : partialParams ++
			-- tmp/ to send contents of tmp dir
			[ File $ addTrailingPathSeparator tmp
			, Param $ rsyncUrl o
			]
		else return False
|
2011-04-28 00:06:07 +00:00
|
|
|
|
2021-08-16 20:22:00 +00:00
|
|
|
-- | Retrieve a key's content from the remote to a local file,
-- with progress reporting.
retrieve :: RsyncOpts -> RawFilePath -> Key -> MeterUpdate -> Annex ()
retrieve o f k p = rsyncRetrieveKey o k (fromRawFilePath f) (Just p)
|
2012-01-20 17:23:11 +00:00
|
|
|
|
2020-05-13 21:05:56 +00:00
|
|
|
-- | Cheap retrieval: only worthwhile when the destination can be
-- preseeded with existing local content, letting rsync transfer just
-- the differences. Gives up when no preseed is possible.
retrieveCheap :: RsyncOpts -> Key -> AssociatedFile -> FilePath -> Annex ()
retrieveCheap o k _af f = do
	preseeded <- preseedTmp k f
	if preseeded
		then rsyncRetrieveKey o k f Nothing
		else giveup "cannot preseed rsync with existing content"
|
2011-04-28 00:06:07 +00:00
|
|
|
|
run Preparer to get Remover and CheckPresent actions
This will allow special remotes to eg, open a http connection and reuse it,
while checking if chunks are present, or removing chunks.
S3 and WebDAV both need this to support chunks with reasonable speed.
Note that a special remote might want to cache a http connection across
multiple requests. A simple case of this is that CheckPresent is typically
called before Store or Remove. A remote using this interface can certianly
use a Preparer that eg, uses a MVar to cache a http connection.
However, it's up to the remote to then deal with things like stale or
stalled http connections when eg, doing a series of downloads from a remote
and other places. There could be long delays between calls to a remote,
which could lead to eg, http connection stalls; the machine might even
move to a new network, etc.
It might be nice to improve this interface later to allow
the simple case without needing to handle the full complex case.
One way to do it would be to have a `Transaction SpecialRemote cache`,
where SpecialRemote contains methods for Storer, Retriever, Remover, and
CheckPresent, that all expect to be passed a `cache`.
2014-08-06 18:28:36 +00:00
|
|
|
-- | Remove a key from the remote.
--
-- Since the remote may use any of the known hash directory layouts,
-- includes are generated for every layout (see 'removeGeneric' for why
-- parents must be included too).
remove :: RsyncOpts -> Remover
remove o k = removeGeneric o includes
  where
	includes = concatMap use dirHashes
	use h = let dir = fromRawFilePath (h def k) in
		[ fromRawFilePath (parentDir (toRawFilePath dir))
		, dir
		-- match content directory and anything in it
		, dir </> fromRawFilePath (keyFile k) </> "***"
		]
|
|
|
|
|
|
|
|
{- An empty directory is rsynced to make it delete. Everything is excluded,
 - except for the specified includes. Due to the way rsync traverses
 - directories, the includes must match both the file to be deleted, and
 - its parent directories, but not their other contents. -}
removeGeneric :: RsyncOpts -> [String] -> Annex ()
removeGeneric o includes = do
	ps <- sendParams
	opts <- rsyncOptions o
	ok <- withRsyncScratchDir $ \tmp -> liftIO $ do
		{- Send an empty directory to rsync to make it delete. -}
		rsync $ opts ++ ps ++
			map (\s -> Param $ "--include=" ++ s) includes ++
			[ Param "--exclude=*" -- exclude everything else
			, Param "--quiet", Param "--delete", Param "--recursive"
			] ++ partialParams ++
			[ Param $ addTrailingPathSeparator tmp
			, Param $ rsyncUrl o
			]
	unless ok $
		giveup "rsync failed"
|
|
|
|
|
remove "checking remotename" message
This fixes fsck of a remote that uses chunking displaying
(checking remotename) (checking remotename)" for every chunk.
Also, some remotes displayed the message, and others did not, with no
consistency. It was originally displayed only when accessing remotes
that were expensive or might involve a password prompt, I think, but
nothing in the API said when to do it so it became an inconsistent mess.
Originally I thought fsck should always display it. But it only displays
in fsck --from remote, so the user knows the remote is being accessed,
so there is no reason to tell them it's accessing it over and over.
It was also possible for git-annex move to sometimes display it twice,
due to checking if content is present twice. But, the user of move
specifies --from/--to, so it does not need to display when it's
accessing the remote, as the user expects it to access the remote.
git-annex get might display it, but only if the remote also supports
hasKeyCheap, which is really only local git remotes, which didn't
display it always; and in any case nothing displayed it before hasKeyCheap,
which is checked first, so I don't think this needs to display it ever.
mirror is like move. And that's all the main places it would have been
displayed.
This commit was sponsored by Jochen Bartl on Patreon.
2021-04-27 16:50:45 +00:00
|
|
|
-- | Check if a key is present on the remote, trying each possible
-- hash directory layout's url.
checkKey :: RsyncOpts -> CheckPresent
checkKey o k = checkPresentGeneric o (rsyncUrls o k)
|
2018-02-28 16:09:03 +00:00
|
|
|
|
|
|
|
-- | Check presence by listing each candidate url with rsync, until one
-- succeeds. Output is discarded; only the exit status matters.
checkPresentGeneric :: RsyncOpts -> [RsyncUrl] -> Annex Bool
checkPresentGeneric o rsyncurls = do
	opts <- rsyncOptions o
	-- note: Does not currently differentiate between rsync failing
	-- to connect, and the file not being present.
	untilTrue rsyncurls $ \u ->
		liftIO $ catchBoolIO $ withNullHandle $ \nullh ->
			let p = (proc "rsync" $ toCommand $ opts ++ [Param u])
				{ std_out = UseHandle nullh
				, std_err = UseHandle nullh
				}
			in withCreateProcess p $ \_ _ _ -> checkSuccessProcess
|
2011-04-28 00:06:07 +00:00
|
|
|
|
2020-05-15 16:17:15 +00:00
|
|
|
-- | Store a file to the remote at an exported location.
-- The source is the annexed object, so it is linked or copied (never
-- renamed) into the scratch tree.
storeExportM :: RsyncOpts -> FilePath -> Key -> ExportLocation -> MeterUpdate -> Annex ()
storeExportM o src _k loc meterupdate =
	storeGeneric o meterupdate basedest populatedest
  where
	basedest = fromRawFilePath (fromExportLocation loc)
	populatedest = liftIO . createLinkOrCopy (toRawFilePath src) . toRawFilePath
|
2018-02-28 16:09:03 +00:00
|
|
|
|
2022-05-09 16:25:04 +00:00
|
|
|
-- | Retrieve an exported file from the remote, verifying its content
-- incrementally while rsync writes the destination file.
retrieveExportM :: RsyncOpts -> Key -> ExportLocation -> FilePath -> MeterUpdate -> Annex Verification
retrieveExportM o k loc dest p =
	verifyKeyContentIncrementally AlwaysVerify k $ \iv ->
		tailVerify iv (toRawFilePath dest) $
			rsyncRetrieve o [rsyncurl] dest (Just p)
  where
	rsyncurl = mkRsyncUrl o (fromRawFilePath (fromExportLocation loc))
|
2018-02-28 16:09:03 +00:00
|
|
|
|
|
|
|
-- | Check if an exported file is present on the remote.
checkPresentExportM :: RsyncOpts -> Key -> ExportLocation -> Annex Bool
checkPresentExportM o _k loc = checkPresentGeneric o [rsyncurl]
  where
	rsyncurl = mkRsyncUrl o (fromRawFilePath (fromExportLocation loc))
|
2018-02-28 16:09:03 +00:00
|
|
|
|
2020-05-15 18:11:59 +00:00
|
|
|
-- | Remove an exported file from the remote. All parent directories of
-- the location must be included for rsync's --delete to reach the file
-- (see 'removeGeneric').
removeExportM :: RsyncOpts -> Key -> ExportLocation -> Annex ()
removeExportM o _k loc =
	removeGeneric o $ map fromRawFilePath $
		includes $ fromExportLocation loc
  where
	includes f = f : case upFrom f of
		Nothing -> []
		Just f' -> includes f'
|
|
|
|
|
2020-05-15 18:32:45 +00:00
|
|
|
-- | Remove an entire exported directory tree from the remote.
-- "d/***" matches the directory and everything below it; parents must
-- also be included (see 'removeGeneric').
removeExportDirectoryM :: RsyncOpts -> ExportDirectory -> Annex ()
removeExportDirectoryM o ed = removeGeneric o (allbelow d : includes d)
  where
	d = fromRawFilePath $ fromExportDirectory ed
	allbelow f = f </> "***"
	includes f = f : case upFrom (toRawFilePath f) of
		Nothing -> []
		Just f' -> includes (fromRawFilePath f')
|
2018-02-28 16:09:03 +00:00
|
|
|
|
2020-05-15 19:05:52 +00:00
|
|
|
-- | Renaming is not supported over rsync; Nothing makes the caller fall
-- back to a store + remove.
renameExportM :: RsyncOpts -> Key -> ExportLocation -> ExportLocation -> Annex (Maybe ())
renameExportM _o _k _src _dest = return Nothing
|
2018-02-28 16:09:03 +00:00
|
|
|
|
2011-04-28 00:06:07 +00:00
|
|
|
{- Rsync params to enable resumes of sending files safely,
 - ensure that files are only moved into place once complete
 -}
partialParams :: [CommandParam]
partialParams = [Param "--partial", Param "--partial-dir=.rsync-partial"]
|
2011-04-28 00:06:07 +00:00
|
|
|
|
2013-05-09 17:49:47 +00:00
|
|
|
{- When sending files from crippled filesystems, the permissions can be all
 - messed up, and it's better to use the default permissions on the
 - destination. -}
sendParams :: Annex [CommandParam]
sendParams = do
	crippled <- crippledFileSystem
	return $ if crippled
		then [rsyncUseDestinationPermissions]
		else []
|
|
|
|
|
2011-04-28 00:06:07 +00:00
|
|
|
{- Runs an action in an empty scratch directory that can be used to build
 - up trees for rsync. -}
-- The scratch dir is created under the annex tmp object dir and is
-- cleaned up when the action finishes.
withRsyncScratchDir :: (FilePath -> Annex a) -> Annex a
withRsyncScratchDir a = do
	t <- fromRawFilePath <$> fromRepo gitAnnexTmpObjectDir
	withTmpDirIn t "rsynctmp" a
|
2011-04-28 00:06:07 +00:00
|
|
|
|
2020-05-15 16:51:09 +00:00
|
|
|
-- | Download from the first of the given urls that works, into dest.
-- Gives up with an error if every url fails.
rsyncRetrieve :: RsyncOpts -> [RsyncUrl] -> FilePath -> Maybe MeterUpdate -> Annex ()
rsyncRetrieve o rsyncurls dest meterupdate =
	unlessM go $
		giveup "rsync failed"
  where
	go = showResumable $ untilTrue rsyncurls $ \u -> rsyncRemote Download o meterupdate
		-- use inplace when retrieving to support resuming
		[ Param "--inplace"
		, Param u
		, File dest
		]
|
|
|
|
|
2020-05-13 21:05:56 +00:00
|
|
|
-- | Download a key's content, trying the urls for each possible hash
-- directory layout on the remote.
rsyncRetrieveKey :: RsyncOpts -> Key -> FilePath -> Maybe MeterUpdate -> Annex ()
rsyncRetrieveKey o k dest meterupdate =
	rsyncRetrieve o (rsyncUrls o k) dest meterupdate
|
2018-02-28 16:09:03 +00:00
|
|
|
|
2013-12-02 16:53:39 +00:00
|
|
|
-- | Run a transfer action; on failure, remind the user that rsync
-- transfers can be resumed by re-running. Returns the action's result.
showResumable :: Annex Bool -> Annex Bool
showResumable a = do
	ok <- a
	unless ok $
		showLongNote "rsync failed -- run git annex again to resume file transfer"
	return ok
|
|
|
|
|
2014-02-02 20:06:34 +00:00
|
|
|
-- | Run rsync to or from the remote, with the direction's configured
-- options, optionally feeding rsync's progress output to a meter.
rsyncRemote :: Direction -> RsyncOpts -> Maybe MeterUpdate -> [CommandParam] -> Annex Bool
rsyncRemote direction o m params = do
	opts <- mkopts
	let ps = opts ++ Param "--progress" : params
	case m of
		Nothing -> liftIO $ rsync ps
		Just meter -> do
			oh <- mkOutputHandlerQuiet
			liftIO $ rsyncProgress oh meter ps
  where
	mkopts
		| direction == Download = rsyncDownloadOptions o
		| otherwise = rsyncUploadOptions o