2011-04-28 00:06:07 +00:00
|
|
|
{- A remote that is only accessible by rsync.
|
|
|
|
-
|
|
|
|
- Copyright 2011 Joey Hess <joey@kitenet.net>
|
|
|
|
-
|
|
|
|
- Licensed under the GNU GPL version 3 or higher.
|
|
|
|
-}
|
|
|
|
|
2013-05-10 21:29:59 +00:00
|
|
|
{-# LANGUAGE CPP #-}
|
|
|
|
|
2013-09-08 18:54:28 +00:00
|
|
|
module Remote.Rsync (
|
|
|
|
remote,
|
2014-08-03 21:31:10 +00:00
|
|
|
store,
|
|
|
|
retrieve,
|
2013-09-08 18:54:28 +00:00
|
|
|
remove,
|
2014-08-06 17:45:19 +00:00
|
|
|
checkKey,
|
2013-09-08 18:54:28 +00:00
|
|
|
withRsyncScratchDir,
|
|
|
|
genRsyncOpts,
|
|
|
|
RsyncOpts
|
|
|
|
) where
|
2011-04-28 00:06:07 +00:00
|
|
|
|
2011-10-05 20:02:51 +00:00
|
|
|
import Common.Annex
|
2011-06-02 01:56:04 +00:00
|
|
|
import Types.Remote
|
2011-06-30 17:16:57 +00:00
|
|
|
import qualified Git
|
2011-04-28 00:06:07 +00:00
|
|
|
import Config
|
2013-03-13 20:16:01 +00:00
|
|
|
import Config.Cost
|
2011-10-04 04:40:47 +00:00
|
|
|
import Annex.Content
|
2013-09-07 22:38:00 +00:00
|
|
|
import Annex.UUID
|
2013-04-13 22:10:49 +00:00
|
|
|
import Annex.Ssh
|
2011-08-17 00:49:54 +00:00
|
|
|
import Remote.Helper.Special
|
2014-03-18 16:55:08 +00:00
|
|
|
import Remote.Rsync.RsyncUrl
|
2011-04-28 00:06:07 +00:00
|
|
|
import Crypto
|
2012-09-19 18:28:32 +00:00
|
|
|
import Utility.Rsync
|
2013-02-15 17:33:36 +00:00
|
|
|
import Utility.CopyFile
|
2013-03-28 21:03:04 +00:00
|
|
|
import Utility.Metered
|
2014-02-11 19:29:56 +00:00
|
|
|
import Utility.PID
|
2012-06-06 00:25:32 +00:00
|
|
|
import Annex.Perms
|
2014-02-02 20:06:34 +00:00
|
|
|
import Logs.Transfer
|
2014-02-11 18:06:50 +00:00
|
|
|
import Types.Creds
|
2014-08-03 20:54:57 +00:00
|
|
|
import Types.Key (isChunkKey)
|
2011-04-28 00:06:07 +00:00
|
|
|
|
2014-02-11 19:29:56 +00:00
|
|
|
import qualified Data.Map as M
|
|
|
|
|
2011-12-31 08:11:39 +00:00
|
|
|
-- | The rsync special remote type: remotes reachable only via rsync,
-- enumerated from the "rsyncurl" git config of special remotes.
remote :: RemoteType
remote = RemoteType
	{ typename = "rsync"
	, enumerate = findSpecialRemotes "rsyncurl"
	, generate = gen
	, setup = rsyncSetup
	}
|
|
|
|
|
2013-09-12 19:54:35 +00:00
|
|
|
-- | Generates the Remote value for an rsync special remote.
--
-- Fails with an error when the remote has no rsyncurl configured.
gen :: Git.Repo -> UUID -> RemoteConfig -> RemoteGitConfig -> Annex (Maybe Remote)
gen r u c gc = do
	cst <- remoteCost gc expensiveRemoteCost
	(transport, url) <- rsyncTransport gc $
		fromMaybe (error "missing rsyncurl") (remoteAnnexRsyncUrl gc)
	let o = genRsyncOpts c gc transport url
	let islocal = rsyncUrlIsPath (rsyncUrl o)
	return $ Just $ specialRemote' specialcfg c
		(simplyPrepare $ fileStorer $ store o)
		(simplyPrepare $ fileRetriever $ retrieve o)
		(simplyPrepare $ remove o)
		(simplyPrepare $ checkKey r o)
		Remote
			{ uuid = u
			, cost = cst
			, name = Git.repoDescribe r
			-- The real store/retrieve/remove/checkPresent actions
			-- are filled in by specialRemote'.
			, storeKey = storeKeyDummy
			, retrieveKeyFile = retreiveKeyFileDummy
			, retrieveKeyFileCheap = retrieveCheap o
			, removeKey = removeKeyDummy
			, checkPresent = checkPresentDummy
			, checkPresentCheap = False
			, whereisKey = Nothing
			, remoteFsck = Nothing
			, repairRepo = Nothing
			, config = c
			, repo = r
			, gitconfig = gc
			, localpath = if islocal
				then Just (rsyncUrl o)
				else Nothing
			, readonly = False
			, availability = if islocal then LocallyAvailable else GloballyAvailable
			, remotetype = remote
			, mkUnavailable = return Nothing
			, getInfo = return [("url", url)]
			, claimUrl = Nothing
			, checkUrl = const $ return Nothing
			}
  where
	specialcfg = (specialRemoteCfg c)
		-- Rsync displays its own progress.
		{ displayProgress = False }
|
2013-09-08 18:54:28 +00:00
|
|
|
|
|
|
|
-- | Builds the rsync options for a remote, combining the transport
-- command with any user-configured rsync options.
genRsyncOpts :: RemoteConfig -> RemoteGitConfig -> [CommandParam] -> RsyncUrl -> RsyncOpts
genRsyncOpts c gc transport url = RsyncOpts
	{ rsyncUrl = url
	, rsyncOptions = transport ++ opts []
	, rsyncUploadOptions = transport ++ opts (remoteAnnexRsyncUploadOptions gc)
	, rsyncDownloadOptions = transport ++ opts (remoteAnnexRsyncDownloadOptions gc)
	, rsyncShellEscape = M.lookup "shellescape" c /= Just "no"
	}
  where
	opts specificopts = map Param $ filter safe $
		remoteAnnexRsyncOptions gc ++ specificopts
	-- Never let the user pass --delete (or --delete-excluded) through
	-- to rsync; that could make it delete other keys stored in the
	-- same hash bucket as a key it sends.
	safe opt = opt `notElem` ["--delete", "--delete-excluded"]
|
2013-09-08 18:54:28 +00:00
|
|
|
|
|
|
|
-- | Determines the transport (ssh, rsh, ...) used to reach an rsync url,
-- yielding the command params to use and the (unchanged) url.
--
-- Urls that are not shell-style (eg local paths, rsync:// urls) need
-- no transport command at all.
rsyncTransport :: RemoteGitConfig -> RsyncUrl -> Annex ([CommandParam], RsyncUrl)
rsyncTransport gc url
	| rsyncUrlIsShell url = do
		shellcmd <- case fromNull ["ssh"] (remoteAnnexRsyncTransport gc) of
			("ssh":sshopts) -> do
				let (port, sshopts') = sshReadPort sshopts
				let userhost = takeWhile (/=':') url
				-- Enable ssh connection caching.
				(Param "ssh":) <$> sshCachingOptions
					(userhost, port)
					(map Param $ loginopt ++ sshopts')
			("rsh":rshopts) -> return $ map Param $ "rsh" :
				loginopt ++ rshopts
			badtransport -> error $ "Unknown Rsync transport: "
				++ unwords badtransport
		return (rsyncShell shellcmd, url)
	| otherwise = return ([], url)
  where
	-- A login user can be embedded in the url as "user@host".
	login = case separate (=='@') url of
		(_h, "") -> Nothing
		(l, _) -> Just l
	loginopt = maybe [] (\l -> ["-l",l]) login
	-- Falls back to the default when nothing is configured.
	fromNull def xs
		| null xs = def
		| otherwise = xs
|
2011-04-28 00:30:43 +00:00
|
|
|
|
2014-02-11 18:06:50 +00:00
|
|
|
-- | Sets up (or enables) an rsync special remote.
--
-- Fails with an error unless rsyncurl= is specified in the config.
rsyncSetup :: Maybe UUID -> Maybe CredPair -> RemoteConfig -> Annex (RemoteConfig, UUID)
rsyncSetup mu _ c = do
	u <- case mu of
		Just myu -> return myu
		Nothing -> liftIO genUUID
	-- Verify the configuration is sane.
	let url = fromMaybe (error "Specify rsyncurl=") $
		M.lookup "rsyncurl" c
	(c', _encsetup) <- encryptionSetup c

	-- The rsyncurl is stored in git config, not only in this remote's
	-- persistent state, so it can vary between hosts.
	gitConfigSpecialRemote u c' "rsyncurl" url
	return (c', u)
|
2011-04-28 00:06:07 +00:00
|
|
|
|
2014-08-03 21:31:10 +00:00
|
|
|
{- Sending a single key is slightly tricky: a temporary directory
 - structure has to be built up locally and passed to rsync, so that
 - rsync creates the hash directories on the remote.
 -
 - This would not be necessary if the hash directory structure used
 - locally were always the same as that used on the rsync remote. If
 - that's ever unified, this gets nicer.
 - (With the right hash directory structure, it would suffice to
 - pass --include=X --include=X/Y --include=X/Y/file --exclude=*)
 -}
store :: RsyncOpts -> Key -> FilePath -> MeterUpdate -> Annex Bool
store o k src meterupdate = withRsyncScratchDir $ \tmp -> do
	-- keyPaths is assumed to be non-empty for any key -- TODO confirm
	let dest = tmp </> Prelude.head (keyPaths k)
	liftIO $ createDirectoryIfMissing True $ parentDir dest
	staged <- liftIO $ if canrename
		then do
			rename src dest
			return True
		else createLinkOrCopy src dest
	sendps <- sendParams
	if not staged
		then return False
		else showResumable $ rsyncRemote Upload o (Just meterupdate) $ sendps ++
			[ Param "--recursive"
			, partialParams
			-- tmp/ to send the contents of the tmp dir
			, File $ addTrailingPathSeparator tmp
			, Param $ rsyncUrl o
			]
  where
	{- When the key being sent is encrypted or chunked, the file
	 - containing its content is a temp file, and so can be renamed
	 - into place. Otherwise, the file is the annexed object file,
	 - and has to be copied or hard linked into place. -}
	canrename = isEncKey k || isChunkKey k
|
2011-04-28 00:06:07 +00:00
|
|
|
|
2014-08-03 21:31:10 +00:00
|
|
|
-- | Retrieves a key's content to a file, erroring out if rsync fails.
retrieve :: RsyncOpts -> FilePath -> Key -> MeterUpdate -> Annex ()
retrieve o f k p = ifM (rsyncRetrieve o k f (Just p))
	( return ()
	, error "rsync failed"
	)
|
2012-01-20 17:23:11 +00:00
|
|
|
|
|
|
|
-- | Cheap retrieval only works when the tmp file can be preseeded
-- with content already present locally; otherwise gives up.
retrieveCheap :: RsyncOpts -> Key -> FilePath -> Annex Bool
retrieveCheap o k f = ifM (preseedTmp k f)
	( rsyncRetrieve o k f Nothing
	, return False
	)
|
2011-04-28 00:06:07 +00:00
|
|
|
|
run Preparer to get Remover and CheckPresent actions
This will allow special remotes to eg, open a http connection and reuse it,
while checking if chunks are present, or removing chunks.
S3 and WebDAV both need this to support chunks with reasonable speed.
Note that a special remote might want to cache a http connection across
multiple requests. A simple case of this is that CheckPresent is typically
called before Store or Remove. A remote using this interface can certainly
use a Preparer that eg, uses a MVar to cache a http connection.
However, it's up to the remote to then deal with things like stale or
stalled http connections when eg, doing a series of downloads from a remote
and other places. There could be long delays between calls to a remote,
which could lead to eg, http connection stalls; the machine might even
move to a new network, etc.
It might be nice to improve this interface later to allow
the simple case without needing to handle the full complex case.
One way to do it would be to have a `Transaction SpecialRemote cache`,
where SpecialRemote contains methods for Storer, Retriever, Remover, and
CheckPresent, that all expect to be passed a `cache`.
2014-08-06 18:28:36 +00:00
|
|
|
-- | Removes a key from the remote by rsyncing an empty directory over
-- the locations where its content could be stored.
remove :: RsyncOpts -> Remover
remove o k = do
	ps <- sendParams
	withRsyncScratchDir $ \tmp -> liftIO $ do
		{- Send an empty directory to rsync to make it delete. -}
		let emptydir = tmp </> keyFile k
		createDirectoryIfMissing True emptydir
		rsync $ rsyncOptions o ++ ps ++
			[ Param ("--include=" ++ i) | i <- includes ] ++
			[ Param "--exclude=*" -- exclude everything else
			, Params "--quiet --delete --recursive"
			, partialParams
			, Param $ addTrailingPathSeparator emptydir
			, Param $ rsyncUrl o
			]
  where
	{- Include rules matching the directories where the content could
	 - be. The parent directories have to be explicitly included too,
	 - due to how rsync traverses directories. -}
	includes = concatMap use annexHashes
	use h = let dir = h k in
		[ parentDir dir
		, dir
		-- match the content directory and anything in it
		, dir </> keyFile k </> "***"
		]
|
2011-04-28 00:06:07 +00:00
|
|
|
|
run Preparer to get Remover and CheckPresent actions
This will allow special remotes to eg, open a http connection and reuse it,
while checking if chunks are present, or removing chunks.
S3 and WebDAV both need this to support chunks with reasonable speed.
Note that a special remote might want to cache a http connection across
multiple requests. A simple case of this is that CheckPresent is typically
called before Store or Remove. A remote using this interface can certainly
use a Preparer that eg, uses a MVar to cache a http connection.
However, it's up to the remote to then deal with things like stale or
stalled http connections when eg, doing a series of downloads from a remote
and other places. There could be long delays between calls to a remote,
which could lead to eg, http connection stalls; the machine might even
move to a new network, etc.
It might be nice to improve this interface later to allow
the simple case without needing to handle the full complex case.
One way to do it would be to have a `Transaction SpecialRemote cache`,
where SpecialRemote contains methods for Storer, Retriever, Remover, and
CheckPresent, that all expect to be passed a `cache`.
2014-08-06 18:28:36 +00:00
|
|
|
-- | Checks if a key is present on the remote by running rsync against
-- each url the content could be stored at.
--
-- Note: does not currently differentiate between rsync failing to
-- connect, and the file not being present.
checkKey :: Git.Repo -> RsyncOpts -> CheckPresent
checkKey r o k = do
	showAction $ "checking " ++ Git.repoDescribe r
	untilTrue (rsyncUrls o k) $ \rsyncurl ->
		liftIO $ catchBoolIO $ do
			withQuietOutput createProcessSuccess $
				proc "rsync" $ toCommand $
					rsyncOptions o ++ [Param rsyncurl]
			return True
|
2011-04-28 00:06:07 +00:00
|
|
|
|
|
|
|
{- Rsync params that make resuming of interrupted transfers safe,
 - ensuring a file is only moved into place once it is complete. -}
partialParams :: CommandParam
partialParams = Params "--partial --partial-dir=.rsync-partial"
|
2011-04-28 00:06:07 +00:00
|
|
|
|
2013-05-09 17:49:47 +00:00
|
|
|
{- When sending files from crippled filesystems, the permissions can be
 - all messed up, and it's better to use the default permissions on the
 - destination. -}
sendParams :: Annex [CommandParam]
sendParams = do
	crippled <- crippledFileSystem
	return $ if crippled
		then [rsyncUseDestinationPermissions]
		else []
|
|
|
|
|
2011-04-28 00:06:07 +00:00
|
|
|
{- Runs an action in an empty scratch directory that can be used to
 - build up trees for rsync. The directory is emptied before the action
 - runs and cleaned up afterwards. -}
withRsyncScratchDir :: (FilePath -> Annex a) -> Annex a
withRsyncScratchDir a = do
	-- The pid keeps concurrent git-annex processes from colliding.
	pid <- liftIO getPID
	tmptop <- fromRepo gitAnnexTmpObjectDir
	createAnnexDirectory tmptop
	let scratch = tmptop </> "rsynctmp" </> show pid
	nuke scratch
	liftIO $ createDirectoryIfMissing True scratch
	nuke scratch `after` a scratch
  where
	nuke d = liftIO $ whenM (doesDirectoryExist d) $
		removeDirectoryRecursive d
|
2011-04-28 00:06:07 +00:00
|
|
|
|
2013-04-11 21:15:45 +00:00
|
|
|
-- | Downloads a key's content to dest, trying each possible source url
-- until one succeeds.
rsyncRetrieve :: RsyncOpts -> Key -> FilePath -> Maybe MeterUpdate -> Annex Bool
rsyncRetrieve o k dest meterupdate = showResumable $
	untilTrue (rsyncUrls o k) $ \srcurl ->
		rsyncRemote Download o meterupdate
			-- use inplace when retrieving to support resuming
			[ Param "--inplace"
			, Param srcurl
			, File dest
			]
|
|
|
|
|
2013-12-02 16:53:39 +00:00
|
|
|
-- | Runs an action; when it fails, reminds the user that the transfer
-- can be resumed by running git-annex again.
showResumable :: Annex Bool -> Annex Bool
showResumable a = do
	ok <- a
	if ok
		then return True
		else do
			showLongNote "rsync failed -- run git annex again to resume file transfer"
			return False
|
|
|
|
|
2014-02-02 20:06:34 +00:00
|
|
|
-- | Runs rsync to or from the remote, optionally feeding progress
-- updates to a meter callback.
rsyncRemote :: Direction -> RsyncOpts -> Maybe MeterUpdate -> [CommandParam] -> Annex Bool
rsyncRemote direction o callback params = do
	showOutput -- make way for the progress bar
	liftIO $ maybe rsync rsyncProgress callback $
		opts ++ [Params "--progress"] ++ params
  where
	opts = case direction of
		Download -> rsyncDownloadOptions o
		_ -> rsyncUploadOptions o
|