{- git-annex transfers
 -
 - Copyright 2012-2017 Joey Hess <id@joeyh.name>
 -
 - Licensed under the GNU GPL version 3 or higher.
 -}

{-# LANGUAGE CPP, BangPatterns #-}

module Annex.Transfer (
	module X,
	upload,
	download,
	runTransfer,
	alwaysRunTransfer,
	noRetry,
	forwardRetry,
	pickRemote,
) where

import Annex.Common
import qualified Annex
import Logs.Transfer as X
import Types.Transfer as X
import Annex.Notification as X
import Annex.Perms
import Utility.Metered
import Annex.LockPool
import Types.Key
import qualified Types.Remote as Remote
import Types.Concurrency

import Control.Concurrent
import qualified Data.Map.Strict as M
import Data.Ord

upload :: Observable v => UUID -> Key -> AssociatedFile -> RetryDecider -> (MeterUpdate -> Annex v) -> NotifyWitness -> Annex v
upload u key f d a _witness = guardHaveUUID u $
	runTransfer (Transfer Upload u key) f d a

download :: Observable v => UUID -> Key -> AssociatedFile -> RetryDecider -> (MeterUpdate -> Annex v) -> NotifyWitness -> Annex v
download u key f d a _witness = guardHaveUUID u $
	runTransfer (Transfer Download u key) f d a

guardHaveUUID :: Observable v => UUID -> Annex v -> Annex v
guardHaveUUID u a
	| u == NoUUID = return observeFailure
	| otherwise = a
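
{- Illustrative usage sketch (not part of this module's code): callers
 - normally obtain the NotifyWitness via notifyTransfer (re-exported from
 - Annex.Notification above) and supply a transfer action that moves the
 - data; fetchaction below is a hypothetical placeholder for such an
 - action.
 -
 -   notifyTransfer Download afile $
 -           download (Remote.uuid r) key afile forwardRetry $ \p ->
 -                   fetchaction r key p
 -}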

{- Runs a transfer action. Creates and locks the lock file while the
 - action is running, and stores info in the transfer information
 - file.
 -
 - If the transfer action returns False, the transfer info is
 - left in the failedTransferDir.
 -
 - If the transfer is already in progress, returns False.
 -
 - An upload can be run from a read-only filesystem, and in this case
 - no transfer information or lock file is used.
 -}
runTransfer :: Observable v => Transfer -> AssociatedFile -> RetryDecider -> (MeterUpdate -> Annex v) -> Annex v
runTransfer = runTransfer' False

{- Like runTransfer, but ignores any existing transfer lock file for the
 - transfer, allowing re-running a transfer that is already in progress.
 -
 - Note that this may result in confusing progress meter display in the
 - webapp, if multiple processes are writing to the transfer info file. -}
alwaysRunTransfer :: Observable v => Transfer -> AssociatedFile -> RetryDecider -> (MeterUpdate -> Annex v) -> Annex v
alwaysRunTransfer = runTransfer' True

runTransfer' :: Observable v => Bool -> Transfer -> AssociatedFile -> RetryDecider -> (MeterUpdate -> Annex v) -> Annex v
runTransfer' ignorelock t afile shouldretry transferaction = checkSecureHashes t $ do
	info <- liftIO $ startTransferInfo afile
	(meter, tfile, metervar) <- mkProgressUpdater t info
	mode <- annexFileMode
	(lck, inprogress) <- prep tfile mode info
	if inprogress && not ignorelock
		then do
			showNote "transfer already in progress, or unable to take transfer lock"
			return observeFailure
		else do
			v <- retry info metervar $ transferaction meter
			liftIO $ cleanup tfile lck
			if observeBool v
				then removeFailedTransfer t
				else recordFailedTransfer t info
			return v
  where
#ifndef mingw32_HOST_OS
	prep tfile mode info = catchPermissionDenied (const prepfailed) $ do
		let lck = transferLockFile tfile
		createAnnexDirectory $ takeDirectory lck
		r <- tryLockExclusive (Just mode) lck
		case r of
			Nothing -> return (Nothing, True)
			Just lockhandle -> ifM (checkSaneLock lck lockhandle)
				( do
					void $ liftIO $ tryIO $
						writeTransferInfoFile info tfile
					return (Just lockhandle, False)
				, do
					liftIO $ dropLock lockhandle
					return (Nothing, True)
				)
#else
	prep tfile _mode info = catchPermissionDenied (const prepfailed) $ do
		let lck = transferLockFile tfile
		createAnnexDirectory $ takeDirectory lck
		v <- catchMaybeIO $ liftIO $ lockExclusive lck
		case v of
			Nothing -> return (Nothing, False)
			Just Nothing -> return (Nothing, True)
			Just (Just lockhandle) -> do
				void $ liftIO $ tryIO $
					writeTransferInfoFile info tfile
				return (Just lockhandle, False)
#endif
	prepfailed = return (Nothing, False)

	cleanup _ Nothing = noop
	cleanup tfile (Just lockhandle) = do
		let lck = transferLockFile tfile
		void $ tryIO $ removeFile tfile
#ifndef mingw32_HOST_OS
		void $ tryIO $ removeFile lck
		dropLock lockhandle
#else
		{- Windows cannot delete the lockfile until the lock
		 - is closed. So it's possible to race with another
		 - process that takes the lock before it's removed,
		 - so ignore failure to remove.
		 -}
		dropLock lockhandle
		void $ tryIO $ removeFile lck
#endif
	retry oldinfo metervar run = do
		v <- tryNonAsync run
		case v of
			Right b -> return b
			Left e -> do
				warning (show e)
				b <- getbytescomplete metervar
				let newinfo = oldinfo { bytesComplete = Just b }
				if shouldretry oldinfo newinfo
					then retry newinfo metervar run
					else return observeFailure
	getbytescomplete metervar
		| transferDirection t == Upload =
			liftIO $ readMVar metervar
		| otherwise = do
			f <- fromRepo $ gitAnnexTmpObjectLocation (transferKey t)
			liftIO $ catchDefaultIO 0 $ getFileSize f

{- Avoid download and upload of keys with insecure content when
 - annex.securehashesonly is configured.
 -
 - This is not a security check. Even if this let the content be
 - downloaded, the actual security checks would prevent the content from
 - being added to the repository. The only reason this is done here is to
 - avoid transferring content that's going to be rejected anyway.
 -
 - We assume that, if annex.securehashesonly is set and the local repo
 - still contains content using an insecure hash, remotes will likewise
 - tend to be configured to reject it, so Upload is also prevented.
 -}
checkSecureHashes :: Observable v => Transfer -> Annex v -> Annex v
checkSecureHashes t a
	| cryptographicallySecure variety = a
	| otherwise = ifM (annexSecureHashesOnly <$> Annex.getGitConfig)
		( do
			warning $ "annex.securehashesonly blocked transfer of " ++ formatKeyVariety variety ++ " key"
			return observeFailure
		, a
		)
  where
	variety = keyVariety (transferKey t)
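
{- For reference, the gating configuration is set per-repository, e.g.
 - (illustrative):
 -
 -   git config annex.securehashesonly true
 -}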

type RetryDecider = TransferInfo -> TransferInfo -> Bool

noRetry :: RetryDecider
noRetry _ _ = False

{- Retries a transfer when it fails, as long as the failed transfer managed
 - to send some data. -}
forwardRetry :: RetryDecider
forwardRetry old new = bytesComplete old < bytesComplete new
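
{- A custom decider is just a function over the old and new TransferInfo.
 - For example, a sketch (not used by this module) that only retries when
 - the failed attempt managed to move at least 1 MiB of new data:
 -
 -   progressRetry :: RetryDecider
 -   progressRetry old new =
 -           fromMaybe 0 (bytesComplete new) - fromMaybe 0 (bytesComplete old) >= 1048576
 -}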

{- Picks a remote from the list and tries a transfer to it. If the transfer
 - does not succeed, goes on to try other remotes from the list.
 -
 - The list should already be ordered by remote cost, and is normally
 - tried in order. However, when concurrent jobs are running, they will
 - be assigned different remotes of the same cost when possible. This can
 - increase total transfer speed.
 -}
pickRemote :: Observable v => [Remote] -> (Remote -> Annex v) -> Annex v
pickRemote l a = go l =<< Annex.getState Annex.concurrency
  where
	go [] _ = return observeFailure
	go (r:[]) _ = a r
	go rs (Concurrent n) | n > 1 = do
		mv <- Annex.getState Annex.activeremotes
		active <- liftIO $ takeMVar mv
		let rs' = sortBy (lessActiveFirst active) rs
		goconcurrent mv active rs'
	go (r:rs) _ = do
		ok <- a r
		if observeBool ok
			then return ok
			else go rs NonConcurrent
	goconcurrent mv active [] = do
		liftIO $ putMVar mv active
		return observeFailure
	goconcurrent mv active (r:rs) = do
		let !active' = M.insertWith (+) r 1 active
		liftIO $ putMVar mv active'
		let getnewactive = do
			active'' <- liftIO $ takeMVar mv
			let !active''' = M.update (\n -> if n > 1 then Just (n-1) else Nothing) r active''
			return active'''
		let removeactive = liftIO . putMVar mv =<< getnewactive
		ok <- a r `onException` removeactive
		if observeBool ok
			then do
				removeactive
				return ok
			else do
				active'' <- getnewactive
				-- Re-sort the remaining rs
				-- because other threads could have
				-- been assigned them in the meantime.
				let rs' = sortBy (lessActiveFirst active'') rs
				goconcurrent mv active'' rs'

lessActiveFirst :: M.Map Remote Integer -> Remote -> Remote -> Ordering
lessActiveFirst active a b
	| Remote.cost a == Remote.cost b = comparing (`M.lookup` active) a b
	| otherwise = compare a b
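
{- For reference, the concurrent code path in pickRemote is only taken when
 - the user enables concurrent jobs, e.g. (illustrative command line):
 -
 -   git annex get -J4
 -
 - which makes several transfer threads call pickRemote at once, so
 - spreading them over remotes of equal cost can increase throughput.
 -}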