{- git-annex assistant thread to scan remotes to find needed transfers
 -
 - Copyright 2012 Joey Hess <joey@kitenet.net>
 -
 - Licensed under the GNU GPL version 3 or higher.
 -}
|
|
|
|
|
|
|
|
module Assistant.Threads.TransferScanner where
|
|
|
|
|
|
|
|
import Assistant.Common
|
|
|
|
import Assistant.ScanRemotes
|
|
|
|
import Assistant.TransferQueue
|
|
|
|
import Assistant.ThreadedMonad
|
2012-07-28 22:47:24 +00:00
|
|
|
import Assistant.DaemonStatus
|
2012-07-29 17:22:08 +00:00
|
|
|
import Assistant.Alert
|
2012-10-18 19:22:28 +00:00
|
|
|
import Assistant.Drop
|
2012-07-23 03:16:56 +00:00
|
|
|
import Logs.Transfer
|
2012-07-25 18:15:09 +00:00
|
|
|
import Logs.Location
|
2012-08-26 21:45:30 +00:00
|
|
|
import Logs.Web (webUUID)
|
2012-07-25 18:15:09 +00:00
|
|
|
import qualified Remote
|
2012-08-26 19:39:02 +00:00
|
|
|
import qualified Types.Remote as Remote
|
2012-07-23 03:16:56 +00:00
|
|
|
import Utility.ThreadScheduler
|
2012-07-25 18:15:09 +00:00
|
|
|
import qualified Git.LsFiles as LsFiles
|
|
|
|
import Command
|
|
|
|
import Annex.Content
|
2012-10-09 16:18:41 +00:00
|
|
|
import Annex.Wanted
|
2012-07-23 03:16:56 +00:00
|
|
|
|
2012-08-24 19:52:23 +00:00
|
|
|
import qualified Data.Set as S
|
|
|
|
|
2012-07-23 03:16:56 +00:00
|
|
|
-- | Identifier for this thread, used in debug messages.
thisThread :: ThreadName
thisThread = "TransferScanner"
|
|
|
|
|
2012-07-25 17:12:34 +00:00
|
|
|
{- This thread waits until a remote needs to be scanned, to find transfers
 - that need to be made, to keep data in sync.
 -}
transferScannerThread :: ThreadState -> DaemonStatusHandle -> ScanRemoteMap -> TransferQueue -> NamedThread
transferScannerThread st dstatus scanremotes transferqueue = thread $ do
    startupScan
    loop S.empty
  where
    thread = NamedThread thisThread
    -- Main loop: pull a batch of remotes off the scan queue. Remotes
    -- flagged for a full scan, or not yet scanned this session, get
    -- the expensive scan; otherwise only the cheap scan for failed
    -- transfers is done.
    loop scannedset = do
        threadDelaySeconds (Seconds 2)
        (rs, infos) <- unzip <$> getScanRemote scanremotes
        if any fullScan infos || any (`S.notMember` scannedset) rs
            then do
                expensiveScan st dstatus transferqueue rs
                loop $ scannedset `S.union` S.fromList rs
            else do
                mapM_ (failedTransferScan st dstatus transferqueue) rs
                loop scannedset
    {- All available remotes are scanned in full on startup,
     - for multiple reasons, including:
     -
     - * This may be the first run, and there may be remotes
     -   already in place, that need to be synced.
     - * We may have run before, and scanned a remote, but
     -   only been in a subdirectory of the git remote, and so
     -   not synced it all.
     - * We may have run before, and had transfers queued,
     -   and then the system (or us) crashed, and that info was
     -   lost.
     -}
    startupScan = addScanRemotes scanremotes True
        =<< syncRemotes <$> getDaemonStatus dstatus
|
2012-07-23 03:16:56 +00:00
|
|
|
|
2012-08-23 19:22:23 +00:00
|
|
|
{- This is a cheap scan for failed transfers involving a remote. -}
failedTransferScan :: ThreadState -> DaemonStatusHandle -> TransferQueue -> Remote -> IO ()
failedTransferScan st dstatus transferqueue r = do
    failed <- runThreadState st $ getFailedTransfers (Remote.uuid r)
    runThreadState st $ mapM_ (removeFailedTransfer . fst) failed
    mapM_ retry failed
  where
    retry (t, info)
        | transferDirection t == Download =
            {- Check if the remote still has the key.
             - If not, relies on the expensiveScan to
             - get it queued from some other remote. -}
            whenM (runThreadState st $ remoteHas r $ transferKey t) $
                requeue t info
        | otherwise =
            {- The Transferrer checks when uploading
             - that the remote doesn't already have the
             - key, so it's not redundantly checked
             - here. -}
            requeue t info
    requeue t info = queueTransferWhenSmall
        transferqueue dstatus (associatedFile info) t r
|
2012-08-23 19:22:23 +00:00
|
|
|
|
{- This is an expensive scan through the full git work tree, finding
 - files to transfer. The scan is blocked when the transfer queue gets
 - too large.
 -
 - This also finds files that are present either here or on a remote
 - but that are not preferred content, and drops them. Searching for files
 - to drop is done concurrently with the scan for transfers.
 -
 - TODO: It would be better to first drop as much as we can, before
 - transferring much, to minimise disk use.
 -}
expensiveScan :: ThreadState -> DaemonStatusHandle -> TransferQueue -> [Remote] -> IO ()
expensiveScan st dstatus transferqueue rs = unless onlyweb $ do
    liftIO $ debug thisThread ["starting scan of", show visiblers]
    void $ alertWhile dstatus (scanAlert visiblers) $ do
        repo <- runThreadState st gitRepo
        (files, cleanup) <- LsFiles.inRepo [] repo
        forM_ files $ \f -> do
            transfers <- runThreadState st $
                ifAnnexed f (findtransfers f) (return [])
            mapM_ (enqueue f) transfers
        void cleanup
        return True
    liftIO $ debug thisThread ["finished scan of", show visiblers]
  where
    -- Nothing to scan when the only remote is the web.
    onlyweb = all (== webUUID) $ map Remote.uuid rs
    -- Readonly remotes are filtered out of the remotes shown in the
    -- alert, unless all the remotes are readonly.
    visiblers = let usable = filter (not . Remote.readonly) rs
        in if null usable then rs else usable
    enqueue f (r, t) = do
        debug thisThread ["queuing", show t]
        queueTransferWhenSmall transferqueue dstatus (Just f) t r
    findtransfers f (key, _) = do
        locs <- loggedLocations key
        {- The syncable remotes may have changed since this
         - scan began. -}
        syncrs <- liftIO $ syncRemotes <$> getDaemonStatus dstatus
        present <- inAnnex key
        handleDrops' locs syncrs present key (Just f)
        let slocs = S.fromList locs
        let use a = return $ catMaybes $ map (a key slocs) syncrs
        if present
            then filterM (wantSend (Just f) . Remote.uuid . fst)
                =<< use (genTransfer Upload False)
            else ifM (wantGet $ Just f)
                ( use (genTransfer Download True) , return [] )
|
|
|
|
|
|
|
|
-- | Generates a transfer involving the remote, when the remote's
-- membership in the location set matches the wanted value.
-- Never generates an upload to a readonly remote.
genTransfer :: Direction -> Bool -> Key -> S.Set UUID -> Remote -> Maybe (Remote, Transfer)
genTransfer direction want key slocs r
    | direction == Upload && Remote.readonly r = Nothing
    | S.member u slocs == want = Just (r, Transfer direction u key)
    | otherwise = Nothing
  where
    u = Remote.uuid r
|
2012-07-25 18:15:09 +00:00
|
|
|
|
2012-08-24 17:04:28 +00:00
|
|
|
-- | Checks whether the key's logged locations include the remote's uuid.
remoteHas :: Remote -> Key -> Annex Bool
remoteHas r key = elem (Remote.uuid r) <$> loggedLocations key
|