{- A pool of "git-annex transferrer" processes
 -
 - Copyright 2013-2020 Joey Hess <id@joeyh.name>
 -
 - Licensed under the GNU AGPL version 3 or higher.
 -}

{-# LANGUAGE RankNTypes #-}
{-# LANGUAGE CPP #-}

module Annex.TransferrerPool where

import Annex.Common
import qualified Annex
import Types.TransferrerPool
import Types.Transferrer
import Types.Transfer
import qualified Types.Remote as Remote
import Types.StallDetection
import Types.Messages
import Types.CleanupActions
import Messages.Serialized
import Annex.Path
import Utility.Batch
import Utility.Metered
import Utility.HumanTime
import Utility.ThreadScheduler
import qualified Utility.SimpleProtocol as Proto

import Control.Concurrent
import Control.Concurrent.Async
import Control.Concurrent.STM hiding (check)
import Control.Monad.IO.Class (MonadIO)
import Data.Time.Clock.POSIX
import System.Log.Logger (debugM)
import qualified Data.Map as M
#ifndef mingw32_HOST_OS
import System.Posix.Signals
import System.Posix.Process (getProcessGroupIDOf)
#endif

type SignalActionsVar = TVar (M.Map SignalAction (Int -> IO ()))

{- Runs an action with a Transferrer from the pool. -}
withTransferrer :: (Transferrer -> Annex a) -> Annex a
withTransferrer a = do
	program <- liftIO programPath
	pool <- Annex.getState Annex.transferrerpool
	let nocheck = pure (pure True)
	signalactonsvar <- Annex.getState Annex.signalactions
	withTransferrer' False signalactonsvar nocheck program nonBatchCommandMaker pool a

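{- A sketch of typical usage, assuming the caller has a remote, a Transfer,
 - and its TransferInfo in hand (names here are illustrative, not from this
 - module):
 -
 -   withTransferrer $ \transferrer ->
 -   	performTransfer Nothing AnnexLevel id (Just remote) transfer info transferrer
 -}

{- Variant of withTransferrer that is given the pool, the path to the
 - git-annex program, and the batch command maker to use, and that can
 - minimize the number of transferrer processes left running. -}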
withTransferrer'
	:: (MonadIO m, MonadMask m)
	=> Bool
	-- ^ When minimizeprocesses is True, only one Transferrer is left
	-- running in the pool at a time. So if this needed to start a
	-- new Transferrer, it's stopped when done. Otherwise, idle
	-- processes are left in the pool for use later.
	-> SignalActionsVar
	-> MkCheckTransferrer
	-> FilePath
	-> BatchCommandMaker
	-> TransferrerPool
	-> (Transferrer -> m a)
	-> m a
withTransferrer' minimizeprocesses signalactonsvar mkcheck program batchmaker pool a = do
	(mi, leftinpool) <- liftIO $ atomically (popTransferrerPool pool)
	(i@(TransferrerPoolItem _ check), t) <- liftIO $ case mi of
		Nothing -> do
			t <- mkTransferrer signalactonsvar program batchmaker
			i <- mkTransferrerPoolItem mkcheck t
			return (i, t)
		Just i -> checkTransferrerPoolItem signalactonsvar program batchmaker i
	a t `finally` returntopool leftinpool check t i
  where
	returntopool leftinpool check t i
		| not minimizeprocesses || leftinpool == 0 =
			-- If the transferrer got killed, the handles will
			-- be closed, so it should not be returned to the
			-- pool.
			liftIO $ whenM (hIsOpen (transferrerWrite t)) $
				liftIO $ atomically $ pushTransferrerPool pool i
		| otherwise = liftIO $ do
			void $ forkIO $ transferrerShutdown t
			atomically $ pushTransferrerPool pool $ TransferrerPoolItem Nothing check

{- Check if a Transferrer from the pool is still ok to be used.
 - If not, stop it and start a new one. -}
checkTransferrerPoolItem :: SignalActionsVar -> FilePath -> BatchCommandMaker -> TransferrerPoolItem -> IO (TransferrerPoolItem, Transferrer)
checkTransferrerPoolItem signalactonsvar program batchmaker i = case i of
	TransferrerPoolItem (Just t) check -> ifM check
		( return (i, t)
		, do
			transferrerShutdown t
			new check
		)
	TransferrerPoolItem Nothing check -> new check
  where
	new check = do
		t <- mkTransferrer signalactonsvar program batchmaker
		return (TransferrerPoolItem (Just t) check, t)

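-- | Whether a transfer request originates from a regular git-annex
-- command or from the assistant. Within this module it determines which
-- request message sendRequest sends to the transferrer process.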
data TransferRequestLevel = AnnexLevel | AssistantLevel
	deriving (Show)

{- Requests that a Transferrer perform a Transfer, and waits for it to
 - finish.
 -
 - When a stall is detected, kills the Transferrer.
 -
 - If the transfer failed or stalled, returns TransferInfo with an
 - updated bytesComplete reflecting how much data has been transferred.
 -}
performTransfer
	:: (Monad m, MonadIO m, MonadMask m)
	=> Maybe StallDetection
	-> TransferRequestLevel
	-> (forall a. Annex a -> m a)
	-- ^ Run an annex action in the monad. Will not be used with
	-- actions that block for a long time.
	-> Maybe Remote
	-> Transfer
	-> TransferInfo
	-> Transferrer
	-> m (Either TransferInfo ())
performTransfer stalldetection level runannex r t info transferrer = do
	bpv <- liftIO $ newTVarIO zeroBytesProcessed
	ifM (catchBoolIO $ bracket setup cleanup (go bpv))
		( return (Right ())
		, do
			n <- case transferDirection t of
				Upload -> liftIO $ atomically $
					fromBytesProcessed <$> readTVar bpv
				Download -> do
					f <- runannex $ fromRepo $ gitAnnexTmpObjectLocation (transferKey t)
					liftIO $ catchDefaultIO 0 $ getFileSize f
			return $ Left $ info { bytesComplete = Just n }
		)
  where
	setup = do
		liftIO $ sendRequest level t r
			(associatedFile info)
			(transferrerWrite transferrer)
		metervar <- liftIO $ newTVarIO Nothing
		stalledvar <- liftIO $ newTVarIO False
		tid <- liftIO $ async $
			detectStalls stalldetection metervar $ do
				atomically $ writeTVar stalledvar True
				killTransferrer transferrer
		return (metervar, tid, stalledvar)

	cleanup (_, tid, stalledvar) = do
		liftIO $ uninterruptibleCancel tid
		whenM (liftIO $ atomically $ readTVar stalledvar) $ do
			runannex $ showLongNote "Transfer stalled"
			-- Close handles, to prevent the transferrer being
			-- reused since the process was killed.
			liftIO $ hClose $ transferrerRead transferrer
			liftIO $ hClose $ transferrerWrite transferrer

	go bpv (metervar, _, _) = relaySerializedOutput
		(liftIO $ readResponse (transferrerRead transferrer))
		(liftIO . sendSerializedOutputResponse (transferrerWrite transferrer))
		(updatemeter bpv metervar)
		runannex

	updatemeter bpv metervar (Just n) = liftIO $ do
		atomically $ writeTVar metervar (Just n)
		atomically $ writeTVar bpv n
	updatemeter _bpv metervar Nothing = liftIO $
		atomically $ writeTVar metervar Nothing

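{- Watches the progress of a transfer, and runs the onstall action when
 - it appears to have stalled.
 -
 - Each check waits for the configured duration, then compares how far
 - the progress meter advanced against the configured minimum amount of
 - data, scaled by how long the check actually took. For example
 - (hypothetical numbers), with a StallDetection of 10240 bytes per 60
 - seconds, a check that wakes up after about a minute and finds that
 - only 2000 bytes were transferred treats the transfer as stalled. -}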
detectStalls :: Maybe StallDetection -> TVar (Maybe BytesProcessed) -> IO () -> IO ()
detectStalls Nothing _ _ = noop
detectStalls (Just (StallDetection minsz duration)) metervar onstall = go Nothing
  where
	go st = do
		starttm <- getPOSIXTime
		threadDelaySeconds (Seconds (fromIntegral (durationSeconds duration)))
		-- Get whatever progress value was reported last, if any.
		v <- atomically $ fmap fromBytesProcessed
			<$> readTVar metervar
		let cont = go v
		case (st, v) of
			(Nothing, _) -> cont
			(_, Nothing) -> cont
			(Just prev, Just sofar)
				-- Just in case a progress meter somehow runs
				-- backwards, or a second progress meter was
				-- started and is at a smaller value than
				-- the previous one.
				| prev > sofar -> cont
				| otherwise -> do
					endtm <- getPOSIXTime
					let actualduration = endtm - starttm
					let sz = sofar - prev
					let expectedsz = (minsz * durationSeconds duration)
						`div` max 1 (ceiling actualduration)
					if sz < expectedsz
						then onstall
						else cont

{- Starts a new git-annex transferrer process, setting up handles
 - that will be used to communicate with it. -}
mkTransferrer :: SignalActionsVar -> FilePath -> BatchCommandMaker -> IO Transferrer
mkTransferrer signalactonsvar program batchmaker = do
	{- It runs as a batch job. -}
	let (program', params') = batchmaker (program, [Param "transferrer"])
	{- It's put into its own group so that the whole group can be
	 - killed to stop a transfer. -}
	(Just writeh, Just readh, _, ph) <- createProcess
		(proc program' $ toCommand params')
		{ create_group = True
		, std_in = CreatePipe
		, std_out = CreatePipe
		}

	{- Set up signal propagation, so eg ctrl-c will also interrupt
	 - the processes in the transferrer's process group.
	 -
	 - There is a race between the process being created and this point.
	 - If a signal is received before this can run, it is not sent to
	 - the transferrer. This leaves the transferrer waiting for the
	 - first message on stdin to tell what to do. If the signal kills
	 - this parent process, the transferrer will then get a sigpipe
	 - and die too. If the signal suspends this parent process,
	 - it's ok to leave the transferrer running, as it's waiting on
	 - the pipe until this process wakes back up.
	 -}
#ifndef mingw32_HOST_OS
	pid <- getPid ph
	unregistersignalprop <- case pid of
		Just p -> getProcessGroupIDOf p >>= \pgrp -> do
			atomically $ modifyTVar' signalactonsvar $
				M.insert (PropagateSignalProcessGroup p) $ \sig ->
					signalProcessGroup (fromIntegral sig) pgrp
			return $ atomically $ modifyTVar' signalactonsvar $
				M.delete (PropagateSignalProcessGroup p)
		Nothing -> return noop
#else
	let unregistersignalprop = noop
#endif

	return $ Transferrer
		{ transferrerRead = readh
		, transferrerWrite = writeh
		, transferrerHandle = ph
		, transferrerShutdown = do
			hClose readh
			hClose writeh
			void $ waitForProcess ph
			unregistersignalprop
		}

-- | Send a request to perform a transfer.
sendRequest :: TransferRequestLevel -> Transfer -> Maybe Remote -> AssociatedFile -> Handle -> IO ()
sendRequest level t mremote afile h = do
	let tr = maybe
		(TransferRemoteUUID (transferUUID t))
		(TransferRemoteName . Remote.name)
		mremote
	let f = case (level, transferDirection t) of
		(AnnexLevel, Upload) -> UploadRequest
		(AnnexLevel, Download) -> DownloadRequest
		(AssistantLevel, Upload) -> AssistantUploadRequest
		(AssistantLevel, Download) -> AssistantDownloadRequest
	let r = f tr (transferKey t) (TransferAssociatedFile afile)
	let l = unwords $ Proto.formatMessage r
	debugM "transfer" ("> " ++ l)
	hPutStrLn h l
	hFlush h

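-- | Sends a response to serialized output received from the transferrer,
-- using the same line-based protocol as transfer requests.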
sendSerializedOutputResponse :: Handle -> SerializedOutputResponse -> IO ()
sendSerializedOutputResponse h sor = do
	let l = unwords $ Proto.formatMessage $
		TransferSerializedOutputResponse sor
	debugM "transfer" ("> " ++ show l)
	hPutStrLn h l
	hFlush h

-- | Read a response to a transfer request.
--
-- Before the final response, this will return whatever SerializedOutput
-- should be displayed as the transfer is performed.
readResponse :: Handle -> IO (Either SerializedOutput Bool)
readResponse h = do
	l <- liftIO $ hGetLine h
	debugM "transfer" ("< " ++ l)
	case Proto.parseMessage l of
		Just (TransferOutput so) -> return (Left so)
		Just (TransferResult r) -> return (Right r)
		Nothing -> transferrerProtocolError l

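{- Used when a line received from the transferrer cannot be parsed. -}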
transferrerProtocolError :: String -> a
transferrerProtocolError l = giveup $ "transferrer protocol error: " ++ show l

{- Kill the transferrer, and all its child processes. -}
killTransferrer :: Transferrer -> IO ()
killTransferrer t = do
	interruptProcessGroupOf $ transferrerHandle t
	threadDelay 50000 -- 0.05 second grace period
	terminateProcess $ transferrerHandle t

{- Stop all transferrers in the pool. -}
emptyTransferrerPool :: Annex ()
emptyTransferrerPool = do
	poolvar <- Annex.getState Annex.transferrerpool
	pool <- liftIO $ atomically $ swapTVar poolvar []
	liftIO $ forM_ pool $ \case
		TransferrerPoolItem (Just t) _ -> transferrerShutdown t
		TransferrerPoolItem Nothing _ -> noop