separate queue for cleanup actions
When running multiple concurrent actions, the cleanup phase is run in a separate queue from the main action queue. This can make some commands faster, because less time is spent on bookkeeping in between each file transfer.

But as far as I can see, nothing will be sped up much by this yet, because all the existing cleanup actions are very light-weight. This is just groundwork for deferring checksum verification to cleanup time.

This change does mean that if the user expects -J2 to mean they will see no more than 2 jobs running at a time, they may be surprised to see 4 in some cases (if the cleanup actions are slow enough to notice).

It might also make sense to enable background cleanup without the -J, for at least one cleanup action. Indeed, that's the behavior that -J1 has now. At some point in the future, it may make sense to make the behavior with no -J the same as -J1. The only reason it's not currently is that git-annex can build without concurrent-output, and also any bugs in concurrent-output (such as perhaps misbehaving on non-VT100-compatible terminals) are avoided by default by only using it when -J is used.
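To illustrate the scheduling idea described above, here is a minimal, self-contained sketch (not git-annex's implementation; the real code is in the diff below). Each stage gets its own pool of n slots, and a job releases its perform slot before taking a cleanup slot, which is why -J2 can briefly show 4 jobs:

-- Sketch only: two bounded slot pools, one per stage, sized to the -J level.
import Control.Concurrent (threadDelay)
import Control.Concurrent.Async (mapConcurrently_)
import Control.Concurrent.STM
import Control.Exception (bracket_)

data Pools = Pools { performSlots :: TVar Int, cleanupSlots :: TVar Int }

newPools :: Int -> IO Pools
newPools n = Pools <$> newTVarIO n <*> newTVarIO n

-- Block until a slot is free, run the action, then give the slot back.
withSlot :: TVar Int -> IO a -> IO a
withSlot slots = bracket_ acquire release
  where
	acquire = atomically $ do
		n <- readTVar slots
		check (n > 0)
		writeTVar slots (n - 1)
	release = atomically $ modifyTVar' slots (+ 1)

-- A job holds a perform slot only for the transfer; its (hypothetical)
-- cleanup work then runs under a separate cleanup slot.
job :: Pools -> Int -> IO ()
job pools i = do
	withSlot (performSlots pools) $ do
		putStrLn ("transfer " ++ show i)
		threadDelay 100000
	withSlot (cleanupSlots pools) $ do
		putStrLn ("cleanup " ++ show i)
		threadDelay 100000

main :: IO ()
main = do
	pools <- newPools 2	-- models -J2
	mapConcurrently_ (job pools) [1..6 :: Int]

The real implementation instead keeps one WorkerPool with per-worker stages in a TMVar, so a thread can swap stages with an idle worker rather than juggling two counters.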
parent c04b2af3e1
commit 659640e224
6 changed files with 128 additions and 46 deletions

Annex.hs | 5
@@ -142,7 +142,7 @@ data AnnexState = AnnexState
 	, tempurls :: M.Map Key URLString
 	, existinghooks :: M.Map Git.Hook.Hook Bool
 	, desktopnotify :: DesktopNotify
-	, workers :: WorkerPool AnnexState
+	, workers :: TMVar (WorkerPool AnnexState)
 	, activekeys :: TVar (M.Map Key ThreadId)
 	, activeremotes :: MVar (M.Map (Types.Remote.RemoteA Annex) Integer)
 	, keysdbhandle :: Maybe Keys.DbHandle
@@ -155,6 +155,7 @@ newState :: GitConfig -> Git.Repo -> IO AnnexState
 newState c r = do
 	emptyactiveremotes <- newMVar M.empty
 	emptyactivekeys <- newTVarIO M.empty
+	emptyworkerpool <- newTMVarIO UnallocatedWorkerPool
 	o <- newMessageState
 	sc <- newTMVarIO False
 	return $ AnnexState
@@ -199,7 +200,7 @@ newState c r = do
 		, tempurls = M.empty
 		, existinghooks = M.empty
 		, desktopnotify = mempty
-		, workers = UnallocatedWorkerPool
+		, workers = emptyworkerpool
 		, activekeys = emptyactivekeys
 		, activeremotes = emptyactiveremotes
 		, keysdbhandle = Nothing
@@ -11,7 +11,6 @@ import Annex
 import Annex.Common
 import Annex.Action
 import qualified Annex.Queue
-import Types.WorkerPool
 
 import qualified Data.Map as M
 
@@ -43,9 +42,8 @@ dupState :: Annex AnnexState
 dupState = do
 	st <- Annex.getState id
 	return $ st
-		{ Annex.workers = UnallocatedWorkerPool
 		-- each thread has its own repoqueue
-		, Annex.repoqueue = Nothing
+		{ Annex.repoqueue = Nothing
 		-- avoid sharing eg, open file handles
 		, Annex.catfilehandles = M.empty
 		, Annex.checkattrhandle = Nothing
@@ -30,6 +30,10 @@ git-annex (7.20190508) UNRELEASED; urgency=medium
     security hole CVE-2018-10857 (except for configurations which enabled curl
     and bypassed public IP address restrictions). Now it will work
     if allowed by annex.security.allowed-ip-addresses.
+  * When running multiple concurrent actions, the cleanup phase is run
+    in a separate queue than the main action queue. This can make some
+    commands faster, because less time is spent on bookkeeping in
+    between each file transfer.
 
  -- Joey Hess <id@joeyh.name>  Mon, 06 May 2019 13:52:02 -0400
 
@@ -24,7 +24,6 @@ import Control.Concurrent.Async
 import Control.Concurrent.STM
 import Control.Exception (throwIO)
 import GHC.Conc
-import Data.Either
 import qualified Data.Map.Strict as M
 import qualified System.Console.Regions as Regions
 
@@ -61,7 +60,9 @@ commandAction a = Annex.getState Annex.concurrency >>= \case
 	run = void $ includeCommandAction a
 
 	runconcurrent n = do
-		ws <- liftIO . drainTo (n-1) =<< Annex.getState Annex.workers
+		tv <- Annex.getState Annex.workers
+		ws <- liftIO $ drainTo (n-1) (== PerformStage)
+			=<< atomically (takeTMVar tv)
 		(st, ws') <- case ws of
 			UnallocatedWorkerPool -> do
 				-- Generate the remote list now, to avoid
@@ -72,61 +73,99 @@ commandAction a = Annex.getState Annex.concurrency >>= \case
 				_ <- remoteList
 				st <- dupState
 				return (st, allocateWorkerPool st (n-1))
-			WorkerPool l -> findFreeSlot l
+			WorkerPool _ -> findFreeSlot (== PerformStage) ws
 		w <- liftIO $ async $ snd <$> Annex.run st
 			(inOwnConsoleRegion (Annex.output st) run)
-		Annex.changeState $ \s -> s
-			{ Annex.workers = addWorkerPool ws' (Right w) }
+		liftIO $ atomically $ putTMVar tv $
+			addWorkerPool (ActiveWorker w PerformStage) ws'
 
 commandActions :: [CommandStart] -> Annex ()
 commandActions = mapM_ commandAction
 
-{- Waits for any forked off command actions to finish.
+{- Waits for any worker threads to finish.
  -
- - Merge together the cleanup actions of all the AnnexStates used by
- - threads, into the current Annex's state, so they'll run at shutdown.
- -
- - Also merge together the errcounters of the AnnexStates.
+ - Merge the AnnexStates used by the threads back into the current Annex's
+ - state.
  -}
 finishCommandActions :: Annex ()
 finishCommandActions = do
-	ws <- Annex.getState Annex.workers
-	Annex.changeState $ \s -> s { Annex.workers = UnallocatedWorkerPool }
-	ws' <- liftIO $ drainTo 0 ws
-	forM_ (idleWorkers ws') mergeState
+	tv <- Annex.getState Annex.workers
+	let get = liftIO $ atomically $ takeTMVar tv
+	let put = liftIO . atomically . putTMVar tv
+	bracketOnError get put $ \ws -> do
+		ws' <- liftIO $ drainTo 0 (const True) ws
+		forM_ (idleWorkers ws') mergeState
+		put UnallocatedWorkerPool
 
 {- Wait for jobs from the WorkerPool to complete, until
- - the number of running jobs is not larger than the specified number.
+ - the number of running jobs of the desired stage
+ - is not larger than the specified number.
  -
  - If a job throws an exception, it is propigated, but first
  - all other jobs are waited for, to allow for a clean shutdown.
  -}
-drainTo :: Int -> WorkerPool t -> IO (WorkerPool t)
-drainTo _ UnallocatedWorkerPool = pure UnallocatedWorkerPool
-drainTo sz (WorkerPool l)
+drainTo :: Int -> (WorkerStage -> Bool) -> WorkerPool t -> IO (WorkerPool t)
+drainTo _ _ UnallocatedWorkerPool = pure UnallocatedWorkerPool
+drainTo sz wantstage (WorkerPool l)
 	| null as || sz >= length as = pure (WorkerPool l)
 	| otherwise = do
-		(done, ret) <- waitAnyCatch as
-		let as' = filter (/= done) as
+		(done, ret) <- waitAnyCatch (mapMaybe workerAsync as)
+		let (ActiveWorker _ donestage:[], as') =
+			partition (\w -> workerAsync w == Just done) as
 		case ret of
 			Left e -> do
-				void $ drainTo 0 $ WorkerPool $
-					map Left sts ++ map Right as'
+				void $ drainTo 0 (const True) $ WorkerPool $
+					sts ++ as' ++ otheras
 				throwIO e
 			Right st -> do
-				drainTo sz $ WorkerPool $
-					map Left (st:sts) ++ map Right as'
+				let w = IdleWorker st donestage
+				drainTo sz wantstage $ WorkerPool $
+					w : sts ++ as' ++ otheras
   where
-	(sts, as) = partitionEithers l
+	(sts, allas) = partition isidle l
+	(as, otheras) = partition (wantstage . workerStage) allas
+	isidle (IdleWorker _ _) = True
+	isidle (ActiveWorker _ _) = False
 
-findFreeSlot :: [Worker Annex.AnnexState] -> Annex (Annex.AnnexState, WorkerPool Annex.AnnexState)
-findFreeSlot = go []
+findFreeSlot :: (WorkerStage -> Bool) -> WorkerPool Annex.AnnexState -> Annex (Annex.AnnexState, WorkerPool Annex.AnnexState)
+findFreeSlot wantstage (WorkerPool l) = go [] l
   where
 	go c [] = do
 		st <- dupState
 		return (st, WorkerPool c)
-	go c (Left st:rest) = return (st, WorkerPool (c ++ rest))
+	go c ((IdleWorker st stage):rest) | wantstage stage =
+		return (st, WorkerPool (c ++ rest))
 	go c (v:rest) = go (v:c) rest
+findFreeSlot _ UnallocatedWorkerPool = do
+	st <- dupState
+	return (st, UnallocatedWorkerPool)
+
+{- Changes the current thread's stage in the worker pool.
+ -
+ - An idle worker with the desired stage is found in the pool
+ - (waiting if necessary for one to become idle)
+ - and the stages of it and the current thread are swapped.
+ -}
+changeStageTo :: WorkerStage -> Annex ()
+changeStageTo newstage = Annex.getState Annex.concurrency >>= \case
+	NonConcurrent -> noop
+	Concurrent n -> go n
+	ConcurrentPerCpu -> go =<< liftIO getNumProcessors
+  where
+	go n = do
+		tv <- Annex.getState Annex.workers
+		let get = liftIO $ atomically $ takeTMVar tv
+		let put = liftIO . atomically . putTMVar tv
+		bracketOnError get put $ \pool -> do
+			pool' <- liftIO $ drainTo (n-1) (== newstage) pool
+			(idlest, pool'') <- findFreeSlot (== newstage) pool'
+			mytid <- liftIO myThreadId
			case removeThreadIdWorkerPool mytid pool'' of
+				Just ((myaid, oldstage), pool''') -> do
+					liftIO $ print "switching"
+					put $ addWorkerPool (IdleWorker idlest oldstage) $
+						addWorkerPool (ActiveWorker myaid newstage) pool'''
+				Nothing -> put pool'
 
 {- Like commandAction, but without the concurrency. -}
 includeCommandAction :: CommandStart -> CommandCleanup
@@ -161,7 +200,9 @@ callCommandActionQuiet :: CommandStart -> Annex (Maybe Bool)
 callCommandActionQuiet = start
   where
 	start   = stage $ maybe skip perform
-	perform = stage $ maybe failure cleanup
+	perform = stage $ maybe failure $ \a -> do
+		changeStageTo CleanupStage
+		cleanup a
 	cleanup = stage $ status
 	stage = (=<<)
 	skip = return Nothing
@@ -7,8 +7,8 @@
 
 module Types.WorkerPool where
 
+import Control.Concurrent
 import Control.Concurrent.Async
-import Data.Either
 
 -- | Pool of worker threads. 
 data WorkerPool t
@@ -16,15 +16,54 @@ data WorkerPool t
 	| WorkerPool [Worker t]
 
 -- | A worker can either be idle or running an Async action.
-type Worker t = Either t (Async t)
+-- And it is used for some stage.
+data Worker t
+	= IdleWorker t WorkerStage
+	| ActiveWorker (Async t) WorkerStage
+
+-- | These correspond to CommandPerform and CommandCleanup.
+data WorkerStage = PerformStage | CleanupStage
+	deriving (Eq)
+
+workerStage :: Worker t -> WorkerStage
+workerStage (IdleWorker _ s) = s
+workerStage (ActiveWorker _ s) = s
+
+workerAsync :: Worker t -> Maybe (Async t)
+workerAsync (IdleWorker _ _) = Nothing
+workerAsync (ActiveWorker aid _) = Just aid
 
+-- | Allocates a WorkerPool that has the specified number of workers
+-- in it, of each stage.
+--
+-- The stages are distributed evenly throughout.
 allocateWorkerPool :: t -> Int -> WorkerPool t
-allocateWorkerPool t n = WorkerPool $ replicate n (Left t)
+allocateWorkerPool t n = WorkerPool $ take (n+n) $
+	map (uncurry IdleWorker) $ zip (repeat t) stages
+  where
+	stages = concat $ repeat [PerformStage, CleanupStage]
 
-addWorkerPool :: WorkerPool t -> Worker t -> WorkerPool t
-addWorkerPool (WorkerPool l) w = WorkerPool (w:l)
-addWorkerPool UnallocatedWorkerPool w = WorkerPool [w]
+addWorkerPool :: Worker t -> WorkerPool t -> WorkerPool t
+addWorkerPool w (WorkerPool l) = WorkerPool (w:l)
+addWorkerPool w UnallocatedWorkerPool = WorkerPool [w]
 
 idleWorkers :: WorkerPool t -> [t]
 idleWorkers UnallocatedWorkerPool = []
-idleWorkers (WorkerPool l) = lefts l
+idleWorkers (WorkerPool l) = go l
+  where
+	go [] = []
+	go (IdleWorker t _ : rest) = t : go rest
+	go (ActiveWorker _ _ : rest) = go rest
+
+-- | Removes a worker from the pool whose Async uses the ThreadId.
+--
+-- Each Async has its own ThreadId, so this stops once it finds
+-- a match.
+removeThreadIdWorkerPool :: ThreadId -> WorkerPool t -> Maybe ((Async t, WorkerStage), WorkerPool t)
+removeThreadIdWorkerPool _ UnallocatedWorkerPool = Nothing
+removeThreadIdWorkerPool tid (WorkerPool l) = go [] l
  where
+	go _ [] = Nothing
+	go c (ActiveWorker a stage : rest)
+		| asyncThreadId a == tid = Just ((a, stage), WorkerPool (c++rest))
+	go c (v : rest) = go (v:c) rest
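A quick illustration of the new pool API above, assuming the Types.WorkerPool module from this diff is available as-is: allocateWorkerPool with a concurrency level of 2 yields four idle workers, two per stage, which is where the "up to 4 jobs with -J2" behavior mentioned in the commit message comes from.

import Types.WorkerPool

main :: IO ()
main = do
	let pool = allocateWorkerPool "some state" 2
	-- four idle slots, alternating PerformStage / CleanupStage
	print (length (idleWorkers pool))                          -- 4
	print (length (filter (== PerformStage) (stagesOf pool)))  -- 2
  where
	stagesOf (WorkerPool ws) = map workerStage ws
	stagesOf UnallocatedWorkerPool = []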
@@ -17,8 +17,7 @@ are still some things that could be improved, tracked here:
   can still end up stuck doing checksum verification at the same time, 
   so the pipe to the remote is not saturated.
 
-  Running cleanup actions in a separate queue from the main job queue
-  wouldn't be sufficient for this, because verification is done as part
-  of the same action that transfers content. That needs to somehow be
-  refactored to a cleanup action that ingests the file, and then
-  the cleanup action can be run in a separate queue.
+  Now that cleanup actions don't occupy space in the main worker queue,
+  all that needs to be done is make checksum verification be done as the
+  cleanup action. Currently, it's bundled into the same action that
+  transfers content.
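A purely hypothetical sketch of the follow-up refactoring this todo describes (none of these helper names exist in git-annex; they only stand in for the real transfer and ingestion code): the perform stage would just download to a temp file and hand a verification step to the cleanup queue.

type Key = String

-- Perform stage: transfer the content, but defer verification.
performGet :: Key -> IO (IO Bool)
performGet key = do
	let tmp = "tmp/" ++ key
	downloadTo tmp key                -- hypothetical transfer
	return (cleanupGet key tmp)       -- run later, in the cleanup queue

-- Cleanup stage: checksum verification and ingestion happen here,
-- so they no longer hold up the main transfer queue.
cleanupGet :: Key -> FilePath -> IO Bool
cleanupGet key tmp = do
	ok <- verifyChecksum key tmp      -- hypothetical checksum check
	if ok then ingest key tmp else return False

-- Stubs so the sketch stands alone; all hypothetical.
downloadTo :: FilePath -> Key -> IO ()
downloadTo _ _ = return ()

verifyChecksum :: Key -> FilePath -> IO Bool
verifyChecksum _ _ = return True

ingest :: Key -> FilePath -> IO Bool
ingest _ _ = return True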