make WorkerStage an open type
Rather than limiting it to PerformStage and CleanupStage, this opens it up so any number of stages can be added as needed by commands.

Each concurrent command has a set of stages that it uses, and only transitions between those can block waiting for a free slot in the worker pool. Calling enteringStage for some other stage does not block, and has very little overhead.

Note that while before the Annex state was duplicated on the first call to commandAction, this now happens earlier, in startConcurrency. That means that seek stage actions that use startConcurrency and then modify Annex state won't modify the state of the worker threads they later start. I audited all of them, and only Command.Sync did so; its prepMerge changes the working directory and so has to come before startConcurrency.

Also, the remote list is built before duplicating the state, which means that it gets built earlier now than it used to. The only effect is that commands that end up not needing to perform any actions build the remote list unnecessarily (and only when run with concurrency enabled), but that's a minor overhead compared to seeking through the work tree and determining there's nothing to do.
parent e19408ed9d
commit 53882ab4a7
17 changed files with 230 additions and 147 deletions
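For orientation, here is a minimal sketch of the type shapes this implies, inferred from the diff below (the pool is allocated from a UsedStages value via `allocateWorkerPool st (max n 1) usedstages`, and the WorkerPool constructor gains a field, seen in the `WorkerPool _ l` pattern). The extra stage names and the field names are assumptions for illustration, not taken from this commit:

import qualified Data.Set as S

-- Stages remain one ADT, but commands are no longer restricted to
-- Perform/Cleanup; new stages can be added as commands need them.
-- (TransferStage and VerifyStage are assumed examples.)
data WorkerStage = PerformStage | CleanupStage | TransferStage | VerifyStage
	deriving (Show, Eq, Ord)

-- The set of stages a command uses, and the stage its jobs start in.
-- Only transitions between stages in this set block waiting for a
-- worker slot.
data UsedStages = UsedStages
	{ initialStage :: WorkerStage
	, usedStages :: S.Set WorkerStage
	}

-- The default for most commands: perform, then cleanup.
commandStages :: UsedStages
commandStages = UsedStages
	{ initialStage = PerformStage
	, usedStages = S.fromList [PerformStage, CleanupStage]
	}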
@@ -55,18 +55,21 @@ commandActions = mapM_ commandAction
 commandAction :: CommandStart -> Annex ()
 commandAction start = Annex.getState Annex.concurrency >>= \case
-	NonConcurrent -> void $ includeCommandAction start
-	Concurrent n -> runconcurrent n
-	ConcurrentPerCpu -> runconcurrent =<< liftIO getNumProcessors
+	NonConcurrent -> runnonconcurrent
+	Concurrent _ -> runconcurrent
+	ConcurrentPerCpu -> runconcurrent
   where
-	runconcurrent n = do
+	runnonconcurrent = void $ includeCommandAction start
+	runconcurrent = do
 		tv <- Annex.getState Annex.workers
-		workerst <- waitWorkerSlot n PerformStage tv
+		liftIO (atomically (waitInitialWorkerSlot tv)) >>=
+			maybe runnonconcurrent (runconcurrent' tv)
+	runconcurrent' tv (workerst, workerstage) = do
 		aid <- liftIO $ async $ snd <$> Annex.run workerst
 			(concurrentjob workerst)
 		liftIO $ atomically $ do
 			pool <- takeTMVar tv
-			let !pool' = addWorkerPool (ActiveWorker aid PerformStage) pool
+			let !pool' = addWorkerPool (ActiveWorker aid workerstage) pool
 			putTMVar tv pool'
 		void $ liftIO $ forkIO $ debugLocks $ do
 			-- accountCommandAction will usually catch
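waitInitialWorkerSlot is new in this commit, but its definition lives in one of the other changed files rather than in this hunk. The call site above fixes its shape: it runs in STM, yields Nothing when the pool is unallocated (so commandAction falls back to the non-concurrent path), and otherwise yields a worker's duplicated state together with the stage the job starts in. A hedged sketch, assuming the pool constructor now carries the command's UsedStages (as the `WorkerPool _ l` pattern later in this diff suggests) and a waitWorkerSlot''-style helper updated to the new pool shape:

-- Sketch only, not the commit's actual definition.
waitInitialWorkerSlot :: TMVar (WorkerPool t) -> STM (Maybe (t, WorkerStage))
waitInitialWorkerSlot tv = takeTMVar tv >>= \case
	UnallocatedWorkerPool -> do
		-- Pool never allocated: put it back unchanged and decline.
		putTMVar tv UnallocatedWorkerPool
		return Nothing
	WorkerPool usedstages l -> do
		-- Claim an idle worker at the stage jobs start in;
		-- the caller marks it active once the job is forked.
		let stage = initialStage usedstages
		(st, pool') <- waitWorkerSlot'' stage l
		putTMVar tv pool'
		return (Just (st, stage))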
@@ -109,8 +112,7 @@ commandAction start = Annex.getState Annex.concurrency >>= \case
 	performconcurrent startmsg perform = do
 		showStartMessage startmsg
 		perform >>= \case
-			Just cleanup -> do
-				changeStageTo CleanupStage
+			Just cleanup -> enteringStage CleanupStage $ do
 				r <- cleanup
 				showEndMessage startmsg r
 				return r
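Where the old code switched stage with changeStageTo and stayed there, enteringStage scopes the stage change to a single action, and per the commit message it is close to free for stages outside the command's declared set. A hypothetical use in a transfer-style command (the stage names and the download/verify helpers are assumed for illustration; only enteringStage comes from this commit):

-- Hypothetical perform action: each enteringStage call blocks for a
-- worker slot only if the command's UsedStages includes that stage.
perform :: Key -> CommandPerform
perform key = do
	ok <- enteringStage TransferStage (download key)
	if ok
		then next $ enteringStage VerifyStage (verify key)
		else stop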
@@ -118,56 +120,6 @@ commandAction start = Annex.getState Annex.concurrency >>= \case
 		showEndMessage startmsg False
 		return False
 
--- | Wait until there's an idle worker in the pool, remove it from the
--- pool, and return its state.
---
--- If the pool is unallocated, it will be allocated to the specified size.
-waitWorkerSlot :: Int -> WorkerStage -> TMVar (WorkerPool Annex.AnnexState) -> Annex Annex.AnnexState
-waitWorkerSlot n wantstage tv = debugLocks $
-	join $ liftIO $ atomically $ waitWorkerSlot' wantstage tv >>= \case
-		Nothing -> return $ do
-			-- Generate the remote list now, to avoid
-			-- each thread generating it, which would
-			-- be more expensive and could cause
-			-- threads to contend over eg, calls to
-			-- setConfig.
-			_ <- remoteList
-			st <- dupState
-			liftIO $ atomically $ do
-				let (WorkerPool l) = allocateWorkerPool st (max n 1)
-				let (st', pool) = findidle st [] l
-				void $ swapTMVar tv pool
-				return st'
-		Just st -> return $ return st
-  where
-	findidle st _ [] = (st, WorkerPool [])
-	findidle _ c ((IdleWorker st stage):rest)
-		| stage == wantstage = (st, WorkerPool (c ++ rest))
-	findidle st c (w:rest) = findidle st (w:c) rest
-
--- | STM action that waits until there's an idle worker in the worker pool,
--- removes it from the pool, and returns its state.
---
--- If the worker pool is not already allocated, returns Nothing.
-waitWorkerSlot' :: WorkerStage -> TMVar (WorkerPool Annex.AnnexState) -> STM (Maybe (Annex.AnnexState))
-waitWorkerSlot' wantstage tv =
-	takeTMVar tv >>= \case
-		UnallocatedWorkerPool -> do
-			putTMVar tv UnallocatedWorkerPool
-			return Nothing
-		WorkerPool l -> do
-			(st, pool') <- waitWorkerSlot'' wantstage l
-			putTMVar tv pool'
-			return $ Just st
-
-waitWorkerSlot'' :: WorkerStage -> [Worker Annex.AnnexState] -> STM (Annex.AnnexState, WorkerPool Annex.AnnexState)
-waitWorkerSlot'' wantstage = findidle []
-	where
-	findidle _ [] = retry
-	findidle c ((IdleWorker st stage):rest)
-		| stage == wantstage = return (st, WorkerPool (c ++ rest))
-	findidle c (w:rest) = findidle (w:c) rest
-
 {- Waits for all worker threads to finish and merges their AnnexStates
  - back into the current Annex's state.
  -}
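The retry in findidle above is what makes waiting on a slot cheap: an STM transaction that retries is suspended until one of the variables it read changes, then re-runs from the top. A self-contained toy model of the same claim-or-retry pattern (simplified types, not git-annex code):

import Control.Concurrent.STM

data Stage = Perform | Cleanup deriving (Eq, Show)
data Worker = Idle Stage | Active deriving Show

-- Block until a worker idle at the wanted stage exists, then claim
-- it, replacing it with an active worker. If none is found, retry
-- suspends the transaction until the TVar changes.
claim :: Stage -> TVar [Worker] -> STM ()
claim wanted tv = do
	ws <- readTVar tv
	case go [] ws of
		Nothing -> retry
		Just rest -> writeTVar tv (Active : rest)
  where
	go _ [] = Nothing
	go acc (Idle s : rest)
		| s == wanted = Just (acc ++ rest)
	go acc (w : rest) = go (w : acc) rest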
@@ -178,37 +130,11 @@ finishCommandActions = do
 		swapTMVar tv UnallocatedWorkerPool
 	case pool of
 		UnallocatedWorkerPool -> noop
-		WorkerPool l -> forM_ (mapMaybe workerAsync l) $ \aid ->
+		WorkerPool _ l -> forM_ (mapMaybe workerAsync l) $ \aid ->
 			liftIO (waitCatch aid) >>= \case
 				Left _ -> noop
 				Right st -> mergeState st
 
-{- Changes the current thread's stage in the worker pool.
- -
- - The pool needs to continue to contain the same number of worker threads
- - for each stage. So, an idle worker with the desired stage is found in
- - the pool (waiting if necessary for one to become idle), and the stages
- - of it and the current thread are swapped.
- -
- - Noop if the current thread already has the requested stage, or if the
- - current thread is not in the worker pool, or if concurrency is not
- - enabled.
- -}
-changeStageTo :: WorkerStage -> Annex ()
-changeStageTo newstage = debugLocks $ do
-	mytid <- liftIO myThreadId
-	tv <- Annex.getState Annex.workers
-	liftIO $ atomically $ do
-		pool <- takeTMVar tv
-		case removeThreadIdWorkerPool mytid pool of
-			Just ((myaid, oldstage), WorkerPool l)
-				| oldstage /= newstage -> do
-					(idlest, restpool) <- waitWorkerSlot'' newstage l
-					let pool' = addWorkerPool (IdleWorker idlest oldstage) $
-						addWorkerPool (ActiveWorker myaid newstage) restpool
-					putTMVar tv pool'
-			_ -> putTMVar tv pool
-
 {- Like commandAction, but without the concurrency. -}
 includeCommandAction :: CommandStart -> CommandCleanup
 includeCommandAction start =
@@ -261,28 +187,35 @@ performCommandAction' startmsg perform =
 		showEndMessage startmsg r
 		return r
 
-{- Do concurrent output when that has been requested. -}
-allowConcurrentOutput :: Annex a -> Annex a
-allowConcurrentOutput a = do
+{- Start concurrency when that has been requested.
+ - Should be run wrapping the seek stage of a command.
+ -
+ - Note that a duplicate of the Annex state is made here, and worker
+ - threads use that state. While the worker threads are not actually
+ - started here, that has the same effect.
+ -}
+startConcurrency :: UsedStages -> Annex a -> Annex a
+startConcurrency usedstages a = do
 	fromcmdline <- Annex.getState Annex.concurrency
 	fromgitcfg <- annexJobs <$> Annex.getGitConfig
 	let usegitcfg = Annex.changeState $
 		\c -> c { Annex.concurrency = fromgitcfg }
 	case (fromcmdline, fromgitcfg) of
 		(NonConcurrent, NonConcurrent) -> a
-		(Concurrent n, _) -> do
-			raisecapabilitiesto n
-			goconcurrent
-		(ConcurrentPerCpu, _) -> goconcurrent
+		(Concurrent n, _) ->
+			goconcurrent n
+		(ConcurrentPerCpu, _) ->
+			goconcurrentpercpu
 		(NonConcurrent, Concurrent n) -> do
 			usegitcfg
-			raisecapabilitiesto n
-			goconcurrent
+			goconcurrent n
 		(NonConcurrent, ConcurrentPerCpu) -> do
 			usegitcfg
-			goconcurrent
+			goconcurrentpercpu
   where
-	goconcurrent = do
+	goconcurrent n = do
+		raisecapabilitiesto n
+		initworkerpool n
 		withMessageState $ \s -> case outputType s of
 			NormalOutput -> ifM (liftIO concurrentOutputSupported)
 				( Regions.displayConsoleRegions $
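Callers now declare their stage set at the point where the seek stage is wrapped. A hypothetical example of the resulting command wiring (withFilesInGit and workTreeItems are assumed seek helpers, and commandStages an assumed UsedStages value; none are part of this hunk):

-- Hypothetical seek wrapper: the worker pool, and the duplicated
-- Annex state the workers share, now come into existence here,
-- before any commandAction runs. Anything this seek action changes
-- in Annex state afterwards is invisible to the workers.
seek :: CmdParams -> CommandSeek
seek ps = startConcurrency commandStages $
	withFilesInGit (commandAction . start) =<< workTreeItems ps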
@@ -292,6 +225,8 @@ allowConcurrentOutput a = do
 			_ -> goconcurrent' False
 	goconcurrent' b = bracket_ (setup b) cleanup a
 
+	goconcurrentpercpu = goconcurrent =<< liftIO getNumProcessors
+
 	setup = setconcurrentoutputenabled
 
 	cleanup = do
@@ -305,6 +240,17 @@ allowConcurrentOutput a = do
 	c <- liftIO getNumCapabilities
 	when (n > c) $
 		liftIO $ setNumCapabilities n
 
+	initworkerpool n = do
+		-- Generate the remote list now, to avoid each thread
+		-- generating it, which would be more expensive and
+		-- could cause threads to contend over eg, calls to
+		-- setConfig.
+		_ <- remoteList
+		st <- dupState
+		tv <- Annex.getState Annex.workers
+		liftIO $ atomically $ putTMVar tv $
+			allocateWorkerPool st (max n 1) usedstages
+
 {- Ensures that only one thread processes a key at a time.
  - Other threads will block until it's done.
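allocateWorkerPool's signature has grown a UsedStages argument, so the pool can be seeded per used stage instead of hardcoding Perform/Cleanup. A sketch of the plausible allocation, with the constructor and field shapes assumed as in the sketch after the commit message:

import qualified Data.Set as S

-- Sketch: seed the pool with n idle workers for each stage the
-- command uses, all sharing the duplicated state st. The UsedStages
-- is kept in the pool, matching the `WorkerPool _ l` pattern in the
-- finishCommandActions hunk above.
allocateWorkerPool :: t -> Int -> UsedStages -> WorkerPool t
allocateWorkerPool st n u = WorkerPool u
	[ IdleWorker st stage
	| stage <- S.toList (usedStages u)
	, _ <- [1..n]
	]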