{- git repository command queue
 -
 - Copyright 2010-2022 Joey Hess <id@joeyh.name>
 -
 - Licensed under the GNU AGPL version 3 or higher.
 -}

{-# LANGUAGE CPP, BangPatterns #-}

module Git.Queue (
	Queue,
	new,
	defaultTimelimit,
	addCommand,
	addUpdateIndex,
	addFlushAction,
	FlushActionRunner(..),
	size,
	full,
	flush,
	merge,
) where

import Utility.SafeCommand
import Common
import Git
import Git.Command
import qualified Git.UpdateIndex

import qualified Data.Map.Strict as M
import Control.Monad.IO.Class
import Data.Time.Clock
import Data.Time.Clock.POSIX

{- Queueable actions that can be performed in a git repository. -}
data Action m
	{- Updating the index file, using a list of streamers that can
	 - be added to as the queue grows. -}
	= UpdateIndexAction [Git.UpdateIndex.Streamer] -- in reverse order
	{- A git command to run, on a list of files that can be added to
	 - as the queue grows. -}
	| CommandAction
		{ getCommonParams :: [CommandParam]
		-- ^ parameters that come before the git subcommand
		-- (in addition to the Repo's gitGlobalOpts).
		, getSubcommand :: String
		, getParams :: [CommandParam]
		-- ^ parameters that come after the git subcommand
		, getFiles :: [CommandParam]
		}
	{- A FlushAction can be added along with CommandActions or
	 - UpdateIndexActions, and when the queue later gets flushed,
	 - those will be run before the FlushAction is. -}
	| FlushAction
		{ getFlushActionRunner :: FlushActionRunner m
		, getFlushActionFiles :: [RawFilePath]
		}

{- The String must be unique for each flush action. -}
data FlushActionRunner m = FlushActionRunner String (Repo -> [RawFilePath] -> m ())

instance Eq (FlushActionRunner m) where
	FlushActionRunner s1 _ == FlushActionRunner s2 _ = s1 == s2

{- A key that can uniquely represent an action in a Map.
 -
 - The ordering controls what order the actions are run in when flushing
 - the queue. -}
data ActionKey
	= UpdateIndexActionKey
	| CommandActionKey [CommandParam] String [CommandParam]
	| FlushActionKey String
	deriving (Eq, Ord)

actionKey :: Action m -> ActionKey
actionKey (UpdateIndexAction _) = UpdateIndexActionKey
actionKey CommandAction { getCommonParams = c, getSubcommand = s, getParams = p } = CommandActionKey c s p
actionKey FlushAction { getFlushActionRunner = FlushActionRunner s _ } = FlushActionKey s
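
{- Since the Ord instance is derived, UpdateIndexActionKey sorts before
 - CommandActionKey, which sorts before FlushActionKey. flush runs actions
 - in ascending key order (M.elems), so index updates happen first and
 - flush actions last. -}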

{- A queue of actions to perform (in any order) on a git repository,
 - with lists of files to perform them on. This allows coalescing
 - similar git commands. -}
data Queue m = Queue
	{ size :: Int
	, _limit :: Int
	, _timelimit :: NominalDiffTime
	, _lastchanged :: POSIXTime
	, items :: M.Map ActionKey (Action m)
	}

{- A recommended maximum size for the queue, after which it should be
 - run.
 -
 - 10240 is semi-arbitrary. If we assume git filenames are between 10 and
 - 255 characters long, then the queue will build up between 100kb and
 - 2550kb of command line. The maximum command line length on Linux is
 - somewhere above 20k, so this is a fairly good balance -- the queue will
 - buffer only a few megabytes and a minimal number of commands will be
 - run by xargs. -}
defaultLimit :: Int
defaultLimit = 10240

{- How close together, in seconds, changes to the queue have to happen
 - for it to keep accumulating actions, rather than running actions
 - immediately. -}
defaultTimelimit :: NominalDiffTime
defaultTimelimit = 60 * 5

{- Constructor for empty queue. -}
new :: Maybe Int -> Maybe NominalDiffTime -> IO (Queue m)
new lim tlim = do
	now <- getPOSIXTime
	return $ Queue 0
		(fromMaybe defaultLimit lim)
		(fromMaybe defaultTimelimit tlim)
		now
		M.empty
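
{- A minimal usage sketch (illustrative only; "repo" is an assumed
 - Git.Repo value obtained elsewhere):
 -
 - > go :: Repo -> IO ()
 - > go repo = do
 - >	q <- new Nothing Nothing
 - >	q' <- addCommand [] "add" [] ["foo", "bar"] q repo
 - >	_ <- flush q' repo
 - >	return ()
 -}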

{- Adds a git command to the queue.
 -
 - Git commands with the same subcommand but different parameters are
 - assumed to be equivalent enough to perform in any order with the same
 - end result.
 -}
addCommand :: MonadIO m => [CommandParam] -> String -> [CommandParam] -> [FilePath] -> Queue m -> Repo -> m (Queue m)
addCommand commonparams subcommand params files q repo =
	updateQueue action conflicting (length files) q repo
  where
	action = CommandAction
		{ getCommonParams = commonparams
		, getSubcommand = subcommand
		, getParams = params
		, getFiles = map File files
		}

	conflicting (CommandAction { getSubcommand = s }) = s /= subcommand
	conflicting (FlushAction {}) = False
	conflicting _ = True
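
{- For example (hypothetical call sequence): queueing "git add" and then
 - "git rm" means the second addCommand sees a conflicting action already
 - queued, so the "add" is flushed before the "rm" is added:
 -
 - > q1 <- addCommand [] "add" [] ["a"] q0 repo
 - > q2 <- addCommand [] "rm" [] ["b"] q1 repo  -- flushes the queued "add"
 -}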

{- Adds a flush action to the queue. This can co-exist with anything else
 - that gets added to the queue, and when the queue is eventually flushed,
 - it will be run after the other things in the queue. -}
addFlushAction :: MonadIO m => FlushActionRunner m -> [RawFilePath] -> Queue m -> Repo -> m (Queue m)
addFlushAction runner files q repo =
	updateQueue action (const False) (length files) q repo
  where
	action = FlushAction
		{ getFlushActionRunner = runner
		, getFlushActionFiles = files
		}
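
{- A sketch of a runner (hypothetical): the String uniquely names the
 - action, so repeated additions coalesce into a single queue entry whose
 - file lists are combined:
 -
 - > printFiles :: FlushActionRunner IO
 - > printFiles = FlushActionRunner "printFiles" $ \_repo fs ->
 - >	mapM_ print fs
 -}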

{- Adds an update-index streamer to the queue. -}
addUpdateIndex :: MonadIO m => Git.UpdateIndex.Streamer -> Queue m -> Repo -> m (Queue m)
addUpdateIndex streamer q repo =
	updateQueue action conflicting 1 q repo
  where
	-- the list is built in reverse order
	action = UpdateIndexAction [streamer]

	conflicting (UpdateIndexAction _) = False
	conflicting (FlushAction {}) = False
	conflicting _ = True

{- Updates or adds an action in the queue.
 -
 - If the queue already contains a conflicting action, it will be flushed
 - before adding the action; this is to ensure that conflicting actions,
 - like add and rm, are run in the right order.
 -
 - If the queue's time limit has been exceeded, it will also be flushed,
 - and the action will be run right away.
 -}
updateQueue :: MonadIO m => Action m -> (Action m -> Bool) -> Int -> Queue m -> Repo -> m (Queue m)
updateQueue !action conflicting sizeincrease q repo = do
	now <- liftIO getPOSIXTime
	if now - (_lastchanged q) > _timelimit q
		then if isconflicting
			then do
				q' <- flush q repo
				flush (mk q') repo
			else flush (mk q) repo
		else if isconflicting
			then mk <$> flush q repo
			else return $ mk (q { _lastchanged = now })
  where
	isconflicting = not (null (filter conflicting (M.elems (items q))))
	mk q' = newq
	  where
		!newq = q'
			{ size = newsize
			, items = newitems
			}
		!newsize = size q' + sizeincrease
		!newitems = M.insertWith combineNewOld (actionKey action) action (items q')
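
{- For example (hypothetical values): if the queue already holds an "add"
 - of ["a"] and the same "add" of ["b"] is queued, the two coalesce into
 - one CommandAction via combineNewOld, with
 - getFiles = [File "b", File "a"]. -}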

{- The new value comes first. It probably has a smaller list of files than
 - the old value, so appending with the new value's list first is more
 - efficient. -}
combineNewOld :: Action m -> Action m -> Action m
combineNewOld (CommandAction _cps1 _sc1 _ps1 fs1) (CommandAction cps2 sc2 ps2 fs2) =
	CommandAction cps2 sc2 ps2 (fs1++fs2)
combineNewOld (UpdateIndexAction s1) (UpdateIndexAction s2) =
	UpdateIndexAction (s1++s2)
combineNewOld (FlushAction _r1 fs1) (FlushAction r2 fs2) =
	FlushAction r2 (fs1++fs2)
combineNewOld anew _aold = anew

{- Merges the contents of the second queue into the first.
 - This should only be used when the two queues are known to contain
 - non-conflicting actions. -}
merge :: Queue m -> Queue m -> Queue m
merge origq newq = origq
	{ size = size origq + size newq
	, items = M.unionWith combineNewOld (items newq) (items origq)
	, _lastchanged = max (_lastchanged origq) (_lastchanged newq)
	}
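
{- Note that items newq is passed to M.unionWith first, so when both
 - queues contain the same ActionKey, combineNewOld sees newq's action as
 - the new value and its file list ends up first in the combined list. -}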

{- Is a queue large enough that it should be flushed? -}
full :: Queue m -> Bool
full (Queue cur lim _ _ _) = cur >= lim

{- Runs a queue on a git repository. -}
flush :: MonadIO m => Queue m -> Repo -> m (Queue m)
flush (Queue _ lim tlim _ m) repo = do
	forM_ (M.elems m) $ runAction repo
	now <- liftIO getPOSIXTime
	return $ Queue 0 lim tlim now M.empty

{- Runs an Action on a list of files in a git repository.
 -
 - Complicated by commandline length limits.
 -
 - Intentionally runs the command even if the list of files is empty;
 - this allows queueing commands that do not need a list of files. -}
runAction :: MonadIO m => Repo -> Action m -> m ()
runAction repo (UpdateIndexAction streamers) =
	-- list is stored in reverse order
	liftIO $ Git.UpdateIndex.streamUpdateIndex repo $ reverse streamers
runAction repo action@(CommandAction {}) = liftIO $ do
#ifndef mingw32_HOST_OS
	let p = (proc "xargs" $ "-0":"git":toCommand gitparams)
		{ env = gitEnv repo
		, std_in = CreatePipe
		}
	withCreateProcess p (go p)
#else
	-- Using xargs on Windows is problematic, so just run the command
	-- once per file (not as efficient).
	if null (getFiles action)
		then void $ boolSystemEnv "git" gitparams (gitEnv repo)
		else forM_ (getFiles action) $ \f ->
			void $ boolSystemEnv "git" (gitparams ++ [f]) (gitEnv repo)
#endif
  where
	gitparams = gitCommandLine
		(getCommonParams action++Param (getSubcommand action):getParams action)
		repo
#ifndef mingw32_HOST_OS
	go p (Just h) _ _ pid = do
		hPutStr h $ intercalate "\0" $ toCommand $ getFiles action
		hClose h
		forceSuccessProcess p pid
	go _ _ _ _ _ = error "internal"
#endif
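
{- The non-Windows branch above is roughly equivalent to a shell pipeline
 - like (illustrative; the subcommand and file names are hypothetical):
 -
 - > printf 'a\0b\0' | xargs -0 git add
 -
 - xargs splits the NUL-separated file list across as many git invocations
 - as command line length limits require. -}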
runAction repo action@(FlushAction {}) =
	let FlushActionRunner _ runner = getFlushActionRunner action
	in runner repo (getFlushActionFiles action)