{- Metered IO and actions
 -
 - Copyright 2012-2021 Joey Hess <id@joeyh.name>
 -
 - License: BSD-2-clause
 -}

{-# LANGUAGE TypeSynonymInstances, BangPatterns #-}

module Utility.Metered (
	MeterUpdate,
	MeterState(..),
	nullMeterUpdate,
	combineMeterUpdate,
	TotalSize(..),
	BytesProcessed(..),
	toBytesProcessed,
	fromBytesProcessed,
	addBytesProcessed,
	zeroBytesProcessed,
	withMeteredFile,
	meteredWrite,
	meteredWrite',
	meteredWriteFile,
	offsetMeterUpdate,
	hGetContentsMetered,
	hGetMetered,
	defaultChunkSize,
	watchFileSize,
	OutputHandler(..),
	ProgressParser,
	commandMeter,
	commandMeter',
	commandMeterExitCode,
	commandMeterExitCode',
	demeterCommand,
	demeterCommandEnv,
	avoidProgress,
	rateLimitMeterUpdate,
	bwLimitMeterUpdate,
	Meter,
	mkMeter,
	setMeterTotalSize,
	updateMeter,
	displayMeterHandle,
	clearMeterHandle,
	bandwidthMeter,
) where

import Common
import Utility.Percentage
import Utility.DataUnits
import Utility.HumanTime
import Utility.SimpleProtocol as Proto
import Utility.ThreadScheduler

import qualified Data.ByteString.Lazy as L
import qualified Data.ByteString as S
import System.IO.Unsafe
import Foreign.Storable (Storable(sizeOf))
import System.Posix.Types
import Data.Int
import Control.Concurrent
import Control.Concurrent.Async
import Control.Monad.IO.Class (MonadIO)
import Data.Time.Clock
import Data.Time.Clock.POSIX

{- An action that can be run repeatedly, updating it on the bytes processed.
 -
 - Note that each call receives the total number of bytes processed, so
 - far, *not* an incremental amount since the last call. -}
type MeterUpdate = (BytesProcessed -> IO ())
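
{- For illustration only (not part of this module's API): a MeterUpdate
 - that simply prints the running total it is passed, e.g. for debugging.
 -
 - > printMeter :: MeterUpdate
 - > printMeter (BytesProcessed n) = putStrLn ("bytes so far: " ++ show n)
 -}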

nullMeterUpdate :: MeterUpdate
nullMeterUpdate _ = return ()

combineMeterUpdate :: MeterUpdate -> MeterUpdate -> MeterUpdate
combineMeterUpdate a b = \n -> a n >> b n

{- Total number of bytes processed so far. -}
newtype BytesProcessed = BytesProcessed Integer
	deriving (Eq, Ord, Show, Read)

class AsBytesProcessed a where
	toBytesProcessed :: a -> BytesProcessed
	fromBytesProcessed :: BytesProcessed -> a

instance AsBytesProcessed BytesProcessed where
	toBytesProcessed = id
	fromBytesProcessed = id

instance AsBytesProcessed Integer where
	toBytesProcessed i = BytesProcessed i
	fromBytesProcessed (BytesProcessed i) = i

instance AsBytesProcessed Int where
	toBytesProcessed i = BytesProcessed $ toInteger i
	fromBytesProcessed (BytesProcessed i) = fromInteger i

instance AsBytesProcessed Int64 where
	toBytesProcessed i = BytesProcessed $ toInteger i
	fromBytesProcessed (BytesProcessed i) = fromInteger i

instance AsBytesProcessed FileOffset where
	toBytesProcessed sz = BytesProcessed $ toInteger sz
	fromBytesProcessed (BytesProcessed sz) = fromInteger sz

addBytesProcessed :: AsBytesProcessed v => BytesProcessed -> v -> BytesProcessed
addBytesProcessed (BytesProcessed i) v =
	let (BytesProcessed n) = toBytesProcessed v
	in BytesProcessed $! i + n

zeroBytesProcessed :: BytesProcessed
zeroBytesProcessed = BytesProcessed 0

{- Sends the content of a file to an action, updating the meter as it's
 - consumed. -}
withMeteredFile :: FilePath -> MeterUpdate -> (L.ByteString -> IO a) -> IO a
withMeteredFile f meterupdate a = withBinaryFile f ReadMode $ \h ->
	hGetContentsMetered h meterupdate >>= a
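
{- A small usage sketch (hypothetical helper, not part of this module):
 - force the whole file inside the action so the meter is fed as the
 - content is consumed.
 -
 - > fileLength :: FilePath -> MeterUpdate -> IO Integer
 - > fileLength f p = withMeteredFile f p (return . fromIntegral . L.length)
 -}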

{- Calls the action repeatedly with chunks from the lazy ByteString.
 - Updates the meter after each chunk is processed. -}
meteredWrite :: MeterUpdate -> (S.ByteString -> IO ()) -> L.ByteString -> IO ()
meteredWrite meterupdate a = void . meteredWrite' meterupdate a

meteredWrite' :: MeterUpdate -> (S.ByteString -> IO ()) -> L.ByteString -> IO BytesProcessed
meteredWrite' meterupdate a = go zeroBytesProcessed . L.toChunks
  where
	go sofar [] = return sofar
	go sofar (c:cs) = do
		a c
		let !sofar' = addBytesProcessed sofar $ S.length c
		meterupdate sofar'
		go sofar' cs

meteredWriteFile :: MeterUpdate -> FilePath -> L.ByteString -> IO ()
meteredWriteFile meterupdate f b = withBinaryFile f WriteMode $ \h ->
	meteredWrite meterupdate (S.hPut h) b
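
{- Example use (hypothetical helper): copy a file while reporting progress
 - on the write side.
 -
 - > copyMetered :: MeterUpdate -> FilePath -> FilePath -> IO ()
 - > copyMetered p src dest = L.readFile src >>= meteredWriteFile p dest
 -}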

{- Applies an offset to a MeterUpdate. This can be useful when
 - performing a sequence of actions, such as multiple meteredWriteFiles,
 - that all update a common meter progressively. Or when resuming.
 -}
offsetMeterUpdate :: MeterUpdate -> BytesProcessed -> MeterUpdate
offsetMeterUpdate base offset = \n -> base (offset `addBytesProcessed` n)
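
{- For instance, when resuming after some bytes are already present,
 - the meter for the remaining transfer can be shifted by that amount
 - (illustrative helper only):
 -
 - > resumedMeter :: MeterUpdate -> Integer -> MeterUpdate
 - > resumedMeter base alreadyhave = offsetMeterUpdate base (toBytesProcessed alreadyhave)
 -}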

{- This is like L.hGetContents, but after each chunk is read, a meter
 - is updated based on the size of the chunk.
 -
 - All the usual caveats about using unsafeInterleaveIO apply to the
 - meter updates, so use caution.
 -}
hGetContentsMetered :: Handle -> MeterUpdate -> IO L.ByteString
hGetContentsMetered h = hGetMetered h Nothing

{- Reads from the Handle, updating the meter after each chunk is read.
 -
 - Stops at EOF, or when the requested number of bytes have been read.
 - Closes the Handle at EOF, but otherwise leaves it open.
 -
 - Note that the meter update is run in unsafeInterleaveIO, which means that
 - it can be run at any time. It's even possible for updates to run out
 - of order, as different parts of the ByteString are consumed.
 -}
hGetMetered :: Handle -> Maybe Integer -> MeterUpdate -> IO L.ByteString
hGetMetered h wantsize meterupdate = lazyRead zeroBytesProcessed
  where
	lazyRead sofar = unsafeInterleaveIO $ loop sofar

	loop sofar = do
		c <- S.hGet h (nextchunksize (fromBytesProcessed sofar))
		if S.null c
			then do
				when (wantsize /= Just 0) $
					hClose h
				return L.empty
			else do
				let !sofar' = addBytesProcessed sofar (S.length c)
				meterupdate sofar'
				if keepgoing (fromBytesProcessed sofar')
					then do
						{- unsafeInterleaveIO causes this to be
						 - deferred until the data is read from the
						 - ByteString. -}
						cs <- lazyRead sofar'
						return $ L.append (L.fromChunks [c]) cs
					else return $ L.fromChunks [c]

	keepgoing n = case wantsize of
		Nothing -> True
		Just sz -> n < sz

	nextchunksize n = case wantsize of
		Nothing -> defaultChunkSize
		Just sz ->
			let togo = sz - n
			in if togo < toInteger defaultChunkSize
				then fromIntegral togo
				else defaultChunkSize
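
{- A bounded-read sketch (illustrative): read at most the first mebibyte,
 - leaving the Handle open if EOF was not reached.
 -
 - > firstMiB :: Handle -> MeterUpdate -> IO L.ByteString
 - > firstMiB h p = hGetMetered h (Just (1024 * 1024)) p
 -}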

{- Same default chunk size Lazy ByteStrings use. -}
defaultChunkSize :: Int
defaultChunkSize = 32 * k - chunkOverhead
  where
	k = 1024
	chunkOverhead = 2 * sizeOf (1 :: Int) -- GHC specific
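
{- For example, on a 64-bit GHC sizeOf (1 :: Int) is 8, so chunkOverhead
 - is 16 and defaultChunkSize is 32 * 1024 - 16 = 32752 bytes. -}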

{- Runs an action, watching a file as it grows and updating the meter.
 -
 - The file may already exist, and the action could throw the original file
 - away and start over. To avoid reporting the original file size followed
 - by a smaller size in that case, wait until the file starts growing
 - before updating the meter for the first time.
 -}
watchFileSize :: (MonadIO m, MonadMask m) => FilePath -> MeterUpdate -> m a -> m a
watchFileSize f p a = bracket
	(liftIO $ forkIO $ watcher =<< getsz)
	(liftIO . void . tryIO . killThread)
	(const a)
  where
	watcher oldsz = do
		threadDelay 500000 -- 0.5 seconds
		sz <- getsz
		when (sz > oldsz) $
			p sz
		watcher sz
	getsz = catchDefaultIO zeroBytesProcessed $
		toBytesProcessed <$> getFileSize f'
	f' = toRawFilePath f
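
{- A usage sketch (hypothetical; assumes a command that writes dest
 - incrementally, and the CommandParam constructors in scope via Common):
 -
 - > downloadWatched :: OutputHandler -> MeterUpdate -> FilePath -> String -> IO Bool
 - > downloadWatched oh p dest url = watchFileSize dest p $
 - >	demeterCommand oh "wget" [Param "-O", File dest, Param url]
 -}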

data OutputHandler = OutputHandler
	{ quietMode :: Bool
	, stderrHandler :: String -> IO ()
	}

{- Parses the String looking for a command's progress output, and returns
 - Maybe the number of bytes done so far, optionally a total size,
 - and any remainder of the string that could be an incomplete
 - progress output. That remainder should be prepended to future output,
 - and fed back in. This interface allows the command's output to be read
 - in any desired size chunk, or even one character at a time.
 -}
type ProgressParser = String -> (Maybe BytesProcessed, Maybe TotalSize, String)

newtype TotalSize = TotalSize Integer
	deriving (Show, Eq)
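
{- A toy ProgressParser sketch (illustrative only): it consumes one line at
 - a time, treating it as a decimal byte count, and hands whatever follows
 - back as the remainder to be fed in again with later output.
 -
 - > countLines :: ProgressParser
 - > countLines s = case break (== '\n') s of
 - >	(l, '\n':rest) -> (BytesProcessed <$> readish l, Nothing, rest)
 - >	(partial, _) -> (Nothing, Nothing, partial)
 -}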

{- Runs a command and runs a ProgressParser on its output, in order
 - to update a meter.
 -
 - If the Meter is provided, the ProgressParser can report the total size,
 - which allows creating a Meter before the size is known.
 -}
commandMeter :: ProgressParser -> OutputHandler -> Maybe Meter -> MeterUpdate -> FilePath -> [CommandParam] -> IO Bool
commandMeter progressparser oh meter meterupdate cmd params =
	commandMeter' progressparser oh meter meterupdate cmd params id

commandMeter' :: ProgressParser -> OutputHandler -> Maybe Meter -> MeterUpdate -> FilePath -> [CommandParam] -> (CreateProcess -> CreateProcess) -> IO Bool
commandMeter' progressparser oh meter meterupdate cmd params mkprocess = do
	ret <- commandMeterExitCode' progressparser oh meter meterupdate cmd params mkprocess
	return $ case ret of
		Just ExitSuccess -> True
		_ -> False

commandMeterExitCode :: ProgressParser -> OutputHandler -> Maybe Meter -> MeterUpdate -> FilePath -> [CommandParam] -> IO (Maybe ExitCode)
commandMeterExitCode progressparser oh meter meterupdate cmd params =
	commandMeterExitCode' progressparser oh meter meterupdate cmd params id

commandMeterExitCode' :: ProgressParser -> OutputHandler -> Maybe Meter -> MeterUpdate -> FilePath -> [CommandParam] -> (CreateProcess -> CreateProcess) -> IO (Maybe ExitCode)
commandMeterExitCode' progressparser oh mmeter meterupdate cmd params mkprocess =
	outputFilter cmd params mkprocess Nothing
		(const $ feedprogress mmeter zeroBytesProcessed [])
		handlestderr
  where
	feedprogress sendtotalsize prev buf h = do
		b <- S.hGetSome h 80
		if S.null b
			then return ()
			else do
				unless (quietMode oh) $ do
					S.hPut stdout b
					hFlush stdout
				let s = decodeBS b
				let (mbytes, mtotalsize, buf') = progressparser (buf++s)
				sendtotalsize' <- case (sendtotalsize, mtotalsize) of
					(Just meter, Just t) -> do
						setMeterTotalSize meter t
						return Nothing
					_ -> return sendtotalsize
				case mbytes of
					Nothing -> feedprogress sendtotalsize' prev buf' h
					(Just bytes) -> do
						when (bytes /= prev) $
							meterupdate bytes
						feedprogress sendtotalsize' bytes buf' h

	handlestderr ph h = hGetLineUntilExitOrEOF ph h >>= \case
		Just l -> do
			stderrHandler oh l
			handlestderr ph h
		Nothing -> return ()

{- Runs a command that may display one or more progress meters on
 - either stdout or stderr, and prevents the meters from being displayed.
 -
 - The other command output is handled as configured by the OutputHandler.
 -}
demeterCommand :: OutputHandler -> FilePath -> [CommandParam] -> IO Bool
demeterCommand oh cmd params = demeterCommandEnv oh cmd params Nothing

demeterCommandEnv :: OutputHandler -> FilePath -> [CommandParam] -> Maybe [(String, String)] -> IO Bool
demeterCommandEnv oh cmd params environ = do
	ret <- outputFilter cmd params id environ
		(\ph outh -> avoidProgress True ph outh stdouthandler)
		(\ph errh -> avoidProgress True ph errh $ stderrHandler oh)
	return $ case ret of
		Just ExitSuccess -> True
		_ -> False
  where
	stdouthandler l =
		unless (quietMode oh) $
			putStrLn l

{- To suppress progress output, while displaying other messages,
 - filter out lines that contain \r (typically used to reset to the
 - beginning of the line when updating a progress display).
 -}
avoidProgress :: Bool -> ProcessHandle -> Handle -> (String -> IO ()) -> IO ()
avoidProgress doavoid ph h emitter = hGetLineUntilExitOrEOF ph h >>= \case
	Just s -> do
		unless (doavoid && '\r' `elem` s) $
			emitter s
		avoidProgress doavoid ph h emitter
	Nothing -> return ()

outputFilter
	:: FilePath
	-> [CommandParam]
	-> (CreateProcess -> CreateProcess)
	-> Maybe [(String, String)]
	-> (ProcessHandle -> Handle -> IO ())
	-> (ProcessHandle -> Handle -> IO ())
	-> IO (Maybe ExitCode)
outputFilter cmd params mkprocess environ outfilter errfilter =
	catchMaybeIO $ withCreateProcess p go
  where
	go _ (Just outh) (Just errh) ph = do
		outt <- async $ tryIO (outfilter ph outh) >> hClose outh
		errt <- async $ tryIO (errfilter ph errh) >> hClose errh
		ret <- waitForProcess ph
		wait outt
		wait errt
		return ret
	go _ _ _ _ = error "internal"

	p = mkprocess (proc cmd (toCommand params))
		{ env = environ
		, std_out = CreatePipe
		, std_err = CreatePipe
		}

-- | Limit a meter to only update once per unit of time.
--
-- It's nice to display the final update to 100%, even if it comes soon
-- after a previous update. To make that happen, the Meter has to know
-- its total size.
rateLimitMeterUpdate :: NominalDiffTime -> Meter -> MeterUpdate -> IO MeterUpdate
rateLimitMeterUpdate delta (Meter totalsizev _ _ _) meterupdate = do
	lastupdate <- newMVar (toEnum 0 :: POSIXTime)
	return $ mu lastupdate
  where
	mu lastupdate n@(BytesProcessed i) = readMVar totalsizev >>= \case
		Just (TotalSize t) | i >= t -> meterupdate n
		_ -> do
			now <- getPOSIXTime
			prev <- takeMVar lastupdate
			if now - prev >= delta
				then do
					putMVar lastupdate now
					meterupdate n
				else putMVar lastupdate prev
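
{- Typical use (sketch only): wrap an existing MeterUpdate so a display is
 - refreshed at most ten times per second.
 -
 - > throttleDisplay :: Meter -> MeterUpdate -> IO MeterUpdate
 - > throttleDisplay meter u = rateLimitMeterUpdate 0.1 meter u
 -}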

-- | Bandwidth limiting by inserting a delay at the point that a meter is
-- updated.
--
-- This will only work when the actions that use bandwidth are run in the
-- same process and thread as the call to the MeterUpdate.
--
-- For example, if the desired bandwidth is 100kb/s, and over the past
-- 1/10th of a second, 30kb was sent, then the current bandwidth is
-- 300kb/s, 3x as fast as desired. So, after getting the next chunk,
-- pause for twice as long as it took to get it.
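--
-- Working through that example: the pause scale is (300/100) - 1 = 2, so
-- the pause is 2 * 0.1s = 0.2s, and that chunk then averages
-- 30kb / 0.3s = 100kb/s, the desired rate.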
bwLimitMeterUpdate :: ByteSize -> Duration -> MeterUpdate -> IO MeterUpdate
bwLimitMeterUpdate bwlimit duration meterupdate
	| bwlimit <= 0 = return meterupdate
	| otherwise = do
		nowtime <- getPOSIXTime
		mv <- newMVar (nowtime, Nothing)
		return (mu mv)
  where
	mu mv n@(BytesProcessed i) = do
		endtime <- getPOSIXTime
		(starttime, mprevi) <- takeMVar mv

		case mprevi of
			Just previ -> do
				let runtime = endtime - starttime
				let currbw = fromIntegral (i - previ) / runtime
				let pausescale = if currbw > bwlimit'
					then (currbw / bwlimit') - 1
					else 0
				unboundDelay (floor (runtime * pausescale * msecs))
			Nothing -> return ()

		meterupdate n

		nowtime <- getPOSIXTime
		putMVar mv (nowtime, Just i)

	bwlimit' = fromIntegral (bwlimit * durationSeconds duration)
	msecs = fromIntegral oneSecond

data Meter = Meter (MVar (Maybe TotalSize)) (MVar MeterState) (MVar String) DisplayMeter

data MeterState = MeterState
	{ meterBytesProcessed :: BytesProcessed
	, meterTimeStamp :: POSIXTime
	} deriving (Show)

type DisplayMeter = MVar String -> Maybe TotalSize -> MeterState -> MeterState -> IO ()

type RenderMeter = Maybe TotalSize -> MeterState -> MeterState -> String

-- | Make a meter. Pass the total size, if it's known.
mkMeter :: Maybe TotalSize -> DisplayMeter -> IO Meter
mkMeter totalsize displaymeter = do
	ts <- getPOSIXTime
	Meter
		<$> newMVar totalsize
		<*> newMVar (MeterState zeroBytesProcessed ts)
		<*> newMVar ""
		<*> pure displaymeter

setMeterTotalSize :: Meter -> TotalSize -> IO ()
setMeterTotalSize (Meter totalsizev _ _ _) = void . swapMVar totalsizev . Just

-- | Updates the meter, displaying it if necessary.
updateMeter :: Meter -> MeterUpdate
updateMeter (Meter totalsizev sv bv displaymeter) new = do
	now <- getPOSIXTime
	let curms = MeterState new now
	oldms <- swapMVar sv curms
	when (meterBytesProcessed oldms /= new) $ do
		totalsize <- readMVar totalsizev
		displaymeter bv totalsize oldms curms

-- | Display meter to a Handle.
displayMeterHandle :: Handle -> RenderMeter -> DisplayMeter
displayMeterHandle h rendermeter v msize old new = do
	olds <- takeMVar v
	let s = rendermeter msize old new
	let padding = replicate (length olds - length s) ' '
	let s' = s <> padding
	putMVar v s'
	-- Avoid writing when the rendered meter has not changed.
	when (olds /= s') $ do
		hPutStr h ('\r':s')
		hFlush h

-- | Clear meter displayed by displayMeterHandle. May be called before
-- outputting something else, followed by more calls to displayMeterHandle.
clearMeterHandle :: Meter -> Handle -> IO ()
clearMeterHandle (Meter _ _ v _) h = do
	olds <- readMVar v
	hPutStr h $ '\r' : replicate (length olds) ' ' ++ "\r"
	hFlush h

-- | Display meter in the form:
-- 10% 1.3MiB 300 KiB/s 16m40s
-- or when total size is not known:
-- 1.3 MiB 300 KiB/s
bandwidthMeter :: RenderMeter
bandwidthMeter mtotalsize (MeterState (BytesProcessed old) before) (MeterState (BytesProcessed new) now) =
	unwords $ catMaybes
		[ Just percentamount
		-- Pad enough for max width: "100% xxxx.xx KiB xxxx KiB/s"
		, Just $ replicate (29 - length percentamount - length rate) ' '
		, Just rate
		, estimatedcompletion
		]
  where
	amount = roughSize' committeeUnits True 2 new
	percentamount = case mtotalsize of
		Just (TotalSize totalsize) ->
			let p = showPercentage 0 $
				percentage totalsize (min new totalsize)
			in p ++ replicate (6 - length p) ' ' ++ amount
		Nothing -> amount
	rate = roughSize' committeeUnits True 0 bytespersecond ++ "/s"
	bytespersecond
		| duration == 0 = fromIntegral transferred
		| otherwise = floor $ fromIntegral transferred / duration
	transferred = max 0 (new - old)
	duration = max 0 (now - before)
	estimatedcompletion = case mtotalsize of
		Just (TotalSize totalsize)
			| bytespersecond > 0 ->
				Just $ fromDuration $ Duration $
					(totalsize - new) `div` bytespersecond
		_ -> Nothing
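
{- How the pieces above are typically wired together (sketch only): make a
 - Meter that renders with bandwidthMeter onto stderr, feed it through
 - updateMeter, and clear it when done.
 -
 - > withConsoleMeter :: Maybe TotalSize -> (MeterUpdate -> IO a) -> IO a
 - > withConsoleMeter msize act = do
 - >	meter <- mkMeter msize (displayMeterHandle stderr bandwidthMeter)
 - >	r <- act (updateMeter meter)
 - >	clearMeterHandle meter stderr
 - >	return r
 -}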

instance Proto.Serializable BytesProcessed where
	serialize (BytesProcessed n) = show n
	deserialize = BytesProcessed <$$> readish