{- git-annex progress output
 -
 - Copyright 2010-2021 Joey Hess <id@joeyh.name>
 -
 - Licensed under the GNU AGPL version 3 or higher.
 -}

{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE OverloadedStrings #-}

module Messages.Progress where

import Common
import qualified Annex
import Messages
import Utility.Metered
import Types
import Types.Messages
import Types.Key
import Types.KeySource
import Types.StallDetection (BwRate(..))
import Utility.InodeCache
import qualified Messages.JSON as JSON
import Messages.Concurrent
import Messages.Internal
import Utility.SafeOutput

import qualified System.Console.Regions as Regions
import qualified System.Console.Concurrent as Console
import Control.Monad.IO.Class (MonadIO)
import Data.IORef

{- Class of things from which a size can be gotten to display a progress
 - meter. -}
class MeterSize t where
	getMeterSize :: t -> Annex (Maybe TotalSize)

instance MeterSize t => MeterSize (Maybe t) where
	getMeterSize Nothing = pure Nothing
	getMeterSize (Just t) = getMeterSize t

instance MeterSize FileSize where
	getMeterSize = pure . Just . TotalSize

instance MeterSize Key where
	getMeterSize = pure . fmap TotalSize . fromKey keySize

instance MeterSize InodeCache where
	getMeterSize = pure . Just . TotalSize . inodeCacheFileSize

instance MeterSize KeySource where
	getMeterSize = maybe (pure Nothing) getMeterSize . inodeCache
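
{- Illustrative sketch (not part of git-annex): any other type can opt into
 - progress display by giving it a MeterSize instance. SizedKey is a
 - hypothetical type that prefers the key's recorded size and falls back to
 - a caller-supplied FileSize. -}
data SizedKey = SizedKey Key FileSize

instance MeterSize SizedKey where
	getMeterSize (SizedKey k fallback) = do
		msz <- getMeterSize k
		case msz of
			Just sz -> pure (Just sz)
			Nothing -> getMeterSize fallback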

{- When the key's size is not known, the file is statted to get the size.
 - This allows uploads of keys without size to still have progress
 - displayed.
 -}
data KeySizer = KeySizer Key (Annex (Maybe RawFilePath))

instance MeterSize KeySizer where
	getMeterSize (KeySizer k getsrcfile) = case fromKey keySize k of
		Just sz -> return (Just (TotalSize sz))
		Nothing -> do
			srcfile <- getsrcfile
			case srcfile of
				Nothing -> return Nothing
				Just f -> catchMaybeIO $ liftIO $
					TotalSize <$> getFileSize f
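
{- Illustrative sketch (not part of git-annex): constructing a KeySizer for
 - a key whose source file is already known, so the meter can stat that file
 - when the key itself carries no size. The srcfile argument is
 - hypothetical. -}
exampleKeySizer :: Key -> RawFilePath -> KeySizer
exampleKeySizer k srcfile = KeySizer k (pure (Just srcfile))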

{- Shows a progress meter while performing an action.
 - The action is passed the meter and a callback to use to update the meter.
 -}
metered
	:: MeterSize sizer
	=> Maybe MeterUpdate
	-> sizer
	-> Maybe BwRate
	-> (Meter -> MeterUpdate -> Annex a)
	-> Annex a
metered othermeterupdate sizer bwlimit a = withMessageState $ \st -> do
	sz <- getMeterSize sizer
	metered' st setclear othermeterupdate sz bwlimit showOutput a
  where
	setclear c = Annex.changeState $ \st -> st
		{ Annex.output = (Annex.output st) { clearProgressMeter = c } }
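
{- Illustrative sketch (not part of git-annex): a typical caller passes the
 - key being transferred as the sizer and hands the MeterUpdate callback to
 - whatever performs the transfer. transferaction is a hypothetical stand-in
 - for such a transfer. -}
exampleMetered :: Key -> (MeterUpdate -> Annex Bool) -> Annex Bool
exampleMetered k transferaction = metered Nothing k Nothing $ \_meter p ->
	transferaction p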

metered'
	:: (Monad m, MonadIO m, MonadMask m)
	=> MessageState
	-> (IO () -> m ())
	-- ^ This should set clearProgressMeter when progress meters
	-- are being displayed; not needed when outputType is not
	-- NormalOutput.
	-> Maybe MeterUpdate
	-> Maybe TotalSize
	-> Maybe BwRate
	-> m ()
	-- ^ this should run showOutput
	-> (Meter -> MeterUpdate -> m a)
	-> m a
metered' st setclear othermeterupdate msize bwlimit showoutput a = go st
  where
	go (MessageState { outputType = QuietOutput }) = nometer
	go (MessageState { outputType = NormalOutput, concurrentOutputEnabled = False }) = do
		showoutput
		meter <- liftIO $ mkMeter msize $
			displayMeterHandle stdout bandwidthMeter
		let clear = clearMeterHandle meter stdout
		setclear clear
		m <- liftIO $ rateLimitMeterUpdate consoleratelimit meter $
			updateMeter meter
		r <- a meter =<< mkmeterupdate m
		setclear noop
		liftIO clear
		return r
	go (MessageState { outputType = NormalOutput, concurrentOutputEnabled = True }) =
		withProgressRegion st $ \r -> do
			meter <- liftIO $ mkMeter msize $ \_ msize' old new ->
				let s = bandwidthMeter msize' old new
				in Regions.setConsoleRegion r ('\n' : s)
			m <- liftIO $ rateLimitMeterUpdate consoleratelimit meter $
				updateMeter meter
			a meter =<< mkmeterupdate m
	go (MessageState { outputType = JSONOutput jsonoptions })
		| jsonProgress jsonoptions = do
			let buf = jsonBuffer st
			meter <- liftIO $ mkMeter msize $ \_ msize' _old new ->
				JSON.progress buf msize' (meterBytesProcessed new)
			m <- liftIO $ rateLimitMeterUpdate jsonratelimit meter $
				updateMeter meter
			a meter =<< mkmeterupdate m
		| otherwise = nometer
	go (MessageState { outputType = SerializedOutput h _ }) = do
		liftIO $ outputSerialized h BeginProgressMeter
		case msize of
			Just sz -> liftIO $ outputSerialized h $ UpdateProgressMeterTotalSize sz
			Nothing -> noop
		szv <- liftIO $ newIORef msize
		meter <- liftIO $ mkMeter msize $ \_ msize' _old new -> do
			case msize' of
				Just sz | msize' /= msize -> do
					psz <- readIORef szv
					when (msize' /= psz) $ do
						writeIORef szv msize'
						outputSerialized h $ UpdateProgressMeterTotalSize sz
				_ -> noop
			outputSerialized h $ UpdateProgressMeter $
				meterBytesProcessed new
		m <- liftIO $ rateLimitMeterUpdate minratelimit meter $
			updateMeter meter
		(a meter =<< mkmeterupdate m)
			`finally` (liftIO $ outputSerialized h EndProgressMeter)
	nometer = do
		dummymeter <- liftIO $ mkMeter Nothing $
			\_ _ _ _ -> return ()
		a dummymeter =<< mkmeterupdate (const noop)

	mkmeterupdate m =
		let mu = case othermeterupdate of
			Nothing -> m
			Just om -> combineMeterUpdate m om
		in case bwlimit of
			Nothing -> return mu
			Just (BwRate sz duration) -> liftIO $
				bwLimitMeterUpdate sz duration mu

	consoleratelimit = 0.2

	jsonratelimit = 0.1

	minratelimit = min consoleratelimit jsonratelimit
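
{- Illustrative sketch (not part of git-annex): metered can also forward
 - progress to an extra callback (for example, one reporting over a remote
 - protocol) and apply a bandwidth limit when one is configured; both end up
 - combined by mkmeterupdate above. -}
exampleMeteredWith :: MeterUpdate -> Maybe BwRate -> Key -> (MeterUpdate -> Annex a) -> Annex a
exampleMeteredWith reportprogress mbwlimit k a =
	metered (Just reportprogress) k mbwlimit $ \_meter p -> a p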

{- Poll file size to display meter. -}
meteredFile :: RawFilePath -> Maybe MeterUpdate -> Key -> (MeterUpdate -> Annex a) -> Annex a
meteredFile file combinemeterupdate key a =
	metered combinemeterupdate key Nothing $ \_ p ->
		watchFileSize file p a

{- Progress dots. -}
showProgressDots :: Annex ()
showProgressDots = outputMessage JSON.none id "."

{- Runs a command that may output progress to either stdout or
 - stderr, as well as other messages.
 -
 - In quiet mode, the output is suppressed, except for error messages.
 -}
progressCommand :: FilePath -> [CommandParam] -> Annex Bool
progressCommand cmd params = progressCommandEnv cmd params Nothing

progressCommandEnv :: FilePath -> [CommandParam] -> Maybe [(String, String)] -> Annex Bool
progressCommandEnv cmd params environ = ifM commandProgressDisabled
	( do
		oh <- mkOutputHandler
		liftIO $ demeterCommandEnv oh cmd params environ
	, liftIO $ boolSystemEnv cmd params environ
	)
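
{- Illustrative sketch (not part of git-annex): running an external command
 - whose own progress output should be stripped when progress display is
 - disabled. The rsync invocation and paths are hypothetical. -}
exampleProgressCommand :: FilePath -> FilePath -> Annex Bool
exampleProgressCommand src dest = progressCommand "rsync"
	[ Param "--progress", File src, File dest ]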

mkOutputHandler :: Annex OutputHandler
mkOutputHandler = OutputHandler
	<$> commandProgressDisabled
	<*> mkStderrEmitter

mkOutputHandlerQuiet :: Annex OutputHandler
mkOutputHandlerQuiet = OutputHandler
	<$> pure True
	<*> mkStderrEmitter

mkStderrRelayer :: Annex (ProcessHandle -> Handle -> IO ())
mkStderrRelayer = do
	quiet <- commandProgressDisabled
	emitter <- mkStderrEmitter
	return $ \ph h -> avoidProgress quiet ph h emitter

{- Generates an IO action that can be used to emit stderr.
 -
 - When a progress meter is displayed, this takes care to avoid
 - messing it up with interleaved stderr from a command.
 -}
mkStderrEmitter :: Annex (String -> IO ())
mkStderrEmitter = withMessageState go
  where
	go s
		| concurrentOutputEnabled s = return (Console.errorConcurrent . safeOutput)
		| otherwise = return (hPutStrLn stderr . safeOutput)
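
{- Illustrative sketch (not part of git-annex): emitting a warning on stderr
 - in a way that will not garble a concurrent progress display. -}
exampleWarnStderr :: String -> Annex ()
exampleWarnStderr msg = do
	emitter <- mkStderrEmitter
	liftIO $ emitter msg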