Add progress bar display to the directory special remote.
So far I've only written progress bars for sending files, not yet for receiving. The external cp command is no longer used at all; ByteString IO is fast enough.
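To illustrate the "ByteString IO is fast enough" point, here is a minimal sketch (not code from this commit; the helper name is made up) of copying a file with lazy ByteString IO instead of shelling out to cp. The read and write proceed chunk by chunk, so memory use stays bounded:

    import qualified Data.ByteString.Lazy as L

    -- Hypothetical sketch: a lazy ByteString copy in place of an external cp.
    -- L.readFile is lazy, so the whole file is never held in memory at once.
    copyViaByteString :: FilePath -> FilePath -> IO ()
    copyViaByteString src dest = L.writeFile dest =<< L.readFile src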
parent 8fc533643d
commit 9856c24a59
4 changed files with 86 additions and 35 deletions
Messages.hs | 25
@@ -10,6 +10,8 @@ module Messages (
     showNote,
     showAction,
     showProgress,
+    metered,
+    MeterUpdate,
     showSideAction,
     showOutput,
     showLongNote,
@@ -29,9 +31,13 @@ module Messages (
 ) where
 
 import Text.JSON
+import Data.Progress.Meter
+import Data.Progress.Tracker
+import Data.Quantity
 
 import Common
 import Types
+import Types.Key
 import qualified Annex
 import qualified Messages.JSON as JSON
 
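For orientation, the Data.Progress.* and Data.Quantity modules pulled in here are from the MissingH library. A hedged standalone sketch of that API, using only the calls that appear in the hunk below (the function name and total are made up):

    -- Hypothetical sketch of the progress-meter API: track an expected total,
    -- render it in bytes ("B", 20 is presumably the meter width) using binary
    -- units, bump the tracker, then redraw and finally clear the meter.
    demoMeter :: Integer -> IO ()
    demoMeter total = do
        progress <- newProgress "" total
        meter <- newMeter progress "B" 20 (renderNums binaryOpts 1)
        displayMeter stdout meter
        incrP progress (total `div` 2)
        displayMeter stdout meter
        clearMeter stdout meter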
@@ -46,10 +52,29 @@ showNote s = handle (JSON.note s) $
 showAction :: String -> Annex ()
 showAction s = showNote $ s ++ "..."
 
 {- Progress dots. -}
 showProgress :: Annex ()
 showProgress = handle q $
     flushed $ putStr "."
+
+{- Shows a progress meter while performing a transfer of a key.
+ - The action is passed a callback to use to update the meter. -}
+type MeterUpdate = Integer -> IO ()
+metered :: Key -> (MeterUpdate -> Annex a) -> Annex a
+metered key a = Annex.getState Annex.output >>= go (keySize key)
+    where
+        go (Just size) Annex.NormalOutput = do
+            progress <- liftIO $ newProgress "" size
+            meter <- liftIO $ newMeter progress "B" 20 (renderNums binaryOpts 1)
+            showOutput
+            liftIO $ displayMeter stdout meter
+            r <- a $ \n -> liftIO $ do
+                incrP progress n
+                displayMeter stdout meter
+            liftIO $ clearMeter stdout meter
+            return r
+        go _ _ = a (const $ return ())
 
 showSideAction :: String -> Annex ()
 showSideAction s = handle q $
     putStrLn $ "(" ++ s ++ "...)"
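A hedged usage sketch of the new combinator (a hypothetical caller, not part of this diff; it leans on the meteredWriteFile helper added to the directory remote below): metered looks up the key's size, draws the meter, and passes the action a MeterUpdate callback that is fed the number of bytes just written. When the key size is unknown or output is quiet, it simply runs the action with a no-op callback.

    -- Hypothetical caller, for illustration only.
    upload :: Key -> FilePath -> FilePath -> Annex Bool
    upload key src dest = metered key $ \meterupdate -> liftIO $ do
        meteredWriteFile meterupdate dest =<< L.readFile src
        return True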
Remote/Directory.hs
@@ -8,7 +8,9 @@
 module Remote.Directory (remote) where
 
 import qualified Data.ByteString.Lazy.Char8 as L
+import qualified Data.ByteString.Char8 as S
 import qualified Data.Map as M
+import Control.Exception (bracket)
 
 import Common.Annex
 import Utility.CopyFile
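The two new imports serve the helpers added in the next hunk: strict Data.ByteString.Char8 writes the individual chunks of a lazy ByteString, and bracket makes sure the destination handle is closed even if a write fails. A minimal sketch of that open/write/close pattern (the helper name is made up):

    -- Hypothetical sketch of the bracket discipline used below: hClose runs
    -- whether or not writing the chunks throws.
    writeChunks :: FilePath -> [S.ByteString] -> IO ()
    writeChunks dest cs = bracket (openFile dest WriteMode) hClose $ \h ->
        mapM_ (S.hPut h) cs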
@@ -129,46 +131,71 @@ withStoredFiles = withCheckedFiles doesFileExist
 store :: FilePath -> ChunkSize -> Key -> Annex Bool
 store d chunksize k = do
     src <- inRepo $ gitAnnexLocation k
-    liftIO $ catchBoolIO $ storeHelper d chunksize k $ \dests ->
-        case chunksize of
-            Nothing -> do
-                let dest = Prelude.head dests
-                ok <- copyFileExternal src dest
-                return $ if ok then [dest] else []
-            Just _ -> storeSplit chunksize dests =<< L.readFile src
+    metered k $ \meterupdate ->
+        liftIO $ catchBoolIO $ storeHelper d chunksize k $ \dests ->
+            case chunksize of
+                Nothing -> do
+                    let dest = Prelude.head dests
+                    meteredWriteFile meterupdate dest
+                        =<< L.readFile src
+                    return [dest]
+                Just _ ->
+                    storeSplit meterupdate chunksize dests
+                        =<< L.readFile src
 
 storeEncrypted :: FilePath -> ChunkSize -> (Cipher, Key) -> Key -> Annex Bool
 storeEncrypted d chunksize (cipher, enck) k = do
     src <- inRepo $ gitAnnexLocation k
-    liftIO $ catchBoolIO $ storeHelper d chunksize enck $ encrypt src
-    where
-        encrypt src dests = withEncryptedContent cipher (L.readFile src) $ \s ->
-            case chunksize of
-                Nothing -> do
-                    let dest = Prelude.head dests
-                    L.writeFile dest s
-                    return [dest]
-                Just _ -> storeSplit chunksize dests s
+    metered k $ \meterupdate ->
+        liftIO $ catchBoolIO $ storeHelper d chunksize enck $ \dests ->
+            withEncryptedContent cipher (L.readFile src) $ \s ->
+                case chunksize of
+                    Nothing -> do
+                        let dest = Prelude.head dests
+                        meteredWriteFile meterupdate dest s
+                        return [dest]
+                    Just _ -> storeSplit meterupdate chunksize dests s
 
-{- Splits a ByteString into chunks and writes to dests.
+{- Splits a ByteString into chunks and writes to dests, obeying configured
+ - chunk size (not to be confused with the L.ByteString chunk size).
  - Note: Must always write at least one file, even for empty ByteString. -}
-storeSplit :: ChunkSize -> [FilePath] -> L.ByteString -> IO [FilePath]
-storeSplit Nothing _ _ = error "bad storeSplit call"
-storeSplit _ [] _ = error "bad storeSplit call"
-storeSplit (Just chunksize) alldests@(firstdest:_) s
-    | L.null s = do
+storeSplit :: MeterUpdate -> ChunkSize -> [FilePath] -> L.ByteString -> IO [FilePath]
+storeSplit _ Nothing _ _ = error "bad storeSplit call"
+storeSplit _ _ [] _ = error "bad storeSplit call"
+storeSplit meterupdate (Just chunksize) alldests@(firstdest:_) b
+    | L.null b = do
         -- must always write at least one file, even for empty
-        L.writeFile firstdest s
+        L.writeFile firstdest b
         return [firstdest]
-    | otherwise = storeSplit' chunksize alldests s []
-storeSplit' :: Int64 -> [FilePath] -> L.ByteString -> [FilePath] -> IO [FilePath]
-storeSplit' _ [] _ _ = error "expected an infinite list"
-storeSplit' chunksize (d:dests) s c
-    | L.null s = return $ reverse c
-    | otherwise = do
-        let (chunk, rest) = L.splitAt chunksize s
-        L.writeFile d chunk
-        storeSplit' chunksize dests rest (d:c)
+    | otherwise = storeSplit' meterupdate chunksize alldests (L.toChunks b) []
+storeSplit' :: MeterUpdate -> Int64 -> [FilePath] -> [S.ByteString] -> [FilePath] -> IO [FilePath]
+storeSplit' _ _ [] _ _ = error "ran out of dests"
+storeSplit' _ _ _ [] c = return $ reverse c
+storeSplit' meterupdate chunksize (d:dests) bs c = do
+    bs' <- bracket (openFile d WriteMode) hClose (feed chunksize bs)
+    storeSplit' meterupdate chunksize dests bs' (d:c)
+    where
+        feed _ [] _ = return []
+        feed sz (l:ls) h = do
+            let s = fromIntegral $ S.length l
+            if s <= sz
+                then do
+                    S.hPut h l
+                    meterupdate $ toInteger s
+                    feed (sz - s) ls h
+                else return (l:ls)
 
+{- Write a L.ByteString to a file, updating a progress meter
+ - after each chunk of the L.ByteString, typically every 64 kb or so. -}
+meteredWriteFile :: MeterUpdate -> FilePath -> L.ByteString -> IO ()
+meteredWriteFile meterupdate dest b =
+    bracket (openFile dest WriteMode) hClose (feed $ L.toChunks b)
+    where
+        feed [] _ = return ()
+        feed (l:ls) h = do
+            S.hPut h l
+            meterupdate $ toInteger $ S.length l
+            feed ls h
+
 {- Generates a list of destinations to write to in order to store a key.
  - When chunksize is specified, this list will be a list of chunks.
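One hedged note on the accounting above: the meter is advanced once per underlying strict chunk of the lazy ByteString, not per byte, and feed only ever writes whole chunks, so each chunk file is filled with complete ~64 kb pieces until the configured chunk size would be exceeded. A rough GHCi-style sketch (hypothetical paths; print stands in for a real MeterUpdate):

    -- Purely illustrative: each write prints the size of the chunk just written.
    ghci> meteredWriteFile print "/tmp/copy" =<< L.readFile "/tmp/original"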
@@ -144,9 +144,7 @@ storeHelper (conn, bucket) r k file = do
         case fromJust $ M.lookup "storageclass" $ fromJust $ config r of
             "REDUCED_REDUNDANCY" -> REDUCED_REDUNDANCY
             _ -> STANDARD
-        getsize = do
-            s <- liftIO $ getFileStatus file
-            return $ fileSize s
+        getsize = fileSize <$> (liftIO $ getFileStatus file)
 
         xheaders = filter isxheader $ M.assocs $ fromJust $ config r
         isxheader (h, _) = "x-amz-" `isPrefixOf` h
debian/changelog | 1
@@ -4,6 +4,7 @@ git-annex (3.20120230) UNRELEASED; urgency=low
     which can read better than the old "." (which still works too).
   * Directory special remotes now support chunking files written to them,
     avoiding writing files larger than a specified size.
+  * Add progress bar display to the directory special remote.
 
  -- Joey Hess <joeyh@debian.org>  Thu, 01 Mar 2012 22:34:27 -0400
 