prepare for new style chunking

Moved the old legacy chunking code into its own module, and cleaned up
the directory and webdav remotes' use of it, so that when no chunking
is configured, that code is not used.

The config for new style chunking will be chunk=1M instead of chunksize=1M.
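
(Illustration, not part of this commit's diff: a self-contained sketch of how
the two config keys are expected to map onto the new ChunkConfig type. The
simplified readSz parser stands in for git-annex's readSize dataUnits, and the
plain Map stands in for RemoteConfig.)

import qualified Data.Map as M
import Data.Char (isDigit)
import Data.Int (Int64)

data ChunkConfig
    = NoChunks
    | ChunkSize Int64
    | LegacyChunkSize Int64
    deriving (Show)

-- Simplified stand-in for readSize dataUnits; accepts e.g. "1M".
readSz :: String -> Maybe Int64
readSz s = case span isDigit s of
    (ds@(_:_), unit) -> (read ds *) <$> multiplier unit
    _ -> Nothing
  where
    multiplier "" = Just 1
    multiplier "K" = Just 1024
    multiplier "M" = Just (1024 * 1024)
    multiplier "G" = Just (1024 * 1024 * 1024)
    multiplier _ = Nothing

-- Mirrors the lookup order of chunkConfig in the diff below:
-- legacy chunksize= is checked first, then new style chunk=.
chunkConfig :: M.Map String String -> ChunkConfig
chunkConfig m = case M.lookup "chunksize" m of
    Just v -> LegacyChunkSize (sz "chunksize" v)
    Nothing -> case M.lookup "chunk" m of
        Just v -> ChunkSize (sz "chunk" v)
        Nothing -> NoChunks
  where
    sz f v = case readSz v of
        Just n | n > 0 -> n
        _ -> error ("bad " ++ f)

main :: IO ()
main = do
    print $ chunkConfig (M.fromList [("chunk", "1M")])     -- ChunkSize 1048576
    print $ chunkConfig (M.fromList [("chunksize", "1M")]) -- LegacyChunkSize 1048576
    print $ chunkConfig M.empty                            -- NoChunks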

There should be no behavior changes from this commit.

This commit was sponsored by Andreas Laas.
Joey Hess 2014-07-24 14:49:22 -04:00
parent d751591ac8
commit 9e2d49d441
4 changed files with 233 additions and 196 deletions

Remote/Directory.hs

@@ -12,7 +12,6 @@ module Remote.Directory (remote) where
import qualified Data.ByteString.Lazy as L
import qualified Data.ByteString as S
import qualified Data.Map as M
import Data.Int
import Common.Annex
import Types.Remote
@@ -24,6 +23,7 @@ import Utility.FileMode
import Remote.Helper.Special
import Remote.Helper.Encryptable
import Remote.Helper.Chunked
import qualified Remote.Helper.Chunked.Legacy as Legacy
import Crypto
import Annex.Content
import Annex.UUID
@@ -40,19 +40,19 @@ remote = RemoteType {
gen :: Git.Repo -> UUID -> RemoteConfig -> RemoteGitConfig -> Annex (Maybe Remote)
gen r u c gc = do
cst <- remoteCost gc cheapRemoteCost
let chunksize = chunkSize c
let chunkconfig = chunkConfig c
return $ Just $ encryptableRemote c
(storeEncrypted dir (getGpgEncParams (c,gc)) chunksize)
(retrieveEncrypted dir chunksize)
(storeEncrypted dir (getGpgEncParams (c,gc)) chunkconfig)
(retrieveEncrypted dir chunkconfig)
Remote {
uuid = u,
cost = cst,
name = Git.repoDescribe r,
storeKey = store dir chunksize,
retrieveKeyFile = retrieve dir chunksize,
retrieveKeyFileCheap = retrieveCheap dir chunksize,
storeKey = store dir chunkconfig,
retrieveKeyFile = retrieve dir chunkconfig,
retrieveKeyFileCheap = retrieveCheap dir chunkconfig,
removeKey = remove dir,
hasKey = checkPresent dir chunksize,
hasKey = checkPresent dir chunkconfig,
hasKeyCheap = True,
whereisKey = Nothing,
remoteFsck = Nothing,
@@ -97,77 +97,77 @@ storeDir d k = addTrailingPathSeparator $ d </> hashDirLower k </> keyFile k
tmpDir :: FilePath -> Key -> FilePath
tmpDir d k = addTrailingPathSeparator $ d </> "tmp" </> keyFile k
withCheckedFiles :: (FilePath -> IO Bool) -> ChunkSize -> FilePath -> Key -> ([FilePath] -> IO Bool) -> IO Bool
withCheckedFiles :: (FilePath -> IO Bool) -> ChunkConfig -> FilePath -> Key -> ([FilePath] -> IO Bool) -> IO Bool
withCheckedFiles _ _ [] _ _ = return False
withCheckedFiles check Nothing d k a = go $ locations d k
where
go [] = return False
go (f:fs) = ifM (check f) ( a [f] , go fs )
withCheckedFiles check (Just _) d k a = go $ locations d k
withCheckedFiles check (LegacyChunkSize _) d k a = go $ locations d k
where
go [] = return False
go (f:fs) = do
let chunkcount = f ++ chunkCount
let chunkcount = f ++ Legacy.chunkCount
ifM (check chunkcount)
( do
chunks <- listChunks f <$> readFile chunkcount
chunks <- Legacy.listChunks f <$> readFile chunkcount
ifM (allM check chunks)
( a chunks , return False )
, do
chunks <- probeChunks f check
chunks <- Legacy.probeChunks f check
if null chunks
then go fs
else a chunks
)
withCheckedFiles check _ d k a = go $ locations d k
where
go [] = return False
go (f:fs) = ifM (check f) ( a [f] , go fs )
withStoredFiles :: ChunkSize -> FilePath -> Key -> ([FilePath] -> IO Bool) -> IO Bool
withStoredFiles :: ChunkConfig -> FilePath -> Key -> ([FilePath] -> IO Bool) -> IO Bool
withStoredFiles = withCheckedFiles doesFileExist
store :: FilePath -> ChunkSize -> Key -> AssociatedFile -> MeterUpdate -> Annex Bool
store d chunksize k _f p = sendAnnex k (void $ remove d k) $ \src ->
store :: FilePath -> ChunkConfig -> Key -> AssociatedFile -> MeterUpdate -> Annex Bool
store d chunkconfig k _f p = sendAnnex k (void $ remove d k) $ \src ->
metered (Just p) k $ \meterupdate ->
storeHelper d chunksize k k $ \dests ->
case chunksize of
Nothing -> do
storeHelper d chunkconfig k k $ \dests ->
case chunkconfig of
LegacyChunkSize chunksize ->
storeLegacyChunked meterupdate chunksize dests
=<< L.readFile src
_ -> do
let dest = Prelude.head dests
meteredWriteFile meterupdate dest
=<< L.readFile src
return [dest]
Just _ ->
storeSplit meterupdate chunksize dests
=<< L.readFile src
storeEncrypted :: FilePath -> [CommandParam] -> ChunkSize -> (Cipher, Key) -> Key -> MeterUpdate -> Annex Bool
storeEncrypted d gpgOpts chunksize (cipher, enck) k p = sendAnnex k (void $ remove d enck) $ \src ->
storeEncrypted :: FilePath -> [CommandParam] -> ChunkConfig -> (Cipher, Key) -> Key -> MeterUpdate -> Annex Bool
storeEncrypted d gpgOpts chunkconfig (cipher, enck) k p = sendAnnex k (void $ remove d enck) $ \src ->
metered (Just p) k $ \meterupdate ->
storeHelper d chunksize enck k $ \dests ->
storeHelper d chunkconfig enck k $ \dests ->
encrypt gpgOpts cipher (feedFile src) $ readBytes $ \b ->
case chunksize of
Nothing -> do
case chunkconfig of
LegacyChunkSize chunksize ->
storeLegacyChunked meterupdate chunksize dests b
_ -> do
let dest = Prelude.head dests
meteredWriteFile meterupdate dest b
return [dest]
Just _ -> storeSplit meterupdate chunksize dests b
{- Splits a ByteString into chunks and writes to dests, obeying configured
- chunk size (not to be confused with the L.ByteString chunk size).
- Note: Must always write at least one file, even for empty ByteString. -}
storeSplit :: MeterUpdate -> ChunkSize -> [FilePath] -> L.ByteString -> IO [FilePath]
storeSplit _ Nothing _ _ = error "bad storeSplit call"
storeSplit _ _ [] _ = error "bad storeSplit call"
storeSplit meterupdate (Just chunksize) alldests@(firstdest:_) b
storeLegacyChunked :: MeterUpdate -> Legacy.ChunkSize -> [FilePath] -> L.ByteString -> IO [FilePath]
storeLegacyChunked _ _ [] _ = error "bad storeLegacyChunked call"
storeLegacyChunked meterupdate chunksize alldests@(firstdest:_) b
| L.null b = do
-- must always write at least one file, even for empty
L.writeFile firstdest b
return [firstdest]
| otherwise = storeSplit' meterupdate chunksize alldests (L.toChunks b) []
storeSplit' :: MeterUpdate -> Int64 -> [FilePath] -> [S.ByteString] -> [FilePath] -> IO [FilePath]
storeSplit' _ _ [] _ _ = error "ran out of dests"
storeSplit' _ _ _ [] c = return $ reverse c
storeSplit' meterupdate chunksize (d:dests) bs c = do
| otherwise = storeLegacyChunked' meterupdate chunksize alldests (L.toChunks b) []
storeLegacyChunked' :: MeterUpdate -> Legacy.ChunkSize -> [FilePath] -> [S.ByteString] -> [FilePath] -> IO [FilePath]
storeLegacyChunked' _ _ [] _ _ = error "ran out of dests"
storeLegacyChunked' _ _ _ [] c = return $ reverse c
storeLegacyChunked' meterupdate chunksize (d:dests) bs c = do
bs' <- withFile d WriteMode $
feed zeroBytesProcessed chunksize bs
storeSplit' meterupdate chunksize dests bs' (d:c)
storeLegacyChunked' meterupdate chunksize dests bs' (d:c)
where
feed _ _ [] _ = return []
feed bytes sz (l:ls) h = do
@@ -181,19 +181,28 @@ storeSplit' meterupdate chunksize (d:dests) bs c = do
feed bytes' (sz - s) ls h
else return (l:ls)
storeHelper :: FilePath -> ChunkSize -> Key -> Key -> ([FilePath] -> IO [FilePath]) -> Annex Bool
storeHelper d chunksize key origkey storer = check <&&> go
storeHelper :: FilePath -> ChunkConfig -> Key -> Key -> ([FilePath] -> IO [FilePath]) -> Annex Bool
storeHelper d chunkconfig key origkey storer = check <&&> liftIO go
where
tmpdir = tmpDir d key
destdir = storeDir d key
{- An encrypted key does not have a known size,
- so check that the size of the original key is available as free
- space. -}
check = do
liftIO $ createDirectoryIfMissing True tmpdir
checkDiskSpace (Just tmpdir) origkey 0
go = liftIO $ catchBoolIO $
storeChunks key tmpdir destdir chunksize storer recorder finalizer
go = case chunkconfig of
NoChunks -> flip catchNonAsync (\e -> print e >> return False) $ do
let tmpf = tmpdir </> keyFile key
void $ storer [tmpf]
finalizer tmpdir destdir
return True
ChunkSize _ -> error "TODO: storeHelper with ChunkSize"
LegacyChunkSize _ -> Legacy.storeChunks key tmpdir destdir storer recorder finalizer
finalizer tmp dest = do
void $ tryIO $ allowWrite dest -- may already exist
void $ tryIO $ removeDirectoryRecursive dest -- or not exist
@@ -203,21 +212,22 @@ storeHelper d chunksize key origkey storer = check <&&> go
void $ tryIO $ do
mapM_ preventWrite =<< dirContents dest
preventWrite dest
recorder f s = do
void $ tryIO $ allowWrite f
writeFile f s
void $ tryIO $ preventWrite f
retrieve :: FilePath -> ChunkSize -> Key -> AssociatedFile -> FilePath -> MeterUpdate -> Annex Bool
retrieve d chunksize k _ f p = metered (Just p) k $ \meterupdate ->
liftIO $ withStoredFiles chunksize d k $ \files ->
retrieve :: FilePath -> ChunkConfig -> Key -> AssociatedFile -> FilePath -> MeterUpdate -> Annex Bool
retrieve d chunkconfig k _ f p = metered (Just p) k $ \meterupdate ->
liftIO $ withStoredFiles chunkconfig d k $ \files ->
catchBoolIO $ do
meteredWriteFileChunks meterupdate f files L.readFile
Legacy.meteredWriteFileChunks meterupdate f files L.readFile
return True
retrieveEncrypted :: FilePath -> ChunkSize -> (Cipher, Key) -> Key -> FilePath -> MeterUpdate -> Annex Bool
retrieveEncrypted d chunksize (cipher, enck) k f p = metered (Just p) k $ \meterupdate ->
liftIO $ withStoredFiles chunksize d enck $ \files ->
retrieveEncrypted :: FilePath -> ChunkConfig -> (Cipher, Key) -> Key -> FilePath -> MeterUpdate -> Annex Bool
retrieveEncrypted d chunkconfig (cipher, enck) k f p = metered (Just p) k $ \meterupdate ->
liftIO $ withStoredFiles chunkconfig d enck $ \files ->
catchBoolIO $ do
decrypt cipher (feeder files) $
readBytes $ meteredWriteFile meterupdate f
@@ -225,10 +235,12 @@ retrieveEncrypted d chunksize (cipher, enck) k f p = metered (Just p) k $ \meter
where
feeder files h = forM_ files $ L.hPut h <=< L.readFile
retrieveCheap :: FilePath -> ChunkSize -> Key -> FilePath -> Annex Bool
retrieveCheap _ (Just _) _ _ = return False -- no cheap retrieval for chunks
retrieveCheap :: FilePath -> ChunkConfig -> Key -> FilePath -> Annex Bool
-- no cheap retrieval for chunks
retrieveCheap _ (ChunkSize _) _ _ = return False
retrieveCheap _ (LegacyChunkSize _) _ _ = return False
#ifndef mingw32_HOST_OS
retrieveCheap d _ k f = liftIO $ withStoredFiles Nothing d k go
retrieveCheap d ck k f = liftIO $ withStoredFiles ck d k go
where
go [file] = catchBoolIO $ createSymbolicLink file f >> return True
go _files = return False
@@ -250,6 +262,6 @@ remove d k = liftIO $ do
where
dir = storeDir d k
checkPresent :: FilePath -> ChunkSize -> Key -> Annex (Either String Bool)
checkPresent d chunksize k = liftIO $ catchMsgIO $ withStoredFiles chunksize d k $
checkPresent :: FilePath -> ChunkConfig -> Key -> Annex (Either String Bool)
checkPresent d chunkconfig k = liftIO $ catchMsgIO $ withStoredFiles chunkconfig d k $
const $ return True -- withStoredFiles checked that it exists
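
(Editorial note, illustration only: the shape of the change in this file is
that every operation now pattern-matches on the three ChunkConfig constructors
instead of testing a Maybe chunk size. A condensed, hypothetical sketch of
that control flow, with a String payload and putStrLn standing in for real
storage; storeWith and chunksOf are not names from the diff.)

data ChunkConfig = NoChunks | ChunkSize Int | LegacyChunkSize Int

-- NoChunks takes a direct path that never touches legacy code (hence
-- "no behavior changes"), ChunkSize is the diff's TODO placeholder,
-- and LegacyChunkSize routes through the legacy chunking logic.
storeWith :: ChunkConfig -> String -> IO Bool
storeWith NoChunks payload = do
    putStrLn ("whole file: " ++ payload)
    return True
storeWith (ChunkSize _) _ =
    error "TODO: new style chunking not implemented yet"
storeWith (LegacyChunkSize n) payload = do
    mapM_ (putStrLn . ("legacy chunk: " ++)) (chunksOf n payload)
    return True
  where
    chunksOf k s
        | null s = []
        | otherwise = take k s : chunksOf k (drop k s)

main :: IO ()
main = do
    _ <- storeWith NoChunks "hello world"
    _ <- storeWith (LegacyChunkSize 4) "hello world"
    return ()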

Remote/Helper/Chunked.hs

@@ -1,144 +1,31 @@
{- git-annex chunked remotes
-
- Copyright 2012 Joey Hess <joey@kitenet.net>
- Copyright 2012-2014 Joey Hess <joey@kitenet.net>
-
- Licensed under the GNU GPL version 3 or higher.
-}
module Remote.Helper.Chunked where
import Common.Annex
import Utility.DataUnits
import Types.Remote
import Utility.Metered
import qualified Data.Map as M
import qualified Data.ByteString.Lazy as L
import Data.Int
import qualified Control.Exception as E
type ChunkSize = Maybe Int64
data ChunkConfig
= NoChunks
| ChunkSize Int64
| LegacyChunkSize Int64
{- Gets a remote's configured chunk size. -}
chunkSize :: RemoteConfig -> ChunkSize
chunkSize m =
chunkConfig :: RemoteConfig -> ChunkConfig
chunkConfig m =
case M.lookup "chunksize" m of
Nothing -> Nothing
Just v -> case readSize dataUnits v of
Nothing -> error "bad chunksize"
Just size
| size <= 0 -> error "bad chunksize"
| otherwise -> Just $ fromInteger size
{- This is an extension that's added to the usual file (or whatever)
- where the remote stores a key. -}
type ChunkExt = String
{- A record of the number of chunks used.
-
- While this can be guessed at based on the size of the key, encryption
- makes that larger. Also, using this helps deal with changes to chunksize
- over the life of a remote.
-}
chunkCount :: ChunkExt
chunkCount = ".chunkcount"
{- An infinite stream of extensions to use for chunks. -}
chunkStream :: [ChunkExt]
chunkStream = map (\n -> ".chunk" ++ show n) [1 :: Integer ..]
{- Parses the String from the chunkCount file, and returns the files that
- are used to store the chunks. -}
listChunks :: FilePath -> String -> [FilePath]
listChunks basedest chunkcount = take count $ map (basedest ++) chunkStream
Nothing -> case M.lookup "chunk" m of
Nothing -> NoChunks
Just v -> ChunkSize $ readsz v "chunk"
Just v -> LegacyChunkSize $ readsz v "chunksize"
where
count = fromMaybe 0 $ readish chunkcount
{- For use when there is no chunkCount file; uses the action to find
- chunks, and returns them, or Nothing if none found. Relies on
- storeChunks's finalizer atomically moving the chunks into place once all
- are written.
-
- This is only needed to work around a bug that caused the chunkCount file
- not to be written.
-}
probeChunks :: FilePath -> (FilePath -> IO Bool) -> IO [FilePath]
probeChunks basedest check = go [] $ map (basedest ++) chunkStream
where
go l [] = return (reverse l)
go l (c:cs) = ifM (check c)
( go (c:l) cs
, go l []
)
{- Given the base destination to use to store a value,
- generates a stream of temporary destinations (just one when not chunking)
- and passes it to an action, which should chunk and store the data,
- and return the destinations it stored to, or [] on error. Then
- calls the recorder to write the chunk count (if chunking). Finally, the
- finalizer is called to rename the tmp into the dest
- (and do any other cleanup).
-}
storeChunks :: Key -> FilePath -> FilePath -> ChunkSize -> ([FilePath] -> IO [FilePath]) -> (FilePath -> String -> IO ()) -> (FilePath -> FilePath -> IO ()) -> IO Bool
storeChunks key tmp dest chunksize storer recorder finalizer = either onerr return
=<< (E.try go :: IO (Either E.SomeException Bool))
where
go = do
stored <- storer tmpdests
when (isJust chunksize) $ do
let chunkcount = basef ++ chunkCount
recorder chunkcount (show $ length stored)
finalizer tmp dest
return (not $ null stored)
onerr e = do
print e
return False
basef = tmp ++ keyFile key
tmpdests
| isNothing chunksize = [basef]
| otherwise = map (basef ++ ) chunkStream
{- Given a list of destinations to use, chunks the data according to the
- ChunkSize, and runs the storer action to store each chunk. Returns
- the destinations where data was stored, or [] on error.
-
- This buffers each chunk in memory.
- More optimal versions of this can be written, that rely
- on L.toChunks to split the lazy bytestring into chunks (typically
- smaller than the ChunkSize), and eg, write those chunks to a Handle.
- But this is the best that can be done with the storer interface that
- writes a whole L.ByteString at a time.
-}
storeChunked :: ChunkSize -> [FilePath] -> (FilePath -> L.ByteString -> IO ()) -> L.ByteString -> IO [FilePath]
storeChunked chunksize dests storer content = either onerr return
=<< (E.try (go chunksize dests) :: IO (Either E.SomeException [FilePath]))
where
go _ [] = return [] -- no dests!?
go Nothing (d:_) = do
storer d content
return [d]
go (Just sz) _
-- always write a chunk, even if the data is 0 bytes
| L.null content = go Nothing dests
| otherwise = storechunks sz [] dests content
onerr e = do
print e
return []
storechunks _ _ [] _ = return [] -- ran out of dests
storechunks sz useddests (d:ds) b
| L.null b = return $ reverse useddests
| otherwise = do
let (chunk, b') = L.splitAt sz b
storer d chunk
storechunks sz (d:useddests) ds b'
{- Writes a series of chunks to a file. The feeder is called to get
- each chunk. -}
meteredWriteFileChunks :: MeterUpdate -> FilePath -> [v] -> (v -> IO L.ByteString) -> IO ()
meteredWriteFileChunks meterupdate dest chunks feeder =
withBinaryFile dest WriteMode $ \h ->
forM_ chunks $
meteredWrite meterupdate h <=< feeder
readsz v f = case readSize dataUnits v of
Just size | size > 0 -> fromInteger size
_ -> error ("bad " ++ f)

Remote/Helper/Chunked/Legacy.hs

@@ -0,0 +1,127 @@
{- legacy git-annex chunked remotes
-
- Copyright 2012 Joey Hess <joey@kitenet.net>
-
- Licensed under the GNU GPL version 3 or higher.
-}
module Remote.Helper.Chunked.Legacy where
import Common.Annex
import Utility.Metered
import qualified Data.ByteString.Lazy as L
import Data.Int
import qualified Control.Exception as E
type ChunkSize = Int64
{- This is an extension that's added to the usual file (or whatever)
- where the remote stores a key. -}
type ChunkExt = String
{- A record of the number of chunks used.
-
- While this can be guessed at based on the size of the key, encryption
- makes that larger. Also, using this helps deal with changes to chunksize
- over the life of a remote.
-}
chunkCount :: ChunkExt
chunkCount = ".chunkcount"
{- An infinite stream of extensions to use for chunks. -}
chunkStream :: [ChunkExt]
chunkStream = map (\n -> ".chunk" ++ show n) [1 :: Integer ..]
{- Parses the String from the chunkCount file, and returns the files that
- are used to store the chunks. -}
listChunks :: FilePath -> String -> [FilePath]
listChunks basedest chunkcount = take count $ map (basedest ++) chunkStream
where
count = fromMaybe 0 $ readish chunkcount
{- For use when there is no chunkCount file; uses the action to find
- chunks, and returns them, or Nothing if none found. Relies on
- storeChunks's finalizer atomically moving the chunks into place once all
- are written.
-
- This is only needed to work around a bug that caused the chunkCount file
- not to be written.
-}
probeChunks :: FilePath -> (FilePath -> IO Bool) -> IO [FilePath]
probeChunks basedest check = go [] $ map (basedest ++) chunkStream
where
go l [] = return (reverse l)
go l (c:cs) = ifM (check c)
( go (c:l) cs
, go l []
)
{- Given the base destination to use to store a value,
- generates a stream of temporary destinations,
- and passes it to an action, which should chunk and store the data,
- and return the destinations it stored to, or [] on error. Then
- calls the recorder to write the chunk count. Finally, the
- finalizer is called to rename the tmp into the dest
- (and do any other cleanup).
-}
storeChunks :: Key -> FilePath -> FilePath -> ([FilePath] -> IO [FilePath]) -> (FilePath -> String -> IO ()) -> (FilePath -> FilePath -> IO ()) -> IO Bool
storeChunks key tmp dest storer recorder finalizer = either onerr return
=<< (E.try go :: IO (Either E.SomeException Bool))
where
go = do
stored <- storer tmpdests
let chunkcount = basef ++ chunkCount
recorder chunkcount (show $ length stored)
finalizer tmp dest
return (not $ null stored)
onerr e = do
print e
return False
basef = tmp ++ keyFile key
tmpdests = map (basef ++ ) chunkStream
{- Given a list of destinations to use, chunks the data according to the
- ChunkSize, and runs the storer action to store each chunk. Returns
- the destinations where data was stored, or [] on error.
-
- This buffers each chunk in memory.
- More optimal versions of this can be written, that rely
- on L.toChunks to split the lazy bytestring into chunks (typically
- smaller than the ChunkSize), and eg, write those chunks to a Handle.
- But this is the best that can be done with the storer interface that
- writes a whole L.ByteString at a time.
-}
storeChunked :: ChunkSize -> [FilePath] -> (FilePath -> L.ByteString -> IO ()) -> L.ByteString -> IO [FilePath]
storeChunked chunksize dests storer content = either onerr return
=<< (E.try (go (Just chunksize) dests) :: IO (Either E.SomeException [FilePath]))
where
go _ [] = return [] -- no dests!?
go Nothing (d:_) = do
storer d content
return [d]
go (Just sz) _
-- always write a chunk, even if the data is 0 bytes
| L.null content = go Nothing dests
| otherwise = storechunks sz [] dests content
onerr e = do
print e
return []
storechunks _ _ [] _ = return [] -- ran out of dests
storechunks sz useddests (d:ds) b
| L.null b = return $ reverse useddests
| otherwise = do
let (chunk, b') = L.splitAt sz b
storer d chunk
storechunks sz (d:useddests) ds b'
{- Writes a series of chunks to a file. The feeder is called to get
- each chunk. -}
meteredWriteFileChunks :: MeterUpdate -> FilePath -> [v] -> (v -> IO L.ByteString) -> IO ()
meteredWriteFileChunks meterupdate dest chunks feeder =
withBinaryFile dest WriteMode $ \h ->
forM_ chunks $
meteredWrite meterupdate h <=< feeder
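
(Illustration, not part of the new file: the on-disk layout this module
manages. A key stored at base path B becomes B.chunk1, B.chunk2, ..., with
B.chunkcount recording how many chunks were written. A runnable sketch of
listChunks, using readMaybe where the module above uses git-annex's readish:)

import Data.Maybe (fromMaybe)
import Text.Read (readMaybe)

chunkStream :: [String]
chunkStream = map (\n -> ".chunk" ++ show n) [1 :: Integer ..]

listChunks :: FilePath -> String -> [FilePath]
listChunks basedest chunkcount = take count $ map (basedest ++) chunkStream
  where
    count = fromMaybe 0 (readMaybe chunkcount)

main :: IO ()
main = mapM_ putStrLn (listChunks "KEY" "3")
-- KEY.chunk1
-- KEY.chunk2
-- KEY.chunk3
-- The count string "3" would come from reading the KEY.chunkcount file;
-- when that file is missing, probeChunks (above) finds the chunks instead.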

Remote/WebDAV.hs

@@ -33,6 +33,7 @@ import Config.Cost
import Remote.Helper.Special
import Remote.Helper.Encryptable
import Remote.Helper.Chunked
import qualified Remote.Helper.Chunked.Legacy as Legacy
import Crypto
import Creds
import Utility.Metered
@@ -111,13 +112,21 @@ storeEncrypted r (cipher, enck) k p = metered (Just p) k $ \meterupdate ->
storeHelper :: Remote -> Key -> DavUrl -> DavUser -> DavPass -> L.ByteString -> IO Bool
storeHelper r k baseurl user pass b = catchBoolIO $ do
mkdirRecursiveDAV tmpurl user pass
storeChunks k tmpurl keyurl chunksize storer recorder finalizer
case chunkconfig of
NoChunks -> flip catchNonAsync (\e -> print e >> return False) $ do
storehttp tmpurl b
finalizer tmpurl keyurl
return True
ChunkSize _ -> error "TODO: storeHelper with ChunkSize"
LegacyChunkSize chunksize -> do
let storer urls = Legacy.storeChunked chunksize urls storehttp b
let recorder url s = storehttp url (L8.fromString s)
Legacy.storeChunks k tmpurl keyurl storer recorder finalizer
where
tmpurl = tmpLocation baseurl k
keyurl = davLocation baseurl k
chunksize = chunkSize $ config r
storer urls = storeChunked chunksize urls storehttp b
recorder url s = storehttp url (L8.fromString s)
chunkconfig = chunkConfig $ config r
finalizer srcurl desturl = do
void $ tryNonAsync (deleteDAV desturl user pass)
mkdirRecursiveDAV (urlParent desturl) user pass
@@ -131,7 +140,7 @@ retrieve :: Remote -> Key -> AssociatedFile -> FilePath -> MeterUpdate -> Annex
retrieve r k _f d p = metered (Just p) k $ \meterupdate ->
davAction r False $ \(baseurl, user, pass) -> liftIO $ catchBoolIO $
withStoredFiles r k baseurl user pass onerr $ \urls -> do
meteredWriteFileChunks meterupdate d urls $ \url -> do
Legacy.meteredWriteFileChunks meterupdate d urls $ \url -> do
mb <- getDAV url user pass
case mb of
Nothing -> throwIO "download failed"
@@ -200,20 +209,22 @@ withStoredFiles
-> (DavUrl -> IO a)
-> ([DavUrl] -> IO a)
-> IO a
withStoredFiles r k baseurl user pass onerr a
| isJust $ chunkSize $ config r = do
let chunkcount = keyurl ++ chunkCount
withStoredFiles r k baseurl user pass onerr a = case chunkconfig of
NoChunks -> a [keyurl]
ChunkSize _ -> error "TODO: withStoredFiles with ChunkSize"
LegacyChunkSize _ -> do
let chunkcount = keyurl ++ Legacy.chunkCount
v <- getDAV chunkcount user pass
case v of
Just s -> a $ listChunks keyurl $ L8.toString s
Just s -> a $ Legacy.listChunks keyurl $ L8.toString s
Nothing -> do
chunks <- probeChunks keyurl $ \u -> (== Right True) <$> existsDAV u user pass
chunks <- Legacy.probeChunks keyurl $ \u -> (== Right True) <$> existsDAV u user pass
if null chunks
then onerr chunkcount
else a chunks
| otherwise = a [keyurl]
where
keyurl = davLocation baseurl k ++ keyFile k
chunkconfig = chunkConfig $ config r
davAction :: Remote -> a -> ((DavUrl, DavUser, DavPass) -> Annex a) -> Annex a
davAction r unconfigured action = do