{- git-annex chunked remotes
 -
 - Copyright 2014 Joey Hess <joey@kitenet.net>
 -
 - Licensed under the GNU GPL version 3 or higher.
 -}

module Remote.Helper.Chunked (
	ChunkSize,
	ChunkConfig(..),
	describeChunkConfig,
	getChunkConfig,
	storeChunks,
	removeChunks,
	retrieveChunks,
	checkPresentChunks,
) where

import Common.Annex
import Utility.DataUnits
import Types.StoreRetrieve
import Types.Remote
import Types.Key
import Logs.Chunk
import Utility.Metered
import Crypto (EncKey)
import Backend (isStableKey)

import qualified Data.ByteString.Lazy as L
import qualified Data.Map as M

data ChunkConfig
	= NoChunks
	| UnpaddedChunks ChunkSize
	| LegacyChunks ChunkSize
	deriving (Show)

describeChunkConfig :: ChunkConfig -> String
describeChunkConfig NoChunks = "none"
describeChunkConfig (UnpaddedChunks sz) = describeChunkSize sz ++ " chunks"
describeChunkConfig (LegacyChunks sz) = describeChunkSize sz ++ " chunks (old style)"

describeChunkSize :: ChunkSize -> String
describeChunkSize sz = roughSize storageUnits False (fromIntegral sz)

noChunks :: ChunkConfig -> Bool
noChunks NoChunks = True
noChunks _ = False

getChunkConfig :: RemoteConfig -> ChunkConfig
getChunkConfig m =
	case M.lookup "chunksize" m of
		Nothing -> case M.lookup "chunk" m of
			Nothing -> NoChunks
			Just v -> readsz UnpaddedChunks v "chunk"
		Just v -> readsz LegacyChunks v "chunksize"
  where
	readsz c v f = case readSize dataUnits v of
		Just size
			| size == 0 -> NoChunks
			| size > 0 -> c (fromInteger size)
		_ -> error $ "bad configuration " ++ f ++ "=" ++ v

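{- Illustrative examples (a sketch only; assumes Utility.DataUnits accepts
 - the "1MiB" spelling, and uses hypothetical config maps):
 -
 -   getChunkConfig (M.fromList [("chunk", "1MiB")])      -- UnpaddedChunks 1048576
 -   getChunkConfig (M.fromList [("chunksize", "1MiB")])  -- LegacyChunks 1048576
 -   getChunkConfig (M.fromList [("chunk", "0")])         -- NoChunks
 -   getChunkConfig M.empty                               -- NoChunks
 -}
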
-- An infinite stream of chunk keys, starting from chunk 1.
newtype ChunkKeyStream = ChunkKeyStream [Key]

chunkKeyStream :: Key -> ChunkSize -> ChunkKeyStream
chunkKeyStream basek chunksize = ChunkKeyStream $ map mk [1..]
  where
	mk chunknum = sizedk { keyChunkNum = Just chunknum }
	sizedk = basek { keyChunkSize = Just (toInteger chunksize) }

nextChunkKeyStream :: ChunkKeyStream -> (Key, ChunkKeyStream)
nextChunkKeyStream (ChunkKeyStream (k:l)) = (k, ChunkKeyStream l)
nextChunkKeyStream (ChunkKeyStream []) = undefined -- stream is infinite!

takeChunkKeyStream :: ChunkCount -> ChunkKeyStream -> [Key]
takeChunkKeyStream n (ChunkKeyStream l) = genericTake n l

-- Number of chunks already consumed from the stream.
numChunks :: ChunkKeyStream -> Integer
numChunks = pred . fromJust . keyChunkNum . fst . nextChunkKeyStream

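{- Illustrative sketch of the stream (the exact serialized form of chunk
 - keys is determined by Types.Key; the hashes below are made up): for a
 - 2.5 MiB base key and a 1 MiB ChunkSize, chunkKeyStream yields keys
 - that serialize along the lines of
 -
 -   SHA256E-s2621440-S1048576-C1--d0f4...
 -   SHA256E-s2621440-S1048576-C2--d0f4...
 -   ...
 -
 - After two nextChunkKeyStream calls, numChunks on the remaining stream
 - is 2, since the next chunk key carries keyChunkNum = Just 3.
 -}
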
{- Splits up the key's content into chunks, passing each chunk to
 - the storer action, along with a corresponding chunk key and a
 - progress meter update callback.
 -
 - To support resuming, the checker is used to find the first missing
 - chunk key. Storing starts from that chunk.
 -
 - This buffers each chunk in memory, so it can use a lot of memory
 - with a large ChunkSize.
 - More efficient versions of this could be written, that rely
 - on L.toChunks to split the lazy bytestring into chunks (typically
 - smaller than the ChunkSize), and eg, write those chunks to a Handle.
 - But this is the best that can be done with the storer interface that
 - writes a whole L.ByteString at a time.
 -}
storeChunks
	:: UUID
	-> ChunkConfig
	-> Key
	-> FilePath
	-> MeterUpdate
	-> Storer
	-> CheckPresent
	-> Annex Bool
storeChunks u chunkconfig k f p storer checker =
	case chunkconfig of
		(UnpaddedChunks chunksize) | isStableKey k ->
			bracketIO open close (go chunksize)
		_ -> storer k (FileContent f) p
  where
	open = tryIO $ openBinaryFile f ReadMode

	close (Right h) = hClose h
	close (Left _) = noop

	go _ (Left e) = do
		warning (show e)
		return False
	go chunksize (Right h) = do
		let chunkkeys = chunkKeyStream k chunksize
		(chunkkeys', startpos) <- seekResume h chunkkeys checker
		b <- liftIO $ L.hGetContents h
		gochunks p startpos chunksize b chunkkeys'

	gochunks :: MeterUpdate -> BytesProcessed -> ChunkSize -> L.ByteString -> ChunkKeyStream -> Annex Bool
	gochunks meterupdate startpos chunksize = loop startpos . splitchunk
	  where
		splitchunk = L.splitAt chunksize

		loop bytesprocessed (chunk, bs) chunkkeys
			| L.null chunk && numchunks > 0 = do
				-- Once all chunks are successfully
				-- stored, update the chunk log.
				chunksStored u k (FixedSizeChunks chunksize) numchunks
				return True
			| otherwise = do
				liftIO $ meterupdate' zeroBytesProcessed
				let (chunkkey, chunkkeys') = nextChunkKeyStream chunkkeys
				ifM (storer chunkkey (ByteContent chunk) meterupdate')
					( do
						let bytesprocessed' = addBytesProcessed bytesprocessed (L.length chunk)
						loop bytesprocessed' (splitchunk bs) chunkkeys'
					, return False
					)
		  where
			numchunks = numChunks chunkkeys
			{- The MeterUpdate that is passed to the action
			 - storing a chunk is offset, so that it reflects
			 - the total bytes that have already been stored
			 - in previous chunks. -}
			meterupdate' = offsetMeterUpdate meterupdate bytesprocessed

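{- Illustrative example of the metering above: when storing chunk 3 of a
 - file split into 1 MiB chunks, bytesprocessed is 2097152, so the
 - initial meterupdate' zeroBytesProcessed call reports 2097152 to the
 - overall meter, and progress within the chunk is reported on top of
 - that offset.
 -}
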
{- Check if any of the chunk keys are present. If found, seek forward
 - in the Handle, so it will be read starting at the first missing chunk.
 - Returns the ChunkKeyStream truncated to start at the first missing
 - chunk, and the number of bytes skipped due to resuming.
 -
 - As an optimisation, if the file fits into a single chunk, there's no need
 - to check if that chunk is present -- we know it's not, because otherwise
 - the whole file would be present and there would be no reason to try to
 - store it.
 -}
seekResume
	:: Handle
	-> ChunkKeyStream
	-> CheckPresent
	-> Annex (ChunkKeyStream, BytesProcessed)
seekResume h chunkkeys checker = do
	sz <- liftIO (hFileSize h)
	if sz <= fromMaybe 0 (keyChunkSize $ fst $ nextChunkKeyStream chunkkeys)
		then return (chunkkeys, zeroBytesProcessed)
		else check 0 chunkkeys sz
  where
	check pos cks sz
		| pos >= sz = do
			-- All chunks are already stored!
			liftIO $ hSeek h AbsoluteSeek sz
			return (cks, toBytesProcessed sz)
		| otherwise = do
			v <- tryNonAsync (checker k)
			case v of
				Right True ->
					check pos' cks' sz
				_ -> do
					when (pos > 0) $
						liftIO $ hSeek h AbsoluteSeek pos
					return (cks, toBytesProcessed pos)
	  where
		(k, cks') = nextChunkKeyStream cks
		pos' = pos + fromMaybe 0 (keyChunkSize k)

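{- Illustrative example: storing a 3.5 MiB file as 1 MiB chunks when
 - chunks 1 and 2 are already present on the remote. The file is larger
 - than one chunk, so the single-chunk shortcut does not apply; check
 - finds chunk 3 missing, seeks the Handle to byte 2097152, and returns
 - the stream positioned at chunk 3 along with 2097152 bytes processed.
 -}
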
{- Removes all chunks of a key from a remote, by calling a remover
 - action on each.
 -
 - The remover action should succeed even if asked to
 - remove a key that is not present on the remote.
 -
 - This action may be called on a chunked key. It will simply remove it.
 -}
removeChunks :: (Key -> Annex Bool) -> UUID -> ChunkConfig -> EncKey -> Key -> Annex Bool
removeChunks remover u chunkconfig encryptor k = do
	ls <- chunkKeys u chunkconfig k
	ok <- allM (remover . encryptor) (concat ls)
	when ok $ do
		let chunksizes = catMaybes $ map (keyChunkSize <=< headMaybe) ls
		forM_ chunksizes $ chunksRemoved u k . FixedSizeChunks . fromIntegral
	return ok

{- Retrieves a key from a remote, using a retriever action.
 -
 - When the remote is chunked, tries each of the options returned by
 - chunkKeys until it finds one where the retriever successfully
 - gets the first chunked key. The content of that key, and any
 - other chunks in the list, is fed to the sink.
 -
 - If retrieval of one of the subsequent chunks throws an exception,
 - gives up and returns False. Note that partial data may have been
 - written to the sink in this case.
 -
 - Resuming is supported when using chunks. When the destination file
 - already exists, it skips to the next chunked key that would be needed
 - to resume.
 -}
retrieveChunks
	:: Retriever
	-> UUID
	-> ChunkConfig
	-> EncKey
	-> Key
	-> FilePath
	-> MeterUpdate
	-> (Maybe Handle -> Maybe MeterUpdate -> ContentSource -> Annex Bool)
	-> Annex Bool
retrieveChunks retriever u chunkconfig encryptor basek dest basep sink
	| noChunks chunkconfig =
		-- Optimisation: Try the unchunked key first, to avoid
		-- looking in the git-annex branch for chunk counts
		-- that are likely not there.
		getunchunked `catchNonAsync`
			const (go =<< chunkKeysOnly u basek)
	| otherwise = go =<< chunkKeys u chunkconfig basek
  where
	go ls = do
		currsize <- liftIO $ catchMaybeIO $
			toInteger . fileSize <$> getFileStatus dest
		let ls' = maybe ls (setupResume ls) currsize
		if any null ls'
			then return True -- dest is already complete
			else firstavail currsize ls' `catchNonAsync` giveup

	giveup e = do
		warning (show e)
		return False

	firstavail _ [] = return False
	firstavail currsize ([]:ls) = firstavail currsize ls
	firstavail currsize ((k:ks):ls)
		| k == basek = getunchunked
			`catchNonAsync` (const $ firstavail currsize ls)
		| otherwise = do
			let offset = resumeOffset currsize k
			let p = maybe basep
				(offsetMeterUpdate basep . toBytesProcessed)
				offset
			v <- tryNonAsync $
				retriever (encryptor k) p $ \content ->
					bracketIO (maybe opennew openresume offset) hClose $ \h -> do
						void $ tosink (Just h) p content
						let sz = toBytesProcessed $
							fromMaybe 0 $ keyChunkSize k
						getrest p h sz sz ks
							`catchNonAsync` giveup
			case v of
				Left e
					| null ls -> giveup e
					| otherwise -> firstavail currsize ls
				Right r -> return r

	getrest _ _ _ _ [] = return True
	getrest p h sz bytesprocessed (k:ks) = do
		let p' = offsetMeterUpdate p bytesprocessed
		liftIO $ p' zeroBytesProcessed
		ifM (retriever (encryptor k) p' $ tosink (Just h) p')
			( getrest p h sz (addBytesProcessed bytesprocessed sz) ks
			, giveup "chunk retrieval failed"
			)

	getunchunked = retriever (encryptor basek) basep $ tosink Nothing basep

	opennew = openBinaryFile dest WriteMode

	-- Open the file and seek to the start point in order to resume.
	openresume startpoint = do
		-- ReadWriteMode allows seeking; AppendMode does not.
		h <- openBinaryFile dest ReadWriteMode
		hSeek h AbsoluteSeek startpoint
		return h

	{- Progress meter updating is a bit tricky: If the Retriever
	 - populates a file, it is responsible for updating progress
	 - as the file is being retrieved.
	 -
	 - However, if the Retriever generates a lazy ByteString,
	 - it is not responsible for updating progress (often it cannot).
	 - Instead, the sink is passed a meter to update as it consumes
	 - the ByteString.
	 -}
	tosink h p content = sink h p' content
	  where
		p'
			| isByteContent content = Just p
			| otherwise = Nothing

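{- Illustrative walkthrough of a resumed retrieval (example numbers):
 - dest already holds 2621440 bytes of a key stored as 1 MiB chunks.
 - setupResume drops the first two chunk keys, resumeOffset for chunk 3
 - yields Just 2097152 (assuming chunkKeyOffset is the chunk size times
 - the number of preceding chunks), the progress meter is offset by that
 - amount, openresume seeks dest to byte 2097152, and retrieval
 - continues with chunk 3, then getrest for the remaining chunks.
 -}
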
{- Can resume when the chunk's offset is at or before the end of
 - the dest file. -}
resumeOffset :: Maybe Integer -> Key -> Maybe Integer
resumeOffset Nothing _ = Nothing
resumeOffset currsize k
	| offset <= currsize = offset
	| otherwise = Nothing
  where
	offset = chunkKeyOffset k

{- Drops chunks that are already present in a file, based on its size.
 - Keeps any non-chunk keys.
 -}
setupResume :: [[Key]] -> Integer -> [[Key]]
setupResume ls currsize = map dropunneeded ls
  where
	dropunneeded [] = []
	dropunneeded l@(k:_) = case keyChunkSize k of
		Just chunksize | chunksize > 0 ->
			genericDrop (currsize `div` chunksize) l
		_ -> l

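{- Illustrative example, with 1 MiB chunk keys c1..c4 and a destination
 - file that already contains 2621440 bytes:
 -
 -   setupResume [[c1, c2, c3, c4]] 2621440  -- [[c3, c4]]
 -
 - since 2621440 `div` 1048576 == 2 chunks are already complete.
 -}
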
{- Checks if a key is present in a remote. This requires any one
 - of the lists of options returned by chunkKeys to all check out
 - as being present using the checker action.
 -
 - Throws an exception if the remote is not accessible.
 -}
checkPresentChunks
	:: CheckPresent
	-> UUID
	-> ChunkConfig
	-> EncKey
	-> Key
	-> Annex Bool
checkPresentChunks checker u chunkconfig encryptor basek
	| noChunks chunkconfig = do
		-- Optimisation: Try the unchunked key first, to avoid
		-- looking in the git-annex branch for chunk counts
		-- that are likely not there.
		v <- check basek
		case v of
			Right True -> return True
			Left e -> checklists (Just e) =<< chunkKeysOnly u basek
			_ -> checklists Nothing =<< chunkKeysOnly u basek
	| otherwise = checklists Nothing =<< chunkKeys u chunkconfig basek
  where
	checklists Nothing [] = return False
	checklists (Just deferrederror) [] = throwM deferrederror
	checklists d (l:ls)
		| not (null l) = do
			v <- checkchunks l
			case v of
				Left e -> checklists (Just e) ls
				Right True -> return True
				Right False -> checklists Nothing ls
		| otherwise = checklists d ls

	checkchunks :: [Key] -> Annex (Either SomeException Bool)
	checkchunks [] = return (Right True)
	checkchunks (k:ks) = do
		v <- check k
		case v of
			Right True -> checkchunks ks
			Right False -> return $ Right False
			Left e -> return $ Left e

	check = tryNonAsync . checker . encryptor

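{- Illustrative example of the semantics above: if chunkKeys returns
 - [[c1, c2, c3], [basek]], the key is considered present when all of
 - c1..c3 check out, or when basek does. A Left from the checker is
 - remembered and rethrown at the end, unless a later list yields a
 - definite True or False.
 -}
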
{- A key can be stored in a remote unchunked, or as a list of chunked keys.
 - This can be the case whether or not the remote is currently configured
 - to use chunking.
 -
 - It's even possible for a remote to have the same key stored multiple
 - times with different chunk sizes!
 -
 - This finds all possible lists of keys that might be on the remote that
 - can be combined to get back the requested key, in order from most to
 - least likely to exist.
 -}
chunkKeys :: UUID -> ChunkConfig -> Key -> Annex [[Key]]
chunkKeys u chunkconfig k = do
	l <- chunkKeysOnly u k
	return $ if noChunks chunkconfig
		then [k] : l
		else l ++ [[k]]

chunkKeysOnly :: UUID -> Key -> Annex [[Key]]
chunkKeysOnly u k = map (toChunkList k) <$> getCurrentChunks u k

toChunkList :: Key -> (ChunkMethod, ChunkCount) -> [Key]
toChunkList k (FixedSizeChunks chunksize, chunkcount) =
	takeChunkKeyStream chunkcount $ chunkKeyStream k chunksize
toChunkList _ (UnknownChunks _, _) = []

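{- Illustrative example (hypothetical state): for a key k whose chunk log
 - records three 1 MiB chunks on the remote, chunkKeys returns
 -
 -   [[k], [c1, c2, c3]]   when chunking is currently disabled
 -   [[c1, c2, c3], [k]]   when chunking is enabled
 -
 - so the layout matching the current configuration is tried first, and
 - the other layout remains available as a fallback.
 -}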