{- git-annex chunked remotes
 -
 - Copyright 2014-2021 Joey Hess <id@joeyh.name>
 -
 - Licensed under the GNU AGPL version 3 or higher.
 -}

{-# LANGUAGE RankNTypes #-}

module Remote.Helper.Chunked (
        ChunkSize,
        ChunkConfig(..),
        noChunks,
        describeChunkConfig,
        chunkConfigParsers,
        getChunkConfig,
        storeChunks,
        removeChunks,
        retrieveChunks,
        checkPresentChunks,
        chunkField,
) where

import Annex.Common
import qualified Annex
import Utility.DataUnits
import Types.StoreRetrieve
import Types.Remote
import Types.ProposedAccepted
import Logs.Chunk
import Utility.Metered
import Crypto
import Backend (isStableKey)
import Annex.SpecialRemote.Config
import Annex.Verify
import qualified Utility.RawFilePath as R

import qualified Data.ByteString as S
import qualified Data.ByteString.Lazy as L

data ChunkConfig
        = NoChunks
        | UnpaddedChunks ChunkSize
        | LegacyChunks ChunkSize
        deriving (Show)

describeChunkConfig :: ChunkConfig -> String
describeChunkConfig NoChunks = "none"
describeChunkConfig (UnpaddedChunks sz) = describeChunkSize sz ++ " chunks"
describeChunkConfig (LegacyChunks sz) = describeChunkSize sz ++ " chunks (old style)"

describeChunkSize :: ChunkSize -> String
describeChunkSize sz = roughSize storageUnits False (fromIntegral sz)

noChunks :: ChunkConfig -> Bool
noChunks NoChunks = True
noChunks _ = False

chunkConfigParsers :: [RemoteConfigFieldParser]
chunkConfigParsers =
        [ optionalStringParser chunksizeField HiddenField -- deprecated
        , optionalStringParser chunkField
                (FieldDesc "size of chunks (eg, 1MiB)")
        ]

getChunkConfig :: ParsedRemoteConfig -> ChunkConfig
getChunkConfig c =
        case getRemoteConfigValue chunksizeField c of
                Nothing -> case getRemoteConfigValue chunkField c of
                        Nothing -> NoChunks
                        Just v -> readsz UnpaddedChunks v chunkField
                Just v -> readsz LegacyChunks v chunksizeField
  where
        readsz mk v f = case readSize dataUnits v of
                Just size
                        | size == 0 -> NoChunks
                        | size > 0 -> mk (fromInteger size)
                _ -> giveup $ "bad configuration " ++ fromProposedAccepted f ++ "=" ++ v
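
{- Added illustrative note (not from the original module): getChunkConfig
 - maps remote configuration to ChunkConfig roughly as follows, assuming
 - readSize parses "1MiB" as 1048576 bytes:
 -
 -   chunk=1MiB      -> UnpaddedChunks 1048576   (current chunking style)
 -   chunksize=1MiB  -> LegacyChunks 1048576     (deprecated old style)
 -   chunk=0         -> NoChunks                 (chunking disabled)
 -   chunk=junk      -> giveup "bad configuration chunk=junk"
 -
 - When both fields are set, chunksize= wins, since it is matched first.
 -}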

-- An infinite stream of chunk keys, starting from chunk 1.
newtype ChunkKeyStream = ChunkKeyStream [Key]

chunkKeyStream :: Key -> ChunkSize -> ChunkKeyStream
chunkKeyStream basek chunksize = ChunkKeyStream $ map mk [1..]
  where
        mk chunknum = alterKey sizedk $ \d -> d
                { keyChunkNum = Just chunknum }
        sizedk = alterKey basek $ \d -> d
                { keyChunkSize = Just (toInteger chunksize) }

nextChunkKeyStream :: ChunkKeyStream -> (Key, ChunkKeyStream)
nextChunkKeyStream (ChunkKeyStream (k:l)) = (k, ChunkKeyStream l)
nextChunkKeyStream (ChunkKeyStream []) = error "expected infinite ChunkKeyStream"

takeChunkKeyStream :: ChunkCount -> ChunkKeyStream -> [Key]
takeChunkKeyStream n (ChunkKeyStream l) = genericTake n l

-- Number of chunks already consumed from the stream.
numChunks :: ChunkKeyStream -> Integer
numChunks = pred . fromJust . fromKey keyChunkNum . fst . nextChunkKeyStream
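
{- Added illustrative note (not from the original module): for a base key
 - stored with a 1 MiB chunk size, chunkKeyStream yields keys that differ
 - from the base key only in their chunk metadata:
 -
 -   keyChunkSize = Just 1048576, keyChunkNum = Just 1
 -   keyChunkSize = Just 1048576, keyChunkNum = Just 2
 -   ...
 -
 - numChunks reports how many keys have been consumed so far: 0 for a fresh
 - stream, and 2 after two calls to nextChunkKeyStream, since it is the next
 - key's chunk number minus one.
 -}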

{- Splits up the key's content into chunks, passing each chunk to
 - the storer action, along with a corresponding chunk key and a
 - progress meter update callback.
 -
 - To support resuming, the checker is used to find the first missing
 - chunk key. Storing starts from that chunk.
 -
 - This buffers each chunk in memory, so can use a lot of memory
 - with a large ChunkSize.
 - More optimal versions of this can be written, that rely
 - on L.toChunks to split the lazy bytestring into chunks (typically
 - smaller than the ChunkSize), and eg, write those chunks to a Handle.
 - But this is the best that can be done with the storer interface that
 - writes a whole L.ByteString at a time.
 -}
storeChunks
        :: LensGpgEncParams encc
        => UUID
        -> ChunkConfig
        -> EncKey
        -> Key
        -> FilePath
        -> MeterUpdate
        -> Maybe (Cipher, EncKey)
        -> encc
        -> Storer
        -> CheckPresent
        -> Annex ()
storeChunks u chunkconfig encryptor k f p enc encc storer checker =
        case chunkconfig of
                -- Only stable keys are safe to store chunked,
                -- because an unstable key can have multiple different
                -- objects, and mixing up chunks from them would be
                -- possible without this check.
                (UnpaddedChunks chunksize) -> ifM (isStableKey k)
                        ( do
                                h <- liftIO $ openBinaryFile f ReadMode
                                go chunksize h
                                liftIO $ hClose h
                        , storechunk k (FileContent f) p
                        )
                _ -> storechunk k (FileContent f) p
  where
        go chunksize h = do
                let chunkkeys = chunkKeyStream k chunksize
                (chunkkeys', startpos) <- seekResume h encryptor chunkkeys checker
                b <- liftIO $ L.hGetContents h
                gochunks p startpos chunksize b chunkkeys'

        gochunks :: MeterUpdate -> BytesProcessed -> ChunkSize -> L.ByteString -> ChunkKeyStream -> Annex ()
        gochunks meterupdate startpos chunksize = loop startpos . splitchunk
          where
                splitchunk = L.splitAt chunksize

                loop bytesprocessed (chunk, bs) chunkkeys
                        | L.null chunk && numchunks > 0 = do
                                -- Once all chunks are successfully
                                -- stored, update the chunk log.
                                chunksStored u k (FixedSizeChunks chunksize) numchunks
                        | otherwise = do
                                liftIO $ meterupdate' zeroBytesProcessed
                                let (chunkkey, chunkkeys') = nextChunkKeyStream chunkkeys
                                storechunk chunkkey (ByteContent chunk) meterupdate'
                                let bytesprocessed' = addBytesProcessed bytesprocessed (L.length chunk)
                                loop bytesprocessed' (splitchunk bs) chunkkeys'
                  where
                        numchunks = numChunks chunkkeys
                        {- The MeterUpdate that is passed to the action
                         - storing a chunk is offset, so that it reflects
                         - the total bytes that have already been stored
                         - in previous chunks. -}
                        meterupdate' = offsetMeterUpdate meterupdate bytesprocessed

        storechunk ck content meterupdate = case enc of
                Nothing -> storer ck content meterupdate
                (Just (cipher, enck)) -> do
                        cmd <- gpgCmd <$> Annex.getGitConfig
                        withBytes content $ \b ->
                                encrypt cmd encc cipher (feedBytes b) $
                                        readBytes $ \encb ->
                                                storer (enck ck) (ByteContent encb) meterupdate
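
{- Added worked example (not from the original module): storing a 2.5 MiB
 - file with UnpaddedChunks 1048576 uses the first three keys of
 - chunkKeyStream. If seekResume finds chunk 1 already on the remote, it
 - returns the stream starting at chunk 2 and a startpos of 1 MiB, so
 - gochunks reads the handle from that offset, stores chunks 2 and 3 (the
 - last one only 0.5 MiB), and then records 3 chunks of this size in the
 - chunk log via chunksStored. Each chunk's MeterUpdate is offset by the
 - bytes already handled, so overall progress reporting stays monotonic.
 -}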

{- Check if any of the chunk keys are present. If found, seek forward
 - in the Handle, so it will be read starting at the first missing chunk.
 - Returns the ChunkKeyStream truncated to start at the first missing
 - chunk, and the number of bytes skipped due to resuming.
 -
 - As an optimisation, if the file fits into a single chunk, there's no need
 - to check if that chunk is present -- we know it's not, because otherwise
 - the whole file would be present and there would be no reason to try to
 - store it.
 -}
seekResume
        :: Handle
        -> EncKey
        -> ChunkKeyStream
        -> CheckPresent
        -> Annex (ChunkKeyStream, BytesProcessed)
seekResume h encryptor chunkkeys checker = do
        sz <- liftIO (hFileSize h)
        if sz <= fromMaybe 0 (fromKey keyChunkSize $ fst $ nextChunkKeyStream chunkkeys)
                then return (chunkkeys, zeroBytesProcessed)
                else check 0 chunkkeys sz
  where
        check pos cks sz
                | pos >= sz = do
                        -- All chunks are already stored!
                        liftIO $ hSeek h AbsoluteSeek sz
                        return (cks, toBytesProcessed sz)
                | otherwise = do
                        v <- tryNonAsync (checker (encryptor k))
                        case v of
                                Right True ->
                                        check pos' cks' sz
                                _ -> do
                                        when (pos > 0) $
                                                liftIO $ hSeek h AbsoluteSeek pos
                                        return (cks, toBytesProcessed pos)
          where
                (k, cks') = nextChunkKeyStream cks
                pos' = pos + fromMaybe 0 (fromKey keyChunkSize k)
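
{- Added worked example (not from the original module): with 1 MiB chunk
 - keys and a 3.2 MiB file, if the checker reports the first two chunk keys
 - present, check stops at the third, seeks the handle to byte 2097152, and
 - returns the truncated ChunkKeyStream along with BytesProcessed 2097152.
 - If every chunk key is present, the handle is seeked to the end of the
 - file, so no further chunk data needs to be uploaded. And when the whole
 - file fits in a single chunk, no checker calls are made at all.
 -}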

{- Removes all chunks of a key from a remote, by calling a remover
 - action on each.
 -
 - This action may be called on a chunked key. It will simply remove it.
 -}
removeChunks :: Remover -> UUID -> ChunkConfig -> EncKey -> Key -> Annex ()
removeChunks remover u chunkconfig encryptor k = do
        ls <- map chunkKeyList <$> chunkKeys u chunkconfig k
        mapM_ (remover . encryptor) (concat ls)
        let chunksizes = catMaybes $ map (fromKey keyChunkSize <=< headMaybe) ls
        forM_ chunksizes $ chunksRemoved u k . FixedSizeChunks . fromIntegral
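
{- Added illustrative note (not from the original module): for a key stored
 - as three 1 MiB chunks, removeChunks calls the remover on each of the
 - three (encrypted) chunk keys, and then records the removal of the
 - FixedSizeChunks 1048576 set in the chunk log via chunksRemoved, so that
 - those chunks are no longer advertised as present on this remote.
 -}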
2014-07-29 18:53:17 +00:00
|
|
|
{- Retrieves a key from a remote, using a retriever action.
|
2014-07-27 00:11:41 +00:00
|
|
|
-
|
|
|
|
- When the remote is chunked, tries each of the options returned by
|
|
|
|
- chunkKeys until it finds one where the retriever successfully
|
2021-02-16 19:46:14 +00:00
|
|
|
- gets the first chunked key.
|
2014-07-27 00:11:41 +00:00
|
|
|
-
|
2023-03-13 22:55:18 +00:00
|
|
|
- If retrieval of one of the subsequent chunks throws an exception,
|
2021-02-16 19:46:14 +00:00
|
|
|
- gives up. Note that partial data may have been written to the file
|
2020-05-13 21:05:56 +00:00
|
|
|
- in this case.
|
resume interrupted chunked downloads
Leverage the new chunked remotes to automatically resume downloads.
Sort of like rsync, although of course not as efficient since this
needs to start at a chunk boundry.
But, unlike rsync, this method will work for S3, WebDAV, external
special remotes, etc, etc. Only directory special remotes so far,
but many more soon!
This implementation will also properly handle starting a download
from one remote, interrupting, and resuming from another one, and so on.
(Resuming interrupted chunked uploads is similarly doable, although
slightly more expensive.)
This commit was sponsored by Thomas Djärv.
2014-07-27 22:52:42 +00:00
|
|
|
-
|
|
|
|
- Resuming is supported when using chunks. When the destination file
|
|
|
|
- already exists, it skips to the next chunked key that would be needed
|
|
|
|
- to resume.
|
2021-02-16 19:46:14 +00:00
|
|
|
-
|
|
|
|
- Handles decrypting the content when encryption is used.
|
2014-07-27 00:11:41 +00:00
|
|
|
-}
|
|
|
|
retrieveChunks
|
2021-02-16 19:46:14 +00:00
|
|
|
:: LensGpgEncParams encc
|
|
|
|
=> Retriever
|
2014-07-27 00:11:41 +00:00
|
|
|
-> UUID
|
incremental verify for byteRetriever special remotes
Several special remotes verify content while it is being retrieved,
avoiding a separate checksum pass. They are: S3, bup, ddar, and
gcrypt (with a local repository).
Not done when using chunking, yet.
Complicated by Retriever needing to change to be polymorphic. Which in turn
meant RankNTypes is needed, and also needed some code changes. The
change in Remote.External does not change behavior at all but avoids
the type checking failing because of a "rigid, skolem type" which
"would escape its scope". So I refactored slightly to make the type
checker's job easier there.
Unfortunately, directory uses fileRetriever (except when chunked),
so it is not amoung the improved ones. Fixing that would need a way for
FileRetriever to return a Verification. But, since the file retrieved
may be encrypted or chunked, it would be extra work to always
incrementally checksum the file while retrieving it. Hm.
Some other special remotes use fileRetriever, and so don't get incremental
verification, but could be converted to byteRetriever later. One is
GitLFS, which uses downloadConduit, which writes to the file, so could
verify as it goes. Other special remotes like web could too, but don't
use Remote.Helper.Special and so will need to be addressed separately.
Sponsored-by: Dartmouth College's DANDI project
2021-08-11 17:43:30 +00:00
|
|
|
-> VerifyConfig
|
2014-07-27 00:11:41 +00:00
|
|
|
-> ChunkConfig
|
|
|
|
-> EncKey
|
|
|
|
-> Key
|
resume interrupted chunked downloads
Leverage the new chunked remotes to automatically resume downloads.
Sort of like rsync, although of course not as efficient since this
needs to start at a chunk boundry.
But, unlike rsync, this method will work for S3, WebDAV, external
special remotes, etc, etc. Only directory special remotes so far,
but many more soon!
This implementation will also properly handle starting a download
from one remote, interrupting, and resuming from another one, and so on.
(Resuming interrupted chunked uploads is similarly doable, although
slightly more expensive.)
This commit was sponsored by Thomas Djärv.
2014-07-27 22:52:42 +00:00
|
|
|
-> FilePath
|
2014-07-27 00:11:41 +00:00
|
|
|
-> MeterUpdate
|
2021-02-16 19:46:14 +00:00
|
|
|
-> Maybe (Cipher, EncKey)
|
|
|
|
-> encc
|
incremental verify for byteRetriever special remotes
Several special remotes verify content while it is being retrieved,
avoiding a separate checksum pass. They are: S3, bup, ddar, and
gcrypt (with a local repository).
Not done when using chunking, yet.
Complicated by Retriever needing to change to be polymorphic. Which in turn
meant RankNTypes is needed, and also needed some code changes. The
change in Remote.External does not change behavior at all but avoids
the type checking failing because of a "rigid, skolem type" which
"would escape its scope". So I refactored slightly to make the type
checker's job easier there.
Unfortunately, directory uses fileRetriever (except when chunked),
so it is not amoung the improved ones. Fixing that would need a way for
FileRetriever to return a Verification. But, since the file retrieved
may be encrypted or chunked, it would be extra work to always
incrementally checksum the file while retrieving it. Hm.
Some other special remotes use fileRetriever, and so don't get incremental
verification, but could be converted to byteRetriever later. One is
GitLFS, which uses downloadConduit, which writes to the file, so could
verify as it goes. Other special remotes like web could too, but don't
use Remote.Helper.Special and so will need to be addressed separately.
Sponsored-by: Dartmouth College's DANDI project
2021-08-11 17:43:30 +00:00
|
|
|
-> Annex Verification
|
|
|
|
retrieveChunks retriever u vc chunkconfig encryptor basek dest basep enc encc
|
|
|
|
| noChunks chunkconfig = do
|
2014-07-27 06:13:51 +00:00
|
|
|
-- Optimisation: Try the unchunked key first, to avoid
|
resume interrupted chunked downloads
Leverage the new chunked remotes to automatically resume downloads.
Sort of like rsync, although of course not as efficient since this
needs to start at a chunk boundry.
But, unlike rsync, this method will work for S3, WebDAV, external
special remotes, etc, etc. Only directory special remotes so far,
but many more soon!
This implementation will also properly handle starting a download
from one remote, interrupting, and resuming from another one, and so on.
(Resuming interrupted chunked uploads is similarly doable, although
slightly more expensive.)
This commit was sponsored by Thomas Djärv.
2014-07-27 22:52:42 +00:00
|
|
|
-- looking in the git-annex branch for chunk counts
|
|
|
|
-- that are likely not there.
|
incremental hashing for fileRetriever
It uses tailVerify to hash the file while it's being written.
This is able to sometimes avoid a separate checksum step. Although
if the file gets written quickly enough, tailVerify may not see it
get created before the write finishes, and the checksum still happens.
Testing with the directory special remote, incremental checksumming did
not happen. But then I disabled the copy CoW probing, and it did work.
What's going on with that is the CoW probe creates an empty file on
failure, then deletes it, and then the file is created again. tailVerify
will open the first, empty file, and so fails to read the content that
gets written to the file that replaces it.
The directory special remote really ought to be able to avoid needing to
use tailVerify, and while other special remotes could do things that
cause similar problems, they probably don't. And if they do, it just
means the checksum doesn't get done incrementally.
Sponsored-by: Dartmouth College's DANDI project
2021-08-13 19:43:29 +00:00
|
|
|
tryNonAsync getunchunked >>= \case
|
2021-08-11 18:42:49 +00:00
|
|
|
Right r -> finalize r
|
2021-08-16 18:50:21 +00:00
|
|
|
Left e -> go (Just e)
|
2021-08-11 18:42:49 +00:00
|
|
|
=<< chunkKeysOnly u chunkconfig basek
|
|
|
|
| otherwise = go Nothing
|
|
|
|
=<< chunkKeys u chunkconfig basek
|
2014-07-27 00:11:41 +00:00
|
|
|
where
|
2020-10-22 17:37:09 +00:00
|
|
|
go pe cks = do
|
|
|
|
let ls = map chunkKeyList cks
|
2020-11-05 15:26:34 +00:00
|
|
|
currsize <- liftIO $ catchMaybeIO $ getFileSize (toRawFilePath dest)
|
resume interrupted chunked downloads
Leverage the new chunked remotes to automatically resume downloads.
Sort of like rsync, although of course not as efficient since this
needs to start at a chunk boundry.
But, unlike rsync, this method will work for S3, WebDAV, external
special remotes, etc, etc. Only directory special remotes so far,
but many more soon!
This implementation will also properly handle starting a download
from one remote, interrupting, and resuming from another one, and so on.
(Resuming interrupted chunked uploads is similarly doable, although
slightly more expensive.)
This commit was sponsored by Thomas Djärv.
2014-07-27 22:52:42 +00:00
|
|
|
let ls' = maybe ls (setupResume ls) currsize
|
2014-08-01 21:18:39 +00:00
|
|
|
if any null ls'
|
2021-08-16 18:50:21 +00:00
|
|
|
-- dest is already complete
|
|
|
|
then finalize (Right Nothing)
|
2021-08-11 18:42:49 +00:00
|
|
|
else finalize =<< firstavail pe currsize ls'
|
2014-07-27 00:11:41 +00:00
|
|
|
|
2020-05-21 18:44:40 +00:00
|
|
|
firstavail Nothing _ [] = giveup "unable to determine the chunks to use for this remote"
|
fix embedcreds=yes reversion
Fix bug that made enableremote of S3 and webdav remotes, that have
embedcreds=yes, fail to set up the embedded creds, so accessing the remotes
failed.
(Regression introduced in version 7.20200202.7 in when reworking all the
remote configs to be parsed.)
Root problem is that parseEncryptionConfig excludes all other config keys
except encryption ones, so it is then unable to find the
credPairRemoteField. And since that field is not required to be
present, it proceeds as if it's not, rather than failing in any visible
way.
This causes it to not find any creds, and so it does not cache
them. When when the S3 remote tries to make a S3 connection, it finds no
creds, so assumes it's being used in no-creds mode, and tries to find a
public url. With no public url available, it fails, but the failure doesn't
say a lack of creds is the problem.
Fix is to provide setRemoteCredPair with a ParsedRemoteConfig, so the full
set of configs of the remote can be parsed. A bit annoying to need to
parse the remote config before the full config (as returned by
setRemoteCredPair) is available, but this avoids the problem.
I assume webdav also had the problem by inspection, but didn't try to
reproduce it with it.
Also, getRemoteCredPair used getRemoteConfigValue to get a ProposedAccepted
String, but that does not seem right. Now that it runs that code, it
crashed saying it had just a String.
Remotes that have already been enableremoted, and so lack the cached creds
file will work after this fix, because getRemoteCredPair will extract
the creds from the remote config, writing the missing file.
This commit was sponsored by Ilya Shlyakhter on Patreon.
2020-05-21 18:34:29 +00:00
|
|
|
firstavail (Just e) _ [] = throwM e
|
|
|
|
firstavail pe currsize ([]:ls) = firstavail pe currsize ls
|
2021-10-14 16:45:05 +00:00
|
|
|
firstavail pe currsize ((k:ks):ls)
|
incremental hashing for fileRetriever
It uses tailVerify to hash the file while it's being written.
This is able to sometimes avoid a separate checksum step. Although
if the file gets written quickly enough, tailVerify may not see it
get created before the write finishes, and the checksum still happens.
Testing with the directory special remote, incremental checksumming did
not happen. But then I disabled the copy CoW probing, and it did work.
What's going on with that is the CoW probe creates an empty file on
failure, then deletes it, and then the file is created again. tailVerify
will open the first, empty file, and so fails to read the content that
gets written to the file that replaces it.
The directory special remote really ought to be able to avoid needing to
use tailVerify, and while other special remotes could do things that
cause similar problems, they probably don't. And if they do, it just
means the checksum doesn't get done incrementally.
Sponsored-by: Dartmouth College's DANDI project
2021-08-13 19:43:29 +00:00
|
|
|
| k == basek = getunchunked
|
2021-10-14 16:45:05 +00:00
|
|
|
`catchNonAsync` (\e -> firstavail (Just (pickerr e)) currsize ls)
|
2014-07-30 00:10:14 +00:00
|
|
|
| otherwise = do
|
|
|
|
let offset = resumeOffset currsize k
|
|
|
|
let p = maybe basep
|
|
|
|
(offsetMeterUpdate basep . toBytesProcessed)
|
|
|
|
offset
|
unify exception handling into Utility.Exception
Removed old extensible-exceptions, only needed for very old ghc.
Made webdav use Utility.Exception, to work after some changes in DAV's
exception handling.
Removed Annex.Exception. Mostly this was trivial, but note that
tryAnnex is replaced with tryNonAsync and catchAnnex replaced with
catchNonAsync. In theory that could be a behavior change, since the former
caught all exceptions, and the latter don't catch async exceptions.
However, in practice, nothing in the Annex monad uses async exceptions.
Grepping for throwTo and killThread only find stuff in the assistant,
which does not seem related.
Command.Add.undo is changed to accept a SomeException, and things
that use it for rollback now catch non-async exceptions, rather than
only IOExceptions.
2014-08-08 01:55:44 +00:00
|
|
|
v <- tryNonAsync $
|
incremental hashing for fileRetriever
It uses tailVerify to hash the file while it's being written.
This is able to sometimes avoid a separate checksum step. Although
if the file gets written quickly enough, tailVerify may not see it
get created before the write finishes, and the checksum still happens.
Testing with the directory special remote, incremental checksumming did
not happen. But then I disabled the copy CoW probing, and it did work.
What's going on with that is the CoW probe creates an empty file on
failure, then deletes it, and then the file is created again. tailVerify
will open the first, empty file, and so fails to read the content that
gets written to the file that replaces it.
The directory special remote really ought to be able to avoid needing to
use tailVerify, and while other special remotes could do things that
cause similar problems, they probably don't. And if they do, it just
means the checksum doesn't get done incrementally.
Sponsored-by: Dartmouth College's DANDI project
2021-08-13 19:43:29 +00:00
|
|
|
retriever (encryptor k) p Nothing $ \content ->
|
2021-08-11 18:42:49 +00:00
|
|
|
bracket (maybe opennew openresume offset) (liftIO . hClose . fst) $ \(h, iv) -> do
|
incremental hashing for fileRetriever
It uses tailVerify to hash the file while it's being written.
This is able to sometimes avoid a separate checksum step. Although
if the file gets written quickly enough, tailVerify may not see it
get created before the write finishes, and the checksum still happens.
Testing with the directory special remote, incremental checksumming did
not happen. But then I disabled the copy CoW probing, and it did work.
What's going on with that is the CoW probe creates an empty file on
failure, then deletes it, and then the file is created again. tailVerify
will open the first, empty file, and so fails to read the content that
gets written to the file that replaces it.
The directory special remote really ought to be able to avoid needing to
use tailVerify, and while other special remotes could do things that
cause similar problems, they probably don't. And if they do, it just
means the checksum doesn't get done incrementally.
Sponsored-by: Dartmouth College's DANDI project
2021-08-13 19:43:29 +00:00
|
|
|
retrieved iv (Just h) p content
|
2014-07-30 00:10:14 +00:00
|
|
|
let sz = toBytesProcessed $
|
2019-11-22 20:24:04 +00:00
|
|
|
fromMaybe 0 $ fromKey keyChunkSize k
|
incremental hashing for fileRetriever
It uses tailVerify to hash the file while it's being written.
This is able to sometimes avoid a separate checksum step. Although
if the file gets written quickly enough, tailVerify may not see it
get created before the write finishes, and the checksum still happens.
Testing with the directory special remote, incremental checksumming did
not happen. But then I disabled the copy CoW probing, and it did work.
What's going on with that is the CoW probe creates an empty file on
failure, then deletes it, and then the file is created again. tailVerify
will open the first, empty file, and so fails to read the content that
gets written to the file that replaces it.
The directory special remote really ought to be able to avoid needing to
use tailVerify, and while other special remotes could do things that
cause similar problems, they probably don't. And if they do, it just
means the checksum doesn't get done incrementally.
Sponsored-by: Dartmouth College's DANDI project
2021-08-13 19:43:29 +00:00
|
|
|
getrest p h iv sz sz ks
|
2014-07-30 00:10:14 +00:00
|
|
|
case v of
|
2021-10-14 16:45:05 +00:00
|
|
|
Left e -> firstavail (Just (pickerr e)) currsize ls
|
2014-07-30 00:10:14 +00:00
|
|
|
Right r -> return r
|
2021-10-14 16:45:05 +00:00
|
|
|
where
|
|
|
|
-- Prefer an earlier exception to a later one, because the
|
|
|
|
-- more probable location is tried first and less probable
|
|
|
|
-- ones later.
|
|
|
|
pickerr e = case pe of
|
|
|
|
Just pe' -> pe'
|
|
|
|
Nothing -> e
|
resume interrupted chunked downloads
Leverage the new chunked remotes to automatically resume downloads.
Sort of like rsync, although of course not as efficient since this
needs to start at a chunk boundry.
But, unlike rsync, this method will work for S3, WebDAV, external
special remotes, etc, etc. Only directory special remotes so far,
but many more soon!
This implementation will also properly handle starting a download
from one remote, interrupting, and resuming from another one, and so on.
(Resuming interrupted chunked uploads is similarly doable, although
slightly more expensive.)
This commit was sponsored by Thomas Djärv.
2014-07-27 22:52:42 +00:00
|
|
|
|
2021-08-16 18:50:21 +00:00
|
|
|
getrest _ _ iv _ _ [] = return (Right iv)
|
2021-08-11 18:42:49 +00:00
|
|
|
getrest p h iv sz bytesprocessed (k:ks) = do
|
resume interrupted chunked downloads
Leverage the new chunked remotes to automatically resume downloads.
Sort of like rsync, although of course not as efficient since this
needs to start at a chunk boundry.
But, unlike rsync, this method will work for S3, WebDAV, external
special remotes, etc, etc. Only directory special remotes so far,
but many more soon!
This implementation will also properly handle starting a download
from one remote, interrupting, and resuming from another one, and so on.
(Resuming interrupted chunked uploads is similarly doable, although
slightly more expensive.)
This commit was sponsored by Thomas Djärv.
2014-07-27 22:52:42 +00:00
|
|
|
let p' = offsetMeterUpdate p bytesprocessed
|
2014-07-30 00:31:16 +00:00
|
|
|
liftIO $ p' zeroBytesProcessed
|
incremental hashing for fileRetriever
It uses tailVerify to hash the file while it's being written.
This is able to sometimes avoid a separate checksum step. Although
if the file gets written quickly enough, tailVerify may not see it
get created before the write finishes, and the checksum still happens.
Testing with the directory special remote, incremental checksumming did
not happen. But then I disabled the copy CoW probing, and it did work.
What's going on with that is the CoW probe creates an empty file on
failure, then deletes it, and then the file is created again. tailVerify
will open the first, empty file, and so fails to read the content that
gets written to the file that replaces it.
The directory special remote really ought to be able to avoid needing to
use tailVerify, and while other special remotes could do things that
cause similar problems, they probably don't. And if they do, it just
means the checksum doesn't get done incrementally.
Sponsored-by: Dartmouth College's DANDI project
2021-08-13 19:43:29 +00:00
|
|
|
retriever (encryptor k) p' Nothing $
|
2021-08-11 18:42:49 +00:00
|
|
|
retrieved iv (Just h) p'
|
incremental hashing for fileRetriever
It uses tailVerify to hash the file while it's being written.
This is able to sometimes avoid a separate checksum step. Although
if the file gets written quickly enough, tailVerify may not see it
get created before the write finishes, and the checksum still happens.
Testing with the directory special remote, incremental checksumming did
not happen. But then I disabled the copy CoW probing, and it did work.
What's going on with that is the CoW probe creates an empty file on
failure, then deletes it, and then the file is created again. tailVerify
will open the first, empty file, and so fails to read the content that
gets written to the file that replaces it.
The directory special remote really ought to be able to avoid needing to
use tailVerify, and while other special remotes could do things that
cause similar problems, they probably don't. And if they do, it just
means the checksum doesn't get done incrementally.
Sponsored-by: Dartmouth College's DANDI project
2021-08-13 19:43:29 +00:00
|
|
|
getrest p h iv sz (addBytesProcessed bytesprocessed sz) ks
|

	getunchunked = do
		iv <- startVerifyKeyContentIncrementally vc basek
		case enc of
			Just _ -> do
				retriever (encryptor basek) basep Nothing $
					retrieved iv Nothing basep
				return (Right iv)
			-- Not chunked and not encrypted, so ask the
			-- retriever to incrementally verify when it
			-- retrieves to a file. It may not finish
			-- passing the whole file content to the
			-- incremental verifier though.
			Nothing -> do
				retriever (encryptor basek) basep iv $
					retrieved iv Nothing basep
				return $ case iv of
					Nothing -> Right iv
					Just iv' -> Left (IncompleteVerify iv')

	opennew = do
		iv <- startVerifyKeyContentIncrementally vc basek
		h <- liftIO $ openBinaryFile dest WriteMode
		return (h, iv)

	-- Open the file and seek to the start point in order to resume.
	openresume startpoint = do
		-- ReadWriteMode allows seeking; AppendMode does not.
		h <- liftIO $ openBinaryFile dest ReadWriteMode
		liftIO $ hSeek h AbsoluteSeek startpoint
		-- No incremental verification when resuming, since that
		-- would need to read up to the startpoint.
		let iv = Nothing
		return (h, iv)
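
	-- For example (illustrative): when resuming with 1 MiB chunks and
	-- the first 2 MiB already present in dest, startpoint is 2097152,
	-- so the handle is positioned just past the second chunk and
	-- retrieval continues with the third.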

	{- Progress meter updating is a bit tricky: If the Retriever
	 - populates a file, it is responsible for updating progress
	 - as the file is being retrieved.
	 -
	 - However, if the Retriever generates a lazy ByteString,
	 - it is not responsible for updating progress (often it cannot).
	 - Instead, writeRetrievedContent is passed a meter to update
	 - as it consumes the ByteString.
	 -}
	retrieved iv h p content =
		writeRetrievedContent dest enc encc h p' content iv
	  where
		p'
			| isByteContent content = Just p
			| otherwise = Nothing

	finalize (Right Nothing) = return UnVerified
	finalize (Right (Just iv)) =
		liftIO (finalizeIncrementalVerifier iv) >>= \case
			Just True -> return Verified
			_ -> return UnVerified
	finalize (Left v) = return v

{- Writes retrieved file content to the provided Handle, decrypting it
 - first if necessary.
 -
 - If the remote did not store the content using chunks, no Handle
 - will be provided, and instead the content will be written to the
 - dest file.
 -
 - The IncrementalVerifier is updated as the file content is read.
 -
 - Note that when neither chunking nor encryption is used, and the remote
 - provides FileContent, that file only needs to be renamed
 - into place. (And it may even already be in the right place..)
 -}
writeRetrievedContent
	:: LensGpgEncParams encc
	=> FilePath
	-> Maybe (Cipher, EncKey)
	-> encc
	-> Maybe Handle
	-> Maybe MeterUpdate
	-> ContentSource
	-> Maybe IncrementalVerifier
	-> Annex ()
writeRetrievedContent dest enc encc mh mp content miv = case (enc, mh, content) of
	(Nothing, Nothing, FileContent f)
		| f == dest -> noop
		| otherwise -> liftIO $ moveFile (toRawFilePath f) (toRawFilePath dest)
	(Just (cipher, _), _, ByteContent b) -> do
		cmd <- gpgCmd <$> Annex.getGitConfig
		decrypt cmd encc cipher (feedBytes b) $
			readBytes write
	(Just (cipher, _), _, FileContent f) -> do
		cmd <- gpgCmd <$> Annex.getGitConfig
		withBytes content $ \b ->
			decrypt cmd encc cipher (feedBytes b) $
				readBytes write
		liftIO $ removeWhenExistsWith R.removeLink (toRawFilePath f)
	(Nothing, _, FileContent f) -> do
		withBytes content write
		liftIO $ removeWhenExistsWith R.removeLink (toRawFilePath f)
	(Nothing, _, ByteContent b) -> write b
  where
	write b = case mh of
		Just h -> liftIO $ write' b h
		Nothing -> liftIO $ bracket opendest hClose (write' b)
	write' b h = case mp of
		Just p ->
			let writer = case miv of
				Just iv -> \s -> do
					updateIncrementalVerifier iv s
					S.hPut h s
				Nothing -> S.hPut h
			in meteredWrite p writer b
		Nothing -> L.hPut h b
	opendest = openBinaryFile dest WriteMode
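
{- A usage sketch (illustrative; meter, b, and miv are hypothetical
 - values): an unencrypted, unchunked ByteContent lands in the last
 - case above and is simply streamed to dest, metered when a
 - MeterUpdate is supplied, e.g.
 -
 -   writeRetrievedContent dest Nothing encc Nothing (Just meter)
 -       (ByteContent b) miv
 -
 - while an encrypted FileContent is read back in, decrypted with gpg,
 - written out, and the temporary file removed. -}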

{- Can resume when the chunk's offset is at or before the end of
 - the dest file. -}
resumeOffset :: Maybe Integer -> Key -> Maybe Integer
resumeOffset Nothing _ = Nothing
resumeOffset currsize k
	| offset <= currsize = offset
	| otherwise = Nothing
  where
	offset = chunkKeyOffset k
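
{- For example (with a hypothetical chunk key ck for the third 1 MiB
 - chunk, so that chunkKeyOffset ck == Just 2097152):
 -
 -   resumeOffset (Just 3145728) ck == Just 2097152
 -   resumeOffset (Just 1048576) ck == Nothing
 -
 - i.e. the partial dest file must already reach the chunk's offset
 - for the resume to start there. -}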

{- Drops chunks that are already present in a file, based on its size.
 - Keeps any non-chunk keys.
 -}
setupResume :: [[Key]] -> Integer -> [[Key]]
setupResume ls currsize = map dropunneeded ls
  where
	dropunneeded [] = []
	dropunneeded l@(k:_) = case fromKey keyChunkSize k of
		Just chunksize | chunksize > 0 ->
			genericDrop (currsize `div` chunksize) l
		_ -> l
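
{- A worked sketch (c1..c3 are hypothetical 1 MiB chunk keys): with
 - currsize = 2621440 (2.5 MiB), currsize `div` chunksize is 2, so
 -
 -   setupResume [[c1, c2, c3]] 2621440 == [[c3]]
 -
 - and retrieval resumes at the third chunk. -}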

{- Checks if a key is present in a remote. This requires any one
 - of the lists of options returned by chunkKeys to all check out
 - as being present using the checker action.
 -
 - Throws an exception if the remote is not accessible.
 -}
checkPresentChunks
	:: CheckPresent
	-> UUID
	-> ChunkConfig
	-> EncKey
	-> Key
	-> Annex Bool
checkPresentChunks checker u chunkconfig encryptor basek
	| noChunks chunkconfig = do
		-- Optimisation: Try the unchunked key first, to avoid
		-- looking in the git-annex branch for chunk counts
		-- that are likely not there.
		v <- check basek
		let getchunkkeys = chunkKeysOnly u chunkconfig basek
		case v of
			Right True -> return True
			Left e -> checklists (Just e) =<< getchunkkeys
			_ -> checklists Nothing =<< getchunkkeys
	| otherwise = checklists Nothing =<< chunkKeys u chunkconfig basek
  where
	checklists Nothing [] = return False
	checklists (Just deferrederror) [] = throwM deferrederror
	checklists d (ck:cks)
		| not (null l) = do
			v <- checkchunks l
			case v of
				Left e -> checklists (Just e) cks
				Right True -> do
					ensureChunksAreLogged u basek ck
					return True
				Right False -> checklists Nothing cks
		| otherwise = checklists d cks
	  where
		l = chunkKeyList ck

	checkchunks :: [Key] -> Annex (Either SomeException Bool)
	checkchunks [] = return (Right True)
	checkchunks (k:ks) = do
		v <- check k
		case v of
			Right True -> checkchunks ks
			Right False -> return $ Right False
			Left e -> return $ Left e

	check = tryNonAsync . checker . encryptor
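
{- For example, if chunkKeys returns [ChunkKeys [c1, c2], ChunkKeys [k]]
 - (c1, c2, k being hypothetical keys), the key is treated as present
 - when both c1 and c2 check out, or, failing that, when the unchunked
 - k does. -}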

data ChunkKeys
	= ChunkKeys [Key]
	| SpeculativeChunkKeys (ChunkMethod, ChunkCount) [Key]

chunkKeyList :: ChunkKeys -> [Key]
chunkKeyList (ChunkKeys l) = l
chunkKeyList (SpeculativeChunkKeys _ l) = l

{- A key can be stored in a remote unchunked, or as a list of chunked keys.
 - This can be the case whether or not the remote is currently configured
 - to use chunking.
 -
 - It's even possible for a remote to have the same key stored multiple
 - times with different chunk sizes!
 -
 - This finds all possible lists of keys that might be on the remote that
 - can be combined to get back the requested key, in order from most to
 - least likely to exist.
 -
 - Speculatively tries chunks using the ChunkConfig last of all
 - (when that's not the same as the recorded chunks). This can help
 - recover from data loss, where the chunk log didn't make it out,
 - though only as long as the ChunkConfig is unchanged.
 -}
chunkKeys :: UUID -> ChunkConfig -> Key -> Annex [ChunkKeys]
chunkKeys = chunkKeys' False

{- Same as chunkKeys, but excluding the unchunked key. -}
chunkKeysOnly :: UUID -> ChunkConfig -> Key -> Annex [ChunkKeys]
chunkKeysOnly = chunkKeys' True

chunkKeys' :: Bool -> UUID -> ChunkConfig -> Key -> Annex [ChunkKeys]
chunkKeys' onlychunks u chunkconfig k = do
	recorded <- getCurrentChunks u k
	let recordedl = map (ChunkKeys . toChunkList k) recorded
	return $ addspeculative recorded $ if onlychunks
		then recordedl
		else if noChunks chunkconfig
			then ChunkKeys [k] : recordedl
			else recordedl ++ [ChunkKeys [k]]
  where
	addspeculative recorded l = case chunkconfig of
		NoChunks -> l
		UnpaddedChunks chunksz -> case fromKey keySize k of
			Nothing -> l
			Just keysz ->
				let (d, m) = keysz `divMod` fromIntegral chunksz
				    chunkcount = max 1 (d + if m == 0 then 0 else 1)
				    v = (FixedSizeChunks chunksz, chunkcount)
				in if v `elem` recorded
					then l
					else l ++ [SpeculativeChunkKeys v (toChunkList k v)]
		LegacyChunks _ -> l
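
{- Worked example for the speculative chunk count above (hypothetical
 - sizes): keysz = 2621440 (2.5 MiB) with chunksz = 1 MiB gives
 - divMod = (2, 524288), so chunkcount = 3; a 3 MiB key gives (3, 0)
 - and chunkcount = 3 as well; a zero-size key falls back to
 - chunkcount = 1 via max 1. -}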

toChunkList :: Key -> (ChunkMethod, ChunkCount) -> [Key]
toChunkList k (FixedSizeChunks chunksize, chunkcount) =
	takeChunkKeyStream chunkcount $ chunkKeyStream k chunksize
toChunkList _ (UnknownChunks _, _) = []

{- When chunkKeys provided a speculative chunk list, and that has been
 - verified to be present, use this to log it in the chunk log. This way,
 - a later change to the chunk size of the remote won't prevent accessing
 - the chunks. -}
ensureChunksAreLogged :: UUID -> Key -> ChunkKeys -> Annex ()
ensureChunksAreLogged u k (SpeculativeChunkKeys (chunkmethod, chunkcount) _) =
	chunksStored u k chunkmethod chunkcount
ensureChunksAreLogged _ _ (ChunkKeys _) = return ()

withBytes :: ContentSource -> (L.ByteString -> Annex a) -> Annex a
withBytes (ByteContent b) a = a b
withBytes (FileContent f) a = a =<< liftIO (L.readFile f)