make storeKey throw exceptions
When storing content on a remote fails, always display a reason why. Since the Storer used by special remotes already did, this mostly affects git remotes, but not entirely. For example, if git-lfs failed to connect to the endpoint, it used to silently return False.
parent b50ee9cd0c
commit c1cd402081
34 changed files with 214 additions and 197 deletions
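In short, the Storer type used by special remotes and the storeKey method now return Annex () and signal failure with giveup, so the reason always reaches the user; callers that still want a Bool wrap the call in tryNonAsync and pass the message to warning. Below is a minimal sketch of that calling convention, using plain IO in place of the Annex monad; the names storeKeySketch and uploadWrapper are illustrative only and not part of this patch.

{-# LANGUAGE LambdaCase #-}
-- Sketch of the calling convention this commit moves to, with IO standing in
-- for git-annex's Annex monad. storeKeySketch and uploadWrapper are
-- hypothetical names, not functions from the patch.
import Control.Exception (SomeException, try)

-- A store action now reports failure by throwing an exception that carries
-- the reason, instead of silently returning False.
storeKeySketch :: FilePath -> IO ()
storeKeySketch src
    | null src = ioError (userError "content not available to send")
    | otherwise = putStrLn ("stored " ++ src)

-- Callers that still need a Bool (e.g. for retry and notification logic)
-- catch the exception, show the reason, and convert to False, mirroring the
-- tryNonAsync/warning pattern the patch uses in the move and transferkey
-- commands.
uploadWrapper :: FilePath -> IO Bool
uploadWrapper src = try (storeKeySketch src) >>= \case
    Left e -> do
        putStrLn ("warning: " ++ show (e :: SomeException))
        return False
    Right () -> return True

main :: IO ()
main = do
    _ <- uploadWrapper "annex/objects/SHA256E-s3--example"
    _ <- uploadWrapper ""
    return ()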
@@ -584,21 +584,19 @@ unlinkAnnex key = do
 {- Runs an action to transfer an object's content.
  -
  - In some cases, it's possible for the file to change as it's being sent.
- - If this happens, runs the rollback action and returns False. The
- - rollback action should remove the data that was transferred.
+ - If this happens, runs the rollback action and throws an exception.
+ - The rollback action should remove the data that was transferred.
  -}
-sendAnnex :: Key -> Annex () -> (FilePath -> Annex Bool) -> Annex Bool
+sendAnnex :: Key -> Annex () -> (FilePath -> Annex a) -> Annex a
 sendAnnex key rollback sendobject = go =<< prepSendAnnex key
   where
-	go Nothing = return False
 	go (Just (f, checksuccess)) = do
 		r <- sendobject f
-		ifM checksuccess
-			( return r
-			, do
-				rollback
-				return False
-			)
+		unlessM checksuccess $ do
+			rollback
+			giveup "content changed while it was being sent"
+		return r
+	go Nothing = giveup "content not available to send"
 
 {- Returns a file that contains an object's content,
  - and a check to run after the transfer is complete.
@@ -26,6 +26,7 @@ git-annex (8.20200502) UNRELEASED; urgency=medium
     the current directory.
   * Display a warning message when asked to operate on a file inside a
     directory that's a symbolic link to elsewhere.
+  * When storing content on remote fails, always display a reason why.
 
  -- Joey Hess <id@joeyh.name>  Mon, 04 May 2020 12:46:11 -0400
@@ -126,8 +126,12 @@ toPerform dest removewhen key afile fastcheck isthere =
 		Right False -> do
 			showAction $ "to " ++ Remote.name dest
 			ok <- notifyTransfer Upload afile $
-				upload (Remote.uuid dest) key afile stdRetry $
-					Remote.storeKey dest key afile
+				upload (Remote.uuid dest) key afile stdRetry $ \p ->
+					tryNonAsync (Remote.storeKey dest key afile p) >>= \case
+						Left e -> do
+							warning (show e)
+							return False
+						Right () -> return True
 			if ok
 				then finish False $
 					Remote.logStatus dest key InfoPresent
@@ -40,6 +40,7 @@ import "crypto-api" Crypto.Random
 import qualified Data.ByteString as B
 import qualified Data.ByteString.Lazy as L
 import qualified Data.Map as M
+import Data.Either
 import Control.Concurrent.STM hiding (check)
 
 cmd :: Command
@@ -217,11 +218,11 @@ test runannex mkr mkk =
 	, check ("present " ++ show False) $ \r k ->
 		whenwritable r $ present r k False
 	, check "storeKey" $ \r k ->
-		whenwritable r $ store r k
+		whenwritable r $ isRight <$> tryNonAsync (store r k)
 	, check ("present " ++ show True) $ \r k ->
 		whenwritable r $ present r k True
 	, check "storeKey when already present" $ \r k ->
-		whenwritable r $ store r k
+		whenwritable r $ isRight <$> tryNonAsync (store r k)
 	, check ("present " ++ show True) $ \r k -> present r k True
 	, check "retrieveKeyFile" $ \r k -> do
 		lockContentForRemoval k removeAnnex
@@ -341,7 +342,7 @@ testUnavailable :: RunAnnex -> Annex (Maybe Remote) -> Annex Key -> [TestTree]
 testUnavailable runannex mkr mkk =
 	[ check (== Right False) "removeKey" $ \r k ->
 		Remote.removeKey r k
-	, check (== Right False) "storeKey" $ \r k ->
+	, check isLeft "storeKey" $ \r k ->
 		Remote.storeKey r k (AssociatedFile Nothing) nullMeterUpdate
 	, check (`notElem` [Right True, Right False]) "checkPresent" $ \r k ->
 		Remote.checkPresent r k
@@ -52,10 +52,13 @@ start o key = startingCustomOutput key $ case fromToOptions o of
 toPerform :: Key -> AssociatedFile -> Remote -> CommandPerform
 toPerform key file remote = go Upload file $
 	upload (uuid remote) key file stdRetry $ \p -> do
-		ok <- Remote.storeKey remote key file p
-		when ok $
-			Remote.logStatus remote key InfoPresent
-		return ok
+		tryNonAsync (Remote.storeKey remote key file p) >>= \case
+			Left e -> do
+				warning (show e)
+				return False
+			Right () -> do
+				Remote.logStatus remote key InfoPresent
+				return True
 
 fromPerform :: Key -> AssociatedFile -> Remote -> CommandPerform
 fromPerform key file remote = go Upload file $
@@ -38,10 +38,13 @@ start = do
 	runner (TransferRequest direction remote key file)
 		| direction == Upload = notifyTransfer direction file $
 			upload (Remote.uuid remote) key file stdRetry $ \p -> do
-				ok <- Remote.storeKey remote key file p
-				when ok $
-					Remote.logStatus remote key InfoPresent
-				return ok
+				tryNonAsync (Remote.storeKey remote key file p) >>= \case
+					Left e -> do
+						warning (show e)
+						return False
+					Right () -> do
+						Remote.logStatus remote key InfoPresent
+						return True
 		| otherwise = notifyTransfer direction file $
 			download (Remote.uuid remote) key file stdRetry $ \p ->
 				getViaTmp (Remote.retrievalSecurityPolicy remote) (RemoteVerify remote) key $ \t -> do
@@ -160,7 +160,8 @@ adbSetup _ mu _ c gc = do
 store :: AndroidSerial -> AndroidPath -> Storer
 store serial adir = fileStorer $ \k src _p ->
 	let dest = androidLocation adir k
-	in store' serial dest src
+	in unlessM (store' serial dest src) $
+		giveup "adb failed"
 
 store' :: AndroidSerial -> AndroidPath -> FilePath -> Annex Bool
 store' serial dest src = store'' serial dest src (return True)
@@ -111,10 +111,8 @@ downloadKey key _file dest p = unVerified $
 downloadKeyCheap :: Key -> AssociatedFile -> FilePath -> Annex Bool
 downloadKeyCheap _ _ _ = return False
 
-uploadKey :: Key -> AssociatedFile -> MeterUpdate -> Annex Bool
-uploadKey _ _ _ = do
-	warning "upload to bittorrent not supported"
-	return False
+uploadKey :: Key -> AssociatedFile -> MeterUpdate -> Annex ()
+uploadKey _ _ _ = giveup "upload to bittorrent not supported"
 
 dropKey :: Key -> Annex Bool
 dropKey k = do
@@ -156,9 +156,7 @@ store r buprepo = byteStorer $ \k b p -> do
 	showOutput -- make way for bup output
 	let cmd = proc "bup" (toCommand params)
 	quiet <- commandProgressDisabled
-	let feeder = \h -> do
-		meteredWrite p h b
-		return True
+	let feeder = \h -> meteredWrite p h b
 	liftIO $ if quiet
 		then feedWithQuietOutput createProcessSuccess cmd feeder
 		else withHandle StdinHandle createProcessSuccess cmd feeder
@@ -127,7 +127,8 @@ store ddarrepo = fileStorer $ \k src _p -> do
 		, Param $ ddarRepoLocation ddarrepo
 		, File src
 		]
-	liftIO $ boolSystem "ddar" params
+	unlessM (liftIO $ boolSystem "ddar" params) $
+		giveup "ddar failed"
 
 {- Convert remote DdarRepo to host and path on remote end -}
 splitRemoteDdarRepo :: DdarRepo -> (SshHost, String)
@@ -170,16 +170,16 @@ checkDiskSpaceDirectory d k = do
 		<*> getFileStatus annexdir
 	checkDiskSpace (Just d) k 0 samefilesystem
 
-store :: FilePath -> ChunkConfig -> Key -> L.ByteString -> MeterUpdate -> Annex Bool
+store :: FilePath -> ChunkConfig -> Key -> L.ByteString -> MeterUpdate -> Annex ()
 store d chunkconfig k b p = liftIO $ do
 	void $ tryIO $ createDirectoryUnder d tmpdir
 	case chunkconfig of
-		LegacyChunks chunksize -> Legacy.store d chunksize (finalizeStoreGeneric d) k b p tmpdir destdir
+		LegacyChunks chunksize ->
+			Legacy.store d chunksize (finalizeStoreGeneric d) k b p tmpdir destdir
 		_ -> do
 			let tmpf = tmpdir </> kf
 			meteredWriteFile p tmpf b
 			finalizeStoreGeneric d tmpdir destdir
-			return True
   where
 	tmpdir = addTrailingPathSeparator $ d </> "tmp" </> kf
 	kf = fromRawFilePath (keyFile k)
@@ -70,7 +70,7 @@ storeLegacyChunked' meterupdate chunksize (d:dests) bs c = do
 			feed bytes' (sz - s) ls h
 		else return (l:ls)
 
-storeHelper :: FilePath -> (FilePath -> FilePath -> IO ()) -> Key -> ([FilePath] -> IO [FilePath]) -> FilePath -> FilePath -> IO Bool
+storeHelper :: FilePath -> (FilePath -> FilePath -> IO ()) -> Key -> ([FilePath] -> IO [FilePath]) -> FilePath -> FilePath -> IO ()
 storeHelper repotop finalizer key storer tmpdir destdir = do
 	void $ liftIO $ tryIO $ createDirectoryUnder repotop tmpdir
 	Legacy.storeChunks key tmpdir destdir storer recorder finalizer
@@ -80,7 +80,7 @@ storeHelper repotop finalizer key storer tmpdir destdir = do
 		writeFile f s
 		void $ tryIO $ preventWrite f
 
-store :: FilePath -> ChunkSize -> (FilePath -> FilePath -> IO ()) -> Key -> L.ByteString -> MeterUpdate -> FilePath -> FilePath -> IO Bool
+store :: FilePath -> ChunkSize -> (FilePath -> FilePath -> IO ()) -> Key -> L.ByteString -> MeterUpdate -> FilePath -> FilePath -> IO ()
 store repotop chunksize finalizer k b p = storeHelper repotop finalizer k $ \dests ->
 	storeLegacyChunked p chunksize dests b
@@ -222,14 +222,14 @@ checkExportSupported' external = go `catchNonAsync` (const (return False))
 		_ -> Nothing
 
 storeKeyM :: External -> Storer
-storeKeyM external = fileStorer $ \k f p ->
-	handleRequestKey external (\sk -> TRANSFER Upload sk f) k (Just p) $ \resp ->
+storeKeyM external = fileStorer $ \k f p -> either giveup return =<< go k f p
+  where
+	go k f p = handleRequestKey external (\sk -> TRANSFER Upload sk f) k (Just p) $ \resp ->
 		case resp of
-			TRANSFER_SUCCESS Upload k' | k == k' -> result True
+			TRANSFER_SUCCESS Upload k' | k == k' ->
+				result (Right ())
 			TRANSFER_FAILURE Upload k' errmsg | k == k' ->
-				Just $ do
-					warning $ respErrorMessage "TRANSFER" errmsg
-					return (Result False)
+				result (Left (respErrorMessage "TRANSFER" errmsg))
 			_ -> Nothing
 
 retrieveKeyFileM :: External -> Retriever
@@ -367,20 +367,21 @@ store r rsyncopts k s p = do
 store' :: Git.Repo -> Remote -> Remote.Rsync.RsyncOpts -> Storer
 store' repo r rsyncopts
 	| not $ Git.repoIsUrl repo =
-		byteStorer $ \k b p -> guardUsable repo (return False) $ liftIO $ do
+		byteStorer $ \k b p -> guardUsable repo (giveup "cannot access remote") $ liftIO $ do
 			let tmpdir = Git.repoLocation repo </> "tmp" </> fromRawFilePath (keyFile k)
 			void $ tryIO $ createDirectoryUnder (Git.repoLocation repo) tmpdir
 			let tmpf = tmpdir </> fromRawFilePath (keyFile k)
 			meteredWriteFile p tmpf b
 			let destdir = parentDir $ gCryptLocation repo k
 			Remote.Directory.finalizeStoreGeneric (Git.repoLocation repo) tmpdir destdir
-			return True
 	| Git.repoIsSsh repo = if accessShell r
 		then fileStorer $ \k f p -> do
 			oh <- mkOutputHandler
-			Ssh.rsyncHelper oh (Just p)
+			ok <- Ssh.rsyncHelper oh (Just p)
 				=<< Ssh.rsyncParamsRemote False r Upload k f
 					(AssociatedFile Nothing)
+			unless ok $
+				giveup "rsync failed"
 		else fileStorer $ Remote.Rsync.store rsyncopts
 	| otherwise = unsupportedUrl
@@ -649,28 +649,26 @@ copyFromRemoteCheap' _ _ _ _ _ _ = return False
 #endif
 
 {- Tries to copy a key's content to a remote's annex. -}
-copyToRemote :: Remote -> State -> Key -> AssociatedFile -> MeterUpdate -> Annex Bool
+copyToRemote :: Remote -> State -> Key -> AssociatedFile -> MeterUpdate -> Annex ()
 copyToRemote r st key file meterupdate = do
 	repo <- getRepo r
 	copyToRemote' repo r st key file meterupdate
 
-copyToRemote' :: Git.Repo -> Remote -> State -> Key -> AssociatedFile -> MeterUpdate -> Annex Bool
+copyToRemote' :: Git.Repo -> Remote -> State -> Key -> AssociatedFile -> MeterUpdate -> Annex ()
 copyToRemote' repo r st@(State connpool duc _ _ _) key file meterupdate
 	| not $ Git.repoIsUrl repo = ifM duc
-		( guardUsable repo (return False) $ commitOnCleanup repo r st $
+		( guardUsable repo (giveup "cannot access remote") $ commitOnCleanup repo r st $
 			copylocal =<< Annex.Content.prepSendAnnex key
-		, return False
+		, giveup "remote does not have expected annex.uuid value"
 		)
 	| Git.repoIsSsh repo = commitOnCleanup repo r st $
 		P2PHelper.store
-			(\p -> Ssh.runProto r connpool (return False) (copyremotefallback p))
+			(Ssh.runProto r connpool (return False) . copyremotefallback)
 			key file meterupdate
-	| otherwise = do
-		warning "copying to non-ssh repo not supported"
-		return False
+	| otherwise = giveup "copying to non-ssh repo not supported"
   where
-	copylocal Nothing = return False
+	copylocal Nothing = giveup "content not available"
 	copylocal (Just (object, checksuccess)) = do
 		-- The checksuccess action is going to be run in
 		-- the remote's Annex, but it needs access to the local
@@ -680,7 +678,7 @@ copyToRemote' repo r st@(State connpool duc _ _ _) key file meterupdate
 		u <- getUUID
 		hardlink <- wantHardLink
 		-- run copy from perspective of remote
-		onLocalFast st $ ifM (Annex.Content.inAnnex key)
+		res <- onLocalFast st $ ifM (Annex.Content.inAnnex key)
 			( return True
 			, runTransfer (Transfer Download u (fromKey id key)) file stdRetry $ \p -> do
 				copier <- mkCopier hardlink st params
@@ -692,7 +690,11 @@ copyToRemote' repo r st@(State connpool duc _ _ _) key file meterupdate
 				Annex.Content.saveState True
 				return res
 			)
-	copyremotefallback p = Annex.Content.sendAnnex key noop $ \object -> do
+		unless res $
+			giveup "failed to send content to remote"
+	copyremotefallback p = either (const False) id
+		<$> tryNonAsync (copyremotefallback' p)
+	copyremotefallback' p = Annex.Content.sendAnnex key noop $ \object -> do
 		-- This is too broad really, but recvkey normally
 		-- verifies content anyway, so avoid complicating
 		-- it with a local sendAnnex check and rollback.
@@ -440,18 +440,15 @@ mkDownloadRequest rs k = case (extractKeySha256 k, extractKeySize k) of
 
 store :: RemoteStateHandle -> TVar LFSHandle -> Storer
 store rs h = fileStorer $ \k src p -> getLFSEndpoint LFS.RequestUpload h >>= \case
-	Nothing -> return False
-	Just endpoint -> flip catchNonAsync failederr $ do
+	Nothing -> giveup "unable to connect to git-lfs endpoint"
+	Just endpoint -> do
 		(req, sha256, size) <- mkUploadRequest rs k src
 		sendTransferRequest req endpoint >>= \case
-			Left err -> do
-				warning err
-				return False
 			Right resp -> do
 				body <- liftIO $ httpBodyStorer src p
 				forM_ (LFS.objects resp) $
 					send body sha256 size
-				return True
+			Left err -> giveup err
   where
 	send body sha256 size tro
 		| LFS.resp_oid tro /= sha256 || LFS.resp_size tro /= size =
@@ -466,9 +463,6 @@ store rs h = fileStorer $ \k src p -> getLFSEndpoint LFS.RequestUpload h >>= \ca
 		Just [] -> noop -- server already has it
 		Just reqs -> forM_ reqs $
 			makeSmallAPIRequest . setRequestCheckStatus
-	failederr e = do
-		warning (show e)
-		return False
 
 retrieve :: RemoteStateHandle -> TVar LFSHandle -> Retriever
 retrieve rs h = fileRetriever $ \dest k p -> getLFSEndpoint LFS.RequestDownload h >>= \case
@@ -147,7 +147,7 @@ checkNonEmpty k
 		giveup "Cannot store empty files in Glacier."
 	| otherwise = return ()
 
-store' :: Remote -> Key -> L.ByteString -> MeterUpdate -> Annex Bool
+store' :: Remote -> Key -> L.ByteString -> MeterUpdate -> Annex ()
 store' r k b p = go =<< glacierEnv c gc u
   where
 	c = config r
@@ -160,13 +160,11 @@ store' r k b p = go =<< glacierEnv c gc u
 		, Param $ getVault $ config r
 		, Param "-"
 		]
-	go Nothing = return False
-	go (Just e) = do
+	go Nothing = giveup "Glacier not usable."
+	go (Just e) = liftIO $ do
 		let cmd = (proc "glacier" (toCommand params)) { env = Just e }
-		liftIO $ catchBoolIO $
-			withHandle StdinHandle createProcessSuccess cmd $ \h -> do
-				meteredWrite p h b
-				return True
+		withHandle StdinHandle createProcessSuccess cmd $ \h ->
+			meteredWrite p h b
 
 retrieve :: Remote -> Retriever
 retrieve = byteRetriever . retrieve'
@@ -117,28 +117,22 @@ storeChunks
 	-> MeterUpdate
 	-> Storer
 	-> CheckPresent
-	-> Annex Bool
+	-> Annex ()
 storeChunks u chunkconfig encryptor k f p storer checker =
 	case chunkconfig of
-		(UnpaddedChunks chunksize) | isStableKey k ->
-			bracketIO open close (go chunksize)
+		(UnpaddedChunks chunksize) | isStableKey k -> do
+			h <- liftIO $ openBinaryFile f ReadMode
+			go chunksize h
+			liftIO $ hClose h
 		_ -> storer k (FileContent f) p
   where
-	open = tryIO $ openBinaryFile f ReadMode
-
-	close (Right h) = hClose h
-	close (Left _) = noop
-
-	go _ (Left e) = do
-		warning (show e)
-		return False
-	go chunksize (Right h) = do
+	go chunksize h = do
 		let chunkkeys = chunkKeyStream k chunksize
 		(chunkkeys', startpos) <- seekResume h encryptor chunkkeys checker
 		b <- liftIO $ L.hGetContents h
 		gochunks p startpos chunksize b chunkkeys'
 
-	gochunks :: MeterUpdate -> BytesProcessed -> ChunkSize -> L.ByteString -> ChunkKeyStream -> Annex Bool
+	gochunks :: MeterUpdate -> BytesProcessed -> ChunkSize -> L.ByteString -> ChunkKeyStream -> Annex ()
 	gochunks meterupdate startpos chunksize = loop startpos . splitchunk
 	  where
 		splitchunk = L.splitAt chunksize
@@ -148,16 +142,12 @@ storeChunks u chunkconfig encryptor k f p storer checker =
 				-- Once all chunks are successfully
 				-- stored, update the chunk log.
 				chunksStored u k (FixedSizeChunks chunksize) numchunks
-				return True
 			| otherwise = do
 				liftIO $ meterupdate' zeroBytesProcessed
 				let (chunkkey, chunkkeys') = nextChunkKeyStream chunkkeys
-				ifM (storer chunkkey (ByteContent chunk) meterupdate')
-					( do
-						let bytesprocessed' = addBytesProcessed bytesprocessed (L.length chunk)
-						loop bytesprocessed' (splitchunk bs) chunkkeys'
-					, return False
-					)
+				storer chunkkey (ByteContent chunk) meterupdate'
+				let bytesprocessed' = addBytesProcessed bytesprocessed (L.length chunk)
+				loop bytesprocessed' (splitchunk bs) chunkkeys'
 		  where
 			numchunks = numChunks chunkkeys
 	{- The MeterUpdate that is passed to the action
@@ -63,20 +63,15 @@ probeChunks basedest check = go [] $ map (basedest ++) chunkStream
  - finalizer is called to rename the tmp into the dest
  - (and do any other cleanup).
  -}
-storeChunks :: Key -> FilePath -> FilePath -> ([FilePath] -> IO [FilePath]) -> (FilePath -> String -> IO ()) -> (FilePath -> FilePath -> IO ()) -> IO Bool
-storeChunks key tmp dest storer recorder finalizer = either onerr return
-	=<< (E.try go :: IO (Either E.SomeException Bool))
+storeChunks :: Key -> FilePath -> FilePath -> ([FilePath] -> IO [FilePath]) -> (FilePath -> String -> IO ()) -> (FilePath -> FilePath -> IO ()) -> IO ()
+storeChunks key tmp dest storer recorder finalizer = do
+	stored <- storer tmpdests
+	let chunkcount = basef ++ chunkCount
+	recorder chunkcount (show $ length stored)
+	finalizer tmp dest
+	when (null stored) $
+		giveup "no chunks were stored"
   where
-	go = do
-		stored <- storer tmpdests
-		let chunkcount = basef ++ chunkCount
-		recorder chunkcount (show $ length stored)
-		finalizer tmp dest
-		return (not $ null stored)
-	onerr e = do
-		warningIO (show e)
-		return False
-
 	basef = tmp ++ fromRawFilePath (keyFile key)
 	tmpdests = map (basef ++ ) chunkStream
@@ -188,9 +188,8 @@ adjustExportImport r rs = case getRemoteConfigValue exportTreeField (config r) o
 		-- when another repository has already stored the
 		-- key, and the local repository does not know
 		-- about it. To avoid unnecessary costs, don't do it.
-		{ storeKey = \_ _ _ -> do
-			warning "remote is configured with exporttree=yes; use `git-annex export` to store content on it"
-			return False
+		{ storeKey = \_ _ _ ->
+			giveup "remote is configured with exporttree=yes; use `git-annex export` to store content on it"
 		-- Keys can be retrieved using retrieveExport,
 		-- but since that retrieves from a path in the
 		-- remote that another writer could have replaced
@@ -25,7 +25,7 @@ import Network.HTTP.Types
 --
 -- Implemented as a fileStorer, so that the content can be streamed
 -- from the file in constant space.
-httpStorer :: (Key -> RequestBody -> Annex Bool) -> Storer
+httpStorer :: (Key -> RequestBody -> Annex ()) -> Storer
 httpStorer a = fileStorer $ \k f m -> a k =<< liftIO (httpBodyStorer f m)
 
 -- Reads the file and generates a streaming request body, that will update
@@ -30,12 +30,14 @@ type ProtoConnRunner c = forall a. P2P.Proto a -> ClosableConnection c -> Annex
 -- the pool when done.
 type WithConn a c = (ClosableConnection c -> Annex (ClosableConnection c, a)) -> Annex a
 
-store :: (MeterUpdate -> ProtoRunner Bool) -> Key -> AssociatedFile -> MeterUpdate -> Annex Bool
+store :: (MeterUpdate -> ProtoRunner Bool) -> Key -> AssociatedFile -> MeterUpdate -> Annex ()
 store runner k af p = do
 	let sizer = KeySizer k (fmap fst <$> prepSendAnnex k)
 	metered (Just p) sizer $ \_ p' ->
-		fromMaybe False
-			<$> runner p' (P2P.put k af p')
+		runner p' (P2P.put k af p') >>= \case
+			Just True -> return ()
+			Just False -> giveup "transfer failed"
+			Nothing -> giveup "can't connect to remote"
 
 retrieve :: (MeterUpdate -> ProtoRunner (Bool, Verification)) -> Key -> AssociatedFile -> FilePath -> MeterUpdate -> Annex (Bool, Verification)
 retrieve runner k af dest p =
@@ -44,23 +44,23 @@ adjustReadOnly r
 		}
 	| otherwise = r
 
-readonlyStoreKey :: Key -> AssociatedFile -> MeterUpdate -> Annex Bool
+readonlyStoreKey :: Key -> AssociatedFile -> MeterUpdate -> Annex ()
 readonlyStoreKey _ _ _ = readonlyFail
 
 readonlyRemoveKey :: Key -> Annex Bool
-readonlyRemoveKey _ = readonlyFail
+readonlyRemoveKey _ = readonlyFail'
 
 readonlyStorer :: Storer
 readonlyStorer _ _ _ = readonlyFail
 
 readonlyStoreExport :: FilePath -> Key -> ExportLocation -> MeterUpdate -> Annex Bool
-readonlyStoreExport _ _ _ _ = readonlyFail
+readonlyStoreExport _ _ _ _ = readonlyFail'
 
 readonlyRemoveExport :: Key -> ExportLocation -> Annex Bool
-readonlyRemoveExport _ _ = readonlyFail
+readonlyRemoveExport _ _ = readonlyFail'
 
 readonlyRemoveExportDirectory :: ExportDirectory -> Annex Bool
-readonlyRemoveExportDirectory _ = readonlyFail
+readonlyRemoveExportDirectory _ = readonlyFail'
 
 readonlyRenameExport :: Key -> ExportLocation -> ExportLocation -> Annex (Maybe Bool)
 readonlyRenameExport _ _ _ = return Nothing
@@ -70,10 +70,13 @@ readonlyStoreExportWithContentIdentifier _ _ _ _ _ =
 	return $ Left readonlyWarning
 
 readonlyRemoveExportWithContentIdentifier :: Key -> ExportLocation -> [ContentIdentifier] -> Annex Bool
-readonlyRemoveExportWithContentIdentifier _ _ _ = readonlyFail
+readonlyRemoveExportWithContentIdentifier _ _ _ = readonlyFail'
 
-readonlyFail :: Annex Bool
-readonlyFail = do
+readonlyFail :: Annex ()
+readonlyFail = giveup readonlyWarning
+
+readonlyFail' :: Annex Bool
+readonlyFail' = do
 	warning readonlyWarning
 	return False
@@ -90,7 +90,7 @@ mkRetrievalVerifiableKeysSecure gc
 
 -- A Storer that expects to be provided with a file containing
 -- the content of the key to store.
-fileStorer :: (Key -> FilePath -> MeterUpdate -> Annex Bool) -> Storer
+fileStorer :: (Key -> FilePath -> MeterUpdate -> Annex ()) -> Storer
 fileStorer a k (FileContent f) m = a k f m
 fileStorer a k (ByteContent b) m = withTmp k $ \f -> do
 	liftIO $ L.writeFile f b
@@ -98,7 +98,7 @@ fileStorer a k (ByteContent b) m = withTmp k $ \f -> do
 
 -- A Storer that expects to be provided with a L.ByteString of
 -- the content to store.
-byteStorer :: (Key -> L.ByteString -> MeterUpdate -> Annex Bool) -> Storer
+byteStorer :: (Key -> L.ByteString -> MeterUpdate -> Annex ()) -> Storer
 byteStorer a k c m = withBytes c $ \b -> a k b m
 
 -- A Retriever that writes the content of a Key to a provided file.
@@ -120,8 +120,8 @@ byteRetriever a k _m callback = a k (callback . ByteContent)
  - but they are never actually used (since specialRemote replaces them).
  - Here are some dummy ones.
  -}
-storeKeyDummy :: Key -> AssociatedFile -> MeterUpdate -> Annex Bool
-storeKeyDummy _ _ _ = return False
+storeKeyDummy :: Key -> AssociatedFile -> MeterUpdate -> Annex ()
+storeKeyDummy _ _ _ = error "missing storeKey implementation"
 retreiveKeyFileDummy :: Key -> AssociatedFile -> FilePath -> MeterUpdate -> Annex (Bool, Verification)
 retreiveKeyFileDummy _ _ _ _ = unVerified (return False)
 removeKeyDummy :: Key -> Annex Bool
@@ -208,7 +208,7 @@ specialRemote' cfg c storer retriever remover checkpresent baser = encr
 	safely a = catchNonAsync a (\e -> warning (show e) >> return False)
 
 	-- chunk, then encrypt, then feed to the storer
-	storeKeyGen k p enc = safely $ sendAnnex k rollback $ \src ->
+	storeKeyGen k p enc = sendAnnex k rollback $ \src ->
 		displayprogress p k (Just src) $ \p' ->
 			storeChunks (uuid baser) chunkconfig enck k src p'
 				(storechunk enc)
@@ -133,8 +133,17 @@ lookupHook hookname action = do
 	hook = annexConfig $ encodeBS' $ hookname ++ "-" ++ action ++ "-hook"
 	hookfallback = annexConfig $ encodeBS' $ hookname ++ "-hook"
 
-runHook :: HookName -> Action -> Key -> Maybe FilePath -> Annex Bool -> Annex Bool
-runHook hook action k f a = maybe (return False) run =<< lookupHook hook action
+runHook :: HookName -> Action -> Key -> Maybe FilePath -> Annex ()
+runHook hook action k f = lookupHook hook action >>= \case
+	Just command -> do
+		showOutput -- make way for hook output
+		environ <- liftIO (hookEnv action k f)
+		unlessM (progressCommandEnv "sh" [Param "-c", Param command] environ) $
+			giveup $ hook ++ " hook exited nonzero!"
+	Nothing -> giveup $ action ++ " hook misconfigured"
+
+runHook' :: HookName -> Action -> Key -> Maybe FilePath -> Annex Bool -> Annex Bool
+runHook' hook action k f a = maybe (return False) run =<< lookupHook hook action
   where
 	run command = do
 		showOutput -- make way for hook output
@@ -146,19 +155,18 @@ runHook hook action k f a = maybe (return False) run =<< lookupHook hook action
 		)
 
 store :: HookName -> Storer
-store h = fileStorer $ \k src _p ->
-	runHook h "store" k (Just src) $ return True
+store h = fileStorer $ \k src _p -> runHook h "store" k (Just src)
 
 retrieve :: HookName -> Retriever
 retrieve h = fileRetriever $ \d k _p ->
-	unlessM (runHook h "retrieve" k (Just d) $ return True) $
+	unlessM (runHook' h "retrieve" k (Just d) $ return True) $
 		giveup "failed to retrieve content"
 
 retrieveCheap :: HookName -> Key -> AssociatedFile -> FilePath -> Annex Bool
 retrieveCheap _ _ _ _ = return False
 
 remove :: HookName -> Remover
-remove h k = runHook h "remove" k Nothing $ return True
+remove h k = runHook' h "remove" k Nothing $ return True
 
 checkKey :: Git.Repo -> HookName -> CheckPresent
 checkKey r h k = do
@@ -201,7 +201,7 @@ rsyncSetup _ mu _ c gc = do
  - (When we have the right hash directory structure, we can just
  - pass --include=X --include=X/Y --include=X/Y/file --exclude=*)
  -}
-store :: RsyncOpts -> Key -> FilePath -> MeterUpdate -> Annex Bool
+store :: RsyncOpts -> Key -> FilePath -> MeterUpdate -> Annex ()
 store o k src meterupdate = storeGeneric o meterupdate basedest populatedest
   where
 	basedest = fromRawFilePath $ Prelude.head (keyPaths k)
@@ -216,8 +216,13 @@ store o k src meterupdate = storeGeneric o meterupdate basedest populatedest
  - object file, and has to be copied or hard linked into place. -}
 	canrename = isEncKey k || isChunkKey k
 
-storeGeneric :: RsyncOpts -> MeterUpdate -> FilePath -> (FilePath -> Annex Bool) -> Annex Bool
-storeGeneric o meterupdate basedest populatedest = withRsyncScratchDir $ \tmp -> do
+storeGeneric :: RsyncOpts -> MeterUpdate -> FilePath -> (FilePath -> Annex Bool) -> Annex ()
+storeGeneric o meterupdate basedest populatedest =
+	unlessM (storeGeneric' o meterupdate basedest populatedest) $
+		giveup "failed to rsync content"
+
+storeGeneric' :: RsyncOpts -> MeterUpdate -> FilePath -> (FilePath -> Annex Bool) -> Annex Bool
+storeGeneric' o meterupdate basedest populatedest = withRsyncScratchDir $ \tmp -> do
 	let dest = tmp </> basedest
 	createAnnexDirectory (parentDir dest)
 	ok <- populatedest dest
@@ -287,7 +292,7 @@ checkPresentGeneric o rsyncurls = do
 
 storeExportM :: RsyncOpts -> FilePath -> Key -> ExportLocation -> MeterUpdate -> Annex Bool
 storeExportM o src _k loc meterupdate =
-	storeGeneric o meterupdate basedest populatedest
+	storeGeneric' o meterupdate basedest populatedest
   where
 	basedest = fromRawFilePath (fromExportLocation loc)
 	populatedest = liftIO . createLinkOrCopy src
@@ -322,7 +322,6 @@ store mh r info magic = fileStorer $ \k f p -> withS3HandleOrFail (uuid r) mh $
 	-- Store public URL to item in Internet Archive.
 	when (isIA info && not (isChunkKey k)) $
 		setUrlPresent k (iaPublicUrl info (bucketObject info k))
-	return True
 
 storeHelper :: S3Info -> S3Handle -> Maybe Magic -> FilePath -> S3.Object -> MeterUpdate -> Annex (Maybe S3Etag, Maybe S3VersionID)
 storeHelper info h magic f object p = liftIO $ case partSize info of
@@ -135,11 +135,11 @@ tahoeSetup _ mu _ c _ = do
   where
 	missingfurl = giveup "Set TAHOE_FURL to the introducer furl to use."
 
-store :: RemoteStateHandle -> TahoeHandle -> Key -> AssociatedFile -> MeterUpdate -> Annex Bool
+store :: RemoteStateHandle -> TahoeHandle -> Key -> AssociatedFile -> MeterUpdate -> Annex ()
 store rs hdl k _f _p = sendAnnex k noop $ \src ->
 	parsePut <$> liftIO (readTahoe hdl "put" [File src]) >>= maybe
-		(return False)
-		(\cap -> storeCapability rs k cap >> return True)
+		(giveup "tahoe failed to store content")
+		(\cap -> storeCapability rs k cap)
 
 retrieve :: RemoteStateHandle -> TahoeHandle -> Key -> AssociatedFile -> FilePath -> MeterUpdate -> Annex (Bool, Verification)
 retrieve rs hdl k _f d _p = unVerified $ go =<< getCapability rs k
@@ -97,10 +97,8 @@ downloadKey key _af dest p = unVerified $ get =<< getWebUrls key
 downloadKeyCheap :: Key -> AssociatedFile -> FilePath -> Annex Bool
 downloadKeyCheap _ _ _ = return False
 
-uploadKey :: Key -> AssociatedFile -> MeterUpdate -> Annex Bool
-uploadKey _ _ _ = do
-	warning "upload to web not supported"
-	return False
+uploadKey :: Key -> AssociatedFile -> MeterUpdate -> Annex ()
+uploadKey _ _ _ = giveup "upload to web not supported"
 
 dropKey :: Key -> Annex Bool
 dropKey k = do
@@ -139,18 +139,13 @@ webdavSetup _ mu mcreds c gc = do
 
 store :: DavHandleVar -> ChunkConfig -> Storer
 store hv (LegacyChunks chunksize) = fileStorer $ \k f p ->
-	withDavHandle hv $ \case
-		Nothing -> return False
-		Just dav -> liftIO $
-			withMeteredFile f p $ storeLegacyChunked chunksize k dav
+	withDavHandle hv $ \dav -> liftIO $
+		withMeteredFile f p $ storeLegacyChunked chunksize k dav
 store hv _ = httpStorer $ \k reqbody ->
-	withDavHandle hv $ \case
-		Nothing -> return False
-		Just dav -> liftIO $ goDAV dav $ do
-			let tmp = keyTmpLocation k
-			let dest = keyLocation k
-			storeHelper dav tmp dest reqbody
-			return True
+	withDavHandle hv $ \dav -> liftIO $ goDAV dav $ do
+		let tmp = keyTmpLocation k
+		let dest = keyLocation k
+		storeHelper dav tmp dest reqbody
 
 storeHelper :: DavHandle -> DavLocation -> DavLocation -> RequestBody -> DAVT IO ()
 storeHelper dav tmp dest reqbody = do
@@ -172,12 +167,10 @@ retrieveCheap _ _ _ = return False
 
 retrieve :: DavHandleVar -> ChunkConfig -> Retriever
 retrieve hv cc = fileRetriever $ \d k p ->
-	withDavHandle hv $ \case
-		Nothing -> giveup "unable to connect"
-		Just dav -> case cc of
-			LegacyChunks _ -> retrieveLegacyChunked d k p dav
-			_ -> liftIO $
-				goDAV dav $ retrieveHelper (keyLocation k) d p
+	withDavHandle hv $ \dav -> case cc of
+		LegacyChunks _ -> retrieveLegacyChunked d k p dav
+		_ -> liftIO $
+			goDAV dav $ retrieveHelper (keyLocation k) d p
 
 retrieveHelper :: DavLocation -> FilePath -> MeterUpdate -> DAVT IO ()
 retrieveHelper loc d p = do
@@ -186,9 +179,9 @@ retrieveHelper loc d p = do
 	withContentM $ httpBodyRetriever d p
 
 remove :: DavHandleVar -> Remover
-remove hv k = withDavHandle hv $ \case
-	Nothing -> return False
-	Just dav -> liftIO $ goDAV dav $
+remove hv k = withDavHandle' hv $ \case
+	Left _e -> return False
+	Right dav -> liftIO $ goDAV dav $
 		-- Delete the key's whole directory, including any
 		-- legacy chunked files, etc, in a single action.
 		removeHelper (keyDir k)
@@ -206,20 +199,18 @@ removeHelper d = do
 		_ -> return False
 
 checkKey :: DavHandleVar -> Remote -> ChunkConfig -> CheckPresent
-checkKey hv r chunkconfig k = withDavHandle hv $ \case
-	Nothing -> giveup $ name r ++ " not configured"
-	Just dav -> do
-		showChecking r
-		case chunkconfig of
-			LegacyChunks _ -> checkKeyLegacyChunked dav k
-			_ -> do
-				v <- liftIO $ goDAV dav $
-					existsDAV (keyLocation k)
-				either giveup return v
+checkKey hv r chunkconfig k = withDavHandle hv $ \dav -> do
+	showChecking r
+	case chunkconfig of
+		LegacyChunks _ -> checkKeyLegacyChunked dav k
+		_ -> do
+			v <- liftIO $ goDAV dav $
+				existsDAV (keyLocation k)
+			either giveup return v
 
 storeExportDav :: DavHandleVar -> FilePath -> Key -> ExportLocation -> MeterUpdate -> Annex Bool
 storeExportDav hdl f k loc p = case exportLocation loc of
-	Right dest -> withDavHandle hdl $ \mh -> runExport mh $ \dav -> do
+	Right dest -> withDavHandle' hdl $ \mh -> runExport mh $ \dav -> do
 		reqbody <- liftIO $ httpBodyStorer f p
 		storeHelper dav (keyTmpLocation k) dest reqbody
 		return True
@@ -229,23 +220,23 @@ storeExportDav hdl f k loc p = case exportLocation loc of
 
 retrieveExportDav :: DavHandleVar -> Key -> ExportLocation -> FilePath -> MeterUpdate -> Annex Bool
 retrieveExportDav hdl _k loc d p = case exportLocation loc of
-	Right src -> withDavHandle hdl $ \mh -> runExport mh $ \_dav -> do
+	Right src -> withDavHandle' hdl $ \mh -> runExport mh $ \_dav -> do
 		retrieveHelper src d p
 		return True
 	Left _err -> return False
 
 checkPresentExportDav :: DavHandleVar -> Remote -> Key -> ExportLocation -> Annex Bool
-checkPresentExportDav hdl r _k loc = case exportLocation loc of
-	Right p -> withDavHandle hdl $ \case
-		Nothing -> giveup $ name r ++ " not configured"
-		Just h -> liftIO $ do
+checkPresentExportDav hdl _ _k loc = case exportLocation loc of
+	Right p -> withDavHandle' hdl $ \case
+		Left e -> giveup e
+		Right h -> liftIO $ do
 			v <- goDAV h $ existsDAV p
 			either giveup return v
 	Left err -> giveup err
 
 removeExportDav :: DavHandleVar-> Key -> ExportLocation -> Annex Bool
 removeExportDav hdl _k loc = case exportLocation loc of
-	Right p -> withDavHandle hdl $ \mh -> runExport mh $ \_dav ->
+	Right p -> withDavHandle' hdl $ \mh -> runExport mh $ \_dav ->
 		removeHelper p
 	-- When the exportLocation is not legal for webdav,
 	-- the content is certianly not stored there, so it's ok for
@@ -255,7 +246,7 @@ removeExportDav hdl _k loc = case exportLocation loc of
 	Left _err -> return True
 
 removeExportDirectoryDav :: DavHandleVar -> ExportDirectory -> Annex Bool
-removeExportDirectoryDav hdl dir = withDavHandle hdl $ \mh -> runExport mh $ \_dav -> do
+removeExportDirectoryDav hdl dir = withDavHandle' hdl $ \mh -> runExport mh $ \_dav -> do
 	let d = fromRawFilePath $ fromExportDirectory dir
 	debugDav $ "delContent " ++ d
 	safely (inLocation d delContentM)
@@ -263,23 +254,23 @@ removeExportDirectoryDav hdl dir = withDavHandle hdl $ \mh -> runExport mh $ \_d
 
 renameExportDav :: DavHandleVar -> Key -> ExportLocation -> ExportLocation -> Annex (Maybe Bool)
 renameExportDav hdl _k src dest = case (exportLocation src, exportLocation dest) of
-	(Right srcl, Right destl) -> withDavHandle hdl $ \case
-		Just h
+	(Right srcl, Right destl) -> withDavHandle' hdl $ \case
+		Right h
 			-- box.com's DAV endpoint has buggy handling of renames,
 			-- so avoid renaming when using it.
 			| boxComUrl `isPrefixOf` baseURL h -> return Nothing
 			| otherwise -> do
-				v <- runExport (Just h) $ \dav -> do
+				v <- runExport (Right h) $ \dav -> do
 					maybe noop (void . mkColRecursive) (locationParent destl)
 					moveDAV (baseURL dav) srcl destl
 					return True
 				return (Just v)
-		Nothing -> return (Just False)
+		Left _e -> return (Just False)
 	_ -> return (Just False)
 
-runExport :: Maybe DavHandle -> (DavHandle -> DAVT IO Bool) -> Annex Bool
-runExport Nothing _ = return False
-runExport (Just h) a = fromMaybe False <$> liftIO (goDAV h $ safely (a h))
+runExport :: Either String DavHandle -> (DavHandle -> DAVT IO Bool) -> Annex Bool
+runExport (Left _e) _ = return False
+runExport (Right h) a = fromMaybe False <$> liftIO (goDAV h $ safely (a h))
 
 configUrl :: ParsedRemoteConfig -> Maybe URLString
 configUrl c = fixup <$> getRemoteConfigValue urlField c
@@ -418,7 +409,7 @@ choke f = do
 
 data DavHandle = DavHandle DAVContext DavUser DavPass URLString
 
-type DavHandleVar = TVar (Either (Annex (Maybe DavHandle)) (Maybe DavHandle))
+type DavHandleVar = TVar (Either (Annex (Either String DavHandle)) (Either String DavHandle))
 
 {- Prepares a DavHandle for later use. Does not connect to the server or do
  - anything else expensive. -}
@@ -429,11 +420,19 @@ mkDavHandleVar c gc u = liftIO $ newTVarIO $ Left $ do
 		(Just (user, pass), Just baseurl) -> do
 			ctx <- mkDAVContext baseurl
 			let h = DavHandle ctx (toDavUser user) (toDavPass pass) baseurl
-			return (Just h)
-		_ -> return Nothing
+			return (Right h)
+		_ -> return $ Left "webdav credentials not available"
 
-withDavHandle :: DavHandleVar -> (Maybe DavHandle -> Annex a) -> Annex a
+withDavHandle :: DavHandleVar -> (DavHandle -> Annex a) -> Annex a
 withDavHandle hv a = liftIO (readTVarIO hv) >>= \case
+	Right hdl -> either giveup a hdl
+	Left mkhdl -> do
+		hdl <- mkhdl
+		liftIO $ atomically $ writeTVar hv (Right hdl)
+		either giveup a hdl
+
+withDavHandle' :: DavHandleVar -> (Either String DavHandle -> Annex a) -> Annex a
+withDavHandle' hv a = liftIO (readTVarIO hv) >>= \case
 	Right hdl -> a hdl
 	Left mkhdl -> do
 		hdl <- mkhdl
@@ -472,7 +471,7 @@ prepDAV user pass = do
 -- Legacy chunking code, to be removed eventually.
 --
 
-storeLegacyChunked :: ChunkSize -> Key -> DavHandle -> L.ByteString -> IO Bool
+storeLegacyChunked :: ChunkSize -> Key -> DavHandle -> L.ByteString -> IO ()
 storeLegacyChunked chunksize k dav b =
 	Legacy.storeChunks k tmp dest storer recorder finalizer
   where
@@ -84,7 +84,8 @@ data RemoteA a = Remote
 	-- Transfers a key's contents from disk to the remote.
 	-- The key should not appear to be present on the remote until
 	-- all of its contents have been transferred.
-	, storeKey :: Key -> AssociatedFile -> MeterUpdate -> a Bool
+	-- Throws exception on failure.
+	, storeKey :: Key -> AssociatedFile -> MeterUpdate -> a ()
 	-- Retrieves a key's contents to a file.
 	-- (The MeterUpdate does not need to be used if it writes
 	-- sequentially to the file.)
@@ -23,7 +23,7 @@ isByteContent (FileContent _) = False
 
 -- Action that stores a Key's content on a remote.
 -- Can throw exceptions.
-type Storer = Key -> ContentSource -> MeterUpdate -> Annex Bool
+type Storer = Key -> ContentSource -> MeterUpdate -> Annex ()
 
 -- Action that retrieves a Key's content from a remote, passing it to a
 -- callback, which will fully consume the content before returning.
@@ -13,3 +13,6 @@ I am not yet sure what exact combinations of dots, spaces and maybe dashes cause
 ### What version of git-annex are you using? On what operating system?
 
 git-annex version: 8.20200309-05df404212, Debian testing
+
+[[!meta title="change exception handling of remotes to avoid ever failing
+without telling the reason why"]]
@@ -0,0 +1,12 @@
+[[!comment format=mdwn
+ username="joey"
+ subject="""comment 3"""
+ date="2020-05-13T17:58:49Z"
+ content="""
+Converted the storeKey method to throw exceptions. This was a 1000 line
+patch, 3 hours of work. Seems likely it will take 24 hours work to finish
+converting all the methods..
+
+There were quite a few places where it used to return False without
+displaying a reason for the failure, so the work seems worth it.
+"""]]