directory special remote: Made more efficient and robust.
Files are now written to a tmp directory in the remote, and once all chunks are written the whole thing is moved into its final place atomically. For now, checkpresent still checks every single chunk of a file, because the old method could leave partially transferred files with some chunks present and others not.
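The finalize step described here is the standard write-to-tmp-then-rename pattern. A rough, self-contained sketch of that pattern follows (this is not the git-annex code itself; finalize and the paths are illustrative, while the real code builds its paths with the tmpDir and storeDir helpers added in the diff below):

import Control.Monad (when)
import System.Directory
    ( createDirectoryIfMissing
    , doesDirectoryExist
    , removeDirectoryRecursive
    , renameDirectory
    )
import System.FilePath (takeDirectory)

-- Move a fully written tmp directory into its final place. Leftover partial
-- content at the destination is cleared first; the rename itself is a single
-- step on the same filesystem, so the key's directory is either absent or
-- complete, never half-transferred.
finalize :: FilePath -> FilePath -> IO ()
finalize tmp dest = do
    exists <- doesDirectoryExist dest
    when exists $ removeDirectoryRecursive dest
    createDirectoryIfMissing True (takeDirectory dest)
    renameDirectory tmp dest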
This commit is contained in:
parent
d3dfeeb3d9
commit
5f977cc725
5 changed files with 47 additions and 42 deletions
@@ -12,7 +12,6 @@ import Command
import Annex.Content
import Utility.Rsync
import Logs.Transfer
import Types.Remote
import qualified Fields

def :: [Command]
@@ -48,7 +48,7 @@ gen r u c = do
         storeKey = store dir chunksize,
         retrieveKeyFile = retrieve dir chunksize,
         retrieveKeyFileCheap = retrieveCheap dir chunksize,
-        removeKey = remove dir chunksize,
+        removeKey = remove dir,
         hasKey = checkPresent dir chunksize,
         hasKeyCheap = True,
         whereisKey = Nothing,
@@ -73,10 +73,19 @@ directorySetup u c = do
     gitConfigSpecialRemote u c' "directory" dir
     return $ M.delete "directory" c'
 
-{- Locations to try to access a given Key in the Directory. -}
+{- Locations to try to access a given Key in the Directory.
+ - We try more than one since we used to write to different hash directories. -}
 locations :: FilePath -> Key -> [FilePath]
 locations d k = map (d </>) (keyPaths k)
 
+{- Directory where the file(s) for a key are stored. -}
+storeDir :: FilePath -> Key -> FilePath
+storeDir d k = addTrailingPathSeparator $ d </> hashDirLower k </> keyFile k
+
+{- Where we store temporary data for a key as it's being uploaded. -}
+tmpDir :: FilePath -> Key -> FilePath
+tmpDir d k = addTrailingPathSeparator $ d </> "tmp" </> keyFile k
+
 withCheckedFiles :: (FilePath -> IO Bool) -> ChunkSize -> FilePath -> Key -> ([FilePath] -> IO Bool) -> IO Bool
 withCheckedFiles _ _ [] _ _ = return False
 withCheckedFiles check Nothing d k a = go $ locations d k
@@ -159,18 +168,22 @@ storeSplit' meterupdate chunksize (d:dests) bs c = do
 storeHelper :: FilePath -> ChunkSize -> Key -> ([FilePath] -> IO [FilePath]) -> Annex Bool
 storeHelper d chunksize key storer = check <&&> go
     where
-        basedest = Prelude.head $ locations d key
-        dir = parentDir basedest
+        tmpdir = tmpDir d key
+        destdir = storeDir d key
         {- The size is not exactly known when encrypting the key;
          - this assumes that at least the size of the key is
          - needed as free space. -}
-        check = checkDiskSpace (Just dir) key 0
-        go = liftIO $ catchBoolIO $ do
-            createDirectoryIfMissing True dir
-            allowWrite dir
-            preventWrite dir `after` storeChunks basedest chunksize storer recorder finalizer
-        finalizer f dest = do
-            renameFile f dest
+        check = do
+            liftIO $ createDirectoryIfMissing True tmpdir
+            checkDiskSpace (Just tmpdir) key 0
+        go = liftIO $ catchBoolIO $
+            storeChunks key tmpdir destdir chunksize storer recorder finalizer
+        finalizer tmp dest = do
+            void $ tryIO $ allowWrite dest -- may already exist
+            void $ tryIO $ removeDirectoryRecursive dest -- or not exist
+            createDirectoryIfMissing True (parentDir dest)
+            renameDirectory tmp dest
+            mapM_ preventWrite =<< dirContents dest
+            preventWrite dest
         recorder f s = do
             void $ tryIO $ allowWrite f
@@ -201,16 +214,13 @@ retrieveCheap d _ k f = liftIO $ withStoredFiles Nothing d k go
     go [file] = catchBoolIO $ createSymbolicLink file f >> return True
     go _files = return False
 
-remove :: FilePath -> ChunkSize -> Key -> Annex Bool
-remove d chunksize k = liftIO $ withStoredFiles chunksize d k go
+remove :: FilePath -> Key -> Annex Bool
+remove d k = liftIO $ catchBoolIO $ do
+    allowWrite dir
+    removeDirectoryRecursive dir
+    return True
     where
-        go = all id <$$> mapM removefile
-        removefile file = catchBoolIO $ do
-            let dir = parentDir file
-            allowWrite dir
-            removeFile file
-            _ <- tryIO $ removeDirectory dir
-            return True
+        dir = storeDir d k
 
 checkPresent :: FilePath -> ChunkSize -> Key -> Annex (Either String Bool)
 checkPresent d chunksize k = liftIO $ catchMsgIO $ withStoredFiles chunksize d k $
@@ -58,33 +58,28 @@ chunkStream = map (\n -> ".chunk" ++ show n) [1 :: Integer ..]
 {- Given the base destination to use to store a value,
  - generates a stream of temporary destinations (just one when not chunking)
  - and passes it to an action, which should chunk and store the data,
- - and return the destinations it stored to, or [] on error.
- -
- - Then calles the finalizer to rename the temporary destinations into
- - their final places (and do any other cleanup), and writes the chunk count
- - (if chunking)
+ - and return the destinations it stored to, or [] on error. Then
+ - calls the storer to write the chunk count (if chunking). Finally, the
+ - finalizer is called to rename the tmp into the dest
+ - (and do any other cleanup).
 -}
-storeChunks :: FilePath -> ChunkSize -> ([FilePath] -> IO [FilePath]) -> (FilePath -> String -> IO ()) -> (FilePath -> FilePath -> IO ()) -> IO Bool
-storeChunks basedest chunksize storer recorder finalizer =
+storeChunks :: Key -> FilePath -> FilePath -> ChunkSize -> ([FilePath] -> IO [FilePath]) -> (FilePath -> String -> IO ()) -> (FilePath -> FilePath -> IO ()) -> IO Bool
+storeChunks key tmp dest chunksize storer recorder finalizer =
     either (const $ return False) return
         =<< (E.try go :: IO (Either E.SomeException Bool))
     where
         go = do
             stored <- storer tmpdests
-            forM_ stored $ \d -> do
-                let dest = detmpprefix d
-                finalizer d dest
             when (chunksize /= Nothing) $ do
-                let chunkcount = basedest ++ chunkCount
+                let chunkcount = basef ++ chunkCount
                 recorder chunkcount (show $ length stored)
+            finalizer tmp dest
             return (not $ null stored)
 
-        tmpprefix = ".tmp"
-        detmpprefix f = take (length f - tmpprefixlen) f
-        tmpprefixlen = length tmpprefix
+        basef = tmp ++ keyFile key
         tmpdests
-            | chunksize == Nothing = [basedest ++ tmpprefix]
-            | otherwise = map (++ tmpprefix) $ map (basedest ++) chunkStream
+            | chunksize == Nothing = [basef]
+            | otherwise = map (basef ++) chunkStream
 
 {- Given a list of destinations to use, chunks the data according to the
  - ChunkSize, and runs the storer action to store each chunk. Returns
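For orientation, the tmp-destination naming scheme the new storeChunks relies on can be sketched in a self-contained way (this is not git-annex code: a plain String stands in for the key's file name, and the ".chunkcount" suffix is only an assumption for the example):

chunkStream :: [String]
chunkStream = map (\n -> ".chunk" ++ show n) [1 :: Integer ..]

chunkCount :: String
chunkCount = ".chunkcount"  -- assumed suffix, for illustration only

-- Destinations to write to under the tmp directory: one file when not
-- chunking, otherwise a numbered file per chunk.
tmpDests :: FilePath -> Maybe Integer -> [FilePath]
tmpDests basef chunksize = case chunksize of
    Nothing -> [basef]
    Just _ -> map (basef ++) chunkStream

main :: IO ()
main = do
    let basef = "tmp/KEY/KEY"  -- hypothetical key file under the remote's tmp dir
    mapM_ putStrLn $ take 3 $ tmpDests basef (Just 10240)
    putStrLn $ basef ++ chunkCount  -- where the chunk count gets recorded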
@@ -90,7 +90,7 @@ store r k _f p = metered (Just p) k $ \meterupdate ->
     let url = davLocation baseurl k
     f <- inRepo $ gitAnnexLocation k
     liftIO $ withMeteredFile f meterupdate $
-        storeHelper r url user pass
+        storeHelper r k url user pass
 
 storeEncrypted :: Remote -> (Cipher, Key) -> Key -> MeterUpdate -> Annex Bool
 storeEncrypted r (cipher, enck) k p = metered (Just p) k $ \meterupdate ->

@@ -98,12 +98,12 @@ storeEncrypted r (cipher, enck) k p = metered (Just p) k $ \meterupdate ->
     let url = davLocation baseurl enck
     f <- inRepo $ gitAnnexLocation k
     liftIO $ encrypt cipher (streamMeteredFile f meterupdate) $
-        readBytes $ storeHelper r url user pass
+        readBytes $ storeHelper r enck url user pass
 
-storeHelper :: Remote -> DavUrl -> DavUser -> DavPass -> L.ByteString -> IO Bool
-storeHelper r urlbase user pass b = catchBoolIO $ do
+storeHelper :: Remote -> Key -> DavUrl -> DavUser -> DavPass -> L.ByteString -> IO Bool
+storeHelper r k urlbase user pass b = catchBoolIO $ do
     davMkdir (urlParent urlbase) user pass
-    storeChunks urlbase chunksize storer recorder finalizer
+    storeChunks k undefined undefined chunksize storer recorder finalizer
     where
         chunksize = chunkSize $ config r
         storer urls = storeChunked chunksize urls storehttp b
debian/changelog
@@ -10,6 +10,7 @@ git-annex (3.20121113) UNRELEASED; urgency=low
   * Getting a file from chunked directory special remotes no longer buffers
     it all in memory.
   * S3: Added progress display for uploading and downloading.
+  * directory special remote: Made more efficient and robust.
 
  -- Joey Hess <joeyh@debian.org>  Tue, 13 Nov 2012 13:17:07 -0400