convert getFileSize to RawFilePath
Lots of nice wins from this in avoiding unnecessary work, and I think nothing got slower.

This commit was sponsored by Boyd Stephen Smith Jr. on Patreon.
parent 2670af9d5a
commit 9b0dde834e
34 changed files with 79 additions and 70 deletions
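The gist of the change, as a minimal hedged sketch rather than a literal excerpt: getFileSize now takes a RawFilePath (a ByteString-based path), so callers that already hold one stop round-tripping through String, and callers that only have a FilePath convert once at the boundary with toRawFilePath. The helper name sizeOfTmpObject is illustrative only; fromRepo, gitAnnexTmpObjectLocation, catchDefaultIO, getFileSize, and FileSize are the functions and type used in the hunks below, and the usual git-annex imports (Annex.Common, Utility.FileSize) are assumed to be in scope.

-- Illustrative sketch, not code from this commit.
sizeOfTmpObject :: Key -> Annex FileSize
sizeOfTmpObject key = do
	tmp <- fromRepo (gitAnnexTmpObjectLocation key)  -- already a RawFilePath
	liftIO $ catchDefaultIO 0 $ getFileSize tmp      -- no fromRawFilePath round trip

-- A caller that only has a FilePath converts once at the boundary instead:
--	liftIO $ getFileSize (toRawFilePath somepath)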
@@ -363,7 +363,7 @@ inodeMap getfiles = do
 	let f' = fromRawFilePath f
 	if isSymbolicLink s
 		then pure $ Just (Left f', f')
-		else withTSDelta (\d -> liftIO $ toInodeCache d f' s)
+		else withTSDelta (\d -> liftIO $ toInodeCache d f s)
 			>>= return . \case
 				Just i -> Just (Right (inodeCacheToKey Strongly i), f')
 				Nothing -> Nothing
@@ -323,7 +323,7 @@ getViaTmpFromDisk rsp v key action = checkallowed $ do
 				_ -> MustVerify
 			else verification
 	if ok
-		then ifM (verifyKeyContent rsp v verification' key (fromRawFilePath tmpfile))
+		then ifM (verifyKeyContent rsp v verification' key tmpfile)
 			( ifM (pruneTmpWorkDirBefore tmpfile (moveAnnex key))
 				( do
 					logStatus key InfoPresent
@@ -373,7 +373,7 @@ getViaTmpFromDisk rsp v key action = checkallowed $ do
 - If the RetrievalSecurityPolicy requires verification and the key's
 - backend doesn't support it, the verification will fail.
 -}
-verifyKeyContent :: RetrievalSecurityPolicy -> VerifyConfig -> Verification -> Key -> FilePath -> Annex Bool
+verifyKeyContent :: RetrievalSecurityPolicy -> VerifyConfig -> Verification -> Key -> RawFilePath -> Annex Bool
 verifyKeyContent rsp v verification k f = case (rsp, verification) of
 	(_, Verified) -> return True
 	(RetrievalVerifiableKeysSecure, _) -> ifM (Backend.isVerifiable k)
@@ -434,16 +434,17 @@ shouldVerify (RemoteVerify r) =
 -}
 checkDiskSpaceToGet :: Key -> a -> Annex a -> Annex a
 checkDiskSpaceToGet key unabletoget getkey = do
-	tmp <- fromRawFilePath <$> fromRepo (gitAnnexTmpObjectLocation key)
+	tmp <- fromRepo (gitAnnexTmpObjectLocation key)
+	let tmp' = fromRawFilePath tmp
 
-	e <- liftIO $ doesFileExist tmp
+	e <- liftIO $ doesFileExist tmp'
 	alreadythere <- liftIO $ if e
 		then getFileSize tmp
 		else return 0
 	ifM (checkDiskSpace Nothing key alreadythere True)
 		( do
 			-- The tmp file may not have been left writable
-			when e $ thawContent tmp
+			when e $ thawContent tmp'
 			getkey
 		, return unabletoget
 		)
@@ -703,7 +704,7 @@ isUnmodified key f = go =<< geti
   where
 	go Nothing = return False
 	go (Just fc) = isUnmodifiedCheap' key fc <||> expensivecheck fc
-	expensivecheck fc = ifM (verifyKeyContent RetrievalAllKeysSecure AlwaysVerify UnVerified key (fromRawFilePath f))
+	expensivecheck fc = ifM (verifyKeyContent RetrievalAllKeysSecure AlwaysVerify UnVerified key f)
 		( do
 			-- The file could have been modified while it was
 			-- being verified. Detect that.
@@ -164,7 +164,7 @@ ingest' preferredbackend meterupdate (Just (LockedDown cfg source)) mk restage =
 		Just k -> return k
 	let src = contentLocation source
 	ms <- liftIO $ catchMaybeIO $ R.getFileStatus src
-	mcache <- maybe (pure Nothing) (liftIO . toInodeCache delta (fromRawFilePath src)) ms
+	mcache <- maybe (pure Nothing) (liftIO . toInodeCache delta src) ms
 	case (mcache, inodeCache source) of
 		(_, Nothing) -> go k mcache ms
 		(Just newc, Just c) | compareStrong c newc -> go k mcache ms
@@ -166,7 +166,7 @@ runTransfer' ignorelock t afile retrydecider transferaction = enteringStage Tran
 			liftIO $ readMVar metervar
 		| otherwise = do
 			f <- fromRepo $ gitAnnexTmpObjectLocation (transferKey t)
-			liftIO $ catchDefaultIO 0 $ getFileSize (fromRawFilePath f)
+			liftIO $ catchDefaultIO 0 $ getFileSize f
 
 {- Avoid download and upload of keys with insecure content when
 - annex.securehashesonly is configured.
@@ -123,7 +123,7 @@ youtubeDlMaxSize workdir = ifM (Annex.getState Annex.force)
 		Just have -> do
 			inprogress <- sizeOfDownloadsInProgress (const True)
 			partial <- liftIO $ sum
-				<$> (mapM getFileSize =<< dirContents workdir)
+				<$> (mapM (getFileSize . toRawFilePath) =<< dirContents workdir)
 			reserve <- annexDiskReserve <$> Annex.getGitConfig
 			let maxsize = have - reserve - inprogress + partial
 			if maxsize > 0
@@ -140,7 +140,8 @@ repairStaleGitLocks r = do
 repairStaleLocks :: [FilePath] -> Assistant ()
 repairStaleLocks lockfiles = go =<< getsizes
   where
-	getsize lf = catchMaybeIO $ (\s -> (lf, s)) <$> getFileSize lf
+	getsize lf = catchMaybeIO $ (\s -> (lf, s))
+		<$> getFileSize (toRawFilePath lf)
 	getsizes = liftIO $ catMaybes <$> mapM getsize lockfiles
 	go [] = return ()
 	go l = ifM (liftIO $ null <$> Lsof.query ("--" : map fst l))
@@ -223,7 +223,7 @@ checkLogSize :: Int -> Assistant ()
 checkLogSize n = do
 	f <- liftAnnex $ fromRawFilePath <$> fromRepo gitAnnexDaemonLogFile
 	logs <- liftIO $ listLogs f
-	totalsize <- liftIO $ sum <$> mapM getFileSize logs
+	totalsize <- liftIO $ sum <$> mapM (getFileSize . toRawFilePath) logs
 	when (totalsize > 2 * oneMegabyte) $ do
 		notice ["Rotated logs due to size:", show totalsize]
 		liftIO $ openLog f >>= handleToFd >>= redirLog
@@ -37,7 +37,7 @@ transferPollerThread = namedThread "TransferPoller" $ do
 	 - temp file being used for the transfer. -}
 	| transferDirection t == Download = do
 		let f = gitAnnexTmpObjectLocation (transferKey t) g
-		sz <- liftIO $ catchMaybeIO $ getFileSize (fromRawFilePath f)
+		sz <- liftIO $ catchMaybeIO $ getFileSize f
 		newsize t info sz
 	{- Uploads don't need to be polled for when the TransferWatcher
 	 - thread can track file modifications. -}
@@ -218,7 +218,8 @@ onAddUnlocked symlinkssupported matcher f fs = do
 		=<< inRepo (toTopFilePath (toRawFilePath file))
 	samefilestatus key file status = do
 		cache <- Database.Keys.getInodeCaches key
-		curr <- withTSDelta $ \delta -> liftIO $ toInodeCache delta file status
+		curr <- withTSDelta $ \delta ->
+			liftIO $ toInodeCache delta (toRawFilePath file) status
 		case (cache, curr) of
 			(_, Just c) -> elemInodeCaches c cache
 			([], Nothing) -> return True
@@ -113,16 +113,16 @@ distributionDownloadComplete d dest cleanup t
 	| transferDirection t == Download = do
 		debug ["finished downloading git-annex distribution"]
 		maybe (failedupgrade "bad download") go
-			=<< liftAnnex (withObjectLoc k (fsckit . fromRawFilePath))
+			=<< liftAnnex (withObjectLoc k fsckit)
 	| otherwise = cleanup
   where
 	k = mkKey $ const $ distributionKey d
 	fsckit f = Backend.maybeLookupBackendVariety (fromKey keyVariety k) >>= \case
-		Nothing -> return $ Just f
+		Nothing -> return $ Just (fromRawFilePath f)
 		Just b -> case Types.Backend.verifyKeyContent b of
-			Nothing -> return $ Just f
+			Nothing -> return $ Just (fromRawFilePath f)
 			Just verifier -> ifM (verifier k f)
-				( return $ Just f
+				( return $ Just (fromRawFilePath f)
 				, return Nothing
 				)
 	go f = do
@@ -102,12 +102,12 @@ genKeyExternal ebname hasext ks meterupdate =
 		return $ GetNextMessage go
 	go _ = Nothing
 
-verifyKeyContentExternal :: ExternalBackendName -> HasExt -> MeterUpdate -> Key -> FilePath -> Annex Bool
+verifyKeyContentExternal :: ExternalBackendName -> HasExt -> MeterUpdate -> Key -> RawFilePath -> Annex Bool
 verifyKeyContentExternal ebname hasext meterupdate k f =
 	withExternalState ebname hasext $ \st ->
 		handleRequest st req notavail go
   where
-	req = VERIFYKEYCONTENT (toProtoKey k) f
+	req = VERIFYKEYCONTENT (toProtoKey k) (fromRawFilePath f)
 
 	-- This should not be able to happen, because CANVERIFY is checked
 	-- before this function is enable, and so the external program
@@ -15,12 +15,13 @@ module Backend.Hash (
 
 import Annex.Common
 import qualified Annex
+import Backend.Utilities
 import Types.Key
 import Types.Backend
 import Types.KeySource
 import Utility.Hash
 import Utility.Metered
-import Backend.Utilities
+import qualified Utility.RawFilePath as R
 
 import qualified Data.ByteString as S
 import qualified Data.ByteString.Char8 as S8
@@ -100,7 +101,7 @@ hashKeyVariety (Blake2spHash size) he = Blake2spKey size he
 {- A key is a hash of its contents. -}
 keyValue :: Hash -> KeySource -> MeterUpdate -> Annex Key
 keyValue hash source meterupdate = do
-	let file = fromRawFilePath (contentLocation source)
+	let file = contentLocation source
 	filesize <- liftIO $ getFileSize file
 	s <- hashFile hash file meterupdate
 	return $ mkKey $ \k -> k
@@ -117,10 +118,10 @@ keyValueE hash source meterupdate =
 
 {- A key's checksum is checked during fsck when it's content is present
 - except for in fast mode. -}
-checkKeyChecksum :: Hash -> Key -> FilePath -> Annex Bool
+checkKeyChecksum :: Hash -> Key -> RawFilePath -> Annex Bool
 checkKeyChecksum hash key file = catchIOErrorType HardwareFault hwfault $ do
 	fast <- Annex.getState Annex.fast
-	exists <- liftIO $ doesFileExist file
+	exists <- liftIO $ R.doesPathExist file
 	case (exists, fast) of
 		(True, False) -> do
 			showAction "checksum"
@@ -191,9 +192,9 @@ trivialMigrate' oldkey newbackend afile maxextlen
 	oldvariety = fromKey keyVariety oldkey
 	newvariety = backendVariety newbackend
 
-hashFile :: Hash -> FilePath -> MeterUpdate -> Annex String
+hashFile :: Hash -> RawFilePath -> MeterUpdate -> Annex String
 hashFile hash file meterupdate =
-	liftIO $ withMeteredFile file meterupdate $ \b -> do
+	liftIO $ withMeteredFile (fromRawFilePath file) meterupdate $ \b -> do
 		let h = hasher b
 		-- Force full evaluation of hash so whole file is read
 		-- before returning.
@@ -39,7 +39,7 @@ keyValue :: KeySource -> MeterUpdate -> Annex Key
 keyValue source _ = do
 	let f = contentLocation source
 	stat <- liftIO $ R.getFileStatus f
-	sz <- liftIO $ getFileSize' (fromRawFilePath f) stat
+	sz <- liftIO $ getFileSize' f stat
 	relf <- fromRawFilePath . getTopFilePath
 		<$> inRepo (toTopFilePath $ keyFilename source)
 	return $ mkKey $ \k -> k
@@ -384,7 +384,7 @@ checkKeySizeOr :: (Key -> Annex String) -> Key -> RawFilePath -> ActionItem -> A
 checkKeySizeOr bad key file ai = case fromKey keySize key of
 	Nothing -> return True
 	Just size -> do
-		size' <- liftIO $ getFileSize (fromRawFilePath file)
+		size' <- liftIO $ getFileSize file
 		comparesizes size size'
   where
 	comparesizes a b = do
@@ -461,7 +461,7 @@ checkBackendOr' bad backend key file ai postcheck =
 	case Types.Backend.verifyKeyContent backend of
 		Nothing -> return True
 		Just verifier -> do
-			ok <- verifier key (fromRawFilePath file)
+			ok <- verifier key file
 			ifM postcheck
 				( do
 					unless ok $ do
@@ -676,8 +676,8 @@ staleSize label dirspec = go =<< lift (dirKeys dirspec)
 		return $ sizer storageUnits False size
 	keysizes keys = do
 		dir <- lift $ fromRepo dirspec
-		liftIO $ forM keys $ \k -> catchDefaultIO 0 $
-			getFileSize (fromRawFilePath (dir P.</> keyFile k))
+		liftIO $ forM keys $ \k ->
+			catchDefaultIO 0 $ getFileSize (dir P.</> keyFile k)
 
 aside :: String -> String
 aside s = " (" ++ s ++ ")"
@@ -48,7 +48,7 @@ startSrcDest ps@(src:dest:[])
   where
 	src' = toRawFilePath src
 	go key = starting "reinject" ai si $
-		ifM (verifyKeyContent RetrievalAllKeysSecure DefaultVerify UnVerified key src)
+		ifM (verifyKeyContent RetrievalAllKeysSecure DefaultVerify UnVerified key src')
 			( perform src' key
 			, giveup $ src ++ " does not have expected content of " ++ dest
 			)
@@ -207,7 +207,7 @@ shouldAnnex file indexmeta moldkey = ifM (annexGitAddToAnnex <$> Annex.getGitCon
 	-- annex.largefiles now matches it, because the content is not
 	-- changed.
 	checkunchangedgitfile cont = case (moldkey, indexmeta) of
-		(Nothing, Just (sha, sz, _)) -> liftIO (catchMaybeIO (getFileSize (fromRawFilePath file))) >>= \case
+		(Nothing, Just (sha, sz, _)) -> liftIO (catchMaybeIO (getFileSize file)) >>= \case
 			Just sz' | sz' == sz -> do
 				-- The size is the same, so the file
 				-- is not much larger than what was stored
@@ -293,7 +293,7 @@ test runannex mkr mkk =
 			Nothing -> return True
 			Just b -> case Types.Backend.verifyKeyContent b of
 				Nothing -> return True
-				Just verifier -> verifier k (serializeKey k)
+				Just verifier -> verifier k (serializeKey' k)
 	get r k = getViaTmp (Remote.retrievalSecurityPolicy r) (RemoteVerify r) k $ \dest ->
 		tryNonAsync (Remote.retrieveKeyFile r k (AssociatedFile Nothing) (fromRawFilePath dest) nullMeterUpdate) >>= \case
 			Right v -> return (True, v)
@@ -352,7 +352,7 @@ testExportTree runannex mkr mkk1 mkk2 =
 		liftIO $ hClose h
 		tryNonAsync (Remote.retrieveExport ea k testexportlocation tmp nullMeterUpdate) >>= \case
 			Left _ -> return False
-			Right () -> verifyKeyContent RetrievalAllKeysSecure AlwaysVerify UnVerified k tmp
+			Right () -> verifyKeyContent RetrievalAllKeysSecure AlwaysVerify UnVerified k (toRawFilePath tmp)
 	checkpresentexport ea k = Remote.checkPresentExport ea k testexportlocation
 	removeexport ea k = Remote.removeExport ea k testexportlocation
 	removeexportdirectory ea = case Remote.removeExportDirectory ea of
@@ -107,7 +107,7 @@ benchDb tmpdir num = do
 	initDb db SQL.createTables
 	h <- liftIO $ H.openDbQueue H.MultiWriter db SQL.containedTable
 	liftIO $ populateAssociatedFiles h num
-	sz <- liftIO $ getFileSize db
+	sz <- liftIO $ getFileSize (toRawFilePath db)
 	liftIO $ putStrLn $ "size of database on disk: " ++
 		roughSize storageUnits False sz
 	mv <- liftIO $ newMVar 1
Limit.hs
@@ -464,8 +464,7 @@ limitSize lb vs s = case readSize dataUnits s of
 	LimitAnnexFiles -> goannexed sz fi
 	LimitDiskFiles -> case contentFile fi of
 		Just f -> do
-			filesize <- liftIO $ catchMaybeIO $
-				getFileSize (fromRawFilePath f)
+			filesize <- liftIO $ catchMaybeIO $ getFileSize f
 			return $ filesize `vs` Just sz
 		Nothing -> goannexed sz fi
 	go sz _ (MatchingKey key _) = checkkey sz key
@@ -49,7 +49,7 @@ instance MeterSize KeySource where
 - This allows uploads of keys without size to still have progress
 - displayed.
 -}
-data KeySizer = KeySizer Key (Annex (Maybe FilePath))
+data KeySizer = KeySizer Key (Annex (Maybe RawFilePath))
 
 instance MeterSize KeySizer where
 	getMeterSize (KeySizer k getsrcfile) = case fromKey keySize k of
@@ -40,13 +40,13 @@ runLocal :: RunState -> RunProto Annex -> LocalF (Proto a) -> Annex (Either Prot
 runLocal runst runner a = case a of
 	TmpContentSize k next -> do
 		tmp <- fromRepo $ gitAnnexTmpObjectLocation k
-		size <- liftIO $ catchDefaultIO 0 $ getFileSize $ fromRawFilePath tmp
+		size <- liftIO $ catchDefaultIO 0 $ getFileSize tmp
 		runner (next (Len size))
 	FileSize f next -> do
-		size <- liftIO $ catchDefaultIO 0 $ getFileSize f
+		size <- liftIO $ catchDefaultIO 0 $ getFileSize (toRawFilePath f)
 		runner (next (Len size))
 	ContentSize k next -> do
-		let getsize = liftIO . catchMaybeIO . getFileSize . fromRawFilePath
+		let getsize = liftIO . catchMaybeIO . getFileSize
 		size <- inAnnex' isJust Nothing getsize k
 		runner (next (Len <$> size))
 	ReadContent k af o sender next -> do
@@ -166,7 +166,7 @@ runLocal runst runner a = case a of
 			indicatetransferred ti
 
 			rightsize <- do
-				sz <- liftIO $ getFileSize dest
+				sz <- liftIO $ getFileSize (toRawFilePath dest)
 				return (toInteger sz == l + o)
 
 			runner validitycheck >>= \case
@@ -339,15 +339,15 @@ removeExportLocation topdir loc =
 listImportableContentsM :: RawFilePath -> Annex (Maybe (ImportableContents (ContentIdentifier, ByteSize)))
 listImportableContentsM dir = catchMaybeIO $ liftIO $ do
 	l <- dirContentsRecursive (fromRawFilePath dir)
-	l' <- mapM go l
+	l' <- mapM (go . toRawFilePath) l
 	return $ ImportableContents (catMaybes l') []
   where
 	go f = do
-		st <- getFileStatus f
+		st <- R.getFileStatus f
 		mkContentIdentifier f st >>= \case
 			Nothing -> return Nothing
 			Just cid -> do
-				relf <- relPathDirToFile dir (toRawFilePath f)
+				relf <- relPathDirToFile dir f
 				sz <- getFileSize' f st
 				return $ Just (mkImportLocation relf, (cid, sz))
 
@@ -359,7 +359,7 @@ listImportableContentsM dir = catchMaybeIO $ liftIO $ do
 -- result in extra work to re-import them.
 --
 -- If the file is not a regular file, this will return Nothing.
-mkContentIdentifier :: FilePath -> FileStatus -> IO (Maybe ContentIdentifier)
+mkContentIdentifier :: RawFilePath -> FileStatus -> IO (Maybe ContentIdentifier)
 mkContentIdentifier f st =
 	fmap (ContentIdentifier . encodeBS . showInodeCache)
 		<$> toInodeCache noTSDelta f st
@@ -373,7 +373,7 @@ importKeyM :: RawFilePath -> ExportLocation -> ContentIdentifier -> MeterUpdate
 importKeyM dir loc cid p = do
 	backend <- chooseBackend f
 	k <- fst <$> genKey ks p backend
-	currcid <- liftIO $ mkContentIdentifier (fromRawFilePath absf)
+	currcid <- liftIO $ mkContentIdentifier absf
 		=<< R.getFileStatus absf
 	guardSameContentIdentifiers (return k) cid currcid
   where
@@ -421,7 +421,7 @@ retrieveExportWithContentIdentifierM dir loc cid dest mkkey p =
 	-- Check before copy, to avoid expensive copy of wrong file
 	-- content.
 	precheck cont = guardSameContentIdentifiers cont cid
-		=<< liftIO . mkContentIdentifier f'
+		=<< liftIO . mkContentIdentifier f
 		=<< liftIO (R.getFileStatus f)
 
 	-- Check after copy, in case the file was changed while it was
@@ -442,7 +442,7 @@ retrieveExportWithContentIdentifierM dir loc cid dest mkkey p =
 #else
 	postcheck cont = do
 #endif
-		currcid <- liftIO $ mkContentIdentifier f'
+		currcid <- liftIO $ mkContentIdentifier f
 #ifndef mingw32_HOST_OS
 			=<< getFdStatus fd
 #else
@@ -458,7 +458,7 @@ storeExportWithContentIdentifierM dir src _k loc overwritablecids p = do
 	liftIO $ hFlush tmph
 	liftIO $ hClose tmph
 	resetAnnexFilePerm tmpf
-	liftIO (getFileStatus tmpf) >>= liftIO . mkContentIdentifier tmpf >>= \case
+	liftIO (getFileStatus tmpf) >>= liftIO . mkContentIdentifier (toRawFilePath tmpf) >>= \case
 		Nothing -> giveup "unable to generate content identifier"
 		Just newcid -> do
 			checkExportContent dir loc
@@ -506,7 +506,7 @@ checkExportContent dir loc knowncids unsafe callback =
 	tryWhenExists (liftIO $ R.getFileStatus dest) >>= \case
 		Just destst
 			| not (isRegularFile destst) -> unsafe
-			| otherwise -> catchDefaultIO Nothing (liftIO $ mkContentIdentifier (fromRawFilePath dest) destst) >>= \case
+			| otherwise -> catchDefaultIO Nothing (liftIO $ mkContentIdentifier dest destst) >>= \case
				Just destcid
					| destcid `elem` knowncids -> callback KnownContentIdentifier
					-- dest exists with other content
@@ -389,7 +389,7 @@ mkUploadRequest rs k content = case (extractKeySha256 k, extractKeySize k) of
 		ret sha256 size
 	_ -> do
 		sha256 <- calcsha256
-		size <- liftIO $ getFileSize content
+		size <- liftIO $ getFileSize (toRawFilePath content)
 		rememberboth sha256 size
 		ret sha256 size
   where
@@ -251,7 +251,7 @@ retrieveChunks retriever u chunkconfig encryptor basek dest basep sink
   where
 	go pe cks = do
 		let ls = map chunkKeyList cks
-		currsize <- liftIO $ catchMaybeIO $ getFileSize dest
+		currsize <- liftIO $ catchMaybeIO $ getFileSize (toRawFilePath dest)
 		let ls' = maybe ls (setupResume ls) currsize
 		if any null ls'
 			then noop -- dest is already complete
@@ -32,7 +32,7 @@ httpStorer a = fileStorer $ \k f m -> a k =<< liftIO (httpBodyStorer f m)
 -- the meter as it's sent.
 httpBodyStorer :: FilePath -> MeterUpdate -> IO RequestBody
 httpBodyStorer src m = do
-	size <- getFileSize src
+	size <- getFileSize (toRawFilePath src)
 	let streamer sink = withMeteredFile src m $ \b -> byteStringPopper b sink
 	return $ RequestBodyStream (fromInteger size) streamer
 
@@ -32,7 +32,7 @@ type WithConn a c = (ClosableConnection c -> Annex (ClosableConnection c, a)) ->
 
 store :: (MeterUpdate -> ProtoRunner Bool) -> Key -> AssociatedFile -> MeterUpdate -> Annex ()
 store runner k af p = do
-	let sizer = KeySizer k (fmap fst <$> prepSendAnnex k)
+	let sizer = KeySizer k (fmap (toRawFilePath . fst) <$> prepSendAnnex k)
 	metered (Just p) sizer $ \_ p' ->
 		runner p' (P2P.put k af p') >>= \case
 			Just True -> return ()
@@ -249,7 +249,7 @@ specialRemote' cfg c storer retriever remover checkpresent baser = encr
 
 	displayprogress p k srcfile a
 		| displayProgress cfg =
-			metered (Just p) (KeySizer k (return srcfile)) (const a)
+			metered (Just p) (KeySizer k (pure (fmap toRawFilePath srcfile))) (const a)
 		| otherwise = a p
 
 {- Sink callback for retrieveChunks. Stores the file content into the
@@ -333,7 +333,7 @@ store mh r info magic = fileStorer $ \k f p -> withS3HandleOrFail (uuid r) mh $
 storeHelper :: S3Info -> S3Handle -> Maybe Magic -> FilePath -> S3.Object -> MeterUpdate -> Annex (Maybe S3Etag, Maybe S3VersionID)
 storeHelper info h magic f object p = liftIO $ case partSize info of
 	Just partsz | partsz > 0 -> do
-		fsz <- getFileSize f
+		fsz <- getFileSize (toRawFilePath f)
 		if fsz > partsz
 			then multipartupload fsz partsz
 			else singlepartupload
@@ -20,7 +20,7 @@ data BackendA a = Backend
 	, genKey :: Maybe (KeySource -> MeterUpdate -> a Key)
 	-- Verifies the content of a key using a hash. This does not need
 	-- to be cryptographically secure.
-	, verifyKeyContent :: Maybe (Key -> FilePath -> a Bool)
+	, verifyKeyContent :: Maybe (Key -> RawFilePath -> a Bool)
 	-- Checks if a key can be upgraded to a better form.
 	, canUpgradeKey :: Maybe (Key -> Bool)
 	-- Checks if there is a fast way to migrate a key to a different
@@ -1,4 +1,6 @@
 {- File size.
+ -
+ - Copyright 2015-2020 Joey Hess <id@joeyh.name>
 -
 - License: BSD-2-clause
 -}
@@ -18,6 +20,8 @@ import Control.Exception (bracket)
 import System.IO
 #endif
 
+import qualified Utility.RawFilePath as R
+
 type FileSize = Integer
 
 {- Gets the size of a file.
@@ -26,18 +30,18 @@ type FileSize = Integer
 - FileOffset which maxes out at 2 gb.
 - See https://github.com/jystic/unix-compat/issues/16
 -}
-getFileSize :: FilePath -> IO FileSize
+getFileSize :: R.RawFilePath -> IO FileSize
 #ifndef mingw32_HOST_OS
-getFileSize f = fmap (fromIntegral . fileSize) (getFileStatus f)
+getFileSize f = fmap (fromIntegral . fileSize) (R.getFileStatus f)
 #else
-getFileSize f = bracket (openFile f ReadMode) hClose hFileSize
+getFileSize f = bracket (openFile (fromRawFilePath f) ReadMode) hClose hFileSize
 #endif
 
 {- Gets the size of the file, when its FileStatus is already known.
 -
 - On windows, uses getFileSize. Otherwise, the FileStatus contains the
 - size, so this does not do any work. -}
-getFileSize' :: FilePath -> FileStatus -> IO FileSize
+getFileSize' :: R.RawFilePath -> FileStatus -> IO FileSize
 #ifndef mingw32_HOST_OS
 getFileSize' _ s = return $ fromIntegral $ fileSize s
 #else
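A hedged usage sketch for the new Utility.FileSize interface above, assuming it runs inside the git-annex tree with Utility.RawFilePath imported as R (as in the hunk); sizeFromStatus is an illustrative name, not a function in the tree. It pairs R.getFileStatus with getFileSize', which reuses the already-fetched FileStatus on non-Windows and falls back to getFileSize on Windows, per the #else branch above.

-- Illustrative sketch only.
sizeFromStatus :: R.RawFilePath -> IO FileSize
sizeFromStatus f = do
	st <- R.getFileStatus f
	getFileSize' f st  -- no second stat needed outside Windows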
@@ -186,15 +186,15 @@ readInodeCache s = case words s of
 
 genInodeCache :: RawFilePath -> TSDelta -> IO (Maybe InodeCache)
 genInodeCache f delta = catchDefaultIO Nothing $
-	toInodeCache delta (fromRawFilePath f) =<< R.getFileStatus f
+	toInodeCache delta f =<< R.getFileStatus f
 
-toInodeCache :: TSDelta -> FilePath -> FileStatus -> IO (Maybe InodeCache)
+toInodeCache :: TSDelta -> RawFilePath -> FileStatus -> IO (Maybe InodeCache)
 toInodeCache (TSDelta getdelta) f s
 	| isRegularFile s = do
 		delta <- getdelta
 		sz <- getFileSize' f s
 #ifdef mingw32_HOST_OS
-		mtime <- utcTimeToPOSIXSeconds <$> getModificationTime f
+		mtime <- utcTimeToPOSIXSeconds <$> getModificationTime (fromRawFilePath f)
 #else
 		let mtime = modificationTimeHiRes s
 #endif
@@ -223,7 +223,8 @@ watchFileSize f p a = bracket
 			p sz
 			watcher sz
 	getsz = catchDefaultIO zeroBytesProcessed $
-		toBytesProcessed <$> getFileSize f
+		toBytesProcessed <$> getFileSize f'
+	f' = toRawFilePath f
 
 data OutputHandler = OutputHandler
 	{ quietMode :: Bool
@@ -52,6 +52,7 @@ import Network.HTTP.Client.Restricted
 import Utility.HttpManagerRestricted
 #endif
 import Utility.IPAddress
+import qualified Utility.RawFilePath as R
 
 import Network.URI
 import Network.HTTP.Types
@@ -309,8 +310,8 @@ getUrlInfo url uo = case parseURIRelaxed url of
 			=<< curlRestrictedParams r u defport (basecurlparams url')
 
 	existsfile u = do
-		let f = unEscapeString (uriPath u)
-		s <- catchMaybeIO $ getFileStatus f
+		let f = toRawFilePath (unEscapeString (uriPath u))
+		s <- catchMaybeIO $ R.getFileStatus f
 		case s of
 			Just stat -> do
 				sz <- getFileSize' f stat
@@ -455,7 +456,7 @@ download' nocurlerror meterupdate url file uo =
 -}
 downloadConduit :: MeterUpdate -> Request -> FilePath -> UrlOptions -> IO ()
 downloadConduit meterupdate req file uo =
-	catchMaybeIO (getFileSize file) >>= \case
+	catchMaybeIO (getFileSize (toRawFilePath file)) >>= \case
 		Just sz | sz > 0 -> resumedownload sz
 		_ -> join $ runResourceT $ do
 			liftIO $ debugM "url" (show req')