convert getFileSize to RawFilePath
Lots of nice wins from this in avoiding unnecessary work, and I think nothing got slower.

This commit was sponsored by Boyd Stephen Smith Jr. on Patreon.
parent 2670af9d5a
commit 9b0dde834e

34 changed files with 79 additions and 70 deletions
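The signature change at the heart of the commit is in Utility/FileSize.hs (full hunks below): getFileSize and getFileSize' now take a RawFilePath, so callers that already hold a RawFilePath skip the round trip through String. A minimal sketch of the new POSIX code path, using the unix package's ByteString API directly (git-annex itself routes the stat call through its Utility.RawFilePath wrapper, as the diff shows):

import System.Posix.ByteString.FilePath (RawFilePath)
import System.Posix.Files.ByteString (fileSize, getFileStatus)

type FileSize = Integer

-- RawFilePath is a ByteString, so the path reaches the stat call
-- without any String encode/decode. The result is widened to Integer
-- because FileOffset can max out at 2 GB on some platforms.
getFileSize :: RawFilePath -> IO FileSize
getFileSize f = fromIntegral . fileSize <$> getFileStatus f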
@@ -363,7 +363,7 @@ inodeMap getfiles = do
 	let f' = fromRawFilePath f
 	if isSymbolicLink s
 		then pure $ Just (Left f', f')
-		else withTSDelta (\d -> liftIO $ toInodeCache d f' s)
+		else withTSDelta (\d -> liftIO $ toInodeCache d f s)
 			>>= return . \case
 				Just i -> Just (Right (inodeCacheToKey Strongly i), f')
 				Nothing -> Nothing

@@ -323,7 +323,7 @@ getViaTmpFromDisk rsp v key action = checkallowed $ do
 				_ -> MustVerify
 			else verification
 	if ok
-		then ifM (verifyKeyContent rsp v verification' key (fromRawFilePath tmpfile))
+		then ifM (verifyKeyContent rsp v verification' key tmpfile)
 			( ifM (pruneTmpWorkDirBefore tmpfile (moveAnnex key))
 				( do
 					logStatus key InfoPresent

@@ -373,7 +373,7 @@ getViaTmpFromDisk rsp v key action = checkallowed $ do
  - If the RetrievalSecurityPolicy requires verification and the key's
  - backend doesn't support it, the verification will fail.
  -}
-verifyKeyContent :: RetrievalSecurityPolicy -> VerifyConfig -> Verification -> Key -> FilePath -> Annex Bool
+verifyKeyContent :: RetrievalSecurityPolicy -> VerifyConfig -> Verification -> Key -> RawFilePath -> Annex Bool
 verifyKeyContent rsp v verification k f = case (rsp, verification) of
 	(_, Verified) -> return True
 	(RetrievalVerifiableKeysSecure, _) -> ifM (Backend.isVerifiable k)

@@ -434,16 +434,17 @@ shouldVerify (RemoteVerify r) =
 -}
 checkDiskSpaceToGet :: Key -> a -> Annex a -> Annex a
 checkDiskSpaceToGet key unabletoget getkey = do
-	tmp <- fromRawFilePath <$> fromRepo (gitAnnexTmpObjectLocation key)
+	tmp <- fromRepo (gitAnnexTmpObjectLocation key)
+	let tmp' = fromRawFilePath tmp
 
-	e <- liftIO $ doesFileExist tmp
+	e <- liftIO $ doesFileExist tmp'
 	alreadythere <- liftIO $ if e
 		then getFileSize tmp
 		else return 0
 	ifM (checkDiskSpace Nothing key alreadythere True)
 		( do
 			-- The tmp file may not have been left writable
-			when e $ thawContent tmp
+			when e $ thawContent tmp'
 			getkey
 		, return unabletoget
 		)

@@ -703,7 +704,7 @@ isUnmodified key f = go =<< geti
   where
 	go Nothing = return False
 	go (Just fc) = isUnmodifiedCheap' key fc <||> expensivecheck fc
-	expensivecheck fc = ifM (verifyKeyContent RetrievalAllKeysSecure AlwaysVerify UnVerified key (fromRawFilePath f))
+	expensivecheck fc = ifM (verifyKeyContent RetrievalAllKeysSecure AlwaysVerify UnVerified key f)
 		( do
 			-- The file could have been modified while it was
 			-- being verified. Detect that.

@@ -164,7 +164,7 @@ ingest' preferredbackend meterupdate (Just (LockedDown cfg source)) mk restage =
 		Just k -> return k
 	let src = contentLocation source
 	ms <- liftIO $ catchMaybeIO $ R.getFileStatus src
-	mcache <- maybe (pure Nothing) (liftIO . toInodeCache delta (fromRawFilePath src)) ms
+	mcache <- maybe (pure Nothing) (liftIO . toInodeCache delta src) ms
 	case (mcache, inodeCache source) of
 		(_, Nothing) -> go k mcache ms
 		(Just newc, Just c) | compareStrong c newc -> go k mcache ms

@@ -166,7 +166,7 @@ runTransfer' ignorelock t afile retrydecider transferaction = enteringStage Tran
 		liftIO $ readMVar metervar
 	| otherwise = do
 		f <- fromRepo $ gitAnnexTmpObjectLocation (transferKey t)
-		liftIO $ catchDefaultIO 0 $ getFileSize (fromRawFilePath f)
+		liftIO $ catchDefaultIO 0 $ getFileSize f
 
 {- Avoid download and upload of keys with insecure content when
  - annex.securehashesonly is configured.

@@ -123,7 +123,7 @@ youtubeDlMaxSize workdir = ifM (Annex.getState Annex.force)
 		Just have -> do
 			inprogress <- sizeOfDownloadsInProgress (const True)
 			partial <- liftIO $ sum
-				<$> (mapM getFileSize =<< dirContents workdir)
+				<$> (mapM (getFileSize . toRawFilePath) =<< dirContents workdir)
 			reserve <- annexDiskReserve <$> Annex.getGitConfig
 			let maxsize = have - reserve - inprogress + partial
 			if maxsize > 0

@@ -140,7 +140,8 @@ repairStaleGitLocks r = do
 repairStaleLocks :: [FilePath] -> Assistant ()
 repairStaleLocks lockfiles = go =<< getsizes
   where
-	getsize lf = catchMaybeIO $ (\s -> (lf, s)) <$> getFileSize lf
+	getsize lf = catchMaybeIO $ (\s -> (lf, s))
+		<$> getFileSize (toRawFilePath lf)
 	getsizes = liftIO $ catMaybes <$> mapM getsize lockfiles
 	go [] = return ()
 	go l = ifM (liftIO $ null <$> Lsof.query ("--" : map fst l))

@@ -223,7 +223,7 @@ checkLogSize :: Int -> Assistant ()
 checkLogSize n = do
 	f <- liftAnnex $ fromRawFilePath <$> fromRepo gitAnnexDaemonLogFile
 	logs <- liftIO $ listLogs f
-	totalsize <- liftIO $ sum <$> mapM getFileSize logs
+	totalsize <- liftIO $ sum <$> mapM (getFileSize . toRawFilePath) logs
 	when (totalsize > 2 * oneMegabyte) $ do
 		notice ["Rotated logs due to size:", show totalsize]
 		liftIO $ openLog f >>= handleToFd >>= redirLog

@@ -37,7 +37,7 @@ transferPollerThread = namedThread "TransferPoller" $ do
  - temp file being used for the transfer. -}
 	| transferDirection t == Download = do
 		let f = gitAnnexTmpObjectLocation (transferKey t) g
-		sz <- liftIO $ catchMaybeIO $ getFileSize (fromRawFilePath f)
+		sz <- liftIO $ catchMaybeIO $ getFileSize f
 		newsize t info sz
 {- Uploads don't need to be polled for when the TransferWatcher
  - thread can track file modifications. -}

@@ -218,7 +218,8 @@ onAddUnlocked symlinkssupported matcher f fs = do
 		=<< inRepo (toTopFilePath (toRawFilePath file))
 	samefilestatus key file status = do
 		cache <- Database.Keys.getInodeCaches key
-		curr <- withTSDelta $ \delta -> liftIO $ toInodeCache delta file status
+		curr <- withTSDelta $ \delta ->
+			liftIO $ toInodeCache delta (toRawFilePath file) status
 		case (cache, curr) of
 			(_, Just c) -> elemInodeCaches c cache
 			([], Nothing) -> return True

@@ -113,16 +113,16 @@ distributionDownloadComplete d dest cleanup t
 	| transferDirection t == Download = do
 		debug ["finished downloading git-annex distribution"]
 		maybe (failedupgrade "bad download") go
-			=<< liftAnnex (withObjectLoc k (fsckit . fromRawFilePath))
+			=<< liftAnnex (withObjectLoc k fsckit)
 	| otherwise = cleanup
   where
 	k = mkKey $ const $ distributionKey d
 	fsckit f = Backend.maybeLookupBackendVariety (fromKey keyVariety k) >>= \case
-		Nothing -> return $ Just f
+		Nothing -> return $ Just (fromRawFilePath f)
 		Just b -> case Types.Backend.verifyKeyContent b of
-			Nothing -> return $ Just f
+			Nothing -> return $ Just (fromRawFilePath f)
 			Just verifier -> ifM (verifier k f)
-				( return $ Just f
+				( return $ Just (fromRawFilePath f)
 				, return Nothing
 				)
 	go f = do

@@ -102,12 +102,12 @@ genKeyExternal ebname hasext ks meterupdate =
 		return $ GetNextMessage go
 	go _ = Nothing
 
-verifyKeyContentExternal :: ExternalBackendName -> HasExt -> MeterUpdate -> Key -> FilePath -> Annex Bool
+verifyKeyContentExternal :: ExternalBackendName -> HasExt -> MeterUpdate -> Key -> RawFilePath -> Annex Bool
 verifyKeyContentExternal ebname hasext meterupdate k f =
 	withExternalState ebname hasext $ \st ->
 		handleRequest st req notavail go
   where
-	req = VERIFYKEYCONTENT (toProtoKey k) f
+	req = VERIFYKEYCONTENT (toProtoKey k) (fromRawFilePath f)
 
 	-- This should not be able to happen, because CANVERIFY is checked
 	-- before this function is enable, and so the external program

@@ -15,12 +15,13 @@ module Backend.Hash (
 
 import Annex.Common
 import qualified Annex
-import Backend.Utilities
 import Types.Key
 import Types.Backend
 import Types.KeySource
 import Utility.Hash
 import Utility.Metered
+import Backend.Utilities
+import qualified Utility.RawFilePath as R
 
 import qualified Data.ByteString as S
 import qualified Data.ByteString.Char8 as S8

@@ -100,7 +101,7 @@ hashKeyVariety (Blake2spHash size) he = Blake2spKey size he
 {- A key is a hash of its contents. -}
 keyValue :: Hash -> KeySource -> MeterUpdate -> Annex Key
 keyValue hash source meterupdate = do
-	let file = fromRawFilePath (contentLocation source)
+	let file = contentLocation source
 	filesize <- liftIO $ getFileSize file
 	s <- hashFile hash file meterupdate
 	return $ mkKey $ \k -> k

@@ -117,10 +118,10 @@ keyValueE hash source meterupdate =
 
 {- A key's checksum is checked during fsck when it's content is present
  - except for in fast mode. -}
-checkKeyChecksum :: Hash -> Key -> FilePath -> Annex Bool
+checkKeyChecksum :: Hash -> Key -> RawFilePath -> Annex Bool
 checkKeyChecksum hash key file = catchIOErrorType HardwareFault hwfault $ do
 	fast <- Annex.getState Annex.fast
-	exists <- liftIO $ doesFileExist file
+	exists <- liftIO $ R.doesPathExist file
 	case (exists, fast) of
 		(True, False) -> do
 			showAction "checksum"

@@ -191,9 +192,9 @@ trivialMigrate' oldkey newbackend afile maxextlen
 	oldvariety = fromKey keyVariety oldkey
 	newvariety = backendVariety newbackend
 
-hashFile :: Hash -> FilePath -> MeterUpdate -> Annex String
+hashFile :: Hash -> RawFilePath -> MeterUpdate -> Annex String
 hashFile hash file meterupdate =
-	liftIO $ withMeteredFile file meterupdate $ \b -> do
+	liftIO $ withMeteredFile (fromRawFilePath file) meterupdate $ \b -> do
 		let h = hasher b
 		-- Force full evaluation of hash so whole file is read
 		-- before returning.

@@ -39,7 +39,7 @@ keyValue :: KeySource -> MeterUpdate -> Annex Key
 keyValue source _ = do
 	let f = contentLocation source
 	stat <- liftIO $ R.getFileStatus f
-	sz <- liftIO $ getFileSize' (fromRawFilePath f) stat
+	sz <- liftIO $ getFileSize' f stat
 	relf <- fromRawFilePath . getTopFilePath
 		<$> inRepo (toTopFilePath $ keyFilename source)
 	return $ mkKey $ \k -> k

@@ -384,7 +384,7 @@ checkKeySizeOr :: (Key -> Annex String) -> Key -> RawFilePath -> ActionItem -> A
 checkKeySizeOr bad key file ai = case fromKey keySize key of
 	Nothing -> return True
 	Just size -> do
-		size' <- liftIO $ getFileSize (fromRawFilePath file)
+		size' <- liftIO $ getFileSize file
 		comparesizes size size'
   where
 	comparesizes a b = do

@@ -461,7 +461,7 @@ checkBackendOr' bad backend key file ai postcheck =
 	case Types.Backend.verifyKeyContent backend of
 		Nothing -> return True
 		Just verifier -> do
-			ok <- verifier key (fromRawFilePath file)
+			ok <- verifier key file
 			ifM postcheck
 				( do
 					unless ok $ do

@@ -676,8 +676,8 @@ staleSize label dirspec = go =<< lift (dirKeys dirspec)
 			return $ sizer storageUnits False size
 	keysizes keys = do
 		dir <- lift $ fromRepo dirspec
-		liftIO $ forM keys $ \k -> catchDefaultIO 0 $
-			getFileSize (fromRawFilePath (dir P.</> keyFile k))
+		liftIO $ forM keys $ \k ->
+			catchDefaultIO 0 $ getFileSize (dir P.</> keyFile k)
 
 aside :: String -> String
 aside s = " (" ++ s ++ ")"

@@ -48,7 +48,7 @@ startSrcDest ps@(src:dest:[])
   where
 	src' = toRawFilePath src
 	go key = starting "reinject" ai si $
-		ifM (verifyKeyContent RetrievalAllKeysSecure DefaultVerify UnVerified key src)
+		ifM (verifyKeyContent RetrievalAllKeysSecure DefaultVerify UnVerified key src')
 			( perform src' key
 			, giveup $ src ++ " does not have expected content of " ++ dest
 			)

@@ -207,7 +207,7 @@ shouldAnnex file indexmeta moldkey = ifM (annexGitAddToAnnex <$> Annex.getGitCon
 	-- annex.largefiles now matches it, because the content is not
 	-- changed.
 	checkunchangedgitfile cont = case (moldkey, indexmeta) of
-		(Nothing, Just (sha, sz, _)) -> liftIO (catchMaybeIO (getFileSize (fromRawFilePath file))) >>= \case
+		(Nothing, Just (sha, sz, _)) -> liftIO (catchMaybeIO (getFileSize file)) >>= \case
 			Just sz' | sz' == sz -> do
 				-- The size is the same, so the file
 				-- is not much larger than what was stored

@@ -293,7 +293,7 @@ test runannex mkr mkk =
 		Nothing -> return True
 		Just b -> case Types.Backend.verifyKeyContent b of
 			Nothing -> return True
-			Just verifier -> verifier k (serializeKey k)
+			Just verifier -> verifier k (serializeKey' k)
 	get r k = getViaTmp (Remote.retrievalSecurityPolicy r) (RemoteVerify r) k $ \dest ->
 		tryNonAsync (Remote.retrieveKeyFile r k (AssociatedFile Nothing) (fromRawFilePath dest) nullMeterUpdate) >>= \case
 			Right v -> return (True, v)

@@ -352,7 +352,7 @@ testExportTree runannex mkr mkk1 mkk2 =
 		liftIO $ hClose h
 		tryNonAsync (Remote.retrieveExport ea k testexportlocation tmp nullMeterUpdate) >>= \case
 			Left _ -> return False
-			Right () -> verifyKeyContent RetrievalAllKeysSecure AlwaysVerify UnVerified k tmp
+			Right () -> verifyKeyContent RetrievalAllKeysSecure AlwaysVerify UnVerified k (toRawFilePath tmp)
 	checkpresentexport ea k = Remote.checkPresentExport ea k testexportlocation
 	removeexport ea k = Remote.removeExport ea k testexportlocation
 	removeexportdirectory ea = case Remote.removeExportDirectory ea of

@@ -107,7 +107,7 @@ benchDb tmpdir num = do
 	initDb db SQL.createTables
 	h <- liftIO $ H.openDbQueue H.MultiWriter db SQL.containedTable
 	liftIO $ populateAssociatedFiles h num
-	sz <- liftIO $ getFileSize db
+	sz <- liftIO $ getFileSize (toRawFilePath db)
 	liftIO $ putStrLn $ "size of database on disk: " ++
 		roughSize storageUnits False sz
 	mv <- liftIO $ newMVar 1

Limit.hs
@@ -464,8 +464,7 @@ limitSize lb vs s = case readSize dataUnits s of
 		LimitAnnexFiles -> goannexed sz fi
 		LimitDiskFiles -> case contentFile fi of
 			Just f -> do
-				filesize <- liftIO $ catchMaybeIO $
-					getFileSize (fromRawFilePath f)
+				filesize <- liftIO $ catchMaybeIO $ getFileSize f
 				return $ filesize `vs` Just sz
 			Nothing -> goannexed sz fi
 	go sz _ (MatchingKey key _) = checkkey sz key

@@ -49,7 +49,7 @@ instance MeterSize KeySource where
  - This allows uploads of keys without size to still have progress
  - displayed.
  -}
-data KeySizer = KeySizer Key (Annex (Maybe FilePath))
+data KeySizer = KeySizer Key (Annex (Maybe RawFilePath))
 
 instance MeterSize KeySizer where
 	getMeterSize (KeySizer k getsrcfile) = case fromKey keySize k of

@@ -40,13 +40,13 @@ runLocal :: RunState -> RunProto Annex -> LocalF (Proto a) -> Annex (Either Prot
 runLocal runst runner a = case a of
 	TmpContentSize k next -> do
 		tmp <- fromRepo $ gitAnnexTmpObjectLocation k
-		size <- liftIO $ catchDefaultIO 0 $ getFileSize $ fromRawFilePath tmp
+		size <- liftIO $ catchDefaultIO 0 $ getFileSize tmp
 		runner (next (Len size))
 	FileSize f next -> do
-		size <- liftIO $ catchDefaultIO 0 $ getFileSize f
+		size <- liftIO $ catchDefaultIO 0 $ getFileSize (toRawFilePath f)
 		runner (next (Len size))
 	ContentSize k next -> do
-		let getsize = liftIO . catchMaybeIO . getFileSize . fromRawFilePath
+		let getsize = liftIO . catchMaybeIO . getFileSize
 		size <- inAnnex' isJust Nothing getsize k
 		runner (next (Len <$> size))
 	ReadContent k af o sender next -> do

@@ -166,7 +166,7 @@ runLocal runst runner a = case a of
 				indicatetransferred ti
 
 				rightsize <- do
-					sz <- liftIO $ getFileSize dest
+					sz <- liftIO $ getFileSize (toRawFilePath dest)
 					return (toInteger sz == l + o)
 
 				runner validitycheck >>= \case

@@ -339,15 +339,15 @@ removeExportLocation topdir loc =
 listImportableContentsM :: RawFilePath -> Annex (Maybe (ImportableContents (ContentIdentifier, ByteSize)))
 listImportableContentsM dir = catchMaybeIO $ liftIO $ do
 	l <- dirContentsRecursive (fromRawFilePath dir)
-	l' <- mapM go l
+	l' <- mapM (go . toRawFilePath) l
 	return $ ImportableContents (catMaybes l') []
   where
 	go f = do
-		st <- getFileStatus f
+		st <- R.getFileStatus f
 		mkContentIdentifier f st >>= \case
 			Nothing -> return Nothing
 			Just cid -> do
-				relf <- relPathDirToFile dir (toRawFilePath f)
+				relf <- relPathDirToFile dir f
 				sz <- getFileSize' f st
 				return $ Just (mkImportLocation relf, (cid, sz))

@@ -359,7 +359,7 @@ listImportableContentsM dir = catchMaybeIO $ liftIO $ do
 -- result in extra work to re-import them.
 --
 -- If the file is not a regular file, this will return Nothing.
-mkContentIdentifier :: FilePath -> FileStatus -> IO (Maybe ContentIdentifier)
+mkContentIdentifier :: RawFilePath -> FileStatus -> IO (Maybe ContentIdentifier)
 mkContentIdentifier f st =
 	fmap (ContentIdentifier . encodeBS . showInodeCache)
 		<$> toInodeCache noTSDelta f st

@@ -373,7 +373,7 @@ importKeyM :: RawFilePath -> ExportLocation -> ContentIdentifier -> MeterUpdate
 importKeyM dir loc cid p = do
 	backend <- chooseBackend f
 	k <- fst <$> genKey ks p backend
-	currcid <- liftIO $ mkContentIdentifier (fromRawFilePath absf)
+	currcid <- liftIO $ mkContentIdentifier absf
 		=<< R.getFileStatus absf
 	guardSameContentIdentifiers (return k) cid currcid
   where

@@ -421,7 +421,7 @@ retrieveExportWithContentIdentifierM dir loc cid dest mkkey p =
 	-- Check before copy, to avoid expensive copy of wrong file
 	-- content.
 	precheck cont = guardSameContentIdentifiers cont cid
-		=<< liftIO . mkContentIdentifier f'
+		=<< liftIO . mkContentIdentifier f
 		=<< liftIO (R.getFileStatus f)
 
 	-- Check after copy, in case the file was changed while it was

@@ -442,7 +442,7 @@ retrieveExportWithContentIdentifierM dir loc cid dest mkkey p =
 #else
 	postcheck cont = do
 #endif
-		currcid <- liftIO $ mkContentIdentifier f'
+		currcid <- liftIO $ mkContentIdentifier f
 #ifndef mingw32_HOST_OS
 			=<< getFdStatus fd
 #else

@@ -458,7 +458,7 @@ storeExportWithContentIdentifierM dir src _k loc overwritablecids p = do
 	liftIO $ hFlush tmph
 	liftIO $ hClose tmph
 	resetAnnexFilePerm tmpf
-	liftIO (getFileStatus tmpf) >>= liftIO . mkContentIdentifier tmpf >>= \case
+	liftIO (getFileStatus tmpf) >>= liftIO . mkContentIdentifier (toRawFilePath tmpf) >>= \case
 		Nothing -> giveup "unable to generate content identifier"
 		Just newcid -> do
 			checkExportContent dir loc

@@ -506,7 +506,7 @@ checkExportContent dir loc knowncids unsafe callback =
 	tryWhenExists (liftIO $ R.getFileStatus dest) >>= \case
 		Just destst
 			| not (isRegularFile destst) -> unsafe
-			| otherwise -> catchDefaultIO Nothing (liftIO $ mkContentIdentifier (fromRawFilePath dest) destst) >>= \case
+			| otherwise -> catchDefaultIO Nothing (liftIO $ mkContentIdentifier dest destst) >>= \case
 				Just destcid
 					| destcid `elem` knowncids -> callback KnownContentIdentifier
 					-- dest exists with other content

@@ -389,7 +389,7 @@ mkUploadRequest rs k content = case (extractKeySha256 k, extractKeySize k) of
 		ret sha256 size
 	_ -> do
 		sha256 <- calcsha256
-		size <- liftIO $ getFileSize content
+		size <- liftIO $ getFileSize (toRawFilePath content)
 		rememberboth sha256 size
 		ret sha256 size
   where

@@ -251,7 +251,7 @@ retrieveChunks retriever u chunkconfig encryptor basek dest basep sink
   where
 	go pe cks = do
 		let ls = map chunkKeyList cks
-		currsize <- liftIO $ catchMaybeIO $ getFileSize dest
+		currsize <- liftIO $ catchMaybeIO $ getFileSize (toRawFilePath dest)
 		let ls' = maybe ls (setupResume ls) currsize
 		if any null ls'
 			then noop -- dest is already complete

@@ -32,7 +32,7 @@ httpStorer a = fileStorer $ \k f m -> a k =<< liftIO (httpBodyStorer f m)
 -- the meter as it's sent.
 httpBodyStorer :: FilePath -> MeterUpdate -> IO RequestBody
 httpBodyStorer src m = do
-	size <- getFileSize src
+	size <- getFileSize (toRawFilePath src)
 	let streamer sink = withMeteredFile src m $ \b -> byteStringPopper b sink
 	return $ RequestBodyStream (fromInteger size) streamer
 

@@ -32,7 +32,7 @@ type WithConn a c = (ClosableConnection c -> Annex (ClosableConnection c, a)) ->
 
 store :: (MeterUpdate -> ProtoRunner Bool) -> Key -> AssociatedFile -> MeterUpdate -> Annex ()
 store runner k af p = do
-	let sizer = KeySizer k (fmap fst <$> prepSendAnnex k)
+	let sizer = KeySizer k (fmap (toRawFilePath . fst) <$> prepSendAnnex k)
 	metered (Just p) sizer $ \_ p' ->
 		runner p' (P2P.put k af p') >>= \case
 			Just True -> return ()

@@ -249,7 +249,7 @@ specialRemote' cfg c storer retriever remover checkpresent baser = encr
 
 	displayprogress p k srcfile a
 		| displayProgress cfg =
-			metered (Just p) (KeySizer k (return srcfile)) (const a)
+			metered (Just p) (KeySizer k (pure (fmap toRawFilePath srcfile))) (const a)
 		| otherwise = a p
 
 {- Sink callback for retrieveChunks. Stores the file content into the

@@ -333,7 +333,7 @@ store mh r info magic = fileStorer $ \k f p -> withS3HandleOrFail (uuid r) mh $
 storeHelper :: S3Info -> S3Handle -> Maybe Magic -> FilePath -> S3.Object -> MeterUpdate -> Annex (Maybe S3Etag, Maybe S3VersionID)
 storeHelper info h magic f object p = liftIO $ case partSize info of
 	Just partsz | partsz > 0 -> do
-		fsz <- getFileSize f
+		fsz <- getFileSize (toRawFilePath f)
 		if fsz > partsz
 			then multipartupload fsz partsz
 			else singlepartupload

@@ -20,7 +20,7 @@ data BackendA a = Backend
 	, genKey :: Maybe (KeySource -> MeterUpdate -> a Key)
 	-- Verifies the content of a key using a hash. This does not need
 	-- to be cryptographically secure.
-	, verifyKeyContent :: Maybe (Key -> FilePath -> a Bool)
+	, verifyKeyContent :: Maybe (Key -> RawFilePath -> a Bool)
 	-- Checks if a key can be upgraded to a better form.
 	, canUpgradeKey :: Maybe (Key -> Bool)
 	-- Checks if there is a fast way to migrate a key to a different

@@ -1,4 +1,6 @@
 {- File size.
  -
+ - Copyright 2015-2020 Joey Hess <id@joeyh.name>
+ -
  - License: BSD-2-clause
  -}

@@ -18,6 +20,8 @@ import Control.Exception (bracket)
 import System.IO
 #endif
 
+import qualified Utility.RawFilePath as R
+
 type FileSize = Integer
 
 {- Gets the size of a file.

@@ -26,18 +30,18 @@ type FileSize = Integer
  - FileOffset which maxes out at 2 gb.
  - See https://github.com/jystic/unix-compat/issues/16
  -}
-getFileSize :: FilePath -> IO FileSize
+getFileSize :: R.RawFilePath -> IO FileSize
 #ifndef mingw32_HOST_OS
-getFileSize f = fmap (fromIntegral . fileSize) (getFileStatus f)
+getFileSize f = fmap (fromIntegral . fileSize) (R.getFileStatus f)
 #else
-getFileSize f = bracket (openFile f ReadMode) hClose hFileSize
+getFileSize f = bracket (openFile (fromRawFilePath f) ReadMode) hClose hFileSize
 #endif
 
 {- Gets the size of the file, when its FileStatus is already known.
  -
 - On windows, uses getFileSize. Otherwise, the FileStatus contains the
 - size, so this does not do any work. -}
-getFileSize' :: FilePath -> FileStatus -> IO FileSize
+getFileSize' :: R.RawFilePath -> FileStatus -> IO FileSize
 #ifndef mingw32_HOST_OS
 getFileSize' _ s = return $ fromIntegral $ fileSize s
 #else

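As the rest of the diff shows, call sites adapt in one of two ways: code that already holds a RawFilePath simply drops a fromRawFilePath, and code still holding a FilePath converts once at the boundary with toRawFilePath. A hypothetical pair of callers illustrating both patterns (the function names here are made up for illustration, not from this commit):

-- Already raw: the conversion disappears entirely.
sizeOfRaw :: RawFilePath -> IO FileSize
sizeOfRaw f = getFileSize f  -- was: getFileSize (fromRawFilePath f)

-- Still a String path: convert at the call, once.
sizeOfString :: FilePath -> IO FileSize
sizeOfString f = getFileSize (toRawFilePath f)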
@@ -186,15 +186,15 @@ readInodeCache s = case words s of
 
 genInodeCache :: RawFilePath -> TSDelta -> IO (Maybe InodeCache)
 genInodeCache f delta = catchDefaultIO Nothing $
-	toInodeCache delta (fromRawFilePath f) =<< R.getFileStatus f
+	toInodeCache delta f =<< R.getFileStatus f
 
-toInodeCache :: TSDelta -> FilePath -> FileStatus -> IO (Maybe InodeCache)
+toInodeCache :: TSDelta -> RawFilePath -> FileStatus -> IO (Maybe InodeCache)
 toInodeCache (TSDelta getdelta) f s
 	| isRegularFile s = do
 		delta <- getdelta
 		sz <- getFileSize' f s
 #ifdef mingw32_HOST_OS
-		mtime <- utcTimeToPOSIXSeconds <$> getModificationTime f
+		mtime <- utcTimeToPOSIXSeconds <$> getModificationTime (fromRawFilePath f)
 #else
 		let mtime = modificationTimeHiRes s
 #endif

@@ -223,7 +223,8 @@ watchFileSize f p a = bracket
 			p sz
 			watcher sz
 	getsz = catchDefaultIO zeroBytesProcessed $
-		toBytesProcessed <$> getFileSize f
+		toBytesProcessed <$> getFileSize f'
+	f' = toRawFilePath f
 
 data OutputHandler = OutputHandler
 	{ quietMode :: Bool

@@ -52,6 +52,7 @@ import Network.HTTP.Client.Restricted
 import Utility.HttpManagerRestricted
 #endif
 import Utility.IPAddress
+import qualified Utility.RawFilePath as R
 
 import Network.URI
 import Network.HTTP.Types

@@ -309,8 +310,8 @@ getUrlInfo url uo = case parseURIRelaxed url of
 		=<< curlRestrictedParams r u defport (basecurlparams url')
 
 	existsfile u = do
-		let f = unEscapeString (uriPath u)
-		s <- catchMaybeIO $ getFileStatus f
+		let f = toRawFilePath (unEscapeString (uriPath u))
+		s <- catchMaybeIO $ R.getFileStatus f
 		case s of
 			Just stat -> do
 				sz <- getFileSize' f stat

@@ -455,7 +456,7 @@ download' nocurlerror meterupdate url file uo =
 -}
 downloadConduit :: MeterUpdate -> Request -> FilePath -> UrlOptions -> IO ()
 downloadConduit meterupdate req file uo =
-	catchMaybeIO (getFileSize file) >>= \case
+	catchMaybeIO (getFileSize (toRawFilePath file)) >>= \case
 		Just sz | sz > 0 -> resumedownload sz
 		_ -> join $ runResourceT $ do
 			liftIO $ debugM "url" (show req')