diff --git a/Annex.hs b/Annex.hs
index 11ddb3fb9a..582ffd644d 100644
--- a/Annex.hs
+++ b/Annex.hs
@@ -221,7 +221,7 @@ data AnnexState = AnnexState
 	, existinghooks :: M.Map Git.Hook.Hook Bool
 	, workers :: Maybe (TMVar (WorkerPool (AnnexState, AnnexRead)))
 	, cachedcurrentbranch :: (Maybe (Maybe Git.Branch, Maybe Adjustment))
-	, cachedgitenv :: Maybe (AltIndexFile, FilePath, [(String, String)])
+	, cachedgitenv :: Maybe (AltIndexFile, OsPath, [(String, String)])
 	, urloptions :: Maybe UrlOptions
 	, insmudgecleanfilter :: Bool
 	, getvectorclock :: IO CandidateVectorClock
diff --git a/Annex/Branch.hs b/Annex/Branch.hs
index 4e02ce30da..d94391aaf8 100644
--- a/Annex/Branch.hs
+++ b/Annex/Branch.hs
@@ -54,7 +54,6 @@ import Data.Char
 import Data.ByteString.Builder
 import Control.Concurrent (threadDelay)
 import Control.Concurrent.MVar
-import qualified System.FilePath.ByteString as P
 import System.PosixCompat.Files (isRegularFile)
 
 import Annex.Common
@@ -644,7 +643,7 @@ branchFiles :: Annex ([OsPath], IO Bool)
 branchFiles = withIndex $ inRepo branchFiles'
 
 branchFiles' :: Git.Repo -> IO ([OsPath], IO Bool)
-branchFiles' = Git.Command.pipeNullSplit' $
+branchFiles' = Git.Command.pipeNullSplit'' toOsPath $
 	lsTreeParams Git.LsTree.LsTreeRecursive (Git.LsTree.LsTreeLong False)
 		fullname
 		[Param "--name-only"]
@@ -681,7 +680,8 @@ mergeIndex jl branches = do
 prepareModifyIndex :: JournalLocked -> Annex ()
 prepareModifyIndex _jl = do
 	index <- fromRepo gitAnnexIndex
-	void $ liftIO $ tryIO $ R.removeLink (index <> ".lock")
+	void $ liftIO $ tryIO $
+		removeFile (index <> literalOsPath ".lock")
 
 {- Runs an action using the branch's index file. -}
 withIndex :: Annex a -> Annex a
@@ -690,7 +690,7 @@ withIndex' :: Bool -> Annex a -> Annex a
 withIndex' bootstrapping a = withIndexFile AnnexIndexFile $ \f -> do
 	checkIndexOnce $ unlessM (liftIO $ doesFileExist f) $ do
 		unless bootstrapping create
-		createAnnexDirectory $ toOsPath $ takeDirectory f
+		createAnnexDirectory $ takeDirectory f
 		unless bootstrapping $ inRepo genIndex
 	a
 
@@ -712,7 +712,7 @@ forceUpdateIndex jl branchref = do
 {- Checks if the index needs to be updated. -}
 needUpdateIndex :: Git.Ref -> Annex Bool
 needUpdateIndex branchref = do
-	f <- toOsPath <$> fromRepo gitAnnexIndexStatus
+	f <- fromRepo gitAnnexIndexStatus
 	committedref <- Git.Ref . firstLine' <$>
 		liftIO (catchDefaultIO mempty $ F.readFile' f)
 	return (committedref /= branchref)
@@ -748,19 +748,20 @@ stageJournal jl commitindex = withIndex $ withOtherTmp $ \tmpdir -> do
 	Git.UpdateIndex.streamUpdateIndex g
 		[genstream dir h jh jlogh]
 	commitindex
-	liftIO $ cleanup (fromOsPath dir) jlogh jlogf
+	liftIO $ cleanup dir jlogh jlogf
   where
 	genstream dir h jh jlogh streamer = readDirectory jh >>= \case
 		Nothing -> return ()
 		Just file -> do
-			let path = dir P.</> file
-			unless (dirCruft file) $ whenM (isfile path) $ do
+			let file' = toOsPath file
+			let path = dir </> file'
+			unless (file' `elem` dirCruft) $ whenM (isfile path) $ do
 				sha <- Git.HashObject.hashFile h path
 				B.hPutStr jlogh (file <> "\n")
 				streamer $ Git.UpdateIndex.updateIndexLine
-					sha TreeFile (asTopFilePath $ fileJournal file)
+					sha TreeFile (asTopFilePath $ fileJournal file')
 			genstream dir h jh jlogh streamer
-	isfile file = isRegularFile <$> R.getFileStatus file
+	isfile file = isRegularFile <$> R.getFileStatus (fromOsPath file)
 	-- Clean up the staged files, as listed in the temp log file.
 	-- The temp file is used to avoid needing to buffer all the
 	-- filenames in memory.
@@ -768,10 +769,10 @@ stageJournal jl commitindex = withIndex $ withOtherTmp $ \tmpdir -> do
 		hFlush jlogh
 		hSeek jlogh AbsoluteSeek 0
 		stagedfs <- lines <$> hGetContents jlogh
-		mapM_ (removeFile . (dir </>)) stagedfs
+		mapM_ (removeFile . (dir </>) . toOsPath) stagedfs
 		hClose jlogh
 		removeWhenExistsWith (R.removeLink) (fromOsPath jlogf)
-	openjlog tmpdir = liftIO $ openTmpFileIn (toOsPath tmpdir) (toOsPath "jlog")
+	openjlog tmpdir = liftIO $ openTmpFileIn tmpdir (literalOsPath "jlog")
 
 getLocalTransitions :: Annex Transitions
 getLocalTransitions =
@@ -932,7 +933,7 @@ getIgnoredRefs =
 	S.fromList . mapMaybe Git.Sha.extractSha . fileLines' <$> content
   where
 	content = do
-		f <- toOsPath <$> fromRepo gitAnnexIgnoredRefs
+		f <- fromRepo gitAnnexIgnoredRefs
 		liftIO $ catchDefaultIO mempty $ F.readFile' f
 
 addMergedRefs :: [(Git.Sha, Git.Branch)] -> Annex ()
@@ -950,7 +951,7 @@ getMergedRefs = S.fromList . map fst <$> getMergedRefs'
 
 getMergedRefs' :: Annex [(Git.Sha, Git.Branch)]
 getMergedRefs' = do
-	f <- toOsPath <$> fromRepo gitAnnexMergedRefs
+	f <- fromRepo gitAnnexMergedRefs
 	s <- liftIO $ catchDefaultIO mempty $ F.readFile' f
 	return $ map parse $ fileLines' s
   where
diff --git a/Annex/Content/Presence.hs b/Annex/Content/Presence.hs
index 2eb0016ddd..9dfc68a202 100644
--- a/Annex/Content/Presence.hs
+++ b/Annex/Content/Presence.hs
@@ -41,18 +41,16 @@ import Config
 import Annex.Perms
 #endif
 
-import qualified System.FilePath.ByteString as P
-
 {- Checks if a given key's content is currently present. -}
 inAnnex :: Key -> Annex Bool
-inAnnex key = inAnnexCheck key $ liftIO . R.doesPathExist
+inAnnex key = inAnnexCheck key $ liftIO . R.doesPathExist . fromOsPath
 
 {- Runs an arbitrary check on a key's content. -}
-inAnnexCheck :: Key -> (RawFilePath -> Annex Bool) -> Annex Bool
+inAnnexCheck :: Key -> (OsPath -> Annex Bool) -> Annex Bool
 inAnnexCheck key check = inAnnex' id False check key
 
 {- inAnnex that performs an arbitrary check of the key's content. -}
-inAnnex' :: (a -> Bool) -> a -> (RawFilePath -> Annex a) -> Key -> Annex a
+inAnnex' :: (a -> Bool) -> a -> (OsPath -> Annex a) -> Key -> Annex a
 inAnnex' isgood bad check key = withObjectLoc key $ \loc -> do
 	r <- check loc
 	if isgood r
@@ -75,7 +73,7 @@ inAnnex' isgood bad check key = withObjectLoc key $ \loc -> do
 objectFileExists :: Key -> Annex Bool
 objectFileExists key =
 	calcRepo (gitAnnexLocation key)
-		>>= liftIO . R.doesPathExist
+		>>= liftIO . doesFileExist
 
 {- A safer check; the key's content must not only be present, but
  - is not in the process of being removed. -}
@@ -93,7 +91,7 @@ inAnnexSafe key = inAnnex' (fromMaybe True) (Just False) go key
 	{- The content file must exist, but the lock file generally
 	 - won't exist unless a removal is in process. -}
 	checklock (Just lockfile) contentfile =
-		ifM (liftIO $ doesFileExist (fromRawFilePath contentfile))
+		ifM (liftIO $ doesFileExist contentfile)
 			( checkOr is_unlocked lockfile
 			, return is_missing
 			)
@@ -102,7 +100,7 @@ inAnnexSafe key = inAnnex' (fromMaybe True) (Just False) go key
 			Just True -> is_locked
 			Just False -> is_unlocked
 #else
-	checklock Nothing contentfile = liftIO $ ifM (doesFileExist (fromRawFilePath contentfile))
+	checklock Nothing contentfile = liftIO $ ifM (doesFileExist contentfile)
 		( lockShared contentfile >>= \case
 			Nothing -> return is_locked
 			Just lockhandle -> do
@@ -113,7 +111,7 @@ inAnnexSafe key = inAnnex' (fromMaybe True) (Just False) go key
 	{- In Windows, see if we can take a shared lock. If so,
 	 - remove the lock file to clean up after ourselves.
 	 -}
 	checklock (Just lockfile) contentfile =
-		ifM (liftIO $ doesFileExist (fromRawFilePath contentfile))
+		ifM (liftIO $ doesFileExist contentfile)
 			( modifyContentDir lockfile $ liftIO $
 				lockShared lockfile >>= \case
 					Nothing -> return is_locked
@@ -134,7 +132,7 @@ inAnnexSafe key = inAnnex' (fromMaybe True) (Just False) go key
 - content locking works, from running at the same time as content is locked
 - using the old method.
 -}
-withContentLockFile :: Key -> (Maybe RawFilePath -> Annex a) -> Annex a
+withContentLockFile :: Key -> (Maybe OsPath -> Annex a) -> Annex a
 withContentLockFile k a = do
 	v <- getVersion
 	if versionNeedsWritableContentFiles v
@@ -146,7 +144,7 @@ withContentLockFile k a = do
 			 - will switch over to v10 content lock files at the
 			 - right time. -}
 			gitdir <- fromRepo Git.localGitDir
-			let gitconfig = gitdir P.</> "config"
+			let gitconfig = gitdir </> literalOsPath "config"
 			ic <- withTSDelta (liftIO . genInodeCache gitconfig)
 			oldic <- Annex.getState Annex.gitconfiginodecache
 			v' <- if fromMaybe False (compareStrong <$> ic <*> oldic)
@@ -161,7 +159,7 @@ withContentLockFile k a = do
   where
 	go v = contentLockFile k v >>= a
 
-contentLockFile :: Key -> Maybe RepoVersion -> Annex (Maybe RawFilePath)
+contentLockFile :: Key -> Maybe RepoVersion -> Annex (Maybe OsPath)
 #ifndef mingw32_HOST_OS
 {- Older versions of git-annex locked content files themselves, but newer
 - versions use a separate lock file, to better support repos shared
@@ -177,7 +175,7 @@ contentLockFile key _ = Just <$> calcRepo (gitAnnexContentLock key)
 #endif
 
 {- Performs an action, passing it the location to use for a key's content. -}
-withObjectLoc :: Key -> (RawFilePath -> Annex a) -> Annex a
+withObjectLoc :: Key -> (OsPath -> Annex a) -> Annex a
 withObjectLoc key a = a =<< calcRepo (gitAnnexLocation key)
 
 {- Check if a file contains the unmodified content of the key.
@@ -185,7 +183,7 @@ withObjectLoc key a = a =<< calcRepo (gitAnnexLocation key)
 - The expensive way to tell is to do a verification of its content.
 - The cheaper way is to see if the InodeCache for the key matches the
 - file. -}
-isUnmodified :: Key -> RawFilePath -> Annex Bool
+isUnmodified :: Key -> OsPath -> Annex Bool
 isUnmodified key f =
 	withTSDelta (liftIO . genInodeCache f) >>= \case
 		Just fc -> do
@@ -193,7 +191,7 @@ isUnmodified key f =
 			isUnmodified' key f fc ic
 		Nothing -> return False
 
-isUnmodified' :: Key -> RawFilePath -> InodeCache -> [InodeCache] -> Annex Bool
+isUnmodified' :: Key -> OsPath -> InodeCache -> [InodeCache] -> Annex Bool
 isUnmodified' = isUnmodifiedLowLevel Database.Keys.addInodeCaches
 
 {- Cheap check if a file contains the unmodified content of the key,
@@ -206,7 +204,7 @@ isUnmodified' = isUnmodifiedLowLevel Database.Keys.addInodeCaches
 - this may report a false positive when repeated edits are made to a file
 - within a small time window (eg 1 second).
 -}
-isUnmodifiedCheap :: Key -> RawFilePath -> Annex Bool
+isUnmodifiedCheap :: Key -> OsPath -> Annex Bool
 isUnmodifiedCheap key f = maybe (pure False) (isUnmodifiedCheap' key)
 	=<< withTSDelta (liftIO . genInodeCache f)
diff --git a/Annex/Content/Presence/LowLevel.hs b/Annex/Content/Presence/LowLevel.hs
index 6f50c187b2..1def5173f9 100644
--- a/Annex/Content/Presence/LowLevel.hs
+++ b/Annex/Content/Presence/LowLevel.hs
@@ -12,7 +12,7 @@ import Annex.Verify
 import Annex.InodeSentinal
 import Utility.InodeCache
 
-isUnmodifiedLowLevel :: (Key -> [InodeCache] -> Annex ()) -> Key -> RawFilePath -> InodeCache -> [InodeCache] -> Annex Bool
+isUnmodifiedLowLevel :: (Key -> [InodeCache] -> Annex ()) -> Key -> OsPath -> InodeCache -> [InodeCache] -> Annex Bool
 isUnmodifiedLowLevel addinodecaches key f fc ic =
 	isUnmodifiedCheapLowLevel fc ic <||> expensivecheck
   where
diff --git a/Annex/GitOverlay.hs b/Annex/GitOverlay.hs
index 6c02a79fa9..384feed39a 100644
--- a/Annex/GitOverlay.hs
+++ b/Annex/GitOverlay.hs
@@ -23,7 +23,7 @@ import qualified Annex.Queue
 import Config.Smudge
 
 {- Runs an action using a different git index file. -}
-withIndexFile :: AltIndexFile -> (FilePath -> Annex a) -> Annex a
+withIndexFile :: AltIndexFile -> (OsPath -> Annex a) -> Annex a
 withIndexFile i = withAltRepo usecachedgitenv restoregitenv
   where
 	-- This is an optimisation. Since withIndexFile is run repeatedly,
@@ -58,7 +58,7 @@ withIndexFile i = withAltRepo usecachedgitenv restoregitenv
 		f <- indexEnvVal $ case i of
 			AnnexIndexFile -> gitAnnexIndex g
 			ViewIndexFile -> gitAnnexViewIndex g
-		g' <- addGitEnv g indexEnv f
+		g' <- addGitEnv g indexEnv (fromOsPath f)
 		return (g', f)
 
 	restoregitenv g g' = g' { gitEnv = gitEnv g }
diff --git a/Annex/Link.hs b/Annex/Link.hs
index 47f7cfbbcb..7d8ecb7a14 100644
--- a/Annex/Link.hs
+++ b/Annex/Link.hs
@@ -54,7 +54,7 @@ import System.PosixCompat.Files (isSymbolicLink)
 type LinkTarget = S.ByteString
 
 {- Checks if a file is a link to a key. -}
-isAnnexLink :: RawFilePath -> Annex (Maybe Key)
+isAnnexLink :: OsPath -> Annex (Maybe Key)
 isAnnexLink file = maybe Nothing parseLinkTargetOrPointer <$> getAnnexLinkTarget file
 
 {- Gets the link target of a symlink.
@@ -65,13 +65,13 @@ isAnnexLink file = maybe Nothing parseLinkTargetOrPointer <$> getAnnexLinkTarget
 - Returns Nothing if the file is not a symlink, or not a link to annex
 - content.
 -}
-getAnnexLinkTarget :: RawFilePath -> Annex (Maybe LinkTarget)
+getAnnexLinkTarget :: OsPath -> Annex (Maybe LinkTarget)
 getAnnexLinkTarget f = getAnnexLinkTarget' f
 	=<< (coreSymlinks <$> Annex.getGitConfig)
 
 {- Pass False to force looking inside file, for when git checks out
 - symlinks as plain files. -}
-getAnnexLinkTarget' :: RawFilePath -> Bool -> Annex (Maybe S.ByteString)
+getAnnexLinkTarget' :: OsPath -> Bool -> Annex (Maybe S.ByteString)
 getAnnexLinkTarget' file coresymlinks = if coresymlinks
 	then check probesymlink $
 		return Nothing
@@ -86,9 +86,9 @@ getAnnexLinkTarget' file coresymlinks = if coresymlinks
 			| otherwise -> return Nothing
 		Nothing -> fallback
 
-	probesymlink = R.readSymbolicLink file
+	probesymlink = R.readSymbolicLink (fromOsPath file)
 
-	probefilecontent = F.withFile (toOsPath file) ReadMode $ \h -> do
+	probefilecontent = F.withFile file ReadMode $ \h -> do
 		s <- S.hGet h maxSymlinkSz
 		-- If we got the full amount, the file is too large
 		-- to be a symlink target.
@@ -241,6 +241,7 @@ restagePointerFiles r = unlessM (Annex.getState Annex.insmudgecleanfilter) $ do
 	let replaceindex = liftIO $ moveFile tmpindex realindex
 	let updatetmpindex = do
 		r' <- liftIO $ Git.Env.addGitEnv r Git.Index.indexEnv
+			. fromOsPath
 			=<< Git.Index.indexEnvVal tmpindex
 		configfilterprocess numsz $
 			runupdateindex tsd r' replaceindex
@@ -452,7 +453,7 @@ isPointerFile f = catchDefaultIO Nothing $
 			fdToHandle fd
 		in bracket open hClose readhandle
 #else
-	ifM (isSymbolicLink <$> R.getSymbolicLinkStatus (toRawFilePath f))
+	ifM (isSymbolicLink <$> R.getSymbolicLinkStatus (fromOsPath f))
 		( return Nothing
 		, F.withFile f ReadMode readhandle
 		)
diff --git a/Annex/MetaData.hs b/Annex/MetaData.hs
index 1eba836455..ac93d4988b 100644
--- a/Annex/MetaData.hs
+++ b/Annex/MetaData.hs
@@ -38,7 +38,7 @@ import Text.Read
 -
 - Also, can generate new metadata, if configured to do so.
 -}
-genMetaData :: Key -> RawFilePath -> Maybe POSIXTime -> Annex ()
+genMetaData :: Key -> OsPath -> Maybe POSIXTime -> Annex ()
 genMetaData key file mmtime = do
 	catKeyFileHEAD file >>= \case
 		Nothing -> noop
@@ -57,8 +57,8 @@ genMetaData key file mmtime = do
 		Nothing -> noop
   where
 	warncopied = warning $ UnquotedString $
-		"Copied metadata from old version of " ++ fromRawFilePath file ++ " to new version. " ++
-		"If you don't want this copied metadata, run: git annex metadata --remove-all " ++ fromRawFilePath file
+		"Copied metadata from old version of " ++ fromOsPath file ++ " to new version. " ++
+		"If you don't want this copied metadata, run: git annex metadata --remove-all " ++ fromOsPath file
 	-- If the only fields copied were date metadata, and they'll
 	-- be overwritten with the current mtime, no need to warn about
 	-- copying.
diff --git a/Annex/Verify.hs b/Annex/Verify.hs
index 697ffeadc0..001529eb68 100644
--- a/Annex/Verify.hs
+++ b/Annex/Verify.hs
@@ -39,13 +39,13 @@ import Utility.Metered
 import Annex.WorkerPool
 import Types.WorkerPool
 import Types.Key
+import qualified Utility.FileIO as F
 
 import Control.Concurrent.STM
 import Control.Concurrent.Async
 import qualified Data.ByteString as S
 #if WITH_INOTIFY
 import qualified System.INotify as INotify
-import qualified System.FilePath.ByteString as P
 #endif
 
 shouldVerify :: VerifyConfig -> Annex Bool
@@ -73,7 +73,7 @@ shouldVerify (RemoteVerify r) =
 - If the RetrievalSecurityPolicy requires verification and the key's
 - backend doesn't support it, the verification will fail.
 -}
-verifyKeyContentPostRetrieval :: RetrievalSecurityPolicy -> VerifyConfig -> Verification -> Key -> RawFilePath -> Annex Bool
+verifyKeyContentPostRetrieval :: RetrievalSecurityPolicy -> VerifyConfig -> Verification -> Key -> OsPath -> Annex Bool
 verifyKeyContentPostRetrieval rsp v verification k f = case (rsp, verification) of
 	(_, Verified) -> return True
 	(RetrievalVerifiableKeysSecure, _) -> ifM (isVerifiable k)
@@ -105,11 +105,11 @@ verifyKeyContentPostRetrieval rsp v verification k f = case (rsp, verification)
 -- When possible, does an incremental verification, because that can be
 -- faster. Eg, the VURL backend can need to try multiple checksums and only
 -- with an incremental verification does it avoid reading files twice.
-verifyKeyContent :: Key -> RawFilePath -> Annex Bool
+verifyKeyContent :: Key -> OsPath -> Annex Bool
 verifyKeyContent k f = verifyKeySize k f <&&> verifyKeyContent' k f
 
 -- Does not verify size.
-verifyKeyContent' :: Key -> RawFilePath -> Annex Bool
+verifyKeyContent' :: Key -> OsPath -> Annex Bool
 verifyKeyContent' k f =
 	Backend.maybeLookupBackendVariety (fromKey keyVariety k) >>= \case
 		Nothing -> return True
@@ -119,7 +119,7 @@ verifyKeyContent' k f =
 			iv <- mkiv k
 			showAction (UnquotedString (descIncrementalVerifier iv))
 			res <- liftIO $ catchDefaultIO Nothing $
-				withBinaryFile (fromRawFilePath f) ReadMode $ \h -> do
+				F.withBinaryFile f ReadMode $ \h -> do
 					feedIncrementalVerifier h iv
 					finalizeIncrementalVerifier iv
 			case res of
@@ -129,7 +129,7 @@ verifyKeyContent' k f =
 			Just verifier -> verifier k f
 		(Nothing, Just verifier) -> verifier k f
 
-resumeVerifyKeyContent :: Key -> RawFilePath -> IncrementalVerifier -> Annex Bool
+resumeVerifyKeyContent :: Key -> OsPath -> IncrementalVerifier -> Annex Bool
 resumeVerifyKeyContent k f iv = liftIO (positionIncrementalVerifier iv) >>= \case
 	Nothing -> fallback
 	Just endpos -> do
@@ -151,7 +151,7 @@ resumeVerifyKeyContent k f iv = liftIO (positionIncrementalVerifier iv) >>= \cas
 	| otherwise = do
 		showAction (UnquotedString (descIncrementalVerifier iv))
 		liftIO $ catchDefaultIO (Just False) $
-			withBinaryFile (fromRawFilePath f) ReadMode $ \h -> do
+			F.withBinaryFile f ReadMode $ \h -> do
 				hSeek h AbsoluteSeek endpos
 				feedIncrementalVerifier h iv
 				finalizeIncrementalVerifier iv
@@ -167,7 +167,7 @@ feedIncrementalVerifier h iv = do
   where
 	chunk = 65536
 
-verifyKeySize :: Key -> RawFilePath -> Annex Bool
+verifyKeySize :: Key -> OsPath -> Annex Bool
 verifyKeySize k f = case fromKey keySize k of
 	Just size -> do
 		size' <- liftIO $ catchDefaultIO 0 $ getFileSize f
@@ -295,7 +295,7 @@ resumeVerifyFromOffset o incrementalverifier meterupdate h
 -- and if the disk is slow, the reader may never catch up to the writer,
 -- and the disk cache may never speed up reads. So this should only be
 -- used when there's not a better way to incrementally verify.
-tailVerify :: Maybe IncrementalVerifier -> RawFilePath -> Annex a -> Annex a
+tailVerify :: Maybe IncrementalVerifier -> OsPath -> Annex a -> Annex a
 tailVerify (Just iv) f writer = do
 	finished <- liftIO newEmptyTMVarIO
 	t <- liftIO $ async $ tailVerify' iv f finished
@@ -305,7 +305,7 @@ tailVerify (Just iv) f writer = do
 	writer `finally` finishtail
 tailVerify Nothing _ writer = writer
 
-tailVerify' :: IncrementalVerifier -> RawFilePath -> TMVar () -> IO ()
+tailVerify' :: IncrementalVerifier -> OsPath -> TMVar () -> IO ()
 #if WITH_INOTIFY
 tailVerify' iv f finished =
 	tryNonAsync go >>= \case
@@ -318,15 +318,16 @@ tailVerify' iv f finished =
 	-- of resuming, and waiting for modification deals with such
 	-- situations.
 	inotifydirchange i cont =
-		INotify.addWatch i [INotify.Modify] dir $ \case
+		INotify.addWatch i [INotify.Modify] (fromOsPath dir) $ \case
 			-- Ignore changes to other files in the directory.
 			INotify.Modified { INotify.maybeFilePath = fn }
-				| fn == Just basef -> cont
+				| fn == Just basef' -> cont
 			_ -> noop
 	  where
-		(dir, basef) = P.splitFileName f
+		(dir, basef) = splitFileName f
+		basef' = fromOsPath basef
 
-	inotifyfilechange i = INotify.addWatch i [INotify.Modify] f . const
+	inotifyfilechange i = INotify.addWatch i [INotify.Modify] (fromOsPath f) . const
 
 	go = INotify.withINotify $ \i -> do
 		modified <- newEmptyTMVarIO
@@ -354,7 +355,7 @@ tailVerify' iv f finished =
 		case v of
 			Just () -> do
 				r <- tryNonAsync $
-					tryWhenExists (openBinaryFile (fromRawFilePath f) ReadMode) >>= \case
+					tryWhenExists (F.openBinaryFile f ReadMode) >>= \case
 						Just h -> return (Just h)
 						-- File does not exist, must have been
 						-- deleted. Wait for next modification
diff --git a/Annex/WorkTree.hs b/Annex/WorkTree.hs
index 41abc2471e..ce9cb449a7 100644
--- a/Annex/WorkTree.hs
+++ b/Annex/WorkTree.hs
@@ -22,11 +22,11 @@ import qualified Database.Keys
 - When in an adjusted branch that may have hidden the file, looks for a
 - pointer to a key in the original branch.
 -}
-lookupKey :: RawFilePath -> Annex (Maybe Key)
+lookupKey :: OsPath -> Annex (Maybe Key)
 lookupKey = lookupKey' catkeyfile
   where
 	catkeyfile file =
-		ifM (liftIO $ doesFileExist $ fromRawFilePath file)
+		ifM (liftIO $ doesFileExist file)
 			( catKeyFile file
 			, catKeyFileHidden file =<< getCurrentBranch
 			)
@@ -35,22 +35,22 @@ lookupKey = lookupKey' catkeyfile
 - changes in the work tree. This means it's slower, but it also has
 - consistently the same behavior for locked files as for unlocked files.
 -}
-lookupKeyStaged :: RawFilePath -> Annex (Maybe Key)
+lookupKeyStaged :: OsPath -> Annex (Maybe Key)
 lookupKeyStaged file = catKeyFile file >>= \case
 	Just k -> return (Just k)
 	Nothing -> catKeyFileHidden file =<< getCurrentBranch
 
 {- Like lookupKey, but does not find keys for hidden files. -}
-lookupKeyNotHidden :: RawFilePath -> Annex (Maybe Key)
+lookupKeyNotHidden :: OsPath -> Annex (Maybe Key)
 lookupKeyNotHidden = lookupKey' catkeyfile
   where
 	catkeyfile file =
-		ifM (liftIO $ doesFileExist $ fromRawFilePath file)
+		ifM (liftIO $ doesFileExist file)
 			( catKeyFile file
 			, return Nothing
 			)
 
-lookupKey' :: (RawFilePath -> Annex (Maybe Key)) -> RawFilePath -> Annex (Maybe Key)
+lookupKey' :: (OsPath -> Annex (Maybe Key)) -> OsPath -> Annex (Maybe Key)
 lookupKey' catkeyfile file = isAnnexLink file >>= \case
 	Just key -> return (Just key)
 	Nothing -> catkeyfile file
diff --git a/Backend.hs b/Backend.hs
index 216b59fb4a..dc15733bd7 100644
--- a/Backend.hs
+++ b/Backend.hs
@@ -67,7 +67,7 @@ getBackend :: FilePath -> Key -> Annex (Maybe Backend)
 getBackend file k = maybeLookupBackendVariety (fromKey keyVariety k) >>= \case
 	Just backend -> return $ Just backend
 	Nothing -> do
-		warning $ "skipping " <> QuotedPath (toRawFilePath file) <> " (" <>
+		warning $ "skipping " <> QuotedPath (toOsPath file) <> " (" <>
 			UnquotedString (unknownBackendVarietyMessage (fromKey keyVariety k)) <> ")"
 		return Nothing
 
@@ -78,7 +78,7 @@ unknownBackendVarietyMessage v =
 {- Looks up the backend that should be used for a file.
 - That can be configured on a per-file basis in the gitattributes file,
 - or forced with --backend. -}
-chooseBackend :: RawFilePath -> Annex Backend
+chooseBackend :: OsPath -> Annex Backend
 chooseBackend f = Annex.getRead Annex.forcebackend >>= go
   where
 	go Nothing = do
diff --git a/Backend/VURL/Utilities.hs b/Backend/VURL/Utilities.hs
index 37dcb9eea6..82e5939e7c 100644
--- a/Backend/VURL/Utilities.hs
+++ b/Backend/VURL/Utilities.hs
@@ -43,7 +43,7 @@ migrateFromVURLToURL oldkey newbackend _af _
 	| otherwise = return Nothing
 
 -- The Backend must use a cryptographically secure hash.
-generateEquivilantKey :: Backend -> RawFilePath -> Annex (Maybe Key)
+generateEquivilantKey :: Backend -> OsPath -> Annex (Maybe Key)
 generateEquivilantKey b f =
 	case genKey b of
 		Just genkey -> do
diff --git a/Database/ContentIdentifier.hs b/Database/ContentIdentifier.hs
index 2f112a209c..c531f915ea 100644
--- a/Database/ContentIdentifier.hs
+++ b/Database/ContentIdentifier.hs
@@ -47,11 +47,9 @@ import Git.FilePath
 import qualified Git.DiffTree as DiffTree
 import Logs
 import qualified Logs.ContentIdentifier as Log
-import qualified Utility.RawFilePath as R
 
 import Database.Persist.Sql hiding (Key)
 import Database.Persist.TH
-import qualified System.FilePath.ByteString as P
 
 #if MIN_VERSION_persistent_sqlite(2,13,3)
 import Database.RawFilePath
@@ -99,14 +97,14 @@ openDb :: Annex ContentIdentifierHandle
 openDb = do
 	dbdir <- calcRepo' gitAnnexContentIdentifierDbDir
 	let db = dbdir </> literalOsPath "db"
-	isnew <- liftIO $ not <$> doesDirectoryPathExist db
+	isnew <- liftIO $ not <$> doesDirectoryExist db
 	if isnew
 		then initDb db $ void $
 			runMigrationSilent migrateContentIdentifier
 		-- Migrate from old versions of database, which had buggy
 		-- and suboptimal uniqueness constraints.
 #if MIN_VERSION_persistent_sqlite(2,13,3)
-		else liftIO $ runSqlite' db $ void $
+		else liftIO $ runSqlite' (fromOsPath db) $ void $
 			runMigrationSilent migrateContentIdentifier
 #else
 		else liftIO $ runSqlite (T.pack (fromRawFilePath db)) $ void $
diff --git a/Database/Export.hs b/Database/Export.hs
index 6de86c7925..0ed6c126bb 100644
--- a/Database/Export.hs
+++ b/Database/Export.hs
@@ -58,11 +58,9 @@ import Git.Types
 import Git.Sha
 import Git.FilePath
 import qualified Git.DiffTree
-import qualified Utility.RawFilePath as R
 
 import Database.Persist.Sql hiding (Key)
 import Database.Persist.TH
-import qualified System.FilePath.ByteString as P
 
 data ExportHandle = ExportHandle H.DbQueue UUID
 
@@ -98,8 +96,8 @@ ExportTreeCurrent
 openDb :: UUID -> Annex ExportHandle
 openDb u = do
 	dbdir <- calcRepo' (gitAnnexExportDbDir u)
-	let db = dbdir P.</> "db"
-	unlessM (liftIO $ R.doesPathExist db) $ do
+	let db = dbdir </> literalOsPath "db"
+	unlessM (liftIO $ doesDirectoryExist db) $ do
 		initDb db $ void $
 			runMigrationSilent migrateExport
 	h <- liftIO $ H.openDbQueue db "exported"
@@ -136,26 +134,27 @@ addExportedLocation :: ExportHandle -> Key -> ExportLocation -> IO ()
 addExportedLocation h k el = queueDb h $ do
 	void $ insertUniqueFast $ Exported k ef
 	let edirs = map
-		(\ed -> ExportedDirectory (SByteString (fromExportDirectory ed)) ef)
+		(\ed -> ExportedDirectory (SByteString (fromOsPath (fromExportDirectory ed))) ef)
 		(exportDirectories el)
 	putMany edirs
  where
-	ef = SByteString (fromExportLocation el)
+	ef = SByteString (fromOsPath (fromExportLocation el))
 
 removeExportedLocation :: ExportHandle -> Key -> ExportLocation -> IO ()
 removeExportedLocation h k el = queueDb h $ do
 	deleteWhere [ExportedKey ==. k, ExportedFile ==. ef]
-	let subdirs = map (SByteString . fromExportDirectory)
+	let subdirs = map
+		(SByteString . fromOsPath . fromExportDirectory)
 		(exportDirectories el)
 	deleteWhere [ExportedDirectoryFile ==. ef, ExportedDirectorySubdir <-. subdirs]
  where
-	ef = SByteString (fromExportLocation el)
+	ef = SByteString (fromOsPath (fromExportLocation el))
 
 {- Note that this does not see recently queued changes. -}
 getExportedLocation :: ExportHandle -> Key -> IO [ExportLocation]
 getExportedLocation (ExportHandle h _) k = H.queryDbQueue h $ do
 	l <- selectList [ExportedKey ==. k] []
-	return $ map (mkExportLocation . (\(SByteString f) -> f) . exportedFile . entityVal) l
+	return $ map (mkExportLocation . (\(SByteString f) -> toOsPath f) . exportedFile . entityVal) l
 
 {- Note that this does not see recently queued changes. -}
 isExportDirectoryEmpty :: ExportHandle -> ExportDirectory -> IO Bool
@@ -163,13 +162,13 @@ isExportDirectoryEmpty (ExportHandle h _) d = H.queryDbQueue h $ do
 	l <- selectList [ExportedDirectorySubdir ==. ed] []
 	return $ null l
  where
-	ed = SByteString $ fromExportDirectory d
+	ed = SByteString $ fromOsPath $ fromExportDirectory d
 
 {- Get locations in the export that might contain a key. -}
 getExportTree :: ExportHandle -> Key -> IO [ExportLocation]
 getExportTree (ExportHandle h _) k = H.queryDbQueue h $ do
 	l <- selectList [ExportTreeKey ==. k] []
-	return $ map (mkExportLocation . (\(SByteString f) -> f) . exportTreeFile . entityVal) l
+	return $ map (mkExportLocation . (\(SByteString f) -> toOsPath f) . exportTreeFile . entityVal) l
 
 {- Get keys that might be currently exported to a location.
 -
@@ -180,19 +179,19 @@ getExportTreeKey (ExportHandle h _) el = H.queryDbQueue h $ do
 	map (exportTreeKey . entityVal)
 		<$> selectList [ExportTreeFile ==. ef] []
  where
-	ef = SByteString (fromExportLocation el)
+	ef = SByteString (fromOsPath (fromExportLocation el))
 
 addExportTree :: ExportHandle -> Key -> ExportLocation -> IO ()
 addExportTree h k loc = queueDb h $
	void $ insertUniqueFast $ ExportTree k ef
 where
-	ef = SByteString (fromExportLocation loc)
+	ef = SByteString (fromOsPath (fromExportLocation loc))
 
 removeExportTree :: ExportHandle -> Key -> ExportLocation -> IO ()
 removeExportTree h k loc = queueDb h $
	deleteWhere [ExportTreeKey ==. k, ExportTreeFile ==. ef]
 where
-	ef = SByteString (fromExportLocation loc)
+	ef = SByteString (fromOsPath (fromExportLocation loc))
 
 -- An action that is passed the old and new values that were exported,
 -- and updates state.
diff --git a/Database/ImportFeed.hs b/Database/ImportFeed.hs
index ad18a15530..2d1611c73c 100644
--- a/Database/ImportFeed.hs
+++ b/Database/ImportFeed.hs
@@ -40,11 +40,9 @@ import Logs.MetaData
 import Types.MetaData
 import Annex.MetaData.StandardFields
 import Annex.LockFile
-import qualified Utility.RawFilePath as R
 
 import Database.Persist.Sql hiding (Key)
 import Database.Persist.TH
-import qualified System.FilePath.ByteString as P
 import qualified Data.ByteString as B
 import qualified Data.Set as S
 
@@ -75,8 +73,8 @@ AnnexBranch
 openDb :: Annex ImportFeedDbHandle
 openDb = do
	dbdir <- calcRepo' gitAnnexImportFeedDbDir
-	let db = dbdir P.</> "db"
-	isnew <- liftIO $ not <$> R.doesPathExist db
+	let db = dbdir </> literalOsPath "db"
+	isnew <- liftIO $ not <$> doesDirectoryExist db
	when isnew $
		initDb db $ void $
			runMigrationSilent migrateImportFeed
diff --git a/Database/Keys.hs b/Database/Keys.hs
index 9704b6ff4c..686be30e13 100644
--- a/Database/Keys.hs
+++ b/Database/Keys.hs
@@ -54,11 +54,10 @@ import Git.Branch (writeTreeQuiet, update')
 import qualified Git.Ref
 import Config
 import Config.Smudge
-import qualified Utility.RawFilePath as R
+import qualified Utility.OsString as OS
 
 import qualified Data.ByteString as S
 import qualified Data.ByteString.Char8 as S8
-import qualified System.FilePath.ByteString as P
 import Control.Concurrent.Async
 
 {- Runs an action that reads from the database.
@@ -129,8 +128,8 @@ openDb forwrite _ = do
	lck <- calcRepo' gitAnnexKeysDbLock
	catchPermissionDenied permerr $ withExclusiveLock lck $ do
		dbdir <- calcRepo' gitAnnexKeysDbDir
-		let db = dbdir P.</> "db"
-		dbexists <- liftIO $ R.doesPathExist db
+		let db = dbdir </> literalOsPath "db"
+		dbexists <- liftIO $ doesDirectoryExist db
		case dbexists of
			True -> open db False
			False -> do
@@ -182,7 +181,7 @@ emptyWhenBare a = ifM isBareRepo
	)
 
 {- Include a known associated file along with any recorded in the database. -}
-getAssociatedFilesIncluding :: AssociatedFile -> Key -> Annex [RawFilePath]
+getAssociatedFilesIncluding :: AssociatedFile -> Key -> Annex [OsPath]
 getAssociatedFilesIncluding afile k = emptyWhenBare $ do
	g <- Annex.gitRepo
	l <- map (`fromTopFilePath` g) <$> getAssociatedFiles k
@@ -201,7 +200,7 @@ removeAssociatedFile k = runWriterIO AssociatedTable .
	SQL.removeAssociatedFile k
 
 {- Stats the files, and stores their InodeCaches. -}
-storeInodeCaches :: Key -> [RawFilePath] -> Annex ()
+storeInodeCaches :: Key -> [OsPath] -> Annex ()
 storeInodeCaches k fs = withTSDelta $ \d ->
	addInodeCaches k . catMaybes
		=<< liftIO (mapM (\f -> genInodeCache f d) fs)
@@ -265,7 +264,7 @@ reconcileStaged dbisnew qh = ifM isBareRepo
	( return mempty
	, do
		gitindex <- inRepo currentIndexFile
-		indexcache <- fromRawFilePath <$> calcRepo' gitAnnexKeysDbIndexCache
+		indexcache <- fromOsPath <$> calcRepo' gitAnnexKeysDbIndexCache
		withTSDelta (liftIO . genInodeCache gitindex) >>= \case
			Just cur -> readindexcache indexcache >>= \case
				Nothing -> go cur indexcache =<< getindextree
@@ -356,8 +355,9 @@ reconcileStaged dbisnew qh = ifM isBareRepo
		-- be a pointer file. And a pointer file that is replaced with
		-- a non-pointer file will match this. This is only a
		-- prefilter so that's ok.
-		, Param $ "-G" ++ fromRawFilePath (toInternalGitPath $
-			P.pathSeparator `S.cons` objectDir)
+		, Param $ "-G" ++
+			fromOsPath (toInternalGitPath $
+				pathSeparator `OS.cons` objectDir)
		-- Disable rename detection.
		, Param "--no-renames"
		-- Avoid other complications.
@@ -371,6 +371,7 @@ reconcileStaged dbisnew qh = ifM isBareRepo
	procdiff mdfeeder (info:file:rest) conflicted
		| ":" `S.isPrefixOf` info = case S8.words info of
			(_colonsrcmode:dstmode:srcsha:dstsha:status:[]) -> do
+				let file' = asTopFilePath (toOsPath file)
				let conflicted' = status == "U"
				-- avoid removing associated file when
				-- there is a merge conflict
@@ -378,17 +379,15 @@ reconcileStaged dbisnew qh = ifM isBareRepo
					send mdfeeder (Ref srcsha) $ \case
						Just oldkey -> do
							liftIO $ SQL.removeAssociatedFile oldkey
-								(asTopFilePath file)
-								(SQL.WriteHandle qh)
+								file' (SQL.WriteHandle qh)
							return True
						Nothing -> return False
				send mdfeeder (Ref dstsha) $ \case
					Just key -> do
						liftIO $ addassociatedfile key
-							(asTopFilePath file)
-							(SQL.WriteHandle qh)
+							file' (SQL.WriteHandle qh)
						when (dstmode /= fmtTreeItemType TreeSymlink) $
-							reconcilepointerfile (asTopFilePath file) key
+							reconcilepointerfile file' key
						return True
					Nothing -> return False
				procdiff mdfeeder rest
@@ -403,11 +402,11 @@ reconcileStaged dbisnew qh = ifM isBareRepo
	procmergeconflictdiff mdfeeder (info:file:rest) conflicted
		| ":" `S.isPrefixOf` info = case S8.words info of
			(_colonmode:_mode:sha:_sha:status:[]) -> do
+				let file' = asTopFilePath (toOsPath file)
				send mdfeeder (Ref sha) $ \case
					Just key -> do
						liftIO $ SQL.addAssociatedFile key
-							(asTopFilePath file)
-							(SQL.WriteHandle qh)
+							file' (SQL.WriteHandle qh)
						return True
					Nothing -> return False
				let conflicted' = status == "U"
diff --git a/Git/Command.hs b/Git/Command.hs
index ec4db40d53..b3c25dcee1 100644
--- a/Git/Command.hs
+++ b/Git/Command.hs
@@ -123,9 +123,12 @@ pipeNullSplit params repo = do
 - convenience.
 -}
 pipeNullSplit' :: [CommandParam] -> Repo -> IO ([S.ByteString], IO Bool)
-pipeNullSplit' params repo = do
+pipeNullSplit' = pipeNullSplit'' id
+
+pipeNullSplit'' :: (S.ByteString -> t) -> [CommandParam] -> Repo -> IO ([t], IO Bool)
+pipeNullSplit'' f params repo = do
 	(s, cleanup) <- pipeNullSplit params repo
-	return (map L.toStrict s, cleanup)
+	return (map (f . L.toStrict) s, cleanup)
 
 pipeNullSplitStrict :: [CommandParam] -> Repo -> IO [S.ByteString]
 pipeNullSplitStrict params repo = do
diff --git a/Git/Index.hs b/Git/Index.hs
index 45bb238613..de4ceaf3dc 100644
--- a/Git/Index.hs
+++ b/Git/Index.hs
@@ -28,8 +28,8 @@ indexEnv = "GIT_INDEX_FILE"
 -
 - So, an absolute path is the only safe option for this to return.
 -}
-indexEnvVal :: OsPath -> IO String
-indexEnvVal p = fromOsPath <$> absPath p
+indexEnvVal :: OsPath -> IO OsPath
+indexEnvVal p = absPath p
 
 {- Forces git to use the specified index file.
 -
@@ -42,7 +42,7 @@ override :: OsPath -> Repo -> IO (IO ())
 override index _r = do
 	res <- getEnv var
 	val <- indexEnvVal index
-	setEnv var val True
+	setEnv var (fromOsPath val) True
 	return $ reset res
  where
	var = "GIT_INDEX_FILE"
diff --git a/Git/Log.hs b/Git/Log.hs
index a3246d5102..1d6e719bb4 100644
--- a/Git/Log.hs
+++ b/Git/Log.hs
@@ -19,7 +19,7 @@ import Data.Time.Clock.POSIX
 data LoggedFileChange t = LoggedFileChange
 	{ changetime :: POSIXTime
 	, changed :: t
-	, changedfile :: FilePath
+	, changedfile :: OsPath
 	, oldref :: Ref
 	, newref :: Ref
 	}
@@ -34,7 +34,7 @@ getGitLog
 	-> Maybe Ref
 	-> [FilePath]
 	-> [CommandParam]
-	-> (Sha -> FilePath -> Maybe t)
+	-> (Sha -> OsPath -> Maybe t)
 	-> Repo
 	-> IO ([LoggedFileChange t], IO Bool)
 getGitLog ref stopref fs os selector repo = do
@@ -75,7 +75,7 @@ commitinfoFormat = "%H %ct"
 --
 -- The commitinfo is not included before all changelines, so
 -- keep track of the most recently seen commitinfo.
-parseGitRawLog :: (Ref -> FilePath -> Maybe t) -> [String] -> [LoggedFileChange t]
+parseGitRawLog :: (Ref -> OsPath -> Maybe t) -> [String] -> [LoggedFileChange t]
 parseGitRawLog selector = parse (deleteSha, epoch)
  where
	epoch = toEnum 0 :: POSIXTime
@@ -91,11 +91,12 @@ parseGitRawLog selector = parse (deleteSha, epoch)
			_ -> (oldcommitsha, oldts, cl')
		mrc = do
			(old, new) <- parseRawChangeLine cl
-			v <- selector commitsha c2
+			let c2' = toOsPath c2
+			v <- selector commitsha c2'
			return $ LoggedFileChange
				{ changetime = ts
				, changed = v
-				, changedfile = c2
+				, changedfile = c2'
				, oldref = old
				, newref = new
				}
diff --git a/Git/LsFiles.hs b/Git/LsFiles.hs
index 9057a7bb5b..d26e758748 100644
--- a/Git/LsFiles.hs
+++ b/Git/LsFiles.hs
@@ -332,7 +332,7 @@ reduceUnmerged c (i:is) = reduceUnmerged (new:c) rest
 - Note that this uses a --debug option whose output could change at some
 - point in the future. If the output is not as expected, will use Nothing.
 -}
-inodeCaches :: [OsPath] -> Repo -> IO ([(FilePath, Maybe InodeCache)], IO Bool)
+inodeCaches :: [OsPath] -> Repo -> IO ([(OsPath, Maybe InodeCache)], IO Bool)
 inodeCaches locs repo = guardSafeForLsFiles repo $ do
	(ls, cleanup) <- pipeNullSplit params repo
	return (parse Nothing (map decodeBL ls), cleanup)
@@ -348,11 +348,11 @@ inodeCaches locs repo = guardSafeForLsFiles repo $ do
	parse Nothing (f:ls) = parse (Just f) ls
	parse (Just f) (s:[]) =
		let i = parsedebug s
-		in (f, i) : []
+		in (toOsPath f, i) : []
	parse (Just f) (s:ls) =
		let (d, f') = splitdebug s
		    i = parsedebug d
-		in (f, i) : parse (Just f') ls
+		in (toOsPath f, i) : parse (Just f') ls
	parse _ _ = []
 
	-- First 5 lines are --debug output, remainder is the next filename.
diff --git a/Logs/Export.hs b/Logs/Export.hs
index a3cf823d53..169708f1e8 100644
--- a/Logs/Export.hs
+++ b/Logs/Export.hs
@@ -130,7 +130,7 @@ getExportExcluded :: UUID -> Annex [Git.Tree.TreeItem]
 getExportExcluded u = do
	logf <- fromRepo $ gitAnnexExportExcludeLog u
	liftIO $ catchDefaultIO [] $ exportExcludedParser
-		<$> F.readFile (toOsPath logf)
+		<$> F.readFile logf
  where
	exportExcludedParser :: L.ByteString -> [Git.Tree.TreeItem]
diff --git a/Logs/Location.hs b/Logs/Location.hs
index 608020899a..2adcddd2e3 100644
--- a/Logs/Location.hs
+++ b/Logs/Location.hs
@@ -124,7 +124,7 @@ parseLoggedLocationsWithoutClusters l =
	map (toUUID . fromLogInfo . info)
		(filterPresent (parseLog l))
 
-getLoggedLocations :: (RawFilePath -> Annex [LogInfo]) -> Key -> Annex [UUID]
+getLoggedLocations :: (OsPath -> Annex [LogInfo]) -> Key -> Annex [UUID]
 getLoggedLocations getter key = do
	config <- Annex.getGitConfig
	locs <- map (toUUID . fromLogInfo) <$> getter (locationLogFile config key)
@@ -301,8 +301,8 @@ overLocationLogsJournal v branchsha keyaction mclusters =
	changedlocs _ _ _ Nothing = pure (S.empty, S.empty)
 
 overLocationLogsHelper
-	:: ((RawFilePath -> Maybe Key) -> (Annex (FileContents Key b) -> Annex v) -> Annex a)
-	-> ((Maybe L.ByteString -> [UUID]) -> Key -> RawFilePath -> Maybe (L.ByteString, Maybe b) -> Annex u)
+	:: ((OsPath -> Maybe Key) -> (Annex (FileContents Key b) -> Annex v) -> Annex a)
+	-> ((Maybe L.ByteString -> [UUID]) -> Key -> OsPath -> Maybe (L.ByteString, Maybe b) -> Annex u)
	-> Bool
	-> v
	-> (Annex (FileContents Key b) -> Annex v -> Annex v)
diff --git a/Logs/MetaData.hs b/Logs/MetaData.hs
index 746b72dfbd..b5650e0904 100644
--- a/Logs/MetaData.hs
+++ b/Logs/MetaData.hs
@@ -59,7 +59,7 @@ import qualified Data.ByteString.Lazy as L
 getCurrentMetaData :: Key -> Annex MetaData
 getCurrentMetaData = getCurrentMetaData' metaDataLogFile
 
-getCurrentMetaData' :: (GitConfig -> Key -> RawFilePath) -> Key -> Annex MetaData
+getCurrentMetaData' :: (GitConfig -> Key -> OsPath) -> Key -> Annex MetaData
 getCurrentMetaData' getlogfile k = do
	config <- Annex.getGitConfig
	parseCurrentMetaData <$> Annex.Branch.get (getlogfile config k)
@@ -101,7 +101,7 @@ getCurrentRemoteMetaData (RemoteStateHandle u) k = extractRemoteMetaData u <$>
 addMetaData :: Key -> MetaData -> Annex ()
 addMetaData = addMetaData' (Annex.Branch.RegardingUUID []) metaDataLogFile
 
-addMetaData' :: Annex.Branch.RegardingUUID -> (GitConfig -> Key -> RawFilePath) -> Key -> MetaData -> Annex ()
+addMetaData' :: Annex.Branch.RegardingUUID -> (GitConfig -> Key -> OsPath) -> Key -> MetaData -> Annex ()
 addMetaData' ru getlogfile k metadata =
	addMetaDataClocked' ru getlogfile k metadata =<< currentVectorClock
 
@@ -112,7 +112,7 @@ addMetaData' ru getlogfile k metadata =
 addMetaDataClocked :: Key -> MetaData -> CandidateVectorClock -> Annex ()
 addMetaDataClocked = addMetaDataClocked' (Annex.Branch.RegardingUUID []) metaDataLogFile
 
-addMetaDataClocked' :: Annex.Branch.RegardingUUID -> (GitConfig -> Key -> RawFilePath) -> Key -> MetaData -> CandidateVectorClock -> Annex ()
+addMetaDataClocked' :: Annex.Branch.RegardingUUID -> (GitConfig -> Key -> OsPath) -> Key -> MetaData -> CandidateVectorClock -> Annex ()
 addMetaDataClocked' ru getlogfile k d@(MetaData m) c
	| d == emptyMetaData = noop
	| otherwise = do
@@ -160,5 +160,5 @@ copyMetaData oldkey newkey
		(const $ buildLog l)
	return True
 
-readLog :: RawFilePath -> Annex (Log MetaData)
+readLog :: OsPath -> Annex (Log MetaData)
 readLog = parseLog <$$> Annex.Branch.get
diff --git a/Logs/Migrate.hs b/Logs/Migrate.hs
index 63ace2f92e..07f7b39fa0 100644
--- a/Logs/Migrate.hs
+++ b/Logs/Migrate.hs
@@ -56,11 +56,10 @@ import Git.Log
 import Logs.File
 import Logs
 import Annex.CatFile
+import qualified Utility.OsString as OS
 
-import qualified Data.ByteString as B
 import qualified Data.ByteString.Lazy as L
 import Control.Concurrent.STM
-import System.FilePath.ByteString as P
 
 -- | What to use to record a migration. This should be the same Sha that is
 -- used to as the content of the annexed file in the HEAD branch.
@@ -95,7 +94,7 @@ commitMigration = do
			n <- readTVar nv
			let !n' = succ n
			writeTVar nv n'
-			return (asTopFilePath (encodeBS (show n')))
+			return (asTopFilePath (toOsPath (show n')))
	let rec h r = liftIO $ sendMkTree h
		(fromTreeItemType TreeFile)
		BlobObject
@@ -110,8 +109,8 @@ commitMigration = do
	n <- liftIO $ atomically $ readTVar nv
	when (n > 0) $ do
		treesha <- liftIO $ flip recordTree g $ Tree
-			[ RecordedSubTree (asTopFilePath "old") oldt []
-			, RecordedSubTree (asTopFilePath "new") newt []
+			[ RecordedSubTree (asTopFilePath (literalOsPath "old")) oldt []
+			, RecordedSubTree (asTopFilePath (literalOsPath "new")) newt []
			]
		commitsha <- Annex.Branch.rememberTreeish treesha
			(asTopFilePath migrationTreeGraftPoint)
@@ -129,7 +128,7 @@ streamNewDistributedMigrations incremental a = do
	(stoppoint, toskip) <- getPerformedMigrations
	(l, cleanup) <- inRepo $ getGitLog branchsha
		(if incremental then stoppoint else Nothing)
-		[fromRawFilePath migrationTreeGraftPoint]
+		[fromOsPath migrationTreeGraftPoint]
		-- Need to follow because migrate.tree is grafted in
		-- and then deleted, and normally git log stops when a file
		-- gets deleted.
@@ -142,7 +141,7 @@ streamNewDistributedMigrations incremental a = do
	go toskip c
		| newref c `elem` nullShas = return ()
		| changed c `elem` toskip = return ()
-		| not ("/new/" `B.isInfixOf` newfile) = return ()
+		| not (literalOsPath "/new/" `OS.isInfixOf` newfile) = return ()
		| otherwise =
			catKey (newref c) >>= \case
				Nothing -> return ()
@@ -150,10 +149,10 @@ streamNewDistributedMigrations incremental a = do
				Nothing -> return ()
				Just oldkey -> a oldkey newkey
	  where
-		newfile = toRawFilePath (changedfile c)
+		newfile = changedfile c
		oldfile = migrationTreeGraftPoint
-			P.</> "old"
-			P.</> P.takeBaseName (fromInternalGitPath newfile)
+			</> literalOsPath "old"
+			</> takeBaseName (fromInternalGitPath newfile)
		oldfileref = branchFileRef (changed c) oldfile
 
 getPerformedMigrations :: Annex (Maybe Sha, [Sha])
diff --git a/Logs/PreferredContent/Raw.hs b/Logs/PreferredContent/Raw.hs
index e86347d375..0a19756f75 100644
--- a/Logs/PreferredContent/Raw.hs
+++ b/Logs/PreferredContent/Raw.hs
@@ -32,7 +32,7 @@ requiredContentSet u expr = do
	setLog requiredContentLog u expr
	Annex.changeState $ \st -> st { Annex.requiredcontentmap = Nothing }
 
-setLog :: RawFilePath -> UUID -> PreferredContentExpression -> Annex ()
+setLog :: OsPath -> UUID -> PreferredContentExpression -> Annex ()
 setLog logfile uuid@(UUID _) val = do
	c <- currentVectorClock
	Annex.Branch.change (Annex.Branch.RegardingUUID [uuid]) logfile $
diff --git a/Logs/Presence.hs b/Logs/Presence.hs
index 810ce6462d..f459df9110 100644
--- a/Logs/Presence.hs
+++ b/Logs/Presence.hs
@@ -32,11 +32,11 @@ import Git.Types (RefDate)
 import qualified Data.ByteString.Lazy as L
 
 {- Adds to the log, removing any LogLines that are obsoleted. -}
-addLog :: Annex.Branch.RegardingUUID -> RawFilePath -> LogStatus -> LogInfo -> Annex ()
+addLog :: Annex.Branch.RegardingUUID -> OsPath -> LogStatus -> LogInfo -> Annex ()
 addLog ru file logstatus loginfo =
	addLog' ru file logstatus loginfo =<< currentVectorClock
 
-addLog' :: Annex.Branch.RegardingUUID -> RawFilePath -> LogStatus -> LogInfo -> CandidateVectorClock -> Annex ()
+addLog' :: Annex.Branch.RegardingUUID -> OsPath -> LogStatus -> LogInfo -> CandidateVectorClock -> Annex ()
 addLog' ru file logstatus loginfo c =
	Annex.Branch.changeOrAppend ru file $ \b ->
		let old = parseLog b
@@ -53,7 +53,7 @@ addLog' ru file logstatus loginfo c =
 - When the log was changed, the onchange action is run (with the journal
 - still locked to prevent any concurrent changes) and True is returned.
 -}
-maybeAddLog :: Annex.Branch.RegardingUUID -> RawFilePath -> LogStatus -> LogInfo -> Annex () -> Annex Bool
+maybeAddLog :: Annex.Branch.RegardingUUID -> OsPath -> LogStatus -> LogInfo -> Annex () -> Annex Bool
 maybeAddLog ru file logstatus loginfo onchange = do
	c <- currentVectorClock
	let f = \b ->
@@ -72,15 +72,15 @@ genLine logstatus loginfo c old = LogLine c' logstatus loginfo
 
 {- Reads a log file.
 - Note that the LogLines returned may be in any order. -}
-readLog :: RawFilePath -> Annex [LogLine]
+readLog :: OsPath -> Annex [LogLine]
 readLog = parseLog <$$> Annex.Branch.get
 
 {- Reads a log and returns only the info that is still present. -}
-presentLogInfo :: RawFilePath -> Annex [LogInfo]
+presentLogInfo :: OsPath -> Annex [LogInfo]
 presentLogInfo file = map info . filterPresent <$> readLog file
 
 {- Reads a log and returns only the info that is no longer present. -}
-notPresentLogInfo :: RawFilePath -> Annex [LogInfo]
+notPresentLogInfo :: OsPath -> Annex [LogInfo]
 notPresentLogInfo file = map info . filterNotPresent <$> readLog file
 
 {- Reads a historical version of a log and returns the info that was in
@@ -88,7 +88,7 @@ notPresentLogInfo file = map info . filterNotPresent <$> readLog file
 -
 - The date is formatted as shown in gitrevisions man page.
 -}
-historicalLogInfo :: RefDate -> RawFilePath -> Annex [LogInfo]
+historicalLogInfo :: RefDate -> OsPath -> Annex [LogInfo]
 historicalLogInfo refdate file = parseLogInfo
	<$> Annex.Branch.getHistorical refdate file
diff --git a/Logs/Schedule.hs b/Logs/Schedule.hs
index 7abcf14da8..6727fdd316 100644
--- a/Logs/Schedule.hs
+++ b/Logs/Schedule.hs
@@ -63,7 +63,7 @@ scheduleChange u a = scheduleSet u . S.toList . a =<< scheduleGet u
 
 getLastRunTimes :: Annex (M.Map ScheduledActivity LocalTime)
 getLastRunTimes = do
-	f <- fromRawFilePath <$> fromRepo gitAnnexScheduleState
+	f <- fromOsPath <$> fromRepo gitAnnexScheduleState
	liftIO $ fromMaybe M.empty
		<$> catchDefaultIO Nothing (readish <$> readFile f)
diff --git a/Logs/SingleValue.hs b/Logs/SingleValue.hs
index 2018e73ee7..f46fbe5e28 100644
--- a/Logs/SingleValue.hs
+++ b/Logs/SingleValue.hs
@@ -27,13 +27,13 @@ import Annex.VectorClock
 
 import qualified Data.Set as S
 
-readLog :: (Ord v, SingleValueSerializable v) => RawFilePath -> Annex (Log v)
+readLog :: (Ord v, SingleValueSerializable v) => OsPath -> Annex (Log v)
 readLog = parseLog <$$> Annex.Branch.get
 
-getLog :: (Ord v, SingleValueSerializable v) => RawFilePath -> Annex (Maybe v)
+getLog :: (Ord v, SingleValueSerializable v) => OsPath -> Annex (Maybe v)
 getLog = newestValue <$$> readLog
 
-setLog :: (Ord v, SingleValueSerializable v) => Annex.Branch.RegardingUUID -> RawFilePath -> v -> Annex ()
+setLog :: (Ord v, SingleValueSerializable v) => Annex.Branch.RegardingUUID -> OsPath -> v -> Annex ()
 setLog ru f v = do
	c <- currentVectorClock
	Annex.Branch.change ru f $ \old ->
diff --git a/Remote/Helper/Chunked.hs b/Remote/Helper/Chunked.hs
index 9b40d5b10c..6d6b619fd4 100644
--- a/Remote/Helper/Chunked.hs
+++ b/Remote/Helper/Chunked.hs
@@ -34,6 +34,7 @@ import Backend (isStableKey)
 import Annex.SpecialRemote.Config
 import Annex.Verify
 import qualified Utility.RawFilePath as R
+import qualified Utility.FileIO as F
 
 import qualified Data.ByteString as S
 import qualified Data.ByteString.Lazy as L
@@ -120,7 +121,7 @@ storeChunks
	-> ChunkConfig
	-> EncKey
	-> Key
-	-> FilePath
+	-> OsPath
	-> MeterUpdate
	-> Maybe (Cipher, EncKey)
	-> encc
@@ -135,7 +136,7 @@ storeChunks u chunkconfig encryptor k f p enc encc storer checker =
	-- possible without this check.
	(UnpaddedChunks chunksize) -> ifM (isStableKey k)
		( do
-			h <- liftIO $ openBinaryFile f ReadMode
+			h <- liftIO $ F.openBinaryFile f ReadMode
			go chunksize h
			liftIO $ hClose h
		, storechunk k (FileContent f) p
@@ -257,7 +258,7 @@ retrieveChunks
	-> ChunkConfig
	-> EncKey
	-> Key
-	-> FilePath
+	-> OsPath
	-> MeterUpdate
	-> Maybe (Cipher, EncKey)
	-> encc
@@ -276,7 +277,7 @@ retrieveChunks retriever u vc chunkconfig encryptor basek dest basep enc encc
  where
	go pe cks = do
		let ls = map chunkKeyList cks
-		currsize <- liftIO $ catchMaybeIO $ getFileSize (toRawFilePath dest)
+		currsize <- liftIO $ catchMaybeIO $ getFileSize dest
		let ls' = maybe ls (setupResume ls) currsize
		if any null ls'
			-- dest is already complete
@@ -339,7 +340,7 @@ retrieveChunks retriever u vc chunkconfig encryptor basek dest basep enc encc
			-- passing the whole file content to the
			-- incremental verifier though.
			Nothing -> do
-				retriever (encryptor basek) basep (toRawFilePath dest) iv $
+				retriever (encryptor basek) basep dest iv $
					retrieved iv Nothing basep
				return $ case iv of
					Nothing -> Right iv
@@ -347,13 +348,13 @@ retrieveChunks retriever u vc chunkconfig encryptor basek dest basep enc encc
 
	opennew = do
		iv <- startVerifyKeyContentIncrementally vc basek
-		h <- liftIO $ openBinaryFile dest WriteMode
+		h <- liftIO $ F.openBinaryFile dest WriteMode
		return (h, iv)
 
	-- Open the file and seek to the start point in order to resume.
	openresume startpoint = do
		-- ReadWriteMode allows seeking; AppendMode does not.
-		h <- liftIO $ openBinaryFile dest ReadWriteMode
+		h <- liftIO $ F.openBinaryFile dest ReadWriteMode
		liftIO $ hSeek h AbsoluteSeek startpoint
		-- No incremental verification when resuming, since that
		-- would need to read up to the startpoint.
@@ -398,7 +399,7 @@ retrieveChunks retriever u vc chunkconfig encryptor basek dest basep enc encc
 -}
 writeRetrievedContent
	:: LensEncParams encc
-	=> FilePath
+	=> OsPath
	-> Maybe (Cipher, EncKey)
	-> encc
	-> Maybe Handle
@@ -409,7 +410,7 @@ writeRetrievedContent
 writeRetrievedContent dest enc encc mh mp content miv = case (enc, mh, content) of
	(Nothing, Nothing, FileContent f)
		| f == dest -> noop
-		| otherwise -> liftIO $ moveFile (toRawFilePath f) (toRawFilePath dest)
+		| otherwise -> liftIO $ moveFile f dest
	(Just (cipher, _), _, ByteContent b) -> do
		cmd <- gpgCmd <$> Annex.getGitConfig
		decrypt cmd encc cipher (feedBytes b) $
@@ -419,10 +420,10 @@ writeRetrievedContent dest enc encc mh mp content miv = case (enc, mh, content)
		withBytes content $ \b ->
			decrypt cmd encc cipher (feedBytes b) $
				readBytes write
-		liftIO $ removeWhenExistsWith R.removeLink (toRawFilePath f)
+		liftIO $ removeWhenExistsWith R.removeLink (fromOsPath f)
	(Nothing, _, FileContent f) -> do
		withBytes content write
-		liftIO $ removeWhenExistsWith R.removeLink (toRawFilePath f)
+		liftIO $ removeWhenExistsWith R.removeLink (fromOsPath f)
	(Nothing, _, ByteContent b) -> write b
  where
	write b = case mh of
@@ -437,7 +438,7 @@ writeRetrievedContent dest enc encc mh mp content miv = case (enc, mh, content)
					Nothing -> S.hPut h
				in meteredWrite p writer b
			Nothing -> L.hPut h b
-	opendest = openBinaryFile dest WriteMode
+	opendest = F.openBinaryFile dest WriteMode
 
 {- Can resume when the chunk's offset is at or before the end of
 - the dest file.
 -}
@@ -583,4 +584,4 @@ ensureChunksAreLogged _ _ (ChunkKeys _) = return ()
 
 withBytes :: ContentSource -> (L.ByteString -> Annex a) -> Annex a
 withBytes (ByteContent b) a = a b
-withBytes (FileContent f) a = a =<< liftIO (L.readFile f)
+withBytes (FileContent f) a = a =<< liftIO (L.readFile (fromOsPath f))
diff --git a/Remote/Helper/Chunked/Legacy.hs b/Remote/Helper/Chunked/Legacy.hs
index faae6ddc90..9f4c3fea36 100644
--- a/Remote/Helper/Chunked/Legacy.hs
+++ b/Remote/Helper/Chunked/Legacy.hs
@@ -72,7 +72,7 @@ storeChunks key tmp dest storer recorder finalizer = do
	when (null stored) $
		giveup "no chunks were stored"
  where
-	basef = tmp ++ fromRawFilePath (keyFile key)
+	basef = tmp ++ fromOsPath (keyFile key)
	tmpdests = map (basef ++ ) chunkStream
 
 {- Given a list of destinations to use, chunks the data according to the
diff --git a/Remote/Helper/Git.hs b/Remote/Helper/Git.hs
index a8f6798662..ae43c0ece5 100644
--- a/Remote/Helper/Git.hs
+++ b/Remote/Helper/Git.hs
@@ -23,15 +23,14 @@ import Data.Time.Clock.POSIX
 import System.PosixCompat.Files (modificationTime)
 import qualified Data.Map as M
 import qualified Data.Set as S
-import qualified System.FilePath.ByteString as P
 
 repoCheap :: Git.Repo -> Bool
 repoCheap = not . Git.repoIsUrl
 
-localpathCalc :: Git.Repo -> Maybe FilePath
+localpathCalc :: Git.Repo -> Maybe OsPath
 localpathCalc r
	| not (Git.repoIsLocal r) && not (Git.repoIsLocalUnknown r) = Nothing
-	| otherwise = Just $ fromRawFilePath $ Git.repoPath r
+	| otherwise = Just $ Git.repoPath r
 
 {- Checks relatively inexpensively if a repository is available for use. -}
 repoAvail :: Git.Repo -> Annex Availability
@@ -63,8 +62,11 @@ guardUsable r fallback a
 gitRepoInfo :: Remote -> Annex [(String, String)]
 gitRepoInfo r = do
	d <- fromRepo Git.localGitDir
-	mtimes <- liftIO $ mapM (\p -> modificationTime <$> R.getFileStatus p)
-		=<< emptyWhenDoesNotExist (dirContentsRecursive (d P.</> "refs" P.</> "remotes" P.</> encodeBS (Remote.name r)))
+	let refsdir = d </> literalOsPath "refs"
+		</> literalOsPath "remotes"
+		</> toOsPath (Remote.name r)
+	mtimes <- liftIO $ mapM (\p -> modificationTime <$> R.getFileStatus (fromOsPath p))
+		=<< emptyWhenDoesNotExist (dirContentsRecursive refsdir)
	let lastsynctime = case mtimes of
		[] -> "never"
		_ -> show $ posixSecondsToUTCTime $ realToFrac $ maximum mtimes
diff --git a/Types/StoreRetrieve.hs b/Types/StoreRetrieve.hs
index ce7d228d74..1de9dea067 100644
--- a/Types/StoreRetrieve.hs
+++ b/Types/StoreRetrieve.hs
@@ -18,7 +18,7 @@ import qualified Data.ByteString.Lazy as L
 
 -- A source of a Key's content.
 data ContentSource
-	= FileContent FilePath
+	= FileContent OsPath
	| ByteContent L.ByteString
 
 isByteContent :: ContentSource -> Bool
@@ -43,7 +43,7 @@ type Storer = Key -> ContentSource -> MeterUpdate -> Annex ()
 -- content to the verifier before running the callback.
 -- This should not be done when it retrieves ByteContent.
 type Retriever = forall a.
-	Key -> MeterUpdate -> RawFilePath -> Maybe IncrementalVerifier
+	Key -> MeterUpdate -> OsPath -> Maybe IncrementalVerifier
	-> (ContentSource -> Annex a) -> Annex a
 
 -- Action that removes a Key's content from a remote.
diff --git a/Upgrade/V7.hs b/Upgrade/V7.hs
index 0e301bd09d..d4b3e35461 100644
--- a/Upgrade/V7.hs
+++ b/Upgrade/V7.hs
@@ -24,7 +24,6 @@ import Config
 import qualified Utility.RawFilePath as R
 import qualified Utility.FileIO as F
 
-import qualified System.FilePath.ByteString as P
 import System.PosixCompat.Files (isSymbolicLink)
 
 upgrade :: Bool -> Annex UpgradeResult
@@ -40,48 +39,52 @@ upgrade automatic = do
	-- The old content identifier database is deleted here, but the
	-- new database is not populated. It will be automatically
	-- populated from the git-annex branch the next time it is used.
-	removeOldDb . fromRawFilePath =<< fromRepo gitAnnexContentIdentifierDbDirOld
-	liftIO . removeWhenExistsWith R.removeLink
+	removeOldDb =<< fromRepo gitAnnexContentIdentifierDbDirOld
+	liftIO . removeWhenExistsWith (R.removeLink . fromOsPath)
		=<< fromRepo gitAnnexContentIdentifierLockOld
 
	-- The export databases are deleted here. The new databases
	-- will be populated by the next thing that needs them, the same
	-- way as they would be in a fresh clone.
-	removeOldDb . fromRawFilePath =<< calcRepo' gitAnnexExportDir
+	removeOldDb =<< calcRepo' gitAnnexExportDir
 
	populateKeysDb
-	removeOldDb . fromRawFilePath =<< fromRepo gitAnnexKeysDbOld
-	liftIO . removeWhenExistsWith R.removeLink
+	removeOldDb =<< fromRepo gitAnnexKeysDbOld
+	liftIO . removeWhenExistsWith (R.removeLink . fromOsPath)
		=<< fromRepo gitAnnexKeysDbIndexCacheOld
-	liftIO . removeWhenExistsWith R.removeLink
+	liftIO . removeWhenExistsWith (R.removeLink . fromOsPath)
		=<< fromRepo gitAnnexKeysDbLockOld
 
	updateSmudgeFilter
 
	return UpgradeSuccess
 
-gitAnnexKeysDbOld :: Git.Repo -> RawFilePath
-gitAnnexKeysDbOld r = gitAnnexDir r P.</> "keys"
+gitAnnexKeysDbOld :: Git.Repo -> OsPath
+gitAnnexKeysDbOld r = gitAnnexDir r </> literalOsPath "keys"
 
-gitAnnexKeysDbLockOld :: Git.Repo -> RawFilePath
-gitAnnexKeysDbLockOld r = gitAnnexKeysDbOld r <> ".lck"
+gitAnnexKeysDbLockOld :: Git.Repo -> OsPath
+gitAnnexKeysDbLockOld r =
+	gitAnnexKeysDbOld r <> literalOsPath ".lck"
 
-gitAnnexKeysDbIndexCacheOld :: Git.Repo -> RawFilePath
-gitAnnexKeysDbIndexCacheOld r = gitAnnexKeysDbOld r <> ".cache"
+gitAnnexKeysDbIndexCacheOld :: Git.Repo -> OsPath
+gitAnnexKeysDbIndexCacheOld r =
+	gitAnnexKeysDbOld r <> literalOsPath ".cache"
 
-gitAnnexContentIdentifierDbDirOld :: Git.Repo -> RawFilePath
-gitAnnexContentIdentifierDbDirOld r = gitAnnexDir r P.</> "cids"
+gitAnnexContentIdentifierDbDirOld :: Git.Repo -> OsPath
+gitAnnexContentIdentifierDbDirOld r =
+	gitAnnexDir r </> literalOsPath "cids"
 
-gitAnnexContentIdentifierLockOld :: Git.Repo -> RawFilePath
-gitAnnexContentIdentifierLockOld r = gitAnnexContentIdentifierDbDirOld r <> ".lck"
+gitAnnexContentIdentifierLockOld :: Git.Repo -> OsPath
+gitAnnexContentIdentifierLockOld r =
+	gitAnnexContentIdentifierDbDirOld r <> literalOsPath ".lck"
 
-removeOldDb :: FilePath -> Annex ()
+removeOldDb :: OsPath -> Annex ()
 removeOldDb db =
	whenM (liftIO $ doesDirectoryExist db) $ do
		v <- liftIO $ tryNonAsync $ removePathForcibly db
		case v of
-			Left ex -> giveup $ "Failed removing old database directory " ++ db ++ " during upgrade (" ++ show ex ++ ") -- delete that and re-run git-annex to finish the upgrade."
+			Left ex -> giveup $ "Failed removing old database directory " ++ fromOsPath db ++ " during upgrade (" ++ show ex ++ ") -- delete that and re-run git-annex to finish the upgrade."
			Right () -> return ()
 
 -- Populate the new keys database with associated files and inode caches.
@@ -108,11 +111,11 @@ populateKeysDb = unlessM isBareRepo $ do
	(l, cleanup) <- inRepo $ LsFiles.inodeCaches [top]
	forM_ l $ \case
		(_f, Nothing) -> giveup "Unable to parse git ls-files --debug output while upgrading git-annex sqlite databases."
-		(f, Just ic) -> unlessM (liftIO $ catchBoolIO $ isSymbolicLink <$> R.getSymbolicLinkStatus (toRawFilePath f)) $ do
-			catKeyFile (toRawFilePath f) >>= \case
+		(f, Just ic) -> unlessM (liftIO $ catchBoolIO $ isSymbolicLink <$> R.getSymbolicLinkStatus (fromOsPath f)) $ do
+			catKeyFile f >>= \case
				Nothing -> noop
				Just k -> do
-					topf <- inRepo $ toTopFilePath $ toRawFilePath f
+					topf <- inRepo $ toTopFilePath f
					Database.Keys.runWriter AssociatedTable $ \h -> liftIO $
						Database.Keys.SQL.addAssociatedFile k topf h
					Database.Keys.runWriter ContentTable $ \h -> liftIO $
@@ -130,10 +133,10 @@ updateSmudgeFilter :: Annex ()
 updateSmudgeFilter = do
	lf <- Annex.fromRepo Git.attributesLocal
	ls <- liftIO $ map decodeBS . fileLines'
-		<$> catchDefaultIO "" (F.readFile' (toOsPath lf))
+		<$> catchDefaultIO "" (F.readFile' lf)
	let ls' = removedotfilter ls
	when (ls /= ls') $
-		liftIO $ writeFile (fromRawFilePath lf) (unlines ls')
+		liftIO $ writeFile (fromOsPath lf) (unlines ls')
  where
	removedotfilter ("* filter=annex":".* !filter":rest) =
		"* filter=annex" : removedotfilter rest
diff --git a/Utility/WebApp.hs b/Utility/WebApp.hs
index 937b3bad5a..178a63f050 100644
--- a/Utility/WebApp.hs
+++ b/Utility/WebApp.hs
@@ -187,7 +187,7 @@ insertAuthToken extractAuthToken predicate webapp root pathbits params =
 - to avoid exposing the secret token when launching the web browser. -}
 writeHtmlShim :: String -> String -> FilePath -> IO ()
 writeHtmlShim title url file =
-	viaTmp (writeFileProtected . fromOsPath)
+	viaTmp (writeFileProtected)
		(toOsPath $ toRawFilePath file)
		(genHtmlShim title url)
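
A rough sketch of the conversion idioms these hunks apply repeatedly (not part of the patch; it assumes git-annex's Utility.OsPath exports literalOsPath, toOsPath, fromOsPath and re-exports </> for OsPath, and that Utility.RawFilePath wraps the POSIX ByteString API, as the hunks above use them; keysDbLock and statFile are hypothetical names for illustration):

	import Utility.OsPath (OsPath, literalOsPath, fromOsPath, (</>))
	import qualified Utility.RawFilePath as R
	import System.PosixCompat.Files (FileStatus)

	-- Build paths entirely in OsPath, replacing RawFilePath's P.</>
	-- and bare ByteString literals; OsPath is a Monoid, so <> appends
	-- within a path component, just like "index <> literalOsPath \".lock\"".
	keysDbLock :: OsPath -> OsPath
	keysDbLock dir = dir </> literalOsPath "db" <> literalOsPath ".lck"

	-- Convert only at the boundary where a POSIX call still wants a
	-- RawFilePath, mirroring the R.getFileStatus (fromOsPath f) pattern
	-- used throughout the patch.
	statFile :: OsPath -> IO FileStatus
	statFile f = R.getFileStatus (fromOsPath f)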