more OsPath conversion

Sponsored-by: Brock Spratlen
Joey Hess 2025-02-01 11:54:19 -04:00
parent c69e57aede
commit 474cf3bc8b
No known key found for this signature in database
GPG key ID: DB12DB0FF05F8F38
38 changed files with 342 additions and 330 deletions

View file

@ -313,7 +313,7 @@ updateTo' pairs = do
- transitions that have not been applied to all refs will be applied on
- the fly.
-}
get :: RawFilePath -> Annex L.ByteString
get :: OsPath -> Annex L.ByteString
get file = do
st <- update
case getCache file st of
@ -353,7 +353,7 @@ getUnmergedRefs = unmergedRefs <$> update
- using some optimised method. The journal has to be checked, in case
- it has a newer version of the file that has not reached the branch yet.
-}
precache :: RawFilePath -> L.ByteString -> Annex ()
precache :: OsPath -> L.ByteString -> Annex ()
precache file branchcontent = do
st <- getState
content <- if journalIgnorable st
@ -369,12 +369,12 @@ precache file branchcontent = do
- reflect changes in remotes.
- (Changing the value this returns, and then merging is always the
- same as using get, and then changing its value.) -}
getLocal :: RawFilePath -> Annex L.ByteString
getLocal :: OsPath -> Annex L.ByteString
getLocal = getLocal' (GetPrivate True)
getLocal' :: GetPrivate -> RawFilePath -> Annex L.ByteString
getLocal' :: GetPrivate -> OsPath -> Annex L.ByteString
getLocal' getprivate file = do
fastDebug "Annex.Branch" ("read " ++ fromRawFilePath file)
fastDebug "Annex.Branch" ("read " ++ fromOsPath file)
go =<< getJournalFileStale getprivate file
where
go NoJournalledContent = getRef fullname file
@ -384,14 +384,14 @@ getLocal' getprivate file = do
return (v <> journalcontent)
{- Gets the content of a file as staged in the branch's index. -}
getStaged :: RawFilePath -> Annex L.ByteString
getStaged :: OsPath -> Annex L.ByteString
getStaged = getRef indexref
where
-- This makes git cat-file be run with ":file",
-- so it looks at the index.
indexref = Ref ""
getHistorical :: RefDate -> RawFilePath -> Annex L.ByteString
getHistorical :: RefDate -> OsPath -> Annex L.ByteString
getHistorical date file =
-- This check avoids some ugly error messages when the reflog
-- is empty.
@ -400,7 +400,7 @@ getHistorical date file =
, getRef (Git.Ref.dateRef fullname date) file
)
getRef :: Ref -> RawFilePath -> Annex L.ByteString
getRef :: Ref -> OsPath -> Annex L.ByteString
getRef ref file = withIndex $ catFile ref file
{- Applies a function to modify the content of a file.
@ -408,7 +408,7 @@ getRef ref file = withIndex $ catFile ref file
- Note that this does not cause the branch to be merged, it only
- modifies the current content of the file on the branch.
-}
change :: Journalable content => RegardingUUID -> RawFilePath -> (L.ByteString -> content) -> Annex ()
change :: Journalable content => RegardingUUID -> OsPath -> (L.ByteString -> content) -> Annex ()
change ru file f = lockJournal $ \jl -> f <$> getToChange ru file >>= set jl ru file
{- Applies a function which can modify the content of a file, or not.
@ -416,7 +416,7 @@ change ru file f = lockJournal $ \jl -> f <$> getToChange ru file >>= set jl ru
- When the file was modified, runs the onchange action, and returns
- True. The action is run while the journal is still locked,
- so another concurrent call to this cannot happen while it is running. -}
maybeChange :: Journalable content => RegardingUUID -> RawFilePath -> (L.ByteString -> Maybe content) -> Annex () -> Annex Bool
maybeChange :: Journalable content => RegardingUUID -> OsPath -> (L.ByteString -> Maybe content) -> Annex () -> Annex Bool
maybeChange ru file f onchange = lockJournal $ \jl -> do
v <- getToChange ru file
case f v of
@ -449,7 +449,7 @@ data ChangeOrAppend t = Change t | Append t
- state that would confuse the older version. This is planned to be
- changed in a future repository version.
-}
changeOrAppend :: Journalable content => RegardingUUID -> RawFilePath -> (L.ByteString -> ChangeOrAppend content) -> Annex ()
changeOrAppend :: Journalable content => RegardingUUID -> OsPath -> (L.ByteString -> ChangeOrAppend content) -> Annex ()
changeOrAppend ru file f = lockJournal $ \jl ->
checkCanAppendJournalFile jl ru file >>= \case
Just appendable -> ifM (annexAlwaysCompact <$> Annex.getGitConfig)
@ -481,7 +481,7 @@ changeOrAppend ru file f = lockJournal $ \jl ->
oldc <> journalableByteString toappend
{- Only get private information when the RegardingUUID is itself private. -}
getToChange :: RegardingUUID -> RawFilePath -> Annex L.ByteString
getToChange :: RegardingUUID -> OsPath -> Annex L.ByteString
getToChange ru f = flip getLocal' f . GetPrivate =<< regardingPrivateUUID ru
{- Records new content of a file into the journal.
@ -493,11 +493,11 @@ getToChange ru f = flip getLocal' f . GetPrivate =<< regardingPrivateUUID ru
- git-annex index, and should not be written to the public git-annex
- branch.
-}
set :: Journalable content => JournalLocked -> RegardingUUID -> RawFilePath -> content -> Annex ()
set :: Journalable content => JournalLocked -> RegardingUUID -> OsPath -> content -> Annex ()
set jl ru f c = do
journalChanged
setJournalFile jl ru f c
fastDebug "Annex.Branch" ("set " ++ fromRawFilePath f)
fastDebug "Annex.Branch" ("set " ++ fromOsPath f)
-- Could cache the new content, but it would involve
-- evaluating a Journalable Builder twice, which is not very
-- efficient. Instead, assume that it's not common to need to read
@ -505,11 +505,11 @@ set jl ru f c = do
invalidateCache f
{- Appends content to the journal file. -}
append :: Journalable content => JournalLocked -> RawFilePath -> AppendableJournalFile -> content -> Annex ()
append :: Journalable content => JournalLocked -> OsPath -> AppendableJournalFile -> content -> Annex ()
append jl f appendable toappend = do
journalChanged
appendJournalFile jl appendable toappend
fastDebug "Annex.Branch" ("append " ++ fromRawFilePath f)
fastDebug "Annex.Branch" ("append " ++ fromOsPath f)
invalidateCache f
{- Commit message used when making a commit of whatever data has changed
@ -611,7 +611,7 @@ commitIndex' jl branchref message basemessage retrynum parents = do
- not been merged in, returns Nothing, because it's not possible to
- efficiently handle that.
-}
files :: Annex (Maybe ([RawFilePath], IO Bool))
files :: Annex (Maybe ([OsPath], IO Bool))
files = do
st <- update
if not (null (unmergedRefs st))
@ -629,10 +629,10 @@ files = do
{- Lists all files currently in the journal, but not files in the private
- journal. -}
journalledFiles :: Annex [RawFilePath]
journalledFiles :: Annex [OsPath]
journalledFiles = getJournalledFilesStale gitAnnexJournalDir
journalledFilesPrivate :: Annex [RawFilePath]
journalledFilesPrivate :: Annex [OsPath]
journalledFilesPrivate = ifM privateUUIDsKnown
( getJournalledFilesStale gitAnnexPrivateJournalDir
, return []
@ -640,10 +640,10 @@ journalledFilesPrivate = ifM privateUUIDsKnown
{- Files in the branch, not including any from journalled changes,
- and without updating the branch. -}
branchFiles :: Annex ([RawFilePath], IO Bool)
branchFiles :: Annex ([OsPath], IO Bool)
branchFiles = withIndex $ inRepo branchFiles'
branchFiles' :: Git.Repo -> IO ([RawFilePath], IO Bool)
branchFiles' :: Git.Repo -> IO ([OsPath], IO Bool)
branchFiles' = Git.Command.pipeNullSplit' $
lsTreeParams Git.LsTree.LsTreeRecursive (Git.LsTree.LsTreeLong False)
fullname
@ -690,7 +690,7 @@ withIndex' :: Bool -> Annex a -> Annex a
withIndex' bootstrapping a = withIndexFile AnnexIndexFile $ \f -> do
checkIndexOnce $ unlessM (liftIO $ doesFileExist f) $ do
unless bootstrapping create
createAnnexDirectory $ toRawFilePath $ takeDirectory f
createAnnexDirectory $ toOsPath $ takeDirectory f
unless bootstrapping $ inRepo genIndex
a
@ -748,7 +748,7 @@ stageJournal jl commitindex = withIndex $ withOtherTmp $ \tmpdir -> do
Git.UpdateIndex.streamUpdateIndex g
[genstream dir h jh jlogh]
commitindex
liftIO $ cleanup (fromRawFilePath dir) jlogh jlogf
liftIO $ cleanup (fromOsPath dir) jlogh jlogf
where
genstream dir h jh jlogh streamer = readDirectory jh >>= \case
Nothing -> return ()
@ -999,7 +999,7 @@ data UnmergedBranches t
= UnmergedBranches t
| NoUnmergedBranches t
type FileContents t b = Maybe (t, RawFilePath, Maybe (L.ByteString, Maybe b))
type FileContents t b = Maybe (t, OsPath, Maybe (L.ByteString, Maybe b))
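As an aside, a reading of this type (not part of the diff):

-- FileContents t b: Nothing when there are no more files to process;
-- otherwise the value the select function produced for the file, the
-- file's path, and Just (content, extra) when its content could be read.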
{- Runs an action on the content of selected files from the branch.
- This is much faster than reading the content of each file in turn,
@ -1022,7 +1022,7 @@ overBranchFileContents
-- the callback can be run more than once on the same filename,
-- and in this case it's also possible for the callback to be
-- passed some of the same file content repeatedly.
-> (RawFilePath -> Maybe v)
-> (OsPath -> Maybe v)
-> (Annex (FileContents v Bool) -> Annex a)
-> Annex (UnmergedBranches (a, Git.Sha))
overBranchFileContents ignorejournal select go = do
@ -1036,7 +1036,7 @@ overBranchFileContents ignorejournal select go = do
else NoUnmergedBranches v
overBranchFileContents'
:: (RawFilePath -> Maybe v)
:: (OsPath -> Maybe v)
-> (Annex (FileContents v Bool) -> Annex a)
-> BranchState
-> Annex (a, Git.Sha)
@ -1086,11 +1086,11 @@ combineStaleJournalWithBranch branchcontent journalledcontent =
- files.
-}
overJournalFileContents
:: (RawFilePath -> L.ByteString -> Annex (L.ByteString, Maybe b))
:: (OsPath -> L.ByteString -> Annex (L.ByteString, Maybe b))
-- ^ Called with the journalled file content when the journalled
-- content may be stale or lack information committed to the
-- git-annex branch.
-> (RawFilePath -> Maybe v)
-> (OsPath -> Maybe v)
-> (Annex (FileContents v b) -> Annex a)
-> Annex a
overJournalFileContents handlestale select go = do
@ -1098,9 +1098,9 @@ overJournalFileContents handlestale select go = do
go $ overJournalFileContents' buf handlestale select
overJournalFileContents'
:: MVar ([RawFilePath], [RawFilePath])
-> (RawFilePath -> L.ByteString -> Annex (L.ByteString, Maybe b))
-> (RawFilePath -> Maybe a)
:: MVar ([OsPath], [OsPath])
-> (OsPath -> L.ByteString -> Annex (L.ByteString, Maybe b))
-> (OsPath -> Maybe a)
-> Annex (FileContents a b)
overJournalFileContents' buf handlestale select =
liftIO (tryTakeMVar buf) >>= \case

View file

@ -118,7 +118,7 @@ enableInteractiveBranchAccess = changeState $ \s -> s
, journalIgnorable = False
}
setCache :: RawFilePath -> L.ByteString -> Annex ()
setCache :: OsPath -> L.ByteString -> Annex ()
setCache file content = changeState $ \s -> s
{ cachedFileContents = add (cachedFileContents s) }
where
@ -126,7 +126,7 @@ setCache file content = changeState $ \s -> s
| length l < logFilesToCache = (file, content) : l
| otherwise = (file, content) : Prelude.init l
getCache :: RawFilePath -> BranchState -> Maybe L.ByteString
getCache :: OsPath -> BranchState -> Maybe L.ByteString
getCache file state = go (cachedFileContents state)
where
go [] = Nothing
@ -134,7 +134,7 @@ getCache file state = go (cachedFileContents state)
| f == file && not (needInteractiveAccess state) = Just c
| otherwise = go rest
invalidateCache :: RawFilePath -> Annex ()
invalidateCache :: OsPath -> Annex ()
invalidateCache f = changeState $ \s -> s
{ cachedFileContents = filter (\(f', _) -> f' /= f)
(cachedFileContents s)
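Behaviourally, this cache is a small newest-first association list; a sketch of what the functions above guarantee (assuming interactive branch access is not enabled):

-- setCache keeps at most logFilesToCache (file, content) pairs, newest first:
--   setCache f1 c1 >> setCache f2 c2   ==>   getCache f2 st == Just c2
-- invalidateCache f drops any cached entry whose path equals f.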

View file

@ -45,11 +45,11 @@ import Types.AdjustedBranch
import Types.CatFileHandles
import Utility.ResourcePool
catFile :: Git.Branch -> RawFilePath -> Annex L.ByteString
catFile :: Git.Branch -> OsPath -> Annex L.ByteString
catFile branch file = withCatFileHandle $ \h ->
liftIO $ Git.CatFile.catFile h branch file
catFileDetails :: Git.Branch -> RawFilePath -> Annex (Maybe (L.ByteString, Sha, ObjectType))
catFileDetails :: Git.Branch -> OsPath -> Annex (Maybe (L.ByteString, Sha, ObjectType))
catFileDetails branch file = withCatFileHandle $ \h ->
liftIO $ Git.CatFile.catFileDetails h branch file
@ -167,8 +167,8 @@ catKey' ref sz
catKey' _ _ = return Nothing
{- Gets a symlink target. -}
catSymLinkTarget :: Sha -> Annex RawFilePath
catSymLinkTarget sha = fromInternalGitPath . L.toStrict <$> get
catSymLinkTarget :: Sha -> Annex OsPath
catSymLinkTarget sha = fromInternalGitPath . toOsPath . L.toStrict <$> get
where
-- Avoid buffering the whole file content, which might be large.
-- 8192 is enough if it really is a symlink.
@ -195,25 +195,25 @@ catSymLinkTarget sha = fromInternalGitPath . L.toStrict <$> get
-
- So, this gets info from the index, unless running as a daemon.
-}
catKeyFile :: RawFilePath -> Annex (Maybe Key)
catKeyFile :: OsPath -> Annex (Maybe Key)
catKeyFile f = ifM (Annex.getState Annex.daemon)
( catKeyFileHEAD f
, maybe (pure Nothing) catKey =<< inRepo (Git.Ref.fileRef f)
)
catKeyFileHEAD :: RawFilePath -> Annex (Maybe Key)
catKeyFileHEAD :: OsPath -> Annex (Maybe Key)
catKeyFileHEAD f = maybe (pure Nothing) catKey
=<< inRepo (Git.Ref.fileFromRef Git.Ref.headRef f)
{- Look in the original branch from whence an adjusted branch is based
- to find the file. But only when the adjustment hides some files. -}
catKeyFileHidden :: RawFilePath -> CurrBranch -> Annex (Maybe Key)
catKeyFileHidden :: OsPath -> CurrBranch -> Annex (Maybe Key)
catKeyFileHidden = hiddenCat catKey
catObjectMetaDataHidden :: RawFilePath -> CurrBranch -> Annex (Maybe (Sha, Integer, ObjectType))
catObjectMetaDataHidden :: OsPath -> CurrBranch -> Annex (Maybe (Sha, Integer, ObjectType))
catObjectMetaDataHidden = hiddenCat catObjectMetaData
hiddenCat :: (Ref -> Annex (Maybe a)) -> RawFilePath -> CurrBranch -> Annex (Maybe a)
hiddenCat :: (Ref -> Annex (Maybe a)) -> OsPath -> CurrBranch -> Annex (Maybe a)
hiddenCat a f (Just origbranch, Just adj)
| adjustmentHidesFiles adj =
maybe (pure Nothing) a

View file

@ -19,13 +19,12 @@ import Utility.DataUnits
import Utility.CopyFile
import qualified Utility.RawFilePath as R
import qualified System.FilePath.ByteString as P
import System.PosixCompat.Files (linkCount)
{- Runs the secure erase command if set, otherwise does nothing.
- File may or may not be deleted at the end; caller is responsible for
- making sure it's deleted. -}
secureErase :: RawFilePath -> Annex ()
secureErase :: OsPath -> Annex ()
secureErase = void . runAnnexPathHook "%file"
secureEraseAnnexHook annexSecureEraseCommand
@ -44,45 +43,48 @@ data LinkedOrCopied = Linked | Copied
- execute bit will be set. The mode is not fully copied over because
- git doesn't support file modes beyond execute.
-}
linkOrCopy :: Key -> RawFilePath -> RawFilePath -> Maybe FileMode -> Annex (Maybe LinkedOrCopied)
linkOrCopy :: Key -> OsPath -> OsPath -> Maybe FileMode -> Annex (Maybe LinkedOrCopied)
linkOrCopy = linkOrCopy' (annexThin <$> Annex.getGitConfig)
linkOrCopy' :: Annex Bool -> Key -> RawFilePath -> RawFilePath -> Maybe FileMode -> Annex (Maybe LinkedOrCopied)
linkOrCopy' :: Annex Bool -> Key -> OsPath -> OsPath -> Maybe FileMode -> Annex (Maybe LinkedOrCopied)
linkOrCopy' canhardlink key src dest destmode = catchDefaultIO Nothing $
ifM canhardlink
( hardlink
( hardlinkorcopy
, copy =<< getstat
)
where
hardlink = do
hardlinkorcopy = do
s <- getstat
if linkCount s > 1
then copy s
else liftIO (R.createLink src dest >> preserveGitMode dest destmode >> return (Just Linked))
`catchIO` const (copy s)
else hardlink `catchIO` const (copy s)
hardlink = liftIO $ do
R.createLink (fromOsPath src) (fromOsPath dest)
void $ preserveGitMode dest destmode
return (Just Linked)
copy s = ifM (checkedCopyFile' key src dest destmode s)
( return (Just Copied)
, return Nothing
)
getstat = liftIO $ R.getFileStatus src
getstat = liftIO $ R.getFileStatus (fromOsPath src)
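Summarising the decision above (a behavioural sketch, not code added by the diff):

-- linkOrCopy' canhardlink key src dest destmode:
--   * canhardlink is False            -> checked copy
--   * src already has linkCount > 1   -> checked copy
--   * otherwise try createLink        -> Just Linked
--   * createLink fails (e.g. EXDEV)   -> checked copy, Just Copied on success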
{- Checks disk space before copying. -}
checkedCopyFile :: Key -> RawFilePath -> RawFilePath -> Maybe FileMode -> Annex Bool
checkedCopyFile :: Key -> OsPath -> OsPath -> Maybe FileMode -> Annex Bool
checkedCopyFile key src dest destmode = catchBoolIO $
checkedCopyFile' key src dest destmode
=<< liftIO (R.getFileStatus src)
=<< liftIO (R.getFileStatus (fromOsPath src))
checkedCopyFile' :: Key -> RawFilePath -> RawFilePath -> Maybe FileMode -> FileStatus -> Annex Bool
checkedCopyFile' :: Key -> OsPath -> OsPath -> Maybe FileMode -> FileStatus -> Annex Bool
checkedCopyFile' key src dest destmode s = catchBoolIO $ do
sz <- liftIO $ getFileSize' src s
ifM (checkDiskSpace' sz (Just $ P.takeDirectory dest) key 0 True)
ifM (checkDiskSpace' sz (Just $ takeDirectory dest) key 0 True)
( liftIO $
copyFileExternal CopyAllMetaData (fromRawFilePath src) (fromRawFilePath dest)
copyFileExternal CopyAllMetaData src dest
<&&> preserveGitMode dest destmode
, return False
)
preserveGitMode :: RawFilePath -> Maybe FileMode -> IO Bool
preserveGitMode :: OsPath -> Maybe FileMode -> IO Bool
preserveGitMode f (Just mode)
| isExecutable mode = catchBoolIO $ do
modifyFileMode f $ addModes executeModes
@ -100,12 +102,12 @@ preserveGitMode _ _ = return True
- to be downloaded from the free space. This way, we avoid overcommitting
- when doing concurrent downloads.
-}
checkDiskSpace :: Maybe FileSize -> Maybe RawFilePath -> Key -> Integer -> Bool -> Annex Bool
checkDiskSpace :: Maybe FileSize -> Maybe OsPath -> Key -> Integer -> Bool -> Annex Bool
checkDiskSpace msz destdir key = checkDiskSpace' sz destdir key
where
sz = fromMaybe 1 (fromKey keySize key <|> msz)
checkDiskSpace' :: FileSize -> Maybe RawFilePath -> Key -> Integer -> Bool -> Annex Bool
checkDiskSpace' :: FileSize -> Maybe OsPath -> Key -> Integer -> Bool -> Annex Bool
checkDiskSpace' sz destdir key alreadythere samefilesystem = ifM (Annex.getRead Annex.force)
( return True
, do
@ -118,7 +120,7 @@ checkDiskSpace' sz destdir key alreadythere samefilesystem = ifM (Annex.getRead
inprogress <- if samefilesystem
then sizeOfDownloadsInProgress (/= key)
else pure 0
dir >>= liftIO . getDiskFree . fromRawFilePath >>= \case
dir >>= liftIO . getDiskFree . fromOsPath >>= \case
Just have -> do
reserve <- annexDiskReserve <$> Annex.getGitConfig
let delta = sz + reserve - have - alreadythere + inprogress
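To make the reserve arithmetic concrete (illustrative numbers, assuming the check succeeds when delta is not positive, as in the surrounding code):

-- annex.diskreserve = 1 GB, 5 GB free, downloading a 2 GB key, none of it
-- already present, no other downloads in progress:
--   delta = 2 + 1 - 5 - 0 + 0 = -2 GB    -- delta <= 0, enough space
-- with only 2 GB free: delta = 2 + 1 - 2 = 1 GB    -- not enough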

View file

@ -30,12 +30,14 @@ import System.PosixCompat.Files (fileMode)
-
- Returns an InodeCache if it populated the pointer file.
-}
populatePointerFile :: Restage -> Key -> RawFilePath -> RawFilePath -> Annex (Maybe InodeCache)
populatePointerFile :: Restage -> Key -> OsPath -> OsPath -> Annex (Maybe InodeCache)
populatePointerFile restage k obj f = go =<< liftIO (isPointerFile f)
where
go (Just k') | k == k' = do
destmode <- liftIO $ catchMaybeIO $ fileMode <$> R.getFileStatus f
liftIO $ removeWhenExistsWith R.removeLink f
let f' = fromOsPath f
destmode <- liftIO $ catchMaybeIO $
fileMode <$> R.getFileStatus f'
liftIO $ removeWhenExistsWith R.removeLink f'
(ic, populated) <- replaceWorkTreeFile f $ \tmp -> do
ok <- linkOrCopy k obj tmp destmode >>= \case
Just _ -> thawContent tmp >> return True
@ -51,19 +53,20 @@ populatePointerFile restage k obj f = go =<< liftIO (isPointerFile f)
{- Removes the content from a pointer file, replacing it with a pointer.
-
- Does not check if the pointer file is modified. -}
depopulatePointerFile :: Key -> RawFilePath -> Annex ()
depopulatePointerFile :: Key -> OsPath -> Annex ()
depopulatePointerFile key file = do
st <- liftIO $ catchMaybeIO $ R.getFileStatus file
let file' = fromOsPath file
st <- liftIO $ catchMaybeIO $ R.getFileStatus file'
let mode = fmap fileMode st
secureErase file
liftIO $ removeWhenExistsWith R.removeLink file
liftIO $ removeWhenExistsWith R.removeLink file'
ic <- replaceWorkTreeFile file $ \tmp -> do
liftIO $ writePointerFile tmp key mode
#if ! defined(mingw32_HOST_OS)
-- Don't advance mtime; this avoids unnecessary re-smudging
-- by git in some cases.
liftIO $ maybe noop
(\t -> touch tmp t False)
(\t -> touch (fromOsPath tmp) t False)
(fmap Posix.modificationTimeHiRes st)
#endif
withTSDelta (liftIO . genInodeCache tmp)

View file

@ -51,7 +51,7 @@ tryCopyCoW (CopyCoWTried copycowtried) src dest meterupdate =
-- CoW is known to work, so delete
-- dest if it exists in order to do a fast
-- CoW copy.
void $ tryIO $ removeFile dest
void $ tryIO $ removeFile dest'
docopycow
, return False
)
@ -60,18 +60,18 @@ tryCopyCoW (CopyCoWTried copycowtried) src dest meterupdate =
docopycow = watchFileSize dest' meterupdate $ const $
copyCoW CopyTimeStamps src dest
dest' = toRawFilePath dest
dest' = toOsPath dest
-- Check if the dest file already exists, which would prevent
-- probing CoW. If the file exists but is empty, there's no benefit
-- to resuming from it when CoW does not work, so remove it.
destfilealreadypopulated =
tryIO (R.getFileStatus dest') >>= \case
tryIO (R.getFileStatus (toRawFilePath dest)) >>= \case
Left _ -> return False
Right st -> do
sz <- getFileSize' dest' st
if sz == 0
then tryIO (removeFile dest) >>= \case
then tryIO (removeFile dest') >>= \case
Right () -> return False
Left _ -> return True
else return True
@ -111,14 +111,15 @@ fileCopier copycowtried src dest meterupdate iv =
docopy = do
-- The file might have had the write bit removed,
-- so make sure we can write to it.
void $ tryIO $ allowWrite dest'
void $ tryIO $ allowWrite (toOsPath dest)
withBinaryFile src ReadMode $ \hsrc ->
fileContentCopier hsrc dest meterupdate iv
-- Copy src mode and mtime.
mode <- fileMode <$> R.getFileStatus (toRawFilePath src)
mtime <- utcTimeToPOSIXSeconds <$> getModificationTime src
mtime <- utcTimeToPOSIXSeconds
<$> getModificationTime (toOsPath src)
R.setFileMode dest' mode
touch dest' mtime False

View file

@ -85,9 +85,9 @@ startExternalAddonProcess basecmd ps pid = do
runerr (Just cmd) =
return $ Left $ ProgramFailure $
"Cannot run " ++ cmd ++ " -- Make sure it's executable and that its dependencies are installed."
"Cannot run " ++ fromOsPath cmd ++ " -- Make sure it's executable and that its dependencies are installed."
runerr Nothing = do
path <- intercalate ":" <$> getSearchPath
path <- intercalate ":" . map fromOsPath <$> getSearchPath
return $ Left $ ProgramNotInstalled $
"Cannot run " ++ basecmd ++ " -- It is not installed in PATH (" ++ path ++ ")"

View file

@ -66,13 +66,13 @@ withIndexFile i = withAltRepo usecachedgitenv restoregitenv
{- Runs an action using a different git work tree.
-
- Smudge and clean filters are disabled in this work tree. -}
withWorkTree :: FilePath -> Annex a -> Annex a
withWorkTree :: OsPath -> Annex a -> Annex a
withWorkTree d a = withAltRepo
(\g -> return $ (g { location = modlocation (location g), gitGlobalOpts = gitGlobalOpts g ++ bypassSmudgeConfig }, ()))
(\g g' -> g' { location = location g, gitGlobalOpts = gitGlobalOpts g })
(const a)
where
modlocation l@(Local {}) = l { worktree = Just (toRawFilePath d) }
modlocation l@(Local {}) = l { worktree = Just d }
modlocation _ = giveup "withWorkTree of non-local git repo"
{- Runs an action with the git index file and HEAD, and a few other
@ -83,13 +83,13 @@ withWorkTree d a = withAltRepo
-
- Needs git 2.2.0 or newer.
-}
withWorkTreeRelated :: FilePath -> Annex a -> Annex a
withWorkTreeRelated :: OsPath -> Annex a -> Annex a
withWorkTreeRelated d a = withAltRepo modrepo unmodrepo (const a)
where
modrepo g = liftIO $ do
g' <- addGitEnv g "GIT_COMMON_DIR" . fromRawFilePath
g' <- addGitEnv g "GIT_COMMON_DIR" . fromOsPath
=<< absPath (localGitDir g)
g'' <- addGitEnv g' "GIT_DIR" d
g'' <- addGitEnv g' "GIT_DIR" (fromOsPath d)
return (g'' { gitEnvOverridesGitDir = True }, ())
unmodrepo g g' = g'
{ gitEnv = gitEnv g

View file

@ -28,7 +28,7 @@ hashObjectStop = maybe noop stop =<< Annex.getState Annex.hashobjecthandle
liftIO $ freeResourcePool p Git.HashObject.hashObjectStop
Annex.changeState $ \s -> s { Annex.hashobjecthandle = Nothing }
hashFile :: RawFilePath -> Annex Sha
hashFile :: OsPath -> Annex Sha
hashFile f = withHashObjectHandle $ \h ->
liftIO $ Git.HashObject.hashFile h f

View file

@ -30,22 +30,22 @@ compareInodeCachesWith = ifM inodesChanged ( return Weakly, return Strongly )
{- Checks if one of the provided old InodeCache matches the current
- version of a file. -}
sameInodeCache :: RawFilePath -> [InodeCache] -> Annex Bool
sameInodeCache :: OsPath -> [InodeCache] -> Annex Bool
sameInodeCache file [] = do
fastDebug "Annex.InodeSentinal" $
fromRawFilePath file ++ " inode cache empty"
fromOsPath file ++ " inode cache empty"
return False
sameInodeCache file old = go =<< withTSDelta (liftIO . genInodeCache file)
where
go Nothing = do
fastDebug "Annex.InodeSentinal" $
fromRawFilePath file ++ " not present, cannot compare with inode cache"
fromOsPath file ++ " not present, cannot compare with inode cache"
return False
go (Just curr) = ifM (elemInodeCaches curr old)
( return True
, do
fastDebug "Annex.InodeSentinal" $
fromRawFilePath file ++ " (" ++ show curr ++ ") does not match inode cache (" ++ show old ++ ")"
fromOsPath file ++ " (" ++ show curr ++ ") does not match inode cache (" ++ show old ++ ")"
return False
)
@ -99,7 +99,7 @@ createInodeSentinalFile evenwithobjects =
alreadyexists = liftIO. sentinalFileExists =<< annexSentinalFile
hasobjects
| evenwithobjects = pure False
| otherwise = liftIO . doesDirectoryExist . fromRawFilePath
| otherwise = liftIO . doesDirectoryExist
=<< fromRepo gitAnnexObjectDir
annexSentinalFile :: Annex SentinalFile

View file

@ -26,13 +26,12 @@ import Annex.LockFile
import Annex.BranchState
import Types.BranchState
import Utility.Directory.Stream
import qualified Utility.RawFilePath as R
import qualified Utility.FileIO as F
import qualified Utility.OsString as OS
import qualified Data.Set as S
import qualified Data.ByteString.Lazy as L
import qualified Data.ByteString as B
import qualified System.FilePath.ByteString as P
import Data.ByteString.Builder
import Data.Char
@ -83,7 +82,7 @@ privateUUIDsKnown' = not . S.null . annexPrivateRepos . Annex.gitconfig
- interrupted write truncating information that was earlier read from the
- file, and so losing data.
-}
setJournalFile :: Journalable content => JournalLocked -> RegardingUUID -> RawFilePath -> content -> Annex ()
setJournalFile :: Journalable content => JournalLocked -> RegardingUUID -> OsPath -> content -> Annex ()
setJournalFile _jl ru file content = withOtherTmp $ \tmp -> do
st <- getState
jd <- fromRepo =<< ifM (regardingPrivateUUID ru)
@ -92,10 +91,10 @@ setJournalFile _jl ru file content = withOtherTmp $ \tmp -> do
)
-- journal file is written atomically
let jfile = journalFile file
let tmpfile = tmp P.</> jfile
liftIO $ F.withFile (toOsPath tmpfile) WriteMode $ \h ->
let tmpfile = tmp </> jfile
liftIO $ F.withFile tmpfile WriteMode $ \h ->
writeJournalHandle h content
let dest = jd P.</> jfile
let dest = jd </> jfile
let mv = do
liftIO $ moveFile tmpfile dest
setAnnexFilePerm dest
@ -103,20 +102,20 @@ setJournalFile _jl ru file content = withOtherTmp $ \tmp -> do
-- exists
mv `catchIO` (const (createAnnexDirectory jd >> mv))
newtype AppendableJournalFile = AppendableJournalFile (RawFilePath, RawFilePath)
newtype AppendableJournalFile = AppendableJournalFile (OsPath, OsPath)
{- If the journal file does not exist, it cannot be appended to, because
- that would overwrite whatever content the file has in the git-annex
- branch. -}
checkCanAppendJournalFile :: JournalLocked -> RegardingUUID -> RawFilePath -> Annex (Maybe AppendableJournalFile)
checkCanAppendJournalFile :: JournalLocked -> RegardingUUID -> OsPath -> Annex (Maybe AppendableJournalFile)
checkCanAppendJournalFile _jl ru file = do
st <- getState
jd <- fromRepo =<< ifM (regardingPrivateUUID ru)
( return (gitAnnexPrivateJournalDir st)
, return (gitAnnexJournalDir st)
)
let jfile = jd P.</> journalFile file
ifM (liftIO $ R.doesPathExist jfile)
let jfile = jd </> journalFile file
ifM (liftIO $ doesFileExist jfile)
( return (Just (AppendableJournalFile (jd, jfile)))
, return Nothing
)
@ -134,7 +133,7 @@ checkCanAppendJournalFile _jl ru file = do
-}
appendJournalFile :: Journalable content => JournalLocked -> AppendableJournalFile -> content -> Annex ()
appendJournalFile _jl (AppendableJournalFile (jd, jfile)) content = do
let write = liftIO $ F.withFile (toOsPath jfile) ReadWriteMode $ \h -> do
let write = liftIO $ F.withFile jfile ReadWriteMode $ \h -> do
sz <- hFileSize h
when (sz /= 0) $ do
hSeek h SeekFromEnd (-1)
@ -161,7 +160,7 @@ data JournalledContent
-- information that were made after that journal file was written.
{- Gets any journalled content for a file in the branch. -}
getJournalFile :: JournalLocked -> GetPrivate -> RawFilePath -> Annex JournalledContent
getJournalFile :: JournalLocked -> GetPrivate -> OsPath -> Annex JournalledContent
getJournalFile _jl = getJournalFileStale
data GetPrivate = GetPrivate Bool
@ -179,7 +178,7 @@ data GetPrivate = GetPrivate Bool
- (or is in progress when this is called), if the file content does not end
- with a newline, it is truncated back to the previous newline.
-}
getJournalFileStale :: GetPrivate -> RawFilePath -> Annex JournalledContent
getJournalFileStale :: GetPrivate -> OsPath -> Annex JournalledContent
getJournalFileStale (GetPrivate getprivate) file = do
st <- Annex.getState id
let repo = Annex.repo st
@ -205,7 +204,7 @@ getJournalFileStale (GetPrivate getprivate) file = do
jfile = journalFile file
getfrom d = catchMaybeIO $
discardIncompleteAppend . L.fromStrict
<$> F.readFile' (toOsPath (d P.</> jfile))
<$> F.readFile' (d </> jfile)
-- Note that this forces read of the whole lazy bytestring.
discardIncompleteAppend :: L.ByteString -> L.ByteString
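A behavioural example of that truncation (a sketch based on the description above):

-- discardIncompleteAppend "foo bar\nbaz qux\npartial" == "foo bar\nbaz qux\n"
-- discardIncompleteAppend "foo bar\nbaz qux\n"        == "foo bar\nbaz qux\n"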
@ -224,18 +223,18 @@ discardIncompleteAppend v
{- List of existing journal files in a journal directory, but without locking,
- may miss new ones just being added, or may have false positives if the
- journal is staged as it is run. -}
getJournalledFilesStale :: (BranchState -> Git.Repo -> RawFilePath) -> Annex [RawFilePath]
getJournalledFilesStale :: (BranchState -> Git.Repo -> OsPath) -> Annex [OsPath]
getJournalledFilesStale getjournaldir = do
bs <- getState
repo <- Annex.gitRepo
let d = getjournaldir bs repo
fs <- liftIO $ catchDefaultIO [] $
getDirectoryContents (fromRawFilePath d)
return $ filter (`notElem` [".", ".."]) $
map (fileJournal . toRawFilePath) fs
getDirectoryContents d
return $ filter (`notElem` dirCruft) $
map fileJournal fs
{- Directory handle open on a journal directory. -}
withJournalHandle :: (BranchState -> Git.Repo -> RawFilePath) -> (DirectoryHandle -> IO a) -> Annex a
withJournalHandle :: (BranchState -> Git.Repo -> OsPath) -> (DirectoryHandle -> IO a) -> Annex a
withJournalHandle getjournaldir a = do
bs <- getState
repo <- Annex.gitRepo
@ -244,15 +243,15 @@ withJournalHandle getjournaldir a = do
where
-- avoid overhead of creating the journal directory when it already
-- exists
opendir d = liftIO (openDirectory d)
opendir d = liftIO (openDirectory (fromOsPath d))
`catchIO` (const (createAnnexDirectory d >> opendir d))
{- Checks if there are changes in the journal. -}
journalDirty :: (BranchState -> Git.Repo -> RawFilePath) -> Annex Bool
journalDirty :: (BranchState -> Git.Repo -> OsPath) -> Annex Bool
journalDirty getjournaldir = do
st <- getState
d <- fromRepo (getjournaldir st)
liftIO $ isDirectoryPopulated d
liftIO $ isDirectoryPopulated (fromOsPath d)
{- Produces a filename to use in the journal for a file on the branch.
- The filename does not include the journal directory.
@ -261,33 +260,33 @@ journalDirty getjournaldir = do
- used in the branch is not necessary, and all the files are put directly
- in the journal directory.
-}
journalFile :: RawFilePath -> RawFilePath
journalFile file = B.concatMap mangle file
journalFile :: OsPath -> OsPath
journalFile file = OS.concat $ map mangle $ OS.unpack file
where
mangle c
| P.isPathSeparator c = B.singleton underscore
| c == underscore = B.pack [underscore, underscore]
| otherwise = B.singleton c
underscore = fromIntegral (ord '_')
| isPathSeparator c = OS.singleton underscore
| c == underscore = OS.pack [underscore, underscore]
| otherwise = OS.singleton c
underscore = unsafeFromChar '_'
{- Converts a journal file (relative to the journal dir) back to the
- filename on the branch. -}
fileJournal :: RawFilePath -> RawFilePath
fileJournal :: OsPath -> OsPath
fileJournal = go
where
go b =
let (h, t) = B.break (== underscore) b
in h <> case B.uncons t of
let (h, t) = OS.break (== underscore) b
in h <> case OS.uncons t of
Nothing -> t
Just (_u, t') -> case B.uncons t' of
Just (_u, t') -> case OS.uncons t' of
Nothing -> t'
Just (w, t'')
| w == underscore ->
B.cons underscore (go t'')
OS.cons underscore (go t'')
| otherwise ->
B.cons P.pathSeparator (go t')
OS.cons pathSeparator (go t')
underscore = fromIntegral (ord '_')
underscore = unsafeFromChar '_'
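A round-trip example of the mangling (a sketch using the definitions above):

-- journalFile (literalOsPath "foo/bar_baz")  == literalOsPath "foo_bar__baz"
-- fileJournal (literalOsPath "foo_bar__baz") == literalOsPath "foo/bar_baz"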
{- Sentinal value, only produced by lockJournal; required
- as a parameter by things that need to ensure the journal is

View file

@ -39,11 +39,11 @@ import Utility.CopyFile
import qualified Database.Keys.Handle
import qualified Utility.RawFilePath as R
import qualified Utility.FileIO as F
import qualified Utility.OsString as OS
import qualified Data.ByteString as S
import qualified Data.ByteString.Char8 as S8
import qualified Data.ByteString.Lazy as L
import qualified System.FilePath.ByteString as P
#ifndef mingw32_HOST_OS
#if MIN_VERSION_unix(2,8,0)
#else
@ -103,7 +103,7 @@ getAnnexLinkTarget' file coresymlinks = if coresymlinks
then mempty
else s
makeAnnexLink :: LinkTarget -> RawFilePath -> Annex ()
makeAnnexLink :: LinkTarget -> OsPath -> Annex ()
makeAnnexLink = makeGitLink
{- Creates a link on disk.
@ -113,26 +113,31 @@ makeAnnexLink = makeGitLink
- it's staged as such, so use addAnnexLink when adding a new file or
- modified link to git.
-}
makeGitLink :: LinkTarget -> RawFilePath -> Annex ()
makeGitLink :: LinkTarget -> OsPath -> Annex ()
makeGitLink linktarget file = ifM (coreSymlinks <$> Annex.getGitConfig)
( liftIO $ do
void $ tryIO $ R.removeLink file
R.createSymbolicLink linktarget file
, liftIO $ F.writeFile' (toOsPath file) linktarget
void $ tryIO $ R.removeLink file'
R.createSymbolicLink linktarget file'
, liftIO $ F.writeFile' file linktarget
)
where
file' = fromOsPath file
{- Creates a link on disk, and additionally stages it in git. -}
addAnnexLink :: LinkTarget -> RawFilePath -> Annex ()
addAnnexLink :: LinkTarget -> OsPath -> Annex ()
addAnnexLink linktarget file = do
makeAnnexLink linktarget file
stageSymlink file =<< hashSymlink linktarget
{- Injects a symlink target into git, returning its Sha. -}
hashSymlink :: LinkTarget -> Annex Sha
hashSymlink = hashBlob . toInternalGitPath
hashSymlink = go . fromOsPath . toInternalGitPath . toOsPath
where
go :: LinkTarget -> Annex Sha
go = hashBlob
{- Stages a symlink to an annexed object, using a Sha of its target. -}
stageSymlink :: RawFilePath -> Sha -> Annex ()
stageSymlink :: OsPath -> Sha -> Annex ()
stageSymlink file sha =
Annex.Queue.addUpdateIndex =<<
inRepo (Git.UpdateIndex.stageSymlink file sha)
@ -142,7 +147,7 @@ hashPointerFile :: Key -> Annex Sha
hashPointerFile key = hashBlob $ formatPointer key
{- Stages a pointer file, using a Sha of its content -}
stagePointerFile :: RawFilePath -> Maybe FileMode -> Sha -> Annex ()
stagePointerFile :: OsPath -> Maybe FileMode -> Sha -> Annex ()
stagePointerFile file mode sha =
Annex.Queue.addUpdateIndex =<<
inRepo (Git.UpdateIndex.stageFile sha treeitemtype file)
@ -151,10 +156,10 @@ stagePointerFile file mode sha =
| maybe False isExecutable mode = TreeExecutable
| otherwise = TreeFile
writePointerFile :: RawFilePath -> Key -> Maybe FileMode -> IO ()
writePointerFile :: OsPath -> Key -> Maybe FileMode -> IO ()
writePointerFile file k mode = do
F.writeFile' (toOsPath file) (formatPointer k)
maybe noop (R.setFileMode file) mode
F.writeFile' file (formatPointer k)
maybe noop (R.setFileMode (fromOsPath file)) mode
newtype Restage = Restage Bool
@ -187,7 +192,7 @@ newtype Restage = Restage Bool
- if the process is interrupted before the git queue is flushed, the
- restage will be taken care of later.
-}
restagePointerFile :: Restage -> RawFilePath -> InodeCache -> Annex ()
restagePointerFile :: Restage -> OsPath -> InodeCache -> Annex ()
restagePointerFile (Restage False) f orig = do
flip writeRestageLog orig =<< inRepo (toTopFilePath f)
toplevelWarning True $ unableToRestage $ Just f
@ -225,14 +230,14 @@ restagePointerFiles r = unlessM (Annex.getState Annex.insmudgecleanfilter) $ do
=<< Annex.getRead Annex.keysdbhandle
realindex <- liftIO $ Git.Index.currentIndexFile r
numsz@(numfiles, _) <- calcnumsz
let lock = fromRawFilePath (Git.Index.indexFileLock realindex)
let lock = Git.Index.indexFileLock realindex
lockindex = liftIO $ catchMaybeIO $ Git.LockFile.openLock' lock
unlockindex = liftIO . maybe noop Git.LockFile.closeLock
showwarning = warning $ unableToRestage Nothing
go Nothing = showwarning
go (Just _) = withtmpdir $ \tmpdir -> do
tsd <- getTSDelta
let tmpindex = toRawFilePath (tmpdir </> "index")
let tmpindex = tmpdir </> literalOsPath "index"
let replaceindex = liftIO $ moveFile tmpindex realindex
let updatetmpindex = do
r' <- liftIO $ Git.Env.addGitEnv r Git.Index.indexEnv
@ -247,8 +252,8 @@ restagePointerFiles r = unlessM (Annex.getState Annex.insmudgecleanfilter) $ do
bracket lockindex unlockindex go
where
withtmpdir = withTmpDirIn
(fromRawFilePath $ Git.localGitDir r)
(toOsPath "annexindex")
(Git.localGitDir r)
(literalOsPath "annexindex")
isunmodified tsd f orig =
genInodeCache f tsd >>= return . \case
@ -325,7 +330,7 @@ restagePointerFiles r = unlessM (Annex.getState Annex.insmudgecleanfilter) $ do
ck = ConfigKey "filter.annex.process"
ckd = ConfigKey "filter.annex.process-temp-disabled"
unableToRestage :: Maybe RawFilePath -> StringContainingQuotedPath
unableToRestage :: Maybe OsPath -> StringContainingQuotedPath
unableToRestage mf =
"git status will show " <> maybe "some files" QuotedPath mf
<> " to be modified, since content availability has changed"
@ -361,7 +366,8 @@ parseLinkTargetOrPointer' b =
Nothing -> Right Nothing
where
parsekey l
| isLinkToAnnex l = fileKey $ snd $ S8.breakEnd pathsep l
| isLinkToAnnex l = fileKey $ toOsPath $
snd $ S8.breakEnd pathsep l
| otherwise = Nothing
restvalid r
@ -400,9 +406,9 @@ parseLinkTargetOrPointerLazy' b =
in parseLinkTargetOrPointer' (L.toStrict b')
formatPointer :: Key -> S.ByteString
formatPointer k = prefix <> keyFile k <> nl
formatPointer k = fromOsPath prefix <> fromOsPath (keyFile k) <> nl
where
prefix = toInternalGitPath $ P.pathSeparator `S.cons` objectDir
prefix = toInternalGitPath $ pathSeparator `OS.cons` objectDir
nl = S8.singleton '\n'
{- Maximum size of a file that could be a pointer to a key.
@ -434,21 +440,21 @@ maxSymlinkSz = 8192
- an object that looks like a pointer file. Or that a non-annex
- symlink does. Avoids a false positive in those cases.
- -}
isPointerFile :: RawFilePath -> IO (Maybe Key)
isPointerFile :: OsPath -> IO (Maybe Key)
isPointerFile f = catchDefaultIO Nothing $
#if defined(mingw32_HOST_OS)
F.withFile (toOsPath f) ReadMode readhandle
F.withFile f ReadMode readhandle
#else
#if MIN_VERSION_unix(2,8,0)
let open = do
fd <- openFd (fromRawFilePath f) ReadOnly
fd <- openFd (fromOsPath f) ReadOnly
(defaultFileFlags { nofollow = True })
fdToHandle fd
in bracket open hClose readhandle
#else
ifM (isSymbolicLink <$> R.getSymbolicLinkStatus f)
ifM (isSymbolicLink <$> R.getSymbolicLinkStatus (toRawFilePath f))
( return Nothing
, F.withFile (toOsPath f) ReadMode readhandle
, F.withFile f ReadMode readhandle
)
#endif
#endif
@ -463,13 +469,13 @@ isPointerFile f = catchDefaultIO Nothing $
- than .git to be used.
-}
isLinkToAnnex :: S.ByteString -> Bool
isLinkToAnnex s = p `S.isInfixOf` s
isLinkToAnnex s = p `OS.isInfixOf` (toOsPath s)
#ifdef mingw32_HOST_OS
-- '/' is used inside pointer files on Windows, not the native '\'
|| p' `S.isInfixOf` s
|| p' `OS.isInfixOf` s
#endif
where
p = P.pathSeparator `S.cons` objectDir
p = pathSeparator `OS.cons` objectDir
#ifdef mingw32_HOST_OS
p' = toInternalGitPath p
#endif
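For reference, the single line formatPointer produces looks like this (hypothetical key name):

-- formatPointer key == "/annex/objects/SHA256E-s1048576--<checksum>.bin\n"
-- and isLinkToAnnex checks for the "/annex/objects" infix (on Windows both
-- separator forms are checked, as above).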

View file

@ -387,7 +387,7 @@ gitAnnexFsckDir u r mc = case annexDbDir =<< mc of
Nothing -> go (gitAnnexDir r)
Just d -> go d
where
go d = d </> literalOsPath "fsck" </> uuidPath u
go d = d </> literalOsPath "fsck" </> fromUUID u
{- used to store information about incremental fscks. -}
gitAnnexFsckState :: UUID -> Git.Repo -> OsPath
@ -408,7 +408,7 @@ gitAnnexFsckDbLock u r c = gitAnnexFsckDir u r (Just c) </> literalOsPath "fsck.
{- .git/annex/fsckresults/uuid is used to store results of git fscks -}
gitAnnexFsckResultsLog :: UUID -> Git.Repo -> OsPath
gitAnnexFsckResultsLog u r =
gitAnnexDir r </> literalOsPath "fsckresults" </> uuidPath u
gitAnnexDir r </> literalOsPath "fsckresults" </> fromUUID u
{- .git/annex/upgrade.log is used to record repository version upgrades. -}
gitAnnexUpgradeLog :: Git.Repo -> OsPath
@ -476,7 +476,7 @@ gitAnnexExportDir r c = fromMaybe (gitAnnexDir r) (annexDbDir c)
{- Directory containing database used to record export info. -}
gitAnnexExportDbDir :: UUID -> Git.Repo -> GitConfig -> OsPath
gitAnnexExportDbDir u r c =
gitAnnexExportDir r c </> uuidPath u </> literalOsPath "exportdb"
gitAnnexExportDir r c </> fromUUID u </> literalOsPath "exportdb"
{- Lock file for export database. -}
gitAnnexExportLock :: UUID -> Git.Repo -> GitConfig -> OsPath
@ -491,7 +491,7 @@ gitAnnexExportUpdateLock u r c = gitAnnexExportDbDir u r c <> literalOsPath ".up
- remote, but were excluded by its preferred content settings. -}
gitAnnexExportExcludeLog :: UUID -> Git.Repo -> OsPath
gitAnnexExportExcludeLog u r = gitAnnexDir r
</> literalOsPath "export.ex" </> uuidPath u
</> literalOsPath "export.ex" </> fromUUID u
{- Directory containing database used to record remote content ids.
-
@ -516,7 +516,7 @@ gitAnnexImportDir r c =
{- File containing state about the last import done from a remote. -}
gitAnnexImportLog :: UUID -> Git.Repo -> GitConfig -> OsPath
gitAnnexImportLog u r c =
gitAnnexImportDir r c </> uuidPath u </> literalOsPath "log"
gitAnnexImportDir r c </> fromUUID u </> literalOsPath "log"
{- Directory containing database used by importfeed. -}
gitAnnexImportFeedDbDir :: Git.Repo -> GitConfig -> OsPath

View file

@ -7,20 +7,17 @@
module Annex.Multicast where
import Common
import Annex.Path
import Utility.Env
import Utility.PartialPrelude
import System.Process
import System.IO
import GHC.IO.Handle.FD
import Control.Applicative
import Prelude
multicastReceiveEnv :: String
multicastReceiveEnv = "GIT_ANNEX_MULTICAST_RECEIVE"
multicastCallbackEnv :: IO (FilePath, [(String, String)], Handle)
multicastCallbackEnv :: IO (OsPath, [(String, String)], Handle)
multicastCallbackEnv = do
gitannex <- programPath
-- This will even work on Windows

View file

@ -40,18 +40,18 @@ import qualified Data.Map as M
- git-annex-shell or git-remote-annex, this finds a git-annex program
- instead.
-}
programPath :: IO FilePath
programPath :: IO OsPath
programPath = go =<< getEnv "GIT_ANNEX_DIR"
where
go (Just dir) = do
name <- reqgitannex <$> getProgName
return (dir </> name)
return (toOsPath dir </> toOsPath name)
go Nothing = do
name <- getProgName
exe <- if isgitannex name
then getExecutablePath
else pure "git-annex"
p <- if isAbsolute exe
p <- if isAbsolute (toOsPath exe)
then return exe
else fromMaybe exe <$> readProgramFile
maybe cannotFindProgram return =<< searchPath p
@ -65,12 +65,12 @@ programPath = go =<< getEnv "GIT_ANNEX_DIR"
readProgramFile :: IO (Maybe FilePath)
readProgramFile = catchDefaultIO Nothing $ do
programfile <- programFile
headMaybe . lines <$> readFile programfile
headMaybe . lines <$> readFile (fromOsPath programfile)
cannotFindProgram :: IO a
cannotFindProgram = do
f <- programFile
giveup $ "cannot find git-annex program in PATH or in " ++ f
giveup $ "cannot find git-annex program in PATH or in " ++ fromOsPath f
{- Runs a git-annex child process.
-
@ -88,7 +88,7 @@ gitAnnexChildProcess
gitAnnexChildProcess subcmd ps f a = do
cmd <- liftIO programPath
ps' <- gitAnnexChildProcessParams subcmd ps
pidLockChildProcess cmd ps' f a
pidLockChildProcess (fromOsPath cmd) ps' f a
{- Parameters to pass to a git-annex child process to run a subcommand
- with some parameters.

View file

@ -31,7 +31,7 @@ addCommand commonparams command params files = do
store =<< flushWhenFull =<<
(Git.Queue.addCommand commonparams command params files q =<< gitRepo)
addFlushAction :: Git.Queue.FlushActionRunner Annex -> [RawFilePath] -> Annex ()
addFlushAction :: Git.Queue.FlushActionRunner Annex -> [OsPath] -> Annex ()
addFlushAction runner files = do
q <- get
store =<< flushWhenFull =<<

View file

@ -21,8 +21,6 @@ import Utility.Tmp
import Utility.Tmp.Dir
import Utility.Directory.Create
import qualified System.FilePath.ByteString as P
{- replaceFile on a file located inside the gitAnnexDir. -}
replaceGitAnnexDirFile :: OsPath -> (OsPath -> Annex a) -> Annex a
replaceGitAnnexDirFile = replaceFile createAnnexDirectory

View file

@ -23,8 +23,6 @@ import Utility.PID
import Control.Concurrent
import Text.Read
import Data.Time.Clock.POSIX
import qualified Utility.RawFilePath as R
import qualified System.FilePath.ByteString as P
{- Called when a location log change is journalled, so the LiveUpdate
- is done. This is called with the journal still locked, so no concurrent
@ -146,12 +144,11 @@ checkStaleSizeChanges :: RepoSizeHandle -> Annex ()
checkStaleSizeChanges h@(RepoSizeHandle (Just _) livev) = do
livedir <- calcRepo' gitAnnexRepoSizeLiveDir
pid <- liftIO getPID
let pidlockfile = show pid
let pidlockfile = toOsPath (show pid)
now <- liftIO getPOSIXTime
liftIO (takeMVar livev) >>= \case
Nothing -> do
lck <- takeExclusiveLock $
livedir P.</> toRawFilePath pidlockfile
lck <- takeExclusiveLock $ livedir </> pidlockfile
go livedir lck pidlockfile now
Just v@(lck, lastcheck)
| now >= lastcheck + 60 ->
@ -161,11 +158,11 @@ checkStaleSizeChanges h@(RepoSizeHandle (Just _) livev) = do
where
go livedir lck pidlockfile now = do
void $ tryNonAsync $ do
lockfiles <- liftIO $ filter (not . dirCruft . toRawFilePath)
<$> getDirectoryContents (fromRawFilePath livedir)
lockfiles <- liftIO $ filter (`notElem` dirCruft)
<$> getDirectoryContents livedir
stale <- forM lockfiles $ \lockfile ->
if (lockfile /= pidlockfile)
then case readMaybe lockfile of
then case readMaybe (fromOsPath lockfile) of
Nothing -> return Nothing
Just pid -> checkstale livedir lockfile pid
else return Nothing
@ -176,7 +173,7 @@ checkStaleSizeChanges h@(RepoSizeHandle (Just _) livev) = do
liftIO $ putMVar livev (Just (lck, now))
checkstale livedir lockfile pid =
let f = livedir P.</> toRawFilePath lockfile
let f = livedir </> lockfile
in trySharedLock f >>= \case
Nothing -> return Nothing
Just lck -> do
@ -184,6 +181,6 @@ checkStaleSizeChanges h@(RepoSizeHandle (Just _) livev) = do
( StaleSizeChanger (SizeChangeProcessId pid)
, do
dropLock lck
removeWhenExistsWith R.removeLink f
removeWhenExistsWith removeFile f
)
checkStaleSizeChanges (RepoSizeHandle Nothing _) = noop

View file

@ -40,14 +40,14 @@ import Types.Concurrency
import Git.Env
import Git.Ssh
import qualified Utility.RawFilePath as R
import qualified Utility.OsString as OS
import Annex.Perms
#ifndef mingw32_HOST_OS
import Annex.LockPool
#endif
import Control.Concurrent.STM
import qualified Data.ByteString as S
import qualified System.FilePath.ByteString as P
import qualified Data.ByteString.Short as SBS
{- Some ssh commands are fed stdin on a pipe and so should be allowed to
- consume it. But ssh commands that are not piped stdin should generally
@ -101,15 +101,15 @@ consumeStdinParams NoConsumeStdin = [Param "-n"]
{- Returns a filename to use for a ssh connection caching socket, and
- parameters to enable ssh connection caching. -}
sshCachingInfo :: (SshHost, Maybe Integer) -> Annex (Maybe RawFilePath, [CommandParam])
sshCachingInfo :: (SshHost, Maybe Integer) -> Annex (Maybe OsPath, [CommandParam])
sshCachingInfo (host, port) = go =<< sshCacheDir'
where
go (Right dir) =
liftIO (bestSocketPath $ dir P.</> hostport2socket host port) >>= return . \case
liftIO (bestSocketPath $ dir </> hostport2socket host port) >>= return . \case
Nothing -> (Nothing, [])
Just socketfile ->
(Just socketfile
, sshConnectionCachingParams (fromRawFilePath socketfile)
, sshConnectionCachingParams (fromOsPath socketfile)
)
-- No connection caching with concurrency is not a good
-- combination, so warn the user.
@ -137,10 +137,10 @@ sshCachingInfo (host, port) = go =<< sshCacheDir'
- file.
-
- If no path can be constructed that is a valid socket, returns Nothing. -}
bestSocketPath :: RawFilePath -> IO (Maybe RawFilePath)
bestSocketPath :: OsPath -> IO (Maybe OsPath)
bestSocketPath abssocketfile = do
relsocketfile <- liftIO $ relPathCwdToFile abssocketfile
let socketfile = if S.length abssocketfile <= S.length relsocketfile
let socketfile = if OS.length abssocketfile <= OS.length relsocketfile
then abssocketfile
else relsocketfile
return $ if valid_unix_socket_path socketfile sshgarbagelen
@ -167,10 +167,10 @@ sshSocketDirEnv = "GIT_ANNEX_SSH_SOCKET_DIR"
-
- The directory will be created if it does not exist.
-}
sshCacheDir :: Annex (Maybe RawFilePath)
sshCacheDir :: Annex (Maybe OsPath)
sshCacheDir = eitherToMaybe <$> sshCacheDir'
sshCacheDir' :: Annex (Either String RawFilePath)
sshCacheDir' :: Annex (Either String OsPath)
sshCacheDir' =
ifM (fromMaybe BuildInfo.sshconnectioncaching . annexSshCaching <$> Annex.getGitConfig)
( ifM crippledFileSystem
@ -191,9 +191,9 @@ sshCacheDir' =
gettmpdir = liftIO $ getEnv sshSocketDirEnv
usetmpdir tmpdir = do
let socktmp = tmpdir </> "ssh"
let socktmp = toOsPath tmpdir </> literalOsPath "ssh"
createDirectoryIfMissing True socktmp
return (toRawFilePath socktmp)
return socktmp
crippledfswarning = unwords
[ "This repository is on a crippled filesystem, so unix named"
@ -216,7 +216,7 @@ portParams (Just port) = [Param "-p", Param $ show port]
- Locks the socket lock file to prevent other git-annex processes from
- stopping the ssh multiplexer on this socket.
-}
prepSocket :: RawFilePath -> SshHost -> [CommandParam] -> Annex ()
prepSocket :: OsPath -> SshHost -> [CommandParam] -> Annex ()
prepSocket socketfile sshhost sshparams = do
-- There could be stale ssh connections hanging around
-- from a previous git-annex run that was interrupted.
@ -288,11 +288,11 @@ prepSocket socketfile sshhost sshparams = do
- and this check makes such files be skipped since the corresponding lock
- file won't exist.
-}
enumSocketFiles :: Annex [RawFilePath]
enumSocketFiles :: Annex [OsPath]
enumSocketFiles = liftIO . go =<< sshCacheDir
where
go Nothing = return []
go (Just dir) = filterM (R.doesPathExist . socket2lock)
go (Just dir) = filterM (R.doesPathExist . fromOsPath . socket2lock)
=<< filter (not . isLock)
<$> catchDefaultIO [] (dirContents dir)
@ -326,45 +326,45 @@ sshCleanup = mapM_ cleanup =<< enumSocketFiles
forceSshCleanup :: Annex ()
forceSshCleanup = mapM_ forceStopSsh =<< enumSocketFiles
forceStopSsh :: RawFilePath -> Annex ()
forceStopSsh :: OsPath -> Annex ()
forceStopSsh socketfile = withNullHandle $ \nullh -> do
let (dir, base) = splitFileName (fromRawFilePath socketfile)
let (dir, base) = splitFileName socketfile
let p = (proc "ssh" $ toCommand $
[ Param "-O", Param "stop" ] ++
sshConnectionCachingParams base ++
sshConnectionCachingParams (fromOsPath base) ++
[Param "localhost"])
{ cwd = Just dir
{ cwd = Just (fromOsPath dir)
-- "ssh -O stop" is noisy on stderr even with -q
, std_out = UseHandle nullh
, std_err = UseHandle nullh
}
void $ liftIO $ catchMaybeIO $ withCreateProcess p $ \_ _ _ pid ->
forceSuccessProcess p pid
liftIO $ removeWhenExistsWith R.removeLink socketfile
liftIO $ removeWhenExistsWith R.removeLink (fromOsPath socketfile)
{- This needs to be as short as possible, due to limitations on the length
- of the path to a socket file. At the same time, it needs to be unique
- for each host.
-}
hostport2socket :: SshHost -> Maybe Integer -> RawFilePath
hostport2socket :: SshHost -> Maybe Integer -> OsPath
hostport2socket host Nothing = hostport2socket' $ fromSshHost host
hostport2socket host (Just port) = hostport2socket' $
fromSshHost host ++ "!" ++ show port
hostport2socket' :: String -> RawFilePath
hostport2socket' :: String -> OsPath
hostport2socket' s
| length s > lengthofmd5s = toRawFilePath $ show $ md5 $ encodeBL s
| otherwise = toRawFilePath s
| length s > lengthofmd5s = toOsPath $ show $ md5 $ encodeBL s
| otherwise = toOsPath s
where
lengthofmd5s = 32
socket2lock :: RawFilePath -> RawFilePath
socket2lock :: OsPath -> OsPath
socket2lock socket = socket <> lockExt
isLock :: RawFilePath -> Bool
isLock f = lockExt `S.isSuffixOf` f
isLock :: OsPath -> Bool
isLock f = lockExt `OS.isSuffixOf` f
lockExt :: S.ByteString
lockExt = ".lock"
lockExt :: OsPath
lockExt = literalOsPath ".lock"
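An illustration of the socket-name scheme (hypothetical host; the md5 keeps long names within a 32-character budget):

-- hostport2socket' "example.com!2222"   -> literalOsPath "example.com!2222"
-- a name longer than 32 characters      -> the 32-character md5 of the name
-- socket2lock appends ".lock", and isLock tests for that suffix.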
{- This is the size of the sun_path component of sockaddr_un, which
- is the limit to the total length of the filename of a unix socket.
@ -376,8 +376,9 @@ sizeof_sockaddr_un_sun_path = 100
{- Note that this looks at the true length of the path in bytes, as it will
- appear on disk. -}
valid_unix_socket_path :: RawFilePath -> Int -> Bool
valid_unix_socket_path f n = S.length f + n < sizeof_sockaddr_un_sun_path
valid_unix_socket_path :: OsPath -> Int -> Bool
valid_unix_socket_path f n =
SBS.length (fromOsPath f) + n < sizeof_sockaddr_un_sun_path
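Worked out, with sizeof_sockaddr_un_sun_path = 100 as above and an assumed garbage allowance n:

-- a 90-byte socket path, n = 8:  90 + 8 < 100, accepted
-- a 95-byte socket path, n = 8:  95 + 8 >= 100, rejected, so bestSocketPath
--   returns Nothing and connection caching is skipped.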
{- Parses the SSH port, and returns the other OpenSSH options. If
- several ports are found, the last one takes precedence. -}
@ -463,7 +464,7 @@ sshOptionsTo remote gc localr
liftIO $ do
localr' <- addGitEnv localr sshOptionsEnv
(toSshOptionsEnv sshopts)
addGitEnv localr' gitSshEnv command
addGitEnv localr' gitSshEnv (fromOsPath command)
runSshOptions :: [String] -> String -> IO ()
runSshOptions args s = do

View file

@ -43,7 +43,7 @@ data RunTransferrer = RunTransferrer String [CommandParam] BatchCommandMaker
mkRunTransferrer :: BatchCommandMaker -> Annex RunTransferrer
mkRunTransferrer batchmaker = RunTransferrer
<$> liftIO programPath
<$> liftIO (fromOsPath <$> programPath)
<*> gitAnnexChildProcessParams "transferrer" []
<*> pure batchmaker

View file

@ -96,7 +96,7 @@ genKeyExternal ebname hasext ks meterupdate =
withExternalState ebname hasext $ \st ->
handleRequest st req notavail go
where
req = GENKEY (fromRawFilePath (contentLocation ks))
req = GENKEY (fromOsPath (contentLocation ks))
notavail = giveup $ "Cannot generate a key, since " ++ externalBackendProgram ebname ++ " is not available."
go (GENKEY_SUCCESS pk) = Just $ Result <$> fromProtoKey pk hasext ks
@ -107,12 +107,12 @@ genKeyExternal ebname hasext ks meterupdate =
return $ GetNextMessage go
go _ = Nothing
verifyKeyContentExternal :: ExternalBackendName -> HasExt -> MeterUpdate -> Key -> RawFilePath -> Annex Bool
verifyKeyContentExternal :: ExternalBackendName -> HasExt -> MeterUpdate -> Key -> OsPath -> Annex Bool
verifyKeyContentExternal ebname hasext meterupdate k f =
withExternalState ebname hasext $ \st ->
handleRequest st req notavail go
where
req = VERIFYKEYCONTENT (toProtoKey k) (fromRawFilePath f)
req = VERIFYKEYCONTENT (toProtoKey k) (fromOsPath f)
-- This should not be able to happen, because CANVERIFY is checked
-- before this function is enabled, and so the external program

View file

@ -108,6 +108,6 @@ hookExists h r = do
runHook :: (FilePath -> [CommandParam] -> IO a) -> Hook -> [CommandParam] -> Repo -> IO a
runHook runner h ps r = do
let f = fromOsPath $ hookFile h r
let f = hookFile h r
(c, cps) <- findShellCommand f
runner c (cps ++ ps)

View file

@ -53,11 +53,11 @@ data Action m
- those will be run before the FlushAction is. -}
| FlushAction
{ getFlushActionRunner :: FlushActionRunner m
, getFlushActionFiles :: [RawFilePath]
, getFlushActionFiles :: [OsPath]
}
{- The String must be unique for each flush action. -}
data FlushActionRunner m = FlushActionRunner String (Repo -> [RawFilePath] -> m ())
data FlushActionRunner m = FlushActionRunner String (Repo -> [OsPath] -> m ())
instance Eq (FlushActionRunner m) where
FlushActionRunner s1 _ == FlushActionRunner s2 _ = s1 == s2
@ -140,7 +140,7 @@ addCommand commonparams subcommand params files q repo =
{- Adds a flush action to the queue. This can co-exist with anything else
- that gets added to the queue, and when the queue is eventually flushed,
- it will be run after the other things in the queue. -}
addFlushAction :: MonadIO m => FlushActionRunner m -> [RawFilePath] -> Queue m -> Repo -> m (Queue m)
addFlushAction :: MonadIO m => FlushActionRunner m -> [OsPath] -> Queue m -> Repo -> m (Queue m)
addFlushAction runner files q repo =
updateQueue action (const False) (length files) q repo
where

View file

@ -15,7 +15,6 @@ import Annex.Common
import Git.Fsck
import Git.Types
import Logs.File
import qualified Utility.RawFilePath as R
import qualified Data.Set as S
@ -25,7 +24,7 @@ writeFsckResults u fsckresults = do
case serializeFsckResults fsckresults of
Just s -> store s logfile
Nothing -> liftIO $
removeWhenExistsWith R.removeLink logfile
removeWhenExistsWith removeFile logfile
where
store s logfile = writeLogFile logfile s
@ -46,7 +45,7 @@ readFsckResults :: UUID -> Annex FsckResults
readFsckResults u = do
logfile <- fromRepo $ gitAnnexFsckResultsLog u
liftIO $ catchDefaultIO (FsckFoundMissing S.empty False) $
deserializeFsckResults <$> readFile (fromRawFilePath logfile)
deserializeFsckResults <$> readFile (fromOsPath logfile)
deserializeFsckResults :: String -> FsckResults
deserializeFsckResults = deserialize . lines
@ -58,6 +57,6 @@ deserializeFsckResults = deserialize . lines
in if S.null s then FsckFailed else FsckFoundMissing s t
clearFsckResults :: UUID -> Annex ()
clearFsckResults = liftIO . removeWhenExistsWith R.removeLink
clearFsckResults = liftIO . removeWhenExistsWith removeFile
<=< fromRepo . gitAnnexFsckResultsLog


@ -18,7 +18,6 @@ import qualified Utility.FileIO as F
import qualified Data.ByteString as S
import qualified Data.ByteString.Lazy as L
import qualified Utility.RawFilePath as R
-- | Log a file whose pointer needs to be restaged in git.
-- The content of the file may not be a pointer, if it is populated with
@ -52,13 +51,13 @@ streamRestageLog finalizer processor = do
lckf <- fromRepo gitAnnexRestageLock
withExclusiveLock lckf $ liftIO $
whenM (R.doesPathExist logf) $
ifM (R.doesPathExist oldf)
whenM (doesPathExist logf) $
ifM (doesPathExist oldf)
( do
h <- F.openFile (toOsPath oldf) AppendMode
hPutStr h =<< readFile (fromRawFilePath logf)
h <- F.openFile oldf AppendMode
hPutStr h =<< readFile (fromOsPath logf)
hClose h
liftIO $ removeWhenExistsWith R.removeLink logf
liftIO $ removeWhenExistsWith removeFile logf
, moveFile logf oldf
)
@ -67,7 +66,7 @@ streamRestageLog finalizer processor = do
Just (f, ic) -> processor f ic
Nothing -> noop
liftIO $ removeWhenExistsWith R.removeLink oldf
liftIO $ removeWhenExistsWith removeFile oldf
-- | Calculate over both the current restage log, and also over the old
-- one if it had started to be processed but did not get finished due
@ -86,11 +85,12 @@ calcRestageLog start update = do
Nothing -> v
formatRestageLog :: TopFilePath -> InodeCache -> S.ByteString
formatRestageLog f ic = encodeBS (showInodeCache ic) <> ":" <> getTopFilePath f
formatRestageLog f ic =
encodeBS (showInodeCache ic) <> ":" <> fromOsPath (getTopFilePath f)
parseRestageLog :: String -> Maybe (TopFilePath, InodeCache)
parseRestageLog l =
let (ics, f) = separate (== ':') l
in do
ic <- readInodeCache ics
return (asTopFilePath (toRawFilePath f), ic)
return (asTopFilePath (toOsPath f), ic)

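So each log line has the form "<serialized inode cache>:<path relative to the top of the repo>", and parseRestageLog splits at the first ':' (the path may itself contain ':'; the inode cache serialization is assumed here not to). A property-style sketch of the intended round trip on the path part, assuming these functions are exported and decodeBS is in scope:

    prop_restage_format :: TopFilePath -> InodeCache -> Bool
    prop_restage_format f ic =
        case parseRestageLog (decodeBS (formatRestageLog f ic)) of
            Just (f', _ic) -> getTopFilePath f' == getTopFilePath f
            Nothing -> False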

@ -21,7 +21,7 @@ smudgeLog k f = do
logf <- fromRepo gitAnnexSmudgeLog
lckf <- fromRepo gitAnnexSmudgeLock
appendLogFile logf lckf $ L.fromStrict $
serializeKey' k <> " " <> getTopFilePath f
serializeKey' k <> " " <> fromOsPath (getTopFilePath f)
-- | Streams all smudged files, and then empties the log at the end.
--
@ -43,4 +43,4 @@ streamSmudged a = do
let (ks, f) = separate (== ' ') l
in do
k <- deserializeKey ks
return (k, asTopFilePath (toRawFilePath f))
return (k, asTopFilePath (toOsPath f))


@ -21,8 +21,8 @@ import Utility.PID
import Annex.LockPool
import Utility.TimeStamp
import Logs.File
import qualified Utility.RawFilePath as R
import qualified Utility.FileIO as F
import qualified Utility.OsString as OS
#ifndef mingw32_HOST_OS
import Annex.Perms
#endif
@ -30,9 +30,6 @@ import Annex.Perms
import Data.Time.Clock
import Data.Time.Clock.POSIX
import Control.Concurrent.STM
import qualified Data.ByteString as B
import qualified Data.ByteString.Char8 as B8
import qualified System.FilePath.ByteString as P
describeTransfer :: Git.Quote.QuotePath -> Transfer -> TransferInfo -> String
describeTransfer qp t info = unwords
@ -62,20 +59,21 @@ percentComplete t info =
- appropriate permissions, which should be run after locking the transfer
- lock file, but before using the callback, and a TVar that can be used to
- read the number of bytes processed so far. -}
mkProgressUpdater :: Transfer -> TransferInfo -> RawFilePath -> Annex (MeterUpdate, Annex (), TVar (Maybe BytesProcessed))
mkProgressUpdater :: Transfer -> TransferInfo -> OsPath -> Annex (MeterUpdate, Annex (), TVar (Maybe BytesProcessed))
mkProgressUpdater t info tfile = do
let createtfile = void $ tryNonAsync $ writeTransferInfoFile info tfile
let createtfile = void $ tryNonAsync $
writeTransferInfoFile info tfile
tvar <- liftIO $ newTVarIO Nothing
loggedtvar <- liftIO $ newTVarIO 0
return (liftIO . updater (fromRawFilePath tfile) tvar loggedtvar, createtfile, tvar)
return (liftIO . updater tvar loggedtvar, createtfile, tvar)
where
updater tfile' tvar loggedtvar new = do
updater tvar loggedtvar new = do
old <- atomically $ swapTVar tvar (Just new)
let oldbytes = maybe 0 fromBytesProcessed old
let newbytes = fromBytesProcessed new
when (newbytes - oldbytes >= mindelta) $ do
let info' = info { bytesComplete = Just newbytes }
_ <- tryIO $ updateTransferInfoFile info' tfile'
_ <- tryIO $ updateTransferInfoFile info' tfile
atomically $ writeTVar loggedtvar newbytes
{- The minimum change in bytesComplete that is worth
@ -109,9 +107,9 @@ checkTransfer :: Transfer -> Annex (Maybe TransferInfo)
checkTransfer t = debugLocks $ do
(tfile, lck, moldlck) <- fromRepo $ transferFileAndLockFile t
let deletestale = do
void $ tryIO $ R.removeLink tfile
void $ tryIO $ R.removeLink lck
maybe noop (void . tryIO . R.removeLink) moldlck
void $ tryIO $ removeFile tfile
void $ tryIO $ removeFile lck
maybe noop (void . tryIO . removeFile) moldlck
#ifndef mingw32_HOST_OS
v <- getLockStatus lck
v' <- case (moldlck, v) of
@ -198,7 +196,7 @@ clearFailedTransfers u = do
removeFailedTransfer :: Transfer -> Annex ()
removeFailedTransfer t = do
f <- fromRepo $ failedTransferFile t
liftIO $ void $ tryIO $ R.removeLink f
liftIO $ void $ tryIO $ removeFile f
recordFailedTransfer :: Transfer -> TransferInfo -> Annex ()
recordFailedTransfer t info = do
@ -225,46 +223,47 @@ recordFailedTransfer t info = do
- At some point in the future, when old git-annex processes are no longer
- a concern, this complication can be removed.
-}
transferFileAndLockFile :: Transfer -> Git.Repo -> (RawFilePath, RawFilePath, Maybe RawFilePath)
transferFileAndLockFile :: Transfer -> Git.Repo -> (OsPath, OsPath, Maybe OsPath)
transferFileAndLockFile (Transfer direction u kd) r =
case direction of
Upload -> (transferfile, uuidlockfile, Nothing)
Download -> (transferfile, nouuidlockfile, Just uuidlockfile)
where
td = transferDir direction r
fu = B8.filter (/= '/') (fromUUID u)
fu = OS.filter (/= unsafeFromChar '/') (fromUUID u)
kf = keyFile (mkKey (const kd))
lckkf = "lck." <> kf
transferfile = td P.</> fu P.</> kf
uuidlockfile = td P.</> fu P.</> lckkf
nouuidlockfile = td P.</> "lck" P.</> lckkf
lckkf = literalOsPath "lck." <> kf
transferfile = td </> fu </> kf
uuidlockfile = td </> fu </> lckkf
nouuidlockfile = td </> literalOsPath "lck" </> lckkf
{- The transfer information file to use to record a failed Transfer -}
failedTransferFile :: Transfer -> Git.Repo -> RawFilePath
failedTransferFile :: Transfer -> Git.Repo -> OsPath
failedTransferFile (Transfer direction u kd) r =
failedTransferDir u direction r
P.</> keyFile (mkKey (const kd))
</> keyFile (mkKey (const kd))
{- Parses a transfer information filename to a Transfer. -}
parseTransferFile :: RawFilePath -> Maybe Transfer
parseTransferFile :: OsPath -> Maybe Transfer
parseTransferFile file
| "lck." `B.isPrefixOf` P.takeFileName file = Nothing
| literalOsPath "lck." `OS.isPrefixOf` takeFileName file = Nothing
| otherwise = case drop (length bits - 3) bits of
[direction, u, key] -> Transfer
<$> parseDirection direction
<$> parseDirection (fromOsPath direction)
<*> pure (toUUID u)
<*> fmap (fromKey id) (fileKey key)
_ -> Nothing
where
bits = P.splitDirectories file
bits = splitDirectories file
writeTransferInfoFile :: TransferInfo -> RawFilePath -> Annex ()
writeTransferInfoFile :: TransferInfo -> OsPath -> Annex ()
writeTransferInfoFile info tfile = writeLogFile tfile $ writeTransferInfo info
-- The file keeps whatever permissions it has, so should be used only
-- after it's been created with the right perms by writeTransferInfoFile.
updateTransferInfoFile :: TransferInfo -> FilePath -> IO ()
updateTransferInfoFile info tfile = writeFile tfile $ writeTransferInfo info
updateTransferInfoFile :: TransferInfo -> OsPath -> IO ()
updateTransferInfoFile info tfile =
writeFile (fromOsPath tfile) $ writeTransferInfo info
{- File format is a header line containing the startedTime and any
- bytesComplete value. Followed by a newline and the associatedFile.
@ -283,12 +282,12 @@ writeTransferInfo info = unlines
#endif
-- comes last; arbitrary content
, let AssociatedFile afile = associatedFile info
in maybe "" fromRawFilePath afile
in maybe "" fromOsPath afile
]
readTransferInfoFile :: Maybe PID -> RawFilePath -> IO (Maybe TransferInfo)
readTransferInfoFile :: Maybe PID -> OsPath -> IO (Maybe TransferInfo)
readTransferInfoFile mpid tfile = catchDefaultIO Nothing $
readTransferInfo mpid . decodeBS <$> F.readFile' (toOsPath tfile)
readTransferInfo mpid . decodeBS <$> F.readFile' tfile
readTransferInfo :: Maybe PID -> String -> Maybe TransferInfo
readTransferInfo mpid s = TransferInfo
@ -301,9 +300,13 @@ readTransferInfo mpid s = TransferInfo
<*> pure Nothing
<*> pure Nothing
<*> bytes
<*> pure (AssociatedFile (if null filename then Nothing else Just (toRawFilePath filename)))
<*> pure af
<*> pure False
where
af = AssociatedFile $
if null filename
then Nothing
else Just (toOsPath filename)
#ifdef mingw32_HOST_OS
(firstliner, otherlines) = separate (== '\n') s
(secondliner, rest) = separate (== '\n') otherlines
@ -326,16 +329,18 @@ readTransferInfo mpid s = TransferInfo
else pure Nothing -- not failure
{- The directory holding transfer information files for a given Direction. -}
transferDir :: Direction -> Git.Repo -> RawFilePath
transferDir direction r = gitAnnexTransferDir r P.</> formatDirection direction
transferDir :: Direction -> Git.Repo -> OsPath
transferDir direction r =
gitAnnexTransferDir r
</> toOsPath (formatDirection direction)
{- The directory holding failed transfer information files for a given
- Direction and UUID -}
failedTransferDir :: UUID -> Direction -> Git.Repo -> RawFilePath
failedTransferDir :: UUID -> Direction -> Git.Repo -> OsPath
failedTransferDir u direction r = gitAnnexTransferDir r
P.</> "failed"
P.</> formatDirection direction
P.</> B8.filter (/= '/') (fromUUID u)
</> literalOsPath "failed"
</> toOsPath (formatDirection direction)
</> OS.filter (/= unsafeFromChar '/') (fromUUID u)
prop_read_write_transferinfo :: TransferInfo -> Bool
prop_read_write_transferinfo info

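For reference, the paths built by transferFileAndLockFile and failedTransferFile above lay out on disk roughly like this (.git/annex/transfer as the location of gitAnnexTransferDir is an assumption here; <uuid> has any '/' filtered out, and <keyfile> is the keyFile encoding of the key):

    .git/annex/transfer/upload/<uuid>/<keyfile>               upload transfer info
    .git/annex/transfer/upload/<uuid>/lck.<keyfile>           upload lock file
    .git/annex/transfer/download/<uuid>/<keyfile>             download transfer info
    .git/annex/transfer/download/lck/lck.<keyfile>            download lock file
    .git/annex/transfer/download/<uuid>/lck.<keyfile>         extra lock also checked for downloads
    .git/annex/transfer/failed/<direction>/<uuid>/<keyfile>   failed transfer info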
View file

@ -32,8 +32,8 @@ import qualified Data.ByteString.Lazy as L
import qualified Data.Attoparsec.ByteString.Lazy as A
import qualified Data.Attoparsec.ByteString.Char8 as A8
transitionsLog :: RawFilePath
transitionsLog = "transitions.log"
transitionsLog :: OsPath
transitionsLog = literalOsPath "transitions.log"
data Transition
= ForgetGitHistory
@ -102,7 +102,7 @@ knownTransitionList = nub . rights . map transition . S.elems
{- Typically run with Annex.Branch.change, but we can't import Annex.Branch
- here since it depends on this module. -}
recordTransitions :: (RawFilePath -> (L.ByteString -> Builder) -> Annex ()) -> Transitions -> Annex ()
recordTransitions :: (OsPath -> (L.ByteString -> Builder) -> Annex ()) -> Transitions -> Annex ()
recordTransitions changer t = changer transitionsLog $
buildTransitions . S.union t . parseTransitionsStrictly "local"

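A hypothetical call site, from a module that can import Annex.Branch (where RegardingUUID is exported from is an assumption here, and RegardingUUID [] is only a placeholder for whichever UUIDs the change regards; this is a sketch of the shape, not code from the commit):

    recordSomeTransitions :: Transitions -> Annex ()
    recordSomeTransitions ts = recordTransitions
        (Annex.Branch.change (RegardingUUID []))
        ts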

@ -52,7 +52,7 @@ instance APIVersion V0 where protocolVersion _ = P2P.ProtocolVersion 0
newtype B64Key = B64Key Key
deriving (Show)
newtype B64FilePath = B64FilePath RawFilePath
newtype B64FilePath = B64FilePath OsPath
deriving (Show)
associatedFileToB64FilePath :: AssociatedFile -> Maybe B64FilePath
@ -233,11 +233,11 @@ instance FromHttpApiData (B64UUID t) where
Left err -> Left err
instance ToHttpApiData B64FilePath where
toUrlPiece (B64FilePath f) = encodeB64Text f
toUrlPiece (B64FilePath f) = encodeB64Text (fromOsPath f)
instance FromHttpApiData B64FilePath where
parseUrlPiece t = case decodeB64Text t of
Right b -> Right (B64FilePath b)
Right b -> Right (B64FilePath (toOsPath b))
Left err -> Left err
instance ToHttpApiData Offset where


@ -175,7 +175,7 @@ serveUnixSocket unixsocket serveconn = do
-- Connections have to authenticate to do anything,
-- so it's fine that other local users can connect to the
-- socket.
modifyFileMode (toRawFilePath unixsocket) $ addModes
modifyFileMode (toOsPath unixsocket) $ addModes
[groupReadMode, groupWriteMode, otherReadMode, otherWriteMode]
S.listen soc 2
forever $ do
@ -381,7 +381,7 @@ runRelayService conn runner service = case connRepo conn of
serviceproc repo = gitCreateProcess
[ Param cmd
, File (fromRawFilePath (repoPath repo))
, File (fromOsPath (repoPath repo))
] repo
serviceproc' repo = (serviceproc repo)
{ std_out = CreatePipe


@ -10,6 +10,7 @@
{-# LANGUAGE DeriveFunctor, TemplateHaskell, FlexibleContexts #-}
{-# LANGUAGE TypeSynonymInstances, FlexibleInstances, RankNTypes #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-# LANGUAGE OverloadedStrings #-}
{-# OPTIONS_GHC -fno-warn-orphans #-}
module P2P.Protocol where
@ -25,8 +26,9 @@ import Utility.AuthToken
import Utility.Applicative
import Utility.PartialPrelude
import Utility.Metered
import Utility.FileSystemEncoding
import Utility.MonotonicClock
import Utility.OsPath
import qualified Utility.OsString as OS
import Git.FilePath
import Annex.ChangedRefs (ChangedRefs)
import Types.NumCopies
@ -37,8 +39,6 @@ import Control.Monad.Free.TH
import Control.Monad.Catch
import System.Exit (ExitCode(..))
import System.IO
import qualified System.FilePath.ByteString as P
import qualified Data.ByteString as B
import qualified Data.ByteString.Lazy as L
import qualified Data.Set as S
import Data.Char
@ -224,17 +224,19 @@ instance Proto.Serializable Service where
instance Proto.Serializable ProtoAssociatedFile where
serialize (ProtoAssociatedFile (AssociatedFile Nothing)) = ""
serialize (ProtoAssociatedFile (AssociatedFile (Just af))) =
decodeBS $ toInternalGitPath $ encodeBS $ concatMap esc $ fromRawFilePath af
fromOsPath $ toInternalGitPath $
OS.concat $ map esc $ OS.unpack af
where
esc '%' = "%%"
esc c
| isSpace c = "%"
| otherwise = [c]
esc c = case OS.toChar c of
'%' -> literalOsPath "%%"
c' | isSpace c' -> literalOsPath "%"
_ -> OS.singleton c
deserialize s = case fromInternalGitPath $ toRawFilePath $ deesc [] s of
deserialize s = case fromInternalGitPath $ toOsPath $ deesc [] s of
f
| B.null f -> Just $ ProtoAssociatedFile $ AssociatedFile Nothing
| P.isRelative f -> Just $ ProtoAssociatedFile $
| OS.null f -> Just $ ProtoAssociatedFile $
AssociatedFile Nothing
| isRelative f -> Just $ ProtoAssociatedFile $
AssociatedFile $ Just f
| otherwise -> Nothing
where

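On a non-Windows system, where toInternalGitPath leaves relative paths alone, the escaping above works out to illustrations like these (each space collapses to "%", a literal '%' doubles, and only relative, non-empty paths survive deserialization):

    serialize (ProtoAssociatedFile (AssociatedFile (Just (literalOsPath "foo bar.txt"))))
        == "foo%bar.txt"
    serialize (ProtoAssociatedFile (AssociatedFile (Just (literalOsPath "50%.txt"))))
        == "50%%.txt"
    deserialize ""             -- no associated file
        == Just (ProtoAssociatedFile (AssociatedFile Nothing))
    deserialize "/etc/passwd"  -- absolute paths are rejected
        == Nothing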

@ -56,7 +56,7 @@ runHooks r starthook stophook a = do
firstrun lck
a
where
remoteid = uuidPath (uuid r)
remoteid = fromUUID (uuid r)
run Nothing = noop
run (Just command) = void $ liftIO $
boolSystem "sh" [Param "-c", Param command]


@ -9,16 +9,16 @@
module Types.Direction where
import qualified Data.ByteString as B
import Data.ByteString.Short
data Direction = Upload | Download
deriving (Eq, Ord, Show, Read)
formatDirection :: Direction -> B.ByteString
formatDirection :: Direction -> ShortByteString
formatDirection Upload = "upload"
formatDirection Download = "download"
parseDirection :: B.ByteString -> Maybe Direction
parseDirection :: ShortByteString -> Maybe Direction
parseDirection "upload" = Just Upload
parseDirection "download" = Just Download
parseDirection _ = Nothing

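formatDirection and parseDirection now use ShortByteString, presumably to match OsPath's internal representation when a direction is spliced into transfer file names (see transferDir elsewhere in this commit). Since the two functions agree on the literals, this property-style sketch should hold, assuming the definitions above are in scope:

    prop_direction_roundtrip :: Direction -> Bool
    prop_direction_roundtrip d = parseDirection (formatDirection d) == Just d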

@ -7,7 +7,7 @@
module Types.Transitions where
import Utility.RawFilePath
import Utility.OsPath
import qualified Data.ByteString.Lazy as L
import Data.ByteString.Builder
@ -16,4 +16,4 @@ data FileTransition
= ChangeFile Builder
| PreserveFile
type TransitionCalculator = RawFilePath -> L.ByteString -> FileTransition
type TransitionCalculator = OsPath -> L.ByteString -> FileTransition


@ -65,6 +65,14 @@ instance ToUUID SB.ShortByteString where
| SB.null b = NoUUID
| otherwise = UUID (SB.fromShort b)
-- OsPath is a ShortByteString internally, so this is the most
-- efficient conversion.
instance FromUUID OsPath where
fromUUID s = toOsPath (fromUUID s :: SB.ShortByteString)
instance ToUUID OsPath where
toUUID s = toUUID (fromOsPath s :: SB.ShortByteString)
instance FromUUID String where
fromUUID s = decodeBS (fromUUID s)
@ -102,9 +110,6 @@ buildUUID NoUUID = mempty
isUUID :: String -> Bool
isUUID = isJust . U.fromString
uuidPath :: UUID -> OsPath
uuidPath u = toOsPath (fromUUID u :: SB.ShortByteString)
-- A description of a UUID.
newtype UUIDDesc = UUIDDesc B.ByteString
deriving (Eq, Sem.Semigroup, Monoid, IsString)

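The new FromUUID/ToUUID OsPath instances above let a UUID be used directly as a path component; the transfer log code earlier in this commit does exactly that, additionally filtering out '/' for safety. A minimal sketch, assuming the instances and Utility.OsPath's </> are in scope (remoteSubdir is a hypothetical helper):

    -- A per-remote subdirectory named after its UUID.
    remoteSubdir :: OsPath -> UUID -> OsPath
    remoteSubdir base u = base </> fromUUID u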

@ -28,7 +28,6 @@ import Config
import Annex.Perms
import Utility.InodeCache
import Annex.InodeSentinal
import qualified Utility.RawFilePath as R
import qualified Utility.FileIO as F
setIndirect :: Annex ()
@ -79,27 +78,27 @@ switchHEADBack = maybe noop switch =<< inRepo Git.Branch.currentUnsafe
Nothing -> inRepo $ Git.Branch.checkout orighead
{- Absolute FilePaths of Files in the tree that are associated with a key. -}
associatedFiles :: Key -> Annex [FilePath]
associatedFiles :: Key -> Annex [OsPath]
associatedFiles key = do
files <- associatedFilesRelative key
top <- fromRawFilePath <$> fromRepo Git.repoPath
top <- fromRepo Git.repoPath
return $ map (top </>) files
{- List of files in the tree that are associated with a key, relative to
- the top of the repo. -}
associatedFilesRelative :: Key -> Annex [FilePath]
associatedFilesRelative :: Key -> Annex [OsPath]
associatedFilesRelative key = do
mapping <- calcRepo (gitAnnexMapping key)
liftIO $ catchDefaultIO [] $ F.withFile (toOsPath mapping) ReadMode $ \h ->
liftIO $ catchDefaultIO [] $ F.withFile mapping ReadMode $ \h ->
-- Read strictly to ensure the file is closed promptly
lines <$> hGetContentsStrict h
map toOsPath . lines <$> hGetContentsStrict h
{- Removes the list of associated files. -}
removeAssociatedFiles :: Key -> Annex ()
removeAssociatedFiles key = do
mapping <- calcRepo $ gitAnnexMapping key
modifyContentDir mapping $
liftIO $ removeWhenExistsWith R.removeLink mapping
liftIO $ removeWhenExistsWith removeFile mapping
{- Checks if a file in the tree, associated with a key, has not been modified.
-
@ -107,10 +106,8 @@ removeAssociatedFiles key = do
- expensive checksum, this relies on a cache that contains the file's
- expected mtime and inode.
-}
goodContent :: Key -> FilePath -> Annex Bool
goodContent key file =
sameInodeCache (toRawFilePath file)
=<< recordedInodeCache key
goodContent :: Key -> OsPath -> Annex Bool
goodContent key file = sameInodeCache file =<< recordedInodeCache key
{- Gets the recorded inode cache for a key.
-
@ -120,26 +117,25 @@ recordedInodeCache :: Key -> Annex [InodeCache]
recordedInodeCache key = withInodeCacheFile key $ \f ->
liftIO $ catchDefaultIO [] $
mapMaybe (readInodeCache . decodeBS) . fileLines'
<$> F.readFile' (toOsPath f)
<$> F.readFile' f
{- Removes an inode cache. -}
removeInodeCache :: Key -> Annex ()
removeInodeCache key = withInodeCacheFile key $ \f ->
modifyContentDir f $
liftIO $ removeWhenExistsWith R.removeLink f
modifyContentDir f $ liftIO $ removeWhenExistsWith removeFile f
withInodeCacheFile :: Key -> (RawFilePath -> Annex a) -> Annex a
withInodeCacheFile :: Key -> (OsPath -> Annex a) -> Annex a
withInodeCacheFile key a = a =<< calcRepo (gitAnnexInodeCache key)
{- File that maps from a key to the file(s) in the git repository. -}
gitAnnexMapping :: Key -> Git.Repo -> GitConfig -> IO RawFilePath
gitAnnexMapping :: Key -> Git.Repo -> GitConfig -> IO OsPath
gitAnnexMapping key r c = do
loc <- gitAnnexLocation key r c
return $ loc <> ".map"
return $ loc <> literalOsPath ".map"
{- File that caches information about a key's content, used to determine
- if a file has changed. -}
gitAnnexInodeCache :: Key -> Git.Repo -> GitConfig -> IO RawFilePath
gitAnnexInodeCache :: Key -> Git.Repo -> GitConfig -> IO OsPath
gitAnnexInodeCache key r c = do
loc <- gitAnnexLocation key r c
return $ loc <> ".cache"
return $ loc <> literalOsPath ".cache"

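So for a key whose object content lives at some <location> (as computed by gitAnnexLocation), the two sidecar files sit next to it; the layout below is illustrative:

    <location>.map     files in the tree associated with the key, one per line
    <location>.cache   recorded InodeCache entries for the key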

@ -44,12 +44,12 @@ copyMetaDataParams meta = map snd $ filter fst
{- The cp command is used, because I hate reinventing the wheel,
- and because this allows easy access to features like cp --reflink
- and preserving metadata. -}
copyFileExternal :: CopyMetaData -> FilePath -> FilePath -> IO Bool
copyFileExternal :: CopyMetaData -> OsPath -> OsPath -> IO Bool
copyFileExternal meta src dest = do
-- Delete any existing dest file because an unwritable file
-- would prevent cp from working.
void $ tryIO $ removeFile (toOsPath dest)
boolSystem "cp" $ params ++ [File src, File dest]
void $ tryIO $ removeFile dest
boolSystem "cp" $ params ++ [File (fromOsPath src), File (fromOsPath dest)]
where
params
| BuildInfo.cp_reflink_supported =
@ -87,10 +87,10 @@ copyCoW meta src dest
{- Create a hard link if the filesystem allows it, and fall back to copying
- the file. -}
createLinkOrCopy :: RawFilePath -> RawFilePath -> IO Bool
createLinkOrCopy :: OsPath -> OsPath -> IO Bool
createLinkOrCopy src dest = go `catchIO` const fallback
where
go = do
R.createLink src dest
R.createLink (fromOsPath src) (fromOsPath dest)
return True
fallback = copyFileExternal CopyAllMetaData (fromRawFilePath src) (fromRawFilePath dest)
fallback = copyFileExternal CopyAllMetaData src dest


@ -13,6 +13,7 @@ module Utility.Shell (
findShellCommand,
) where
import Utility.OsPath
import Utility.SafeCommand
#ifdef mingw32_HOST_OS
import Utility.Path
@ -35,12 +36,12 @@ shebang = "#!" ++ shellPath
-- parse it for shebang.
--
-- This has no effect on Unix.
findShellCommand :: FilePath -> IO (FilePath, [CommandParam])
findShellCommand :: OsPath -> IO (FilePath, [CommandParam])
findShellCommand f = do
#ifndef mingw32_HOST_OS
defcmd
#else
l <- catchDefaultIO Nothing $ headMaybe . lines <$> readFile f
l <- catchDefaultIO Nothing $ headMaybe . lines <$> readFile (fromOsPath f)
case l of
Just ('#':'!':rest) -> case words rest of
[] -> defcmd
@ -55,4 +56,4 @@ findShellCommand f = do
_ -> defcmd
#endif
where
defcmd = return (f, [])
defcmd = return (fromOsPath f, [])
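runHook, earlier in this commit, is the typical caller: it resolves the hook script with findShellCommand and runs whatever command and parameters that yields. A minimal sketch of that shape, assuming the definitions above are in scope (runScript is a hypothetical name; on non-Windows this degenerates to running the file directly with no extra parameters):

    runScript :: OsPath -> [CommandParam] -> IO Bool
    runScript script ps = do
        (c, cps) <- findShellCommand script
        boolSystem c (cps ++ ps)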