OsPath conversion

This converts most of git-annex to using OsPath, although some
RawFilePath and FilePath remain.

(When built without the OsPath build flag, it falls back to using
type OsPath = RawFilePath.)

The goals are:

1) improved performance, by using OsPath end-to-end when possible
2) potentially avoiding memory use problems caused by pinned strict
   ByteString, since OsPath uses ShortByteString
3) eventually eliminating the filepath-bytestring dependency, so I don't
   need to keep maintaining that library (this doesn't get all the way
   there, but close)
4) generally improved type safety, since OsPath is a newtype, while
   FilePath and RawFilePath are just type aliases (see the sketch below)
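To make goals 2 and 4 concrete, here is a minimal sketch of the kind of compatibility shim the fallback implies. It is not git-annex's actual Utility.OsPath module: the WITH_OSPATH flag name is an assumption, and these helpers convert via String for brevity, whereas git-annex's own toOsPath/fromOsPath convert to and from RawFilePath.

{-# LANGUAGE CPP #-}
-- A minimal sketch, assuming a WITH_OSPATH build flag; not git-annex's
-- actual Utility.OsPath module.
module OsPathSketch where

#ifdef WITH_OSPATH
import System.OsPath (OsPath, encodeUtf, decodeUtf)
import Data.Maybe (fromJust)

-- The real OsPath is a distinct type backed by an unpinned
-- ShortByteString, so mixing it up with String or ByteString is a
-- compile error rather than a silent conversion.
toOsPath :: FilePath -> OsPath
toOsPath = fromJust . encodeUtf

fromOsPath :: OsPath -> FilePath
fromOsPath = fromJust . decodeUtf
#else
import qualified Data.ByteString.Char8 as B8

-- Fallback: OsPath is only another name for a strict (pinned)
-- ByteString, mirroring "type OsPath = RawFilePath" above, so the same
-- code still compiles but without the added type safety.
type OsPath = B8.ByteString

toOsPath :: FilePath -> OsPath
toOsPath = B8.pack

fromOsPath :: OsPath -> FilePath
fromOsPath = B8.unpack
#endif

With the flag enabled, passing a FilePath or RawFilePath where an OsPath is expected fails to compile, which is what makes the type-checker-driven conversion described next tractable.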
This is the result of a type-checker-driven process. I started by
converting from System.Directory to System.Directory.OsPath, and from
System.FilePath to System.OsPath. Then I fixed all the compile errors,
which took 3 weeks of work.
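As an illustration of what that mechanical conversion looks like at a call site (a hypothetical helper, not code from this commit), switching a module usually means swapping its imports to the OsPath variants and turning string literals into OsPath literals:

{-# LANGUAGE QuasiQuotes #-}
-- Hypothetical example of the import-driven conversion; the module,
-- function and path names are made up for illustration.
module ConvertedExample where

-- Before: import System.FilePath ((</>)) and import System.Directory.
-- After: the OsPath variants of the same APIs.
import System.OsPath (OsPath, (</>), osp)
import System.Directory.OsPath (createDirectoryIfMissing, doesFileExist)

-- [osp|...|] builds an OsPath literal here; in the diff below the same
-- job is done by git-annex's literalOsPath helper.
stateFile :: OsPath -> OsPath
stateFile dir = dir </> [osp|state|] </> [osp|index|]

ensureState :: OsPath -> IO Bool
ensureState dir = do
  createDirectoryIfMissing True (dir </> [osp|state|])
  doesFileExist (stateFile dir)

Much of the diff below follows this shape; the remaining churn is at boundaries where a library still wants a FilePath or RawFilePath and an explicit conversion is needed.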
Unfortunately, there are several test suite failures at this point.
Also, this has only been built on Linux; on Windows and OSX there are
probably ifdefs whose code still needs to be converted.

Note that there is a parallel line of commits, starting with
05bdce328d, which is the incremental progress as I worked on this. It
will be merged with this commit. In some cases, commits in that line
explain in more detail the reasons for specific changes.
parent d46504e51e
commit f1ba21d698
369 changed files with 4453 additions and 4046 deletions

Annex.hs (4 changed lines)
@@ -221,7 +221,7 @@ data AnnexState = AnnexState
 , existinghooks :: M.Map Git.Hook.Hook Bool
 , workers :: Maybe (TMVar (WorkerPool (AnnexState, AnnexRead)))
 , cachedcurrentbranch :: (Maybe (Maybe Git.Branch, Maybe Adjustment))
-, cachedgitenv :: Maybe (AltIndexFile, FilePath, [(String, String)])
+, cachedgitenv :: Maybe (AltIndexFile, OsPath, [(String, String)])
 , urloptions :: Maybe UrlOptions
 , insmudgecleanfilter :: Bool
 , getvectorclock :: IO CandidateVectorClock
@@ -465,7 +465,7 @@ withCurrentState a = do
 - because the git repo paths are stored relative.
 - Instead, use this.
 -}
-changeDirectory :: FilePath -> Annex ()
+changeDirectory :: OsPath -> Annex ()
 changeDirectory d = do
 r <- liftIO . Git.adjustPath absPath =<< gitRepo
 liftIO $ setCurrentDirectory d
@@ -161,7 +161,7 @@ adjustToPointer ti@(TreeItem f _m s) = catKey s >>= \case
 Database.Keys.addAssociatedFile k f
 exe <- catchDefaultIO False $
 (isExecutable . fileMode) <$>
-(liftIO . R.getFileStatus
+(liftIO . R.getFileStatus . fromOsPath
 =<< calcRepo (gitAnnexLocation k))
 let mode = fromTreeItemType $
 if exe then TreeExecutable else TreeFile
@@ -171,13 +171,13 @@ adjustToPointer ti@(TreeItem f _m s) = catKey s >>= \case
 adjustToSymlink :: TreeItem -> Annex (Maybe TreeItem)
 adjustToSymlink = adjustToSymlink' gitAnnexLink
 
-adjustToSymlink' :: (RawFilePath -> Key -> Git.Repo -> GitConfig -> IO RawFilePath) -> TreeItem -> Annex (Maybe TreeItem)
+adjustToSymlink' :: (OsPath -> Key -> Git.Repo -> GitConfig -> IO OsPath) -> TreeItem -> Annex (Maybe TreeItem)
 adjustToSymlink' gitannexlink ti@(TreeItem f _m s) = catKey s >>= \case
 Just k -> do
 absf <- inRepo $ \r -> absPath $ fromTopFilePath f r
 linktarget <- calcRepo $ gitannexlink absf k
 Just . TreeItem f (fromTreeItemType TreeSymlink)
-<$> hashSymlink linktarget
+<$> hashSymlink (fromOsPath linktarget)
 Nothing -> return (Just ti)
 
 -- This is a hidden branch ref, that's used as the basis for the AdjBranch,
@@ -269,7 +269,7 @@ updateAdjustedBranch adj (AdjBranch currbranch) origbranch
 -- origbranch.
 _ <- propigateAdjustedCommits' True origbranch adj commitlck
 
-origheadfile <- inRepo $ F.readFile' . toOsPath . Git.Ref.headFile
+origheadfile <- inRepo $ F.readFile' . Git.Ref.headFile
 origheadsha <- inRepo (Git.Ref.sha currbranch)
 
 b <- adjustBranch adj origbranch
@@ -282,7 +282,7 @@ updateAdjustedBranch adj (AdjBranch currbranch) origbranch
 Just s -> do
 inRepo $ \r -> do
 let newheadfile = fromRef' s
-F.writeFile' (toOsPath (Git.Ref.headFile r)) newheadfile
+F.writeFile' (Git.Ref.headFile r) newheadfile
 return (Just newheadfile)
 _ -> return Nothing
 
@@ -296,9 +296,9 @@ updateAdjustedBranch adj (AdjBranch currbranch) origbranch
 unless ok $ case newheadfile of
 Nothing -> noop
 Just v -> preventCommits $ \_commitlck -> inRepo $ \r -> do
-v' <- F.readFile' (toOsPath (Git.Ref.headFile r))
+v' <- F.readFile' (Git.Ref.headFile r)
 when (v == v') $
-F.writeFile' (toOsPath (Git.Ref.headFile r)) origheadfile
+F.writeFile' (Git.Ref.headFile r) origheadfile
 
 return ok
 | otherwise = preventCommits $ \commitlck -> do
@@ -451,7 +451,7 @@ preventCommits = bracket setup cleanup
 where
 setup = do
 lck <- fromRepo $ indexFileLock . indexFile
-liftIO $ Git.LockFile.openLock (fromRawFilePath lck)
+liftIO $ Git.LockFile.openLock lck
 cleanup = liftIO . Git.LockFile.closeLock
 
 {- Commits a given adjusted tree, with the provided parent ref.
@@ -631,7 +631,7 @@ reverseAdjustedTree basis adj csha = do
 where
 m = M.fromList $ map (\i@(TreeItem f' _ _) -> (norm f', i)) $
 map diffTreeToTreeItem changes
-norm = normalise . fromRawFilePath . getTopFilePath
+norm = normalise . getTopFilePath
 
 diffTreeToTreeItem :: Git.DiffTree.DiffTreeItem -> TreeItem
 diffTreeToTreeItem dti = TreeItem
@@ -29,11 +29,8 @@ import Annex.GitOverlay
 import Utility.Tmp.Dir
 import Utility.CopyFile
 import Utility.Directory.Create
-import qualified Utility.RawFilePath as R
 import qualified Utility.FileIO as F
 
-import qualified System.FilePath.ByteString as P
-
 canMergeToAdjustedBranch :: Branch -> (OrigBranch, Adjustment) -> Annex Bool
 canMergeToAdjustedBranch tomerge (origbranch, adj) =
 inRepo $ Git.Branch.changed currbranch tomerge
@@ -74,23 +71,24 @@ mergeToAdjustedBranch tomerge (origbranch, adj) mergeconfig canresolvemerge comm
 changestomerge (Just updatedorig) = withOtherTmp $ \othertmpdir -> do
 git_dir <- fromRepo Git.localGitDir
 tmpwt <- fromRepo gitAnnexMergeDir
-withTmpDirIn (fromRawFilePath othertmpdir) (toOsPath "git") $ \tmpgit -> withWorkTreeRelated tmpgit $
+withTmpDirIn othertmpdir (literalOsPath "git") $ \tmpgit -> withWorkTreeRelated tmpgit $
 withemptydir git_dir tmpwt $ withWorkTree tmpwt $ do
-let tmpgit' = toRawFilePath tmpgit
-liftIO $ writeFile (tmpgit </> "HEAD") (fromRef updatedorig)
+liftIO $ F.writeFile'
+(tmpgit </> literalOsPath "HEAD")
+(fromRef' updatedorig)
 -- Copy in refs and packed-refs, to work
 -- around bug in git 2.13.0, which
 -- causes it not to look in GIT_DIR for refs.
 refs <- liftIO $ emptyWhenDoesNotExist $
 dirContentsRecursive $
-git_dir P.</> "refs"
-let refs' = (git_dir P.</> "packed-refs") : refs
+git_dir </> literalOsPath "refs"
+let refs' = (git_dir </> literalOsPath "packed-refs") : refs
 liftIO $ forM_ refs' $ \src -> do
-whenM (R.doesPathExist src) $ do
+whenM (doesFileExist src) $ do
 dest <- relPathDirToFile git_dir src
-let dest' = tmpgit' P.</> dest
+let dest' = tmpgit </> dest
 createDirectoryUnder [git_dir]
-(P.takeDirectory dest')
+(takeDirectory dest')
 void $ createLinkOrCopy src dest'
 -- This reset makes git merge not care
 -- that the work tree is empty; otherwise
@@ -107,7 +105,7 @@ mergeToAdjustedBranch tomerge (origbranch, adj) mergeconfig canresolvemerge comm
 if merged
 then do
 !mergecommit <- liftIO $ extractSha
-<$> F.readFile' (toOsPath (tmpgit' P.</> "HEAD"))
+<$> F.readFile' (tmpgit </> literalOsPath "HEAD")
 -- This is run after the commit lock is dropped.
 return $ postmerge mergecommit
 else return $ return False
@@ -118,7 +116,7 @@ mergeToAdjustedBranch tomerge (origbranch, adj) mergeconfig canresolvemerge comm
 setup = do
 whenM (doesDirectoryExist d) $
 removeDirectoryRecursive d
-createDirectoryUnder [git_dir] (toRawFilePath d)
+createDirectoryUnder [git_dir] d
 cleanup _ = removeDirectoryRecursive d
 
 {- A merge commit has been made between the basisbranch and
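The hunks above repeat a small number of patterns: literal path components move to literalOsPath, the qualified System.FilePath.ByteString operators (P.</>, P.takeDirectory) become the plain OsPath equivalents, and a conversion appears only where a call still takes a raw ByteString path. A standalone sketch of that last boundary case follows; the helper below is an assumption for illustration, not git-annex's fromOsPath (which converts via the underlying ShortByteString rather than String):

{-# LANGUAGE QuasiQuotes #-}
-- Illustrative POSIX-only sketch; names here are not git-annex internals.
module BoundarySketch where

import System.OsPath (OsPath, (</>), osp, decodeUtf)
import qualified System.Posix.Files.ByteString as PB
import qualified Data.ByteString.Char8 as B8
import Data.Bits ((.&.))
import Data.Maybe (fromJust)

-- Stand-in conversion for handing an OsPath to a syscall wrapper that
-- still takes a ByteString path (simplified; goes via String).
toRawPath :: OsPath -> B8.ByteString
toRawPath = B8.pack . fromJust . decodeUtf

-- Path manipulation stays in OsPath; only the syscall sees raw bytes.
hookIsExecutable :: OsPath -> IO Bool
hookIsExecutable gitdir = do
  let p = gitdir </> [osp|hooks|] </> [osp|post-receive|]
  st <- PB.getFileStatus (toRawPath p)
  return (PB.isRegularFile st && PB.fileMode st .&. PB.ownerExecuteMode /= 0)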
|
@ -133,7 +133,7 @@ autoMergeFrom' branch currbranch mergeconfig commitmode willresolvemerge toresol
|
|||
resolveMerge :: Maybe Git.Ref -> Git.Ref -> Bool -> Annex Bool
|
||||
resolveMerge us them inoverlay = do
|
||||
top <- if inoverlay
|
||||
then pure "."
|
||||
then pure (literalOsPath ".")
|
||||
else fromRepo Git.repoPath
|
||||
(fs, cleanup) <- inRepo (LsFiles.unmerged [top])
|
||||
srcmap <- if inoverlay
|
||||
|
@ -150,7 +150,7 @@ resolveMerge us them inoverlay = do
|
|||
unless (null deleted) $
|
||||
Annex.Queue.addCommand [] "rm"
|
||||
[Param "--quiet", Param "-f", Param "--"]
|
||||
(map fromRawFilePath deleted)
|
||||
(map fromOsPath deleted)
|
||||
void $ liftIO cleanup2
|
||||
|
||||
when merged $ do
|
||||
|
@ -167,7 +167,7 @@ resolveMerge us them inoverlay = do
|
|||
, LsFiles.unmergedSiblingFile u
|
||||
]
|
||||
|
||||
resolveMerge' :: InodeMap -> Maybe Git.Ref -> Git.Ref -> Bool -> LsFiles.Unmerged -> Annex ([Key], Maybe FilePath)
|
||||
resolveMerge' :: InodeMap -> Maybe Git.Ref -> Git.Ref -> Bool -> LsFiles.Unmerged -> Annex ([Key], Maybe OsPath)
|
||||
resolveMerge' _ Nothing _ _ _ = return ([], Nothing)
|
||||
resolveMerge' unstagedmap (Just us) them inoverlay u = do
|
||||
kus <- getkey LsFiles.valUs
|
||||
|
@ -182,7 +182,7 @@ resolveMerge' unstagedmap (Just us) them inoverlay u = do
|
|||
-- files, so delete here.
|
||||
unless inoverlay $
|
||||
unless (islocked LsFiles.valUs) $
|
||||
liftIO $ removeWhenExistsWith R.removeLink (toRawFilePath file)
|
||||
liftIO $ removeWhenExistsWith removeFile file
|
||||
| otherwise -> resolveby [keyUs, keyThem] $
|
||||
-- Only resolve using symlink when both
|
||||
-- were locked, otherwise use unlocked
|
||||
|
@ -204,8 +204,8 @@ resolveMerge' unstagedmap (Just us) them inoverlay u = do
|
|||
-- Neither side is annexed file; cannot resolve.
|
||||
(Nothing, Nothing) -> return ([], Nothing)
|
||||
where
|
||||
file = fromRawFilePath $ LsFiles.unmergedFile u
|
||||
sibfile = fromRawFilePath <$> LsFiles.unmergedSiblingFile u
|
||||
file = LsFiles.unmergedFile u
|
||||
sibfile = LsFiles.unmergedSiblingFile u
|
||||
|
||||
getkey select =
|
||||
case select (LsFiles.unmergedSha u) of
|
||||
|
@ -230,16 +230,15 @@ resolveMerge' unstagedmap (Just us) them inoverlay u = do
|
|||
dest = variantFile file key
|
||||
destmode = fromTreeItemType <$> select (LsFiles.unmergedTreeItemType u)
|
||||
|
||||
stagefile :: FilePath -> Annex FilePath
|
||||
stagefile :: OsPath -> Annex OsPath
|
||||
stagefile f
|
||||
| inoverlay = (</> f) . fromRawFilePath <$> fromRepo Git.repoPath
|
||||
| inoverlay = (</> f) <$> fromRepo Git.repoPath
|
||||
| otherwise = pure f
|
||||
|
||||
makesymlink key dest = do
|
||||
let rdest = toRawFilePath dest
|
||||
l <- calcRepo $ gitAnnexLink rdest key
|
||||
unless inoverlay $ replacewithsymlink rdest l
|
||||
dest' <- toRawFilePath <$> stagefile dest
|
||||
l <- fromOsPath <$> calcRepo (gitAnnexLink dest key)
|
||||
unless inoverlay $ replacewithsymlink dest l
|
||||
dest' <- stagefile dest
|
||||
stageSymlink dest' =<< hashSymlink l
|
||||
|
||||
replacewithsymlink dest link = replaceWorkTreeFile dest $
|
||||
|
@ -248,27 +247,27 @@ resolveMerge' unstagedmap (Just us) them inoverlay u = do
|
|||
makepointer key dest destmode = do
|
||||
unless inoverlay $
|
||||
unlessM (reuseOldFile unstagedmap key file dest) $
|
||||
linkFromAnnex key (toRawFilePath dest) destmode >>= \case
|
||||
linkFromAnnex key dest destmode >>= \case
|
||||
LinkAnnexFailed -> liftIO $
|
||||
writePointerFile (toRawFilePath dest) key destmode
|
||||
writePointerFile dest key destmode
|
||||
_ -> noop
|
||||
dest' <- toRawFilePath <$> stagefile dest
|
||||
dest' <- stagefile dest
|
||||
stagePointerFile dest' destmode =<< hashPointerFile key
|
||||
unless inoverlay $
|
||||
Database.Keys.addAssociatedFile key
|
||||
=<< inRepo (toTopFilePath (toRawFilePath dest))
|
||||
=<< inRepo (toTopFilePath dest)
|
||||
|
||||
{- Stage a graft of a directory or file from a branch
|
||||
- and update the work tree. -}
|
||||
graftin b item selectwant selectwant' selectunwant = do
|
||||
Annex.Queue.addUpdateIndex
|
||||
=<< fromRepo (UpdateIndex.lsSubTree b item)
|
||||
|
||||
=<< fromRepo (UpdateIndex.lsSubTree b (fromOsPath item))
|
||||
|
||||
let replacefile isexecutable = case selectwant' (LsFiles.unmergedSha u) of
|
||||
Nothing -> noop
|
||||
Just sha -> replaceWorkTreeFile (toRawFilePath item) $ \tmp -> do
|
||||
Just sha -> replaceWorkTreeFile item $ \tmp -> do
|
||||
c <- catObject sha
|
||||
liftIO $ F.writeFile (toOsPath tmp) c
|
||||
liftIO $ F.writeFile tmp c
|
||||
when isexecutable $
|
||||
liftIO $ void $ tryIO $
|
||||
modifyFileMode tmp $
|
||||
|
@ -281,7 +280,7 @@ resolveMerge' unstagedmap (Just us) them inoverlay u = do
|
|||
Nothing -> noop
|
||||
Just sha -> do
|
||||
link <- catSymLinkTarget sha
|
||||
replacewithsymlink (toRawFilePath item) link
|
||||
replacewithsymlink item (fromOsPath link)
|
||||
(Just TreeFile, Just TreeSymlink) -> replacefile False
|
||||
(Just TreeExecutable, Just TreeSymlink) -> replacefile True
|
||||
_ -> ifM (liftIO $ doesDirectoryExist item)
|
||||
|
@ -305,9 +304,9 @@ resolveMerge' unstagedmap (Just us) them inoverlay u = do
|
|||
, Param "--cached"
|
||||
, Param "--"
|
||||
]
|
||||
(catMaybes [Just file, sibfile])
|
||||
(map fromOsPath $ catMaybes [Just file, sibfile])
|
||||
liftIO $ maybe noop
|
||||
(removeWhenExistsWith R.removeLink . toRawFilePath)
|
||||
(removeWhenExistsWith removeFile)
|
||||
sibfile
|
||||
void a
|
||||
return (ks, Just file)
|
||||
|
@ -322,13 +321,13 @@ resolveMerge' unstagedmap (Just us) them inoverlay u = do
|
|||
- C) are pointers to or have the content of keys that were involved
|
||||
- in the merge.
|
||||
-}
|
||||
cleanConflictCruft :: [Key] -> [FilePath] -> InodeMap -> Annex ()
|
||||
cleanConflictCruft :: [Key] -> [OsPath] -> InodeMap -> Annex ()
|
||||
cleanConflictCruft resolvedks resolvedfs unstagedmap = do
|
||||
is <- S.fromList . map (inodeCacheToKey Strongly) . concat
|
||||
<$> mapM Database.Keys.getInodeCaches resolvedks
|
||||
forM_ (M.toList unstagedmap) $ \(i, f) ->
|
||||
whenM (matchesresolved is i f) $
|
||||
liftIO $ removeWhenExistsWith R.removeLink (toRawFilePath f)
|
||||
liftIO $ removeWhenExistsWith removeFile f
|
||||
where
|
||||
fs = S.fromList resolvedfs
|
||||
ks = S.fromList resolvedks
|
||||
|
@ -336,19 +335,24 @@ cleanConflictCruft resolvedks resolvedfs unstagedmap = do
|
|||
matchesresolved is i f
|
||||
| S.member f fs || S.member (conflictCruftBase f) fs = anyM id
|
||||
[ pure $ either (const False) (`S.member` is) i
|
||||
, inks <$> isAnnexLink (toRawFilePath f)
|
||||
, inks <$> liftIO (isPointerFile (toRawFilePath f))
|
||||
, inks <$> isAnnexLink f
|
||||
, inks <$> liftIO (isPointerFile f)
|
||||
]
|
||||
| otherwise = return False
|
||||
|
||||
conflictCruftBase :: FilePath -> FilePath
|
||||
conflictCruftBase f = reverse $ drop 1 $ dropWhile (/= '~') $ reverse f
|
||||
conflictCruftBase :: OsPath -> OsPath
|
||||
conflictCruftBase = toOsPath
|
||||
. reverse
|
||||
. drop 1
|
||||
. dropWhile (/= '~')
|
||||
. reverse
|
||||
. fromOsPath
|
||||
|
||||
{- When possible, reuse an existing file from the srcmap as the
|
||||
- content of a worktree file in the resolved merge. It must have the
|
||||
- same name as the origfile, or a name that git would use for conflict
|
||||
- cruft. And, its inode cache must be a known one for the key. -}
|
||||
reuseOldFile :: InodeMap -> Key -> FilePath -> FilePath -> Annex Bool
|
||||
reuseOldFile :: InodeMap -> Key -> OsPath -> OsPath -> Annex Bool
|
||||
reuseOldFile srcmap key origfile destfile = do
|
||||
is <- map (inodeCacheToKey Strongly)
|
||||
<$> Database.Keys.getInodeCaches key
|
||||
|
@ -374,19 +378,18 @@ commitResolvedMerge commitmode = do
|
|||
, Param "git-annex automatic merge conflict fix"
|
||||
]
|
||||
|
||||
type InodeMap = M.Map (Either FilePath InodeCacheKey) FilePath
|
||||
type InodeMap = M.Map (Either OsPath InodeCacheKey) OsPath
|
||||
|
||||
inodeMap :: Annex ([RawFilePath], IO Bool) -> Annex InodeMap
|
||||
inodeMap :: Annex ([OsPath], IO Bool) -> Annex InodeMap
|
||||
inodeMap getfiles = do
|
||||
(fs, cleanup) <- getfiles
|
||||
fsis <- forM fs $ \f -> do
|
||||
s <- liftIO $ R.getSymbolicLinkStatus f
|
||||
let f' = fromRawFilePath f
|
||||
s <- liftIO $ R.getSymbolicLinkStatus (fromOsPath f)
|
||||
if isSymbolicLink s
|
||||
then pure $ Just (Left f', f')
|
||||
then pure $ Just (Left f, f)
|
||||
else withTSDelta (\d -> liftIO $ toInodeCache d f s)
|
||||
>>= return . \case
|
||||
Just i -> Just (Right (inodeCacheToKey Strongly i), f')
|
||||
Just i -> Just (Right (inodeCacheToKey Strongly i), f)
|
||||
Nothing -> Nothing
|
||||
void $ liftIO cleanup
|
||||
return $ M.fromList $ catMaybes fsis
|
||||
|
|
|
@ -54,7 +54,6 @@ import Data.Char
|
|||
import Data.ByteString.Builder
|
||||
import Control.Concurrent (threadDelay)
|
||||
import Control.Concurrent.MVar
|
||||
import qualified System.FilePath.ByteString as P
|
||||
import System.PosixCompat.Files (isRegularFile)
|
||||
|
||||
import Annex.Common
|
||||
|
@ -313,7 +312,7 @@ updateTo' pairs = do
|
|||
- transitions that have not been applied to all refs will be applied on
|
||||
- the fly.
|
||||
-}
|
||||
get :: RawFilePath -> Annex L.ByteString
|
||||
get :: OsPath -> Annex L.ByteString
|
||||
get file = do
|
||||
st <- update
|
||||
case getCache file st of
|
||||
|
@ -353,7 +352,7 @@ getUnmergedRefs = unmergedRefs <$> update
|
|||
- using some optimised method. The journal has to be checked, in case
|
||||
- it has a newer version of the file that has not reached the branch yet.
|
||||
-}
|
||||
precache :: RawFilePath -> L.ByteString -> Annex ()
|
||||
precache :: OsPath -> L.ByteString -> Annex ()
|
||||
precache file branchcontent = do
|
||||
st <- getState
|
||||
content <- if journalIgnorable st
|
||||
|
@ -369,12 +368,12 @@ precache file branchcontent = do
|
|||
- reflect changes in remotes.
|
||||
- (Changing the value this returns, and then merging is always the
|
||||
- same as using get, and then changing its value.) -}
|
||||
getLocal :: RawFilePath -> Annex L.ByteString
|
||||
getLocal :: OsPath -> Annex L.ByteString
|
||||
getLocal = getLocal' (GetPrivate True)
|
||||
|
||||
getLocal' :: GetPrivate -> RawFilePath -> Annex L.ByteString
|
||||
getLocal' :: GetPrivate -> OsPath -> Annex L.ByteString
|
||||
getLocal' getprivate file = do
|
||||
fastDebug "Annex.Branch" ("read " ++ fromRawFilePath file)
|
||||
fastDebug "Annex.Branch" ("read " ++ fromOsPath file)
|
||||
go =<< getJournalFileStale getprivate file
|
||||
where
|
||||
go NoJournalledContent = getRef fullname file
|
||||
|
@ -384,14 +383,14 @@ getLocal' getprivate file = do
|
|||
return (v <> journalcontent)
|
||||
|
||||
{- Gets the content of a file as staged in the branch's index. -}
|
||||
getStaged :: RawFilePath -> Annex L.ByteString
|
||||
getStaged :: OsPath -> Annex L.ByteString
|
||||
getStaged = getRef indexref
|
||||
where
|
||||
-- This makes git cat-file be run with ":file",
|
||||
-- so it looks at the index.
|
||||
indexref = Ref ""
|
||||
|
||||
getHistorical :: RefDate -> RawFilePath -> Annex L.ByteString
|
||||
getHistorical :: RefDate -> OsPath -> Annex L.ByteString
|
||||
getHistorical date file =
|
||||
-- This check avoids some ugly error messages when the reflog
|
||||
-- is empty.
|
||||
|
@ -400,7 +399,7 @@ getHistorical date file =
|
|||
, getRef (Git.Ref.dateRef fullname date) file
|
||||
)
|
||||
|
||||
getRef :: Ref -> RawFilePath -> Annex L.ByteString
|
||||
getRef :: Ref -> OsPath -> Annex L.ByteString
|
||||
getRef ref file = withIndex $ catFile ref file
|
||||
|
||||
{- Applies a function to modify the content of a file.
|
||||
|
@ -408,7 +407,7 @@ getRef ref file = withIndex $ catFile ref file
|
|||
- Note that this does not cause the branch to be merged, it only
|
||||
- modifies the current content of the file on the branch.
|
||||
-}
|
||||
change :: Journalable content => RegardingUUID -> RawFilePath -> (L.ByteString -> content) -> Annex ()
|
||||
change :: Journalable content => RegardingUUID -> OsPath -> (L.ByteString -> content) -> Annex ()
|
||||
change ru file f = lockJournal $ \jl -> f <$> getToChange ru file >>= set jl ru file
|
||||
|
||||
{- Applies a function which can modify the content of a file, or not.
|
||||
|
@ -416,7 +415,7 @@ change ru file f = lockJournal $ \jl -> f <$> getToChange ru file >>= set jl ru
|
|||
- When the file was modified, runs the onchange action, and returns
|
||||
- True. The action is run while the journal is still locked,
|
||||
- so another concurrent call to this cannot happen while it is running. -}
|
||||
maybeChange :: Journalable content => RegardingUUID -> RawFilePath -> (L.ByteString -> Maybe content) -> Annex () -> Annex Bool
|
||||
maybeChange :: Journalable content => RegardingUUID -> OsPath -> (L.ByteString -> Maybe content) -> Annex () -> Annex Bool
|
||||
maybeChange ru file f onchange = lockJournal $ \jl -> do
|
||||
v <- getToChange ru file
|
||||
case f v of
|
||||
|
@ -449,7 +448,7 @@ data ChangeOrAppend t = Change t | Append t
|
|||
- state that would confuse the older version. This is planned to be
|
||||
- changed in a future repository version.
|
||||
-}
|
||||
changeOrAppend :: Journalable content => RegardingUUID -> RawFilePath -> (L.ByteString -> ChangeOrAppend content) -> Annex ()
|
||||
changeOrAppend :: Journalable content => RegardingUUID -> OsPath -> (L.ByteString -> ChangeOrAppend content) -> Annex ()
|
||||
changeOrAppend ru file f = lockJournal $ \jl ->
|
||||
checkCanAppendJournalFile jl ru file >>= \case
|
||||
Just appendable -> ifM (annexAlwaysCompact <$> Annex.getGitConfig)
|
||||
|
@ -481,7 +480,7 @@ changeOrAppend ru file f = lockJournal $ \jl ->
|
|||
oldc <> journalableByteString toappend
|
||||
|
||||
{- Only get private information when the RegardingUUID is itself private. -}
|
||||
getToChange :: RegardingUUID -> RawFilePath -> Annex L.ByteString
|
||||
getToChange :: RegardingUUID -> OsPath -> Annex L.ByteString
|
||||
getToChange ru f = flip getLocal' f . GetPrivate =<< regardingPrivateUUID ru
|
||||
|
||||
{- Records new content of a file into the journal.
|
||||
|
@ -493,11 +492,11 @@ getToChange ru f = flip getLocal' f . GetPrivate =<< regardingPrivateUUID ru
|
|||
- git-annex index, and should not be written to the public git-annex
|
||||
- branch.
|
||||
-}
|
||||
set :: Journalable content => JournalLocked -> RegardingUUID -> RawFilePath -> content -> Annex ()
|
||||
set :: Journalable content => JournalLocked -> RegardingUUID -> OsPath -> content -> Annex ()
|
||||
set jl ru f c = do
|
||||
journalChanged
|
||||
setJournalFile jl ru f c
|
||||
fastDebug "Annex.Branch" ("set " ++ fromRawFilePath f)
|
||||
fastDebug "Annex.Branch" ("set " ++ fromOsPath f)
|
||||
-- Could cache the new content, but it would involve
|
||||
-- evaluating a Journalable Builder twice, which is not very
|
||||
-- efficient. Instead, assume that it's not common to need to read
|
||||
|
@ -505,11 +504,11 @@ set jl ru f c = do
|
|||
invalidateCache f
|
||||
|
||||
{- Appends content to the journal file. -}
|
||||
append :: Journalable content => JournalLocked -> RawFilePath -> AppendableJournalFile -> content -> Annex ()
|
||||
append :: Journalable content => JournalLocked -> OsPath -> AppendableJournalFile -> content -> Annex ()
|
||||
append jl f appendable toappend = do
|
||||
journalChanged
|
||||
appendJournalFile jl appendable toappend
|
||||
fastDebug "Annex.Branch" ("append " ++ fromRawFilePath f)
|
||||
fastDebug "Annex.Branch" ("append " ++ fromOsPath f)
|
||||
invalidateCache f
|
||||
|
||||
{- Commit message used when making a commit of whatever data has changed
|
||||
|
@ -611,7 +610,7 @@ commitIndex' jl branchref message basemessage retrynum parents = do
|
|||
- not been merged in, returns Nothing, because it's not possible to
|
||||
- efficiently handle that.
|
||||
-}
|
||||
files :: Annex (Maybe ([RawFilePath], IO Bool))
|
||||
files :: Annex (Maybe ([OsPath], IO Bool))
|
||||
files = do
|
||||
st <- update
|
||||
if not (null (unmergedRefs st))
|
||||
|
@ -629,10 +628,10 @@ files = do
|
|||
|
||||
{- Lists all files currently in the journal, but not files in the private
|
||||
- journal. -}
|
||||
journalledFiles :: Annex [RawFilePath]
|
||||
journalledFiles :: Annex [OsPath]
|
||||
journalledFiles = getJournalledFilesStale gitAnnexJournalDir
|
||||
|
||||
journalledFilesPrivate :: Annex [RawFilePath]
|
||||
journalledFilesPrivate :: Annex [OsPath]
|
||||
journalledFilesPrivate = ifM privateUUIDsKnown
|
||||
( getJournalledFilesStale gitAnnexPrivateJournalDir
|
||||
, return []
|
||||
|
@ -640,11 +639,11 @@ journalledFilesPrivate = ifM privateUUIDsKnown
|
|||
|
||||
{- Files in the branch, not including any from journalled changes,
|
||||
- and without updating the branch. -}
|
||||
branchFiles :: Annex ([RawFilePath], IO Bool)
|
||||
branchFiles :: Annex ([OsPath], IO Bool)
|
||||
branchFiles = withIndex $ inRepo branchFiles'
|
||||
|
||||
branchFiles' :: Git.Repo -> IO ([RawFilePath], IO Bool)
|
||||
branchFiles' = Git.Command.pipeNullSplit' $
|
||||
branchFiles' :: Git.Repo -> IO ([OsPath], IO Bool)
|
||||
branchFiles' = Git.Command.pipeNullSplit'' toOsPath $
|
||||
lsTreeParams Git.LsTree.LsTreeRecursive (Git.LsTree.LsTreeLong False)
|
||||
fullname
|
||||
[Param "--name-only"]
|
||||
|
@ -681,7 +680,8 @@ mergeIndex jl branches = do
|
|||
prepareModifyIndex :: JournalLocked -> Annex ()
|
||||
prepareModifyIndex _jl = do
|
||||
index <- fromRepo gitAnnexIndex
|
||||
void $ liftIO $ tryIO $ R.removeLink (index <> ".lock")
|
||||
void $ liftIO $ tryIO $
|
||||
removeFile (index <> literalOsPath ".lock")
|
||||
|
||||
{- Runs an action using the branch's index file. -}
|
||||
withIndex :: Annex a -> Annex a
|
||||
|
@ -690,7 +690,7 @@ withIndex' :: Bool -> Annex a -> Annex a
|
|||
withIndex' bootstrapping a = withIndexFile AnnexIndexFile $ \f -> do
|
||||
checkIndexOnce $ unlessM (liftIO $ doesFileExist f) $ do
|
||||
unless bootstrapping create
|
||||
createAnnexDirectory $ toRawFilePath $ takeDirectory f
|
||||
createAnnexDirectory $ takeDirectory f
|
||||
unless bootstrapping $ inRepo genIndex
|
||||
a
|
||||
|
||||
|
@ -712,7 +712,7 @@ forceUpdateIndex jl branchref = do
|
|||
{- Checks if the index needs to be updated. -}
|
||||
needUpdateIndex :: Git.Ref -> Annex Bool
|
||||
needUpdateIndex branchref = do
|
||||
f <- toOsPath <$> fromRepo gitAnnexIndexStatus
|
||||
f <- fromRepo gitAnnexIndexStatus
|
||||
committedref <- Git.Ref . firstLine' <$>
|
||||
liftIO (catchDefaultIO mempty $ F.readFile' f)
|
||||
return (committedref /= branchref)
|
||||
|
@ -748,19 +748,20 @@ stageJournal jl commitindex = withIndex $ withOtherTmp $ \tmpdir -> do
|
|||
Git.UpdateIndex.streamUpdateIndex g
|
||||
[genstream dir h jh jlogh]
|
||||
commitindex
|
||||
liftIO $ cleanup (fromRawFilePath dir) jlogh jlogf
|
||||
liftIO $ cleanup dir jlogh jlogf
|
||||
where
|
||||
genstream dir h jh jlogh streamer = readDirectory jh >>= \case
|
||||
Nothing -> return ()
|
||||
Just file -> do
|
||||
let path = dir P.</> file
|
||||
unless (dirCruft file) $ whenM (isfile path) $ do
|
||||
let file' = toOsPath file
|
||||
let path = dir </> file'
|
||||
unless (file' `elem` dirCruft) $ whenM (isfile path) $ do
|
||||
sha <- Git.HashObject.hashFile h path
|
||||
B.hPutStr jlogh (file <> "\n")
|
||||
streamer $ Git.UpdateIndex.updateIndexLine
|
||||
sha TreeFile (asTopFilePath $ fileJournal file)
|
||||
sha TreeFile (asTopFilePath $ fileJournal file')
|
||||
genstream dir h jh jlogh streamer
|
||||
isfile file = isRegularFile <$> R.getFileStatus file
|
||||
isfile file = isRegularFile <$> R.getFileStatus (fromOsPath file)
|
||||
-- Clean up the staged files, as listed in the temp log file.
|
||||
-- The temp file is used to avoid needing to buffer all the
|
||||
-- filenames in memory.
|
||||
|
@ -768,10 +769,10 @@ stageJournal jl commitindex = withIndex $ withOtherTmp $ \tmpdir -> do
|
|||
hFlush jlogh
|
||||
hSeek jlogh AbsoluteSeek 0
|
||||
stagedfs <- lines <$> hGetContents jlogh
|
||||
mapM_ (removeFile . (dir </>)) stagedfs
|
||||
mapM_ (removeFile . (dir </>) . toOsPath) stagedfs
|
||||
hClose jlogh
|
||||
removeWhenExistsWith (R.removeLink) (fromOsPath jlogf)
|
||||
openjlog tmpdir = liftIO $ openTmpFileIn (toOsPath tmpdir) (toOsPath "jlog")
|
||||
removeWhenExistsWith removeFile jlogf
|
||||
openjlog tmpdir = liftIO $ openTmpFileIn tmpdir (literalOsPath "jlog")
|
||||
|
||||
getLocalTransitions :: Annex Transitions
|
||||
getLocalTransitions =
|
||||
|
@ -932,7 +933,7 @@ getIgnoredRefs =
|
|||
S.fromList . mapMaybe Git.Sha.extractSha . fileLines' <$> content
|
||||
where
|
||||
content = do
|
||||
f <- toOsPath <$> fromRepo gitAnnexIgnoredRefs
|
||||
f <- fromRepo gitAnnexIgnoredRefs
|
||||
liftIO $ catchDefaultIO mempty $ F.readFile' f
|
||||
|
||||
addMergedRefs :: [(Git.Sha, Git.Branch)] -> Annex ()
|
||||
|
@ -950,7 +951,7 @@ getMergedRefs = S.fromList . map fst <$> getMergedRefs'
|
|||
|
||||
getMergedRefs' :: Annex [(Git.Sha, Git.Branch)]
|
||||
getMergedRefs' = do
|
||||
f <- toOsPath <$> fromRepo gitAnnexMergedRefs
|
||||
f <- fromRepo gitAnnexMergedRefs
|
||||
s <- liftIO $ catchDefaultIO mempty $ F.readFile' f
|
||||
return $ map parse $ fileLines' s
|
||||
where
|
||||
|
@ -999,7 +1000,7 @@ data UnmergedBranches t
|
|||
= UnmergedBranches t
|
||||
| NoUnmergedBranches t
|
||||
|
||||
type FileContents t b = Maybe (t, RawFilePath, Maybe (L.ByteString, Maybe b))
|
||||
type FileContents t b = Maybe (t, OsPath, Maybe (L.ByteString, Maybe b))
|
||||
|
||||
{- Runs an action on the content of selected files from the branch.
|
||||
- This is much faster than reading the content of each file in turn,
|
||||
|
@ -1022,7 +1023,7 @@ overBranchFileContents
|
|||
-- the callback can be run more than once on the same filename,
|
||||
-- and in this case it's also possible for the callback to be
|
||||
-- passed some of the same file content repeatedly.
|
||||
-> (RawFilePath -> Maybe v)
|
||||
-> (OsPath -> Maybe v)
|
||||
-> (Annex (FileContents v Bool) -> Annex a)
|
||||
-> Annex (UnmergedBranches (a, Git.Sha))
|
||||
overBranchFileContents ignorejournal select go = do
|
||||
|
@ -1036,7 +1037,7 @@ overBranchFileContents ignorejournal select go = do
|
|||
else NoUnmergedBranches v
|
||||
|
||||
overBranchFileContents'
|
||||
:: (RawFilePath -> Maybe v)
|
||||
:: (OsPath -> Maybe v)
|
||||
-> (Annex (FileContents v Bool) -> Annex a)
|
||||
-> BranchState
|
||||
-> Annex (a, Git.Sha)
|
||||
|
@ -1086,11 +1087,11 @@ combineStaleJournalWithBranch branchcontent journalledcontent =
|
|||
- files.
|
||||
-}
|
||||
overJournalFileContents
|
||||
:: (RawFilePath -> L.ByteString -> Annex (L.ByteString, Maybe b))
|
||||
:: (OsPath -> L.ByteString -> Annex (L.ByteString, Maybe b))
|
||||
-- ^ Called with the journalled file content when the journalled
|
||||
-- content may be stale or lack information committed to the
|
||||
-- git-annex branch.
|
||||
-> (RawFilePath -> Maybe v)
|
||||
-> (OsPath -> Maybe v)
|
||||
-> (Annex (FileContents v b) -> Annex a)
|
||||
-> Annex a
|
||||
overJournalFileContents handlestale select go = do
|
||||
|
@ -1098,9 +1099,9 @@ overJournalFileContents handlestale select go = do
|
|||
go $ overJournalFileContents' buf handlestale select
|
||||
|
||||
overJournalFileContents'
|
||||
:: MVar ([RawFilePath], [RawFilePath])
|
||||
-> (RawFilePath -> L.ByteString -> Annex (L.ByteString, Maybe b))
|
||||
-> (RawFilePath -> Maybe a)
|
||||
:: MVar ([OsPath], [OsPath])
|
||||
-> (OsPath -> L.ByteString -> Annex (L.ByteString, Maybe b))
|
||||
-> (OsPath -> Maybe a)
|
||||
-> Annex (FileContents a b)
|
||||
overJournalFileContents' buf handlestale select =
|
||||
liftIO (tryTakeMVar buf) >>= \case
|
||||
|
|
|
@ -118,7 +118,7 @@ enableInteractiveBranchAccess = changeState $ \s -> s
|
|||
, journalIgnorable = False
|
||||
}
|
||||
|
||||
setCache :: RawFilePath -> L.ByteString -> Annex ()
|
||||
setCache :: OsPath -> L.ByteString -> Annex ()
|
||||
setCache file content = changeState $ \s -> s
|
||||
{ cachedFileContents = add (cachedFileContents s) }
|
||||
where
|
||||
|
@ -126,7 +126,7 @@ setCache file content = changeState $ \s -> s
|
|||
| length l < logFilesToCache = (file, content) : l
|
||||
| otherwise = (file, content) : Prelude.init l
|
||||
|
||||
getCache :: RawFilePath -> BranchState -> Maybe L.ByteString
|
||||
getCache :: OsPath -> BranchState -> Maybe L.ByteString
|
||||
getCache file state = go (cachedFileContents state)
|
||||
where
|
||||
go [] = Nothing
|
||||
|
@ -134,7 +134,7 @@ getCache file state = go (cachedFileContents state)
|
|||
| f == file && not (needInteractiveAccess state) = Just c
|
||||
| otherwise = go rest
|
||||
|
||||
invalidateCache :: RawFilePath -> Annex ()
|
||||
invalidateCache :: OsPath -> Annex ()
|
||||
invalidateCache f = changeState $ \s -> s
|
||||
{ cachedFileContents = filter (\(f', _) -> f' /= f)
|
||||
(cachedFileContents s)
|
||||
|
|
|
@ -45,11 +45,11 @@ import Types.AdjustedBranch
|
|||
import Types.CatFileHandles
|
||||
import Utility.ResourcePool
|
||||
|
||||
catFile :: Git.Branch -> RawFilePath -> Annex L.ByteString
|
||||
catFile :: Git.Branch -> OsPath -> Annex L.ByteString
|
||||
catFile branch file = withCatFileHandle $ \h ->
|
||||
liftIO $ Git.CatFile.catFile h branch file
|
||||
|
||||
catFileDetails :: Git.Branch -> RawFilePath -> Annex (Maybe (L.ByteString, Sha, ObjectType))
|
||||
catFileDetails :: Git.Branch -> OsPath -> Annex (Maybe (L.ByteString, Sha, ObjectType))
|
||||
catFileDetails branch file = withCatFileHandle $ \h ->
|
||||
liftIO $ Git.CatFile.catFileDetails h branch file
|
||||
|
||||
|
@ -167,8 +167,8 @@ catKey' ref sz
|
|||
catKey' _ _ = return Nothing
|
||||
|
||||
{- Gets a symlink target. -}
|
||||
catSymLinkTarget :: Sha -> Annex RawFilePath
|
||||
catSymLinkTarget sha = fromInternalGitPath . L.toStrict <$> get
|
||||
catSymLinkTarget :: Sha -> Annex OsPath
|
||||
catSymLinkTarget sha = fromInternalGitPath . toOsPath . L.toStrict <$> get
|
||||
where
|
||||
-- Avoid buffering the whole file content, which might be large.
|
||||
-- 8192 is enough if it really is a symlink.
|
||||
|
@ -195,25 +195,25 @@ catSymLinkTarget sha = fromInternalGitPath . L.toStrict <$> get
|
|||
-
|
||||
- So, this gets info from the index, unless running as a daemon.
|
||||
-}
|
||||
catKeyFile :: RawFilePath -> Annex (Maybe Key)
|
||||
catKeyFile :: OsPath -> Annex (Maybe Key)
|
||||
catKeyFile f = ifM (Annex.getState Annex.daemon)
|
||||
( catKeyFileHEAD f
|
||||
, maybe (pure Nothing) catKey =<< inRepo (Git.Ref.fileRef f)
|
||||
)
|
||||
|
||||
catKeyFileHEAD :: RawFilePath -> Annex (Maybe Key)
|
||||
catKeyFileHEAD :: OsPath -> Annex (Maybe Key)
|
||||
catKeyFileHEAD f = maybe (pure Nothing) catKey
|
||||
=<< inRepo (Git.Ref.fileFromRef Git.Ref.headRef f)
|
||||
|
||||
{- Look in the original branch from whence an adjusted branch is based
|
||||
- to find the file. But only when the adjustment hides some files. -}
|
||||
catKeyFileHidden :: RawFilePath -> CurrBranch -> Annex (Maybe Key)
|
||||
catKeyFileHidden :: OsPath -> CurrBranch -> Annex (Maybe Key)
|
||||
catKeyFileHidden = hiddenCat catKey
|
||||
|
||||
catObjectMetaDataHidden :: RawFilePath -> CurrBranch -> Annex (Maybe (Sha, Integer, ObjectType))
|
||||
catObjectMetaDataHidden :: OsPath -> CurrBranch -> Annex (Maybe (Sha, Integer, ObjectType))
|
||||
catObjectMetaDataHidden = hiddenCat catObjectMetaData
|
||||
|
||||
hiddenCat :: (Ref -> Annex (Maybe a)) -> RawFilePath -> CurrBranch -> Annex (Maybe a)
|
||||
hiddenCat :: (Ref -> Annex (Maybe a)) -> OsPath -> CurrBranch -> Annex (Maybe a)
|
||||
hiddenCat a f (Just origbranch, Just adj)
|
||||
| adjustmentHidesFiles adj =
|
||||
maybe (pure Nothing) a
|
||||
|
|
|
@ -24,11 +24,11 @@ import qualified Git
|
|||
import Git.Sha
|
||||
import qualified Utility.SimpleProtocol as Proto
|
||||
import qualified Utility.FileIO as F
|
||||
import qualified Utility.OsString as OS
|
||||
|
||||
import Control.Concurrent
|
||||
import Control.Concurrent.STM
|
||||
import Control.Concurrent.STM.TBMChan
|
||||
import qualified System.FilePath.ByteString as P
|
||||
|
||||
newtype ChangedRefs = ChangedRefs [Git.Ref]
|
||||
deriving (Show)
|
||||
|
@ -82,7 +82,7 @@ watchChangedRefs = do
|
|||
|
||||
g <- gitRepo
|
||||
let gittop = Git.localGitDir g
|
||||
let refdir = gittop P.</> "refs"
|
||||
let refdir = gittop </> literalOsPath "refs"
|
||||
liftIO $ createDirectoryUnder [gittop] refdir
|
||||
|
||||
let notifyhook = Just $ notifyHook chan
|
||||
|
@ -93,18 +93,17 @@ watchChangedRefs = do
|
|||
|
||||
if canWatch
|
||||
then do
|
||||
h <- liftIO $ watchDir
|
||||
(fromRawFilePath refdir)
|
||||
h <- liftIO $ watchDir refdir
|
||||
(const False) True hooks id
|
||||
return $ Just $ ChangedRefsHandle h chan
|
||||
else return Nothing
|
||||
|
||||
notifyHook :: TBMChan Git.Sha -> FilePath -> Maybe FileStatus -> IO ()
|
||||
notifyHook :: TBMChan Git.Sha -> OsPath -> Maybe FileStatus -> IO ()
|
||||
notifyHook chan reffile _
|
||||
| ".lock" `isSuffixOf` reffile = noop
|
||||
| literalOsPath ".lock" `OS.isSuffixOf` reffile = noop
|
||||
| otherwise = void $ do
|
||||
sha <- catchDefaultIO Nothing $
|
||||
extractSha <$> F.readFile' (toOsPath (toRawFilePath reffile))
|
||||
extractSha <$> F.readFile' reffile
|
||||
-- When the channel is full, there is probably no reader
|
||||
-- running, or ref changes have been occurring very fast,
|
||||
-- so it's ok to not write the change to it.
|
||||
|
|
|
@ -29,14 +29,14 @@ annexAttrs =
|
|||
, "annex.mincopies"
|
||||
]
|
||||
|
||||
checkAttr :: Git.Attr -> RawFilePath -> Annex String
|
||||
checkAttr :: Git.Attr -> OsPath -> Annex String
|
||||
checkAttr attr file = withCheckAttrHandle $ \h -> do
|
||||
r <- liftIO $ Git.checkAttr h attr file
|
||||
if r == Git.unspecifiedAttr
|
||||
then return ""
|
||||
else return r
|
||||
|
||||
checkAttrs :: [Git.Attr] -> RawFilePath -> Annex [String]
|
||||
checkAttrs :: [Git.Attr] -> OsPath -> Annex [String]
|
||||
checkAttrs attrs file = withCheckAttrHandle $ \h ->
|
||||
liftIO $ Git.checkAttrs h attrs file
|
||||
|
||||
|
|
|
@ -22,7 +22,7 @@ import Annex.Concurrent.Utility
|
|||
|
||||
newtype CheckGitIgnore = CheckGitIgnore Bool
|
||||
|
||||
checkIgnored :: CheckGitIgnore -> RawFilePath -> Annex Bool
|
||||
checkIgnored :: CheckGitIgnore -> OsPath -> Annex Bool
|
||||
checkIgnored (CheckGitIgnore False) _ = pure False
|
||||
checkIgnored (CheckGitIgnore True) file =
|
||||
ifM (Annex.getRead Annex.force)
|
||||
|
|
Annex/Content.hs (134 changed lines)
|
@ -110,7 +110,6 @@ import Utility.FileMode
|
|||
import qualified Utility.RawFilePath as R
|
||||
import qualified Utility.FileIO as F
|
||||
|
||||
import qualified System.FilePath.ByteString as P
|
||||
import System.PosixCompat.Files (isSymbolicLink, linkCount)
|
||||
import Data.Time.Clock.POSIX
|
||||
|
||||
|
@ -248,7 +247,7 @@ lockContentForRemoval key fallback a = lockContentUsing lock key fallback $
|
|||
{- Passed the object content file, and maybe a separate lock file to use,
|
||||
- when the content file itself should not be locked. -}
|
||||
type ContentLocker
|
||||
= RawFilePath
|
||||
= OsPath
|
||||
-> Maybe LockFile
|
||||
->
|
||||
( Annex (Maybe LockHandle)
|
||||
|
@ -260,7 +259,7 @@ type ContentLocker
|
|||
-- and prior to deleting the lock file, in order to
|
||||
-- ensure that no other processes also have a shared lock.
|
||||
#else
|
||||
, Maybe (RawFilePath -> Annex ())
|
||||
, Maybe (OsPath -> Annex ())
|
||||
-- ^ On Windows, this is called after the lock is dropped,
|
||||
-- but before the lock file is cleaned up.
|
||||
#endif
|
||||
|
@ -278,7 +277,7 @@ winLocker takelock _ (Just lockfile) =
|
|||
let lck = do
|
||||
modifyContentDir lockfile $
|
||||
void $ liftIO $ tryIO $
|
||||
writeFile (fromRawFilePath lockfile) ""
|
||||
writeFile (fromOsPath lockfile) ""
|
||||
liftIO $ takelock lockfile
|
||||
in (lck, Nothing)
|
||||
-- never reached; windows always uses a separate lock file
|
||||
|
@ -371,13 +370,13 @@ lockContentUsing contentlocker key fallback a = withContentLockFile key $ \mlock
|
|||
|
||||
cleanuplockfile lockfile = void $ tryNonAsync $ do
|
||||
thawContentDir lockfile
|
||||
liftIO $ removeWhenExistsWith R.removeLink lockfile
|
||||
liftIO $ removeWhenExistsWith removeFile lockfile
|
||||
cleanObjectDirs lockfile
|
||||
|
||||
{- Runs an action, passing it the temp file to get,
|
||||
- and if the action succeeds, verifies the file matches
|
||||
- the key and moves the file into the annex as a key's content. -}
|
||||
getViaTmp :: RetrievalSecurityPolicy -> VerifyConfig -> Key -> AssociatedFile -> Maybe FileSize -> (RawFilePath -> Annex (Bool, Verification)) -> Annex Bool
|
||||
getViaTmp :: RetrievalSecurityPolicy -> VerifyConfig -> Key -> AssociatedFile -> Maybe FileSize -> (OsPath -> Annex (Bool, Verification)) -> Annex Bool
|
||||
getViaTmp rsp v key af sz action =
|
||||
checkDiskSpaceToGet key sz False $
|
||||
getViaTmpFromDisk rsp v key af action
|
||||
|
@ -385,10 +384,10 @@ getViaTmp rsp v key af sz action =
|
|||
{- Like getViaTmp, but does not check that there is enough disk space
|
||||
- for the incoming key. For use when the key content is already on disk
|
||||
- and not being copied into place. -}
|
||||
getViaTmpFromDisk :: RetrievalSecurityPolicy -> VerifyConfig -> Key -> AssociatedFile -> (RawFilePath -> Annex (Bool, Verification)) -> Annex Bool
|
||||
getViaTmpFromDisk :: RetrievalSecurityPolicy -> VerifyConfig -> Key -> AssociatedFile -> (OsPath -> Annex (Bool, Verification)) -> Annex Bool
|
||||
getViaTmpFromDisk rsp v key af action = checkallowed $ do
|
||||
tmpfile <- prepTmp key
|
||||
resuming <- liftIO $ R.doesPathExist tmpfile
|
||||
resuming <- liftIO $ R.doesPathExist $ fromOsPath tmpfile
|
||||
(ok, verification) <- action tmpfile
|
||||
-- When the temp file already had content, we don't know if
|
||||
-- that content is good or not, so only trust if it the action
|
||||
|
@ -434,11 +433,11 @@ getViaTmpFromDisk rsp v key af action = checkallowed $ do
|
|||
- left off, and so if the bad content were not deleted, repeated downloads
|
||||
- would continue to fail.
|
||||
-}
|
||||
verificationOfContentFailed :: RawFilePath -> Annex ()
|
||||
verificationOfContentFailed :: OsPath -> Annex ()
|
||||
verificationOfContentFailed tmpfile = do
|
||||
warning "Verification of content failed"
|
||||
pruneTmpWorkDirBefore tmpfile
|
||||
(liftIO . removeWhenExistsWith R.removeLink)
|
||||
(liftIO . removeWhenExistsWith removeFile)
|
||||
|
||||
{- Checks if there is enough free disk space to download a key
|
||||
- to its temp file.
|
||||
|
@ -451,7 +450,7 @@ verificationOfContentFailed tmpfile = do
|
|||
checkDiskSpaceToGet :: Key -> Maybe FileSize -> a -> Annex a -> Annex a
|
||||
checkDiskSpaceToGet key sz unabletoget getkey = do
|
||||
tmp <- fromRepo (gitAnnexTmpObjectLocation key)
|
||||
e <- liftIO $ doesFileExist (fromRawFilePath tmp)
|
||||
e <- liftIO $ doesFileExist tmp
|
||||
alreadythere <- liftIO $ if e
|
||||
then getFileSize tmp
|
||||
else return 0
|
||||
|
@ -463,7 +462,7 @@ checkDiskSpaceToGet key sz unabletoget getkey = do
|
|||
, return unabletoget
|
||||
)
|
||||
|
||||
prepTmp :: Key -> Annex RawFilePath
|
||||
prepTmp :: Key -> Annex OsPath
|
||||
prepTmp key = do
|
||||
tmp <- fromRepo $ gitAnnexTmpObjectLocation key
|
||||
createAnnexDirectory (parentDir tmp)
|
||||
|
@ -473,11 +472,11 @@ prepTmp key = do
|
|||
- the temp file. If the action throws an exception, the temp file is
|
||||
- left behind, which allows for resuming.
|
||||
-}
|
||||
withTmp :: Key -> (RawFilePath -> Annex a) -> Annex a
|
||||
withTmp :: Key -> (OsPath -> Annex a) -> Annex a
|
||||
withTmp key action = do
|
||||
tmp <- prepTmp key
|
||||
res <- action tmp
|
||||
pruneTmpWorkDirBefore tmp (liftIO . removeWhenExistsWith R.removeLink)
|
||||
pruneTmpWorkDirBefore tmp (liftIO . removeWhenExistsWith removeFile)
|
||||
return res
|
||||
|
||||
{- Moves a key's content into .git/annex/objects/
|
||||
|
@ -508,7 +507,7 @@ withTmp key action = do
|
|||
- accepted into the repository. Will display a warning message in this
|
||||
- case. May also throw exceptions in some cases.
|
||||
-}
|
||||
moveAnnex :: Key -> AssociatedFile -> RawFilePath -> Annex Bool
|
||||
moveAnnex :: Key -> AssociatedFile -> OsPath -> Annex Bool
|
||||
moveAnnex key af src = ifM (checkSecureHashes' key)
|
||||
( do
|
||||
#ifdef mingw32_HOST_OS
|
||||
|
@ -522,7 +521,7 @@ moveAnnex key af src = ifM (checkSecureHashes' key)
|
|||
, return False
|
||||
)
|
||||
where
|
||||
storeobject dest = ifM (liftIO $ R.doesPathExist dest)
|
||||
storeobject dest = ifM (liftIO $ R.doesPathExist $ fromOsPath dest)
|
||||
( alreadyhave
|
||||
, adjustedBranchRefresh af $ modifyContentDir dest $ do
|
||||
liftIO $ moveFile src dest
|
||||
|
@ -540,7 +539,7 @@ moveAnnex key af src = ifM (checkSecureHashes' key)
|
|||
Database.Keys.addInodeCaches key
|
||||
(catMaybes (destic:ics))
|
||||
)
|
||||
alreadyhave = liftIO $ R.removeLink src
|
||||
alreadyhave = liftIO $ removeFile src
|
||||
|
||||
checkSecureHashes :: Key -> Annex (Maybe String)
|
||||
checkSecureHashes key = ifM (Backend.isCryptographicallySecureKey key)
|
||||
|
@ -563,7 +562,7 @@ data LinkAnnexResult = LinkAnnexOk | LinkAnnexFailed | LinkAnnexNoop
|
|||
|
||||
{- Populates the annex object file by hard linking or copying a source
|
||||
- file to it. -}
|
||||
linkToAnnex :: Key -> RawFilePath -> Maybe InodeCache -> Annex LinkAnnexResult
|
||||
linkToAnnex :: Key -> OsPath -> Maybe InodeCache -> Annex LinkAnnexResult
|
||||
linkToAnnex key src srcic = ifM (checkSecureHashes' key)
|
||||
( do
|
||||
dest <- calcRepo (gitAnnexLocation key)
|
||||
|
@ -580,13 +579,13 @@ linkToAnnex key src srcic = ifM (checkSecureHashes' key)
|
|||
- afterwards. Note that a consequence of this is that, if the file
|
||||
- already exists, it will be overwritten.
|
||||
-}
|
||||
linkFromAnnex :: Key -> RawFilePath -> Maybe FileMode -> Annex LinkAnnexResult
|
||||
linkFromAnnex :: Key -> OsPath -> Maybe FileMode -> Annex LinkAnnexResult
|
||||
linkFromAnnex key dest destmode =
|
||||
replaceFile' (const noop) dest (== LinkAnnexOk) $ \tmp ->
|
||||
linkFromAnnex' key tmp destmode
|
||||
|
||||
{- This is only safe to use when dest is not a worktree file. -}
|
||||
linkFromAnnex' :: Key -> RawFilePath -> Maybe FileMode -> Annex LinkAnnexResult
|
||||
linkFromAnnex' :: Key -> OsPath -> Maybe FileMode -> Annex LinkAnnexResult
|
||||
linkFromAnnex' key dest destmode = do
|
||||
src <- calcRepo (gitAnnexLocation key)
|
||||
srcic <- withTSDelta (liftIO . genInodeCache src)
|
||||
|
@ -606,7 +605,7 @@ data FromTo = From | To
|
|||
-
|
||||
- Nothing is done if the destination file already exists.
|
||||
-}
|
||||
linkAnnex :: FromTo -> Key -> RawFilePath -> Maybe InodeCache -> RawFilePath -> Maybe FileMode -> Annex LinkAnnexResult
|
||||
linkAnnex :: FromTo -> Key -> OsPath -> Maybe InodeCache -> OsPath -> Maybe FileMode -> Annex LinkAnnexResult
|
||||
linkAnnex _ _ _ Nothing _ _ = return LinkAnnexFailed
|
||||
linkAnnex fromto key src (Just srcic) dest destmode =
|
||||
withTSDelta (liftIO . genInodeCache dest) >>= \case
|
||||
|
@ -636,7 +635,7 @@ linkAnnex fromto key src (Just srcic) dest destmode =
|
|||
catMaybes [destic, Just srcic]
|
||||
return LinkAnnexOk
|
||||
_ -> do
|
||||
liftIO $ removeWhenExistsWith R.removeLink dest
|
||||
liftIO $ removeWhenExistsWith removeFile dest
|
||||
failed
|
||||
|
||||
{- Removes the annex object file for a key. Lowlevel. -}
|
||||
|
@ -645,7 +644,7 @@ unlinkAnnex key = do
|
|||
obj <- calcRepo (gitAnnexLocation key)
|
||||
modifyContentDir obj $ do
|
||||
secureErase obj
|
||||
liftIO $ removeWhenExistsWith R.removeLink obj
|
||||
liftIO $ removeWhenExistsWith removeFile obj
|
||||
|
||||
{- Runs an action to transfer an object's content. The action is also
|
||||
- passed the size of the object.
|
||||
|
@ -654,7 +653,7 @@ unlinkAnnex key = do
|
|||
- If this happens, runs the rollback action and throws an exception.
|
||||
- The rollback action should remove the data that was transferred.
|
||||
-}
|
||||
sendAnnex :: Key -> Maybe FilePath -> Annex () -> (FilePath -> FileSize -> Annex a) -> Annex a
|
||||
sendAnnex :: Key -> Maybe OsPath -> Annex () -> (OsPath -> FileSize -> Annex a) -> Annex a
|
||||
sendAnnex key o rollback sendobject = go =<< prepSendAnnex' key o
|
||||
where
|
||||
go (Just (f, sz, check)) = do
|
||||
|
@ -677,10 +676,10 @@ sendAnnex key o rollback sendobject = go =<< prepSendAnnex' key o
|
|||
- Annex monad of the remote that is receiving the object, rather than
|
||||
- the sender. So it cannot rely on Annex state.
|
||||
-}
|
||||
prepSendAnnex :: Key -> Maybe FilePath -> Annex (Maybe (FilePath, FileSize, Annex Bool))
|
||||
prepSendAnnex :: Key -> Maybe OsPath -> Annex (Maybe (OsPath, FileSize, Annex Bool))
|
||||
prepSendAnnex key Nothing = withObjectLoc key $ \f -> do
|
||||
let retval c cs = return $ Just
|
||||
( fromRawFilePath f
|
||||
( f
|
||||
, inodeCacheFileSize c
|
||||
, sameInodeCache f cs
|
||||
)
|
||||
|
@ -705,19 +704,18 @@ prepSendAnnex key Nothing = withObjectLoc key $ \f -> do
|
|||
Nothing -> return Nothing
|
||||
-- If the provided object file is the annex object file, handle as above.
|
||||
prepSendAnnex key (Just o) = withObjectLoc key $ \aof ->
|
||||
let o' = toRawFilePath o
|
||||
in if aof == o'
|
||||
if aof == o
|
||||
then prepSendAnnex key Nothing
|
||||
else do
|
||||
withTSDelta (liftIO . genInodeCache o') >>= \case
|
||||
withTSDelta (liftIO . genInodeCache o) >>= \case
|
||||
Nothing -> return Nothing
|
||||
Just c -> return $ Just
|
||||
( o
|
||||
, inodeCacheFileSize c
|
||||
, sameInodeCache o' [c]
|
||||
, sameInodeCache o [c]
|
||||
)
|
||||
|
||||
prepSendAnnex' :: Key -> Maybe FilePath -> Annex (Maybe (FilePath, FileSize, Annex (Maybe String)))
|
||||
prepSendAnnex' :: Key -> Maybe OsPath -> Annex (Maybe (OsPath, FileSize, Annex (Maybe String)))
|
||||
prepSendAnnex' key o = prepSendAnnex key o >>= \case
|
||||
Just (f, sz, checksuccess) ->
|
||||
let checksuccess' = ifM checksuccess
|
||||
|
@ -751,7 +749,7 @@ cleanObjectLoc key cleaner = do
|
|||
-
|
||||
- Does nothing if the object directory is not empty, and does not
|
||||
- throw an exception if it's unable to remove a directory. -}
|
||||
cleanObjectDirs :: RawFilePath -> Annex ()
|
||||
cleanObjectDirs :: OsPath -> Annex ()
|
||||
cleanObjectDirs f = do
|
||||
HashLevels n <- objectHashLevels <$> Annex.getGitConfig
|
||||
liftIO $ go f (succ n)
|
||||
|
@ -761,14 +759,14 @@ cleanObjectDirs f = do
|
|||
let dir = parentDir file
|
||||
maybe noop (const $ go dir (n-1))
|
||||
<=< catchMaybeIO $ tryWhenExists $
|
||||
removeDirectory (fromRawFilePath dir)
|
||||
removeDirectory dir
|
||||
|
||||
{- Removes a key's file from .git/annex/objects/ -}
|
||||
removeAnnex :: ContentRemovalLock -> Annex ()
|
||||
removeAnnex (ContentRemovalLock key) = withObjectLoc key $ \file ->
|
||||
cleanObjectLoc key $ do
|
||||
secureErase file
|
||||
liftIO $ removeWhenExistsWith R.removeLink file
|
||||
liftIO $ removeWhenExistsWith removeFile file
|
||||
g <- Annex.gitRepo
|
||||
mapM_ (\f -> void $ tryIO $ resetpointer $ fromTopFilePath f g)
|
||||
=<< Database.Keys.getAssociatedFiles key
|
||||
|
@ -776,7 +774,7 @@ removeAnnex (ContentRemovalLock key) = withObjectLoc key $ \file ->
|
|||
where
|
||||
-- Check associated pointer file for modifications, and reset if
|
||||
-- it's unmodified.
|
||||
resetpointer file = unlessM (liftIO $ isSymbolicLink <$> R.getSymbolicLinkStatus file) $
|
||||
resetpointer file = unlessM (liftIO $ isSymbolicLink <$> R.getSymbolicLinkStatus (fromOsPath file)) $
|
||||
ifM (isUnmodified key file)
|
||||
( adjustedBranchRefresh (AssociatedFile (Just file)) $
|
||||
depopulatePointerFile key file
|
||||
|
@ -789,11 +787,11 @@ removeAnnex (ContentRemovalLock key) = withObjectLoc key $ \file ->
|
|||
|
||||
{- Moves a key out of .git/annex/objects/ into .git/annex/bad, and
|
||||
- returns the file it was moved to. -}
|
||||
moveBad :: Key -> Annex RawFilePath
|
||||
moveBad :: Key -> Annex OsPath
|
||||
moveBad key = do
|
||||
src <- calcRepo (gitAnnexLocation key)
|
||||
bad <- fromRepo gitAnnexBadDir
|
||||
let dest = bad P.</> P.takeFileName src
|
||||
let dest = bad </> takeFileName src
|
||||
createAnnexDirectory (parentDir dest)
|
 cleanObjectLoc key $
 liftIO $ moveFile src dest
@@ -826,7 +824,7 @@ listKeys' keyloc want = do
 then do
 contents' <- filterM present contents
 keys <- filterM (Annex.eval s . want) $
-mapMaybe (fileKey . P.takeFileName) contents'
+mapMaybe (fileKey . takeFileName) contents'
 continue keys []
 else do
 let deeper = walk s (depth - 1)
@@ -844,8 +842,8 @@ listKeys' keyloc want = do
 present _ | inanywhere = pure True
 present d = presentInAnnex d
-presentInAnnex = R.doesPathExist . contentfile
-contentfile d = d P.</> P.takeFileName d
+presentInAnnex = R.doesPathExist . fromOsPath . contentfile
+contentfile d = d </> takeFileName d
 {- Things to do to record changes to content when shutting down.
 -
@@ -868,11 +866,11 @@ saveState nocommit = doSideAction $ do
 - Otherwise, only displays one error message, from one of the urls
 - that failed.
 -}
-downloadUrl :: Bool -> Key -> MeterUpdate -> Maybe IncrementalVerifier -> [Url.URLString] -> FilePath -> Url.UrlOptions -> Annex Bool
+downloadUrl :: Bool -> Key -> MeterUpdate -> Maybe IncrementalVerifier -> [Url.URLString] -> OsPath -> Url.UrlOptions -> Annex Bool
 downloadUrl listfailedurls k p iv urls file uo =
 -- Poll the file to handle configurations where an external
 -- download command is used.
-meteredFile (toRawFilePath file) (Just p) k (go urls [])
+meteredFile file (Just p) k (go urls [])
 where
 go (u:us) errs p' = Url.download' p' iv u file uo >>= \case
 Right () -> return True
@@ -898,18 +896,18 @@ downloadUrl listfailedurls k p iv urls file uo =
 {- Copies a key's content, when present, to a temp file.
 - This is used to speed up some rsyncs. -}
-preseedTmp :: Key -> FilePath -> Annex Bool
+preseedTmp :: Key -> OsPath -> Annex Bool
 preseedTmp key file = go =<< inAnnex key
 where
 go False = return False
 go True = do
 ok <- copy
-when ok $ thawContent (toRawFilePath file)
+when ok $ thawContent file
 return ok
 copy = ifM (liftIO $ doesFileExist file)
 ( return True
 , do
-s <- fromRawFilePath <$> (calcRepo $ gitAnnexLocation key)
+s <- calcRepo $ gitAnnexLocation key
 liftIO $ ifM (doesFileExist s)
 ( copyFileExternal CopyTimeStamps s file
 , return False
@@ -918,15 +916,15 @@ preseedTmp key file = go =<< inAnnex key
 {- Finds files directly inside a directory like gitAnnexBadDir
 - (not in subdirectories) and returns the corresponding keys. -}
-dirKeys :: (Git.Repo -> RawFilePath) -> Annex [Key]
+dirKeys :: (Git.Repo -> OsPath) -> Annex [Key]
 dirKeys dirspec = do
-dir <- fromRawFilePath <$> fromRepo dirspec
+dir <- fromRepo dirspec
 ifM (liftIO $ doesDirectoryExist dir)
 ( do
 contents <- liftIO $ getDirectoryContents dir
 files <- liftIO $ filterM doesFileExist $
 map (dir </>) contents
-return $ mapMaybe (fileKey . P.takeFileName . toRawFilePath) files
+return $ mapMaybe (fileKey . takeFileName) files
 , return []
 )
@@ -936,7 +934,7 @@ dirKeys dirspec = do
 - Also, stale keys that can be proven to have no value
 - (ie, their content is already present) are deleted.
 -}
-staleKeysPrune :: (Git.Repo -> RawFilePath) -> Bool -> Annex [Key]
+staleKeysPrune :: (Git.Repo -> OsPath) -> Bool -> Annex [Key]
 staleKeysPrune dirspec nottransferred = do
 contents <- dirKeys dirspec
@@ -945,8 +943,8 @@ staleKeysPrune dirspec nottransferred = do
 dir <- fromRepo dirspec
 forM_ dups $ \k ->
-pruneTmpWorkDirBefore (dir P.</> keyFile k)
-(liftIO . R.removeLink)
+pruneTmpWorkDirBefore (dir </> keyFile k)
+(liftIO . removeFile)
 if nottransferred
 then do
@@ -961,9 +959,9 @@ staleKeysPrune dirspec nottransferred = do
 - This preserves the invariant that the workdir never exists without
 - the content file.
 -}
-pruneTmpWorkDirBefore :: RawFilePath -> (RawFilePath -> Annex a) -> Annex a
+pruneTmpWorkDirBefore :: OsPath -> (OsPath -> Annex a) -> Annex a
 pruneTmpWorkDirBefore f action = do
-let workdir = fromRawFilePath $ gitAnnexTmpWorkDir f
+let workdir = gitAnnexTmpWorkDir f
 liftIO $ whenM (doesDirectoryExist workdir) $
 removeDirectoryRecursive workdir
 action f
@@ -978,22 +976,21 @@ pruneTmpWorkDirBefore f action = do
 - the temporary work directory is retained (unless
 - empty), so anything in it can be used on resume.
 -}
-withTmpWorkDir :: Key -> (RawFilePath -> Annex (Maybe a)) -> Annex (Maybe a)
+withTmpWorkDir :: Key -> (OsPath -> Annex (Maybe a)) -> Annex (Maybe a)
 withTmpWorkDir key action = do
 -- Create the object file if it does not exist. This way,
 -- staleKeysPrune only has to look for object files, and can
 -- clean up gitAnnexTmpWorkDir for those it finds.
 obj <- prepTmp key
-let obj' = fromRawFilePath obj
-unlessM (liftIO $ doesFileExist obj') $ do
-liftIO $ writeFile obj' ""
+unlessM (liftIO $ doesFileExist obj) $ do
+liftIO $ writeFile (fromOsPath obj) ""
 setAnnexFilePerm obj
 let tmpdir = gitAnnexTmpWorkDir obj
 createAnnexDirectory tmpdir
 res <- action tmpdir
 case res of
-Just _ -> liftIO $ removeDirectoryRecursive (fromRawFilePath tmpdir)
-Nothing -> liftIO $ void $ tryIO $ removeDirectory (fromRawFilePath tmpdir)
+Just _ -> liftIO $ removeDirectoryRecursive tmpdir
+Nothing -> liftIO $ void $ tryIO $ removeDirectory tmpdir
 return res
 {- Finds items in the first, smaller list, that are not
@@ -1028,12 +1025,12 @@ getKeyStatus :: Key -> Annex KeyStatus
 getKeyStatus key = catchDefaultIO KeyMissing $ do
 afs <- not . null <$> Database.Keys.getAssociatedFiles key
 obj <- calcRepo (gitAnnexLocation key)
-multilink <- ((> 1) . linkCount <$> liftIO (R.getFileStatus obj))
+multilink <- ((> 1) . linkCount <$> liftIO (R.getFileStatus (fromOsPath obj)))
 return $ if multilink && afs
 then KeyUnlockedThin
 else KeyPresent
-getKeyFileStatus :: Key -> RawFilePath -> Annex KeyStatus
+getKeyFileStatus :: Key -> OsPath -> Annex KeyStatus
 getKeyFileStatus key file = do
 s <- getKeyStatus key
 case s of
@@ -1071,23 +1068,22 @@ contentSize key = catchDefaultIO Nothing $
 - timestamp. The file is written atomically, so when it contained an
 - earlier timestamp, a reader will always see one or the other timestamp.
 -}
-writeContentRetentionTimestamp :: Key -> RawFilePath -> POSIXTime -> Annex ()
+writeContentRetentionTimestamp :: Key -> OsPath -> POSIXTime -> Annex ()
 writeContentRetentionTimestamp key rt t = do
 lckfile <- calcRepo (gitAnnexContentRetentionTimestampLock key)
 modifyContentDirWhenExists lckfile $ bracket (lock lckfile) unlock $ \_ ->
 readContentRetentionTimestamp rt >>= \case
 Just ts | ts >= t -> return ()
 _ -> replaceFile (const noop) rt $ \tmp ->
-liftIO $ writeFile (fromRawFilePath tmp) $ show t
+liftIO $ writeFile (fromOsPath tmp) $ show t
 where
 lock = takeExclusiveLock
 unlock = liftIO . dropLock
 {- Does not need locking because the file is written atomically. -}
-readContentRetentionTimestamp :: RawFilePath -> Annex (Maybe POSIXTime)
+readContentRetentionTimestamp :: OsPath -> Annex (Maybe POSIXTime)
 readContentRetentionTimestamp rt =
-liftIO $ join <$> tryWhenExists
-(parsePOSIXTime <$> F.readFile' (toOsPath rt))
+liftIO $ join <$> tryWhenExists (parsePOSIXTime <$> F.readFile' rt)
 {- Checks if the retention timestamp is in the future, if so returns
 - Nothing.
@@ -1118,8 +1114,8 @@ checkRetentionTimestamp key locker = do
 {- Remove the retention timestamp and its lock file. Another lock must
 - be held, that prevents anything else writing to the file at the same
 - time. -}
-removeRetentionTimeStamp :: Key -> RawFilePath -> Annex ()
+removeRetentionTimeStamp :: Key -> OsPath -> Annex ()
 removeRetentionTimeStamp key rt = modifyContentDirWhenExists rt $ do
-liftIO $ removeWhenExistsWith R.removeLink rt
+liftIO $ removeWhenExistsWith removeFile rt
 rtl <- calcRepo (gitAnnexContentRetentionTimestampLock key)
-liftIO $ removeWhenExistsWith R.removeLink rtl
+liftIO $ removeWhenExistsWith removeFile rtl
@@ -19,13 +19,12 @@ import Utility.DataUnits
 import Utility.CopyFile
 import qualified Utility.RawFilePath as R
-import qualified System.FilePath.ByteString as P
 import System.PosixCompat.Files (linkCount)
 {- Runs the secure erase command if set, otherwise does nothing.
 - File may or may not be deleted at the end; caller is responsible for
 - making sure it's deleted. -}
-secureErase :: RawFilePath -> Annex ()
+secureErase :: OsPath -> Annex ()
 secureErase = void . runAnnexPathHook "%file"
 secureEraseAnnexHook annexSecureEraseCommand
@@ -44,45 +43,48 @@ data LinkedOrCopied = Linked | Copied
 - execute bit will be set. The mode is not fully copied over because
 - git doesn't support file modes beyond execute.
 -}
-linkOrCopy :: Key -> RawFilePath -> RawFilePath -> Maybe FileMode -> Annex (Maybe LinkedOrCopied)
+linkOrCopy :: Key -> OsPath -> OsPath -> Maybe FileMode -> Annex (Maybe LinkedOrCopied)
 linkOrCopy = linkOrCopy' (annexThin <$> Annex.getGitConfig)
-linkOrCopy' :: Annex Bool -> Key -> RawFilePath -> RawFilePath -> Maybe FileMode -> Annex (Maybe LinkedOrCopied)
+linkOrCopy' :: Annex Bool -> Key -> OsPath -> OsPath -> Maybe FileMode -> Annex (Maybe LinkedOrCopied)
 linkOrCopy' canhardlink key src dest destmode = catchDefaultIO Nothing $
 ifM canhardlink
-( hardlink
+( hardlinkorcopy
 , copy =<< getstat
 )
 where
-hardlink = do
+hardlinkorcopy = do
 s <- getstat
 if linkCount s > 1
 then copy s
-else liftIO (R.createLink src dest >> preserveGitMode dest destmode >> return (Just Linked))
-`catchIO` const (copy s)
+else hardlink `catchIO` const (copy s)
+hardlink = liftIO $ do
+R.createLink (fromOsPath src) (fromOsPath dest)
+void $ preserveGitMode dest destmode
+return (Just Linked)
 copy s = ifM (checkedCopyFile' key src dest destmode s)
 ( return (Just Copied)
 , return Nothing
 )
-getstat = liftIO $ R.getFileStatus src
+getstat = liftIO $ R.getFileStatus (fromOsPath src)
 {- Checks disk space before copying. -}
-checkedCopyFile :: Key -> RawFilePath -> RawFilePath -> Maybe FileMode -> Annex Bool
+checkedCopyFile :: Key -> OsPath -> OsPath -> Maybe FileMode -> Annex Bool
 checkedCopyFile key src dest destmode = catchBoolIO $
 checkedCopyFile' key src dest destmode
-=<< liftIO (R.getFileStatus src)
+=<< liftIO (R.getFileStatus (fromOsPath src))
-checkedCopyFile' :: Key -> RawFilePath -> RawFilePath -> Maybe FileMode -> FileStatus -> Annex Bool
+checkedCopyFile' :: Key -> OsPath -> OsPath -> Maybe FileMode -> FileStatus -> Annex Bool
 checkedCopyFile' key src dest destmode s = catchBoolIO $ do
 sz <- liftIO $ getFileSize' src s
-ifM (checkDiskSpace' sz (Just $ P.takeDirectory dest) key 0 True)
+ifM (checkDiskSpace' sz (Just $ takeDirectory dest) key 0 True)
 ( liftIO $
-copyFileExternal CopyAllMetaData (fromRawFilePath src) (fromRawFilePath dest)
+copyFileExternal CopyAllMetaData src dest
 <&&> preserveGitMode dest destmode
 , return False
 )
-preserveGitMode :: RawFilePath -> Maybe FileMode -> IO Bool
+preserveGitMode :: OsPath -> Maybe FileMode -> IO Bool
 preserveGitMode f (Just mode)
 | isExecutable mode = catchBoolIO $ do
 modifyFileMode f $ addModes executeModes
@@ -100,12 +102,12 @@ preserveGitMode _ _ = return True
 - to be downloaded from the free space. This way, we avoid overcommitting
 - when doing concurrent downloads.
 -}
-checkDiskSpace :: Maybe FileSize -> Maybe RawFilePath -> Key -> Integer -> Bool -> Annex Bool
+checkDiskSpace :: Maybe FileSize -> Maybe OsPath -> Key -> Integer -> Bool -> Annex Bool
 checkDiskSpace msz destdir key = checkDiskSpace' sz destdir key
 where
 sz = fromMaybe 1 (fromKey keySize key <|> msz)
-checkDiskSpace' :: FileSize -> Maybe RawFilePath -> Key -> Integer -> Bool -> Annex Bool
+checkDiskSpace' :: FileSize -> Maybe OsPath -> Key -> Integer -> Bool -> Annex Bool
 checkDiskSpace' sz destdir key alreadythere samefilesystem = ifM (Annex.getRead Annex.force)
 ( return True
 , do
@@ -118,7 +120,7 @@ checkDiskSpace' sz destdir key alreadythere samefilesystem = ifM (Annex.getRead
 inprogress <- if samefilesystem
 then sizeOfDownloadsInProgress (/= key)
 else pure 0
-dir >>= liftIO . getDiskFree . fromRawFilePath >>= \case
+dir >>= liftIO . getDiskFree . fromOsPath >>= \case
 Just have -> do
 reserve <- annexDiskReserve <$> Annex.getGitConfig
 let delta = sz + reserve - have - alreadythere + inprogress
@@ -30,12 +30,14 @@ import System.PosixCompat.Files (fileMode)
 -
 - Returns an InodeCache if it populated the pointer file.
 -}
-populatePointerFile :: Restage -> Key -> RawFilePath -> RawFilePath -> Annex (Maybe InodeCache)
+populatePointerFile :: Restage -> Key -> OsPath -> OsPath -> Annex (Maybe InodeCache)
 populatePointerFile restage k obj f = go =<< liftIO (isPointerFile f)
 where
 go (Just k') | k == k' = do
-destmode <- liftIO $ catchMaybeIO $ fileMode <$> R.getFileStatus f
-liftIO $ removeWhenExistsWith R.removeLink f
+let f' = fromOsPath f
+destmode <- liftIO $ catchMaybeIO $
+fileMode <$> R.getFileStatus f'
+liftIO $ removeWhenExistsWith R.removeLink f'
 (ic, populated) <- replaceWorkTreeFile f $ \tmp -> do
 ok <- linkOrCopy k obj tmp destmode >>= \case
 Just _ -> thawContent tmp >> return True
@@ -47,23 +49,24 @@ populatePointerFile restage k obj f = go =<< liftIO (isPointerFile f)
 then return ic
 else return Nothing
 go _ = return Nothing
 {- Removes the content from a pointer file, replacing it with a pointer.
 -
 - Does not check if the pointer file is modified. -}
-depopulatePointerFile :: Key -> RawFilePath -> Annex ()
+depopulatePointerFile :: Key -> OsPath -> Annex ()
 depopulatePointerFile key file = do
-st <- liftIO $ catchMaybeIO $ R.getFileStatus file
+let file' = fromOsPath file
+st <- liftIO $ catchMaybeIO $ R.getFileStatus file'
 let mode = fmap fileMode st
 secureErase file
-liftIO $ removeWhenExistsWith R.removeLink file
+liftIO $ removeWhenExistsWith R.removeLink file'
 ic <- replaceWorkTreeFile file $ \tmp -> do
 liftIO $ writePointerFile tmp key mode
 #if ! defined(mingw32_HOST_OS)
 -- Don't advance mtime; this avoids unnecessary re-smudging
 -- by git in some cases.
 liftIO $ maybe noop
-(\t -> touch tmp t False)
+(\t -> touch (fromOsPath tmp) t False)
 (fmap Posix.modificationTimeHiRes st)
 #endif
 withTSDelta (liftIO . genInodeCache tmp)
@@ -41,18 +41,16 @@ import Config
 import Annex.Perms
 #endif
-import qualified System.FilePath.ByteString as P
 {- Checks if a given key's content is currently present. -}
 inAnnex :: Key -> Annex Bool
-inAnnex key = inAnnexCheck key $ liftIO . R.doesPathExist
+inAnnex key = inAnnexCheck key $ liftIO . R.doesPathExist . fromOsPath
 {- Runs an arbitrary check on a key's content. -}
-inAnnexCheck :: Key -> (RawFilePath -> Annex Bool) -> Annex Bool
+inAnnexCheck :: Key -> (OsPath -> Annex Bool) -> Annex Bool
 inAnnexCheck key check = inAnnex' id False check key
 {- inAnnex that performs an arbitrary check of the key's content. -}
-inAnnex' :: (a -> Bool) -> a -> (RawFilePath -> Annex a) -> Key -> Annex a
+inAnnex' :: (a -> Bool) -> a -> (OsPath -> Annex a) -> Key -> Annex a
 inAnnex' isgood bad check key = withObjectLoc key $ \loc -> do
 r <- check loc
 if isgood r
@@ -75,7 +73,7 @@ inAnnex' isgood bad check key = withObjectLoc key $ \loc -> do
 objectFileExists :: Key -> Annex Bool
 objectFileExists key =
 calcRepo (gitAnnexLocation key)
->>= liftIO . R.doesPathExist
+>>= liftIO . doesFileExist
 {- A safer check; the key's content must not only be present, but
 - is not in the process of being removed. -}
@@ -93,7 +91,7 @@ inAnnexSafe key = inAnnex' (fromMaybe True) (Just False) go key
 {- The content file must exist, but the lock file generally
 - won't exist unless a removal is in process. -}
 checklock (Just lockfile) contentfile =
-ifM (liftIO $ doesFileExist (fromRawFilePath contentfile))
+ifM (liftIO $ doesFileExist contentfile)
 ( checkOr is_unlocked lockfile
 , return is_missing
 )
@@ -102,7 +100,7 @@ inAnnexSafe key = inAnnex' (fromMaybe True) (Just False) go key
 Just True -> is_locked
 Just False -> is_unlocked
 #else
-checklock Nothing contentfile = liftIO $ ifM (doesFileExist (fromRawFilePath contentfile))
+checklock Nothing contentfile = liftIO $ ifM (doesFileExist contentfile)
 ( lockShared contentfile >>= \case
 Nothing -> return is_locked
 Just lockhandle -> do
@@ -113,7 +111,7 @@ inAnnexSafe key = inAnnex' (fromMaybe True) (Just False) go key
 {- In Windows, see if we can take a shared lock. If so,
 - remove the lock file to clean up after ourselves. -}
 checklock (Just lockfile) contentfile =
-ifM (liftIO $ doesFileExist (fromRawFilePath contentfile))
+ifM (liftIO $ doesFileExist contentfile)
 ( modifyContentDir lockfile $ liftIO $
 lockShared lockfile >>= \case
 Nothing -> return is_locked
@@ -134,7 +132,7 @@ inAnnexSafe key = inAnnex' (fromMaybe True) (Just False) go key
 - content locking works, from running at the same time as content is locked
 - using the old method.
 -}
-withContentLockFile :: Key -> (Maybe RawFilePath -> Annex a) -> Annex a
+withContentLockFile :: Key -> (Maybe OsPath -> Annex a) -> Annex a
 withContentLockFile k a = do
 v <- getVersion
 if versionNeedsWritableContentFiles v
@@ -146,7 +144,7 @@ withContentLockFile k a = do
 - will switch over to v10 content lock files at the
 - right time. -}
 gitdir <- fromRepo Git.localGitDir
-let gitconfig = gitdir P.</> "config"
+let gitconfig = gitdir </> literalOsPath "config"
 ic <- withTSDelta (liftIO . genInodeCache gitconfig)
 oldic <- Annex.getState Annex.gitconfiginodecache
 v' <- if fromMaybe False (compareStrong <$> ic <*> oldic)
@@ -161,7 +159,7 @@ withContentLockFile k a = do
 where
 go v = contentLockFile k v >>= a
-contentLockFile :: Key -> Maybe RepoVersion -> Annex (Maybe RawFilePath)
+contentLockFile :: Key -> Maybe RepoVersion -> Annex (Maybe OsPath)
 #ifndef mingw32_HOST_OS
 {- Older versions of git-annex locked content files themselves, but newer
 - versions use a separate lock file, to better support repos shared
@@ -177,7 +175,7 @@ contentLockFile key _ = Just <$> calcRepo (gitAnnexContentLock key)
 #endif
 {- Performs an action, passing it the location to use for a key's content. -}
-withObjectLoc :: Key -> (RawFilePath -> Annex a) -> Annex a
+withObjectLoc :: Key -> (OsPath -> Annex a) -> Annex a
 withObjectLoc key a = a =<< calcRepo (gitAnnexLocation key)
 {- Check if a file contains the unmodified content of the key.
@@ -185,7 +183,7 @@ withObjectLoc key a = a =<< calcRepo (gitAnnexLocation key)
 - The expensive way to tell is to do a verification of its content.
 - The cheaper way is to see if the InodeCache for the key matches the
 - file. -}
-isUnmodified :: Key -> RawFilePath -> Annex Bool
+isUnmodified :: Key -> OsPath -> Annex Bool
 isUnmodified key f =
 withTSDelta (liftIO . genInodeCache f) >>= \case
 Just fc -> do
@@ -193,7 +191,7 @@ isUnmodified key f =
 isUnmodified' key f fc ic
 Nothing -> return False
-isUnmodified' :: Key -> RawFilePath -> InodeCache -> [InodeCache] -> Annex Bool
+isUnmodified' :: Key -> OsPath -> InodeCache -> [InodeCache] -> Annex Bool
 isUnmodified' = isUnmodifiedLowLevel Database.Keys.addInodeCaches
 {- Cheap check if a file contains the unmodified content of the key,
@@ -206,7 +204,7 @@ isUnmodified' = isUnmodifiedLowLevel Database.Keys.addInodeCaches
 - this may report a false positive when repeated edits are made to a file
 - within a small time window (eg 1 second).
 -}
-isUnmodifiedCheap :: Key -> RawFilePath -> Annex Bool
+isUnmodifiedCheap :: Key -> OsPath -> Annex Bool
 isUnmodifiedCheap key f = maybe (pure False) (isUnmodifiedCheap' key)
 =<< withTSDelta (liftIO . genInodeCache f)
@@ -12,7 +12,7 @@ import Annex.Verify
 import Annex.InodeSentinal
 import Utility.InodeCache
-isUnmodifiedLowLevel :: (Key -> [InodeCache] -> Annex ()) -> Key -> RawFilePath -> InodeCache -> [InodeCache] -> Annex Bool
+isUnmodifiedLowLevel :: (Key -> [InodeCache] -> Annex ()) -> Key -> OsPath -> InodeCache -> [InodeCache] -> Annex Bool
 isUnmodifiedLowLevel addinodecaches key f fc ic =
 isUnmodifiedCheapLowLevel fc ic <||> expensivecheck
 where
@@ -51,7 +51,7 @@ tryCopyCoW (CopyCoWTried copycowtried) src dest meterupdate =
 -- CoW is known to work, so delete
 -- dest if it exists in order to do a fast
 -- CoW copy.
-void $ tryIO $ removeFile dest
+void $ tryIO $ removeFile dest'
 docopycow
 , return False
 )
@@ -60,18 +60,18 @@ tryCopyCoW (CopyCoWTried copycowtried) src dest meterupdate =
 docopycow = watchFileSize dest' meterupdate $ const $
 copyCoW CopyTimeStamps src dest
-dest' = toRawFilePath dest
+dest' = toOsPath dest
 -- Check if the dest file already exists, which would prevent
 -- probing CoW. If the file exists but is empty, there's no benefit
 -- to resuming from it when CoW does not work, so remove it.
 destfilealreadypopulated =
-tryIO (R.getFileStatus dest') >>= \case
+tryIO (R.getFileStatus (toRawFilePath dest)) >>= \case
 Left _ -> return False
 Right st -> do
 sz <- getFileSize' dest' st
 if sz == 0
-then tryIO (removeFile dest) >>= \case
+then tryIO (removeFile dest') >>= \case
 Right () -> return False
 Left _ -> return True
 else return True
@@ -111,14 +111,15 @@ fileCopier copycowtried src dest meterupdate iv =
 docopy = do
 -- The file might have had the write bit removed,
 -- so make sure we can write to it.
-void $ tryIO $ allowWrite dest'
+void $ tryIO $ allowWrite (toOsPath dest)
 withBinaryFile src ReadMode $ \hsrc ->
 fileContentCopier hsrc dest meterupdate iv
 -- Copy src mode and mtime.
 mode <- fileMode <$> R.getFileStatus (toRawFilePath src)
-mtime <- utcTimeToPOSIXSeconds <$> getModificationTime src
+mtime <- utcTimeToPOSIXSeconds
+<$> getModificationTime (toOsPath src)
 R.setFileMode dest' mode
 touch dest' mtime False
@@ -23,7 +23,6 @@ import qualified Data.List.NonEmpty as NE
 import qualified Data.ByteArray as BA
 import qualified Data.ByteArray.Encoding as BA
 import qualified Data.ByteString as S
-import qualified System.FilePath.ByteString as P
 import Common
 import Key
@@ -32,7 +31,7 @@ import Types.Difference
 import Utility.Hash
 import Utility.MD5
-type Hasher = Key -> RawFilePath
+type Hasher = Key -> OsPath
 -- Number of hash levels to use. 2 is the default.
 newtype HashLevels = HashLevels Int
@@ -51,7 +50,7 @@ configHashLevels d config
 | hasDifference d (annexDifferences config) = HashLevels 1
 | otherwise = def
-branchHashDir :: GitConfig -> Key -> S.ByteString
+branchHashDir :: GitConfig -> Key -> OsPath
 branchHashDir = hashDirLower . branchHashLevels
 {- Two different directory hashes may be used. The mixed case hash
@@ -64,9 +63,10 @@ branchHashDir = hashDirLower . branchHashLevels
 dirHashes :: NE.NonEmpty (HashLevels -> Hasher)
 dirHashes = hashDirLower NE.:| [hashDirMixed]
-hashDirs :: HashLevels -> Int -> S.ByteString -> RawFilePath
-hashDirs (HashLevels 1) sz s = P.addTrailingPathSeparator $ S.take sz s
-hashDirs _ sz s = P.addTrailingPathSeparator $ h P.</> t
+hashDirs :: HashLevels -> Int -> S.ByteString -> OsPath
+hashDirs (HashLevels 1) sz s = addTrailingPathSeparator $
+toOsPath (S.take sz s)
+hashDirs _ sz s = addTrailingPathSeparator $ toOsPath h </> toOsPath t
 where
 (h, t) = S.splitAt sz s
@@ -108,7 +108,7 @@ handleDropsFrom locs rs reason fromhere key afile si preverified runner = do
 [ "dropped"
 , case afile of
 AssociatedFile Nothing -> serializeKey key
-AssociatedFile (Just af) -> fromRawFilePath af
+AssociatedFile (Just af) -> fromOsPath af
 , "(from " ++ maybe "here" show u ++ ")"
 , "(copies now " ++ show (have - 1) ++ ")"
 , ": " ++ reason
@@ -85,9 +85,9 @@ startExternalAddonProcess basecmd ps pid = do
 runerr (Just cmd) =
 return $ Left $ ProgramFailure $
-"Cannot run " ++ cmd ++ " -- Make sure it's executable and that its dependencies are installed."
+"Cannot run " ++ fromOsPath cmd ++ " -- Make sure it's executable and that its dependencies are installed."
 runerr Nothing = do
-path <- intercalate ":" <$> getSearchPath
+path <- intercalate ":" . map fromOsPath <$> getSearchPath
 return $ Left $ ProgramNotInstalled $
 "Cannot run " ++ basecmd ++ " -- It is not installed in PATH (" ++ path ++ ")"
@@ -5,6 +5,7 @@
 - Licensed under the GNU AGPL version 3 or higher.
 -}
+{-# LANGUAGE OverloadedStrings #-}
 {-# LANGUAGE CPP #-}
 module Annex.FileMatcher (
@@ -56,14 +57,14 @@ import Data.Either
 import qualified Data.Set as S
 import Control.Monad.Writer
-type GetFileMatcher = RawFilePath -> Annex (FileMatcher Annex)
+type GetFileMatcher = OsPath -> Annex (FileMatcher Annex)
-checkFileMatcher :: LiveUpdate -> GetFileMatcher -> RawFilePath -> Annex Bool
+checkFileMatcher :: LiveUpdate -> GetFileMatcher -> OsPath -> Annex Bool
 checkFileMatcher lu getmatcher file =
 checkFileMatcher' lu getmatcher file (return True)
 -- | Allows running an action when no matcher is configured for the file.
-checkFileMatcher' :: LiveUpdate -> GetFileMatcher -> RawFilePath -> Annex Bool -> Annex Bool
+checkFileMatcher' :: LiveUpdate -> GetFileMatcher -> OsPath -> Annex Bool -> Annex Bool
 checkFileMatcher' lu getmatcher file notconfigured = do
 matcher <- getmatcher file
 checkMatcher matcher Nothing afile lu S.empty notconfigured d
@@ -120,7 +121,7 @@ checkMatcher' (matcher, (MatcherDesc matcherdesc)) mi lu notpresent =
 fromMaybe mempty descmsg <> UnquotedString s
 return False
-fileMatchInfo :: RawFilePath -> Maybe Key -> Annex MatchInfo
+fileMatchInfo :: OsPath -> Maybe Key -> Annex MatchInfo
 fileMatchInfo file mkey = do
 matchfile <- getTopFilePath <$> inRepo (toTopFilePath file)
 return $ MatchingFile FileInfo
@@ -160,7 +161,7 @@ parseToken l t = case syntaxToken t of
 tokenizeMatcher :: String -> [String]
 tokenizeMatcher = filter (not . null) . concatMap splitparens . words
 where
-splitparens = segmentDelim (`elem` "()")
+splitparens = segmentDelim (`elem` ("()" :: String))
 commonTokens :: LimitBy -> [ParseToken (MatchFiles Annex)]
 commonTokens lb =
@@ -201,7 +202,7 @@ preferredContentTokens pcd =
 , ValueToken "fullysizebalanced" (usev $ limitFullySizeBalanced (repoUUID pcd) (getGroupMap pcd))
 ] ++ commonTokens LimitAnnexFiles
 where
-preferreddir = maybe "public" fromProposedAccepted $
+preferreddir = toOsPath $ maybe "public" fromProposedAccepted $
 M.lookup preferreddirField =<< (`M.lookup` configMap pcd) =<< repoUUID pcd
 preferredContentParser :: [ParseToken (MatchFiles Annex)] -> String -> [ParseResult (MatchFiles Annex)]
@@ -18,10 +18,11 @@ import Utility.SafeCommand
 import Utility.Directory
 import Utility.Exception
 import Utility.Monad
-import Utility.FileSystemEncoding
 import Utility.SystemDirectory
+import Utility.OsPath
 import qualified Utility.RawFilePath as R
 import Utility.PartialPrelude
+import qualified Utility.OsString as OS
 import System.IO
 import Data.List
@@ -29,8 +30,6 @@ import Data.Maybe
 import Control.Monad
 import Control.Monad.IfElse
 import qualified Data.Map as M
-import qualified Data.ByteString as S
-import System.FilePath.ByteString
 import Control.Applicative
 import Prelude
@@ -109,28 +108,30 @@ fixupUnusualRepos r@(Repo { location = l@(Local { worktree = Just w, gitdir = d
 , return r
 )
 where
-dotgit = w </> ".git"
+dotgit = w </> literalOsPath ".git"
-replacedotgit = whenM (doesFileExist (fromRawFilePath dotgit)) $ do
+replacedotgit = whenM (doesFileExist dotgit) $ do
 linktarget <- relPathDirToFile w d
-removeWhenExistsWith R.removeLink dotgit
-R.createSymbolicLink linktarget dotgit
+let dotgit' = fromOsPath dotgit
+removeWhenExistsWith R.removeLink dotgit'
+R.createSymbolicLink (fromOsPath linktarget) dotgit'
 -- Unsetting a config fails if it's not set, so ignore failure.
 unsetcoreworktree = void $ Git.Config.unset "core.worktree" r
-worktreefixup =
+worktreefixup = do
 -- git-worktree sets up a "commondir" file that contains
 -- the path to the main git directory.
 -- Using --separate-git-dir does not.
-catchDefaultIO Nothing (headMaybe . lines <$> readFile (fromRawFilePath (d </> "commondir"))) >>= \case
+let commondirfile = fromOsPath (d </> literalOsPath "commondir")
+catchDefaultIO Nothing (headMaybe . lines <$> readFile commondirfile) >>= \case
 Just gd -> do
 -- Make the worktree's git directory
 -- contain an annex symlink to the main
 -- repository's annex directory.
-let linktarget = toRawFilePath gd </> "annex"
-R.createSymbolicLink linktarget
-(dotgit </> "annex")
+let linktarget = toOsPath gd </> literalOsPath "annex"
+R.createSymbolicLink (fromOsPath linktarget) $
+fromOsPath $ dotgit </> literalOsPath "annex"
 Nothing -> return ()
 -- Repo adjusted, so that symlinks to objects that get checked
@@ -143,7 +144,7 @@ fixupUnusualRepos r _ = return r
 needsSubmoduleFixup :: Repo -> Bool
 needsSubmoduleFixup (Repo { location = (Local { worktree = Just _, gitdir = d }) }) =
-(".git" </> "modules") `S.isInfixOf` d
+(literalOsPath ".git" </> literalOsPath "modules") `OS.isInfixOf` d
 needsSubmoduleFixup _ = False
 needsGitLinkFixup :: Repo -> IO Bool
@@ -151,6 +152,6 @@ needsGitLinkFixup (Repo { location = (Local { worktree = Just wt, gitdir = d })
 -- Optimization: Avoid statting .git in the common case; only
 -- when the gitdir is not in the usual place inside the worktree
 -- might .git be a file.
-| wt </> ".git" == d = return False
-| otherwise = doesFileExist (fromRawFilePath (wt </> ".git"))
+| wt </> literalOsPath ".git" == d = return False
+| otherwise = doesFileExist (wt </> literalOsPath ".git")
 needsGitLinkFixup _ = return False
@@ -23,7 +23,7 @@ import qualified Annex.Queue
 import Config.Smudge
 {- Runs an action using a different git index file. -}
-withIndexFile :: AltIndexFile -> (FilePath -> Annex a) -> Annex a
+withIndexFile :: AltIndexFile -> (OsPath -> Annex a) -> Annex a
 withIndexFile i = withAltRepo usecachedgitenv restoregitenv
 where
 -- This is an optimisation. Since withIndexFile is run repeatedly,
@@ -58,7 +58,7 @@ withIndexFile i = withAltRepo usecachedgitenv restoregitenv
 f <- indexEnvVal $ case i of
 AnnexIndexFile -> gitAnnexIndex g
 ViewIndexFile -> gitAnnexViewIndex g
-g' <- addGitEnv g indexEnv f
+g' <- addGitEnv g indexEnv (fromOsPath f)
 return (g', f)
 restoregitenv g g' = g' { gitEnv = gitEnv g }
@@ -66,13 +66,13 @@ withIndexFile i = withAltRepo usecachedgitenv restoregitenv
 {- Runs an action using a different git work tree.
 -
 - Smudge and clean filters are disabled in this work tree. -}
-withWorkTree :: FilePath -> Annex a -> Annex a
+withWorkTree :: OsPath -> Annex a -> Annex a
 withWorkTree d a = withAltRepo
 (\g -> return $ (g { location = modlocation (location g), gitGlobalOpts = gitGlobalOpts g ++ bypassSmudgeConfig }, ()))
 (\g g' -> g' { location = location g, gitGlobalOpts = gitGlobalOpts g })
 (const a)
 where
-modlocation l@(Local {}) = l { worktree = Just (toRawFilePath d) }
+modlocation l@(Local {}) = l { worktree = Just d }
 modlocation _ = giveup "withWorkTree of non-local git repo"
 {- Runs an action with the git index file and HEAD, and a few other
@@ -83,13 +83,13 @@ withWorkTree d a = withAltRepo
 -
 - Needs git 2.2.0 or newer.
 -}
-withWorkTreeRelated :: FilePath -> Annex a -> Annex a
+withWorkTreeRelated :: OsPath -> Annex a -> Annex a
 withWorkTreeRelated d a = withAltRepo modrepo unmodrepo (const a)
 where
 modrepo g = liftIO $ do
-g' <- addGitEnv g "GIT_COMMON_DIR" . fromRawFilePath
+g' <- addGitEnv g "GIT_COMMON_DIR" . fromOsPath
 =<< absPath (localGitDir g)
-g'' <- addGitEnv g' "GIT_DIR" d
+g'' <- addGitEnv g' "GIT_DIR" (fromOsPath d)
 return (g'' { gitEnvOverridesGitDir = True }, ())
 unmodrepo g g' = g'
 { gitEnv = gitEnv g
@@ -28,7 +28,7 @@ hashObjectStop = maybe noop stop =<< Annex.getState Annex.hashobjecthandle
 liftIO $ freeResourcePool p Git.HashObject.hashObjectStop
 Annex.changeState $ \s -> s { Annex.hashobjecthandle = Nothing }
-hashFile :: RawFilePath -> Annex Sha
+hashFile :: OsPath -> Annex Sha
 hashFile f = withHashObjectHandle $ \h ->
 liftIO $ Git.HashObject.hashFile h f
@@ -21,10 +21,11 @@ import Utility.Shell
 import qualified Data.Map as M
 preCommitHook :: Git.Hook
-preCommitHook = Git.Hook "pre-commit" (mkHookScript "git annex pre-commit .") []
+preCommitHook = Git.Hook (literalOsPath "pre-commit")
+(mkHookScript "git annex pre-commit .") []
 postReceiveHook :: Git.Hook
-postReceiveHook = Git.Hook "post-receive"
+postReceiveHook = Git.Hook (literalOsPath "post-receive")
 -- Only run git-annex post-receive when git-annex supports it,
 -- to avoid failing if the repository with this hook is used
 -- with an older version of git-annex.
@@ -34,10 +35,10 @@ postReceiveHook = Git.Hook "post-receive"
 ]
 postCheckoutHook :: Git.Hook
-postCheckoutHook = Git.Hook "post-checkout" smudgeHook []
+postCheckoutHook = Git.Hook (literalOsPath "post-checkout") smudgeHook []
 postMergeHook :: Git.Hook
-postMergeHook = Git.Hook "post-merge" smudgeHook []
+postMergeHook = Git.Hook (literalOsPath "post-merge") smudgeHook []
 -- Older versions of git-annex didn't support this command, but neither did
 -- they support v7 repositories.
@@ -45,28 +46,28 @@ smudgeHook :: String
 smudgeHook = mkHookScript "git annex smudge --update"
 preCommitAnnexHook :: Git.Hook
-preCommitAnnexHook = Git.Hook "pre-commit-annex" "" []
+preCommitAnnexHook = Git.Hook (literalOsPath "pre-commit-annex") "" []
 postUpdateAnnexHook :: Git.Hook
-postUpdateAnnexHook = Git.Hook "post-update-annex" "" []
+postUpdateAnnexHook = Git.Hook (literalOsPath "post-update-annex") "" []
 preInitAnnexHook :: Git.Hook
-preInitAnnexHook = Git.Hook "pre-init-annex" "" []
+preInitAnnexHook = Git.Hook (literalOsPath "pre-init-annex") "" []
 freezeContentAnnexHook :: Git.Hook
-freezeContentAnnexHook = Git.Hook "freezecontent-annex" "" []
+freezeContentAnnexHook = Git.Hook (literalOsPath "freezecontent-annex") "" []
 thawContentAnnexHook :: Git.Hook
-thawContentAnnexHook = Git.Hook "thawcontent-annex" "" []
+thawContentAnnexHook = Git.Hook (literalOsPath "thawcontent-annex") "" []
 secureEraseAnnexHook :: Git.Hook
-secureEraseAnnexHook = Git.Hook "secure-erase-annex" "" []
+secureEraseAnnexHook = Git.Hook (literalOsPath "secure-erase-annex") "" []
 commitMessageAnnexHook :: Git.Hook
-commitMessageAnnexHook = Git.Hook "commitmessage-annex" "" []
+commitMessageAnnexHook = Git.Hook (literalOsPath "commitmessage-annex") "" []
 httpHeadersAnnexHook :: Git.Hook
-httpHeadersAnnexHook = Git.Hook "http-headers-annex" "" []
+httpHeadersAnnexHook = Git.Hook (literalOsPath "http-headers-annex") "" []
 mkHookScript :: String -> String
 mkHookScript s = unlines
@@ -87,8 +88,8 @@ hookWarning :: Git.Hook -> String -> Annex ()
 hookWarning h msg = do
 r <- gitRepo
 warning $ UnquotedString $
-fromRawFilePath (Git.hookName h) ++
-" hook (" ++ fromRawFilePath (Git.hookFile h r) ++ ") " ++ msg
+fromOsPath (Git.hookName h) ++
+" hook (" ++ fromOsPath (Git.hookFile h r) ++ ") " ++ msg
 {- To avoid checking if the hook exists every time, the existing hooks
 - are cached. -}
@@ -121,7 +122,7 @@ runAnnexHook' hook commandcfg = ifM (doesAnnexHookExist hook)
 ( return Nothing
 , do
 h <- fromRepo (Git.hookFile hook)
-commandfailed (fromRawFilePath h)
+commandfailed (fromOsPath h)
 )
 runcommandcfg = commandcfg <$> Annex.getGitConfig >>= \case
 Nothing -> return Nothing
@@ -132,18 +133,19 @@ runAnnexHook' hook commandcfg = ifM (doesAnnexHookExist hook)
 )
 commandfailed c = return $ Just c
-runAnnexPathHook :: String -> Git.Hook -> (GitConfig -> Maybe String) -> RawFilePath -> Annex Bool
+runAnnexPathHook :: String -> Git.Hook -> (GitConfig -> Maybe String) -> OsPath -> Annex Bool
 runAnnexPathHook pathtoken hook commandcfg p = ifM (doesAnnexHookExist hook)
 ( runhook
 , runcommandcfg
 )
 where
-runhook = inRepo $ Git.runHook boolSystem hook [ File (fromRawFilePath p) ]
+runhook = inRepo $ Git.runHook boolSystem hook [ File p' ]
 runcommandcfg = commandcfg <$> Annex.getGitConfig >>= \case
 Nothing -> return True
 Just basecmd -> liftIO $
 boolSystem "sh" [Param "-c", Param $ gencmd basecmd]
-gencmd = massReplace [ (pathtoken, shellEscape (fromRawFilePath p)) ]
+gencmd = massReplace [ (pathtoken, shellEscape p') ]
+p' = fromOsPath p
 outputOfAnnexHook :: Git.Hook -> (GitConfig -> Maybe String) -> Annex (Maybe String)
 outputOfAnnexHook hook commandcfg = ifM (doesAnnexHookExist hook)
@@ -69,7 +69,6 @@ import Control.Concurrent.STM
 import qualified Data.Map.Strict as M
 import qualified Data.Set as S
 import qualified System.FilePath.Posix.ByteString as Posix
-import qualified System.FilePath.ByteString as P
 import qualified Data.ByteArray.Encoding as BA
 {- Configures how to build an import tree. -}
@@ -154,7 +153,7 @@ recordImportTree remote importtreeconfig addunlockedmatcher imported = do
 let subtreeref = Ref $
 fromRef' finaltree
 <> ":"
-<> getTopFilePath dir
+<> fromOsPath (getTopFilePath dir)
 in fromMaybe emptyTree
 <$> inRepo (Git.Ref.tree subtreeref)
 updateexportdb importedtree
@@ -349,11 +348,11 @@ mkImportTreeItem maddunlockedmatcher msubdir loc v = case v of
 lf = fromImportLocation loc
 treepath = asTopFilePath lf
 topf = asTopFilePath $
-maybe lf (\sd -> getTopFilePath sd P.</> lf) msubdir
+maybe lf (\sd -> getTopFilePath sd </> lf) msubdir
 mklink k = do
 relf <- fromRepo $ fromTopFilePath topf
 symlink <- calcRepo $ gitAnnexLink relf k
-linksha <- hashSymlink symlink
+linksha <- hashSymlink (fromOsPath symlink)
 return $ TreeItem treepath (fromTreeItemType TreeSymlink) linksha
 mkpointer k = TreeItem treepath (fromTreeItemType TreeFile)
 <$> hashPointerFile k
@@ -429,7 +428,8 @@ buildImportTreesGeneric converttree basetree msubdir importable@(ImportableConte
 -- Full directory prefix where the sub tree is located.
 let fullprefix = asTopFilePath $ case msubdir of
 Nothing -> subdir
-Just d -> getTopFilePath d Posix.</> subdir
+Just d -> toOsPath $
+fromOsPath (getTopFilePath d) Posix.</> fromOsPath subdir
 Tree ts <- converttree (Just fullprefix) $
 map (\(p, i) -> (mkImportLocation p, i))
 (importableContentsSubTree c)
@@ -853,7 +853,7 @@ importKeys remote importtreeconfig importcontent thirdpartypopulated importablec
 let af = AssociatedFile (Just f)
 let downloader p' tmpfile = do
 _ <- Remote.retrieveExportWithContentIdentifier
-ia loc [cid] (fromRawFilePath tmpfile)
+ia loc [cid] tmpfile
 (Left k)
 (combineMeterUpdate p' p)
 ok <- moveAnnex k af tmpfile
@@ -871,7 +871,7 @@ importKeys remote importtreeconfig importcontent thirdpartypopulated importablec
 doimportsmall cidmap loc cid sz p = do
 let downloader tmpfile = do
 (k, _) <- Remote.retrieveExportWithContentIdentifier
-ia loc [cid] (fromRawFilePath tmpfile)
+ia loc [cid] tmpfile
 (Right (mkkey tmpfile))
 p
 case keyGitSha k of
@@ -894,7 +894,7 @@ importKeys remote importtreeconfig importcontent thirdpartypopulated importablec
 let af = AssociatedFile (Just f)
 let downloader tmpfile p = do
 (k, _) <- Remote.retrieveExportWithContentIdentifier
-ia loc [cid] (fromRawFilePath tmpfile)
+ia loc [cid] tmpfile
 (Right (mkkey tmpfile))
 p
 case keyGitSha k of
@@ -950,7 +950,7 @@ importKeys remote importtreeconfig importcontent thirdpartypopulated importablec
 case importtreeconfig of
 ImportTree -> fromImportLocation loc
 ImportSubTree subdir _ ->
-getTopFilePath subdir P.</> fromImportLocation loc
+getTopFilePath subdir </> fromImportLocation loc
 getcidkey cidmap db cid = liftIO $
 -- Avoiding querying the database when it's empty speeds up
@@ -1091,7 +1091,7 @@ getImportableContents r importtreeconfig ci matcher = do
 isknown <||> (matches <&&> notignored)
 where
 -- Checks, from least to most expensive.
-ingitdir = ".git" `elem` Posix.splitDirectories (fromImportLocation loc)
+ingitdir = ".git" `elem` Posix.splitDirectories (fromOsPath (fromImportLocation loc))
 matches = matchesImportLocation matcher loc sz
 isknown = isKnownImportLocation dbhandle loc
 notignored = notIgnoredImportLocation importtreeconfig ci loc
@@ -1120,6 +1120,6 @@ notIgnoredImportLocation importtreeconfig ci loc = not <$> checkIgnored ci f
 where
 f = case importtreeconfig of
 ImportSubTree dir _ ->
-getTopFilePath dir P.</> fromImportLocation loc
+getTopFilePath dir </> fromImportLocation loc
 ImportTree ->
 fromImportLocation loc
@@ -66,7 +66,7 @@ data LockedDown = LockedDown
 data LockDownConfig = LockDownConfig
 { lockingFile :: Bool
 -- ^ write bit removed during lock down
-, hardlinkFileTmpDir :: Maybe RawFilePath
+, hardlinkFileTmpDir :: Maybe OsPath
 -- ^ hard link to temp directory
 , checkWritePerms :: Bool
 -- ^ check that write perms are successfully removed
@@ -87,13 +87,13 @@ data LockDownConfig = LockDownConfig
 - Lockdown can fail if a file gets deleted, or if it's unable to remove
 - write permissions, and Nothing will be returned.
 -}
-lockDown :: LockDownConfig-> FilePath -> Annex (Maybe LockedDown)
+lockDown :: LockDownConfig-> OsPath -> Annex (Maybe LockedDown)
 lockDown cfg file = either
 (\e -> warning (UnquotedString (show e)) >> return Nothing)
 (return . Just)
 =<< lockDown' cfg file
-lockDown' :: LockDownConfig -> FilePath -> Annex (Either SomeException LockedDown)
+lockDown' :: LockDownConfig -> OsPath -> Annex (Either SomeException LockedDown)
 lockDown' cfg file = tryNonAsync $ ifM crippledFileSystem
 ( nohardlink
 , case hardlinkFileTmpDir cfg of
@@ -101,49 +101,46 @@ lockDown' cfg file = tryNonAsync $ ifM crippledFileSystem
 Just tmpdir -> withhardlink tmpdir
 )
 where
-file' = toRawFilePath file
 nohardlink = do
 setperms
 withTSDelta $ liftIO . nohardlink'
 nohardlink' delta = do
-cache <- genInodeCache file' delta
+cache <- genInodeCache file delta
 return $ LockedDown cfg $ KeySource
-{ keyFilename = file'
-, contentLocation = file'
+{ keyFilename = file
+, contentLocation = file
 , inodeCache = cache
 }
 withhardlink tmpdir = do
 setperms
 withTSDelta $ \delta -> liftIO $ do
-(tmpfile, h) <- openTmpFileIn (toOsPath tmpdir) $
-relatedTemplate $ toRawFilePath $
-"ingest-" ++ takeFileName file
+(tmpfile, h) <- openTmpFileIn tmpdir $
+relatedTemplate $ fromOsPath $
+literalOsPath "ingest-" <> takeFileName file
 hClose h
-let tmpfile' = fromOsPath tmpfile
-removeWhenExistsWith R.removeLink tmpfile'
-withhardlink' delta tmpfile'
+removeWhenExistsWith R.removeLink (fromOsPath tmpfile)
+withhardlink' delta tmpfile
 `catchIO` const (nohardlink' delta)
 withhardlink' delta tmpfile = do
-R.createLink file' tmpfile
+R.createLink (fromOsPath file) (fromOsPath tmpfile)
 cache <- genInodeCache tmpfile delta
 return $ LockedDown cfg $ KeySource
-{ keyFilename = file'
+{ keyFilename = file
 , contentLocation = tmpfile
 , inodeCache = cache
 }
 setperms = when (lockingFile cfg) $ do
-freezeContent file'
+freezeContent file
 when (checkWritePerms cfg) $ do
 qp <- coreQuotePath <$> Annex.getGitConfig
 maybe noop (giveup . decodeBS . quote qp)
-=<< checkLockedDownWritePerms file' file'
+=<< checkLockedDownWritePerms file file
-checkLockedDownWritePerms :: RawFilePath -> RawFilePath -> Annex (Maybe StringContainingQuotedPath)
+checkLockedDownWritePerms :: OsPath -> OsPath -> Annex (Maybe StringContainingQuotedPath)
 checkLockedDownWritePerms file displayfile = checkContentWritePerm file >>= return . \case
 Just False -> Just $ "Unable to remove all write permissions from "
 <> QuotedPath displayfile
@@ -167,7 +164,8 @@ ingestAdd' meterupdate ld@(Just (LockedDown cfg source)) mk = do
 then addSymlink f k mic
 else do
 mode <- liftIO $ catchMaybeIO $
-fileMode <$> R.getFileStatus (contentLocation source)
+fileMode <$> R.getFileStatus
+(fromOsPath (contentLocation source))
 stagePointerFile f mode =<< hashPointerFile k
 return (Just k)
@@ -188,7 +186,7 @@ ingest' preferredbackend meterupdate (Just (LockedDown cfg source)) mk restage =
 fst <$> genKey source meterupdate backend
 Just k -> return k
 let src = contentLocation source
-ms <- liftIO $ catchMaybeIO $ R.getFileStatus src
+ms <- liftIO $ catchMaybeIO $ R.getFileStatus (fromOsPath src)
 mcache <- maybe (pure Nothing) (liftIO . toInodeCache delta src) ms
 case (mcache, inodeCache source) of
 (_, Nothing) -> go k mcache
@@ -263,12 +261,12 @@ populateUnlockedFiles key source restage _ = do
 cleanCruft :: KeySource -> Annex ()
 cleanCruft source = when (contentLocation source /= keyFilename source) $
-liftIO $ removeWhenExistsWith R.removeLink $ contentLocation source
+liftIO $ removeWhenExistsWith removeFile $ contentLocation source
 -- If a worktree file was was hard linked to an annex object before,
 -- modifying the file would have caused the object to have the wrong
 -- content. Clean up from that.
-cleanOldKeys :: RawFilePath -> Key -> Annex ()
+cleanOldKeys :: OsPath -> Key -> Annex ()
 cleanOldKeys file newkey = do
 g <- Annex.gitRepo
 topf <- inRepo (toTopFilePath file)
@@ -293,37 +291,38 @@ cleanOldKeys file newkey = do
 {- On error, put the file back so it doesn't seem to have vanished.
 - This can be called before or after the symlink is in place. -}
-restoreFile :: RawFilePath -> Key -> SomeException -> Annex a
+restoreFile :: OsPath -> Key -> SomeException -> Annex a
 restoreFile file key e = do
 whenM (inAnnex key) $ do
-liftIO $ removeWhenExistsWith R.removeLink file
+liftIO $ removeWhenExistsWith removeFile file
 -- The key could be used by other files too, so leave the
 -- content in the annex, and make a copy back to the file.
-obj <- fromRawFilePath <$> calcRepo (gitAnnexLocation key)
-unlessM (liftIO $ copyFileExternal CopyTimeStamps obj (fromRawFilePath file)) $
-warning $ "Unable to restore content of " <> QuotedPath file <> "; it should be located in " <> QuotedPath (toRawFilePath obj)
+obj <- calcRepo (gitAnnexLocation key)
+unlessM (liftIO $ copyFileExternal CopyTimeStamps obj file) $
+warning $ "Unable to restore content of " <> QuotedPath file <> "; it should be located in " <> QuotedPath obj
 thawContent file
 throwM e
 {- Creates the symlink to the annexed content, returns the link target. -}
-makeLink :: RawFilePath -> Key -> Maybe InodeCache -> Annex LinkTarget
+makeLink :: OsPath -> Key -> Maybe InodeCache -> Annex LinkTarget
 makeLink file key mcache = flip catchNonAsync (restoreFile file key) $ do
-l <- calcRepo $ gitAnnexLink file key
+l <- fromOsPath <$> calcRepo (gitAnnexLink file key)
 replaceWorkTreeFile file $ makeAnnexLink l
 -- touch symlink to have same time as the original file,
 -- as provided in the InodeCache
 case mcache of
-Just c -> liftIO $ touch file (inodeCacheToMtime c) False
+Just c -> liftIO $
+touch (fromOsPath file) (inodeCacheToMtime c) False
 Nothing -> noop
 return l
 {- Creates the symlink to the annexed content, and stages it in git. -}
-addSymlink :: RawFilePath -> Key -> Maybe InodeCache -> Annex ()
+addSymlink :: OsPath -> Key -> Maybe InodeCache -> Annex ()
 addSymlink file key mcache = stageSymlink file =<< genSymlink file key mcache
-genSymlink :: RawFilePath -> Key -> Maybe InodeCache -> Annex Git.Sha
+genSymlink :: OsPath -> Key -> Maybe InodeCache -> Annex Git.Sha
 genSymlink file key mcache = do
 linktarget <- makeLink file key mcache
 hashSymlink linktarget
@@ -368,12 +367,12 @@ addUnlocked matcher mi contentpresent =
 -
 - When the content of the key is not accepted into the annex, returns False.
 -}
-addAnnexedFile :: AddUnlockedMatcher -> RawFilePath -> Key -> Maybe RawFilePath -> Annex Bool
+addAnnexedFile :: AddUnlockedMatcher -> OsPath -> Key -> Maybe OsPath -> Annex Bool
 addAnnexedFile matcher file key mtmp = ifM (addUnlocked matcher mi (isJust mtmp))
 ( do
 mode <- maybe
 (pure Nothing)
-(\tmp -> liftIO $ catchMaybeIO $ fileMode <$> R.getFileStatus tmp)
+(\tmp -> liftIO $ catchMaybeIO $ fileMode <$> R.getFileStatus (fromOsPath tmp))
 mtmp
 stagePointerFile file mode =<< hashPointerFile key
 Database.Keys.addAssociatedFile key =<< inRepo (toTopFilePath file)
@@ -411,7 +410,7 @@ addAnnexedFile matcher file key mtmp = ifM (addUnlocked matcher mi (isJust mtmp)
 {- Use with actions that add an already existing annex symlink or pointer
 - file. The warning avoids a confusing situation where the file got copied
 - from another git-annex repo, probably by accident. -}
-addingExistingLink :: RawFilePath -> Key -> Annex a -> Annex a
+addingExistingLink :: OsPath -> Key -> Annex a -> Annex a
 addingExistingLink f k a = do
 unlessM (isKnownKey k <||> inAnnex k) $ do
 islink <- isJust <$> isAnnexLink f
@ -56,6 +56,7 @@ import Annex.Perms
|
|||
#ifndef mingw32_HOST_OS
|
||||
import Utility.ThreadScheduler
|
||||
import qualified Utility.RawFilePath as R
|
||||
import qualified Utility.FileIO as F
|
||||
import Utility.FileMode
|
||||
import System.Posix.User
|
||||
import qualified Utility.LockFile.Posix as Posix
|
||||
|
@ -66,7 +67,6 @@ import Control.Monad.IO.Class (MonadIO)
|
|||
#ifndef mingw32_HOST_OS
|
||||
import System.PosixCompat.Files (ownerReadMode, isNamedPipe)
|
||||
import Data.Either
|
||||
import qualified System.FilePath.ByteString as P
|
||||
import Control.Concurrent.Async
|
||||
#endif
|
||||
|
||||
|
@ -99,21 +99,20 @@ initializeAllowed = noAnnexFileContent' >>= \case
|
|||
Just _ -> return False
|
||||
|
||||
noAnnexFileContent' :: Annex (Maybe String)
|
||||
noAnnexFileContent' = inRepo $
|
||||
noAnnexFileContent . fmap fromRawFilePath . Git.repoWorkTree
|
||||
noAnnexFileContent' = inRepo $ noAnnexFileContent . Git.repoWorkTree
|
||||
|
||||
genDescription :: Maybe String -> Annex UUIDDesc
|
||||
genDescription (Just d) = return $ UUIDDesc $ encodeBS d
|
||||
genDescription Nothing = do
|
||||
reldir <- liftIO . relHome . fromRawFilePath
|
||||
reldir <- liftIO . relHome
|
||||
=<< liftIO . absPath
|
||||
=<< fromRepo Git.repoPath
|
||||
hostname <- fromMaybe "" <$> liftIO getHostname
|
||||
let at = if null hostname then "" else "@"
|
||||
v <- liftIO myUserName
|
||||
return $ UUIDDesc $ encodeBS $ concat $ case v of
|
||||
Right username -> [username, at, hostname, ":", reldir]
|
||||
Left _ -> [hostname, ":", reldir]
|
||||
Right username -> [username, at, hostname, ":", fromOsPath reldir]
|
||||
Left _ -> [hostname, ":", fromOsPath reldir]
|
||||
|
||||
initialize :: Annex () -> Maybe String -> Maybe RepoVersion -> Annex ()
|
||||
initialize startupannex mdescription mversion = checkInitializeAllowed $ \initallowed -> do
|
||||
|
@ -238,12 +237,12 @@ autoInitializeAllowed = Annex.Branch.hasSibling <&&> objectDirNotPresent
|
|||
|
||||
objectDirNotPresent :: Annex Bool
|
||||
objectDirNotPresent = do
|
||||
d <- fromRawFilePath <$> fromRepo gitAnnexObjectDir
|
||||
d <- fromRepo gitAnnexObjectDir
|
||||
exists <- liftIO $ doesDirectoryExist d
|
||||
when exists $ guardSafeToUseRepo $
|
||||
giveup $ unwords $
|
||||
[ "This repository is not initialized for use"
|
||||
, "by git-annex, but " ++ d ++ " exists,"
|
||||
, "by git-annex, but " ++ fromOsPath d ++ " exists,"
|
||||
, "which indicates this repository was used by"
|
||||
, "git-annex before, and may have lost its"
|
||||
, "annex.uuid and annex.version configs. Either"
|
||||
|
@ -263,7 +262,7 @@ guardSafeToUseRepo a = ifM (inRepo Git.Config.checkRepoConfigInaccessible)
|
|||
, ""
|
||||
-- This mirrors git's wording.
|
||||
, "To add an exception for this directory, call:"
|
||||
, "\tgit config --global --add safe.directory " ++ fromRawFilePath p
|
||||
, "\tgit config --global --add safe.directory " ++ fromOsPath p
|
||||
]
|
||||
, a
|
||||
)
|
||||
|
@ -301,40 +300,39 @@ probeCrippledFileSystem = withEventuallyCleanedOtherTmp $ \tmp -> do
|
|||
|
||||
probeCrippledFileSystem'
|
||||
:: (MonadIO m, MonadCatch m)
|
||||
=> RawFilePath
|
||||
-> Maybe (RawFilePath -> m ())
|
||||
-> Maybe (RawFilePath -> m ())
|
||||
=> OsPath
|
||||
-> Maybe (OsPath -> m ())
|
||||
-> Maybe (OsPath -> m ())
|
||||
-> Bool
|
||||
-> m (Bool, [String])
|
||||
#ifdef mingw32_HOST_OS
|
||||
probeCrippledFileSystem' _ _ _ _ = return (True, [])
|
||||
#else
|
||||
probeCrippledFileSystem' tmp freezecontent thawcontent hasfreezehook = do
|
||||
let f = tmp P.</> "gaprobe"
|
||||
let f' = fromRawFilePath f
|
||||
liftIO $ writeFile f' ""
|
||||
r <- probe f'
|
||||
let f = tmp </> literalOsPath "gaprobe"
|
||||
liftIO $ F.writeFile' f ""
|
||||
r <- probe f
|
||||
void $ tryNonAsync $ (fromMaybe (liftIO . allowWrite) thawcontent) f
|
||||
liftIO $ removeFile f'
|
||||
liftIO $ removeFile f
|
||||
return r
|
||||
where
|
||||
probe f = catchDefaultIO (True, []) $ do
|
||||
let f2 = f ++ "2"
|
||||
liftIO $ removeWhenExistsWith R.removeLink (toRawFilePath f2)
|
||||
liftIO $ R.createSymbolicLink (toRawFilePath f) (toRawFilePath f2)
|
||||
liftIO $ removeWhenExistsWith R.removeLink (toRawFilePath f2)
|
||||
(fromMaybe (liftIO . preventWrite) freezecontent) (toRawFilePath f)
|
||||
let f2 = f <> literalOsPath "2"
|
||||
liftIO $ removeWhenExistsWith removeFile f2
|
||||
liftIO $ R.createSymbolicLink (fromOsPath f) (fromOsPath f2)
|
||||
liftIO $ removeWhenExistsWith removeFile f2
|
||||
(fromMaybe (liftIO . preventWrite) freezecontent) f
|
||||
-- Should be unable to write to the file (unless
|
||||
-- running as root). But some crippled
|
||||
-- filesystems ignore write bit removals or ignore
|
||||
-- permissions entirely.
|
||||
ifM ((== Just False) <$> liftIO (checkContentWritePerm' UnShared (toRawFilePath f) Nothing hasfreezehook))
|
||||
ifM ((== Just False) <$> liftIO (checkContentWritePerm' UnShared f Nothing hasfreezehook))
|
||||
( return (True, ["Filesystem does not allow removing write bit from files."])
|
||||
, liftIO $ ifM ((== 0) <$> getRealUserID)
|
||||
( return (False, [])
|
||||
, do
|
||||
r <- catchBoolIO $ do
|
||||
writeFile f "2"
|
||||
F.writeFile' f "2"
|
||||
return True
|
||||
if r
|
||||
then return (True, ["Filesystem allows writing to files whose write bit is not set."])
|
||||
|
@ -363,19 +361,19 @@ probeLockSupport :: Annex Bool
|
|||
probeLockSupport = return True
|
||||
#else
|
||||
probeLockSupport = withEventuallyCleanedOtherTmp $ \tmp -> do
|
||||
let f = tmp P.</> "lockprobe"
|
||||
let f = tmp </> literalOsPath "lockprobe"
|
||||
mode <- annexFileMode
|
||||
annexrunner <- Annex.makeRunner
|
||||
liftIO $ withAsync (warnstall annexrunner) (const (go f mode))
|
||||
where
|
||||
go f mode = do
|
||||
removeWhenExistsWith R.removeLink f
|
||||
removeWhenExistsWith removeFile f
|
||||
let locktest = bracket
|
||||
(Posix.lockExclusive (Just mode) f)
|
||||
Posix.dropLock
|
||||
(const noop)
|
||||
ok <- isRight <$> tryNonAsync locktest
|
||||
removeWhenExistsWith R.removeLink f
|
||||
removeWhenExistsWith removeFile f
|
||||
return ok
|
||||
|
||||
warnstall annexrunner = do
|
||||
|
@ -391,17 +389,17 @@ probeFifoSupport = do
|
|||
return False
|
||||
#else
|
||||
withEventuallyCleanedOtherTmp $ \tmp -> do
|
||||
let f = tmp P.</> "gaprobe"
|
||||
let f2 = tmp P.</> "gaprobe2"
|
||||
let f = tmp </> literalOsPath "gaprobe"
|
||||
let f2 = tmp </> literalOsPath "gaprobe2"
|
||||
liftIO $ do
|
||||
removeWhenExistsWith R.removeLink f
|
||||
removeWhenExistsWith R.removeLink f2
|
||||
removeWhenExistsWith removeFile f
|
||||
removeWhenExistsWith removeFile f2
|
||||
ms <- tryIO $ do
|
||||
R.createNamedPipe f ownerReadMode
|
||||
R.createLink f f2
|
||||
R.getFileStatus f
|
||||
removeWhenExistsWith R.removeLink f
|
||||
removeWhenExistsWith R.removeLink f2
|
||||
R.createNamedPipe (fromOsPath f) ownerReadMode
|
||||
R.createLink (fromOsPath f) (fromOsPath f2)
|
||||
R.getFileStatus (fromOsPath f)
|
||||
removeWhenExistsWith removeFile f
|
||||
removeWhenExistsWith removeFile f2
|
||||
return $ either (const False) isNamedPipe ms
|
||||
#endif
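The probes above now spell out their temporary file names as tmp </> literalOsPath "gaprobe" and so on. literalOsPath is a git-annex helper; assuming it simply encodes an ASCII literal into an OsPath, an equivalent can be sketched with the public encodeUtf, and the osp quasiquoter does the same job at compile time:

{-# LANGUAGE QuasiQuotes #-}
-- Sketch of a literalOsPath-style helper (assumption about its behaviour);
-- the names "gaprobe"/"gaprobe2" are just the probe files used above.
import Data.Maybe (fromMaybe)
import System.OsPath (OsPath, osp, encodeUtf, (</>))

literalOsPath' :: String -> OsPath
literalOsPath' s = fromMaybe (error ("bad literal: " ++ s)) (encodeUtf s)

probeNames :: OsPath -> (OsPath, OsPath)
probeNames tmp = (tmp </> literalOsPath' "gaprobe", tmp </> [osp|gaprobe2|])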
|
||||
|
||||
|
@ -473,14 +471,14 @@ autoEnableSpecialRemotes remotelist = do
|
|||
-- could result in password prompts for http credentials,
|
||||
-- which would then not end up cached in this process's state.
|
||||
_ <- remotelist
|
||||
rp <- fromRawFilePath <$> fromRepo Git.repoPath
|
||||
rp <- fromRepo Git.repoPath
|
||||
withNullHandle $ \nullh -> gitAnnexChildProcess "init"
|
||||
[ Param "--autoenable" ]
|
||||
(\p -> p
|
||||
{ std_out = UseHandle nullh
|
||||
, std_err = UseHandle nullh
|
||||
, std_in = UseHandle nullh
|
||||
, cwd = Just rp
|
||||
, cwd = Just (fromOsPath rp)
|
||||
}
|
||||
)
|
||||
(\_ _ _ pid -> void $ waitForProcess pid)
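The cwd field of CreateProcess is still a plain FilePath, which is why the repository path above is passed through fromOsPath. A small sketch of that boundary; decodeUtf stands in for fromOsPath, and the git invocation is illustrative only:

-- Sketch: converting an OsPath working directory at the process boundary.
import Control.Monad (void)
import System.OsPath (OsPath, decodeUtf)
import System.Process (CreateProcess(..), createProcess, proc, waitForProcess)

runIn :: OsPath -> IO ()
runIn rp = do
    rp' <- decodeUtf rp
    let p = (proc "git" ["annex", "init", "--autoenable"]) { cwd = Just rp' }
    (_, _, _, pid) <- createProcess p
    void $ waitForProcess pid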
|
||||
|
|
|
@@ -30,22 +30,22 @@ compareInodeCachesWith = ifM inodesChanged ( return Weakly, return Strongly )
{- Checks if one of the provided old InodeCache matches the current
- version of a file. -}
sameInodeCache :: RawFilePath -> [InodeCache] -> Annex Bool
sameInodeCache :: OsPath -> [InodeCache] -> Annex Bool
sameInodeCache file [] = do
fastDebug "Annex.InodeSentinal" $
fromRawFilePath file ++ " inode cache empty"
fromOsPath file ++ " inode cache empty"
return False
sameInodeCache file old = go =<< withTSDelta (liftIO . genInodeCache file)
where
go Nothing = do
fastDebug "Annex.InodeSentinal" $
fromRawFilePath file ++ " not present, cannot compare with inode cache"
fromOsPath file ++ " not present, cannot compare with inode cache"
return False
go (Just curr) = ifM (elemInodeCaches curr old)
( return True
, do
fastDebug "Annex.InodeSentinal" $
fromRawFilePath file ++ " (" ++ show curr ++ ") does not match inode cache (" ++ show old ++ ")"
fromOsPath file ++ " (" ++ show curr ++ ") does not match inode cache (" ++ show old ++ ")"
return False
)

@@ -99,7 +99,7 @@ createInodeSentinalFile evenwithobjects =
alreadyexists = liftIO . sentinalFileExists =<< annexSentinalFile
hasobjects
| evenwithobjects = pure False
| otherwise = liftIO . doesDirectoryExist . fromRawFilePath
| otherwise = liftIO . doesDirectoryExist
=<< fromRepo gitAnnexObjectDir

annexSentinalFile :: Annex SentinalFile
@ -26,13 +26,12 @@ import Annex.LockFile
|
|||
import Annex.BranchState
|
||||
import Types.BranchState
|
||||
import Utility.Directory.Stream
|
||||
import qualified Utility.RawFilePath as R
|
||||
import qualified Utility.FileIO as F
|
||||
import qualified Utility.OsString as OS
|
||||
|
||||
import qualified Data.Set as S
|
||||
import qualified Data.ByteString.Lazy as L
|
||||
import qualified Data.ByteString as B
|
||||
import qualified System.FilePath.ByteString as P
|
||||
import Data.ByteString.Builder
|
||||
import Data.Char
|
||||
|
||||
|
@ -83,7 +82,7 @@ privateUUIDsKnown' = not . S.null . annexPrivateRepos . Annex.gitconfig
|
|||
- interrupted write truncating information that was earlier read from the
|
||||
- file, and so losing data.
|
||||
-}
|
||||
setJournalFile :: Journalable content => JournalLocked -> RegardingUUID -> RawFilePath -> content -> Annex ()
|
||||
setJournalFile :: Journalable content => JournalLocked -> RegardingUUID -> OsPath -> content -> Annex ()
|
||||
setJournalFile _jl ru file content = withOtherTmp $ \tmp -> do
|
||||
st <- getState
|
||||
jd <- fromRepo =<< ifM (regardingPrivateUUID ru)
|
||||
|
@ -92,10 +91,10 @@ setJournalFile _jl ru file content = withOtherTmp $ \tmp -> do
|
|||
)
|
||||
-- journal file is written atomically
|
||||
let jfile = journalFile file
|
||||
let tmpfile = tmp P.</> jfile
|
||||
liftIO $ F.withFile (toOsPath tmpfile) WriteMode $ \h ->
|
||||
let tmpfile = tmp </> jfile
|
||||
liftIO $ F.withFile tmpfile WriteMode $ \h ->
|
||||
writeJournalHandle h content
|
||||
let dest = jd P.</> jfile
|
||||
let dest = jd </> jfile
|
||||
let mv = do
|
||||
liftIO $ moveFile tmpfile dest
|
||||
setAnnexFilePerm dest
|
||||
|
@ -103,20 +102,20 @@ setJournalFile _jl ru file content = withOtherTmp $ \tmp -> do
|
|||
-- exists
|
||||
mv `catchIO` (const (createAnnexDirectory jd >> mv))
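setJournalFile's write-to-tmp-then-move sequence above is the usual atomic replacement pattern. A minimal sketch, assuming the file-io package's System.File.OsPath and directory's System.Directory.OsPath; git-annex's moveFile additionally handles cross-filesystem moves, which renameFile below does not:

{-# LANGUAGE QuasiQuotes #-}
import System.OsPath (OsPath, osp, (</>))
import System.Directory.OsPath (renameFile)
import qualified System.File.OsPath as F
import qualified Data.ByteString as B

-- Write the new content in full under a temporary name, then rename it
-- over the destination, so readers never observe a partial journal file.
writeAtomically :: OsPath -> OsPath -> B.ByteString -> IO ()
writeAtomically tmpdir dest content = do
    let tmpfile = tmpdir </> [osp|journal.tmp|]
    F.writeFile' tmpfile content
    renameFile tmpfile dest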
|
||||
|
||||
newtype AppendableJournalFile = AppendableJournalFile (RawFilePath, RawFilePath)
|
||||
newtype AppendableJournalFile = AppendableJournalFile (OsPath, OsPath)
|
||||
|
||||
{- If the journal file does not exist, it cannot be appended to, because
|
||||
- that would overwrite whatever content the file has in the git-annex
|
||||
- branch. -}
|
||||
checkCanAppendJournalFile :: JournalLocked -> RegardingUUID -> RawFilePath -> Annex (Maybe AppendableJournalFile)
|
||||
checkCanAppendJournalFile :: JournalLocked -> RegardingUUID -> OsPath -> Annex (Maybe AppendableJournalFile)
|
||||
checkCanAppendJournalFile _jl ru file = do
|
||||
st <- getState
|
||||
jd <- fromRepo =<< ifM (regardingPrivateUUID ru)
|
||||
( return (gitAnnexPrivateJournalDir st)
|
||||
, return (gitAnnexJournalDir st)
|
||||
)
|
||||
let jfile = jd P.</> journalFile file
|
||||
ifM (liftIO $ R.doesPathExist jfile)
|
||||
let jfile = jd </> journalFile file
|
||||
ifM (liftIO $ doesFileExist jfile)
|
||||
( return (Just (AppendableJournalFile (jd, jfile)))
|
||||
, return Nothing
|
||||
)
|
||||
|
@ -134,7 +133,7 @@ checkCanAppendJournalFile _jl ru file = do
|
|||
-}
|
||||
appendJournalFile :: Journalable content => JournalLocked -> AppendableJournalFile -> content -> Annex ()
|
||||
appendJournalFile _jl (AppendableJournalFile (jd, jfile)) content = do
|
||||
let write = liftIO $ F.withFile (toOsPath jfile) ReadWriteMode $ \h -> do
|
||||
let write = liftIO $ F.withFile jfile ReadWriteMode $ \h -> do
|
||||
sz <- hFileSize h
|
||||
when (sz /= 0) $ do
|
||||
hSeek h SeekFromEnd (-1)
|
||||
|
@ -161,7 +160,7 @@ data JournalledContent
|
|||
-- information that were made after that journal file was written.
|
||||
|
||||
{- Gets any journalled content for a file in the branch. -}
|
||||
getJournalFile :: JournalLocked -> GetPrivate -> RawFilePath -> Annex JournalledContent
|
||||
getJournalFile :: JournalLocked -> GetPrivate -> OsPath -> Annex JournalledContent
|
||||
getJournalFile _jl = getJournalFileStale
|
||||
|
||||
data GetPrivate = GetPrivate Bool
|
||||
|
@ -179,7 +178,7 @@ data GetPrivate = GetPrivate Bool
|
|||
- (or is in progress when this is called), if the file content does not end
|
||||
- with a newline, it is truncated back to the previous newline.
|
||||
-}
|
||||
getJournalFileStale :: GetPrivate -> RawFilePath -> Annex JournalledContent
|
||||
getJournalFileStale :: GetPrivate -> OsPath -> Annex JournalledContent
|
||||
getJournalFileStale (GetPrivate getprivate) file = do
|
||||
st <- Annex.getState id
|
||||
let repo = Annex.repo st
|
||||
|
@ -205,7 +204,7 @@ getJournalFileStale (GetPrivate getprivate) file = do
|
|||
jfile = journalFile file
|
||||
getfrom d = catchMaybeIO $
|
||||
discardIncompleteAppend . L.fromStrict
|
||||
<$> F.readFile' (toOsPath (d P.</> jfile))
|
||||
<$> F.readFile' (d </> jfile)
|
||||
|
||||
-- Note that this forces read of the whole lazy bytestring.
|
||||
discardIncompleteAppend :: L.ByteString -> L.ByteString
|
||||
|
@ -224,18 +223,18 @@ discardIncompleteAppend v
|
|||
{- List of existing journal files in a journal directory, but without locking,
|
||||
- may miss new ones just being added, or may have false positives if the
|
||||
- journal is staged as it is run. -}
|
||||
getJournalledFilesStale :: (BranchState -> Git.Repo -> RawFilePath) -> Annex [RawFilePath]
|
||||
getJournalledFilesStale :: (BranchState -> Git.Repo -> OsPath) -> Annex [OsPath]
|
||||
getJournalledFilesStale getjournaldir = do
|
||||
bs <- getState
|
||||
repo <- Annex.gitRepo
|
||||
let d = getjournaldir bs repo
|
||||
fs <- liftIO $ catchDefaultIO [] $
|
||||
getDirectoryContents (fromRawFilePath d)
|
||||
return $ filter (`notElem` [".", ".."]) $
|
||||
map (fileJournal . toRawFilePath) fs
|
||||
getDirectoryContents d
|
||||
return $ filter (`notElem` dirCruft) $
|
||||
map fileJournal fs
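getJournalledFilesStale now gets OsPath entries back from the directory listing and filters out dirCruft ("." and ".."). A sketch of the same listing with System.Directory.OsPath's listDirectory, which is assumed here and already omits those two entries:

-- Sketch: list journal entries, treating a missing directory as empty.
import Control.Exception (IOException, catch)
import System.OsPath (OsPath)
import System.Directory.OsPath (listDirectory)

journalEntries :: OsPath -> IO [OsPath]
journalEntries d = listDirectory d `catch` ignore
  where
    ignore :: IOException -> IO [OsPath]
    ignore _ = return []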
|
||||
|
||||
{- Directory handle open on a journal directory. -}
|
||||
withJournalHandle :: (BranchState -> Git.Repo -> RawFilePath) -> (DirectoryHandle -> IO a) -> Annex a
|
||||
withJournalHandle :: (BranchState -> Git.Repo -> OsPath) -> (DirectoryHandle -> IO a) -> Annex a
|
||||
withJournalHandle getjournaldir a = do
|
||||
bs <- getState
|
||||
repo <- Annex.gitRepo
|
||||
|
@ -244,15 +243,15 @@ withJournalHandle getjournaldir a = do
|
|||
where
|
||||
-- avoid overhead of creating the journal directory when it already
|
||||
-- exists
|
||||
opendir d = liftIO (openDirectory d)
|
||||
opendir d = liftIO (openDirectory (fromOsPath d))
|
||||
`catchIO` (const (createAnnexDirectory d >> opendir d))
|
||||
|
||||
{- Checks if there are changes in the journal. -}
|
||||
journalDirty :: (BranchState -> Git.Repo -> RawFilePath) -> Annex Bool
|
||||
journalDirty :: (BranchState -> Git.Repo -> OsPath) -> Annex Bool
|
||||
journalDirty getjournaldir = do
|
||||
st <- getState
|
||||
d <- fromRepo (getjournaldir st)
|
||||
liftIO $ isDirectoryPopulated d
|
||||
liftIO $ isDirectoryPopulated (fromOsPath d)
|
||||
|
||||
{- Produces a filename to use in the journal for a file on the branch.
|
||||
- The filename does not include the journal directory.
|
||||
|
@@ -261,33 +260,33 @@ journalDirty getjournaldir = do
- used in the branch is not necessary, and all the files are put directly
- in the journal directory.
-}
journalFile :: RawFilePath -> RawFilePath
journalFile file = B.concatMap mangle file
journalFile :: OsPath -> OsPath
journalFile file = OS.concat $ map mangle $ OS.unpack file
where
mangle c
| P.isPathSeparator c = B.singleton underscore
| c == underscore = B.pack [underscore, underscore]
| otherwise = B.singleton c
underscore = fromIntegral (ord '_')
| isPathSeparator c = OS.singleton underscore
| c == underscore = OS.pack [underscore, underscore]
| otherwise = OS.singleton c
underscore = unsafeFromChar '_'

{- Converts a journal file (relative to the journal dir) back to the
- filename on the branch. -}
fileJournal :: RawFilePath -> RawFilePath
fileJournal :: OsPath -> OsPath
fileJournal = go
where
go b =
let (h, t) = B.break (== underscore) b
in h <> case B.uncons t of
let (h, t) = OS.break (== underscore) b
in h <> case OS.uncons t of
Nothing -> t
Just (_u, t') -> case B.uncons t' of
Just (_u, t') -> case OS.uncons t' of
Nothing -> t'
Just (w, t'')
| w == underscore ->
B.cons underscore (go t'')
OS.cons underscore (go t'')
| otherwise ->
B.cons P.pathSeparator (go t')
OS.cons pathSeparator (go t')

underscore = fromIntegral (ord '_')
underscore = unsafeFromChar '_'
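The mangling scheme itself is unchanged by the port: a path separator becomes an underscore and a literal underscore is doubled. A plain-String sketch of the same encoding and its inverse (unix separators only), easier to eyeball than the OsString version:

-- Sketch on FilePath/String; the real code above works on OsPath.
journalName :: FilePath -> FilePath
journalName = concatMap mangle
  where
    mangle '/' = "_"
    mangle '_' = "__"
    mangle c   = [c]

unJournalName :: FilePath -> FilePath
unJournalName [] = []
unJournalName ('_':'_':rest) = '_' : unJournalName rest
unJournalName ('_':rest)     = '/' : unJournalName rest
unJournalName (c:rest)       = c : unJournalName rest

For example, journalName "foo/bar_baz" gives "foo_bar__baz", and unJournalName maps it back.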
{- Sentinal value, only produced by lockJournal; required
|
||||
- as a parameter by things that need to ensure the journal is
|
||||
|
|
|
@ -39,11 +39,11 @@ import Utility.CopyFile
|
|||
import qualified Database.Keys.Handle
|
||||
import qualified Utility.RawFilePath as R
|
||||
import qualified Utility.FileIO as F
|
||||
import qualified Utility.OsString as OS
|
||||
|
||||
import qualified Data.ByteString as S
|
||||
import qualified Data.ByteString.Char8 as S8
|
||||
import qualified Data.ByteString.Lazy as L
|
||||
import qualified System.FilePath.ByteString as P
|
||||
#ifndef mingw32_HOST_OS
|
||||
#if MIN_VERSION_unix(2,8,0)
|
||||
#else
|
||||
|
@ -54,7 +54,7 @@ import System.PosixCompat.Files (isSymbolicLink)
|
|||
type LinkTarget = S.ByteString
|
||||
|
||||
{- Checks if a file is a link to a key. -}
|
||||
isAnnexLink :: RawFilePath -> Annex (Maybe Key)
|
||||
isAnnexLink :: OsPath -> Annex (Maybe Key)
|
||||
isAnnexLink file = maybe Nothing parseLinkTargetOrPointer <$> getAnnexLinkTarget file
|
||||
|
||||
{- Gets the link target of a symlink.
|
||||
|
@ -65,13 +65,13 @@ isAnnexLink file = maybe Nothing parseLinkTargetOrPointer <$> getAnnexLinkTarget
|
|||
- Returns Nothing if the file is not a symlink, or not a link to annex
|
||||
- content.
|
||||
-}
|
||||
getAnnexLinkTarget :: RawFilePath -> Annex (Maybe LinkTarget)
|
||||
getAnnexLinkTarget :: OsPath -> Annex (Maybe LinkTarget)
|
||||
getAnnexLinkTarget f = getAnnexLinkTarget' f
|
||||
=<< (coreSymlinks <$> Annex.getGitConfig)
|
||||
|
||||
{- Pass False to force looking inside file, for when git checks out
|
||||
- symlinks as plain files. -}
|
||||
getAnnexLinkTarget' :: RawFilePath -> Bool -> Annex (Maybe S.ByteString)
|
||||
getAnnexLinkTarget' :: OsPath -> Bool -> Annex (Maybe LinkTarget)
|
||||
getAnnexLinkTarget' file coresymlinks = if coresymlinks
|
||||
then check probesymlink $
|
||||
return Nothing
|
||||
|
@ -86,9 +86,9 @@ getAnnexLinkTarget' file coresymlinks = if coresymlinks
|
|||
| otherwise -> return Nothing
|
||||
Nothing -> fallback
|
||||
|
||||
probesymlink = R.readSymbolicLink file
|
||||
probesymlink = R.readSymbolicLink (fromOsPath file)
|
||||
|
||||
probefilecontent = F.withFile (toOsPath file) ReadMode $ \h -> do
|
||||
probefilecontent = F.withFile file ReadMode $ \h -> do
|
||||
s <- S.hGet h maxSymlinkSz
|
||||
-- If we got the full amount, the file is too large
|
||||
-- to be a symlink target.
|
||||
|
@ -103,7 +103,7 @@ getAnnexLinkTarget' file coresymlinks = if coresymlinks
|
|||
then mempty
|
||||
else s
|
||||
|
||||
makeAnnexLink :: LinkTarget -> RawFilePath -> Annex ()
|
||||
makeAnnexLink :: LinkTarget -> OsPath -> Annex ()
|
||||
makeAnnexLink = makeGitLink
|
||||
|
||||
{- Creates a link on disk.
|
||||
|
@ -113,26 +113,31 @@ makeAnnexLink = makeGitLink
|
|||
- it's staged as such, so use addAnnexLink when adding a new file or
|
||||
- modified link to git.
|
||||
-}
|
||||
makeGitLink :: LinkTarget -> RawFilePath -> Annex ()
|
||||
makeGitLink :: LinkTarget -> OsPath -> Annex ()
|
||||
makeGitLink linktarget file = ifM (coreSymlinks <$> Annex.getGitConfig)
|
||||
( liftIO $ do
|
||||
void $ tryIO $ R.removeLink file
|
||||
R.createSymbolicLink linktarget file
|
||||
, liftIO $ F.writeFile' (toOsPath file) linktarget
|
||||
void $ tryIO $ R.removeLink file'
|
||||
R.createSymbolicLink linktarget file'
|
||||
, liftIO $ F.writeFile' file linktarget
|
||||
)
|
||||
where
|
||||
file' = fromOsPath file
|
||||
|
||||
{- Creates a link on disk, and additionally stages it in git. -}
|
||||
addAnnexLink :: LinkTarget -> RawFilePath -> Annex ()
|
||||
addAnnexLink :: LinkTarget -> OsPath -> Annex ()
|
||||
addAnnexLink linktarget file = do
|
||||
makeAnnexLink linktarget file
|
||||
stageSymlink file =<< hashSymlink linktarget
|
||||
|
||||
{- Injects a symlink target into git, returning its Sha. -}
|
||||
hashSymlink :: LinkTarget -> Annex Sha
|
||||
hashSymlink = hashBlob . toInternalGitPath
|
||||
hashSymlink = go . fromOsPath . toInternalGitPath . toOsPath
|
||||
where
|
||||
go :: LinkTarget -> Annex Sha
|
||||
go = hashBlob
|
||||
|
||||
{- Stages a symlink to an annexed object, using a Sha of its target. -}
|
||||
stageSymlink :: RawFilePath -> Sha -> Annex ()
|
||||
stageSymlink :: OsPath -> Sha -> Annex ()
|
||||
stageSymlink file sha =
|
||||
Annex.Queue.addUpdateIndex =<<
|
||||
inRepo (Git.UpdateIndex.stageSymlink file sha)
|
||||
|
@ -142,7 +147,7 @@ hashPointerFile :: Key -> Annex Sha
|
|||
hashPointerFile key = hashBlob $ formatPointer key
|
||||
|
||||
{- Stages a pointer file, using a Sha of its content -}
|
||||
stagePointerFile :: RawFilePath -> Maybe FileMode -> Sha -> Annex ()
|
||||
stagePointerFile :: OsPath -> Maybe FileMode -> Sha -> Annex ()
|
||||
stagePointerFile file mode sha =
|
||||
Annex.Queue.addUpdateIndex =<<
|
||||
inRepo (Git.UpdateIndex.stageFile sha treeitemtype file)
|
||||
|
@ -151,10 +156,10 @@ stagePointerFile file mode sha =
|
|||
| maybe False isExecutable mode = TreeExecutable
|
||||
| otherwise = TreeFile
|
||||
|
||||
writePointerFile :: RawFilePath -> Key -> Maybe FileMode -> IO ()
|
||||
writePointerFile :: OsPath -> Key -> Maybe FileMode -> IO ()
|
||||
writePointerFile file k mode = do
|
||||
F.writeFile' (toOsPath file) (formatPointer k)
|
||||
maybe noop (R.setFileMode file) mode
|
||||
F.writeFile' file (formatPointer k)
|
||||
maybe noop (R.setFileMode (fromOsPath file)) mode
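writePointerFile just writes the small pointer blob and optionally adjusts the file mode. A sketch of the write with the OsPath file API, assuming file-io's System.File.OsPath; mkPointer is a stand-in for formatPointer and keyname for a real git-annex key name:

import System.OsPath (OsPath)
import qualified System.File.OsPath as F
import qualified Data.ByteString.Char8 as B8

-- A pointer blob is a short text naming the object path, ending in a newline.
mkPointer :: String -> B8.ByteString
mkPointer keyname = B8.pack ("/annex/objects/" ++ keyname ++ "\n")

writePointer :: OsPath -> String -> IO ()
writePointer file keyname = F.writeFile' file (mkPointer keyname)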
|
||||
|
||||
newtype Restage = Restage Bool
|
||||
|
||||
|
@ -187,7 +192,7 @@ newtype Restage = Restage Bool
|
|||
- if the process is interrupted before the git queue is flushed, the
|
||||
- restage will be taken care of later.
|
||||
-}
|
||||
restagePointerFile :: Restage -> RawFilePath -> InodeCache -> Annex ()
|
||||
restagePointerFile :: Restage -> OsPath -> InodeCache -> Annex ()
|
||||
restagePointerFile (Restage False) f orig = do
|
||||
flip writeRestageLog orig =<< inRepo (toTopFilePath f)
|
||||
toplevelWarning True $ unableToRestage $ Just f
|
||||
|
@ -225,17 +230,18 @@ restagePointerFiles r = unlessM (Annex.getState Annex.insmudgecleanfilter) $ do
|
|||
=<< Annex.getRead Annex.keysdbhandle
|
||||
realindex <- liftIO $ Git.Index.currentIndexFile r
|
||||
numsz@(numfiles, _) <- calcnumsz
|
||||
let lock = fromRawFilePath (Git.Index.indexFileLock realindex)
|
||||
let lock = Git.Index.indexFileLock realindex
|
||||
lockindex = liftIO $ catchMaybeIO $ Git.LockFile.openLock' lock
|
||||
unlockindex = liftIO . maybe noop Git.LockFile.closeLock
|
||||
showwarning = warning $ unableToRestage Nothing
|
||||
go Nothing = showwarning
|
||||
go (Just _) = withtmpdir $ \tmpdir -> do
|
||||
tsd <- getTSDelta
|
||||
let tmpindex = toRawFilePath (tmpdir </> "index")
|
||||
let tmpindex = tmpdir </> literalOsPath "index"
|
||||
let replaceindex = liftIO $ moveFile tmpindex realindex
|
||||
let updatetmpindex = do
|
||||
r' <- liftIO $ Git.Env.addGitEnv r Git.Index.indexEnv
|
||||
. fromOsPath
|
||||
=<< Git.Index.indexEnvVal tmpindex
|
||||
configfilterprocess numsz $
|
||||
runupdateindex tsd r' replaceindex
|
||||
|
@ -247,8 +253,8 @@ restagePointerFiles r = unlessM (Annex.getState Annex.insmudgecleanfilter) $ do
|
|||
bracket lockindex unlockindex go
|
||||
where
|
||||
withtmpdir = withTmpDirIn
|
||||
(fromRawFilePath $ Git.localGitDir r)
|
||||
(toOsPath "annexindex")
|
||||
(Git.localGitDir r)
|
||||
(literalOsPath "annexindex")
|
||||
|
||||
isunmodified tsd f orig =
|
||||
genInodeCache f tsd >>= return . \case
|
||||
|
@ -325,7 +331,7 @@ restagePointerFiles r = unlessM (Annex.getState Annex.insmudgecleanfilter) $ do
|
|||
ck = ConfigKey "filter.annex.process"
|
||||
ckd = ConfigKey "filter.annex.process-temp-disabled"
|
||||
|
||||
unableToRestage :: Maybe RawFilePath -> StringContainingQuotedPath
|
||||
unableToRestage :: Maybe OsPath -> StringContainingQuotedPath
|
||||
unableToRestage mf =
|
||||
"git status will show " <> maybe "some files" QuotedPath mf
|
||||
<> " to be modified, since content availability has changed"
|
||||
|
@ -361,7 +367,8 @@ parseLinkTargetOrPointer' b =
|
|||
Nothing -> Right Nothing
|
||||
where
|
||||
parsekey l
|
||||
| isLinkToAnnex l = fileKey $ snd $ S8.breakEnd pathsep l
|
||||
| isLinkToAnnex l = fileKey $ toOsPath $
|
||||
snd $ S8.breakEnd pathsep l
|
||||
| otherwise = Nothing
|
||||
|
||||
restvalid r
|
||||
|
@ -400,9 +407,9 @@ parseLinkTargetOrPointerLazy' b =
|
|||
in parseLinkTargetOrPointer' (L.toStrict b')
|
||||
|
||||
formatPointer :: Key -> S.ByteString
|
||||
formatPointer k = prefix <> keyFile k <> nl
|
||||
formatPointer k = fromOsPath prefix <> fromOsPath (keyFile k) <> nl
|
||||
where
|
||||
prefix = toInternalGitPath $ P.pathSeparator `S.cons` objectDir
|
||||
prefix = toInternalGitPath $ pathSeparator `OS.cons` objectDir
|
||||
nl = S8.singleton '\n'
|
||||
|
||||
{- Maximum size of a file that could be a pointer to a key.
|
||||
|
@ -434,21 +441,21 @@ maxSymlinkSz = 8192
|
|||
- an object that looks like a pointer file. Or that a non-annex
|
||||
- symlink does. Avoids a false positive in those cases.
|
||||
- -}
|
||||
isPointerFile :: RawFilePath -> IO (Maybe Key)
|
||||
isPointerFile :: OsPath -> IO (Maybe Key)
|
||||
isPointerFile f = catchDefaultIO Nothing $
|
||||
#if defined(mingw32_HOST_OS)
|
||||
F.withFile (toOsPath f) ReadMode readhandle
|
||||
F.withFile f ReadMode readhandle
|
||||
#else
|
||||
#if MIN_VERSION_unix(2,8,0)
|
||||
let open = do
|
||||
fd <- openFd (fromRawFilePath f) ReadOnly
|
||||
fd <- openFd (fromOsPath f) ReadOnly
|
||||
(defaultFileFlags { nofollow = True })
|
||||
fdToHandle fd
|
||||
in bracket open hClose readhandle
|
||||
#else
|
||||
ifM (isSymbolicLink <$> R.getSymbolicLinkStatus f)
|
||||
ifM (isSymbolicLink <$> R.getSymbolicLinkStatus (fromOsPath f))
|
||||
( return Nothing
|
||||
, F.withFile (toOsPath f) ReadMode readhandle
|
||||
, F.withFile f ReadMode readhandle
|
||||
)
|
||||
#endif
|
||||
#endif
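On unix >= 2.8 the code above opens the file with nofollow set so a symlink is never chased, then reads at most maxSymlinkSz bytes. A POSIX-only sketch of that branch using the plain FilePath flavour of System.Posix.IO:

import Control.Exception (bracket)
import qualified Data.ByteString as B
import System.IO (Handle, hClose)
import System.Posix.IO (OpenFileFlags(..), OpenMode(ReadOnly), defaultFileFlags, fdToHandle, openFd)

-- Open without following symlinks and read a bounded prefix, enough to
-- decide whether the file could be a pointer.
readPrefixNoFollow :: FilePath -> IO B.ByteString
readPrefixNoFollow f = bracket open hClose (\h -> B.hGet h 8192)
  where
    open :: IO Handle
    open = do
        fd <- openFd f ReadOnly (defaultFileFlags { nofollow = True })
        fdToHandle fd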
|
||||
|
@ -463,13 +470,13 @@ isPointerFile f = catchDefaultIO Nothing $
|
|||
- than .git to be used.
|
||||
-}
|
||||
isLinkToAnnex :: S.ByteString -> Bool
|
||||
isLinkToAnnex s = p `S.isInfixOf` s
|
||||
isLinkToAnnex s = p `OS.isInfixOf` (toOsPath s)
|
||||
#ifdef mingw32_HOST_OS
|
||||
-- '/' is used inside pointer files on Windows, not the native '\'
|
||||
|| p' `S.isInfixOf` s
|
||||
|| p' `OS.isInfixOf` s
|
||||
#endif
|
||||
where
|
||||
p = P.pathSeparator `S.cons` objectDir
|
||||
p = pathSeparator `OS.cons` objectDir
|
||||
#ifdef mingw32_HOST_OS
|
||||
p' = toInternalGitPath p
|
||||
#endif
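A sketch of the containment test above, assuming os-string's System.OsString provides isInfixOf (git-annex's Utility.OsString wraps that module); the prefix is written with '/', which link targets and pointer files use even on Windows per the comment above:

{-# LANGUAGE QuasiQuotes #-}
import System.OsPath (OsPath, osp)
import qualified System.OsString as OS

annexObjectsPrefix :: OsPath
annexObjectsPrefix = [osp|/annex/objects/|]

looksLikeAnnexLink :: OsPath -> Bool
looksLikeAnnexLink target = annexObjectsPrefix `OS.isInfixOf` target

So looksLikeAnnexLink [osp|../.git/annex/objects/xx/yy/KEY/KEY|] is True.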
|
||||
|
|
|
@ -120,7 +120,7 @@ import Data.Char
|
|||
import Data.Default
|
||||
import qualified Data.List.NonEmpty as NE
|
||||
import qualified Data.ByteString.Char8 as S8
|
||||
import qualified System.FilePath.ByteString as P
|
||||
import qualified Data.ByteString.Short as SB
|
||||
|
||||
import Common
|
||||
import Key
|
||||
|
@ -134,7 +134,6 @@ import qualified Git.Types as Git
|
|||
import Git.FilePath
|
||||
import Annex.DirHashes
|
||||
import Annex.Fixup
|
||||
import qualified Utility.RawFilePath as R
|
||||
|
||||
{- Conventions:
|
||||
-
|
||||
|
@ -151,13 +150,13 @@ import qualified Utility.RawFilePath as R
|
|||
|
||||
{- The directory git annex uses for local state, relative to the .git
|
||||
- directory -}
|
||||
annexDir :: RawFilePath
|
||||
annexDir = P.addTrailingPathSeparator "annex"
|
||||
annexDir :: OsPath
|
||||
annexDir = addTrailingPathSeparator (literalOsPath "annex")
|
||||
|
||||
{- The directory git annex uses for locally available object content,
|
||||
- relative to the .git directory -}
|
||||
objectDir :: RawFilePath
|
||||
objectDir = P.addTrailingPathSeparator $ annexDir P.</> "objects"
|
||||
objectDir :: OsPath
|
||||
objectDir = addTrailingPathSeparator $ annexDir </> literalOsPath "objects"
|
||||
|
||||
{- Annexed file's possible locations relative to the .git directory
|
||||
- in a non-bare repository.
|
||||
|
@ -165,24 +164,24 @@ objectDir = P.addTrailingPathSeparator $ annexDir P.</> "objects"
|
|||
- Normally it is hashDirMixed. However, it's always possible that a
|
||||
- bare repository was converted to non-bare, or that the crippled
|
||||
- filesystem setting changed, so still need to check both. -}
|
||||
annexLocationsNonBare :: GitConfig -> Key -> [RawFilePath]
|
||||
annexLocationsNonBare :: GitConfig -> Key -> [OsPath]
|
||||
annexLocationsNonBare config key =
|
||||
map (annexLocation config key) [hashDirMixed, hashDirLower]
|
||||
|
||||
{- Annexed file's possible locations relative to a bare repository. -}
|
||||
annexLocationsBare :: GitConfig -> Key -> [RawFilePath]
|
||||
annexLocationsBare :: GitConfig -> Key -> [OsPath]
|
||||
annexLocationsBare config key =
|
||||
map (annexLocation config key) [hashDirLower, hashDirMixed]
|
||||
|
||||
annexLocation :: GitConfig -> Key -> (HashLevels -> Hasher) -> RawFilePath
|
||||
annexLocation config key hasher = objectDir P.</> keyPath key (hasher $ objectHashLevels config)
|
||||
annexLocation :: GitConfig -> Key -> (HashLevels -> Hasher) -> OsPath
|
||||
annexLocation config key hasher = objectDir </> keyPath key (hasher $ objectHashLevels config)
|
||||
|
||||
{- For exportree remotes with annexobjects=true, objects are stored
|
||||
- in this location as well as in the exported tree. -}
|
||||
exportAnnexObjectLocation :: GitConfig -> Key -> ExportLocation
|
||||
exportAnnexObjectLocation gc k =
|
||||
mkExportLocation $
|
||||
".git" P.</> annexLocation gc k hashDirLower
|
||||
literalOsPath ".git" </> annexLocation gc k hashDirLower
|
||||
|
||||
{- Number of subdirectories from the gitAnnexObjectDir
|
||||
- to the gitAnnexLocation. -}
|
||||
|
@ -199,17 +198,17 @@ gitAnnexLocationDepth config = hashlevels + 1
|
|||
- When the file is not present, returns the location where the file should
|
||||
- be stored.
|
||||
-}
|
||||
gitAnnexLocation :: Key -> Git.Repo -> GitConfig -> IO RawFilePath
|
||||
gitAnnexLocation = gitAnnexLocation' R.doesPathExist
|
||||
gitAnnexLocation :: Key -> Git.Repo -> GitConfig -> IO OsPath
|
||||
gitAnnexLocation = gitAnnexLocation' doesPathExist
|
||||
|
||||
gitAnnexLocation' :: (RawFilePath -> IO Bool) -> Key -> Git.Repo -> GitConfig -> IO RawFilePath
|
||||
gitAnnexLocation' :: (OsPath -> IO Bool) -> Key -> Git.Repo -> GitConfig -> IO OsPath
|
||||
gitAnnexLocation' checker key r config = gitAnnexLocation'' key r config
|
||||
(annexCrippledFileSystem config)
|
||||
(coreSymlinks config)
|
||||
checker
|
||||
(Git.localGitDir r)
|
||||
|
||||
gitAnnexLocation'' :: Key -> Git.Repo -> GitConfig -> Bool -> Bool -> (RawFilePath -> IO Bool) -> RawFilePath -> IO RawFilePath
|
||||
gitAnnexLocation'' :: Key -> Git.Repo -> GitConfig -> Bool -> Bool -> (OsPath -> IO Bool) -> OsPath -> IO OsPath
|
||||
gitAnnexLocation'' key r config crippled symlinkssupported checker gitdir
|
||||
{- Bare repositories default to hashDirLower for new
|
||||
- content, as it's more portable. But check all locations. -}
|
||||
|
@ -228,14 +227,14 @@ gitAnnexLocation'' key r config crippled symlinkssupported checker gitdir
|
|||
only = return . inrepo . annexLocation config key
|
||||
checkall f = check $ map inrepo $ f config key
|
||||
|
||||
inrepo d = gitdir P.</> d
|
||||
inrepo d = gitdir </> d
|
||||
check locs@(l:_) = fromMaybe l <$> firstM checker locs
|
||||
check [] = error "internal"
|
||||
|
||||
{- Calculates a symlink target to link a file to an annexed object. -}
|
||||
gitAnnexLink :: RawFilePath -> Key -> Git.Repo -> GitConfig -> IO RawFilePath
|
||||
gitAnnexLink :: OsPath -> Key -> Git.Repo -> GitConfig -> IO OsPath
|
||||
gitAnnexLink file key r config = do
|
||||
currdir <- R.getCurrentDirectory
|
||||
currdir <- getCurrentDirectory
|
||||
let absfile = absNormPathUnix currdir file
|
||||
let gitdir = getgitdir currdir
|
||||
loc <- gitAnnexLocation'' key r config False False (\_ -> return True) gitdir
|
||||
|
@ -246,19 +245,19 @@ gitAnnexLink file key r config = do
|
|||
- supporting symlinks; generate link target that will
|
||||
- work portably. -}
|
||||
| not (coreSymlinks config) && needsSubmoduleFixup r =
|
||||
absNormPathUnix currdir (Git.repoPath r P.</> ".git")
|
||||
absNormPathUnix currdir (Git.repoPath r </> literalOsPath ".git")
|
||||
| otherwise = Git.localGitDir r
|
||||
absNormPathUnix d p = toInternalGitPath $
|
||||
absPathFrom (toInternalGitPath d) (toInternalGitPath p)
|
||||
|
||||
{- Calculates a symlink target as would be used in a typical git
|
||||
- repository, with .git in the top of the work tree. -}
|
||||
gitAnnexLinkCanonical :: RawFilePath -> Key -> Git.Repo -> GitConfig -> IO RawFilePath
|
||||
gitAnnexLinkCanonical :: OsPath -> Key -> Git.Repo -> GitConfig -> IO OsPath
|
||||
gitAnnexLinkCanonical file key r config = gitAnnexLink file key r' config'
|
||||
where
|
||||
r' = case r of
|
||||
Git.Repo { Git.location = l@Git.Local { Git.worktree = Just wt } } ->
|
||||
r { Git.location = l { Git.gitdir = wt P.</> ".git" } }
|
||||
r { Git.location = l { Git.gitdir = wt </> literalOsPath ".git" } }
|
||||
_ -> r
|
||||
config' = config
|
||||
{ annexCrippledFileSystem = False
|
||||
|
@@ -266,23 +265,23 @@ gitAnnexLinkCanonical file key r config = gitAnnexLink file key r' config'
}

{- File used to lock a key's content. -}
gitAnnexContentLock :: Key -> Git.Repo -> GitConfig -> IO RawFilePath
gitAnnexContentLock :: Key -> Git.Repo -> GitConfig -> IO OsPath
gitAnnexContentLock key r config = do
loc <- gitAnnexLocation key r config
return $ loc <> ".lck"
return $ loc <> literalOsPath ".lck"

{- File used to indicate a key's content should not be dropped until after
- a specified time. -}
gitAnnexContentRetentionTimestamp :: Key -> Git.Repo -> GitConfig -> IO RawFilePath
gitAnnexContentRetentionTimestamp :: Key -> Git.Repo -> GitConfig -> IO OsPath
gitAnnexContentRetentionTimestamp key r config = do
loc <- gitAnnexLocation key r config
return $ loc <> ".rtm"
return $ loc <> literalOsPath ".rtm"

{- Lock file for gitAnnexContentRetentionTimestamp -}
gitAnnexContentRetentionTimestampLock :: Key -> Git.Repo -> GitConfig -> IO RawFilePath
gitAnnexContentRetentionTimestampLock :: Key -> Git.Repo -> GitConfig -> IO OsPath
gitAnnexContentRetentionTimestampLock key r config = do
loc <- gitAnnexLocation key r config
return $ loc <> ".rtl"
return $ loc <> literalOsPath ".rtl"
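Two composition patterns recur through the rest of Annex/Locations.hs: </> adds a path component, while <> (OsPath is a monoid) appends to the final name, as with the ".lck", ".rtm" and ".rtl" suffixes above. A tiny sketch:

{-# LANGUAGE QuasiQuotes #-}
import System.OsPath (OsPath, osp, (</>))

-- Appending a suffix to an existing location (no new path component).
contentLockFor :: OsPath -> OsPath
contentLockFor objloc = objloc <> [osp|.lck|]

-- Descending into subdirectories of the .git directory.
annexSubdir :: OsPath -> OsPath
annexSubdir gitdir = gitdir </> [osp|annex|] </> [osp|objects|]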
{- Lock that is held when taking the gitAnnexContentLock to support the v10
|
||||
- upgrade.
|
||||
|
@ -292,52 +291,52 @@ gitAnnexContentRetentionTimestampLock key r config = do
|
|||
- is mounted read-only. The gitAnnexInodeSentinal is created by git-annex
|
||||
- init, so should already exist.
|
||||
-}
|
||||
gitAnnexContentLockLock :: Git.Repo -> RawFilePath
|
||||
gitAnnexContentLockLock :: Git.Repo -> OsPath
|
||||
gitAnnexContentLockLock = gitAnnexInodeSentinal
|
||||
|
||||
gitAnnexInodeSentinal :: Git.Repo -> RawFilePath
|
||||
gitAnnexInodeSentinal r = gitAnnexDir r P.</> "sentinal"
|
||||
gitAnnexInodeSentinal :: Git.Repo -> OsPath
|
||||
gitAnnexInodeSentinal r = gitAnnexDir r </> literalOsPath "sentinal"
|
||||
|
||||
gitAnnexInodeSentinalCache :: Git.Repo -> RawFilePath
|
||||
gitAnnexInodeSentinalCache r = gitAnnexInodeSentinal r <> ".cache"
|
||||
gitAnnexInodeSentinalCache :: Git.Repo -> OsPath
|
||||
gitAnnexInodeSentinalCache r = gitAnnexInodeSentinal r <> literalOsPath ".cache"
|
||||
|
||||
{- The annex directory of a repository. -}
|
||||
gitAnnexDir :: Git.Repo -> RawFilePath
|
||||
gitAnnexDir r = P.addTrailingPathSeparator $ Git.localGitDir r P.</> annexDir
|
||||
gitAnnexDir :: Git.Repo -> OsPath
|
||||
gitAnnexDir r = addTrailingPathSeparator $ Git.localGitDir r </> annexDir
|
||||
|
||||
{- The part of the annex directory where file contents are stored. -}
|
||||
gitAnnexObjectDir :: Git.Repo -> RawFilePath
|
||||
gitAnnexObjectDir r = P.addTrailingPathSeparator $
|
||||
Git.localGitDir r P.</> objectDir
|
||||
gitAnnexObjectDir :: Git.Repo -> OsPath
|
||||
gitAnnexObjectDir r = addTrailingPathSeparator $
|
||||
Git.localGitDir r </> objectDir
|
||||
|
||||
{- .git/annex/tmp/ is used for temp files for key's contents -}
|
||||
gitAnnexTmpObjectDir :: Git.Repo -> RawFilePath
|
||||
gitAnnexTmpObjectDir r = P.addTrailingPathSeparator $
|
||||
gitAnnexDir r P.</> "tmp"
|
||||
gitAnnexTmpObjectDir :: Git.Repo -> OsPath
|
||||
gitAnnexTmpObjectDir r = addTrailingPathSeparator $
|
||||
gitAnnexDir r </> literalOsPath "tmp"
|
||||
|
||||
{- .git/annex/othertmp/ is used for other temp files -}
|
||||
gitAnnexTmpOtherDir :: Git.Repo -> RawFilePath
|
||||
gitAnnexTmpOtherDir r = P.addTrailingPathSeparator $
|
||||
gitAnnexDir r P.</> "othertmp"
|
||||
gitAnnexTmpOtherDir :: Git.Repo -> OsPath
|
||||
gitAnnexTmpOtherDir r = addTrailingPathSeparator $
|
||||
gitAnnexDir r </> literalOsPath "othertmp"
|
||||
|
||||
{- Lock file for gitAnnexTmpOtherDir. -}
|
||||
gitAnnexTmpOtherLock :: Git.Repo -> RawFilePath
|
||||
gitAnnexTmpOtherLock r = gitAnnexDir r P.</> "othertmp.lck"
|
||||
gitAnnexTmpOtherLock :: Git.Repo -> OsPath
|
||||
gitAnnexTmpOtherLock r = gitAnnexDir r </> literalOsPath "othertmp.lck"
|
||||
|
||||
{- .git/annex/misctmp/ was used by old versions of git-annex and is still
|
||||
- used during initialization -}
|
||||
gitAnnexTmpOtherDirOld :: Git.Repo -> RawFilePath
|
||||
gitAnnexTmpOtherDirOld r = P.addTrailingPathSeparator $
|
||||
gitAnnexDir r P.</> "misctmp"
|
||||
gitAnnexTmpOtherDirOld :: Git.Repo -> OsPath
|
||||
gitAnnexTmpOtherDirOld r = addTrailingPathSeparator $
|
||||
gitAnnexDir r </> literalOsPath "misctmp"
|
||||
|
||||
{- .git/annex/watchtmp/ is used by the watcher and assistant -}
|
||||
gitAnnexTmpWatcherDir :: Git.Repo -> RawFilePath
|
||||
gitAnnexTmpWatcherDir r = P.addTrailingPathSeparator $
|
||||
gitAnnexDir r P.</> "watchtmp"
|
||||
gitAnnexTmpWatcherDir :: Git.Repo -> OsPath
|
||||
gitAnnexTmpWatcherDir r = addTrailingPathSeparator $
|
||||
gitAnnexDir r </> literalOsPath "watchtmp"
|
||||
|
||||
{- The temp file to use for a given key's content. -}
|
||||
gitAnnexTmpObjectLocation :: Key -> Git.Repo -> RawFilePath
|
||||
gitAnnexTmpObjectLocation key r = gitAnnexTmpObjectDir r P.</> keyFile key
|
||||
gitAnnexTmpObjectLocation :: Key -> Git.Repo -> OsPath
|
||||
gitAnnexTmpObjectLocation key r = gitAnnexTmpObjectDir r </> keyFile key
|
||||
|
||||
{- Given a temp file such as gitAnnexTmpObjectLocation, makes a name for a
|
||||
- subdirectory in the same location, that can be used as a work area
|
||||
|
@ -346,339 +345,351 @@ gitAnnexTmpObjectLocation key r = gitAnnexTmpObjectDir r P.</> keyFile key
|
|||
- There are ordering requirements for creating these directories;
|
||||
- use Annex.Content.withTmpWorkDir to set them up.
|
||||
-}
|
||||
gitAnnexTmpWorkDir :: RawFilePath -> RawFilePath
|
||||
gitAnnexTmpWorkDir :: OsPath -> OsPath
|
||||
gitAnnexTmpWorkDir p =
|
||||
let (dir, f) = P.splitFileName p
|
||||
let (dir, f) = splitFileName p
|
||||
-- Using a prefix avoids name conflict with any other keys.
|
||||
in dir P.</> "work." <> f
|
||||
in dir </> literalOsPath "work." <> f
|
||||
|
||||
{- .git/annex/bad/ is used for bad files found during fsck -}
|
||||
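A sketch of gitAnnexTmpWorkDir's name derivation with the standard System.OsPath splitFileName, assumed to behave like the code above; the "work." prefix keeps the directory from clashing with any key's own temp file:

{-# LANGUAGE QuasiQuotes #-}
import System.OsPath (OsPath, osp, splitFileName, (</>))

workDirFor :: OsPath -> OsPath
workDirFor p =
    let (dir, f) = splitFileName p
    -- e.g. ".git/annex/tmp/KEY" becomes ".git/annex/tmp/work.KEY"
    in dir </> ([osp|work.|] <> f)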
gitAnnexBadDir :: Git.Repo -> RawFilePath
|
||||
gitAnnexBadDir r = P.addTrailingPathSeparator $ gitAnnexDir r P.</> "bad"
|
||||
gitAnnexBadDir :: Git.Repo -> OsPath
|
||||
gitAnnexBadDir r = addTrailingPathSeparator $
|
||||
gitAnnexDir r </> literalOsPath "bad"
|
||||
|
||||
{- The bad file to use for a given key. -}
|
||||
gitAnnexBadLocation :: Key -> Git.Repo -> RawFilePath
|
||||
gitAnnexBadLocation key r = gitAnnexBadDir r P.</> keyFile key
|
||||
gitAnnexBadLocation :: Key -> Git.Repo -> OsPath
|
||||
gitAnnexBadLocation key r = gitAnnexBadDir r </> keyFile key
|
||||
|
||||
{- .git/annex/foounused is used to number possibly unused keys -}
|
||||
gitAnnexUnusedLog :: RawFilePath -> Git.Repo -> RawFilePath
|
||||
gitAnnexUnusedLog prefix r = gitAnnexDir r P.</> (prefix <> "unused")
|
||||
gitAnnexUnusedLog :: OsPath -> Git.Repo -> OsPath
|
||||
gitAnnexUnusedLog prefix r =
|
||||
gitAnnexDir r </> (prefix <> literalOsPath "unused")
|
||||
|
||||
{- .git/annex/keysdb/ contains a database of information about keys. -}
|
||||
gitAnnexKeysDbDir :: Git.Repo -> GitConfig -> RawFilePath
|
||||
gitAnnexKeysDbDir r c = fromMaybe (gitAnnexDir r) (annexDbDir c) P.</> "keysdb"
|
||||
gitAnnexKeysDbDir :: Git.Repo -> GitConfig -> OsPath
|
||||
gitAnnexKeysDbDir r c =
|
||||
fromMaybe (gitAnnexDir r) (annexDbDir c) </> literalOsPath "keysdb"
|
||||
|
||||
{- Lock file for the keys database. -}
|
||||
gitAnnexKeysDbLock :: Git.Repo -> GitConfig -> RawFilePath
|
||||
gitAnnexKeysDbLock r c = gitAnnexKeysDbDir r c <> ".lck"
|
||||
gitAnnexKeysDbLock :: Git.Repo -> GitConfig -> OsPath
|
||||
gitAnnexKeysDbLock r c = gitAnnexKeysDbDir r c <> literalOsPath ".lck"
|
||||
|
||||
{- Contains the stat of the last index file that was
|
||||
- reconciled with the keys database. -}
|
||||
gitAnnexKeysDbIndexCache :: Git.Repo -> GitConfig -> RawFilePath
|
||||
gitAnnexKeysDbIndexCache r c = gitAnnexKeysDbDir r c <> ".cache"
|
||||
gitAnnexKeysDbIndexCache :: Git.Repo -> GitConfig -> OsPath
|
||||
gitAnnexKeysDbIndexCache r c =
|
||||
gitAnnexKeysDbDir r c <> literalOsPath ".cache"
|
||||
|
||||
{- .git/annex/fsck/uuid/ is used to store information about incremental
|
||||
- fscks. -}
|
||||
gitAnnexFsckDir :: UUID -> Git.Repo -> Maybe GitConfig -> RawFilePath
|
||||
gitAnnexFsckDir :: UUID -> Git.Repo -> Maybe GitConfig -> OsPath
|
||||
gitAnnexFsckDir u r mc = case annexDbDir =<< mc of
|
||||
Nothing -> go (gitAnnexDir r)
|
||||
Just d -> go d
|
||||
where
|
||||
go d = d P.</> "fsck" P.</> fromUUID u
|
||||
go d = d </> literalOsPath "fsck" </> fromUUID u
|
||||
|
||||
{- used to store information about incremental fscks. -}
|
||||
gitAnnexFsckState :: UUID -> Git.Repo -> RawFilePath
|
||||
gitAnnexFsckState u r = gitAnnexFsckDir u r Nothing P.</> "state"
|
||||
gitAnnexFsckState :: UUID -> Git.Repo -> OsPath
|
||||
gitAnnexFsckState u r = gitAnnexFsckDir u r Nothing </> literalOsPath "state"
|
||||
|
||||
{- Directory containing database used to record fsck info. -}
|
||||
gitAnnexFsckDbDir :: UUID -> Git.Repo -> GitConfig -> RawFilePath
|
||||
gitAnnexFsckDbDir u r c = gitAnnexFsckDir u r (Just c) P.</> "fsckdb"
|
||||
gitAnnexFsckDbDir :: UUID -> Git.Repo -> GitConfig -> OsPath
|
||||
gitAnnexFsckDbDir u r c = gitAnnexFsckDir u r (Just c) </> literalOsPath "fsckdb"
|
||||
|
||||
{- Directory containing old database used to record fsck info. -}
|
||||
gitAnnexFsckDbDirOld :: UUID -> Git.Repo -> GitConfig -> RawFilePath
|
||||
gitAnnexFsckDbDirOld u r c = gitAnnexFsckDir u r (Just c) P.</> "db"
|
||||
gitAnnexFsckDbDirOld :: UUID -> Git.Repo -> GitConfig -> OsPath
|
||||
gitAnnexFsckDbDirOld u r c = gitAnnexFsckDir u r (Just c) </> literalOsPath "db"
|
||||
|
||||
{- Lock file for the fsck database. -}
|
||||
gitAnnexFsckDbLock :: UUID -> Git.Repo -> GitConfig -> RawFilePath
|
||||
gitAnnexFsckDbLock u r c = gitAnnexFsckDir u r (Just c) P.</> "fsck.lck"
|
||||
gitAnnexFsckDbLock :: UUID -> Git.Repo -> GitConfig -> OsPath
|
||||
gitAnnexFsckDbLock u r c = gitAnnexFsckDir u r (Just c) </> literalOsPath "fsck.lck"
|
||||
|
||||
{- .git/annex/fsckresults/uuid is used to store results of git fscks -}
|
||||
gitAnnexFsckResultsLog :: UUID -> Git.Repo -> RawFilePath
|
||||
gitAnnexFsckResultsLog :: UUID -> Git.Repo -> OsPath
|
||||
gitAnnexFsckResultsLog u r =
|
||||
gitAnnexDir r P.</> "fsckresults" P.</> fromUUID u
|
||||
gitAnnexDir r </> literalOsPath "fsckresults" </> fromUUID u
|
||||
|
||||
{- .git/annex/upgrade.log is used to record repository version upgrades. -}
|
||||
gitAnnexUpgradeLog :: Git.Repo -> RawFilePath
|
||||
gitAnnexUpgradeLog r = gitAnnexDir r P.</> "upgrade.log"
|
||||
gitAnnexUpgradeLog :: Git.Repo -> OsPath
|
||||
gitAnnexUpgradeLog r = gitAnnexDir r </> literalOsPath "upgrade.log"
|
||||
|
||||
gitAnnexUpgradeLock :: Git.Repo -> RawFilePath
|
||||
gitAnnexUpgradeLock r = gitAnnexDir r P.</> "upgrade.lck"
|
||||
gitAnnexUpgradeLock :: Git.Repo -> OsPath
|
||||
gitAnnexUpgradeLock r = gitAnnexDir r </> literalOsPath "upgrade.lck"
|
||||
|
||||
{- .git/annex/smudge.log is used to log smudged worktree files that need to
|
||||
- be updated. -}
|
||||
gitAnnexSmudgeLog :: Git.Repo -> RawFilePath
|
||||
gitAnnexSmudgeLog r = gitAnnexDir r P.</> "smudge.log"
|
||||
gitAnnexSmudgeLog :: Git.Repo -> OsPath
|
||||
gitAnnexSmudgeLog r = gitAnnexDir r </> literalOsPath "smudge.log"
|
||||
|
||||
gitAnnexSmudgeLock :: Git.Repo -> RawFilePath
|
||||
gitAnnexSmudgeLock r = gitAnnexDir r P.</> "smudge.lck"
|
||||
gitAnnexSmudgeLock :: Git.Repo -> OsPath
|
||||
gitAnnexSmudgeLock r = gitAnnexDir r </> literalOsPath "smudge.lck"
|
||||
|
||||
{- .git/annex/restage.log is used to log worktree files that need to be
|
||||
- restaged in git -}
|
||||
gitAnnexRestageLog :: Git.Repo -> RawFilePath
|
||||
gitAnnexRestageLog r = gitAnnexDir r P.</> "restage.log"
|
||||
gitAnnexRestageLog :: Git.Repo -> OsPath
|
||||
gitAnnexRestageLog r = gitAnnexDir r </> literalOsPath "restage.log"
|
||||
|
||||
{- .git/annex/restage.old is used while restaging files in git -}
|
||||
gitAnnexRestageLogOld :: Git.Repo -> RawFilePath
|
||||
gitAnnexRestageLogOld r = gitAnnexDir r P.</> "restage.old"
|
||||
gitAnnexRestageLogOld :: Git.Repo -> OsPath
|
||||
gitAnnexRestageLogOld r = gitAnnexDir r </> literalOsPath "restage.old"
|
||||
|
||||
gitAnnexRestageLock :: Git.Repo -> RawFilePath
|
||||
gitAnnexRestageLock r = gitAnnexDir r P.</> "restage.lck"
|
||||
gitAnnexRestageLock :: Git.Repo -> OsPath
|
||||
gitAnnexRestageLock r = gitAnnexDir r </> literalOsPath "restage.lck"
|
||||
|
||||
{- .git/annex/adjust.log is used to log when the adjusted branch needs to
|
||||
- be updated. -}
|
||||
gitAnnexAdjustedBranchUpdateLog :: Git.Repo -> RawFilePath
|
||||
gitAnnexAdjustedBranchUpdateLog r = gitAnnexDir r P.</> "adjust.log"
|
||||
gitAnnexAdjustedBranchUpdateLog :: Git.Repo -> OsPath
|
||||
gitAnnexAdjustedBranchUpdateLog r = gitAnnexDir r </> literalOsPath "adjust.log"
|
||||
|
||||
gitAnnexAdjustedBranchUpdateLock :: Git.Repo -> RawFilePath
|
||||
gitAnnexAdjustedBranchUpdateLock r = gitAnnexDir r P.</> "adjust.lck"
|
||||
gitAnnexAdjustedBranchUpdateLock :: Git.Repo -> OsPath
|
||||
gitAnnexAdjustedBranchUpdateLock r = gitAnnexDir r </> literalOsPath "adjust.lck"
|
||||
|
||||
{- .git/annex/migrate.log is used to log migrations before committing them. -}
|
||||
gitAnnexMigrateLog :: Git.Repo -> RawFilePath
|
||||
gitAnnexMigrateLog r = gitAnnexDir r P.</> "migrate.log"
|
||||
gitAnnexMigrateLog :: Git.Repo -> OsPath
|
||||
gitAnnexMigrateLog r = gitAnnexDir r </> literalOsPath "migrate.log"
|
||||
|
||||
gitAnnexMigrateLock :: Git.Repo -> RawFilePath
|
||||
gitAnnexMigrateLock r = gitAnnexDir r P.</> "migrate.lck"
|
||||
gitAnnexMigrateLock :: Git.Repo -> OsPath
|
||||
gitAnnexMigrateLock r = gitAnnexDir r </> literalOsPath "migrate.lck"
|
||||
|
||||
{- .git/annex/migrations.log is used to log committed migrations. -}
|
||||
gitAnnexMigrationsLog :: Git.Repo -> RawFilePath
|
||||
gitAnnexMigrationsLog r = gitAnnexDir r P.</> "migrations.log"
|
||||
gitAnnexMigrationsLog :: Git.Repo -> OsPath
|
||||
gitAnnexMigrationsLog r = gitAnnexDir r </> literalOsPath "migrations.log"
|
||||
|
||||
gitAnnexMigrationsLock :: Git.Repo -> RawFilePath
|
||||
gitAnnexMigrationsLock r = gitAnnexDir r P.</> "migrations.lck"
|
||||
gitAnnexMigrationsLock :: Git.Repo -> OsPath
|
||||
gitAnnexMigrationsLock r = gitAnnexDir r </> literalOsPath "migrations.lck"
|
||||
|
||||
{- .git/annex/move.log is used to log moves that are in progress,
|
||||
- to better support resuming an interrupted move. -}
|
||||
gitAnnexMoveLog :: Git.Repo -> RawFilePath
|
||||
gitAnnexMoveLog r = gitAnnexDir r P.</> "move.log"
|
||||
gitAnnexMoveLog :: Git.Repo -> OsPath
|
||||
gitAnnexMoveLog r = gitAnnexDir r </> literalOsPath "move.log"
|
||||
|
||||
gitAnnexMoveLock :: Git.Repo -> RawFilePath
|
||||
gitAnnexMoveLock r = gitAnnexDir r P.</> "move.lck"
|
||||
gitAnnexMoveLock :: Git.Repo -> OsPath
|
||||
gitAnnexMoveLock r = gitAnnexDir r </> literalOsPath "move.lck"
|
||||
|
||||
{- .git/annex/export/ is used to store information about
|
||||
- exports to special remotes. -}
|
||||
gitAnnexExportDir :: Git.Repo -> GitConfig -> RawFilePath
|
||||
gitAnnexExportDir r c = fromMaybe (gitAnnexDir r) (annexDbDir c) P.</> "export"
|
||||
gitAnnexExportDir :: Git.Repo -> GitConfig -> OsPath
|
||||
gitAnnexExportDir r c = fromMaybe (gitAnnexDir r) (annexDbDir c)
|
||||
</> literalOsPath "export"
|
||||
|
||||
{- Directory containing database used to record export info. -}
|
||||
gitAnnexExportDbDir :: UUID -> Git.Repo -> GitConfig -> RawFilePath
|
||||
gitAnnexExportDbDir :: UUID -> Git.Repo -> GitConfig -> OsPath
|
||||
gitAnnexExportDbDir u r c =
|
||||
gitAnnexExportDir r c P.</> fromUUID u P.</> "exportdb"
|
||||
gitAnnexExportDir r c </> fromUUID u </> literalOsPath "exportdb"
|
||||
|
||||
{- Lock file for export database. -}
|
||||
gitAnnexExportLock :: UUID -> Git.Repo -> GitConfig -> RawFilePath
|
||||
gitAnnexExportLock u r c = gitAnnexExportDbDir u r c <> ".lck"
|
||||
gitAnnexExportLock :: UUID -> Git.Repo -> GitConfig -> OsPath
|
||||
gitAnnexExportLock u r c = gitAnnexExportDbDir u r c <> literalOsPath ".lck"
|
||||
|
||||
{- Lock file for updating the export database with information from the
|
||||
- repository. -}
|
||||
gitAnnexExportUpdateLock :: UUID -> Git.Repo -> GitConfig -> RawFilePath
|
||||
gitAnnexExportUpdateLock u r c = gitAnnexExportDbDir u r c <> ".upl"
|
||||
gitAnnexExportUpdateLock :: UUID -> Git.Repo -> GitConfig -> OsPath
|
||||
gitAnnexExportUpdateLock u r c = gitAnnexExportDbDir u r c <> literalOsPath ".upl"
|
||||
|
||||
{- Log file used to keep track of files that were in the tree exported to a
|
||||
- remote, but were excluded by its preferred content settings. -}
|
||||
gitAnnexExportExcludeLog :: UUID -> Git.Repo -> RawFilePath
|
||||
gitAnnexExportExcludeLog u r = gitAnnexDir r P.</> "export.ex" P.</> fromUUID u
|
||||
gitAnnexExportExcludeLog :: UUID -> Git.Repo -> OsPath
|
||||
gitAnnexExportExcludeLog u r = gitAnnexDir r
|
||||
</> literalOsPath "export.ex" </> fromUUID u
|
||||
|
||||
{- Directory containing database used to record remote content ids.
|
||||
-
|
||||
- (This used to be "cid", but a problem with the database caused it to
|
||||
- need to be rebuilt with a new name.)
|
||||
-}
|
||||
gitAnnexContentIdentifierDbDir :: Git.Repo -> GitConfig -> RawFilePath
|
||||
gitAnnexContentIdentifierDbDir :: Git.Repo -> GitConfig -> OsPath
|
||||
gitAnnexContentIdentifierDbDir r c =
|
||||
fromMaybe (gitAnnexDir r) (annexDbDir c) P.</> "cidsdb"
|
||||
fromMaybe (gitAnnexDir r) (annexDbDir c) </> literalOsPath "cidsdb"
|
||||
|
||||
{- Lock file for writing to the content id database. -}
|
||||
gitAnnexContentIdentifierLock :: Git.Repo -> GitConfig -> RawFilePath
|
||||
gitAnnexContentIdentifierLock r c = gitAnnexContentIdentifierDbDir r c <> ".lck"
|
||||
gitAnnexContentIdentifierLock :: Git.Repo -> GitConfig -> OsPath
|
||||
gitAnnexContentIdentifierLock r c =
|
||||
gitAnnexContentIdentifierDbDir r c <> literalOsPath ".lck"
|
||||
|
||||
{- .git/annex/import/ is used to store information about
|
||||
- imports from special remotes. -}
|
||||
gitAnnexImportDir :: Git.Repo -> GitConfig -> RawFilePath
|
||||
gitAnnexImportDir r c = fromMaybe (gitAnnexDir r) (annexDbDir c) P.</> "import"
|
||||
gitAnnexImportDir :: Git.Repo -> GitConfig -> OsPath
|
||||
gitAnnexImportDir r c =
|
||||
fromMaybe (gitAnnexDir r) (annexDbDir c) </> literalOsPath "import"
|
||||
|
||||
{- File containing state about the last import done from a remote. -}
|
||||
gitAnnexImportLog :: UUID -> Git.Repo -> GitConfig -> RawFilePath
|
||||
gitAnnexImportLog u r c =
|
||||
gitAnnexImportDir r c P.</> fromUUID u P.</> "log"
|
||||
gitAnnexImportLog :: UUID -> Git.Repo -> GitConfig -> OsPath
|
||||
gitAnnexImportLog u r c =
|
||||
gitAnnexImportDir r c </> fromUUID u </> literalOsPath "log"
|
||||
|
||||
{- Directory containing database used by importfeed. -}
|
||||
gitAnnexImportFeedDbDir :: Git.Repo -> GitConfig -> RawFilePath
|
||||
gitAnnexImportFeedDbDir :: Git.Repo -> GitConfig -> OsPath
|
||||
gitAnnexImportFeedDbDir r c =
|
||||
fromMaybe (gitAnnexDir r) (annexDbDir c) P.</> "importfeed"
|
||||
fromMaybe (gitAnnexDir r) (annexDbDir c) </> literalOsPath "importfeed"
|
||||
|
||||
{- Lock file for writing to the importfeed database. -}
|
||||
gitAnnexImportFeedDbLock :: Git.Repo -> GitConfig -> RawFilePath
|
||||
gitAnnexImportFeedDbLock r c = gitAnnexImportFeedDbDir r c <> ".lck"
|
||||
gitAnnexImportFeedDbLock :: Git.Repo -> GitConfig -> OsPath
|
||||
gitAnnexImportFeedDbLock r c =
|
||||
gitAnnexImportFeedDbDir r c <> literalOsPath ".lck"
|
||||
|
||||
{- Directory containing reposize database. -}
|
||||
gitAnnexRepoSizeDbDir :: Git.Repo -> GitConfig -> RawFilePath
|
||||
gitAnnexRepoSizeDbDir :: Git.Repo -> GitConfig -> OsPath
|
||||
gitAnnexRepoSizeDbDir r c =
|
||||
fromMaybe (gitAnnexDir r) (annexDbDir c) P.</> "reposize" P.</> "db"
|
||||
fromMaybe (gitAnnexDir r) (annexDbDir c) </> literalOsPath "reposize" </> literalOsPath "db"
|
||||
|
||||
{- Lock file for the reposize database. -}
|
||||
gitAnnexRepoSizeDbLock :: Git.Repo -> GitConfig -> RawFilePath
|
||||
gitAnnexRepoSizeDbLock :: Git.Repo -> GitConfig -> OsPath
|
||||
gitAnnexRepoSizeDbLock r c =
|
||||
fromMaybe (gitAnnexDir r) (annexDbDir c) P.</> "reposize" P.</> "lock"
|
||||
fromMaybe (gitAnnexDir r) (annexDbDir c) </> literalOsPath "reposize" </> literalOsPath "lock"
|
||||
|
||||
{- Directory containing liveness pid files. -}
|
||||
gitAnnexRepoSizeLiveDir :: Git.Repo -> GitConfig -> RawFilePath
|
||||
gitAnnexRepoSizeLiveDir :: Git.Repo -> GitConfig -> OsPath
|
||||
gitAnnexRepoSizeLiveDir r c =
|
||||
fromMaybe (gitAnnexDir r) (annexDbDir c) P.</> "reposize" P.</> "live"
|
||||
fromMaybe (gitAnnexDir r) (annexDbDir c) </> literalOsPath "reposize" </> literalOsPath "live"
|
||||
|
||||
{- .git/annex/schedulestate is used to store information about when
|
||||
- scheduled jobs were last run. -}
|
||||
gitAnnexScheduleState :: Git.Repo -> RawFilePath
|
||||
gitAnnexScheduleState r = gitAnnexDir r P.</> "schedulestate"
|
||||
gitAnnexScheduleState :: Git.Repo -> OsPath
|
||||
gitAnnexScheduleState r = gitAnnexDir r </> literalOsPath "schedulestate"
|
||||
|
||||
{- .git/annex/creds/ is used to store credentials to access some special
|
||||
- remotes. -}
|
||||
gitAnnexCredsDir :: Git.Repo -> RawFilePath
|
||||
gitAnnexCredsDir r = P.addTrailingPathSeparator $ gitAnnexDir r P.</> "creds"
|
||||
gitAnnexCredsDir :: Git.Repo -> OsPath
|
||||
gitAnnexCredsDir r = addTrailingPathSeparator $
|
||||
gitAnnexDir r </> literalOsPath "creds"
|
||||
|
||||
{- .git/annex/certificate.pem and .git/annex/key.pem are used by the webapp
|
||||
- when HTTPS is enabled -}
|
||||
gitAnnexWebCertificate :: Git.Repo -> FilePath
|
||||
gitAnnexWebCertificate r = fromRawFilePath $ gitAnnexDir r P.</> "certificate.pem"
|
||||
gitAnnexWebPrivKey :: Git.Repo -> FilePath
|
||||
gitAnnexWebPrivKey r = fromRawFilePath $ gitAnnexDir r P.</> "privkey.pem"
|
||||
gitAnnexWebCertificate :: Git.Repo -> OsPath
|
||||
gitAnnexWebCertificate r = gitAnnexDir r </> literalOsPath "certificate.pem"
|
||||
gitAnnexWebPrivKey :: Git.Repo -> OsPath
|
||||
gitAnnexWebPrivKey r = gitAnnexDir r </> literalOsPath "privkey.pem"
|
||||
|
||||
{- .git/annex/feeds/ is used to record per-key (url) state by importfeed -}
|
||||
gitAnnexFeedStateDir :: Git.Repo -> RawFilePath
|
||||
gitAnnexFeedStateDir r = P.addTrailingPathSeparator $
|
||||
gitAnnexDir r P.</> "feedstate"
|
||||
gitAnnexFeedStateDir :: Git.Repo -> OsPath
|
||||
gitAnnexFeedStateDir r = addTrailingPathSeparator $
|
||||
gitAnnexDir r </> literalOsPath "feedstate"
|
||||
|
||||
gitAnnexFeedState :: Key -> Git.Repo -> RawFilePath
|
||||
gitAnnexFeedState k r = gitAnnexFeedStateDir r P.</> keyFile k
|
||||
gitAnnexFeedState :: Key -> Git.Repo -> OsPath
|
||||
gitAnnexFeedState k r = gitAnnexFeedStateDir r </> keyFile k
|
||||
|
||||
{- .git/annex/merge/ is used as an empty work tree for merges in
|
||||
- adjusted branches. -}
|
||||
gitAnnexMergeDir :: Git.Repo -> FilePath
|
||||
gitAnnexMergeDir r = fromRawFilePath $
P.addTrailingPathSeparator $ gitAnnexDir r P.</> "merge"
gitAnnexMergeDir :: Git.Repo -> OsPath
gitAnnexMergeDir r = addTrailingPathSeparator $
gitAnnexDir r </> literalOsPath "merge"

{- .git/annex/transfer/ is used to record keys currently
- being transferred, and other transfer bookkeeping info. -}
gitAnnexTransferDir :: Git.Repo -> RawFilePath
gitAnnexTransferDir :: Git.Repo -> OsPath
gitAnnexTransferDir r =
P.addTrailingPathSeparator $ gitAnnexDir r P.</> "transfer"
addTrailingPathSeparator $ gitAnnexDir r </> literalOsPath "transfer"

{- .git/annex/journal/ is used to journal changes made to the git-annex
- branch -}
gitAnnexJournalDir :: BranchState -> Git.Repo -> RawFilePath
gitAnnexJournalDir st r = P.addTrailingPathSeparator $
gitAnnexJournalDir :: BranchState -> Git.Repo -> OsPath
gitAnnexJournalDir st r = addTrailingPathSeparator $
case alternateJournal st of
Nothing -> gitAnnexDir r P.</> "journal"
Nothing -> gitAnnexDir r </> literalOsPath "journal"
Just d -> d

{- .git/annex/journal.private/ is used to journal changes regarding private
- repositories. -}
gitAnnexPrivateJournalDir :: BranchState -> Git.Repo -> RawFilePath
gitAnnexPrivateJournalDir st r = P.addTrailingPathSeparator $
gitAnnexPrivateJournalDir :: BranchState -> Git.Repo -> OsPath
gitAnnexPrivateJournalDir st r = addTrailingPathSeparator $
case alternateJournal st of
Nothing -> gitAnnexDir r P.</> "journal-private"
Nothing -> gitAnnexDir r </> literalOsPath "journal-private"
Just d -> d

{- Lock file for the journal. -}
gitAnnexJournalLock :: Git.Repo -> RawFilePath
gitAnnexJournalLock r = gitAnnexDir r P.</> "journal.lck"
gitAnnexJournalLock :: Git.Repo -> OsPath
gitAnnexJournalLock r = gitAnnexDir r </> literalOsPath "journal.lck"

{- Lock file for flushing a git queue that writes to the git index or
- other git state that should only have one writer at a time. -}
gitAnnexGitQueueLock :: Git.Repo -> RawFilePath
gitAnnexGitQueueLock r = gitAnnexDir r P.</> "gitqueue.lck"
gitAnnexGitQueueLock :: Git.Repo -> OsPath
gitAnnexGitQueueLock r = gitAnnexDir r </> literalOsPath "gitqueue.lck"

{- .git/annex/index is used to stage changes to the git-annex branch -}
gitAnnexIndex :: Git.Repo -> RawFilePath
gitAnnexIndex r = gitAnnexDir r P.</> "index"
gitAnnexIndex :: Git.Repo -> OsPath
gitAnnexIndex r = gitAnnexDir r </> literalOsPath "index"

{- .git/annex/index-private is used to store information that is not to
- be exposed to the git-annex branch. -}
gitAnnexPrivateIndex :: Git.Repo -> RawFilePath
gitAnnexPrivateIndex r = gitAnnexDir r P.</> "index-private"
gitAnnexPrivateIndex :: Git.Repo -> OsPath
gitAnnexPrivateIndex r = gitAnnexDir r </> literalOsPath "index-private"

{- Holds the sha of the git-annex branch that the index was last updated to.
-
- The .lck in the name is a historical accident; this is not used as a
- lock. -}
gitAnnexIndexStatus :: Git.Repo -> RawFilePath
gitAnnexIndexStatus r = gitAnnexDir r P.</> "index.lck"
gitAnnexIndexStatus :: Git.Repo -> OsPath
gitAnnexIndexStatus r = gitAnnexDir r </> literalOsPath "index.lck"

{- The index file used to generate a filtered branch view. -}
gitAnnexViewIndex :: Git.Repo -> RawFilePath
gitAnnexViewIndex r = gitAnnexDir r P.</> "viewindex"
gitAnnexViewIndex :: Git.Repo -> OsPath
gitAnnexViewIndex r = gitAnnexDir r </> literalOsPath "viewindex"

{- File containing a log of recently accessed views. -}
gitAnnexViewLog :: Git.Repo -> RawFilePath
gitAnnexViewLog r = gitAnnexDir r P.</> "viewlog"
gitAnnexViewLog :: Git.Repo -> OsPath
gitAnnexViewLog r = gitAnnexDir r </> literalOsPath "viewlog"

{- List of refs that have already been merged into the git-annex branch. -}
gitAnnexMergedRefs :: Git.Repo -> RawFilePath
gitAnnexMergedRefs r = gitAnnexDir r P.</> "mergedrefs"
gitAnnexMergedRefs :: Git.Repo -> OsPath
gitAnnexMergedRefs r = gitAnnexDir r </> literalOsPath "mergedrefs"

{- List of refs that should not be merged into the git-annex branch. -}
gitAnnexIgnoredRefs :: Git.Repo -> RawFilePath
gitAnnexIgnoredRefs r = gitAnnexDir r P.</> "ignoredrefs"
gitAnnexIgnoredRefs :: Git.Repo -> OsPath
gitAnnexIgnoredRefs r = gitAnnexDir r </> literalOsPath "ignoredrefs"

{- Pid file for daemon mode. -}
gitAnnexPidFile :: Git.Repo -> RawFilePath
gitAnnexPidFile r = gitAnnexDir r P.</> "daemon.pid"
gitAnnexPidFile :: Git.Repo -> OsPath
gitAnnexPidFile r = gitAnnexDir r </> literalOsPath "daemon.pid"

{- Pid lock file for pidlock mode -}
gitAnnexPidLockFile :: Git.Repo -> RawFilePath
gitAnnexPidLockFile r = gitAnnexDir r P.</> "pidlock"
gitAnnexPidLockFile :: Git.Repo -> OsPath
gitAnnexPidLockFile r = gitAnnexDir r </> literalOsPath "pidlock"

{- Status file for daemon mode. -}
gitAnnexDaemonStatusFile :: Git.Repo -> FilePath
gitAnnexDaemonStatusFile r = fromRawFilePath $
gitAnnexDir r P.</> "daemon.status"
gitAnnexDaemonStatusFile r = fromOsPath $
gitAnnexDir r </> literalOsPath "daemon.status"

{- Log file for daemon mode. -}
gitAnnexDaemonLogFile :: Git.Repo -> RawFilePath
gitAnnexDaemonLogFile r = gitAnnexDir r P.</> "daemon.log"
gitAnnexDaemonLogFile :: Git.Repo -> OsPath
gitAnnexDaemonLogFile r = gitAnnexDir r </> literalOsPath "daemon.log"

{- Log file for fuzz test. -}
gitAnnexFuzzTestLogFile :: Git.Repo -> FilePath
gitAnnexFuzzTestLogFile r = fromRawFilePath $
gitAnnexDir r P.</> "fuzztest.log"
gitAnnexFuzzTestLogFile r = fromOsPath $
gitAnnexDir r </> literalOsPath "fuzztest.log"

{- Html shim file used to launch the webapp. -}
gitAnnexHtmlShim :: Git.Repo -> RawFilePath
gitAnnexHtmlShim r = gitAnnexDir r P.</> "webapp.html"
gitAnnexHtmlShim :: Git.Repo -> OsPath
gitAnnexHtmlShim r = gitAnnexDir r </> literalOsPath "webapp.html"

{- File containing the url to the webapp. -}
gitAnnexUrlFile :: Git.Repo -> RawFilePath
gitAnnexUrlFile r = gitAnnexDir r P.</> "url"
gitAnnexUrlFile :: Git.Repo -> OsPath
gitAnnexUrlFile r = gitAnnexDir r </> literalOsPath "url"

{- Temporary file used to edit configuriation from the git-annex branch. -}
gitAnnexTmpCfgFile :: Git.Repo -> RawFilePath
gitAnnexTmpCfgFile r = gitAnnexDir r P.</> "config.tmp"
gitAnnexTmpCfgFile :: Git.Repo -> OsPath
gitAnnexTmpCfgFile r = gitAnnexDir r </> literalOsPath "config.tmp"

{- .git/annex/ssh/ is used for ssh connection caching -}
gitAnnexSshDir :: Git.Repo -> RawFilePath
gitAnnexSshDir r = P.addTrailingPathSeparator $ gitAnnexDir r P.</> "ssh"
gitAnnexSshDir :: Git.Repo -> OsPath
gitAnnexSshDir r = addTrailingPathSeparator $
gitAnnexDir r </> literalOsPath "ssh"

{- .git/annex/remotes/ is used for remote-specific state. -}
gitAnnexRemotesDir :: Git.Repo -> RawFilePath
gitAnnexRemotesDir r =
P.addTrailingPathSeparator $ gitAnnexDir r P.</> "remotes"
gitAnnexRemotesDir :: Git.Repo -> OsPath
gitAnnexRemotesDir r = addTrailingPathSeparator $
gitAnnexDir r </> literalOsPath "remotes"

{- This is the base directory name used by the assistant when making
- repositories, by default. -}
gitAnnexAssistantDefaultDir :: FilePath
gitAnnexAssistantDefaultDir = "annex"
gitAnnexAssistantDefaultDir :: OsPath
gitAnnexAssistantDefaultDir = literalOsPath "annex"

gitAnnexSimDir :: Git.Repo -> RawFilePath
gitAnnexSimDir r = P.addTrailingPathSeparator $ gitAnnexDir r P.</> "sim"
gitAnnexSimDir :: Git.Repo -> OsPath
gitAnnexSimDir r = addTrailingPathSeparator $
gitAnnexDir r </> literalOsPath "sim"

{- Sanitizes a String that will be used as part of a Key's keyName,
- dealing with characters that cause problems.
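
A note on the pattern above, not part of the commit itself: the repeated change from gitAnnexDir r P.</> "x" (RawFilePath) to gitAnnexDir r </> literalOsPath "x" (OsPath) can be reproduced with the stock filepath package alone. literalOsPath is git-annex's own helper; with plain System.OsPath the literal is written with the osp quasiquoter. A minimal sketch, assuming filepath >= 1.4.100 and a hypothetical stand-in Repo type:

    {-# LANGUAGE QuasiQuotes #-}
    module PathSketch where

    import System.OsPath (OsPath, osp, (</>), addTrailingPathSeparator)

    -- Stand-in for Git.Repo/gitAnnexDir, which are git-annex types.
    newtype Repo = Repo { annexDir :: OsPath }

    -- Mirrors gitAnnexTransferDir above: append a literal component and a
    -- trailing path separator, all without leaving OsPath.
    transferDir :: Repo -> OsPath
    transferDir r = addTrailingPathSeparator (annexDir r </> [osp|transfer|])
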
@@ -730,23 +741,26 @@ reSanitizeKeyName = preSanitizeKeyName' True
- Changing what this function escapes and how is not a good idea, as it
- can cause existing objects to get lost.
-}
keyFile :: Key -> RawFilePath
keyFile :: Key -> OsPath
keyFile k =
let b = serializeKey' k
in if S8.any (`elem` ['&', '%', ':', '/']) b
then S8.concatMap esc b
let b = serializeKey'' k
in toOsPath $ if SB.any (`elem` needesc) b
then SB.concat $ map esc (SB.unpack b)
else b
where
esc '&' = "&a"
esc '%' = "&s"
esc ':' = "&c"
esc '/' = "%"
esc c = S8.singleton c
esc w = case chr (fromIntegral w) of
'&' -> "&a"
'%' -> "&s"
':' -> "&c"
'/' -> "%"
_ -> SB.singleton w

needesc = map (fromIntegral . ord) ['&', '%', ':', '/']

{- Reverses keyFile, converting a filename fragment (ie, the basename of
- the symlink target) into a key. -}
fileKey :: RawFilePath -> Maybe Key
fileKey = deserializeKey' . S8.intercalate "/" . map go . S8.split '%'
fileKey :: OsPath -> Maybe Key
fileKey = deserializeKey' . S8.intercalate "/" . map go . S8.split '%' . fromOsPath
where
go = S8.concat . unescafterfirst . S8.split '&'
unescafterfirst [] = []

@@ -765,8 +779,8 @@ fileKey = deserializeKey' . S8.intercalate "/" . map go . S8.split '%'
- The file is put in a directory with the same name, this allows
- write-protecting the directory to avoid accidental deletion of the file.
-}
keyPath :: Key -> Hasher -> RawFilePath
keyPath key hasher = hasher key P.</> f P.</> f
keyPath :: Key -> Hasher -> OsPath
keyPath key hasher = hasher key </> f </> f
where
f = keyFile key

@@ -776,5 +790,6 @@ keyPath key hasher = hasher key P.</> f P.</> f
- This is compatible with the annexLocationsNonBare and annexLocationsBare,
- for interoperability between special remotes and git-annex repos.
-}
keyPaths :: Key -> NE.NonEmpty RawFilePath
keyPaths :: Key -> NE.NonEmpty OsPath
keyPaths key = NE.map (\h -> keyPath key (h def)) dirHashes
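
The escaping in keyFile/fileKey is unchanged by the conversion; only the container type moves from ByteString to ShortByteString wrapped in OsPath. A standalone sketch of the same & / % / : / / scheme on plain Strings, just to show that it round-trips (the real keyFile/fileKey operate on serialized keys, not arbitrary Strings):

    module KeyEscapeSketch where

    -- Escape the characters keyFile treats as problematic.
    escape :: String -> String
    escape = concatMap esc
      where
        esc '&' = "&a"
        esc '%' = "&s"
        esc ':' = "&c"
        esc '/' = "%"
        esc c = [c]

    -- Invert the escaping, as fileKey effectively does.
    unescape :: String -> String
    unescape ('&':'a':rest) = '&' : unescape rest
    unescape ('&':'s':rest) = '%' : unescape rest
    unescape ('&':'c':rest) = ':' : unescape rest
    unescape ('%':rest) = '/' : unescape rest
    unescape (c:rest) = c : unescape rest
    unescape [] = []

    -- unescape . escape is the identity, for example:
    main :: IO ()
    main = print (unescape (escape "SHA256E-s0--a&b%c:d/e") == "SHA256E-s0--a&b%c:d/e")
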
@@ -26,11 +26,10 @@ import Annex.Perms
import Annex.LockPool

import qualified Data.Map as M
import qualified System.FilePath.ByteString as P

{- Create a specified lock file, and takes a shared lock, which is retained
- in the cache. -}
lockFileCached :: RawFilePath -> Annex ()
lockFileCached :: OsPath -> Annex ()
lockFileCached file = go =<< fromLockCache file
where
go (Just _) = noop -- already locked

@@ -43,7 +42,7 @@ lockFileCached file = go =<< fromLockCache file
#endif
changeLockCache $ M.insert file lockhandle

unlockFile :: RawFilePath -> Annex ()
unlockFile :: OsPath -> Annex ()
unlockFile file = maybe noop go =<< fromLockCache file
where
go lockhandle = do

@@ -53,7 +52,7 @@ unlockFile file = maybe noop go =<< fromLockCache file
getLockCache :: Annex LockCache
getLockCache = getState lockcache

fromLockCache :: RawFilePath -> Annex (Maybe LockHandle)
fromLockCache :: OsPath -> Annex (Maybe LockHandle)
fromLockCache file = M.lookup file <$> getLockCache

changeLockCache :: (LockCache -> LockCache) -> Annex ()

@@ -63,9 +62,9 @@ changeLockCache a = do

{- Runs an action with a shared lock held. If an exclusive lock is held,
- blocks until it becomes free. -}
withSharedLock :: RawFilePath -> Annex a -> Annex a
withSharedLock :: OsPath -> Annex a -> Annex a
withSharedLock lockfile a = debugLocks $ do
createAnnexDirectory $ P.takeDirectory lockfile
createAnnexDirectory $ takeDirectory lockfile
mode <- annexFileMode
bracket (lock mode lockfile) (liftIO . dropLock) (const a)
where

@@ -77,16 +76,16 @@ withSharedLock lockfile a = debugLocks $ do

{- Runs an action with an exclusive lock held. If the lock is already
- held, blocks until it becomes free. -}
withExclusiveLock :: RawFilePath -> Annex a -> Annex a
withExclusiveLock :: OsPath -> Annex a -> Annex a
withExclusiveLock lockfile a = bracket
(takeExclusiveLock lockfile)
(liftIO . dropLock)
(const a)

{- Takes an exclusive lock, blocking until it's free. -}
takeExclusiveLock :: RawFilePath -> Annex LockHandle
takeExclusiveLock :: OsPath -> Annex LockHandle
takeExclusiveLock lockfile = debugLocks $ do
createAnnexDirectory $ P.takeDirectory lockfile
createAnnexDirectory $ takeDirectory lockfile
mode <- annexFileMode
lock mode lockfile
where

@@ -98,9 +97,9 @@ takeExclusiveLock lockfile = debugLocks $ do

{- Tries to take an exclusive lock and run an action. If the lock is
- already held, returns Nothing. -}
tryExclusiveLock :: RawFilePath -> Annex a -> Annex (Maybe a)
tryExclusiveLock :: OsPath -> Annex a -> Annex (Maybe a)
tryExclusiveLock lockfile a = debugLocks $ do
createAnnexDirectory $ P.takeDirectory lockfile
createAnnexDirectory $ takeDirectory lockfile
mode <- annexFileMode
bracket (lock mode lockfile) (liftIO . unlock) go
where

@@ -118,7 +117,7 @@ tryExclusiveLock lockfile a = debugLocks $ do
- Does not create the lock directory or lock file if it does not exist,
- taking an exclusive lock will create them.
-}
trySharedLock :: RawFilePath -> Annex (Maybe LockHandle)
trySharedLock :: OsPath -> Annex (Maybe LockHandle)
trySharedLock lockfile = debugLocks $
#ifndef mingw32_HOST_OS
tryLockShared Nothing lockfile

@@ -5,6 +5,7 @@
- Licensed under the GNU AGPL version 3 or higher.
-}

{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE CPP #-}

module Annex.Magic (

@@ -16,6 +17,7 @@ module Annex.Magic (
getMagicMimeEncoding,
) where

import Common
import Types.Mime
import Control.Monad.IO.Class
#ifdef WITH_MAGICMIME

@@ -23,7 +25,6 @@ import Magic
import Utility.Env
import Control.Concurrent
import System.IO.Unsafe (unsafePerformIO)
import Common
#else
type Magic = ()
#endif

@@ -34,16 +35,18 @@ initMagicMime = catchMaybeIO $ do
m <- magicOpen [MagicMime]
liftIO $ getEnv "GIT_ANNEX_DIR" >>= \case
Nothing -> magicLoadDefault m
Just d -> magicLoad m
(d </> "magic" </> "magic.mgc")
Just d -> magicLoad m $ fromOsPath $
toOsPath d
</> literalOsPath "magic"
</> literalOsPath "magic.mgc"
return m
#else
initMagicMime = return Nothing
#endif

getMagicMime :: Magic -> FilePath -> IO (Maybe (MimeType, MimeEncoding))
getMagicMime :: Magic -> OsPath -> IO (Maybe (MimeType, MimeEncoding))
#ifdef WITH_MAGICMIME
getMagicMime m f = Just . parse <$> magicConcurrentSafe (magicFile m f)
getMagicMime m f = Just . parse <$> magicConcurrentSafe (magicFile m (fromOsPath f))
where
parse s =
let (mimetype, rest) = separate (== ';') s

@@ -55,10 +58,10 @@ getMagicMime m f = Just . parse <$> magicConcurrentSafe (magicFile m f)
getMagicMime _ _ = return Nothing
#endif

getMagicMimeType :: MonadIO m => Magic -> FilePath -> m (Maybe MimeType)
getMagicMimeType :: MonadIO m => Magic -> OsPath -> m (Maybe MimeType)
getMagicMimeType m f = liftIO $ fmap fst <$> getMagicMime m f

getMagicMimeEncoding :: MonadIO m => Magic -> FilePath -> m(Maybe MimeEncoding)
getMagicMimeEncoding :: MonadIO m => Magic -> OsPath -> m(Maybe MimeEncoding)
getMagicMimeEncoding m f = liftIO $ fmap snd <$> getMagicMime m f

#ifdef WITH_MAGICMIME

@@ -38,7 +38,7 @@ import Text.Read
-
- Also, can generate new metadata, if configured to do so.
-}
genMetaData :: Key -> RawFilePath -> Maybe POSIXTime -> Annex ()
genMetaData :: Key -> OsPath -> Maybe POSIXTime -> Annex ()
genMetaData key file mmtime = do
catKeyFileHEAD file >>= \case
Nothing -> noop

@@ -57,8 +57,8 @@ genMetaData key file mmtime = do
Nothing -> noop
where
warncopied = warning $ UnquotedString $
"Copied metadata from old version of " ++ fromRawFilePath file ++ " to new version. " ++
"If you don't want this copied metadata, run: git annex metadata --remove-all " ++ fromRawFilePath file
"Copied metadata from old version of " ++ fromOsPath file ++ " to new version. " ++
"If you don't want this copied metadata, run: git annex metadata --remove-all " ++ fromOsPath file
-- If the only fields copied were date metadata, and they'll
-- be overwritten with the current mtime, no need to warn about
-- copying.

@@ -7,20 +7,17 @@

module Annex.Multicast where

import Common
import Annex.Path
import Utility.Env
import Utility.PartialPrelude

import System.Process
import System.IO
import GHC.IO.Handle.FD
import Control.Applicative
import Prelude

multicastReceiveEnv :: String
multicastReceiveEnv = "GIT_ANNEX_MULTICAST_RECEIVE"

multicastCallbackEnv :: IO (FilePath, [(String, String)], Handle)
multicastCallbackEnv :: IO (OsPath, [(String, String)], Handle)
multicastCallbackEnv = do
gitannex <- programPath
-- This will even work on Windows

@@ -88,7 +88,7 @@ getMinCopies = fromSourcesOr defaultMinCopies

{- NumCopies and MinCopies value for a file, from any configuration source,
- including .gitattributes. -}
getFileNumMinCopies :: RawFilePath -> Annex (NumCopies, MinCopies)
getFileNumMinCopies :: OsPath -> Annex (NumCopies, MinCopies)
getFileNumMinCopies f = do
fnumc <- getForcedNumCopies
fminc <- getForcedMinCopies

@@ -141,7 +141,7 @@ getSafestNumMinCopies afile k =
Database.Keys.getAssociatedFilesIncluding afile k
>>= getSafestNumMinCopies' afile k

getSafestNumMinCopies' :: AssociatedFile -> Key -> [RawFilePath] -> Annex (NumCopies, MinCopies)
getSafestNumMinCopies' :: AssociatedFile -> Key -> [OsPath] -> Annex (NumCopies, MinCopies)
getSafestNumMinCopies' afile k fs = do
l <- mapM getFileNumMinCopies fs
let l' = zip l fs

@@ -174,13 +174,13 @@ getSafestNumMinCopies' afile k fs = do
{- This is the globally visible numcopies value for a file. So it does
- not include local configuration in the git config or command line
- options. -}
getGlobalFileNumCopies :: RawFilePath -> Annex NumCopies
getGlobalFileNumCopies :: OsPath -> Annex NumCopies
getGlobalFileNumCopies f = fromSourcesOr defaultNumCopies
[ fst <$> getNumMinCopiesAttr f
, getGlobalNumCopies
]

getNumMinCopiesAttr :: RawFilePath -> Annex (Maybe NumCopies, Maybe MinCopies)
getNumMinCopiesAttr :: OsPath -> Annex (Maybe NumCopies, Maybe MinCopies)
getNumMinCopiesAttr file =
checkAttrs ["annex.numcopies", "annex.mincopies"] file >>= \case
(n:m:[]) -> return

@@ -196,12 +196,12 @@ getNumMinCopiesAttr file =
- This is good enough for everything except dropping the file, which
- requires active verification of the copies.
-}
numCopiesCheck :: RawFilePath -> Key -> (Int -> Int -> v) -> Annex v
numCopiesCheck :: OsPath -> Key -> (Int -> Int -> v) -> Annex v
numCopiesCheck file key vs = do
have <- trustExclude UnTrusted =<< Remote.keyLocations key
numCopiesCheck' file vs have

numCopiesCheck' :: RawFilePath -> (Int -> Int -> v) -> [UUID] -> Annex v
numCopiesCheck' :: OsPath -> (Int -> Int -> v) -> [UUID] -> Annex v
numCopiesCheck' file vs have = do
needed <- fst <$> getFileNumMinCopies file
let nhave = numCopiesCount have

@@ -40,20 +40,20 @@ import qualified Data.Map as M
- git-annex-shell or git-remote-annex, this finds a git-annex program
- instead.
-}
programPath :: IO FilePath
programPath :: IO OsPath
programPath = go =<< getEnv "GIT_ANNEX_DIR"
where
go (Just dir) = do
name <- reqgitannex <$> getProgName
return (dir </> name)
return (toOsPath dir </> toOsPath name)
go Nothing = do
name <- getProgName
exe <- if isgitannex name
then getExecutablePath
else pure "git-annex"
p <- if isAbsolute exe
p <- if isAbsolute (toOsPath exe)
then return exe
else fromMaybe exe <$> readProgramFile
else maybe exe fromOsPath <$> readProgramFile
maybe cannotFindProgram return =<< searchPath p

reqgitannex name

@@ -62,15 +62,15 @@ programPath = go =<< getEnv "GIT_ANNEX_DIR"
isgitannex = flip M.notMember otherMulticallCommands

{- Returns the path for git-annex that is recorded in the programFile. -}
readProgramFile :: IO (Maybe FilePath)
readProgramFile :: IO (Maybe OsPath)
readProgramFile = catchDefaultIO Nothing $ do
programfile <- programFile
headMaybe . lines <$> readFile programfile
fmap toOsPath . headMaybe . lines <$> readFile (fromOsPath programfile)

cannotFindProgram :: IO a
cannotFindProgram = do
f <- programFile
giveup $ "cannot find git-annex program in PATH or in " ++ f
giveup $ "cannot find git-annex program in PATH or in " ++ fromOsPath f

{- Runs a git-annex child process.
-

@@ -88,7 +88,7 @@ gitAnnexChildProcess
gitAnnexChildProcess subcmd ps f a = do
cmd <- liftIO programPath
ps' <- gitAnnexChildProcessParams subcmd ps
pidLockChildProcess cmd ps' f a
pidLockChildProcess (fromOsPath cmd) ps' f a

{- Parameters to pass to a git-annex child process to run a subcommand
- with some parameters.
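
programPath and readProgramFile above lean on toOsPath/fromOsPath, which are git-annex's own byte-preserving conversions between String and OsPath. With only the stock System.OsPath API the closest equivalents are encodeUtf/decodeUtf, which can fail on undecodable names, so the following is an analogy rather than a drop-in replacement:

    module ConvertSketch where

    import System.OsPath (OsPath, encodeUtf, decodeUtf)

    -- Rough stand-ins for toOsPath/fromOsPath; Nothing on encoding failure.
    toOsPathUtf :: FilePath -> Maybe OsPath
    toOsPathUtf = encodeUtf

    fromOsPathUtf :: OsPath -> Maybe FilePath
    fromOsPathUtf = decodeUtf
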
@@ -49,20 +49,20 @@ import System.PosixCompat.Files (fileMode, intersectFileModes, nullFileMode, gro
withShared :: (SharedRepository -> Annex a) -> Annex a
withShared a = a =<< coreSharedRepository <$> Annex.getGitConfig

setAnnexFilePerm :: RawFilePath -> Annex ()
setAnnexFilePerm :: OsPath -> Annex ()
setAnnexFilePerm = setAnnexPerm False

setAnnexDirPerm :: RawFilePath -> Annex ()
setAnnexDirPerm :: OsPath -> Annex ()
setAnnexDirPerm = setAnnexPerm True

{- Sets appropriate file mode for a file or directory in the annex,
- other than the content files and content directory. Normally,
- don't change the mode, but with core.sharedRepository set,
- allow the group to write, etc. -}
setAnnexPerm :: Bool -> RawFilePath -> Annex ()
setAnnexPerm :: Bool -> OsPath -> Annex ()
setAnnexPerm isdir file = setAnnexPerm' Nothing isdir >>= \go -> liftIO (go file)

setAnnexPerm' :: Maybe ([FileMode] -> FileMode -> FileMode) -> Bool -> Annex (RawFilePath -> IO ())
setAnnexPerm' :: Maybe ([FileMode] -> FileMode -> FileMode) -> Bool -> Annex (OsPath -> IO ())
setAnnexPerm' modef isdir = ifM crippledFileSystem
( return (const noop)
, withShared $ \s -> return $ \file -> go s file

@@ -79,11 +79,12 @@ setAnnexPerm' modef isdir = ifM crippledFileSystem
Nothing -> noop
Just f -> void $ tryIO $
modifyFileMode file $ f []
go (UmaskShared n) file = void $ tryIO $ R.setFileMode file $
if isdir then umaskSharedDirectory n else n
go (UmaskShared n) file = void $ tryIO $
R.setFileMode (fromOsPath file) $
if isdir then umaskSharedDirectory n else n
modef' = fromMaybe addModes modef

resetAnnexFilePerm :: RawFilePath -> Annex ()
resetAnnexFilePerm :: OsPath -> Annex ()
resetAnnexFilePerm = resetAnnexPerm False

{- Like setAnnexPerm, but ignores the current mode of the file entirely,

@@ -94,7 +95,7 @@ resetAnnexFilePerm = resetAnnexPerm False
- which is going to be moved to a non-temporary location and needs
- usual modes.
-}
resetAnnexPerm :: Bool -> RawFilePath -> Annex ()
resetAnnexPerm :: Bool -> OsPath -> Annex ()
resetAnnexPerm isdir file = unlessM crippledFileSystem $ do
defmode <- liftIO defaultFileMode
let modef moremodes _oldmode = addModes moremodes defmode

@@ -115,7 +116,7 @@ annexFileMode = do
{- Creates a directory inside the gitAnnexDir (or possibly the dbdir),
- creating any parent directories up to and including the gitAnnexDir.
- Makes directories with appropriate permissions. -}
createAnnexDirectory :: RawFilePath -> Annex ()
createAnnexDirectory :: OsPath -> Annex ()
createAnnexDirectory dir = do
top <- parentDir <$> fromRepo gitAnnexDir
tops <- annexDbDir <$> Annex.getGitConfig >>= return . \case

@@ -124,7 +125,7 @@ createAnnexDirectory dir = do
createDirectoryUnder' tops dir createdir
where
createdir p = do
liftIO $ R.createDirectory p
liftIO $ createDirectory p
setAnnexDirPerm p

{- Create a directory in the git work tree, creating any parent

@@ -132,7 +133,7 @@ createAnnexDirectory dir = do
-
- Uses default permissions.
-}
createWorkTreeDirectory :: RawFilePath -> Annex ()
createWorkTreeDirectory :: OsPath -> Annex ()
createWorkTreeDirectory dir = do
fromRepo repoWorkTree >>= liftIO . \case
Just wt -> createDirectoryUnder [wt] dir

@@ -159,16 +160,16 @@ createWorkTreeDirectory dir = do
- it should not normally have. checkContentWritePerm can detect when
- that happens with write permissions.
-}
freezeContent :: RawFilePath -> Annex ()
freezeContent :: OsPath -> Annex ()
freezeContent file =
withShared $ \sr -> freezeContent' sr file

freezeContent' :: SharedRepository -> RawFilePath -> Annex ()
freezeContent' :: SharedRepository -> OsPath -> Annex ()
freezeContent' sr file = freezeContent'' sr file =<< getVersion

freezeContent'' :: SharedRepository -> RawFilePath -> Maybe RepoVersion -> Annex ()
freezeContent'' :: SharedRepository -> OsPath -> Maybe RepoVersion -> Annex ()
freezeContent'' sr file rv = do
fastDebug "Annex.Perms" ("freezing content " ++ fromRawFilePath file)
fastDebug "Annex.Perms" ("freezing content " ++ fromOsPath file)
unlessM crippledFileSystem $ go sr
freezeHook file
where

@@ -211,7 +212,7 @@ freezeContent'' sr file rv = do
- support removing write permissions, so when there is such a hook
- write permissions are ignored.
-}
checkContentWritePerm :: RawFilePath -> Annex (Maybe Bool)
checkContentWritePerm :: OsPath -> Annex (Maybe Bool)
checkContentWritePerm file = ifM crippledFileSystem
( return (Just True)
, do

@@ -221,7 +222,7 @@ checkContentWritePerm file = ifM crippledFileSystem
liftIO $ checkContentWritePerm' sr file rv hasfreezehook
)

checkContentWritePerm' :: SharedRepository -> RawFilePath -> Maybe RepoVersion -> Bool -> IO (Maybe Bool)
checkContentWritePerm' :: SharedRepository -> OsPath -> Maybe RepoVersion -> Bool -> IO (Maybe Bool)
checkContentWritePerm' sr file rv hasfreezehook
| hasfreezehook = return (Just True)
| otherwise = case sr of

@@ -240,7 +241,7 @@ checkContentWritePerm' sr file rv hasfreezehook
| otherwise -> want sharedret
(\havemode -> havemode == removeModes writeModes n)
where
want mk f = catchMaybeIO (fileMode <$> R.getFileStatus file)
want mk f = catchMaybeIO (fileMode <$> R.getFileStatus (fromOsPath file))
>>= return . \case
Just havemode -> mk (f havemode)
Nothing -> mk True

@@ -253,18 +254,19 @@ checkContentWritePerm' sr file rv hasfreezehook

{- Allows writing to an annexed file that freezeContent was called on
- before. -}
thawContent :: RawFilePath -> Annex ()
thawContent :: OsPath -> Annex ()
thawContent file = withShared $ \sr -> thawContent' sr file

thawContent' :: SharedRepository -> RawFilePath -> Annex ()
thawContent' :: SharedRepository -> OsPath -> Annex ()
thawContent' sr file = do
fastDebug "Annex.Perms" ("thawing content " ++ fromRawFilePath file)
fastDebug "Annex.Perms" ("thawing content " ++ fromOsPath file)
thawPerms (go sr) (thawHook file)
where
go GroupShared = liftIO $ void $ tryIO $ groupWriteRead file
go AllShared = liftIO $ void $ tryIO $ groupWriteRead file
go UnShared = liftIO $ allowWrite file
go (UmaskShared n) = liftIO $ void $ tryIO $ R.setFileMode file n
go (UmaskShared n) = liftIO $ void $ tryIO $
R.setFileMode (fromOsPath file) n

{- Runs an action that thaws a file's permissions. This will probably
- fail on a crippled filesystem. But, if file modes are supported on a

@@ -281,9 +283,9 @@ thawPerms a hook = ifM crippledFileSystem
- is set, this is not done, since the group must be allowed to delete the
- file without being able to thaw the directory.
-}
freezeContentDir :: RawFilePath -> Annex ()
freezeContentDir :: OsPath -> Annex ()
freezeContentDir file = do
fastDebug "Annex.Perms" ("freezing content directory " ++ fromRawFilePath dir)
fastDebug "Annex.Perms" ("freezing content directory " ++ fromOsPath dir)
unlessM crippledFileSystem $ withShared go
freezeHook dir
where

@@ -291,29 +293,29 @@ freezeContentDir file = do
go UnShared = liftIO $ preventWrite dir
go GroupShared = liftIO $ void $ tryIO $ groupWriteRead dir
go AllShared = liftIO $ void $ tryIO $ groupWriteRead dir
go (UmaskShared n) = liftIO $ void $ tryIO $ R.setFileMode dir $
go (UmaskShared n) = liftIO $ void $ tryIO $ R.setFileMode (fromOsPath dir) $
umaskSharedDirectory $
-- If n includes group or other write mode, leave them set
-- to allow them to delete the file without being able to
-- thaw the directory.
-- If n includes group or other write mode, leave
-- them set to allow them to delete the file without
-- being able to thaw the directory.
removeModes [ownerWriteMode] n

thawContentDir :: RawFilePath -> Annex ()
thawContentDir :: OsPath -> Annex ()
thawContentDir file = do
fastDebug "Annex.Perms" ("thawing content directory " ++ fromRawFilePath dir)
fastDebug "Annex.Perms" ("thawing content directory " ++ fromOsPath dir)
thawPerms (withShared (liftIO . go)) (thawHook dir)
where
dir = parentDir file
go UnShared = allowWrite dir
go GroupShared = allowWrite dir
go AllShared = allowWrite dir
go (UmaskShared n) = R.setFileMode dir n
go (UmaskShared n) = R.setFileMode (fromOsPath dir) n

{- Makes the directory tree to store an annexed file's content,
- with appropriate permissions on each level. -}
createContentDir :: RawFilePath -> Annex ()
createContentDir :: OsPath -> Annex ()
createContentDir dest = do
unlessM (liftIO $ R.doesPathExist dir) $
unlessM (liftIO $ doesDirectoryExist dir) $
createAnnexDirectory dir
-- might have already existed with restricted perms
thawHook dir

@@ -324,7 +326,7 @@ createContentDir dest = do
{- Creates the content directory for a file if it doesn't already exist,
- or thaws it if it does, then runs an action to modify a file in the
- directory, and finally, freezes the content directory. -}
modifyContentDir :: RawFilePath -> Annex a -> Annex a
modifyContentDir :: OsPath -> Annex a -> Annex a
modifyContentDir f a = do
createContentDir f -- also thaws it
v <- tryNonAsync a

@@ -333,7 +335,7 @@ modifyContentDir f a = do

{- Like modifyContentDir, but avoids creating the content directory if it
- does not already exist. In that case, the action will probably fail. -}
modifyContentDirWhenExists :: RawFilePath -> Annex a -> Annex a
modifyContentDirWhenExists :: OsPath -> Annex a -> Annex a
modifyContentDirWhenExists f a = do
thawContentDir f
v <- tryNonAsync a

@@ -352,11 +354,11 @@ hasThawHook =
<||>
(doesAnnexHookExist thawContentAnnexHook)

freezeHook :: RawFilePath -> Annex ()
freezeHook :: OsPath -> Annex ()
freezeHook = void . runAnnexPathHook "%path"
freezeContentAnnexHook annexFreezeContentCommand

thawHook :: RawFilePath -> Annex ()
thawHook :: OsPath -> Annex ()
thawHook = void . runAnnexPathHook "%path"
thawContentAnnexHook annexThawContentCommand
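
The directory calls above (createDirectory, doesDirectoryExist) now come from System.Directory.OsPath, which mirrors System.Directory but takes OsPath arguments, so fromOsPath conversions are only needed at the remaining POSIX-specific calls like R.setFileMode. A minimal sketch of that API, assuming directory >= 1.3.8:

    module DirSketch where

    import System.OsPath (OsPath)
    import System.Directory.OsPath (createDirectoryIfMissing, doesDirectoryExist)

    -- Create a directory (and parents) when it is not already present, staying
    -- in OsPath throughout. The existence check is redundant in practice
    -- (createDirectoryIfMissing tolerates existing directories) and is only
    -- here to show both functions.
    ensureDir :: OsPath -> IO ()
    ensureDir d = do
        have <- doesDirectoryExist d
        if have
            then pure ()
            else createDirectoryIfMissing True d
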
@@ -40,12 +40,13 @@ import Control.Concurrent
import Control.Concurrent.STM
import Control.Concurrent.Async
import qualified Data.ByteString as B
import qualified Data.ByteString as BS
import qualified Data.ByteString.Lazy as L
import qualified System.FilePath.ByteString as P
import qualified Data.Map as M
import qualified Data.Set as S
#ifndef mingw32_HOST_OS
import qualified Data.ByteString as BS
import System.IO.Unsafe
#endif

proxyRemoteSide :: ProtocolVersion -> Bypass -> Remote -> Annex RemoteSide
proxyRemoteSide clientmaxversion bypass r

@@ -175,8 +176,8 @@ proxySpecialRemote protoversion r ihdl ohdl owaitv oclosedv mexportdb = go
-- independently. Also, this key is not getting added into the
-- local annex objects.
withproxytmpfile k a = withOtherTmp $ \othertmpdir ->
withTmpDirIn (fromRawFilePath othertmpdir) (toOsPath "proxy") $ \tmpdir ->
a (toRawFilePath tmpdir P.</> keyFile k)
withTmpDirIn othertmpdir (literalOsPath "proxy") $ \tmpdir ->
a (tmpdir </> keyFile k)

proxyput af k = do
liftIO $ sendmessage $ PUT_FROM (Offset 0)

@@ -186,14 +187,14 @@ proxySpecialRemote protoversion r ihdl ohdl owaitv oclosedv mexportdb = go
-- the client, to avoid bad content
-- being stored in the special remote.
iv <- startVerifyKeyContentIncrementally Remote.AlwaysVerify k
h <- liftIO $ F.openFile (toOsPath tmpfile) WriteMode
let nuketmp = liftIO $ removeWhenExistsWith removeFile (fromRawFilePath tmpfile)
h <- liftIO $ F.openFile tmpfile WriteMode
let nuketmp = liftIO $ removeWhenExistsWith removeFile tmpfile
gotall <- liftIO $ receivetofile iv h len
liftIO $ hClose h
verified <- if gotall
then fst <$> finishVerifyKeyContentIncrementally' True iv
else pure False
let store = tryNonAsync (storeput k af (decodeBS tmpfile)) >>= \case
let store = tryNonAsync (storeput k af tmpfile) >>= \case
Right () -> liftIO $ sendmessage SUCCESS
Left err -> liftIO $ propagateerror err
if protoversion > ProtocolVersion 1

@@ -260,9 +261,13 @@ proxySpecialRemote protoversion r ihdl ohdl owaitv oclosedv mexportdb = go
storetofile iv h (n - fromIntegral (B.length b)) bs

proxyget offset af k = withproxytmpfile k $ \tmpfile -> do
let retrieve = tryNonAsync $ Remote.retrieveKeyFile r k af
(fromRawFilePath tmpfile) nullMeterUpdate vc
let retrieve = tryNonAsync $ Remote.retrieveKeyFile
r k af tmpfile nullMeterUpdate vc
#ifndef mingw32_HOST_OS
ordered <- Remote.retrieveKeyFileInOrder r
#else
_ <- Remote.retrieveKeyFileInOrder r
#endif
case fromKey keySize k of
#ifndef mingw32_HOST_OS
Just size | size > 0 && ordered -> do

@@ -292,7 +297,7 @@ proxySpecialRemote protoversion r ihdl ohdl owaitv oclosedv mexportdb = go
sendlen offset size
waitforfile
x <- tryNonAsync $ do
h <- openFileBeingWritten f
h <- openFileBeingWritten (fromOsPath f)
hSeek h AbsoluteSeek offset
senddata' h (getcontents size)
case x of

@@ -344,7 +349,7 @@ proxySpecialRemote protoversion r ihdl ohdl owaitv oclosedv mexportdb = go
senddata (Offset offset) f = do
size <- fromIntegral <$> getFileSize f
sendlen offset size
withBinaryFile (fromRawFilePath f) ReadMode $ \h -> do
F.withBinaryFile f ReadMode $ \h -> do
hSeek h AbsoluteSeek offset
senddata' h L.hGetContents

@@ -31,7 +31,7 @@ addCommand commonparams command params files = do
store =<< flushWhenFull =<<
(Git.Queue.addCommand commonparams command params files q =<< gitRepo)

addFlushAction :: Git.Queue.FlushActionRunner Annex -> [RawFilePath] -> Annex ()
addFlushAction :: Git.Queue.FlushActionRunner Annex -> [OsPath] -> Annex ()
addFlushAction runner files = do
q <- get
store =<< flushWhenFull =<<

@@ -21,20 +21,18 @@ import Utility.Tmp
import Utility.Tmp.Dir
import Utility.Directory.Create

import qualified System.FilePath.ByteString as P

{- replaceFile on a file located inside the gitAnnexDir. -}
replaceGitAnnexDirFile :: RawFilePath -> (RawFilePath -> Annex a) -> Annex a
replaceGitAnnexDirFile :: OsPath -> (OsPath -> Annex a) -> Annex a
replaceGitAnnexDirFile = replaceFile createAnnexDirectory

{- replaceFile on a file located inside the .git directory. -}
replaceGitDirFile :: RawFilePath -> (RawFilePath -> Annex a) -> Annex a
replaceGitDirFile :: OsPath -> (OsPath -> Annex a) -> Annex a
replaceGitDirFile = replaceFile $ \dir -> do
top <- fromRepo localGitDir
liftIO $ createDirectoryUnder [top] dir

{- replaceFile on a worktree file. -}
replaceWorkTreeFile :: RawFilePath -> (RawFilePath -> Annex a) -> Annex a
replaceWorkTreeFile :: OsPath -> (OsPath -> Annex a) -> Annex a
replaceWorkTreeFile = replaceFile createWorkTreeDirectory

{- Replaces a possibly already existing file with a new version,

@@ -52,20 +50,20 @@ replaceWorkTreeFile = replaceFile createWorkTreeDirectory
- The createdirectory action is only run when moving the file into place
- fails, and can create any parent directory structure needed.
-}
replaceFile :: (RawFilePath -> Annex ()) -> RawFilePath -> (RawFilePath -> Annex a) -> Annex a
replaceFile :: (OsPath -> Annex ()) -> OsPath -> (OsPath -> Annex a) -> Annex a
replaceFile createdirectory file action = replaceFile' createdirectory file (const True) action

replaceFile' :: (RawFilePath -> Annex ()) -> RawFilePath -> (a -> Bool) -> (RawFilePath -> Annex a) -> Annex a
replaceFile' :: (OsPath -> Annex ()) -> OsPath -> (a -> Bool) -> (OsPath -> Annex a) -> Annex a
replaceFile' createdirectory file checkres action = withOtherTmp $ \othertmpdir -> do
let basetmp = relatedTemplate' (P.takeFileName file)
withTmpDirIn (fromRawFilePath othertmpdir) (toOsPath basetmp) $ \tmpdir -> do
let tmpfile = toRawFilePath tmpdir P.</> basetmp
let basetmp = relatedTemplate (fromOsPath (takeFileName file))
withTmpDirIn othertmpdir basetmp $ \tmpdir -> do
let tmpfile = tmpdir </> basetmp
r <- action tmpfile
when (checkres r) $
replaceFileFrom tmpfile file createdirectory
return r

replaceFileFrom :: RawFilePath -> RawFilePath -> (RawFilePath -> Annex ()) -> Annex ()
replaceFileFrom :: OsPath -> OsPath -> (OsPath -> Annex ()) -> Annex ()
replaceFileFrom src dest createdirectory = go `catchIO` fallback
where
go = liftIO $ moveFile src dest

@@ -23,8 +23,6 @@ import Utility.PID
import Control.Concurrent
import Text.Read
import Data.Time.Clock.POSIX
import qualified Utility.RawFilePath as R
import qualified System.FilePath.ByteString as P

{- Called when a location log change is journalled, so the LiveUpdate
- is done. This is called with the journal still locked, so no concurrent

@@ -146,12 +144,11 @@ checkStaleSizeChanges :: RepoSizeHandle -> Annex ()
checkStaleSizeChanges h@(RepoSizeHandle (Just _) livev) = do
livedir <- calcRepo' gitAnnexRepoSizeLiveDir
pid <- liftIO getPID
let pidlockfile = show pid
let pidlockfile = toOsPath (show pid)
now <- liftIO getPOSIXTime
liftIO (takeMVar livev) >>= \case
Nothing -> do
lck <- takeExclusiveLock $
livedir P.</> toRawFilePath pidlockfile
lck <- takeExclusiveLock $ livedir </> pidlockfile
go livedir lck pidlockfile now
Just v@(lck, lastcheck)
| now >= lastcheck + 60 ->

@@ -161,11 +158,11 @@ checkStaleSizeChanges h@(RepoSizeHandle (Just _) livev) = do
where
go livedir lck pidlockfile now = do
void $ tryNonAsync $ do
lockfiles <- liftIO $ filter (not . dirCruft . toRawFilePath)
<$> getDirectoryContents (fromRawFilePath livedir)
lockfiles <- liftIO $ filter (`notElem` dirCruft)
<$> getDirectoryContents livedir
stale <- forM lockfiles $ \lockfile ->
if (lockfile /= pidlockfile)
then case readMaybe lockfile of
then case readMaybe (fromOsPath lockfile) of
Nothing -> return Nothing
Just pid -> checkstale livedir lockfile pid
else return Nothing

@@ -176,7 +173,7 @@ checkStaleSizeChanges h@(RepoSizeHandle (Just _) livev) = do
liftIO $ putMVar livev (Just (lck, now))

checkstale livedir lockfile pid =
let f = livedir P.</> toRawFilePath lockfile
let f = livedir </> lockfile
in trySharedLock f >>= \case
Nothing -> return Nothing
Just lck -> do

@@ -184,6 +181,6 @@ checkStaleSizeChanges h@(RepoSizeHandle (Just _) livev) = do
( StaleSizeChanger (SizeChangeProcessId pid)
, do
dropLock lck
removeWhenExistsWith R.removeLink f
removeWhenExistsWith removeFile f
)
checkStaleSizeChanges (RepoSizeHandle Nothing _) = noop

Annex/Sim.hs (47 changed lines)

@@ -55,8 +55,6 @@ import qualified Data.ByteString as B
import qualified Data.ByteString.Lazy as L
import qualified Data.UUID as U
import qualified Data.UUID.V5 as U5
import qualified Utility.RawFilePath as R
import qualified System.FilePath.ByteString as P

data SimState t = SimState
{ simRepos :: M.Map RepoName UUID

@@ -342,7 +340,7 @@ applySimCommand c@(CommandVisit repo cmdparams) st _ =
_ -> return ("sh", ["-c", unwords cmdparams])
exitcode <- liftIO $
safeSystem' cmd (map Param params)
(\p -> p { cwd = Just dir })
(\p -> p { cwd = Just (fromOsPath dir) })
when (null cmdparams) $
showLongNote "Finished visit to simulated repository."
if null cmdparams

@@ -431,7 +429,7 @@ applySimCommand' (CommandAddTree repo expr) st _ =
<$> inRepo (toTopFilePath f)
ifM (checkMatcher matcher (Just k) afile NoLiveUpdate mempty (pure False) (pure False))
( let st'' = setPresentKey True (u, repo) k u $ st'
{ simFiles = M.insert f k (simFiles st')
{ simFiles = M.insert (fromOsPath f) k (simFiles st')
}
in go matcher u st'' fs
, go matcher u st' fs

@@ -758,7 +756,7 @@ overFilesRemote r u remote remotepred localpred checkwant handlewanted st =
Right (Left (st, map (go remoteu) $ M.toList $ simFiles st))
where
go remoteu (f, k) st' =
let af = AssociatedFile $ Just f
let af = AssociatedFile $ Just $ toOsPath f
in liftIO $ runSimRepo u st' $ \st'' rst ->
case M.lookup remoteu (simRepoState st'') of
Nothing -> return (st'', False)

@@ -814,7 +812,7 @@ simulateDropUnwanted st u dropfromname dropfrom =
Right $ Left (st, map go $ M.toList $ simFiles st)
where
go (f, k) st' = liftIO $ runSimRepo u st' $ \st'' rst ->
let af = AssociatedFile $ Just f
let af = AssociatedFile $ Just $ toOsPath f
in if present dropfrom rst k
then updateLiveSizeChanges rst $
ifM (wantDrop NoLiveUpdate False (Just dropfrom) (Just k) af Nothing)

@@ -1104,7 +1102,7 @@ initNewSimRepos = \st -> go st (M.toList $ simRepoState st)
go st ((u, rst):rest) =
case simRepo rst of
Nothing -> do
let d = simRepoDirectory st u
let d = fromOsPath $ simRepoDirectory st u
sr <- initSimRepo (simRepoName rst) u d st
let rst' = rst { simRepo = Just sr }
let st' = st

@@ -1114,8 +1112,8 @@ initNewSimRepos = \st -> go st (M.toList $ simRepoState st)
go st' rest
_ -> go st rest

simRepoDirectory :: SimState t -> UUID -> FilePath
simRepoDirectory st u = simRootDirectory st </> fromUUID u
simRepoDirectory :: SimState t -> UUID -> OsPath
simRepoDirectory st u = toOsPath (simRootDirectory st) </> fromUUID u

initSimRepo :: RepoName -> UUID -> FilePath -> SimState SimRepo -> IO SimRepo
initSimRepo simreponame u dest st = do

@@ -1126,7 +1124,7 @@ initSimRepo simreponame u dest st = do
]
unless inited $
giveup "git init failed"
simrepo <- Git.Construct.fromPath (toRawFilePath dest)
simrepo <- Git.Construct.fromPath (toOsPath dest)
ast <- Annex.new simrepo
((), ast') <- Annex.run ast $ doQuietAction $ do
storeUUID u

@@ -1301,15 +1299,15 @@ updateSimRepoState newst sr = do
setdesc r u = describeUUID u $ toUUIDDesc $
simulatedRepositoryDescription r
stageannexedfile f k = do
let f' = annexedfilepath f
let f' = annexedfilepath (toOsPath f)
l <- calcRepo $ gitAnnexLink f' k
liftIO $ createDirectoryIfMissing True $
takeDirectory $ fromRawFilePath f'
addAnnexLink l f'
unstageannexedfile f = do
liftIO $ removeWhenExistsWith R.removeLink $
annexedfilepath f
annexedfilepath f = repoPath (simRepoGitRepo sr) P.</> f
liftIO $ createDirectoryIfMissing True $ takeDirectory f'
addAnnexLink (fromOsPath l) f'
unstageannexedfile f =
liftIO $ removeWhenExistsWith removeFile $
annexedfilepath (toOsPath f)
annexedfilepath f = repoPath (simRepoGitRepo sr) </> f
getlocations = maybe mempty simLocations
. M.lookup (simRepoUUID sr)
. simRepoState

@@ -1359,19 +1356,21 @@ suspendSim st = do
let st'' = st'
{ simRepoState = M.map freeze (simRepoState st')
}
writeFile (simRootDirectory st'' </> "state") (show st'')
let statefile = fromOsPath $
toOsPath (simRootDirectory st'') </> literalOsPath "state"
writeFile statefile (show st'')
where
freeze :: SimRepoState SimRepo -> SimRepoState ()
freeze rst = rst { simRepo = Nothing }

restoreSim :: RawFilePath -> IO (Either String (SimState SimRepo))
restoreSim :: OsPath -> IO (Either String (SimState SimRepo))
restoreSim rootdir =
tryIO (readFile (fromRawFilePath rootdir </> "state")) >>= \case
tryIO (readFile statefile) >>= \case
Left err -> return (Left (show err))
Right c -> case readMaybe c :: Maybe (SimState ()) of
Nothing -> return (Left "unable to parse sim state file")
Just st -> do
let st' = st { simRootDirectory = fromRawFilePath rootdir }
let st' = st { simRootDirectory = fromOsPath rootdir }
repostate <- M.fromList
<$> mapM (thaw st') (M.toList (simRepoState st))
let st'' = st'

@@ -1380,12 +1379,12 @@ restoreSim rootdir =
}
return (Right st'')
where
statefile = fromOsPath $ rootdir </> literalOsPath "state"
thaw st (u, rst) = tryNonAsync (thaw' st u) >>= return . \case
Left _ -> (u, rst { simRepo = Nothing })
Right r -> (u, rst { simRepo = Just r })
thaw' st u = do
simrepo <- Git.Construct.fromPath $ toRawFilePath $
simRepoDirectory st u
simrepo <- Git.Construct.fromPath $ simRepoDirectory st u
ast <- Annex.new simrepo
return $ SimRepo
{ simRepoGitRepo = simrepo
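
Prelude's readFile/writeFile still take FilePath, which is why suspendSim/restoreSim above convert the state file path with fromOsPath at the boundary. A sketch of the same pattern with stock System.OsPath, where decodeUtf stands in for fromOsPath and can fail on undecodable names:

    {-# LANGUAGE QuasiQuotes #-}
    module SimStateSketch where

    import System.OsPath (OsPath, osp, (</>), decodeUtf)

    -- Read the "state" file under a root directory, converting the OsPath
    -- to FilePath only at the readFile boundary.
    readStateFile :: OsPath -> IO String
    readStateFile rootdir = do
        p <- decodeUtf (rootdir </> [osp|state|])  -- throws in IO if undecodable
        readFile p
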
Annex/Ssh.hs (63 changed lines)

@@ -40,14 +40,14 @@ import Types.Concurrency
import Git.Env
import Git.Ssh
import qualified Utility.RawFilePath as R
import qualified Utility.OsString as OS
import Annex.Perms
#ifndef mingw32_HOST_OS
import Annex.LockPool
#endif

import Control.Concurrent.STM
import qualified Data.ByteString as S
import qualified System.FilePath.ByteString as P
import qualified Data.ByteString.Short as SBS

{- Some ssh commands are fed stdin on a pipe and so should be allowed to
- consume it. But ssh commands that are not piped stdin should generally

@@ -101,15 +101,15 @@ consumeStdinParams NoConsumeStdin = [Param "-n"]

{- Returns a filename to use for a ssh connection caching socket, and
- parameters to enable ssh connection caching. -}
sshCachingInfo :: (SshHost, Maybe Integer) -> Annex (Maybe RawFilePath, [CommandParam])
sshCachingInfo :: (SshHost, Maybe Integer) -> Annex (Maybe OsPath, [CommandParam])
sshCachingInfo (host, port) = go =<< sshCacheDir'
where
go (Right dir) =
liftIO (bestSocketPath $ dir P.</> hostport2socket host port) >>= return . \case
liftIO (bestSocketPath $ dir </> hostport2socket host port) >>= return . \case
Nothing -> (Nothing, [])
Just socketfile ->
(Just socketfile
, sshConnectionCachingParams (fromRawFilePath socketfile)
, sshConnectionCachingParams (fromOsPath socketfile)
)
-- No connection caching with concurrency is not a good
-- combination, so warn the user.

@@ -137,10 +137,10 @@ sshCachingInfo (host, port) = go =<< sshCacheDir'
- file.
-
- If no path can be constructed that is a valid socket, returns Nothing. -}
bestSocketPath :: RawFilePath -> IO (Maybe RawFilePath)
bestSocketPath :: OsPath -> IO (Maybe OsPath)
bestSocketPath abssocketfile = do
relsocketfile <- liftIO $ relPathCwdToFile abssocketfile
let socketfile = if S.length abssocketfile <= S.length relsocketfile
let socketfile = if OS.length abssocketfile <= OS.length relsocketfile
then abssocketfile
else relsocketfile
return $ if valid_unix_socket_path socketfile sshgarbagelen

@@ -167,10 +167,10 @@ sshSocketDirEnv = "GIT_ANNEX_SSH_SOCKET_DIR"
-
- The directory will be created if it does not exist.
-}
sshCacheDir :: Annex (Maybe RawFilePath)
sshCacheDir :: Annex (Maybe OsPath)
sshCacheDir = eitherToMaybe <$> sshCacheDir'

sshCacheDir' :: Annex (Either String RawFilePath)
sshCacheDir' :: Annex (Either String OsPath)
sshCacheDir' =
ifM (fromMaybe BuildInfo.sshconnectioncaching . annexSshCaching <$> Annex.getGitConfig)
( ifM crippledFileSystem

@@ -191,9 +191,9 @@ sshCacheDir' =
gettmpdir = liftIO $ getEnv sshSocketDirEnv

usetmpdir tmpdir = do
let socktmp = tmpdir </> "ssh"
let socktmp = toOsPath tmpdir </> literalOsPath "ssh"
createDirectoryIfMissing True socktmp
return (toRawFilePath socktmp)
return socktmp

crippledfswarning = unwords
[ "This repository is on a crippled filesystem, so unix named"

@@ -216,7 +216,7 @@ portParams (Just port) = [Param "-p", Param $ show port]
- Locks the socket lock file to prevent other git-annex processes from
- stopping the ssh multiplexer on this socket.
-}
prepSocket :: RawFilePath -> SshHost -> [CommandParam] -> Annex ()
prepSocket :: OsPath -> SshHost -> [CommandParam] -> Annex ()
prepSocket socketfile sshhost sshparams = do
-- There could be stale ssh connections hanging around
-- from a previous git-annex run that was interrupted.

@@ -288,11 +288,11 @@ prepSocket socketfile sshhost sshparams = do
- and this check makes such files be skipped since the corresponding lock
- file won't exist.
-}
enumSocketFiles :: Annex [RawFilePath]
enumSocketFiles :: Annex [OsPath]
enumSocketFiles = liftIO . go =<< sshCacheDir
where
go Nothing = return []
go (Just dir) = filterM (R.doesPathExist . socket2lock)
go (Just dir) = filterM (R.doesPathExist . fromOsPath . socket2lock)
=<< filter (not . isLock)
<$> catchDefaultIO [] (dirContents dir)

@@ -326,45 +326,45 @@ sshCleanup = mapM_ cleanup =<< enumSocketFiles
forceSshCleanup :: Annex ()
forceSshCleanup = mapM_ forceStopSsh =<< enumSocketFiles

forceStopSsh :: RawFilePath -> Annex ()
forceStopSsh :: OsPath -> Annex ()
forceStopSsh socketfile = withNullHandle $ \nullh -> do
let (dir, base) = splitFileName (fromRawFilePath socketfile)
let (dir, base) = splitFileName socketfile
let p = (proc "ssh" $ toCommand $
[ Param "-O", Param "stop" ] ++
sshConnectionCachingParams base ++
sshConnectionCachingParams (fromOsPath base) ++
[Param "localhost"])
{ cwd = Just dir
{ cwd = Just (fromOsPath dir)
-- "ssh -O stop" is noisy on stderr even with -q
, std_out = UseHandle nullh
, std_err = UseHandle nullh
}
void $ liftIO $ catchMaybeIO $ withCreateProcess p $ \_ _ _ pid ->
forceSuccessProcess p pid
liftIO $ removeWhenExistsWith R.removeLink socketfile
liftIO $ removeWhenExistsWith removeFile socketfile

{- This needs to be as short as possible, due to limitations on the length
- of the path to a socket file. At the same time, it needs to be unique
- for each host.
-}
hostport2socket :: SshHost -> Maybe Integer -> RawFilePath
hostport2socket :: SshHost -> Maybe Integer -> OsPath
hostport2socket host Nothing = hostport2socket' $ fromSshHost host
hostport2socket host (Just port) = hostport2socket' $
fromSshHost host ++ "!" ++ show port
hostport2socket' :: String -> RawFilePath
hostport2socket' :: String -> OsPath
hostport2socket' s
| length s > lengthofmd5s = toRawFilePath $ show $ md5 $ encodeBL s
| otherwise = toRawFilePath s
| length s > lengthofmd5s = toOsPath $ show $ md5 $ encodeBL s
| otherwise = toOsPath s
where
lengthofmd5s = 32

socket2lock :: RawFilePath -> RawFilePath
socket2lock :: OsPath -> OsPath
socket2lock socket = socket <> lockExt

isLock :: RawFilePath -> Bool
isLock f = lockExt `S.isSuffixOf` f
isLock :: OsPath -> Bool
isLock f = lockExt `OS.isSuffixOf` f

lockExt :: S.ByteString
lockExt = ".lock"
lockExt :: OsPath
lockExt = literalOsPath ".lock"

{- This is the size of the sun_path component of sockaddr_un, which
- is the limit to the total length of the filename of a unix socket.

@@ -376,8 +376,9 @@ sizeof_sockaddr_un_sun_path = 100

{- Note that this looks at the true length of the path in bytes, as it will
- appear on disk. -}
valid_unix_socket_path :: RawFilePath -> Int -> Bool
valid_unix_socket_path f n = S.length f + n < sizeof_sockaddr_un_sun_path
valid_unix_socket_path :: OsPath -> Int -> Bool
valid_unix_socket_path f n =
SBS.length (fromOsPath f) + n < sizeof_sockaddr_un_sun_path

{- Parses the SSH port, and returns the other OpenSSH options. If
- several ports are found, the last one takes precedence. -}

@@ -463,7 +464,7 @@ sshOptionsTo remote gc localr
liftIO $ do
localr' <- addGitEnv localr sshOptionsEnv
(toSshOptionsEnv sshopts)
addGitEnv localr' gitSshEnv command
addGitEnv localr' gitSshEnv (fromOsPath command)

runSshOptions :: [String] -> String -> IO ()
runSshOptions args s = do
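
bestSocketPath above exists because unix socket paths are limited to roughly 100 bytes (sizeof_sockaddr_un_sun_path), so the shorter of the absolute and relative spellings is preferred. A sketch of that choice, where makeRelative stands in for git-annex's relPathCwdToFile and System.OsString.length counts platform words (bytes on POSIX, so only an approximation on Windows):

    module SocketPathSketch where

    import System.OsPath (OsPath, makeRelative)
    import qualified System.OsString as OS

    -- Prefer whichever spelling of the socket path is shorter.
    shorterSocketPath :: OsPath -> OsPath -> OsPath
    shorterSocketPath cwd abssocket
        | OS.length abssocket <= OS.length rel = abssocket
        | otherwise = rel
      where
        rel = makeRelative cwd abssocket
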
13
Annex/Tmp.hs
13
Annex/Tmp.hs
|
@ -23,7 +23,7 @@ import System.PosixCompat.Files (modificationTime)
|
|||
-- directory that is passed to it. However, once the action is done,
|
||||
-- any files left in that directory may be cleaned up by another process at
|
||||
-- any time.
|
||||
withOtherTmp :: (RawFilePath -> Annex a) -> Annex a
|
||||
withOtherTmp :: (OsPath -> Annex a) -> Annex a
|
||||
withOtherTmp a = do
|
||||
Annex.addCleanupAction OtherTmpCleanup cleanupOtherTmp
|
||||
tmpdir <- fromRepo gitAnnexTmpOtherDir
|
||||
|
@ -40,14 +40,14 @@ withOtherTmp a = do
|
|||
-- Unlike withOtherTmp, this does not rely on locking working.
|
||||
-- Its main use is in situations where the state of lockfile is not
|
||||
-- determined yet, eg during initialization.
|
||||
withEventuallyCleanedOtherTmp :: (RawFilePath -> Annex a) -> Annex a
|
||||
withEventuallyCleanedOtherTmp :: (OsPath -> Annex a) -> Annex a
|
||||
withEventuallyCleanedOtherTmp = bracket setup cleanup
|
||||
where
|
||||
setup = do
|
||||
tmpdir <- fromRepo gitAnnexTmpOtherDirOld
|
||||
void $ createAnnexDirectory tmpdir
|
||||
return tmpdir
|
||||
cleanup = liftIO . void . tryIO . removeDirectory . fromRawFilePath
|
||||
cleanup = liftIO . void . tryIO . removeDirectory
|
||||
|
||||
-- | Cleans up any tmp files that were left by a previous
|
||||
-- git-annex process that got interrupted or failed to clean up after
|
||||
|
@ -58,14 +58,13 @@ cleanupOtherTmp :: Annex ()
|
|||
cleanupOtherTmp = do
|
||||
tmplck <- fromRepo gitAnnexTmpOtherLock
|
||||
void $ tryIO $ tryExclusiveLock tmplck $ do
|
||||
tmpdir <- fromRawFilePath <$> fromRepo gitAnnexTmpOtherDir
|
||||
tmpdir <- fromRepo gitAnnexTmpOtherDir
|
||||
void $ liftIO $ tryIO $ removeDirectoryRecursive tmpdir
|
||||
oldtmp <- fromRepo gitAnnexTmpOtherDirOld
|
||||
liftIO $ mapM_ cleanold
|
||||
liftIO $ mapM_ (cleanold . fromOsPath)
|
||||
=<< emptyWhenDoesNotExist (dirContentsRecursive oldtmp)
|
||||
-- remove when empty
|
||||
liftIO $ void $ tryIO $
|
||||
removeDirectory (fromRawFilePath oldtmp)
|
||||
liftIO $ void $ tryIO $ removeDirectory oldtmp
|
||||
where
|
||||
cleanold f = do
|
||||
now <- liftIO getPOSIXTime
|
||||
|
|
|
@@ -44,13 +44,11 @@ import Annex.TransferrerPool
 import Annex.StallDetection
 import Backend (isCryptographicallySecureKey)
 import Types.StallDetection
-import qualified Utility.RawFilePath as R

 import Control.Concurrent
 import Control.Concurrent.Async
 import Control.Concurrent.STM hiding (retry)
 import qualified Data.Map.Strict as M
-import qualified System.FilePath.ByteString as P
 import Data.Ord

 -- Upload, supporting canceling detected stalls.

@@ -83,7 +81,7 @@ download r key f d witness =
 go sd = getViaTmp (Remote.retrievalSecurityPolicy r) vc key f Nothing $ \dest ->
 download' (Remote.uuid r) key f sd d (go' dest) witness
 go' dest p = verifiedAction $
-Remote.retrieveKeyFile r key f (fromRawFilePath dest) p vc
+Remote.retrieveKeyFile r key f dest p vc
 vc = Remote.RemoteVerify r

 -- Download, not supporting canceling detected stalls.
@@ -146,10 +144,10 @@ runTransfer' ignorelock t eventualbackend afile stalldetection retrydecider tran
 else recordFailedTransfer t info
 return v

-prep :: RawFilePath -> Maybe RawFilePath -> Annex () -> ModeSetter -> Annex (Maybe (LockHandle, Maybe LockHandle), Bool)
+prep :: OsPath -> Maybe OsPath -> Annex () -> ModeSetter -> Annex (Maybe (LockHandle, Maybe LockHandle), Bool)
 #ifndef mingw32_HOST_OS
 prep lckfile moldlckfile createtfile mode = catchPermissionDenied (const prepfailed) $ do
-createAnnexDirectory $ P.takeDirectory lckfile
+createAnnexDirectory $ takeDirectory lckfile
 tryLockExclusive (Just mode) lckfile >>= \case
 Nothing -> return (Nothing, True)
 -- Since the lock file is removed in cleanup,

@@ -163,7 +161,7 @@ runTransfer' ignorelock t eventualbackend afile stalldetection retrydecider tran
 createtfile
 return (Just (lockhandle, Nothing), False)
 Just oldlckfile -> do
-createAnnexDirectory $ P.takeDirectory oldlckfile
+createAnnexDirectory $ takeDirectory oldlckfile
 tryLockExclusive (Just mode) oldlckfile >>= \case
 Nothing -> do
 liftIO $ dropLock lockhandle

@@ -183,14 +181,14 @@ runTransfer' ignorelock t eventualbackend afile stalldetection retrydecider tran
 )
 #else
 prep lckfile moldlckfile createtfile _mode = catchPermissionDenied (const prepfailed) $ do
-createAnnexDirectory $ P.takeDirectory lckfile
+createAnnexDirectory $ takeDirectory lckfile
 catchMaybeIO (liftIO $ lockExclusive lckfile) >>= \case
 Just (Just lockhandle) -> case moldlckfile of
 Nothing -> do
 createtfile
 return (Just (lockhandle, Nothing), False)
 Just oldlckfile -> do
-createAnnexDirectory $ P.takeDirectory oldlckfile
+createAnnexDirectory $ takeDirectory oldlckfile
 catchMaybeIO (liftIO $ lockExclusive oldlckfile) >>= \case
 Just (Just oldlockhandle) -> do
 createtfile

@@ -204,10 +202,10 @@ runTransfer' ignorelock t eventualbackend afile stalldetection retrydecider tran
 cleanup _ _ _ Nothing = noop
 cleanup tfile lckfile moldlckfile (Just (lockhandle, moldlockhandle)) = do
-void $ tryIO $ R.removeLink tfile
+void $ tryIO $ removeFile tfile
 #ifndef mingw32_HOST_OS
-void $ tryIO $ R.removeLink lckfile
-maybe noop (void . tryIO . R.removeLink) moldlckfile
+void $ tryIO $ removeFile lckfile
+maybe noop (void . tryIO . removeFile) moldlckfile
 maybe noop dropLock moldlockhandle
 dropLock lockhandle
 #else

@@ -219,7 +217,7 @@ runTransfer' ignorelock t eventualbackend afile stalldetection retrydecider tran
 maybe noop dropLock moldlockhandle
 dropLock lockhandle
-void $ tryIO $ R.removeLink lckfile
-maybe noop (void . tryIO . R.removeLink) moldlckfile
+maybe noop (void . tryIO . removeFile) moldlckfile
 #endif

 retry numretries oldinfo metervar run =
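prep above first makes sure the lock file's parent directory exists before trying to take the lock; with OsPath end-to-end that is just takeDirectory plus the OsPath variant of createDirectoryIfMissing. A minimal standalone sketch of that step (the exclusive-lock call itself is left out, since that part is git-annex's own locking layer):

-- Ensure a lock file's parent directory exists before locking.
-- Uses only System.OsPath and System.Directory.OsPath.
import System.Directory.OsPath (createDirectoryIfMissing)
import System.OsPath (OsPath, takeDirectory)

prepareLockDir :: OsPath -> IO ()
prepareLockDir lckfile =
    createDirectoryIfMissing True (takeDirectory lckfile)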
@@ -43,7 +43,7 @@ data RunTransferrer = RunTransferrer String [CommandParam] BatchCommandMaker

 mkRunTransferrer :: BatchCommandMaker -> Annex RunTransferrer
 mkRunTransferrer batchmaker = RunTransferrer
-<$> liftIO programPath
+<$> liftIO (fromOsPath <$> programPath)
 <*> gitAnnexChildProcessParams "transferrer" []
 <*> pure batchmaker
@@ -174,13 +174,13 @@ checkBoth url expected_size uo =
 Right r -> return r
 Left err -> warning (UnquotedString err) >> return False

-download :: MeterUpdate -> Maybe IncrementalVerifier -> U.URLString -> FilePath -> U.UrlOptions -> Annex Bool
+download :: MeterUpdate -> Maybe IncrementalVerifier -> U.URLString -> OsPath -> U.UrlOptions -> Annex Bool
 download meterupdate iv url file uo =
 liftIO (U.download meterupdate iv url file uo) >>= \case
 Right () -> return True
 Left err -> warning (UnquotedString err) >> return False

-download' :: MeterUpdate -> Maybe IncrementalVerifier -> U.URLString -> FilePath -> U.UrlOptions -> Annex (Either String ())
+download' :: MeterUpdate -> Maybe IncrementalVerifier -> U.URLString -> OsPath -> U.UrlOptions -> Annex (Either String ())
 download' meterupdate iv url file uo =
 liftIO (U.download meterupdate iv url file uo)
@@ -5,21 +5,24 @@
 - Licensed under the GNU AGPL version 3 or higher.
 -}

+{-# LANGUAGE OverloadedStrings #-}
+
 module Annex.VariantFile where

 import Annex.Common
 import Utility.Hash
+import qualified Utility.OsString as OS

 import qualified Data.ByteString as S

-variantMarker :: String
-variantMarker = ".variant-"
+variantMarker :: OsPath
+variantMarker = literalOsPath ".variant-"

-mkVariant :: FilePath -> String -> FilePath
+mkVariant :: OsPath -> OsPath -> OsPath
 mkVariant file variant = takeDirectory file
 </> dropExtension (takeFileName file)
-++ variantMarker ++ variant
-++ takeExtension file
+<> variantMarker <> variant
+<> takeExtension file

 {- The filename to use when resolving a conflicted merge of a file,
 - that points to a key.

@@ -34,12 +37,12 @@ mkVariant file variant = takeDirectory file
 - conflicted merge resolution code. That case is detected, and the full
 - key is used in the filename.
 -}
-variantFile :: FilePath -> Key -> FilePath
+variantFile :: OsPath -> Key -> OsPath
 variantFile file key
-| doubleconflict = mkVariant file (fromRawFilePath (keyFile key))
-| otherwise = mkVariant file (shortHash $ serializeKey' key)
+| doubleconflict = mkVariant file (keyFile key)
+| otherwise = mkVariant file (toOsPath (shortHash $ serializeKey' key))
 where
-doubleconflict = variantMarker `isInfixOf` file
+doubleconflict = variantMarker `OS.isInfixOf` file

 shortHash :: S.ByteString -> String
 shortHash = take 4 . show . md5s
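The OsPath version of mkVariant keeps the old naming scheme: directory, base name, ".variant-", variant tag, original extension. Here is a standalone sketch of that scheme using only the public System.OsPath API, handy for checking the expected output; encodeUtf stands in for git-annex's own literalOsPath/toOsPath helpers.

-- Same file-naming scheme as mkVariant above, expressed with public
-- System.OsPath functions only.
import Data.Maybe (fromMaybe)
import System.OsPath
    ( OsPath, (</>), dropExtension, encodeUtf
    , takeDirectory, takeExtension, takeFileName )

osp' :: String -> OsPath
osp' = fromMaybe (error "bad literal") . encodeUtf

mkVariantLike :: OsPath -> OsPath -> OsPath
mkVariantLike file variant = takeDirectory file
    </> dropExtension (takeFileName file)
    <> osp' ".variant-" <> variant
    <> takeExtension file

-- On a POSIX build:
-- mkVariantLike (osp' "dir/file.txt") (osp' "abcd")
--   == osp' "dir/file.variant-abcd.txt"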
@@ -39,13 +39,13 @@ import Utility.Metered
 import Annex.WorkerPool
 import Types.WorkerPool
 import Types.Key
+import qualified Utility.FileIO as F

 import Control.Concurrent.STM
 import Control.Concurrent.Async
 import qualified Data.ByteString as S
 #if WITH_INOTIFY
 import qualified System.INotify as INotify
-import qualified System.FilePath.ByteString as P
 #endif

 shouldVerify :: VerifyConfig -> Annex Bool

@@ -73,7 +73,7 @@ shouldVerify (RemoteVerify r) =
 - If the RetrievalSecurityPolicy requires verification and the key's
 - backend doesn't support it, the verification will fail.
 -}
-verifyKeyContentPostRetrieval :: RetrievalSecurityPolicy -> VerifyConfig -> Verification -> Key -> RawFilePath -> Annex Bool
+verifyKeyContentPostRetrieval :: RetrievalSecurityPolicy -> VerifyConfig -> Verification -> Key -> OsPath -> Annex Bool
 verifyKeyContentPostRetrieval rsp v verification k f = case (rsp, verification) of
 (_, Verified) -> return True
 (RetrievalVerifiableKeysSecure, _) -> ifM (isVerifiable k)

@@ -105,11 +105,11 @@ verifyKeyContentPostRetrieval rsp v verification k f = case (rsp, verification)
 -- When possible, does an incremental verification, because that can be
 -- faster. Eg, the VURL backend can need to try multiple checksums and only
 -- with an incremental verification does it avoid reading files twice.
-verifyKeyContent :: Key -> RawFilePath -> Annex Bool
+verifyKeyContent :: Key -> OsPath -> Annex Bool
 verifyKeyContent k f = verifyKeySize k f <&&> verifyKeyContent' k f

 -- Does not verify size.
-verifyKeyContent' :: Key -> RawFilePath -> Annex Bool
+verifyKeyContent' :: Key -> OsPath -> Annex Bool
 verifyKeyContent' k f =
 Backend.maybeLookupBackendVariety (fromKey keyVariety k) >>= \case
 Nothing -> return True

@@ -119,7 +119,7 @@ verifyKeyContent' k f =
 iv <- mkiv k
 showAction (UnquotedString (descIncrementalVerifier iv))
 res <- liftIO $ catchDefaultIO Nothing $
-withBinaryFile (fromRawFilePath f) ReadMode $ \h -> do
+F.withBinaryFile f ReadMode $ \h -> do
 feedIncrementalVerifier h iv
 finalizeIncrementalVerifier iv
 case res of

@@ -129,7 +129,7 @@ verifyKeyContent' k f =
 Just verifier -> verifier k f
 (Nothing, Just verifier) -> verifier k f

-resumeVerifyKeyContent :: Key -> RawFilePath -> IncrementalVerifier -> Annex Bool
+resumeVerifyKeyContent :: Key -> OsPath -> IncrementalVerifier -> Annex Bool
 resumeVerifyKeyContent k f iv = liftIO (positionIncrementalVerifier iv) >>= \case
 Nothing -> fallback
 Just endpos -> do

@@ -151,7 +151,7 @@ resumeVerifyKeyContent k f iv = liftIO (positionIncrementalVerifier iv) >>= \cas
 | otherwise = do
 showAction (UnquotedString (descIncrementalVerifier iv))
 liftIO $ catchDefaultIO (Just False) $
-withBinaryFile (fromRawFilePath f) ReadMode $ \h -> do
+F.withBinaryFile f ReadMode $ \h -> do
 hSeek h AbsoluteSeek endpos
 feedIncrementalVerifier h iv
 finalizeIncrementalVerifier iv

@@ -167,7 +167,7 @@ feedIncrementalVerifier h iv = do
 where
 chunk = 65536

-verifyKeySize :: Key -> RawFilePath -> Annex Bool
+verifyKeySize :: Key -> OsPath -> Annex Bool
 verifyKeySize k f = case fromKey keySize k of
 Just size -> do
 size' <- liftIO $ catchDefaultIO 0 $ getFileSize f

@@ -295,7 +295,7 @@ resumeVerifyFromOffset o incrementalverifier meterupdate h
 -- and if the disk is slow, the reader may never catch up to the writer,
 -- and the disk cache may never speed up reads. So this should only be
 -- used when there's not a better way to incrementally verify.
-tailVerify :: Maybe IncrementalVerifier -> RawFilePath -> Annex a -> Annex a
+tailVerify :: Maybe IncrementalVerifier -> OsPath -> Annex a -> Annex a
 tailVerify (Just iv) f writer = do
 finished <- liftIO newEmptyTMVarIO
 t <- liftIO $ async $ tailVerify' iv f finished

@@ -305,7 +305,7 @@ tailVerify (Just iv) f writer = do
 writer `finally` finishtail
 tailVerify Nothing _ writer = writer

-tailVerify' :: IncrementalVerifier -> RawFilePath -> TMVar () -> IO ()
+tailVerify' :: IncrementalVerifier -> OsPath -> TMVar () -> IO ()
 #if WITH_INOTIFY
 tailVerify' iv f finished =
 tryNonAsync go >>= \case

@@ -318,15 +318,16 @@ tailVerify' iv f finished =
 -- of resuming, and waiting for modification deals with such
 -- situations.
 inotifydirchange i cont =
-INotify.addWatch i [INotify.Modify] dir $ \case
+INotify.addWatch i [INotify.Modify] (fromOsPath dir) $ \case
 -- Ignore changes to other files in the directory.
 INotify.Modified { INotify.maybeFilePath = fn }
-| fn == Just basef -> cont
+| fn == Just basef' -> cont
 _ -> noop
 where
-(dir, basef) = P.splitFileName f
+(dir, basef) = splitFileName f
+basef' = fromOsPath basef

-inotifyfilechange i = INotify.addWatch i [INotify.Modify] f . const
+inotifyfilechange i = INotify.addWatch i [INotify.Modify] (fromOsPath f) . const

 go = INotify.withINotify $ \i -> do
 modified <- newEmptyTMVarIO

@@ -354,7 +355,7 @@ tailVerify' iv f finished =
 case v of
 Just () -> do
 r <- tryNonAsync $
-tryWhenExists (openBinaryFile (fromRawFilePath f) ReadMode) >>= \case
+tryWhenExists (F.openBinaryFile f ReadMode) >>= \case
 Just h -> return (Just h)
 -- File does not exist, must have been
 -- deleted. Wait for next modification
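feedIncrementalVerifier, referenced above, reads the file in 64 KiB chunks and pushes each chunk into the verifier. Below is a standalone version of that read loop. It assumes the file-io package's System.File.OsPath, which the new Utility.FileIO import is presumed to wrap, and uses a generic chunk consumer in place of the verifier.

-- Chunked read loop over an OsPath, in the style of feedIncrementalVerifier.
-- System.File.OsPath comes from the file-io package (an assumption about
-- what Utility.FileIO wraps); the consumer is a stand-in.
import qualified Data.ByteString as S
import System.File.OsPath (withBinaryFile)
import System.IO (Handle, IOMode(ReadMode))
import System.OsPath (OsPath)

feedChunks :: OsPath -> (S.ByteString -> IO ()) -> IO ()
feedChunks f consumer = withBinaryFile f ReadMode loop
  where
    chunk = 65536
    loop :: Handle -> IO ()
    loop h = do
        b <- S.hGetSome h chunk
        if S.null b
            then return ()  -- EOF
            else consumer b >> loop h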
@@ -40,13 +40,12 @@ import Logs.View
 import Utility.Glob
 import Types.Command
 import CmdLine.Action
-import qualified Utility.RawFilePath as R
+import qualified Utility.OsString as OS

 import qualified Data.Text as T
 import qualified Data.ByteString as B
 import qualified Data.Set as S
 import qualified Data.Map as M
-import qualified System.FilePath.ByteString as P
 import Control.Concurrent.Async
 import "mtl" Control.Monad.Writer

@@ -251,7 +250,7 @@ combineViewFilter (ExcludeValues _) new@(FilterGlobOrUnset _ _) = (new, Widening
 - evaluate this function with the view parameter and reuse
 - the result. The globs in the view will then be compiled and memoized.
 -}
-viewedFiles :: View -> MkViewedFile -> FilePath -> MetaData -> [ViewedFile]
+viewedFiles :: View -> MkViewedFile -> OsPath -> MetaData -> [ViewedFile]
 viewedFiles view =
 let matchers = map viewComponentMatcher (viewComponents view)
 in \mkviewedfile file metadata ->

@@ -260,7 +259,8 @@ viewedFiles view =
 then []
 else
 let paths = pathProduct $
-map (map toviewpath) (visible matches)
+map (map (toOsPath . toviewpath))
+(visible matches)
 in if null paths
 then [mkviewedfile file]
 else map (</> mkviewedfile file) paths

@@ -346,7 +346,7 @@ fromViewPath = toMetaValue . encodeBS . deescapepseudo []
 prop_viewPath_roundtrips :: MetaValue -> Bool
 prop_viewPath_roundtrips v = fromViewPath (toViewPath v) == v

-pathProduct :: [[FilePath]] -> [FilePath]
+pathProduct :: [[OsPath]] -> [OsPath]
 pathProduct [] = []
 pathProduct (l:ls) = foldl combinel l ls
 where

@@ -364,7 +364,7 @@ fromView view f = MetaData $ m `M.difference` derived
 filter (not . isviewunset) (zip visible values)
 visible = filter viewVisible (viewComponents view)
 paths = splitDirectories (dropFileName f)
-values = map (S.singleton . fromViewPath) paths
+values = map (S.singleton . fromViewPath . fromOsPath) paths
 MetaData derived = getViewedFileMetaData f
 convfield (vc, v) = (viewField vc, v)

@@ -385,9 +385,9 @@ fromView view f = MetaData $ m `M.difference` derived
 prop_view_roundtrips :: AssociatedFile -> MetaData -> Bool -> Bool
 prop_view_roundtrips (AssociatedFile Nothing) _ _ = True
 prop_view_roundtrips (AssociatedFile (Just f)) metadata visible = or
-[ B.null (P.takeFileName f) && B.null (P.takeDirectory f)
+[ OS.null (takeFileName f) && OS.null (takeDirectory f)
 , viewTooLarge view
-, all hasfields (viewedFiles view (viewedFileFromReference' Nothing Nothing) (fromRawFilePath f) metadata)
+, all hasfields (viewedFiles view (viewedFileFromReference' Nothing Nothing) f metadata)
 ]
 where
 view = View (Git.Ref "foo") $

@@ -402,19 +402,19 @@ prop_view_roundtrips (AssociatedFile (Just f)) metadata visible = or
 - Note that this may generate MetaFields that legalField rejects.
 - This is necessary to have a 1:1 mapping between directory names and
 - fields. So this MetaData cannot safely be serialized. -}
-getDirMetaData :: FilePath -> MetaData
+getDirMetaData :: OsPath -> MetaData
 getDirMetaData d = MetaData $ M.fromList $ zip fields values
 where
 dirs = splitDirectories d
-fields = map (mkMetaFieldUnchecked . T.pack . addTrailingPathSeparator . joinPath)
+fields = map (mkMetaFieldUnchecked . T.pack . fromOsPath . addTrailingPathSeparator . joinPath)
 (inits dirs)
 values = map (S.singleton . toMetaValue . encodeBS . fromMaybe "" . headMaybe)
-(tails dirs)
+(tails (map fromOsPath dirs))

-getWorkTreeMetaData :: FilePath -> MetaData
+getWorkTreeMetaData :: OsPath -> MetaData
 getWorkTreeMetaData = getDirMetaData . dropFileName

-getViewedFileMetaData :: FilePath -> MetaData
+getViewedFileMetaData :: OsPath -> MetaData
 getViewedFileMetaData = getDirMetaData . dirFromViewedFile . takeFileName

 {- Applies a view to the currently checked out branch, generating a new

@@ -439,7 +439,7 @@ narrowView = applyView' viewedFileReuse getViewedFileMetaData
 - Look up the metadata of annexed files, and generate any ViewedFiles,
 - and stage them.
 -}
-applyView' :: MkViewedFile -> (FilePath -> MetaData) -> View -> Maybe Adjustment -> Annex Git.Branch
+applyView' :: MkViewedFile -> (OsPath -> MetaData) -> View -> Maybe Adjustment -> Annex Git.Branch
 applyView' mkviewedfile getfilemetadata view madj = do
 top <- fromRepo Git.repoPath
 (l, clean) <- inRepo $ Git.LsFiles.inRepoDetails [] [top]

@@ -452,7 +452,7 @@ applyView' mkviewedfile getfilemetadata view madj = do

 applyView''
 :: MkViewedFile
--> (FilePath -> MetaData)
+-> (OsPath -> MetaData)
 -> View
 -> Maybe Adjustment
 -> [t]

@@ -488,18 +488,18 @@ applyView'' mkviewedfile getfilemetadata view madj l clean conv = do
 -- Git.UpdateIndex.streamUpdateIndex'
 -- here would race with process's calls
 -- to it.
-| "." `B.isPrefixOf` getTopFilePath topf ->
-feed "dummy"
+| literalOsPath "." `OS.isPrefixOf` getTopFilePath topf ->
+feed (literalOsPath "dummy")
 | otherwise -> noop
 getmetadata gc mdfeeder mdcloser ts

 process uh mdreader = liftIO mdreader >>= \case
 Just ((topf, _, mtreeitemtype, Just k), mdlog) -> do
 let metadata = maybe emptyMetaData parseCurrentMetaData mdlog
-let f = fromRawFilePath $ getTopFilePath topf
+let f = getTopFilePath topf
 let metadata' = getfilemetadata f `unionMetaData` metadata
 forM_ (genviewedfiles f metadata') $ \fv -> do
-f' <- fromRepo (fromTopFilePath $ asTopFilePath $ toRawFilePath fv)
+f' <- fromRepo (fromTopFilePath $ asTopFilePath fv)
 stagefile uh f' k mtreeitemtype
 process uh mdreader
 Just ((topf, sha, Just treeitemtype, Nothing), _) -> do

@@ -527,7 +527,7 @@ applyView'' mkviewedfile getfilemetadata view madj l clean conv = do
 _ -> stagesymlink uh f k

 stagesymlink uh f k = do
-linktarget <- calcRepo (gitAnnexLink f k)
+linktarget <- fromOsPath <$> calcRepo (gitAnnexLink f k)
 sha <- hashSymlink linktarget
 liftIO . Git.UpdateIndex.streamUpdateIndex' uh
 =<< inRepo (Git.UpdateIndex.stageSymlink f sha)

@@ -609,7 +609,7 @@ withViewChanges addmeta removemeta = do
 =<< catKey (DiffTree.dstsha item)
 | otherwise = noop
 handlechange item a = maybe noop
-(void . commandAction . a (fromRawFilePath $ getTopFilePath $ DiffTree.file item))
+(void . commandAction . a (getTopFilePath $ DiffTree.file item))

 {- Runs an action using the view index file.
 - Note that the file does not necessarily exist, or can contain

@@ -619,7 +619,8 @@ withViewIndex = withIndexFile ViewIndexFile . const

 withNewViewIndex :: Annex a -> Annex a
 withNewViewIndex a = do
-liftIO . removeWhenExistsWith R.removeLink =<< fromRepo gitAnnexViewIndex
+liftIO . removeWhenExistsWith removeFile
+=<< fromRepo gitAnnexViewIndex
 withViewIndex a

 {- Generates a branch for a view, using the view index file
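pathProduct, whose signature changes above, combines one list of candidate directory names per view component into every possible nested path. A standalone sketch of that combination step over OsPath, using a list comprehension instead of the module's combinel helper (which this hunk does not show):

-- Cartesian combination of path components, in the spirit of pathProduct.
-- Uses only System.OsPath; the real function's combinel details are not
-- reproduced here.
import System.OsPath (OsPath, (</>))

pathProductLike :: [[OsPath]] -> [OsPath]
pathProductLike [] = []
pathProductLike (l:ls) = foldl combine l ls
  where
    combine acc xs = [ a </> x | a <- acc, x <- xs ]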
@@ -5,6 +5,7 @@
 - Licensed under the GNU AGPL version 3 or higher.
 -}

+{-# LANGUAGE OverloadedStrings #-}
 {-# LANGUAGE CPP #-}

 module Annex.View.ViewedFile (

@@ -20,13 +21,13 @@ module Annex.View.ViewedFile (
 import Annex.Common
 import Utility.QuickCheck
 import Backend.Utilities (maxExtensions)
+import qualified Utility.OsString as OS

 import qualified Data.ByteString as S

-type FileName = String
-type ViewedFile = FileName
+type ViewedFile = OsPath

-type MkViewedFile = FilePath -> ViewedFile
+type MkViewedFile = OsPath -> ViewedFile

 {- Converts a filepath used in a reference branch to the
 - filename that will be used in the view.

@@ -43,24 +44,27 @@ viewedFileFromReference g = viewedFileFromReference'
 (annexMaxExtensions g)

 viewedFileFromReference' :: Maybe Int -> Maybe Int -> MkViewedFile
-viewedFileFromReference' maxextlen maxextensions f = concat $
-[ escape (fromRawFilePath base')
-, if null dirs then "" else "_%" ++ intercalate "%" (map escape dirs) ++ "%"
+viewedFileFromReference' maxextlen maxextensions f = toOsPath $ concat $
+[ escape (fromOsPath base')
+, if null dirs
+then ""
+else "_%" ++ intercalate "%" (map (escape . fromOsPath) dirs) ++ "%"
 , escape $ fromRawFilePath $ S.concat extensions'
 ]
 where
 (path, basefile) = splitFileName f
-dirs = filter (/= ".") $ map dropTrailingPathSeparator (splitPath path)
+dirs = filter (/= literalOsPath ".") $
+map dropTrailingPathSeparator (splitPath path)
 (base, extensions) = case maxextlen of
-Nothing -> splitShortExtensions (toRawFilePath basefile')
-Just n -> splitShortExtensions' (n+1) (toRawFilePath basefile')
+Nothing -> splitShortExtensions basefile'
+Just n -> splitShortExtensions' (n+1) basefile'
 {- Limit number of extensions. -}
 maxextensions' = fromMaybe maxExtensions maxextensions
 (base', extensions')
 | length extensions <= maxextensions' = (base, extensions)
 | otherwise =
 let (es,more) = splitAt maxextensions' (reverse extensions)
-in (base <> mconcat (reverse more), reverse es)
+in (base <> toOsPath (mconcat (reverse more)), reverse es)
 {- On Windows, if the filename looked like "dir/c:foo" then
 - basefile would look like it contains a drive letter, which will
 - not work. There cannot really be a filename like that, probably,

@@ -89,8 +93,8 @@ viewedFileReuse = takeFileName

 {- Extracts from a ViewedFile the directory where the file is located on
 - in the reference branch. -}
-dirFromViewedFile :: ViewedFile -> FilePath
-dirFromViewedFile = joinPath . drop 1 . sep [] ""
+dirFromViewedFile :: ViewedFile -> OsPath
+dirFromViewedFile = joinPath . map toOsPath . drop 1 . sep [] "" . fromOsPath
 where
 sep l _ [] = reverse l
 sep l curr (c:cs)

@@ -103,10 +107,10 @@ dirFromViewedFile = joinPath . drop 1 . sep [] ""
 prop_viewedFile_roundtrips :: TestableFilePath -> Bool
 prop_viewedFile_roundtrips tf
 -- Relative filenames wanted, not directories.
-| any (isPathSeparator) (end f ++ beginning f) = True
-| isAbsolute f || isDrive f = True
+| OS.any isPathSeparator (toOsPath (end f ++ beginning f)) = True
+| isAbsolute (toOsPath f) || isDrive (toOsPath f) = True
 | otherwise = dir == dirFromViewedFile
-(viewedFileFromReference' Nothing Nothing f)
+(viewedFileFromReference' Nothing Nothing (toOsPath f))
 where
 f = fromTestableFilePath tf
-dir = joinPath $ beginning $ splitDirectories f
+dir = joinPath $ beginning $ splitDirectories (toOsPath f)
@@ -22,11 +22,11 @@ import qualified Database.Keys
 - When in an adjusted branch that may have hidden the file, looks for a
 - pointer to a key in the original branch.
 -}
-lookupKey :: RawFilePath -> Annex (Maybe Key)
+lookupKey :: OsPath -> Annex (Maybe Key)
 lookupKey = lookupKey' catkeyfile
 where
 catkeyfile file =
-ifM (liftIO $ doesFileExist $ fromRawFilePath file)
+ifM (liftIO $ doesFileExist file)
 ( catKeyFile file
 , catKeyFileHidden file =<< getCurrentBranch
 )

@@ -35,22 +35,22 @@ lookupKey = lookupKey' catkeyfile
 - changes in the work tree. This means it's slower, but it also has
 - consistently the same behavior for locked files as for unlocked files.
 -}
-lookupKeyStaged :: RawFilePath -> Annex (Maybe Key)
+lookupKeyStaged :: OsPath -> Annex (Maybe Key)
 lookupKeyStaged file = catKeyFile file >>= \case
 Just k -> return (Just k)
 Nothing -> catKeyFileHidden file =<< getCurrentBranch

 {- Like lookupKey, but does not find keys for hidden files. -}
-lookupKeyNotHidden :: RawFilePath -> Annex (Maybe Key)
+lookupKeyNotHidden :: OsPath -> Annex (Maybe Key)
 lookupKeyNotHidden = lookupKey' catkeyfile
 where
 catkeyfile file =
-ifM (liftIO $ doesFileExist $ fromRawFilePath file)
+ifM (liftIO $ doesFileExist file)
 ( catKeyFile file
 , return Nothing
 )

-lookupKey' :: (RawFilePath -> Annex (Maybe Key)) -> RawFilePath -> Annex (Maybe Key)
+lookupKey' :: (OsPath -> Annex (Maybe Key)) -> OsPath -> Annex (Maybe Key)
 lookupKey' catkeyfile file = isAnnexLink file >>= \case
 Just key -> return (Just key)
 Nothing -> catkeyfile file
@@ -5,6 +5,7 @@
 - Licensed under the GNU AGPL version 3 or higher.
 -}

+{-# LANGUAGE OverloadedStrings #-}
 {-# LANGUAGE DeriveGeneric #-}

 module Annex.YoutubeDl (

@@ -30,7 +31,6 @@ import Utility.Metered
 import Utility.Tmp
 import Messages.Progress
 import Logs.Transfer
-import qualified Utility.RawFilePath as R
 import qualified Utility.FileIO as F

 import Network.URI

@@ -72,20 +72,21 @@ youtubeDlNotAllowedMessage = unwords
 -- (This can fail, but youtube-dl is deprecated, and they closed my
 -- issue requesting something like --print-to-file;
 -- <https://github.com/rg3/youtube-dl/issues/14864>)
-youtubeDl :: URLString -> FilePath -> MeterUpdate -> Annex (Either String (Maybe FilePath))
+youtubeDl :: URLString -> OsPath -> MeterUpdate -> Annex (Either String (Maybe OsPath))
 youtubeDl url workdir p = ifM ipAddressesUnlimited
 ( withUrlOptions $ youtubeDl' url workdir p
 , return $ Left youtubeDlNotAllowedMessage
 )

-youtubeDl' :: URLString -> FilePath -> MeterUpdate -> UrlOptions -> Annex (Either String (Maybe FilePath))
+youtubeDl' :: URLString -> OsPath -> MeterUpdate -> UrlOptions -> Annex (Either String (Maybe OsPath))
 youtubeDl' url workdir p uo
 | supportedScheme uo url = do
 cmd <- youtubeDlCommand
 ifM (liftIO $ inSearchPath cmd)
 ( runcmd cmd >>= \case
 Right True -> downloadedfiles cmd >>= \case
-(f:[]) -> return (Right (Just f))
+(f:[]) -> return $
+Right (Just (toOsPath f))
 [] -> return (nofiles cmd)
 fs -> return (toomanyfiles cmd fs)
 Right False -> workdirfiles >>= \case

@@ -100,13 +101,13 @@ youtubeDl' url workdir p uo
 toomanyfiles cmd fs = Left $ cmd ++ " downloaded multiple media files; git-annex is only able to deal with one per url: " ++ show fs
 downloadedfiles cmd
 | isytdlp cmd = liftIO $
-(nub . lines <$> readFile filelistfile)
+(nub . lines <$> readFile (fromOsPath filelistfile))
 `catchIO` (pure . const [])
-| otherwise = map fromRawFilePath <$> workdirfiles
-workdirfiles = liftIO $ filter (/= toRawFilePath filelistfile)
-<$> (filterM R.doesPathExist =<< dirContents (toRawFilePath workdir))
+| otherwise = map fromOsPath <$> workdirfiles
+workdirfiles = liftIO $ filter (/= filelistfile)
+<$> (filterM doesFileExist =<< dirContents workdir)
 filelistfile = workdir </> filelistfilebase
-filelistfilebase = "git-annex-file-list-file"
+filelistfilebase = literalOsPath "git-annex-file-list-file"
 isytdlp cmd = cmd == "yt-dlp"
 runcmd cmd = youtubeDlMaxSize workdir >>= \case
 Left msg -> return (Left msg)

@@ -122,7 +123,7 @@ youtubeDl' url workdir p uo
 liftIO $ commandMeter'
 (if isytdlp cmd then parseYtdlpProgress else parseYoutubeDlProgress)
 oh (Just meter) meterupdate cmd opts
-(\pr -> pr { cwd = Just workdir })
+(\pr -> pr { cwd = Just (fromOsPath workdir) })
 return (Right ok)
 dlopts cmd =
 [ Param url

@@ -145,7 +146,7 @@ youtubeDl' url workdir p uo
 , Param progressTemplate
 , Param "--print-to-file"
 , Param "after_move:filepath"
-, Param filelistfilebase
+, Param (fromOsPath filelistfilebase)
 ]
 else []

@@ -153,14 +154,14 @@ youtubeDl' url workdir p uo
 -- large a media file. Factors in other downloads that are in progress,
 -- and any files in the workdir that it may have partially downloaded
 -- before.
-youtubeDlMaxSize :: FilePath -> Annex (Either String [CommandParam])
+youtubeDlMaxSize :: OsPath -> Annex (Either String [CommandParam])
 youtubeDlMaxSize workdir = ifM (Annex.getRead Annex.force)
 ( return $ Right []
-, liftIO (getDiskFree workdir) >>= \case
+, liftIO (getDiskFree (fromOsPath workdir)) >>= \case
 Just have -> do
 inprogress <- sizeOfDownloadsInProgress (const True)
 partial <- liftIO $ sum
-<$> (mapM getFileSize =<< dirContents (toRawFilePath workdir))
+<$> (mapM getFileSize =<< dirContents workdir)
 reserve <- annexDiskReserve <$> Annex.getGitConfig
 let maxsize = have - reserve - inprogress + partial
 if maxsize > 0

@@ -175,12 +176,12 @@ youtubeDlMaxSize workdir = ifM (Annex.getRead Annex.force)
 )

 -- Download a media file to a destination,
-youtubeDlTo :: Key -> URLString -> FilePath -> MeterUpdate -> Annex Bool
+youtubeDlTo :: Key -> URLString -> OsPath -> MeterUpdate -> Annex Bool
 youtubeDlTo key url dest p = do
 res <- withTmpWorkDir key $ \workdir ->
-youtubeDl url (fromRawFilePath workdir) p >>= \case
+youtubeDl url workdir p >>= \case
 Right (Just mediafile) -> do
-liftIO $ moveFile (toRawFilePath mediafile) (toRawFilePath dest)
+liftIO $ moveFile mediafile dest
 return (Just True)
 Right Nothing -> return (Just False)
 Left msg -> do

@@ -225,7 +226,7 @@ youtubeDlCheck' url uo
 -- Ask youtube-dl for the filename of media in an url.
 --
 -- (This is not always identical to the filename it uses when downloading.)
-youtubeDlFileName :: URLString -> Annex (Either String FilePath)
+youtubeDlFileName :: URLString -> Annex (Either String OsPath)
 youtubeDlFileName url = withUrlOptions go
 where
 go uo

@@ -236,10 +237,10 @@ youtubeDlFileName url = withUrlOptions go

 -- Does not check if the url contains htmlOnly; use when that's already
 -- been verified.
-youtubeDlFileNameHtmlOnly :: URLString -> Annex (Either String FilePath)
+youtubeDlFileNameHtmlOnly :: URLString -> Annex (Either String OsPath)
 youtubeDlFileNameHtmlOnly = withUrlOptions . youtubeDlFileNameHtmlOnly'

-youtubeDlFileNameHtmlOnly' :: URLString -> UrlOptions -> Annex (Either String FilePath)
+youtubeDlFileNameHtmlOnly' :: URLString -> UrlOptions -> Annex (Either String OsPath)
 youtubeDlFileNameHtmlOnly' url uo
 | supportedScheme uo url = flip catchIO (pure . Left . show) go
 | otherwise = return nomedia

@@ -269,7 +270,7 @@ youtubeDlFileNameHtmlOnly' url uo
 ok <- liftIO $ checkSuccessProcess pid
 wait errt
 return $ case (ok, lines output) of
-(True, (f:_)) | not (null f) -> Right f
+(True, (f:_)) | not (null f) -> Right (toOsPath f)
 _ -> nomedia
 waitproc _ _ _ _ = error "internal"

@@ -353,7 +354,7 @@ youtubePlaylist url = do
 else return $ Left $ "Scraping needs yt-dlp, but git-annex has been configured to use " ++ cmd

 youtubePlaylist' :: URLString -> String -> IO (Either String [YoutubePlaylistItem])
-youtubePlaylist' url cmd = withTmpFile (toOsPath (toRawFilePath "yt-dlp")) $ \tmpfile h -> do
+youtubePlaylist' url cmd = withTmpFile (literalOsPath "yt-dlp") $ \tmpfile h -> do
 hClose h
 (outerr, ok) <- processTranscript cmd
 [ "--simulate"

@@ -363,7 +364,7 @@ youtubePlaylist' url cmd = withTmpFile (toOsPath (toRawFilePath "yt-dlp")) $ \tm
 , "--print-to-file"
 -- Write json with selected fields.
 , "%(.{" ++ intercalate "," youtubePlaylistItemFields ++ "})j"
-, fromRawFilePath (fromOsPath tmpfile)
+, fromOsPath tmpfile
 , url
 ]
 Nothing

@@ -407,5 +408,6 @@ data YoutubePlaylistItem = YoutubePlaylistItem
 instance Aeson.FromJSON YoutubePlaylistItem
 where
 parseJSON = Aeson.genericParseJSON Aeson.defaultOptions
-{ Aeson.fieldLabelModifier = drop (length "youtube_") }
+{ Aeson.fieldLabelModifier =
+drop (length ("youtube_" :: String))
+}
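workdirfiles above lists the download work directory and drops the bookkeeping file-list file before deciding what was actually downloaded. The same filtering can be written directly against System.Directory.OsPath; dirContents is git-annex's own helper, so this sketch uses listDirectory and re-prefixes the directory itself.

-- Plain files in a work directory, excluding one known bookkeeping file.
-- A standalone approximation of workdirfiles; the file-list path is
-- expected to be the full path inside workdir, as in the code above.
import Control.Monad (filterM)
import System.Directory.OsPath (doesFileExist, listDirectory)
import System.OsPath (OsPath, (</>))

workDirFilesLike :: OsPath -> OsPath -> IO [OsPath]
workDirFilesLike workdir filelistfile = do
    names <- listDirectory workdir
    let paths = filter (/= filelistfile) (map (workdir </>) names)
    filterM doesFileExist paths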
Assistant.hs (21 changed lines)
@@ -62,40 +62,39 @@ import qualified Utility.Debug as Debug
 import Network.Socket (HostName, PortNumber)

 stopDaemon :: Annex ()
-stopDaemon = liftIO . Utility.Daemon.stopDaemon . fromRawFilePath
-=<< fromRepo gitAnnexPidFile
+stopDaemon = liftIO . Utility.Daemon.stopDaemon =<< fromRepo gitAnnexPidFile

 {- Starts the daemon. If the daemon is run in the foreground, once it's
 - running, can start the browser.
 -
 - startbrowser is passed the url and html shim file, as well as the original
 - stdout and stderr descriptors. -}
-startDaemon :: Bool -> Bool -> Maybe Duration -> Maybe String -> Maybe HostName -> Maybe PortNumber -> Maybe (Maybe Handle -> Maybe Handle -> String -> FilePath -> IO ()) -> Annex ()
+startDaemon :: Bool -> Bool -> Maybe Duration -> Maybe String -> Maybe HostName -> Maybe PortNumber -> Maybe (Maybe Handle -> Maybe Handle -> String -> OsPath -> IO ()) -> Annex ()
 startDaemon assistant foreground startdelay cannotrun listenhost listenport startbrowser = do
 Annex.changeState $ \s -> s { Annex.daemon = True }
 enableInteractiveBranchAccess
 pidfile <- fromRepo gitAnnexPidFile
 logfile <- fromRepo gitAnnexDaemonLogFile
-liftIO $ Debug.debug "Assistant" $ "logging to " ++ fromRawFilePath logfile
+liftIO $ Debug.debug "Assistant" $ "logging to " ++ fromOsPath logfile
 createAnnexDirectory (parentDir pidfile)
 #ifndef mingw32_HOST_OS
 createAnnexDirectory (parentDir logfile)
-let logfd = handleToFd =<< openLog (fromRawFilePath logfile)
+let logfd = handleToFd =<< openLog (fromOsPath logfile)
 if foreground
 then do
 origout <- liftIO $ catchMaybeIO $
 fdToHandle =<< dup stdOutput
 origerr <- liftIO $ catchMaybeIO $
 fdToHandle =<< dup stdError
-let undaemonize = Utility.Daemon.foreground logfd (Just (fromRawFilePath pidfile))
+let undaemonize = Utility.Daemon.foreground logfd (Just pidfile)
 start undaemonize $
 case startbrowser of
 Nothing -> Nothing
 Just a -> Just $ a origout origerr
 else do
-git_annex <- liftIO programPath
+git_annex <- fromOsPath <$> liftIO programPath
 ps <- gitAnnexDaemonizeParams
-start (Utility.Daemon.daemonize git_annex ps logfd (Just (fromRawFilePath pidfile)) False) Nothing
+start (Utility.Daemon.daemonize git_annex ps logfd (Just pidfile) False) Nothing
 #else
 -- Windows doesn't daemonize, but does redirect output to the
 -- log file. The only way to do so is to restart the program.

@@ -104,7 +103,7 @@ startDaemon assistant foreground startdelay cannotrun listenhost listenport star
 createAnnexDirectory (parentDir logfile)
 ifM (liftIO $ isNothing <$> getEnv flag)
 ( liftIO $ withNullHandle $ \nullh -> do
-loghandle <- openLog (fromRawFilePath logfile)
+loghandle <- openLog (fromOsPath logfile)
 e <- getEnvironment
 cmd <- programPath
 ps <- getArgs

@@ -117,7 +116,7 @@ startDaemon assistant foreground startdelay cannotrun listenhost listenport star
 exitcode <- withCreateProcess p $ \_ _ _ pid ->
 waitForProcess pid
 exitWith exitcode
-, start (Utility.Daemon.foreground (Just (fromRawFilePath pidfile))) $
+, start (Utility.Daemon.foreground (Just (fromOsPath pidfile))) $
 case startbrowser of
 Nothing -> Nothing
 Just a -> Just $ a Nothing Nothing

@@ -128,7 +127,7 @@ startDaemon assistant foreground startdelay cannotrun listenhost listenport star
 checkCanWatch
 dstatus <- startDaemonStatus
 logfile <- fromRepo gitAnnexDaemonLogFile
-liftIO $ Debug.debug "Assistant" $ "logging to " ++ fromRawFilePath logfile
+liftIO $ Debug.debug "Assistant" $ "logging to " ++ fromOsPath logfile
 liftIO $ daemonize $
 flip runAssistant (go webappwaiter)
 =<< newAssistantData st dstatus
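Several of the daemon calls above still take a plain FilePath or String, so the OsPath has to be decoded at that boundary, which is what the fromOsPath calls in this commit appear to do. With the public API the boundary conversion looks like the sketch below; decodeUtf is the public way to get a FilePath out of an OsPath, and a failed decode is surfaced rather than silently dropped.

-- Decode an OsPath to a FilePath at the boundary to a String-based API.
-- The error handling here is only illustrative.
import System.OsPath (OsPath, decodeUtf)

toLegacyFilePath :: OsPath -> IO FilePath
toLegacyFilePath p = case decodeUtf p of
    Nothing -> ioError (userError "path is not valid unicode")
    Just fp -> return fp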
@@ -395,7 +395,7 @@ fileAlert msg files = (activityAlert Nothing shortfiles)
 maxfilesshown = 10

 (!somefiles, !counter) = splitcounter (dedupadjacent files)
-!shortfiles = map (fromString . shortFile . takeFileName) somefiles
+!shortfiles = map (fromString . shortFile . fromOsPath . takeFileName . toOsPath) somefiles

 renderer alert = tenseWords $ msg : alertData alert ++ showcounter
 where
@@ -15,14 +15,14 @@ import Data.Time.Clock
 import Control.Concurrent.STM

 {- Handlers call this when they made a change that needs to get committed. -}
-madeChange :: FilePath -> ChangeInfo -> Assistant (Maybe Change)
+madeChange :: OsPath -> ChangeInfo -> Assistant (Maybe Change)
 madeChange f t = Just <$> (Change <$> liftIO getCurrentTime <*> pure f <*> pure t)

 noChange :: Assistant (Maybe Change)
 noChange = return Nothing

 {- Indicates an add needs to be done, but has not started yet. -}
-pendingAddChange :: FilePath -> Assistant (Maybe Change)
+pendingAddChange :: OsPath -> Assistant (Maybe Change)
 pendingAddChange f = Just <$> (PendingAddChange <$> liftIO getCurrentTime <*> pure f)

 {- Gets all unhandled changes.
@@ -5,6 +5,7 @@
 - Licensed under the GNU AGPL version 3 or higher.
 -}

+{-# LANGUAGE OverloadedStrings #-}
 {-# LANGUAGE CPP #-}

 module Assistant.Install where

@@ -31,8 +32,8 @@ import Utility.Android
 import System.PosixCompat.Files (ownerExecuteMode)
 import qualified Data.ByteString.Char8 as S8

-standaloneAppBase :: IO (Maybe FilePath)
-standaloneAppBase = getEnv "GIT_ANNEX_APP_BASE"
+standaloneAppBase :: IO (Maybe OsPath)
+standaloneAppBase = fmap toOsPath <$> getEnv "GIT_ANNEX_APP_BASE"

 {- The standalone app does not have an installation process.
 - So when it's run, it needs to set up autostarting of the assistant

@@ -51,13 +52,12 @@ ensureInstalled = ifM (isJust <$> getEnv "GIT_ANNEX_PACKAGE_INSTALL")
 , go =<< standaloneAppBase
 )
 where
-go Nothing = installFileManagerHooks "git-annex"
+go Nothing = installFileManagerHooks (literalOsPath "git-annex")
 go (Just base) = do
-let program = base </> "git-annex"
+let program = base </> literalOsPath "git-annex"
 programfile <- programFile
-createDirectoryIfMissing True $
-fromRawFilePath (parentDir (toRawFilePath programfile))
-writeFile programfile program
+createDirectoryIfMissing True (parentDir programfile)
+writeFile (fromOsPath programfile) (fromOsPath program)

 #ifdef darwin_HOST_OS
 autostartfile <- userAutoStart osxAutoStartLabel

@@ -67,24 +67,24 @@ ensureInstalled = ifM (isJust <$> getEnv "GIT_ANNEX_PACKAGE_INSTALL")
 ( do
 -- Integration with the Termux:Boot app.
 home <- myHomeDir
-let bootfile = home </> ".termux" </> "boot" </> "git-annex"
+let bootfile = toOsPath home </> literalOsPath ".termux" </> literalOsPath "boot" </> literalOsPath "git-annex"
 unlessM (doesFileExist bootfile) $ do
 createDirectoryIfMissing True (takeDirectory bootfile)
-writeFile bootfile "git-annex assistant --autostart"
+writeFile (fromOsPath bootfile) "git-annex assistant --autostart"
 , do
 menufile <- desktopMenuFilePath "git-annex" <$> userDataDir
 icondir <- iconDir <$> userDataDir
-installMenu program menufile base icondir
+installMenu (fromOsPath program) menufile base icondir
 autostartfile <- autoStartPath "git-annex" <$> userConfigDir
-installAutoStart program autostartfile
+installAutoStart (fromOsPath program) autostartfile
 )
 #endif

 sshdir <- sshDir
-let runshell var = "exec " ++ base </> "runshell " ++ var
+let runshell var = "exec " ++ fromOsPath (base </> literalOsPath "runshell ") ++ var
 let rungitannexshell var = runshell $ "git-annex-shell -c \"" ++ var ++ "\""

-installWrapper (toRawFilePath (sshdir </> "git-annex-shell")) $
+installWrapper (sshdir </> literalOsPath "git-annex-shell") $
 [ shebang
 , "set -e"
 , "if [ \"x$SSH_ORIGINAL_COMMAND\" != \"x\" ]; then"

@@ -93,7 +93,7 @@ ensureInstalled = ifM (isJust <$> getEnv "GIT_ANNEX_PACKAGE_INSTALL")
 , rungitannexshell "$@"
 , "fi"
 ]
-installWrapper (toRawFilePath (sshdir </> "git-annex-wrapper")) $
+installWrapper (sshdir </> literalOsPath "git-annex-wrapper") $
 [ shebang
 , "set -e"
 , runshell "\"$@\""

@@ -101,47 +101,46 @@ ensureInstalled = ifM (isJust <$> getEnv "GIT_ANNEX_PACKAGE_INSTALL")

 installFileManagerHooks program

-installWrapper :: RawFilePath -> [String] -> IO ()
+installWrapper :: OsPath -> [String] -> IO ()
 installWrapper file content = do
 let content' = map encodeBS content
-curr <- catchDefaultIO [] $ fileLines' <$> F.readFile' (toOsPath file)
+curr <- catchDefaultIO [] $ fileLines' <$> F.readFile' file
 when (curr /= content') $ do
-createDirectoryIfMissing True (fromRawFilePath (parentDir file))
-viaTmp F.writeFile' (toOsPath file) $
-linesFile' (S8.unlines content')
+createDirectoryIfMissing True (parentDir file)
+viaTmp F.writeFile' file $ linesFile' (S8.unlines content')
 modifyFileMode file $ addModes [ownerExecuteMode]

-installFileManagerHooks :: FilePath -> IO ()
+installFileManagerHooks :: OsPath -> IO ()
 #ifdef linux_HOST_OS
 installFileManagerHooks program = unlessM osAndroid $ do
 let actions = ["get", "drop", "undo"]

 -- Gnome
-nautilusScriptdir <- (\d -> d </> "nautilus" </> "scripts") <$> userDataDir
+nautilusScriptdir <- (\d -> d </> literalOsPath "nautilus" </> literalOsPath "scripts") <$> userDataDir
 createDirectoryIfMissing True nautilusScriptdir
 forM_ actions $
 genNautilusScript nautilusScriptdir

 -- KDE
 userdata <- userDataDir
-let kdeServiceMenusdir = userdata </> "kservices5" </> "ServiceMenus"
+let kdeServiceMenusdir = userdata </> literalOsPath "kservices5" </> literalOsPath "ServiceMenus"
 createDirectoryIfMissing True kdeServiceMenusdir
-writeFile (kdeServiceMenusdir </> "git-annex.desktop")
+writeFile (fromOsPath (kdeServiceMenusdir </> literalOsPath "git-annex.desktop"))
 (kdeDesktopFile actions)
 where
 genNautilusScript scriptdir action =
-installscript (toRawFilePath (scriptdir </> scriptname action)) $ unlines
+installscript (scriptdir </> toOsPath (scriptname action)) $ unlines
 [ shebang
 , autoaddedcomment
-, "exec " ++ program ++ " " ++ action ++ " --notify-start --notify-finish -- \"$@\""
+, "exec " ++ fromOsPath program ++ " " ++ action ++ " --notify-start --notify-finish -- \"$@\""
 ]
 scriptname action = "git-annex " ++ action
 installscript f c = whenM (safetoinstallscript f) $ do
-writeFile (fromRawFilePath f) c
+writeFile (fromOsPath f) c
 modifyFileMode f $ addModes [ownerExecuteMode]
 safetoinstallscript f = catchDefaultIO True $
 elem (encodeBS autoaddedcomment) . fileLines'
-<$> F.readFile' (toOsPath f)
+<$> F.readFile' f
 autoaddedcomment = "# " ++ autoaddedmsg ++ " (To disable, chmod 600 this file.)"
 autoaddedmsg = "Automatically added by git-annex, do not edit."

@@ -167,7 +166,7 @@ installFileManagerHooks program = unlessM osAndroid $ do
 , "Icon=git-annex"
 , unwords
 [ "Exec=sh -c 'cd \"$(dirname \"$1\")\" &&"
-, program
+, fromOsPath program
 , command
 , "--notify-start --notify-finish -- \"$1\"'"
 , "false" -- this becomes $0 in sh, so unused
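The install code above now builds paths like base </> literalOsPath "git-annex" and home </> ".termux" </> "boot" out of OsPath segments, and creates the parent directory before writing. The same composition with the public API, where the string literals are encoded once up front; the .termux/boot location comes from the hunk above, and osp' stands in for the commit's literalOsPath helper.

-- Compose an OsPath from literal segments and make sure its directory
-- exists, as the Termux boot-file setup above does.
import Data.Maybe (fromMaybe)
import System.Directory.OsPath (createDirectoryIfMissing)
import System.OsPath (OsPath, encodeUtf, takeDirectory, (</>))

osp' :: String -> OsPath
osp' = fromMaybe (error "bad literal") . encodeUtf

ensureTermuxBootPath :: OsPath -> IO OsPath
ensureTermuxBootPath home = do
    let bootfile = home </> osp' ".termux" </> osp' "boot" </> osp' "git-annex"
    createDirectoryIfMissing True (takeDirectory bootfile)
    return bootfile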
@@ -10,6 +10,7 @@
 module Assistant.Install.AutoStart where

+import Common
 import Utility.FreeDesktop
 #ifdef darwin_HOST_OS
 import Utility.OSX

@@ -18,11 +19,11 @@ import Utility.SystemDirectory
 import Utility.FileSystemEncoding
 #endif

-installAutoStart :: FilePath -> FilePath -> IO ()
+installAutoStart :: String -> OsPath -> IO ()
 installAutoStart command file = do
 #ifdef darwin_HOST_OS
-createDirectoryIfMissing True (fromRawFilePath (parentDir (toRawFilePath file)))
-writeFile file $ genOSXAutoStartFile osxAutoStartLabel command
+createDirectoryIfMissing True (parentDir file)
+writeFile (fromOsPath file) $ genOSXAutoStartFile osxAutoStartLabel command
 ["assistant", "--autostart"]
 #else
 writeDesktopMenuFile (fdoAutostart command) file
@@ -5,31 +5,25 @@
 - Licensed under the GNU AGPL version 3 or higher.
 -}

+{-# LANGUAGE OverloadedStrings #-}
 {-# LANGUAGE CPP #-}
 {-# OPTIONS_GHC -fno-warn-tabs #-}

 module Assistant.Install.Menu where

 import Common
 import Utility.FreeDesktop
-import Utility.FileSystemEncoding
-import Utility.Path

-import System.IO
-import Utility.SystemDirectory
-#ifndef darwin_HOST_OS
-import System.FilePath
-#endif

-installMenu :: FilePath -> FilePath -> FilePath -> FilePath -> IO ()
+installMenu :: String -> OsPath -> OsPath -> OsPath -> IO ()
 #ifdef darwin_HOST_OS
 installMenu _command _menufile _iconsrcdir _icondir = return ()
 #else
 installMenu command menufile iconsrcdir icondir = do
 writeDesktopMenuFile (fdoDesktopMenu command) menufile
-installIcon (iconsrcdir </> "logo.svg") $
-iconFilePath (iconBaseName ++ ".svg") "scalable" icondir
-installIcon (iconsrcdir </> "logo_16x16.png") $
-iconFilePath (iconBaseName ++ ".png") "16x16" icondir
+installIcon (iconsrcdir </> literalOsPath "logo.svg") $
+iconFilePath (toOsPath (iconBaseName ++ ".svg")) "scalable" icondir
+installIcon (iconsrcdir </> literalOsPath "logo_16x16.png") $
+iconFilePath (toOsPath (iconBaseName ++ ".png")) "16x16" icondir
 #endif

 {- The command can be either just "git-annex", or the full path to use

@@ -43,11 +37,11 @@ fdoDesktopMenu command = genDesktopEntry
 (Just iconBaseName)
 ["Network", "FileTransfer"]

-installIcon :: FilePath -> FilePath -> IO ()
+installIcon :: OsPath -> OsPath -> IO ()
 installIcon src dest = do
-createDirectoryIfMissing True (fromRawFilePath (parentDir (toRawFilePath dest)))
-withBinaryFile src ReadMode $ \hin ->
-withBinaryFile dest WriteMode $ \hout ->
+createDirectoryIfMissing True (parentDir dest)
+withBinaryFile (fromOsPath src) ReadMode $ \hin ->
+withBinaryFile (fromOsPath dest) WriteMode $ \hout ->
 hGetContents hin >>= hPutStr hout

 iconBaseName :: String
|
|||
|
||||
{- Makes a new git repository. Or, if a git repository already
|
||||
- exists, returns False. -}
|
||||
makeRepo :: FilePath -> Bool -> IO Bool
|
||||
makeRepo :: OsPath -> Bool -> IO Bool
|
||||
makeRepo path bare = ifM (probeRepoExists path)
|
||||
( return False
|
||||
, do
|
||||
|
@ -41,19 +41,19 @@ makeRepo path bare = ifM (probeRepoExists path)
|
|||
where
|
||||
baseparams = [Param "init", Param "--quiet"]
|
||||
params
|
||||
| bare = baseparams ++ [Param "--bare", File path]
|
||||
| otherwise = baseparams ++ [File path]
|
||||
| bare = baseparams ++ [Param "--bare", File (fromOsPath path)]
|
||||
| otherwise = baseparams ++ [File (fromOsPath path)]
|
||||
|
||||
{- Runs an action in the git repository in the specified directory. -}
|
||||
inDir :: FilePath -> Annex a -> IO a
|
||||
inDir :: OsPath -> Annex a -> IO a
|
||||
inDir dir a = do
|
||||
state <- Annex.new
|
||||
=<< Git.Config.read
|
||||
=<< Git.Construct.fromPath (toRawFilePath dir)
|
||||
=<< Git.Construct.fromPath dir
|
||||
Annex.eval state $ a `finally` quiesce True
|
||||
|
||||
{- Creates a new repository, and returns its UUID. -}
|
||||
initRepo :: Bool -> Bool -> FilePath -> Maybe String -> Maybe StandardGroup -> IO UUID
|
||||
initRepo :: Bool -> Bool -> OsPath -> Maybe String -> Maybe StandardGroup -> IO UUID
|
||||
initRepo True primary_assistant_repo dir desc mgroup = inDir dir $ do
|
||||
initRepo' desc mgroup
|
||||
{- Initialize the master branch, so things that expect
|
||||
|
@ -94,6 +94,6 @@ initRepo' desc mgroup = unlessM isInitialized $ do
|
|||
Annex.Branch.commit =<< Annex.Branch.commitMessage
|
||||
|
||||
{- Checks if a git repo exists at a location. -}
|
||||
probeRepoExists :: FilePath -> IO Bool
|
||||
probeRepoExists :: OsPath -> IO Bool
|
||||
probeRepoExists dir = isJust <$>
|
||||
catchDefaultIO Nothing (Git.Construct.checkForRepo (encodeBS dir))
|
||||
catchDefaultIO Nothing (Git.Construct.checkForRepo dir)
|
||||
|
|
|
@@ -22,11 +22,11 @@ import qualified Data.Text as T

 {- Authorized keys are set up before pairing is complete, so that the other
 - side can immediately begin syncing. -}
-setupAuthorizedKeys :: PairMsg -> FilePath -> IO ()
+setupAuthorizedKeys :: PairMsg -> OsPath -> IO ()
 setupAuthorizedKeys msg repodir = case validateSshPubKey $ remoteSshPubKey $ pairMsgData msg of
 Left err -> giveup err
 Right pubkey -> do
-absdir <- fromRawFilePath <$> absPath (toRawFilePath repodir)
+absdir <- absPath repodir
 unlessM (liftIO $ addAuthorizedKeys True absdir pubkey) $
 giveup "failed setting up ssh authorized keys"

@@ -66,7 +66,7 @@ pairMsgToSshData msg = do
 { sshHostName = T.pack hostname
 , sshUserName = Just (T.pack $ remoteUserName d)
 , sshDirectory = T.pack dir
-, sshRepoName = genSshRepoName hostname dir
+, sshRepoName = genSshRepoName hostname (toOsPath dir)
 , sshPort = 22
 , needsPubKey = True
 , sshCapabilities = [GitAnnexShellCapable, GitCapable, RsyncCapable]
@@ -31,11 +31,9 @@ import qualified Data.Text as T
 #endif
 import qualified Utility.Lsof as Lsof
 import Utility.ThreadScheduler
-import qualified Utility.RawFilePath as R
+import qualified Utility.OsString as OS

 import Control.Concurrent.Async
-import qualified Data.ByteString as S
-import qualified System.FilePath.ByteString as P

 {- When the FsckResults require a repair, tries to do a non-destructive
 - repair. If that fails, pops up an alert. -}

@@ -98,7 +96,7 @@ runRepair u mrmt destructiverepair = do
 thisrepopath <- liftIO . absPath
 =<< liftAnnex (fromRepo Git.repoPath)
 a <- liftAnnex $ mkrepair $
-repair fsckresults (Just (fromRawFilePath thisrepopath))
+repair fsckresults (Just (fromOsPath thisrepopath))
 liftIO $ catchBoolIO a

 repair fsckresults referencerepo = do

@@ -110,7 +108,7 @@ runRepair u mrmt destructiverepair = do

 backgroundfsck params = liftIO $ void $ async $ do
 program <- programPath
-batchCommand program (Param "fsck" : params)
+batchCommand (fromOsPath program) (Param "fsck" : params)

 {- Detect when a git lock file exists and has no git process currently
 - writing to it. This strongly suggests it is a stale lock file.

@@ -135,26 +133,26 @@ repairStaleGitLocks r = do
 repairStaleLocks lockfiles
 return $ not $ null lockfiles
 where
-findgitfiles = dirContentsRecursiveSkipping (== P.dropTrailingPathSeparator annexDir) True . Git.localGitDir
+findgitfiles = dirContentsRecursiveSkipping (== dropTrailingPathSeparator annexDir) True . Git.localGitDir
 islock f
-| "gc.pid" `S.isInfixOf` f = False
-| ".lock" `S.isSuffixOf` f = True
-| P.takeFileName f == "MERGE_HEAD" = True
+| literalOsPath "gc.pid" `OS.isInfixOf` f = False
+| literalOsPath ".lock" `OS.isSuffixOf` f = True
+| takeFileName f == literalOsPath "MERGE_HEAD" = True
 | otherwise = False

-repairStaleLocks :: [RawFilePath] -> Assistant ()
+repairStaleLocks :: [OsPath] -> Assistant ()
 repairStaleLocks lockfiles = go =<< getsizes
 where
 getsize lf = catchMaybeIO $ (\s -> (lf, s))
 <$> getFileSize lf
 getsizes = liftIO $ catMaybes <$> mapM getsize lockfiles
 go [] = return ()
-go l = ifM (liftIO $ null <$> Lsof.query ("--" : map (fromRawFilePath . fst) l))
+go l = ifM (liftIO $ null <$> Lsof.query ("--" : map (fromOsPath . fst) l))
 ( do
 waitforit "to check stale git lock file"
 l' <- getsizes
 if l' == l
-then liftIO $ mapM_ (removeWhenExistsWith R.removeLink . fst) l
+then liftIO $ mapM_ (removeWhenExistsWith removeFile . fst) l
 else go l'
 , do
 waitforit "for git lock file writer"
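islock above keeps the stale-lock heuristic: ignore anything containing gc.pid, treat names ending in .lock or named MERGE_HEAD as lock files. A rough standalone approximation with the public System.OsPath API follows; it deliberately deviates from the original by matching "gc.pid" as an exact file name and using an extension check instead of a suffix check, since the OsString infix/suffix helpers it uses are git-annex's own wrappers.

-- Public-API approximation of the islock heuristic above (stricter than
-- the original, as noted in the lead-in).
import Data.Maybe (fromMaybe)
import System.OsPath (OsPath, encodeUtf, takeExtension, takeFileName)

osp' :: String -> OsPath
osp' = fromMaybe (error "bad literal") . encodeUtf

looksLikeGitLock :: OsPath -> Bool
looksLikeGitLock f
    | takeFileName f == osp' "gc.pid" = False
    | takeExtension f == osp' ".lock" = True
    | takeFileName f == osp' "MERGE_HEAD" = True
    | otherwise = False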
@ -18,7 +18,6 @@ import Utility.NotificationBroadcaster
|
|||
import Utility.Url
|
||||
import Utility.Url.Parse
|
||||
import Utility.PID
|
||||
import qualified Utility.RawFilePath as R
|
||||
import qualified Git.Construct
|
||||
import qualified Git.Config
|
||||
import qualified Annex
|
||||
|
@ -41,8 +40,8 @@ import Network.URI
|
|||
prepRestart :: Assistant ()
|
||||
prepRestart = do
|
||||
liftIO . maybe noop (`throwTo` PauseWatcher) =<< namedThreadId watchThread
|
||||
liftIO . removeWhenExistsWith R.removeLink =<< liftAnnex (fromRepo gitAnnexUrlFile)
|
||||
liftIO . removeWhenExistsWith R.removeLink =<< liftAnnex (fromRepo gitAnnexPidFile)
|
||||
liftIO . removeWhenExistsWith removeFile =<< liftAnnex (fromRepo gitAnnexUrlFile)
|
||||
liftIO . removeWhenExistsWith removeFile =<< liftAnnex (fromRepo gitAnnexPidFile)
|
||||
|
||||
{- To finish a restart, send a global redirect to the new url
|
||||
- to any web browsers that are displaying the webapp.
|
||||
|
@ -66,21 +65,21 @@ terminateSelf =
|
|||
|
||||
runRestart :: Assistant URLString
|
||||
runRestart = liftIO . newAssistantUrl
|
||||
=<< liftAnnex (Git.repoLocation <$> Annex.gitRepo)
|
||||
=<< liftAnnex (Git.repoPath <$> Annex.gitRepo)
|
||||
|
||||
{- Starts up the assistant in the repository, and waits for it to create
|
||||
- a gitAnnexUrlFile. Waits for the assistant to be up and listening for
|
||||
- connections by testing the url. -}
|
||||
newAssistantUrl :: FilePath -> IO URLString
newAssistantUrl :: OsPath -> IO URLString
newAssistantUrl repo = do
startAssistant repo
geturl
where
geturl = do
r <- Git.Config.read =<< Git.Construct.fromPath (toRawFilePath repo)
waiturl $ fromRawFilePath $ gitAnnexUrlFile r
r <- Git.Config.read =<< Git.Construct.fromPath repo
waiturl $ gitAnnexUrlFile r
waiturl urlfile = do
v <- tryIO $ readFile urlfile
v <- tryIO $ readFile (fromOsPath urlfile)
case v of
Left _ -> delayed $ waiturl urlfile
Right url -> ifM (assistantListening url)
|
@ -112,8 +111,8 @@ assistantListening url = catchBoolIO $ do
|
|||
- On windows, the assistant does not daemonize, which is why the forkIO is
|
||||
- done.
|
||||
-}
|
||||
startAssistant :: FilePath -> IO ()
|
||||
startAssistant :: OsPath -> IO ()
|
||||
startAssistant repo = void $ forkIO $ do
|
||||
program <- programPath
|
||||
let p = (proc program ["assistant"]) { cwd = Just repo }
|
||||
program <- fromOsPath <$> programPath
|
||||
let p = (proc program ["assistant"]) { cwd = Just (fromOsPath repo) }
|
||||
withCreateProcess p $ \_ _ _ pid -> void $ checkSuccessProcess pid
@ -5,6 +5,8 @@
|
|||
- Licensed under the GNU AGPL version 3 or higher.
|
||||
-}
|
||||
|
||||
{-# LANGUAGE OverloadedStrings #-}
|
||||
|
||||
module Assistant.Ssh where
|
||||
|
||||
import Annex.Common
|
||||
|
@ -18,6 +20,7 @@ import Git.Remote
|
|||
import Utility.SshHost
|
||||
import Utility.Process.Transcript
|
||||
import qualified Utility.FileIO as F
|
||||
import qualified Utility.OsString as OS
|
||||
|
||||
import Data.Text (Text)
|
||||
import qualified Data.Text as T
|
||||
|
@ -94,14 +97,14 @@ genSshUrl sshdata = case sshRepoUrl sshdata of
|
|||
{- Reverses genSshUrl -}
parseSshUrl :: String -> Maybe SshData
parseSshUrl u
| "ssh://" `isPrefixOf` u = fromssh (drop (length "ssh://") u)
| "ssh://" `isPrefixOf` u = fromssh (drop (length ("ssh://" :: String)) u)
| otherwise = fromrsync u
where
mkdata (userhost, dir) = Just $ SshData
{ sshHostName = T.pack host
, sshUserName = if null user then Nothing else Just $ T.pack user
, sshDirectory = T.pack dir
, sshRepoName = genSshRepoName host dir
, sshRepoName = genSshRepoName host (toOsPath dir)
-- dummy values, cannot determine from url
, sshPort = 22
, needsPubKey = True
|
@ -118,10 +121,10 @@ parseSshUrl u
|
|||
fromssh = mkdata . break (== '/')
|
||||
|
||||
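parseSshUrl above handles both ssh:// URLs and rsync-style user@host:dir strings. A minimal standalone sketch of that splitting step (the (user, host, dir) triple it returns is purely illustrative; the real code builds an SshData record):

import Data.List (stripPrefix)

-- Standalone sketch of the host/directory split performed by parseSshUrl above.
parseSshish :: String -> Maybe (String, String, String)
parseSshish u = case stripPrefix "ssh://" u of
    Just rest -> split '/' rest   -- ssh://[user@]host/dir
    Nothing   -> split ':' u      -- rsync style: [user@]host:dir
  where
    split c s = case break (== c) s of
        (userhost, _:dir) -> Just (user userhost, host userhost, dir)
        _ -> Nothing
    user uh = let (a, b) = break (== '@') uh in if null b then "" else a
    host uh = let (a, b) = break (== '@') uh in if null b then a else drop 1 b

main :: IO ()
main = print (parseSshish "ssh://joey@example.com/annex")
-- prints Just ("joey","example.com","annex")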
{- Generates a git remote name, like host_dir or host -}
genSshRepoName :: String -> FilePath -> String
genSshRepoName :: String -> OsPath -> String
genSshRepoName host dir
| null dir = makeLegalName host
| otherwise = makeLegalName $ host ++ "_" ++ dir
| OS.null dir = makeLegalName host
| otherwise = makeLegalName $ host ++ "_" ++ fromOsPath dir
|
||||
{- The output of ssh, including both stdout and stderr. -}
|
||||
sshTranscript :: [String] -> SshHost -> String -> (Maybe String) -> IO (String, Bool)
|
||||
|
@ -149,17 +152,17 @@ validateSshPubKey pubkey
where
(ssh, keytype) = separate (== '-') prefix

addAuthorizedKeys :: Bool -> FilePath -> SshPubKey -> IO Bool
addAuthorizedKeys :: Bool -> OsPath -> SshPubKey -> IO Bool
addAuthorizedKeys gitannexshellonly dir pubkey = boolSystem "sh"
[ Param "-c" , Param $ addAuthorizedKeysCommand gitannexshellonly dir pubkey ]

{- Should only be used within the same process that added the line;
- the layout of the line is not kept stable across versions. -}
removeAuthorizedKeys :: Bool -> FilePath -> SshPubKey -> IO ()
removeAuthorizedKeys :: Bool -> OsPath -> SshPubKey -> IO ()
removeAuthorizedKeys gitannexshellonly dir pubkey = do
let keyline = authorizedKeysLine gitannexshellonly dir pubkey
sshdir <- sshDir
let keyfile = toOsPath $ toRawFilePath $ sshdir </> "authorized_keys"
let keyfile = sshdir </> literalOsPath "authorized_keys"
tryWhenExists (map decodeBS . fileLines' <$> F.readFile' keyfile) >>= \case
Just ls -> viaTmp writeSshConfig keyfile $
unlines $ filter (/= keyline) ls
|
@ -171,7 +174,7 @@ removeAuthorizedKeys gitannexshellonly dir pubkey = do
|
|||
- The ~/.ssh/git-annex-shell wrapper script is created if not already
|
||||
- present.
|
||||
-}
|
||||
addAuthorizedKeysCommand :: Bool -> FilePath -> SshPubKey -> String
|
||||
addAuthorizedKeysCommand :: Bool -> OsPath -> SshPubKey -> String
|
||||
addAuthorizedKeysCommand gitannexshellonly dir pubkey = intercalate "&&"
|
||||
[ "mkdir -p ~/.ssh"
|
||||
, intercalate "; "
|
||||
|
@ -202,27 +205,27 @@ addAuthorizedKeysCommand gitannexshellonly dir pubkey = intercalate "&&"
|
|||
]
|
||||
runshell var = "exec git-annex-shell -c \"" ++ var ++ "\""
|
||||
|
||||
authorizedKeysLine :: Bool -> FilePath -> SshPubKey -> String
authorizedKeysLine :: Bool -> OsPath -> SshPubKey -> String
authorizedKeysLine gitannexshellonly dir pubkey
| gitannexshellonly = limitcommand ++ pubkey
{- TODO: Locking down rsync is difficult, requiring a rather
- long perl script. -}
| otherwise = pubkey
where
limitcommand = "command=\"env GIT_ANNEX_SHELL_DIRECTORY="++shellEscape dir++" ~/.ssh/git-annex-shell\",no-agent-forwarding,no-port-forwarding,no-X11-forwarding,no-pty "
limitcommand = "command=\"env GIT_ANNEX_SHELL_DIRECTORY="++shellEscape (fromOsPath dir)++" ~/.ssh/git-annex-shell\",no-agent-forwarding,no-port-forwarding,no-X11-forwarding,no-pty "
|
||||
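authorizedKeysLine above prefixes the public key with a forced command and an option list when only git-annex-shell access should be allowed. A standalone sketch of building such a restricted authorized_keys entry; the naive single-quoting stands in for git-annex's shellEscape, and the key text is a dummy value:

-- Standalone sketch, not git-annex code: a restricted authorized_keys entry
-- in the style produced by authorizedKeysLine above.
restrictedKeyLine :: FilePath -> String -> String
restrictedKeyLine dir pubkey = concat
    [ "command=\"env GIT_ANNEX_SHELL_DIRECTORY='", dir, "' ~/.ssh/git-annex-shell\""
    , ",no-agent-forwarding,no-port-forwarding,no-X11-forwarding,no-pty "
    , pubkey
    ]

main :: IO ()
main = putStrLn (restrictedKeyLine "annex" "ssh-ed25519 AAAAC3Nza... joey@darkstar")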
|
||||
{- Generates a ssh key pair. -}
genSshKeyPair :: IO SshKeyPair
genSshKeyPair = withTmpDir (toOsPath (toRawFilePath "git-annex-keygen")) $ \dir -> do
genSshKeyPair = withTmpDir (literalOsPath "git-annex-keygen") $ \dir -> do
ok <- boolSystem "ssh-keygen"
[ Param "-P", Param "" -- no password
, Param "-f", File $ dir </> "key"
, Param "-f", File $ fromOsPath (dir </> literalOsPath "key")
]
unless ok $
giveup "ssh-keygen failed"
SshKeyPair
<$> readFile (dir </> "key.pub")
<*> readFile (dir </> "key")
<$> readFile (fromOsPath (dir </> literalOsPath "key.pub"))
<*> readFile (fromOsPath (dir </> literalOsPath "key"))
|
||||
|
||||
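genSshKeyPair above runs ssh-keygen in a temporary directory and reads back both halves of the key. A standalone sketch of the same steps, using the temp and process packages in place of git-annex's withTmpDir and boolSystem:

import System.FilePath ((</>))
import System.IO (readFile')
import System.IO.Temp (withSystemTempDirectory)
import System.Process (callProcess)

-- Standalone sketch: generate a throwaway ssh key pair and return
-- (private key, public key). readFile' forces the contents before the
-- temporary directory is cleaned up; the real code also checks for failure.
genKeyPair :: IO (String, String)
genKeyPair = withSystemTempDirectory "keygen" $ \dir -> do
    let keyfile = dir </> "key"
    callProcess "ssh-keygen" ["-q", "-P", "", "-f", keyfile]
    (,) <$> readFile' keyfile <*> readFile' (keyfile ++ ".pub")

main :: IO ()
main = do
    (_priv, pub) <- genKeyPair
    putStrLn (takeWhile (/= ' ') pub)   -- e.g. "ssh-ed25519"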
{- Installs a ssh key pair, and sets up ssh config with a mangled hostname
|
||||
- that will enable use of the key. This way we avoid changing the user's
|
||||
|
@ -245,25 +248,28 @@ genSshKeyPair = withTmpDir (toOsPath (toRawFilePath "git-annex-keygen")) $ \dir
|
|||
installSshKeyPair :: SshKeyPair -> SshData -> IO SshData
|
||||
installSshKeyPair sshkeypair sshdata = do
|
||||
sshdir <- sshDir
|
||||
createDirectoryIfMissing True $ fromRawFilePath $
|
||||
parentDir $ toRawFilePath $ sshdir </> sshPrivKeyFile sshdata
|
||||
createDirectoryIfMissing True $
|
||||
parentDir $ sshdir </> sshPrivKeyFile sshdata
|
||||
|
||||
unlessM (doesFileExist $ sshdir </> sshPrivKeyFile sshdata) $
|
||||
writeFileProtected (toRawFilePath (sshdir </> sshPrivKeyFile sshdata)) (sshPrivKey sshkeypair)
|
||||
writeFileProtected (sshdir </> sshPrivKeyFile sshdata)
|
||||
(sshPrivKey sshkeypair)
|
||||
unlessM (doesFileExist $ sshdir </> sshPubKeyFile sshdata) $
|
||||
writeFile (sshdir </> sshPubKeyFile sshdata) (sshPubKey sshkeypair)
|
||||
writeFile (fromOsPath (sshdir </> sshPubKeyFile sshdata))
|
||||
(sshPubKey sshkeypair)
|
||||
|
||||
setSshConfig sshdata
|
||||
[ ("IdentityFile", "~/.ssh/" ++ sshPrivKeyFile sshdata)
|
||||
[ ("IdentityFile", "~/.ssh/" ++ fromOsPath (sshPrivKeyFile sshdata))
|
||||
, ("IdentitiesOnly", "yes")
|
||||
, ("StrictHostKeyChecking", "yes")
|
||||
]
|
||||
|
||||
sshPrivKeyFile :: SshData -> FilePath
sshPrivKeyFile sshdata = "git-annex" </> "key." ++ mangleSshHostName sshdata
sshPrivKeyFile :: SshData -> OsPath
sshPrivKeyFile sshdata = literalOsPath "git-annex"
</> literalOsPath "key." <> toOsPath (mangleSshHostName sshdata)

sshPubKeyFile :: SshData -> FilePath
sshPubKeyFile sshdata = sshPrivKeyFile sshdata ++ ".pub"
sshPubKeyFile :: SshData -> OsPath
sshPubKeyFile sshdata = sshPrivKeyFile sshdata <> literalOsPath ".pub"

{- Generates and installs a new ssh key pair if one is not already
- installed. Returns the modified SshData that will use the key pair,
|
||||
|
@ -271,8 +277,8 @@ sshPubKeyFile sshdata = sshPrivKeyFile sshdata ++ ".pub"
|
|||
setupSshKeyPair :: SshData -> IO (SshData, SshKeyPair)
|
||||
setupSshKeyPair sshdata = do
|
||||
sshdir <- sshDir
|
||||
mprivkey <- catchMaybeIO $ readFile (sshdir </> sshPrivKeyFile sshdata)
|
||||
mpubkey <- catchMaybeIO $ readFile (sshdir </> sshPubKeyFile sshdata)
|
||||
mprivkey <- catchMaybeIO $ readFile (fromOsPath (sshdir </> sshPrivKeyFile sshdata))
|
||||
mpubkey <- catchMaybeIO $ readFile (fromOsPath (sshdir </> sshPubKeyFile sshdata))
|
||||
keypair <- case (mprivkey, mpubkey) of
|
||||
(Just privkey, Just pubkey) -> return $ SshKeyPair
|
||||
{ sshPubKey = pubkey
|
||||
|
@ -324,7 +330,7 @@ setSshConfig :: SshData -> [(String, String)] -> IO SshData
|
|||
setSshConfig sshdata config = do
sshdir <- sshDir
createDirectoryIfMissing True sshdir
let configfile = sshdir </> "config"
let configfile = fromOsPath (sshdir </> literalOsPath "config")
unlessM (catchBoolIO $ isInfixOf mangledhost <$> readFile configfile) $ do
appendFile configfile $ unlines $
[ ""

@ -332,7 +338,7 @@ setSshConfig sshdata config = do
, "Host " ++ mangledhost
] ++ map (\(k, v) -> "\t" ++ k ++ " " ++ v)
(settings ++ config)
setSshConfigMode (toRawFilePath configfile)
setSshConfigMode (toOsPath configfile)

return $ sshdata
{ sshHostName = T.pack mangledhost
|
||||
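setSshConfig above appends a Host block for the mangled hostname, one tab-indented setting per line. A small standalone sketch of rendering such a block; renderHostBlock and the sample settings are illustrative names, not git-annex functions:

-- Standalone sketch: render an ssh config "Host" block like the one
-- setSshConfig appends above.
renderHostBlock :: String -> [(String, String)] -> String
renderHostBlock mangledhost settings = unlines $
    ("Host " ++ mangledhost) : map (\(k, v) -> "\t" ++ k ++ " " ++ v) settings

main :: IO ()
main = putStr $ renderHostBlock "git-annex-example.com-joey"
    [ ("HostName", "example.com")
    , ("IdentityFile", "~/.ssh/git-annex/key.git-annex-example.com-joey")
    ]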
|
@ -403,7 +409,7 @@ unMangleSshHostName h = case splitc '-' h of
|
|||
knownHost :: Text -> IO Bool
|
||||
knownHost hostname = do
|
||||
sshdir <- sshDir
|
||||
ifM (doesFileExist $ sshdir </> "known_hosts")
|
||||
ifM (doesFileExist $ sshdir </> literalOsPath "known_hosts")
|
||||
( not . null <$> checkhost
|
||||
, return False
|
||||
)
|
||||
|
|
|
@ -67,11 +67,10 @@ commitThread = namedThread "Committer" $ do
|
|||
liftAnnex $ do
|
||||
-- Clean up anything left behind by a previous process
|
||||
-- on unclean shutdown.
|
||||
void $ liftIO $ tryIO $ removeDirectoryRecursive
|
||||
(fromRawFilePath lockdowndir)
|
||||
void $ liftIO $ tryIO $ removeDirectoryRecursive lockdowndir
|
||||
void $ createAnnexDirectory lockdowndir
|
||||
waitChangeTime $ \(changes, time) -> do
|
||||
readychanges <- handleAdds (fromRawFilePath lockdowndir) havelsof largefilematcher annexdotfiles delayadd $
|
||||
readychanges <- handleAdds lockdowndir havelsof largefilematcher annexdotfiles delayadd $
|
||||
simplifyChanges changes
|
||||
if shouldCommit False time (length readychanges) readychanges
|
||||
then do
|
||||
|
@ -276,12 +275,12 @@ commitStaged msg = do
|
|||
- Any pending adds that are not ready yet are put back into the ChangeChan,
|
||||
- where they will be retried later.
|
||||
-}
|
||||
handleAdds :: FilePath -> Bool -> GetFileMatcher -> Bool -> Maybe Seconds -> [Change] -> Assistant [Change]
|
||||
handleAdds :: OsPath -> Bool -> GetFileMatcher -> Bool -> Maybe Seconds -> [Change] -> Assistant [Change]
|
||||
handleAdds lockdowndir havelsof largefilematcher annexdotfiles delayadd cs = returnWhen (null incomplete) $ do
|
||||
let (pending, inprocess) = partition isPendingAddChange incomplete
|
||||
let lockdownconfig = LockDownConfig
|
||||
{ lockingFile = False
|
||||
, hardlinkFileTmpDir = Just (toRawFilePath lockdowndir)
|
||||
, hardlinkFileTmpDir = Just lockdowndir
|
||||
, checkWritePerms = True
|
||||
}
|
||||
(postponed, toadd) <- partitionEithers
|
||||
|
@ -307,12 +306,13 @@ handleAdds lockdowndir havelsof largefilematcher annexdotfiles delayadd cs = ret
|
|||
| otherwise = a
|
||||
|
||||
checkpointerfile change = do
|
||||
let file = toRawFilePath $ changeFile change
|
||||
let file = changeFile change
|
||||
mk <- liftIO $ isPointerFile file
|
||||
case mk of
|
||||
Nothing -> return (Right change)
|
||||
Just key -> do
|
||||
mode <- liftIO $ catchMaybeIO $ fileMode <$> R.getFileStatus file
|
||||
mode <- liftIO $ catchMaybeIO $
|
||||
fileMode <$> R.getFileStatus (fromOsPath file)
|
||||
liftAnnex $ stagePointerFile file mode =<< hashPointerFile key
|
||||
return $ Left $ Change
|
||||
(changeTime change)
|
||||
|
@ -328,7 +328,7 @@ handleAdds lockdowndir havelsof largefilematcher annexdotfiles delayadd cs = ret
|
|||
else checkmatcher
|
||||
| otherwise = checkmatcher
|
||||
where
|
||||
f = toRawFilePath (changeFile change)
|
||||
f = changeFile change
|
||||
checkmatcher = ifM (liftAnnex $ checkFileMatcher NoLiveUpdate largefilematcher f)
|
||||
( return (Left change)
|
||||
, return (Right change)
|
||||
|
@ -336,9 +336,9 @@ handleAdds lockdowndir havelsof largefilematcher annexdotfiles delayadd cs = ret
|
|||
|
||||
addsmall [] = noop
|
||||
addsmall toadd = liftAnnex $ void $ tryIO $
|
||||
forM (map (toRawFilePath . changeFile) toadd) $ \f ->
|
||||
forM (map changeFile toadd) $ \f ->
|
||||
Command.Add.addFile Command.Add.Small f
|
||||
=<< liftIO (R.getSymbolicLinkStatus f)
|
||||
=<< liftIO (R.getSymbolicLinkStatus (fromOsPath f))
|
||||
|
||||
{- Avoid overhead of re-ingesting a renamed unlocked file, by
|
||||
- examining the other Changes to see if a removed file has the
|
||||
|
@ -353,13 +353,13 @@ handleAdds lockdowndir havelsof largefilematcher annexdotfiles delayadd cs = ret
|
|||
delta <- liftAnnex getTSDelta
|
||||
let cfg = LockDownConfig
|
||||
{ lockingFile = False
|
||||
, hardlinkFileTmpDir = Just (toRawFilePath lockdowndir)
|
||||
, hardlinkFileTmpDir = Just lockdowndir
|
||||
, checkWritePerms = True
|
||||
}
|
||||
if M.null m
|
||||
then forM toadd (addannexed' cfg)
|
||||
else forM toadd $ \c -> do
|
||||
mcache <- liftIO $ genInodeCache (toRawFilePath (changeFile c)) delta
|
||||
mcache <- liftIO $ genInodeCache (changeFile c) delta
|
||||
case mcache of
|
||||
Nothing -> addannexed' cfg c
|
||||
Just cache ->
|
||||
|
@ -376,19 +376,19 @@ handleAdds lockdowndir havelsof largefilematcher annexdotfiles delayadd cs = ret
|
|||
(mkey, _mcache) <- liftAnnex $ do
|
||||
showStartMessage (StartMessage "add" (ActionItemOther (Just (QuotedPath (keyFilename ks)))) (SeekInput []))
|
||||
ingest nullMeterUpdate (Just $ LockedDown lockdownconfig ks) Nothing
|
||||
maybe (failedingest change) (done change $ fromRawFilePath $ keyFilename ks) mkey
|
||||
maybe (failedingest change) (done change $ keyFilename ks) mkey
|
||||
addannexed' _ _ = return Nothing
|
||||
|
||||
fastadd :: Change -> Key -> Assistant (Maybe Change)
|
||||
fastadd change key = do
|
||||
let source = keySource $ lockedDown change
|
||||
liftAnnex $ finishIngestUnlocked key source
|
||||
done change (fromRawFilePath $ keyFilename source) key
|
||||
done change (keyFilename source) key
|
||||
|
||||
removedKeysMap :: InodeComparisonType -> [Change] -> Annex (M.Map InodeCacheKey Key)
|
||||
removedKeysMap ct l = do
|
||||
mks <- forM (filter isRmChange l) $ \c ->
|
||||
catKeyFile $ toRawFilePath $ changeFile c
|
||||
catKeyFile $ changeFile c
|
||||
M.fromList . concat <$> mapM mkpairs (catMaybes mks)
|
||||
where
|
||||
mkpairs k = map (\c -> (inodeCacheToKey ct c, k)) <$>
|
||||
|
@ -401,8 +401,9 @@ handleAdds lockdowndir havelsof largefilematcher annexdotfiles delayadd cs = ret
|
|||
|
||||
done change file key = liftAnnex $ do
|
||||
logStatus NoLiveUpdate key InfoPresent
|
||||
mode <- liftIO $ catchMaybeIO $ fileMode <$> R.getFileStatus (toRawFilePath file)
|
||||
stagePointerFile (toRawFilePath file) mode =<< hashPointerFile key
|
||||
mode <- liftIO $ catchMaybeIO $
|
||||
fileMode <$> R.getFileStatus (fromOsPath file)
|
||||
stagePointerFile file mode =<< hashPointerFile key
|
||||
showEndOk
|
||||
return $ Just $ finishedChange change key
|
||||
|
||||
|
@ -410,14 +411,14 @@ handleAdds lockdowndir havelsof largefilematcher annexdotfiles delayadd cs = ret
|
|||
- and is still a hard link to its contentLocation,
|
||||
- before ingesting it. -}
|
||||
sanitycheck keysource a = do
|
||||
fs <- liftIO $ R.getSymbolicLinkStatus $ keyFilename keysource
|
||||
ks <- liftIO $ R.getSymbolicLinkStatus $ contentLocation keysource
|
||||
fs <- liftIO $ R.getSymbolicLinkStatus $ fromOsPath $ keyFilename keysource
|
||||
ks <- liftIO $ R.getSymbolicLinkStatus $ fromOsPath $ contentLocation keysource
|
||||
if deviceID ks == deviceID fs && fileID ks == fileID fs
|
||||
then a
|
||||
else do
|
||||
-- remove the hard link
|
||||
when (contentLocation keysource /= keyFilename keysource) $
|
||||
void $ liftIO $ tryIO $ removeFile $ fromRawFilePath $ contentLocation keysource
|
||||
void $ liftIO $ tryIO $ removeFile $ contentLocation keysource
|
||||
return Nothing
|
||||
|
||||
{- Shown an alert while performing an action to add a file or
|
||||
|
@ -430,7 +431,7 @@ handleAdds lockdowndir havelsof largefilematcher annexdotfiles delayadd cs = ret
|
|||
- the add succeeded.
|
||||
-}
|
||||
addaction [] a = a
|
||||
addaction toadd a = alertWhile' (addFileAlert $ map changeFile toadd) $
|
||||
addaction toadd a = alertWhile' (addFileAlert $ map (fromOsPath . changeFile) toadd) $
|
||||
(,)
|
||||
<$> pure True
|
||||
<*> a
|
||||
|
@ -440,7 +441,7 @@ handleAdds lockdowndir havelsof largefilematcher annexdotfiles delayadd cs = ret
|
|||
-
|
||||
- Check by running lsof on the repository.
|
||||
-}
|
||||
safeToAdd :: FilePath -> LockDownConfig -> Bool -> Maybe Seconds -> [Change] -> [Change] -> Assistant [Either Change Change]
|
||||
safeToAdd :: OsPath -> LockDownConfig -> Bool -> Maybe Seconds -> [Change] -> [Change] -> Assistant [Either Change Change]
|
||||
safeToAdd _ _ _ _ [] [] = return []
|
||||
safeToAdd lockdowndir lockdownconfig havelsof delayadd pending inprocess = do
|
||||
maybe noop (liftIO . threadDelaySeconds) delayadd
|
||||
|
@ -451,7 +452,8 @@ safeToAdd lockdowndir lockdownconfig havelsof delayadd pending inprocess = do
|
|||
then S.fromList . map fst3 . filter openwrite <$>
|
||||
findopenfiles (map (keySource . lockedDown) inprocess')
|
||||
else pure S.empty
|
||||
let checked = map (check openfiles) inprocess'
|
||||
let openfiles' = S.map toOsPath openfiles
|
||||
let checked = map (check openfiles') inprocess'
|
||||
|
||||
{- If new events are received when files are closed,
|
||||
- there's no need to retry any changes that cannot
|
||||
|
@ -463,7 +465,7 @@ safeToAdd lockdowndir lockdownconfig havelsof delayadd pending inprocess = do
|
|||
else return checked
|
||||
where
|
||||
check openfiles change@(InProcessAddChange { lockedDown = ld })
|
||||
| S.member (fromRawFilePath (contentLocation (keySource ld))) openfiles = Left change
|
||||
| S.member (contentLocation (keySource ld)) openfiles = Left change
|
||||
check _ change = Right change
|
||||
|
||||
mkinprocess (c, Just ld) = Just InProcessAddChange
|
||||
|
@ -478,7 +480,7 @@ safeToAdd lockdowndir lockdownconfig havelsof delayadd pending inprocess = do
|
|||
<> " still has writers, not adding"
|
||||
-- remove the hard link
|
||||
when (contentLocation ks /= keyFilename ks) $
|
||||
void $ liftIO $ tryIO $ removeFile $ fromRawFilePath $ contentLocation ks
|
||||
void $ liftIO $ tryIO $ removeFile $ contentLocation ks
|
||||
canceladd _ = noop
|
||||
|
||||
openwrite (_file, mode, _pid)
|
||||
|
@ -498,9 +500,9 @@ safeToAdd lockdowndir lockdownconfig havelsof delayadd pending inprocess = do
|
|||
findopenfiles keysources = ifM crippledFileSystem
|
||||
( liftIO $ do
|
||||
let segments = segmentXargsUnordered $
|
||||
map (fromRawFilePath . keyFilename) keysources
|
||||
map (fromOsPath . keyFilename) keysources
|
||||
concat <$> forM segments (\fs -> Lsof.query $ "--" : fs)
|
||||
, liftIO $ Lsof.queryDir lockdowndir
|
||||
, liftIO $ Lsof.queryDir (fromOsPath lockdowndir)
|
||||
)
|
||||
|
||||
{- After a Change is committed, queue any necessary transfers or drops
|
||||
|
@ -521,5 +523,5 @@ checkChangeContent change@(Change { changeInfo = i }) =
|
|||
handleDrops "file renamed" present k af []
|
||||
where
|
||||
f = changeFile change
|
||||
af = AssociatedFile (Just (toRawFilePath f))
|
||||
af = AssociatedFile (Just f)
|
||||
checkChangeContent _ = noop
|
||||
|
|
|
@ -44,7 +44,7 @@ configMonitorThread = namedThread "ConfigMonitor" $ loop =<< getConfigs
|
|||
when (old /= new) $ do
|
||||
let changedconfigs = new `S.difference` old
|
||||
debug $ "reloading config" :
|
||||
map (fromRawFilePath . fst)
|
||||
map (fromOsPath . fst)
|
||||
(S.toList changedconfigs)
|
||||
reloadConfigs new
|
||||
{- Record a commit to get this config
|
||||
|
@ -54,10 +54,10 @@ configMonitorThread = namedThread "ConfigMonitor" $ loop =<< getConfigs
|
|||
loop new
|
||||
|
||||
{- Config files, and their checksums. -}
|
||||
type Configs = S.Set (RawFilePath, Sha)
|
||||
type Configs = S.Set (OsPath, Sha)
|
||||
|
||||
{- All git-annex's config files, and actions to run when they change. -}
|
||||
configFilesActions :: [(RawFilePath, Assistant ())]
|
||||
configFilesActions :: [(OsPath, Assistant ())]
|
||||
configFilesActions =
|
||||
[ (uuidLog, void $ liftAnnex uuidDescMapLoad)
|
||||
, (remoteLog, void $ liftAnnex remotesChanged)
|
||||
|
@ -91,5 +91,5 @@ getConfigs :: Assistant Configs
|
|||
getConfigs = S.fromList . map extract
|
||||
<$> liftAnnex (inRepo $ LsTree.lsTreeFiles (LsTree.LsTreeLong False) Annex.Branch.fullname files)
|
||||
where
|
||||
files = map (fromRawFilePath . fst) configFilesActions
|
||||
files = map (fromOsPath . fst) configFilesActions
|
||||
extract treeitem = (getTopFilePath $ LsTree.file treeitem, LsTree.sha treeitem)
|
||||
|
|
|
@ -181,7 +181,7 @@ runActivity urlrenderer activity nowt = do
|
|||
|
||||
runActivity' :: UrlRenderer -> ScheduledActivity -> Assistant ()
|
||||
runActivity' urlrenderer (ScheduledSelfFsck _ d) = do
|
||||
program <- liftIO programPath
|
||||
program <- fromOsPath <$> liftIO programPath
|
||||
g <- liftAnnex gitRepo
|
||||
fsckresults <- showFscking urlrenderer Nothing $ tryNonAsync $ do
|
||||
void $ batchCommand program (Param "fsck" : annexFsckParams d)
|
||||
|
@ -196,7 +196,7 @@ runActivity' urlrenderer (ScheduledRemoteFsck u s d) = dispatch =<< liftAnnex (r
|
|||
dispatch Nothing = debug ["skipping remote fsck of uuid without a configured remote", fromUUID u, fromSchedule s]
|
||||
dispatch (Just rmt) = void $ case Remote.remoteFsck rmt of
|
||||
Nothing -> go rmt $ do
|
||||
program <- programPath
|
||||
program <- fromOsPath <$> programPath
|
||||
void $ batchCommand program $
|
||||
[ Param "fsck"
|
||||
-- avoid downloading files
|
||||
|
|
|
@ -24,8 +24,7 @@ import qualified Git
|
|||
import qualified Git.Branch
|
||||
import qualified Git.Ref
|
||||
import qualified Command.Sync
|
||||
|
||||
import qualified System.FilePath.ByteString as P
|
||||
import qualified Utility.OsString as OS
|
||||
|
||||
{- This thread watches for changes to .git/refs/, and handles incoming
|
||||
- pushes. -}
|
||||
|
@ -33,7 +32,7 @@ mergeThread :: NamedThread
|
|||
mergeThread = namedThread "Merger" $ do
|
||||
g <- liftAnnex gitRepo
|
||||
let gitd = Git.localGitDir g
|
||||
let dir = gitd P.</> "refs"
|
||||
let dir = gitd </> literalOsPath "refs"
|
||||
liftIO $ createDirectoryUnder [gitd] dir
|
||||
let hook a = Just <$> asIO2 (runHandler a)
|
||||
changehook <- hook onChange
|
||||
|
@ -43,21 +42,21 @@ mergeThread = namedThread "Merger" $ do
|
|||
, modifyHook = changehook
|
||||
, errHook = errhook
|
||||
}
|
||||
void $ liftIO $ watchDir (fromRawFilePath dir) (const False) True hooks id
|
||||
debug ["watching", fromRawFilePath dir]
|
||||
void $ liftIO $ watchDir dir (const False) True hooks id
|
||||
debug ["watching", fromOsPath dir]
|
||||
|
||||
type Handler = FilePath -> Assistant ()
type Handler t = t -> Assistant ()

{- Runs an action handler.
-
- Exceptions are ignored, otherwise a whole thread could be crashed.
-}
runHandler :: Handler -> FilePath -> Maybe FileStatus -> Assistant ()
runHandler :: Handler t -> t -> Maybe FileStatus -> Assistant ()
runHandler handler file _filestatus =
either (liftIO . print) (const noop) =<< tryIO <~> handler file

{- Called when there's an error with inotify. -}
onErr :: Handler
onErr :: Handler String
onErr = giveup
|
||||
|
||||
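The change above generalises the Merger's Handler alias from FilePath to a type parameter, so onChange can take an OsPath while onErr keeps taking a String error message. A toy standalone sketch of the same idea, with IO standing in for the Assistant monad:

-- Toy standalone sketch (not git-annex code) of the parametrised handler type
-- introduced above.
type Handler t = t -> IO ()

onFileChange :: Handler FilePath   -- the real code uses Handler OsPath
onFileChange f = putStrLn ("changed: " ++ f)

onError :: Handler String
onError = putStrLn . ("watcher error: " ++)

runHandler :: Handler t -> t -> IO ()
runHandler handler x = handler x   -- the real runHandler also catches exceptions

main :: IO ()
main = runHandler onFileChange "refs/heads/master" >> runHandler onError "inotify limit reached"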
{- Called when a new branch ref is written, or a branch ref is modified.
|
||||
|
@ -66,9 +65,9 @@ onErr = giveup
|
|||
- ok; it ensures that any changes pushed since the last time the assistant
|
||||
- ran are merged in.
|
||||
-}
|
||||
onChange :: Handler
|
||||
onChange :: Handler OsPath
|
||||
onChange file
|
||||
| ".lock" `isSuffixOf` file = noop
|
||||
| literalOsPath ".lock" `OS.isSuffixOf` file = noop
|
||||
| isAnnexBranch file = do
|
||||
branchChanged
|
||||
diverged <- liftAnnex Annex.Branch.forceUpdate >>= return . \case
|
||||
|
@ -112,7 +111,7 @@ onChange file
|
|||
- to the second branch, which should be merged into it? -}
|
||||
isRelatedTo :: Git.Ref -> Git.Ref -> Bool
|
||||
isRelatedTo x y
|
||||
| basex /= takeDirectory basex ++ "/" ++ basey = False
|
||||
| basex /= fromOsPath (takeDirectory (toOsPath basex)) ++ "/" ++ basey = False
|
||||
| "/synced/" `isInfixOf` Git.fromRef x = True
|
||||
| "refs/remotes/" `isPrefixOf` Git.fromRef x = True
|
||||
| otherwise = False
|
||||
|
@ -120,12 +119,12 @@ isRelatedTo x y
|
|||
basex = Git.fromRef $ Git.Ref.base x
|
||||
basey = Git.fromRef $ Git.Ref.base y
|
||||
|
||||
isAnnexBranch :: FilePath -> Bool
|
||||
isAnnexBranch f = n `isSuffixOf` f
|
||||
isAnnexBranch :: OsPath -> Bool
|
||||
isAnnexBranch f = n `isSuffixOf` fromOsPath f
|
||||
where
|
||||
n = '/' : Git.fromRef Annex.Branch.name
|
||||
|
||||
fileToBranch :: FilePath -> Git.Ref
|
||||
fileToBranch f = Git.Ref $ encodeBS $ "refs" </> base
|
||||
fileToBranch :: OsPath -> Git.Ref
|
||||
fileToBranch f = Git.Ref $ fromOsPath $ literalOsPath "refs" </> toOsPath base
|
||||
where
|
||||
base = Prelude.last $ split "/refs/" f
|
||||
base = Prelude.last $ split "/refs/" (fromOsPath f)
|
||||
|
|
|
@ -138,12 +138,12 @@ pollingThread urlrenderer = go =<< liftIO currentMountPoints
|
|||
|
||||
handleMounts :: UrlRenderer -> MountPoints -> MountPoints -> Assistant ()
|
||||
handleMounts urlrenderer wasmounted nowmounted =
|
||||
mapM_ (handleMount urlrenderer . mnt_dir) $
|
||||
mapM_ (handleMount urlrenderer . toOsPath . mnt_dir) $
|
||||
S.toList $ newMountPoints wasmounted nowmounted
|
||||
|
||||
handleMount :: UrlRenderer -> FilePath -> Assistant ()
|
||||
handleMount :: UrlRenderer -> OsPath -> Assistant ()
|
||||
handleMount urlrenderer dir = do
|
||||
debug ["detected mount of", dir]
|
||||
debug ["detected mount of", fromOsPath dir]
|
||||
rs <- filterM (Git.repoIsLocal <$$> liftAnnex . Remote.getRepo)
|
||||
=<< remotesUnder dir
|
||||
mapM_ (fsckNudge urlrenderer . Just) rs
|
||||
|
@ -157,7 +157,7 @@ handleMount urlrenderer dir = do
|
|||
- at startup time, or may have changed (it could even be a different
|
||||
- repository at the same remote location..)
|
||||
-}
|
||||
remotesUnder :: FilePath -> Assistant [Remote]
|
||||
remotesUnder :: OsPath -> Assistant [Remote]
|
||||
remotesUnder dir = do
|
||||
repotop <- liftAnnex $ fromRepo Git.repoPath
|
||||
rs <- liftAnnex remoteList
|
||||
|
@ -169,7 +169,7 @@ remotesUnder dir = do
|
|||
return $ mapMaybe snd $ filter fst pairs
|
||||
where
|
||||
checkremote repotop r = case Remote.localpath r of
|
||||
Just p | dirContains (toRawFilePath dir) (absPathFrom repotop (toRawFilePath p)) ->
|
||||
Just p | dirContains dir (absPathFrom repotop p) ->
|
||||
(,) <$> pure True <*> updateRemote r
|
||||
_ -> return (False, Just r)
|
||||
|
||||
|
|
|
@ -121,7 +121,7 @@ pairReqReceived False urlrenderer msg = do
|
|||
pairAckReceived :: Bool -> Maybe PairingInProgress -> PairMsg -> [PairingInProgress] -> Assistant [PairingInProgress]
|
||||
pairAckReceived True (Just pip) msg cache = do
|
||||
stopSending pip
|
||||
repodir <- fromRawFilePath . repoPath <$> liftAnnex gitRepo
|
||||
repodir <- repoPath <$> liftAnnex gitRepo
|
||||
liftIO $ setupAuthorizedKeys msg repodir
|
||||
finishedLocalPairing msg (inProgressSshKeyPair pip)
|
||||
startSending pip PairDone $ multicastPairMsg
|
||||
|
|
|
@ -28,7 +28,7 @@ import qualified Data.Set as S
|
|||
|
||||
remoteControlThread :: NamedThread
|
||||
remoteControlThread = namedThread "RemoteControl" $ do
|
||||
program <- liftIO programPath
|
||||
program <- liftIO $ fromOsPath <$> programPath
|
||||
(cmd, params) <- liftIO $ toBatchCommand
|
||||
(program, [Param "remotedaemon", Param "--foreground"])
|
||||
let p = proc cmd (toCommand params)
|
||||
|
|
|
@ -68,7 +68,7 @@ sanityCheckerStartupThread startupdelay = namedThreadUnchecked "SanityCheckerSta
|
|||
ifM (not <$> liftAnnex (inRepo checkIndexFast))
|
||||
( do
|
||||
debug ["corrupt index file found at startup; removing and restaging"]
|
||||
liftAnnex $ inRepo $ removeWhenExistsWith R.removeLink . indexFile
|
||||
liftAnnex $ inRepo $ removeWhenExistsWith removeFile . indexFile
|
||||
{- Normally the startup scan avoids re-staging files,
|
||||
- but with the index deleted, everything needs to be
|
||||
- restaged. -}
|
||||
|
@ -82,7 +82,7 @@ sanityCheckerStartupThread startupdelay = namedThreadUnchecked "SanityCheckerSta
|
|||
- will be automatically regenerated. -}
|
||||
unlessM (liftAnnex $ Annex.Branch.withIndex $ inRepo $ Git.Repair.checkIndexFast) $ do
|
||||
debug ["corrupt annex/index file found at startup; removing"]
|
||||
liftAnnex $ liftIO . removeWhenExistsWith R.removeLink =<< fromRepo gitAnnexIndex
|
||||
liftAnnex $ liftIO . removeWhenExistsWith removeFile =<< fromRepo gitAnnexIndex
|
||||
|
||||
{- Fix up ssh remotes set up by past versions of the assistant. -}
|
||||
liftIO $ fixUpSshRemotes
|
||||
|
@ -154,13 +154,13 @@ dailyCheck urlrenderer = do
|
|||
batchmaker <- liftIO getBatchCommandMaker
|
||||
|
||||
-- Find old unstaged symlinks, and add them to git.
|
||||
(unstaged, cleanup) <- liftIO $ Git.LsFiles.notInRepo [] False ["."] g
|
||||
(unstaged, cleanup) <- liftIO $ Git.LsFiles.notInRepo [] False [literalOsPath "."] g
|
||||
now <- liftIO getPOSIXTime
|
||||
forM_ unstaged $ \file -> do
|
||||
ms <- liftIO $ catchMaybeIO $ R.getSymbolicLinkStatus file
|
||||
ms <- liftIO $ catchMaybeIO $ R.getSymbolicLinkStatus $ fromOsPath file
|
||||
case ms of
|
||||
Just s | toonew (statusChangeTime s) now -> noop
|
||||
| isSymbolicLink s -> addsymlink (fromRawFilePath file) ms
|
||||
| isSymbolicLink s -> addsymlink file ms
|
||||
_ -> noop
|
||||
liftIO $ void cleanup
|
||||
|
||||
|
@ -182,7 +182,7 @@ dailyCheck urlrenderer = do
|
|||
{- Run git-annex unused once per day. This is run as a separate
|
||||
- process to stay out of the annex monad and so it can run as a
|
||||
- batch job. -}
|
||||
program <- liftIO programPath
|
||||
program <- fromOsPath <$> liftIO programPath
|
||||
let (program', params') = batchmaker (program, [Param "unused"])
|
||||
void $ liftIO $ boolSystem program' params'
|
||||
{- Invalidate unused keys cache, and queue transfers of all unused
|
||||
|
@ -202,7 +202,7 @@ dailyCheck urlrenderer = do
|
|||
void $ addAlert $ sanityCheckFixAlert msg
|
||||
addsymlink file s = do
|
||||
Watcher.runHandler Watcher.onAddSymlink file s
|
||||
insanity $ "found unstaged symlink: " ++ file
|
||||
insanity $ "found unstaged symlink: " ++ fromOsPath file
|
||||
|
||||
hourlyCheck :: Assistant ()
|
||||
hourlyCheck = do
|
||||
|
@ -222,14 +222,14 @@ hourlyCheck = do
|
|||
-}
|
||||
checkLogSize :: Int -> Assistant ()
checkLogSize n = do
f <- liftAnnex $ fromRawFilePath <$> fromRepo gitAnnexDaemonLogFile
logs <- liftIO $ listLogs f
totalsize <- liftIO $ sum <$> mapM (getFileSize . toRawFilePath) logs
f <- liftAnnex $ fromRepo gitAnnexDaemonLogFile
logs <- liftIO $ listLogs (fromOsPath f)
totalsize <- liftIO $ sum <$> mapM (getFileSize . toOsPath) logs
when (totalsize > 2 * oneMegabyte) $ do
debug ["Rotated logs due to size:", show totalsize]
liftIO $ openLog f >>= handleToFd >>= redirLog
liftIO $ openLog (fromOsPath f) >>= handleToFd >>= redirLog
when (n < maxLogs + 1) $ do
df <- liftIO $ getDiskFree $ takeDirectory f
df <- liftIO $ getDiskFree $ fromOsPath $ takeDirectory f
case df of
Just free
| free < fromIntegral totalsize ->
|
||||
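checkLogSize above totals the sizes of the current and rotated daemon logs and rotates once they pass roughly two megabytes. A standalone sketch of just that size test; listLogs, openLog and redirLog are git-annex internals and are left out, and the oneMegabyte constant here is an assumption:

import System.Directory (getFileSize)

-- Assumption: oneMegabyte matches the constant used by checkLogSize above.
oneMegabyte :: Integer
oneMegabyte = 1000000

-- Standalone sketch: is the combined size of these log files past the
-- rotation threshold used above?
logsNeedRotation :: [FilePath] -> IO Bool
logsNeedRotation logs = do
    totalsize <- sum <$> mapM getFileSize logs
    return (totalsize > 2 * oneMegabyte)

main :: IO ()
main = print =<< logsNeedRotation ["daemon.log", "daemon.log.1"]  -- files assumed to exist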
|
@ -270,5 +270,5 @@ checkOldUnused urlrenderer = go =<< annexExpireUnused <$> liftAnnex Annex.getGit
|
|||
checkRepoExists :: Assistant ()
|
||||
checkRepoExists = do
|
||||
g <- liftAnnex gitRepo
|
||||
liftIO $ unlessM (doesDirectoryExist $ fromRawFilePath $ Git.repoPath g) $
|
||||
liftIO $ unlessM (doesDirectoryExist $ Git.repoPath g) $
|
||||
terminateSelf
|
||||
|
|
|
@ -38,26 +38,26 @@ transferWatcherThread = namedThread "TransferWatcher" $ do
|
|||
, modifyHook = modifyhook
|
||||
, errHook = errhook
|
||||
}
|
||||
void $ liftIO $ watchDir (fromRawFilePath dir) (const False) True hooks id
|
||||
void $ liftIO $ watchDir dir (const False) True hooks id
|
||||
debug ["watching for transfers"]
|
||||
|
||||
type Handler = FilePath -> Assistant ()
|
||||
type Handler t = t -> Assistant ()
|
||||
|
||||
{- Runs an action handler.
|
||||
-
|
||||
- Exceptions are ignored, otherwise a whole thread could be crashed.
|
||||
-}
|
||||
runHandler :: Handler -> FilePath -> Maybe FileStatus -> Assistant ()
|
||||
runHandler :: Handler t -> t -> Maybe FileStatus -> Assistant ()
|
||||
runHandler handler file _filestatus =
|
||||
either (liftIO . print) (const noop) =<< tryIO <~> handler file
|
||||
|
||||
{- Called when there's an error with inotify. -}
|
||||
onErr :: Handler
|
||||
onErr :: Handler String
|
||||
onErr = giveup
|
||||
|
||||
{- Called when a new transfer information file is written. -}
|
||||
onAdd :: Handler
|
||||
onAdd file = case parseTransferFile (toRawFilePath file) of
|
||||
onAdd :: Handler OsPath
|
||||
onAdd file = case parseTransferFile file of
|
||||
Nothing -> noop
|
||||
Just t -> go t =<< liftAnnex (checkTransfer t)
|
||||
where
|
||||
|
@ -72,10 +72,10 @@ onAdd file = case parseTransferFile (toRawFilePath file) of
|
|||
-
|
||||
- The only thing that should change in the transfer info is the
|
||||
- bytesComplete, so that's the only thing updated in the DaemonStatus. -}
|
||||
onModify :: Handler
|
||||
onModify file = case parseTransferFile (toRawFilePath file) of
|
||||
onModify :: Handler OsPath
|
||||
onModify file = case parseTransferFile file of
|
||||
Nothing -> noop
|
||||
Just t -> go t =<< liftIO (readTransferInfoFile Nothing (toRawFilePath file))
|
||||
Just t -> go t =<< liftIO (readTransferInfoFile Nothing file)
|
||||
where
|
||||
go _ Nothing = noop
|
||||
go t (Just newinfo) = alterTransferInfo t $
|
||||
|
@ -87,8 +87,8 @@ watchesTransferSize :: Bool
|
|||
watchesTransferSize = modifyTracked
|
||||
|
||||
{- Called when a transfer information file is removed. -}
|
||||
onDel :: Handler
|
||||
onDel file = case parseTransferFile (toRawFilePath file) of
|
||||
onDel :: Handler OsPath
|
||||
onDel file = case parseTransferFile file of
|
||||
Nothing -> noop
|
||||
Just t -> do
|
||||
debug [ "transfer finishing:", show t]
|
||||
|
|
|
@ -46,7 +46,7 @@ upgradeWatcherThread urlrenderer = namedThread "UpgradeWatcher" $ do
|
|||
, modifyHook = changed
|
||||
, delDirHook = changed
|
||||
}
|
||||
let dir = fromRawFilePath (parentDir (toRawFilePath flagfile))
|
||||
let dir = parentDir flagfile
|
||||
let depth = length (splitPath dir) + 1
|
||||
let nosubdirs f = length (splitPath f) == depth
|
||||
void $ liftIO $ watchDir dir nosubdirs False hooks (startup mvar)
|
||||
|
@ -57,7 +57,7 @@ upgradeWatcherThread urlrenderer = namedThread "UpgradeWatcher" $ do
|
|||
void $ swapMVar mvar Started
|
||||
return r
|
||||
|
||||
changedFile :: UrlRenderer -> MVar WatcherState -> FilePath -> FilePath -> Maybe FileStatus -> Assistant ()
|
||||
changedFile :: UrlRenderer -> MVar WatcherState -> OsPath -> OsPath -> Maybe FileStatus -> Assistant ()
|
||||
changedFile urlrenderer mvar flagfile file _status
|
||||
| flagfile /= file = noop
|
||||
| otherwise = do
|
||||
|
|
|
@ -42,6 +42,7 @@ import Git.FilePath
|
|||
import Config.GitConfig
|
||||
import Utility.ThreadScheduler
|
||||
import Logs.Location
|
||||
import qualified Utility.OsString as OS
|
||||
import qualified Database.Keys
|
||||
#ifndef mingw32_HOST_OS
|
||||
import qualified Utility.Lsof as Lsof
|
||||
|
@ -94,16 +95,16 @@ runWatcher = do
|
|||
delhook <- hook onDel
|
||||
addsymlinkhook <- hook onAddSymlink
|
||||
deldirhook <- hook onDelDir
|
||||
errhook <- hook onErr
|
||||
errhook <- asIO2 onErr
|
||||
let hooks = mkWatchHooks
|
||||
{ addHook = addhook
|
||||
, delHook = delhook
|
||||
, addSymlinkHook = addsymlinkhook
|
||||
, delDirHook = deldirhook
|
||||
, errHook = errhook
|
||||
, errHook = Just errhook
|
||||
}
|
||||
scanevents <- liftAnnex $ annexStartupScan <$> Annex.getGitConfig
|
||||
h <- liftIO $ watchDir "." ignored scanevents hooks startup
|
||||
h <- liftIO $ watchDir (literalOsPath ".") ignored scanevents hooks startup
|
||||
debug [ "watching", "."]
|
||||
|
||||
{- Let the DirWatcher thread run until signalled to pause it,
|
||||
|
@ -138,9 +139,8 @@ startupScan scanner = do
|
|||
top <- liftAnnex $ fromRepo Git.repoPath
|
||||
(fs, cleanup) <- liftAnnex $ inRepo $ LsFiles.deleted [] [top]
|
||||
forM_ fs $ \f -> do
|
||||
let f' = fromRawFilePath f
|
||||
liftAnnex $ onDel' f'
|
||||
maybe noop recordChange =<< madeChange f' RmChange
|
||||
liftAnnex $ onDel' f
|
||||
maybe noop recordChange =<< madeChange f RmChange
|
||||
void $ liftIO cleanup
|
||||
|
||||
liftAnnex $ showAction "started"
|
||||
|
@ -157,30 +157,31 @@ startupScan scanner = do
|
|||
|
||||
{- Hardcoded ignores, passed to the DirWatcher so it can avoid looking
- at the entire .git directory. Does not include .gitignores. -}
ignored :: FilePath -> Bool
ignored :: OsPath -> Bool
ignored = ig . takeFileName
where
ig ".git" = True
ig ".gitignore" = True
ig ".gitattributes" = True
ig f
| f == literalOsPath ".git" = True
| f == literalOsPath ".gitignore" = True
| f == literalOsPath ".gitattributes" = True
#ifdef darwin_HOST_OS
ig ".DS_Store" = True
| f == literalOsPath ".DS_Store" = True
#endif
ig _ = False
| otherwise = False
|
||||
|
||||
unlessIgnored :: FilePath -> Assistant (Maybe Change) -> Assistant (Maybe Change)
|
||||
unlessIgnored file a = ifM (liftAnnex $ checkIgnored (CheckGitIgnore True) (toRawFilePath file))
|
||||
unlessIgnored :: OsPath -> Assistant (Maybe Change) -> Assistant (Maybe Change)
|
||||
unlessIgnored file a = ifM (liftAnnex $ checkIgnored (CheckGitIgnore True) file)
|
||||
( noChange
|
||||
, a
|
||||
)
|
||||
|
||||
type Handler = FilePath -> Maybe FileStatus -> Assistant (Maybe Change)
|
||||
type Handler = OsPath -> Maybe FileStatus -> Assistant (Maybe Change)
|
||||
|
||||
{- Runs an action handler, and if there was a change, adds it to the ChangeChan.
|
||||
-
|
||||
- Exceptions are ignored, otherwise a whole watcher thread could be crashed.
|
||||
-}
|
||||
runHandler :: Handler -> FilePath -> Maybe FileStatus -> Assistant ()
|
||||
runHandler :: Handler -> OsPath -> Maybe FileStatus -> Assistant ()
|
||||
runHandler handler file filestatus = void $ do
|
||||
r <- tryIO <~> handler (normalize file) filestatus
|
||||
case r of
|
||||
|
@ -189,7 +190,7 @@ runHandler handler file filestatus = void $ do
|
|||
Right (Just change) -> recordChange change
|
||||
where
|
||||
normalize f
|
||||
| "./" `isPrefixOf` file = drop 2 f
|
||||
| literalOsPath "./" `OS.isPrefixOf` file = OS.drop 2 f
|
||||
| otherwise = f
|
||||
|
||||
shouldRestage :: DaemonStatus -> Bool
|
||||
|
@ -201,34 +202,34 @@ onAddFile symlinkssupported f fs =
|
|||
where
|
||||
addassociatedfile key file =
|
||||
Database.Keys.addAssociatedFile key
|
||||
=<< inRepo (toTopFilePath (toRawFilePath file))
|
||||
=<< inRepo (toTopFilePath file)
|
||||
samefilestatus key file status = do
|
||||
cache <- Database.Keys.getInodeCaches key
|
||||
curr <- withTSDelta $ \delta ->
|
||||
liftIO $ toInodeCache delta (toRawFilePath file) status
|
||||
liftIO $ toInodeCache delta file status
|
||||
case (cache, curr) of
|
||||
(_, Just c) -> elemInodeCaches c cache
|
||||
([], Nothing) -> return True
|
||||
_ -> return False
|
||||
contentchanged oldkey file = do
|
||||
Database.Keys.removeAssociatedFile oldkey
|
||||
=<< inRepo (toTopFilePath (toRawFilePath file))
|
||||
=<< inRepo (toTopFilePath file)
|
||||
unlessM (inAnnex oldkey) $
|
||||
logStatus NoLiveUpdate oldkey InfoMissing
|
||||
addlink file key = do
|
||||
mode <- liftIO $ catchMaybeIO $ fileMode <$> R.getFileStatus (toRawFilePath file)
|
||||
liftAnnex $ stagePointerFile (toRawFilePath file) mode =<< hashPointerFile key
|
||||
mode <- liftIO $ catchMaybeIO $ fileMode <$> R.getFileStatus (fromOsPath file)
|
||||
liftAnnex $ stagePointerFile file mode =<< hashPointerFile key
|
||||
madeChange file $ LinkChange (Just key)
|
||||
|
||||
onAddFile'
|
||||
:: (Key -> FilePath -> Annex ())
|
||||
-> (Key -> FilePath -> Annex ())
|
||||
-> (FilePath -> Key -> Assistant (Maybe Change))
|
||||
-> (Key -> FilePath -> FileStatus -> Annex Bool)
|
||||
:: (Key -> OsPath -> Annex ())
|
||||
-> (Key -> OsPath -> Annex ())
|
||||
-> (OsPath -> Key -> Assistant (Maybe Change))
|
||||
-> (Key -> OsPath -> FileStatus -> Annex Bool)
|
||||
-> Bool
|
||||
-> Handler
|
||||
onAddFile' contentchanged addassociatedfile addlink samefilestatus symlinkssupported file fs = do
|
||||
v <- liftAnnex $ catKeyFile (toRawFilePath file)
|
||||
v <- liftAnnex $ catKeyFile file
|
||||
case (v, fs) of
|
||||
(Just key, Just filestatus) ->
|
||||
ifM (liftAnnex $ samefilestatus key file filestatus)
|
||||
|
@ -242,13 +243,13 @@ onAddFile' contentchanged addassociatedfile addlink samefilestatus symlinkssuppo
|
|||
, noChange
|
||||
)
|
||||
, guardSymlinkStandin (Just key) $ do
|
||||
debug ["changed", file]
|
||||
debug ["changed", fromOsPath file]
|
||||
liftAnnex $ contentchanged key file
|
||||
pendingAddChange file
|
||||
)
|
||||
_ -> unlessIgnored file $
|
||||
guardSymlinkStandin Nothing $ do
|
||||
debug ["add", file]
|
||||
debug ["add", fromOsPath file]
|
||||
pendingAddChange file
|
||||
where
|
||||
{- On a filesystem without symlinks, we'll get changes for regular
|
||||
|
@ -258,8 +259,7 @@ onAddFile' contentchanged addassociatedfile addlink samefilestatus symlinkssuppo
|
|||
guardSymlinkStandin mk a
|
||||
| symlinkssupported = a
|
||||
| otherwise = do
|
||||
linktarget <- liftAnnex $ getAnnexLinkTarget $
|
||||
toRawFilePath file
|
||||
linktarget <- liftAnnex $ getAnnexLinkTarget file
|
||||
case linktarget of
|
||||
Nothing -> a
|
||||
Just lt -> do
|
||||
|
@ -275,21 +275,20 @@ onAddFile' contentchanged addassociatedfile addlink samefilestatus symlinkssuppo
|
|||
-}
|
||||
onAddSymlink :: Handler
|
||||
onAddSymlink file filestatus = unlessIgnored file $ do
|
||||
linktarget <- liftIO (catchMaybeIO $ R.readSymbolicLink file')
|
||||
kv <- liftAnnex (lookupKey file')
|
||||
linktarget <- liftIO $ catchMaybeIO $
|
||||
R.readSymbolicLink (fromOsPath file)
|
||||
kv <- liftAnnex (lookupKey file)
|
||||
onAddSymlink' linktarget kv file filestatus
|
||||
where
|
||||
file' = toRawFilePath file
|
||||
|
||||
onAddSymlink' :: Maybe LinkTarget -> Maybe Key -> Handler
|
||||
onAddSymlink' linktarget mk file filestatus = go mk
|
||||
where
|
||||
go (Just key) = do
|
||||
link <- liftAnnex $ calcRepo $ gitAnnexLink (toRawFilePath file) key
|
||||
link <- liftAnnex $ fromOsPath <$> calcRepo (gitAnnexLink file key)
|
||||
if linktarget == Just link
|
||||
then ensurestaged (Just link) =<< getDaemonStatus
|
||||
else do
|
||||
liftAnnex $ replaceWorkTreeFile (toRawFilePath file) $
|
||||
liftAnnex $ replaceWorkTreeFile file $
|
||||
makeAnnexLink link
|
||||
addLink file link (Just key)
|
||||
-- other symlink, not git-annex
|
||||
|
@ -315,33 +314,32 @@ onAddSymlink' linktarget mk file filestatus = go mk
|
|||
ensurestaged Nothing _ = noChange
|
||||
|
||||
{- For speed, tries to reuse the existing blob for symlink target. -}
|
||||
addLink :: FilePath -> LinkTarget -> Maybe Key -> Assistant (Maybe Change)
|
||||
addLink :: OsPath -> LinkTarget -> Maybe Key -> Assistant (Maybe Change)
|
||||
addLink file link mk = do
|
||||
debug ["add symlink", file]
|
||||
debug ["add symlink", fromOsPath file]
|
||||
liftAnnex $ do
|
||||
v <- catObjectDetails $ Ref $ encodeBS $ ':':file
|
||||
v <- catObjectDetails $ Ref $ encodeBS $ ':':fromOsPath file
|
||||
case v of
|
||||
Just (currlink, sha, _type)
|
||||
| L.fromStrict link == currlink ->
|
||||
stageSymlink (toRawFilePath file) sha
|
||||
_ -> stageSymlink (toRawFilePath file)
|
||||
=<< hashSymlink link
|
||||
stageSymlink file sha
|
||||
_ -> stageSymlink file =<< hashSymlink link
|
||||
madeChange file $ LinkChange mk
|
||||
|
||||
onDel :: Handler
|
||||
onDel file _ = do
|
||||
debug ["file deleted", file]
|
||||
debug ["file deleted", fromOsPath file]
|
||||
liftAnnex $ onDel' file
|
||||
madeChange file RmChange
|
||||
|
||||
onDel' :: FilePath -> Annex ()
|
||||
onDel' :: OsPath -> Annex ()
|
||||
onDel' file = do
|
||||
topfile <- inRepo (toTopFilePath (toRawFilePath file))
|
||||
topfile <- inRepo (toTopFilePath file)
|
||||
withkey $ flip Database.Keys.removeAssociatedFile topfile
|
||||
Annex.Queue.addUpdateIndex =<<
|
||||
inRepo (Git.UpdateIndex.unstageFile (toRawFilePath file))
|
||||
inRepo (Git.UpdateIndex.unstageFile file)
|
||||
where
|
||||
withkey a = maybe noop a =<< catKeyFile (toRawFilePath file)
|
||||
withkey a = maybe noop a =<< catKeyFile file
|
||||
|
||||
{- A directory has been deleted, or moved, so tell git to remove anything
|
||||
- that was inside it from its cache. Since it could reappear at any time,
|
||||
|
@ -351,23 +349,21 @@ onDel' file = do
|
|||
- pairing up renamed files when the directory was renamed. -}
|
||||
onDelDir :: Handler
|
||||
onDelDir dir _ = do
|
||||
debug ["directory deleted", dir]
|
||||
(fs, clean) <- liftAnnex $ inRepo $ LsFiles.deleted [] [toRawFilePath dir]
|
||||
let fs' = map fromRawFilePath fs
|
||||
debug ["directory deleted", fromOsPath dir]
|
||||
(fs, clean) <- liftAnnex $ inRepo $ LsFiles.deleted [] [dir]
|
||||
|
||||
liftAnnex $ mapM_ onDel' fs'
|
||||
liftAnnex $ mapM_ onDel' fs
|
||||
|
||||
-- Get the events queued up as fast as possible, so the
|
||||
-- committer sees them all in one block.
|
||||
now <- liftIO getCurrentTime
|
||||
recordChanges $ map (\f -> Change now f RmChange) fs'
|
||||
recordChanges $ map (\f -> Change now f RmChange) fs
|
||||
|
||||
void $ liftIO clean
|
||||
noChange
|
||||
|
||||
{- Called when there's an error with inotify or kqueue. -}
|
||||
onErr :: Handler
|
||||
onErr :: String -> Maybe FileStatus -> Assistant ()
|
||||
onErr msg _ = do
|
||||
liftAnnex $ warning (UnquotedString msg)
|
||||
void $ addAlert $ warningAlert "watcher" msg
|
||||
noChange
|
||||
|
|
|
@ -62,7 +62,7 @@ webAppThread
|
|||
-> Maybe (IO Url)
|
||||
-> Maybe HostName
|
||||
-> Maybe PortNumber
|
||||
-> Maybe (Url -> FilePath -> IO ())
|
||||
-> Maybe (Url -> OsPath -> IO ())
|
||||
-> NamedThread
|
||||
webAppThread assistantdata urlrenderer noannex cannotrun postfirstrun listenhost listenport onstartup = thread $ liftIO $ do
|
||||
listenhost' <- if isJust listenhost
|
||||
|
@ -89,15 +89,13 @@ webAppThread assistantdata urlrenderer noannex cannotrun postfirstrun listenhost
|
|||
, return app
|
||||
)
|
||||
runWebApp tlssettings listenhost' listenport' app' $ \addr -> if noannex
|
||||
then withTmpFile (toOsPath "webapp.html") $ \tmpfile h -> do
|
||||
then withTmpFile (literalOsPath "webapp.html") $ \tmpfile h -> do
|
||||
hClose h
|
||||
go tlssettings addr webapp (fromRawFilePath (fromOsPath tmpfile)) Nothing
|
||||
go tlssettings addr webapp tmpfile Nothing
|
||||
else do
|
||||
htmlshim <- getAnnex' $ fromRepo gitAnnexHtmlShim
|
||||
urlfile <- getAnnex' $ fromRepo gitAnnexUrlFile
|
||||
go tlssettings addr webapp
|
||||
(fromRawFilePath htmlshim)
|
||||
(Just urlfile)
|
||||
go tlssettings addr webapp htmlshim (Just urlfile)
|
||||
where
|
||||
-- The webapp thread does not wait for the startupSanityCheckThread
|
||||
-- to finish, so that the user interface remains responsive while
|
||||
|
@ -105,8 +103,8 @@ webAppThread assistantdata urlrenderer noannex cannotrun postfirstrun listenhost
|
|||
thread = namedThreadUnchecked "WebApp"
|
||||
getreldir
|
||||
| noannex = return Nothing
|
||||
| otherwise = Just <$>
|
||||
(relHome . fromRawFilePath =<< absPath =<< getAnnex' (fromRepo repoPath))
|
||||
| otherwise = Just . fromOsPath <$>
|
||||
(relHome =<< absPath =<< getAnnex' (fromRepo repoPath))
|
||||
go tlssettings addr webapp htmlshim urlfile = do
|
||||
let url = myUrl tlssettings webapp addr
|
||||
maybe noop (`writeFileProtected` url) urlfile
|
||||
|
@ -131,6 +129,8 @@ getTlsSettings = do
|
|||
cert <- fromRepo gitAnnexWebCertificate
privkey <- fromRepo gitAnnexWebPrivKey
ifM (liftIO $ allM doesFileExist [cert, privkey])
( return $ Just $ TLS.tlsSettings cert privkey
( return $ Just $ TLS.tlsSettings
(fromOsPath cert)
(fromOsPath privkey)
, return Nothing
)
|
||||
|
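getTlsSettings above only enables TLS when both the certificate and private key exist, handing warp-tls plain FilePaths. A standalone sketch of the same check against the warp-tls API; the file paths are supplied by the caller here rather than read from git config:

import System.Directory (doesFileExist)
import qualified Network.Wai.Handler.WarpTLS as TLS

-- Standalone sketch: only build TLSSettings when both files are present,
-- mirroring the check in getTlsSettings above.
maybeTlsSettings :: FilePath -> FilePath -> IO (Maybe TLS.TLSSettings)
maybeTlsSettings cert privkey = do
    ok <- and <$> mapM doesFileExist [cert, privkey]
    return $ if ok then Just (TLS.tlsSettings cert privkey) else Nothing

main :: IO ()
main = putStrLn . maybe "TLS disabled" (const "TLS enabled")
    =<< maybeTlsSettings "certificate.pem" "privkey.pem"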
|
|
@ -174,7 +174,7 @@ genTransfer t info = case transferRemote info of
|
|||
AssociatedFile Nothing -> noop
|
||||
AssociatedFile (Just af) -> void $
|
||||
addAlert $ makeAlertFiller True $
|
||||
transferFileAlert direction True (fromRawFilePath af)
|
||||
transferFileAlert direction True (fromOsPath af)
|
||||
unless isdownload $
|
||||
handleDrops
|
||||
("object uploaded to " ++ show remote)
|
||||
|
|
|
@ -9,10 +9,10 @@
|
|||
|
||||
module Assistant.Types.Changes where
|
||||
|
||||
import Common
|
||||
import Types.KeySource
|
||||
import Types.Key
|
||||
import Utility.TList
|
||||
import Utility.FileSystemEncoding
|
||||
import Annex.Ingest
|
||||
|
||||
import Control.Concurrent.STM
|
||||
|
@ -34,12 +34,12 @@ newChangePool = atomically newTList
|
|||
data Change
|
||||
= Change
|
||||
{ changeTime :: UTCTime
|
||||
, _changeFile :: FilePath
|
||||
, _changeFile :: OsPath
|
||||
, changeInfo :: ChangeInfo
|
||||
}
|
||||
| PendingAddChange
|
||||
{ changeTime ::UTCTime
|
||||
, _changeFile :: FilePath
|
||||
, _changeFile :: OsPath
|
||||
}
|
||||
| InProcessAddChange
|
||||
{ changeTime ::UTCTime
|
||||
|
@ -55,10 +55,10 @@ changeInfoKey (AddKeyChange k) = Just k
|
|||
changeInfoKey (LinkChange (Just k)) = Just k
|
||||
changeInfoKey _ = Nothing
|
||||
|
||||
changeFile :: Change -> FilePath
|
||||
changeFile :: Change -> OsPath
|
||||
changeFile (Change _ f _) = f
|
||||
changeFile (PendingAddChange _ f) = f
|
||||
changeFile (InProcessAddChange _ ld) = fromRawFilePath $ keyFilename $ keySource ld
|
||||
changeFile (InProcessAddChange _ ld) = keyFilename $ keySource ld
|
||||
|
||||
isPendingAddChange :: Change -> Bool
|
||||
isPendingAddChange (PendingAddChange {}) = True
|
||||
|
|
|
@ -34,7 +34,7 @@ describeUnusedWhenBig = describeUnused' True
|
|||
- than the remaining free disk space, or more than 1/10th the total
|
||||
- disk space being unused keys all suggest a problem. -}
|
||||
describeUnused' :: Bool -> Assistant (Maybe TenseText)
|
||||
describeUnused' whenbig = liftAnnex $ go =<< readUnusedLog ""
|
||||
describeUnused' whenbig = liftAnnex $ go =<< readUnusedLog (literalOsPath "")
|
||||
where
|
||||
go m = do
|
||||
let num = M.size m
|
||||
|
@ -64,13 +64,13 @@ describeUnused' whenbig = liftAnnex $ go =<< readUnusedLog ""
|
|||
|
||||
sumkeysize s k = s + fromMaybe 0 (fromKey keySize k)
|
||||
|
||||
forpath a = inRepo $ liftIO . a . fromRawFilePath . Git.repoPath
|
||||
forpath a = inRepo $ liftIO . a . fromOsPath . Git.repoPath
|
||||
|
||||
{- With a duration, expires all unused files that are older.
|
||||
- With Nothing, expires *all* unused files. -}
|
||||
expireUnused :: Maybe Duration -> Assistant ()
|
||||
expireUnused duration = do
|
||||
m <- liftAnnex $ readUnusedLog ""
|
||||
m <- liftAnnex $ readUnusedLog (literalOsPath "")
|
||||
now <- liftIO getPOSIXTime
|
||||
let oldkeys = M.keys $ M.filter (tooold now) m
|
||||
forM_ oldkeys $ \k -> do
|
||||
|
|
|
@ -5,6 +5,7 @@
|
|||
- Licensed under the GNU AGPL version 3 or higher.
|
||||
-}
|
||||
|
||||
{-# LANGUAGE OverloadedStrings #-}
|
||||
{-# LANGUAGE CPP #-}
|
||||
|
||||
module Assistant.Upgrade where
|
||||
|
@ -42,10 +43,10 @@ import qualified Annex.Url as Url hiding (download)
|
|||
import Utility.Tuple
|
||||
import qualified Utility.RawFilePath as R
|
||||
import qualified Utility.FileIO as F
|
||||
import qualified Utility.OsString as OS
|
||||
|
||||
import Data.Either
|
||||
import qualified Data.Map as M
|
||||
import qualified System.FilePath.ByteString as P
|
||||
|
||||
{- Upgrade without interaction in the webapp. -}
|
||||
unattendedUpgrade :: Assistant ()
|
||||
|
@ -89,12 +90,12 @@ startDistributionDownload d = go =<< liftIO . newVersionLocation d =<< liftIO ol
|
|||
hook <- asIO1 $ distributionDownloadComplete d dest cleanup
|
||||
modifyDaemonStatus_ $ \s -> s
|
||||
{ transferHook = M.insert k hook (transferHook s) }
|
||||
maybe noop (queueTransfer "upgrade" Next (AssociatedFile (Just (toRawFilePath f))) t)
|
||||
maybe noop (queueTransfer "upgrade" Next (AssociatedFile (Just f)) t)
|
||||
=<< liftAnnex (remoteFromUUID webUUID)
|
||||
startTransfer t
|
||||
k = mkKey $ const $ distributionKey d
|
||||
u = distributionUrl d
|
||||
f = takeFileName u ++ " (for upgrade)"
|
||||
f = takeFileName (toOsPath u) <> literalOsPath " (for upgrade)"
|
||||
t = Transfer
|
||||
{ transferDirection = Download
|
||||
, transferUUID = webUUID
|
||||
|
@ -110,7 +111,7 @@ startDistributionDownload d = go =<< liftIO . newVersionLocation d =<< liftIO ol
|
|||
-
|
||||
- Verifies the content of the downloaded key.
|
||||
-}
|
||||
distributionDownloadComplete :: GitAnnexDistribution -> FilePath -> Assistant () -> Transfer -> Assistant ()
|
||||
distributionDownloadComplete :: GitAnnexDistribution -> OsPath -> Assistant () -> Transfer -> Assistant ()
|
||||
distributionDownloadComplete d dest cleanup t
|
||||
| transferDirection t == Download = do
|
||||
debug ["finished downloading git-annex distribution"]
|
||||
|
@ -120,11 +121,11 @@ distributionDownloadComplete d dest cleanup t
|
|||
where
|
||||
k = mkKey $ const $ distributionKey d
|
||||
fsckit f = Backend.maybeLookupBackendVariety (fromKey keyVariety k) >>= \case
|
||||
Nothing -> return $ Just (fromRawFilePath f)
|
||||
Nothing -> return $ Just f
|
||||
Just b -> case Types.Backend.verifyKeyContent b of
|
||||
Nothing -> return $ Just (fromRawFilePath f)
|
||||
Nothing -> return $ Just f
|
||||
Just verifier -> ifM (verifier k f)
|
||||
( return $ Just (fromRawFilePath f)
|
||||
( return $ Just f
|
||||
, return Nothing
|
||||
)
|
||||
go f = do
|
||||
|
@ -142,7 +143,7 @@ distributionDownloadComplete d dest cleanup t
- and unpack the new distribution next to it (in a versioned directory).
- Then update the programFile to point to the new version.
-}
upgradeToDistribution :: FilePath -> Assistant () -> FilePath -> Assistant ()
upgradeToDistribution :: OsPath -> Assistant () -> OsPath -> Assistant ()
upgradeToDistribution newdir cleanup distributionfile = do
liftIO $ createDirectoryIfMissing True newdir
(program, deleteold) <- unpack

@ -156,92 +157,92 @@ upgradeToDistribution newdir cleanup distributionfile = do
postUpgrade url
where
changeprogram program = liftIO $ do
unlessM (boolSystem program [Param "version"]) $
unlessM (boolSystem (fromOsPath program) [Param "version"]) $
giveup "New git-annex program failed to run! Not using."
pf <- programFile
liftIO $ writeFile pf program
liftIO $ writeFile (fromOsPath pf) (fromOsPath program)

#ifdef darwin_HOST_OS
{- OS X uses a dmg, so mount it, and copy the contents into place. -}
unpack = liftIO $ do
olddir <- oldVersionLocation
withTmpDirIn (fromRawFilePath (parentDir (toRawFilePath newdir))) "git-annex.upgrade" $ \tmpdir -> do
withTmpDirIn (parentDir newdir) (literalOsPath "git-annex.upgrade") $ \tmpdir -> do
void $ boolSystem "hdiutil"
[ Param "attach", File distributionfile
, Param "-mountpoint", File tmpdir
, Param "-mountpoint", File (fromOsPath tmpdir)
]
void $ boolSystem "cp"
[ Param "-R"
, File $ tmpdir </> installBase </> "Contents"
, File $ fromOsPath $ tmpdir </> toOsPath installBase </> literalOsPath "Contents"
, File $ newdir
]
void $ boolSystem "hdiutil"
[ Param "eject"
, File tmpdir
, File (fromOsPath tmpdir)
]
sanitycheck newdir
let deleteold = do
deleteFromManifest $ olddir </> "Contents" </> "MacOS"
deleteFromManifest $ toOsPath olddir </> literalOsPath "Contents" </> literalOsPath "MacOS"
makeorigsymlink olddir
return (newdir </> "Contents" </> "MacOS" </> "git-annex", deleteold)
return (newdir </> literalOsPath "Contents" </> literalOsPath "MacOS" </> literalOsPath "git-annex", deleteold)
#else
{- Linux uses a tarball (so could other POSIX systems), so
- untar it (into a temp directory) and move the directory
- into place. -}
unpack = liftIO $ do
olddir <- oldVersionLocation
withTmpDirIn (fromRawFilePath $ parentDir $ toRawFilePath newdir) (toOsPath $ toRawFilePath "git-annex.upgrade") $ \tmpdir -> do
let tarball = tmpdir </> "tar"
withTmpDirIn (parentDir newdir) (literalOsPath "git-annex.upgrade") $ \tmpdir -> do
let tarball = tmpdir </> literalOsPath "tar"
-- Cannot rely on filename extension, and this also
-- avoids problems if tar doesn't support transparent
-- decompression.
void $ boolSystem "sh"
[ Param "-c"
, Param $ "zcat < " ++ shellEscape distributionfile ++
" > " ++ shellEscape tarball
, Param $ "zcat < " ++ shellEscape (fromOsPath distributionfile) ++
" > " ++ shellEscape (fromOsPath tarball)
]
tarok <- boolSystem "tar"
[ Param "xf"
, Param tarball
, Param "--directory", File tmpdir
, Param (fromOsPath tarball)
, Param "--directory", File (fromOsPath tmpdir)
]
unless tarok $
giveup $ "failed to untar " ++ distributionfile
sanitycheck $ tmpdir </> installBase
installby R.rename newdir (tmpdir </> installBase)
giveup $ "failed to untar " ++ fromOsPath distributionfile
sanitycheck $ tmpdir </> toOsPath installBase
installby R.rename newdir (tmpdir </> toOsPath installBase)
let deleteold = do
deleteFromManifest olddir
makeorigsymlink olddir
return (newdir </> "git-annex", deleteold)
return (newdir </> literalOsPath "git-annex", deleteold)
installby a dstdir srcdir =
mapM_ (\x -> a x (toRawFilePath dstdir P.</> P.takeFileName x))
=<< dirContents (toRawFilePath srcdir)
mapM_ (\x -> a (fromOsPath x) (fromOsPath (dstdir </> takeFileName x)))
=<< dirContents srcdir
#endif
sanitycheck dir =
unlessM (doesDirectoryExist dir) $
giveup $ "did not find " ++ dir ++ " in " ++ distributionfile
giveup $ "did not find " ++ fromOsPath dir ++ " in " ++ fromOsPath distributionfile
makeorigsymlink olddir = do
let origdir = fromRawFilePath (parentDir (toRawFilePath olddir)) </> installBase
removeWhenExistsWith R.removeLink (toRawFilePath origdir)
R.createSymbolicLink (toRawFilePath newdir) (toRawFilePath origdir)
let origdir = parentDir olddir </> toOsPath installBase
removeWhenExistsWith removeFile origdir
R.createSymbolicLink (fromOsPath newdir) (fromOsPath origdir)

{- Finds where the old version was installed. -}
oldVersionLocation :: IO FilePath
oldVersionLocation :: IO OsPath
oldVersionLocation = readProgramFile >>= \case
Nothing -> giveup "Cannot find old distribution bundle; not upgrading."
Just pf -> do
let pdir = fromRawFilePath $ parentDir $ toRawFilePath pf
let pdir = parentDir pf
#ifdef darwin_HOST_OS
let dirs = splitDirectories pdir
{- It will probably be deep inside a git-annex.app directory. -}
let olddir = case findIndex ("git-annex.app" `isPrefixOf`) dirs of
let olddir = case findIndex (literalOsPath "git-annex.app" `OS.isPrefixOf`) dirs of
Nothing -> pdir
Just i -> joinPath (take (i + 1) dirs)
#else
let olddir = pdir
#endif
when (null olddir) $
giveup $ "Cannot find old distribution bundle; not upgrading. (Looked in " ++ pdir ++ ")"
when (OS.null olddir) $
giveup $ "Cannot find old distribution bundle; not upgrading. (Looked in " ++ fromOsPath pdir ++ ")"
return olddir

{- Finds a place to install the new version.
@ -251,15 +252,15 @@ oldVersionLocation = readProgramFile >>= \case
-
- The directory is created. If it already exists, returns Nothing.
-}
newVersionLocation :: GitAnnexDistribution -> FilePath -> IO (Maybe FilePath)
newVersionLocation :: GitAnnexDistribution -> OsPath -> IO (Maybe OsPath)
newVersionLocation d olddir =
trymkdir newloc $ do
home <- myHomeDir
trymkdir (home </> s) $
trymkdir (toOsPath home </> s) $
return Nothing
where
s = installBase ++ "." ++ distributionVersion d
topdir = fromRawFilePath $ parentDir $ toRawFilePath olddir
s = toOsPath $ installBase ++ "." ++ distributionVersion d
topdir = parentDir olddir
newloc = topdir </> s
trymkdir dir fallback =
(createDirectory dir >> return (Just dir))

@ -277,24 +278,25 @@ installBase = "git-annex." ++
#endif
#endif

deleteFromManifest :: FilePath -> IO ()
deleteFromManifest :: OsPath -> IO ()
deleteFromManifest dir = do
fs <- map (dir </>) . lines <$> catchDefaultIO "" (readFile manifest)
mapM_ (removeWhenExistsWith R.removeLink . toRawFilePath) fs
removeWhenExistsWith R.removeLink (toRawFilePath manifest)
removeEmptyRecursive (toRawFilePath dir)
fs <- map (\f -> dir </> toOsPath f) . lines
<$> catchDefaultIO "" (readFile (fromOsPath manifest))
mapM_ (removeWhenExistsWith removeFile) fs
removeWhenExistsWith removeFile manifest
removeEmptyRecursive dir
where
manifest = dir </> "git-annex.MANIFEST"
manifest = dir </> literalOsPath "git-annex.MANIFEST"

removeEmptyRecursive :: RawFilePath -> IO ()
removeEmptyRecursive :: OsPath -> IO ()
removeEmptyRecursive dir = do
mapM_ removeEmptyRecursive =<< dirContents dir
void $ tryIO $ removeDirectory (fromRawFilePath dir)
void $ tryIO $ removeDirectory dir

{- This is a file that the UpgradeWatcher can watch for modifications to
- detect when git-annex has been upgraded.
-}
upgradeFlagFile :: IO FilePath
upgradeFlagFile :: IO OsPath
upgradeFlagFile = programPath

{- Sanity check to see if an upgrade is complete and the program is ready
@ -309,13 +311,13 @@ upgradeSanityCheck = ifM usingDistribution
program <- programPath
untilM (doesFileExist program <&&> nowriter program) $
threadDelaySeconds (Seconds 60)
boolSystem program [Param "version"]
boolSystem (fromOsPath program) [Param "version"]
)
where
nowriter f = null
. filter (`elem` [Lsof.OpenReadWrite, Lsof.OpenWriteOnly])
. map snd3
<$> Lsof.query [f]
<$> Lsof.query [fromOsPath f]

usingDistribution :: IO Bool
usingDistribution = isJust <$> getEnv "GIT_ANNEX_STANDLONE_ENV"

@ -324,14 +326,14 @@ downloadDistributionInfo :: Assistant (Maybe GitAnnexDistribution)
downloadDistributionInfo = do
uo <- liftAnnex Url.getUrlOptions
gpgcmd <- liftAnnex $ gpgCmd <$> Annex.getGitConfig
liftIO $ withTmpDir (toOsPath (toRawFilePath "git-annex.tmp")) $ \tmpdir -> do
let infof = tmpdir </> "info"
let sigf = infof ++ ".sig"
liftIO $ withTmpDir (literalOsPath "git-annex.tmp") $ \tmpdir -> do
let infof = tmpdir </> literalOsPath "info"
let sigf = infof <> literalOsPath ".sig"
ifM (isRight <$> Url.download nullMeterUpdate Nothing distributionInfoUrl infof uo
<&&> (isRight <$> Url.download nullMeterUpdate Nothing distributionInfoSigUrl sigf uo)
<&&> verifyDistributionSig gpgcmd sigf)
( parseInfoFile . map decodeBS . fileLines'
<$> F.readFile' (toOsPath (toRawFilePath infof))
<$> F.readFile' infof
, return Nothing
)

@ -360,20 +362,20 @@ upgradeSupported = False
- The gpg keyring used to verify the signature is located in
- trustedkeys.gpg, next to the git-annex program.
-}
verifyDistributionSig :: GpgCmd -> FilePath -> IO Bool
verifyDistributionSig :: GpgCmd -> OsPath -> IO Bool
verifyDistributionSig gpgcmd sig = readProgramFile >>= \case
Just p | isAbsolute p ->
withUmask 0o0077 $ withTmpDir (toOsPath (toRawFilePath "git-annex-gpg.tmp")) $ \gpgtmp -> do
let trustedkeys = takeDirectory p </> "trustedkeys.gpg"
withUmask 0o0077 $ withTmpDir (literalOsPath "git-annex-gpg.tmp") $ \gpgtmp -> do
let trustedkeys = takeDirectory p </> literalOsPath "trustedkeys.gpg"
boolGpgCmd gpgcmd
[ Param "--no-default-keyring"
, Param "--no-auto-check-trustdb"
, Param "--no-options"
, Param "--homedir"
, File gpgtmp
, File (fromOsPath gpgtmp)
, Param "--keyring"
, File trustedkeys
, File (fromOsPath trustedkeys)
, Param "--verify"
, File sig
, File (fromOsPath sig)
]
_ -> return False
@ -78,7 +78,7 @@ deleteCurrentRepository = dangerPage $ do
sanityVerifierAForm $ SanityVerifier magicphrase
case result of
FormSuccess _ -> liftH $ do
dir <- liftAnnex $ fromRawFilePath <$> fromRepo Git.repoPath
dir <- liftAnnex $ fromRepo Git.repoPath
liftIO $ removeAutoStartFile dir

{- Disable syncing to this repository, and all

@ -89,9 +89,8 @@ deleteCurrentRepository = dangerPage $ do
rs <- syncRemotes <$> getDaemonStatus
mapM_ (\r -> changeSyncable (Just r) False) rs

liftAnnex $ prepareRemoveAnnexDir (toRawFilePath dir)
liftIO $ removeDirectoryRecursive . fromRawFilePath
=<< absPath (toRawFilePath dir)
liftAnnex $ prepareRemoveAnnexDir dir
liftIO $ removeDirectoryRecursive =<< absPath dir

redirect ShutdownConfirmedR
_ -> $(widgetFile "configurators/delete/currentrepository")

@ -121,7 +121,7 @@ setRepoConfig uuid mremote oldc newc = do
Just t
| T.null t -> noop
| otherwise -> liftAnnex $ do
let dir = takeBaseName $ T.unpack t
let dir = fromOsPath $ takeBaseName $ toOsPath $ T.unpack t
m <- remoteConfigMap
case M.lookup uuid m of
Nothing -> noop

@ -246,8 +246,8 @@ checkAssociatedDirectory cfg (Just r) = do
case repoGroup cfg of
RepoGroupStandard gr -> case associatedDirectory repoconfig gr of
Just d -> do
top <- fromRawFilePath <$> fromRepo Git.repoPath
createWorkTreeDirectory (toRawFilePath (top </> d))
top <- fromRepo Git.repoPath
createWorkTreeDirectory (top </> toOsPath d)
Nothing -> noop
_ -> noop
@ -81,24 +81,24 @@ checkRepositoryPath :: Text -> IO (Either (SomeMessage WebApp) (Maybe Text))
checkRepositoryPath p = do
home <- myHomeDir
let basepath = expandTilde home $ T.unpack p
path <- fromRawFilePath <$> absPath (toRawFilePath basepath)
let parent = fromRawFilePath $ parentDir (toRawFilePath path)
path <- absPath basepath
let parent = parentDir path
problems <- catMaybes <$> mapM runcheck
[ (return $ path == "/", "Enter the full path to use for the repository.")
, (return $ all isSpace basepath, "A blank path? Seems unlikely.")
[ (return $ path == literalOsPath "/", "Enter the full path to use for the repository.")
, (return $ all isSpace (fromOsPath basepath :: FilePath), "A blank path? Seems unlikely.")
, (doesFileExist path, "A file already exists with that name.")
, (return $ path == home, "Sorry, using git-annex for your whole home directory is not currently supported.")
, (return $ fromOsPath path == home, "Sorry, using git-annex for your whole home directory is not currently supported.")
, (not <$> doesDirectoryExist parent, "Parent directory does not exist.")
, (not <$> canWrite path, "Cannot write a repository there.")
]
return $
case headMaybe problems of
Nothing -> Right $ Just $ T.pack basepath
Nothing -> Right $ Just $ T.pack $ fromOsPath basepath
Just prob -> Left prob
where
runcheck (chk, msg) = ifM chk ( return $ Just msg, return Nothing )
expandTilde home ('~':'/':path) = home </> path
expandTilde _ path = path
expandTilde home ('~':'/':path) = toOsPath home </> toOsPath path
expandTilde _ path = toOsPath path

{- On first run, if run in the home directory, default to putting it in
- ~/Desktop/annex, when a Desktop directory exists, and ~/annex otherwise.

@ -110,12 +110,12 @@ checkRepositoryPath p = do
- the user probably wants to put it there. Unless that directory
- contains a git-annex file, in which case the user has probably
- browsed to a directory with git-annex and run it from there. -}
defaultRepositoryPath :: Bool -> IO FilePath
defaultRepositoryPath :: Bool -> IO OsPath
defaultRepositoryPath firstrun = do
#ifndef mingw32_HOST_OS
home <- myHomeDir
currdir <- liftIO getCurrentDirectory
if home == currdir && firstrun
if toOsPath home == currdir && firstrun
then inhome
else ifM (legit currdir <&&> canWrite currdir)
( return currdir
@ -130,29 +130,29 @@ defaultRepositoryPath firstrun = do
where
inhome = ifM osAndroid
( do
home <- myHomeDir
let storageshared = home </> "storage" </> "shared"
home <- toOsPath <$> myHomeDir
let storageshared = home </> literalOsPath "storage" </> literalOsPath "shared"
ifM (doesDirectoryExist storageshared)
( relHome $ storageshared </> gitAnnexAssistantDefaultDir
, return $ "~" </> gitAnnexAssistantDefaultDir
, return $ literalOsPath "~" </> gitAnnexAssistantDefaultDir
)
, do
desktop <- userDesktopDir
desktop <- toOsPath <$> userDesktopDir
ifM (doesDirectoryExist desktop <&&> canWrite desktop)
( relHome $ desktop </> gitAnnexAssistantDefaultDir
, return $ "~" </> gitAnnexAssistantDefaultDir
, return $ literalOsPath "~" </> gitAnnexAssistantDefaultDir
)
)
#ifndef mingw32_HOST_OS
-- Avoid using eg, standalone build's git-annex.linux/ directory
-- when run from there.
legit d = not <$> doesFileExist (d </> "git-annex")
legit d = not <$> doesFileExist (d </> literalOsPath "git-annex")
#endif

newRepositoryForm :: FilePath -> Hamlet.Html -> MkMForm RepositoryPath
newRepositoryForm :: OsPath -> Hamlet.Html -> MkMForm RepositoryPath
newRepositoryForm defpath msg = do
(pathRes, pathView) <- mreq (repositoryPathField True) (bfs "")
(Just $ T.pack $ addTrailingPathSeparator defpath)
(Just $ T.pack $ fromOsPath $ addTrailingPathSeparator defpath)
let (err, errmsg) = case pathRes of
FormMissing -> (False, "")
FormFailure l -> (True, concatMap T.unpack l)
@ -174,17 +174,17 @@ postFirstRepositoryR = page "Getting started" (Just Configuration) $ do
((res, form), enctype) <- liftH $ runFormPostNoToken $ newRepositoryForm path
case res of
FormSuccess (RepositoryPath p) -> liftH $
startFullAssistant (T.unpack p) ClientGroup Nothing
startFullAssistant (toOsPath $ T.unpack p) ClientGroup Nothing
_ -> $(widgetFile "configurators/newrepository/first")

getAndroidCameraRepositoryR :: Handler ()
getAndroidCameraRepositoryR = do
home <- liftIO myHomeDir
let dcim = home </> "storage" </> "dcim"
let dcim = toOsPath home </> literalOsPath "storage" </> literalOsPath "dcim"
startFullAssistant dcim SourceGroup $ Just addignore
where
addignore = do
liftIO $ unlessM (doesFileExist ".gitignore") $
liftIO $ unlessM (doesFileExist $ literalOsPath ".gitignore") $
writeFile ".gitignore" ".thumbnails"
void $ inRepo $
Git.Command.runBool [Param "add", File ".gitignore"]

@ -195,20 +195,21 @@ getNewRepositoryR :: Handler Html
getNewRepositoryR = postNewRepositoryR
postNewRepositoryR :: Handler Html
postNewRepositoryR = page "Add another repository" (Just Configuration) $ do
home <- liftIO myHomeDir
home <- toOsPath <$> liftIO myHomeDir
((res, form), enctype) <- liftH $ runFormPostNoToken $ newRepositoryForm home
case res of
FormSuccess (RepositoryPath p) -> do
let path = T.unpack p
let path = toOsPath (T.unpack p)
isnew <- liftIO $ makeRepo path False
u <- liftIO $ initRepo isnew True path Nothing (Just ClientGroup)
liftIO $ addAutoStartFile path
liftIO $ startAssistant path
askcombine u path
askcombine u (fromOsPath path)
_ -> $(widgetFile "configurators/newrepository")
where
askcombine newrepouuid newrepopath = do
newrepo <- liftIO $ relHome newrepopath
newrepo' <- liftIO $ relHome (toOsPath newrepopath)
let newrepo = fromOsPath newrepo' :: FilePath
mainrepo <- fromJust . relDir <$> liftH getYesod
$(widgetFile "configurators/newrepository/combine")
@ -222,17 +223,18 @@ immediateSyncRemote r = do

getCombineRepositoryR :: FilePath -> UUID -> Handler Html
getCombineRepositoryR newrepopath newrepouuid = do
liftAssistant . immediateSyncRemote =<< combineRepos newrepopath remotename
liftAssistant . immediateSyncRemote
=<< combineRepos (toOsPath newrepopath) remotename
redirect $ EditRepositoryR $ RepoUUID newrepouuid
where
remotename = takeFileName newrepopath
remotename = fromOsPath $ takeFileName $ toOsPath newrepopath

selectDriveForm :: [RemovableDrive] -> Hamlet.Html -> MkMForm RemovableDrive
selectDriveForm drives = renderBootstrap3 bootstrapFormLayout $ RemovableDrive
<$> pure Nothing
<*> areq (selectFieldList pairs `withNote` onlywritable) (bfs "Select drive:") Nothing
<*> areq textField (bfs "Use this directory on the drive:")
(Just $ T.pack gitAnnexAssistantDefaultDir)
(Just $ T.pack $ fromOsPath gitAnnexAssistantDefaultDir)
where
pairs = zip (map describe drives) (map mountPoint drives)
describe drive = case diskFree drive of

@ -246,9 +248,9 @@ selectDriveForm drives = renderBootstrap3 bootstrapFormLayout $ RemovableDrive
]
onlywritable = [whamlet|This list only includes drives you can write to.|]

removableDriveRepository :: RemovableDrive -> FilePath
removableDriveRepository :: RemovableDrive -> OsPath
removableDriveRepository drive =
T.unpack (mountPoint drive) </> T.unpack (driveRepoPath drive)
toOsPath (T.unpack (mountPoint drive)) </> toOsPath (T.unpack (driveRepoPath drive))

{- Adding a removable drive. -}
getAddDriveR :: Handler Html

@ -257,7 +259,7 @@ postAddDriveR :: Handler Html
postAddDriveR = page "Add a removable drive" (Just Configuration) $ do
removabledrives <- liftIO driveList
writabledrives <- liftIO $
filterM (canWrite . T.unpack . mountPoint) removabledrives
filterM (canWrite . toOsPath . T.unpack . mountPoint) removabledrives
((res, form), enctype) <- liftH $ runFormPostNoToken $
selectDriveForm (sort writabledrives)
case res of

@ -277,7 +279,7 @@ getConfirmAddDriveR drive = ifM (liftIO $ probeRepoExists dir)
mu <- liftIO $ probeUUID dir
case mu of
Nothing -> maybe askcombine isknownuuid
=<< liftAnnex (probeGCryptRemoteUUID dir)
=<< liftAnnex (probeGCryptRemoteUUID $ fromOsPath dir)
Just driveuuid -> isknownuuid driveuuid
, newrepo
)
@ -317,19 +319,19 @@ getFinishAddDriveR drive = go
where
go (RepoKey keyid) = whenGcryptInstalled $ makewith $ const $ do
r <- liftAnnex $ addRemote $
makeGCryptRemote remotename dir keyid
makeGCryptRemote remotename (fromOsPath dir) keyid
return (Types.Remote.uuid r, r)
go NoRepoKey = checkGCryptRepoEncryption dir makeunencrypted makeunencrypted $ do
mu <- liftAnnex $ probeGCryptRemoteUUID dir
go NoRepoKey = checkGCryptRepoEncryption (fromOsPath dir) makeunencrypted makeunencrypted $ do
mu <- liftAnnex $ probeGCryptRemoteUUID (fromOsPath dir)
case mu of
Just u -> enableexistinggcryptremote u
Nothing -> giveup "The drive contains a gcrypt repository that is not a git-annex special remote. This is not supported."
enableexistinggcryptremote u = do
remotename' <- liftAnnex $ getGCryptRemoteName u dir
remotename' <- liftAnnex $ getGCryptRemoteName u (fromOsPath dir)
makewith $ const $ do
r <- liftAnnex $ addRemote $
enableSpecialRemote remotename' GCrypt.remote Nothing $ M.fromList
[(Proposed "gitrepo", Proposed dir)]
[(Proposed "gitrepo", Proposed (fromOsPath dir))]
return (u, r)
{- Making a new unencrypted repo, or combining with an existing one. -}
makeunencrypted = makewith $ \isnew -> (,)

@ -347,21 +349,19 @@ getFinishAddDriveR drive = go
liftAnnex $ defaultStandardGroup u TransferGroup
liftAssistant $ immediateSyncRemote r
redirect $ EditNewRepositoryR u
mountpoint = T.unpack (mountPoint drive)
mountpoint = toOsPath $ T.unpack (mountPoint drive)
dir = removableDriveRepository drive
remotename = takeFileName mountpoint
remotename = fromOsPath $ takeFileName mountpoint

{- Each repository is made a remote of the other.
- Next call syncRemote to get them in sync. -}
combineRepos :: FilePath -> String -> Handler Remote
combineRepos :: OsPath -> String -> Handler Remote
combineRepos dir name = liftAnnex $ do
hostname <- fromMaybe "host" <$> liftIO getHostname
mylocation <- fromRepo Git.repoLocation
mypath <- liftIO $ fromRawFilePath <$> relPathDirToFile
(toRawFilePath dir)
(toRawFilePath mylocation)
liftIO $ inDir dir $ void $ makeGitRemote hostname mypath
addRemote $ makeGitRemote name dir
mylocation <- fromRepo Git.repoPath
mypath <- liftIO $ relPathDirToFile dir mylocation
liftIO $ inDir dir $ void $ makeGitRemote hostname (fromOsPath mypath)
addRemote $ makeGitRemote name (fromOsPath dir)

getEnableDirectoryR :: UUID -> Handler Html
getEnableDirectoryR uuid = page "Enable a repository" (Just Configuration) $ do
@ -396,12 +396,12 @@ genRemovableDrive :: FilePath -> IO RemovableDrive
genRemovableDrive dir = RemovableDrive
<$> getDiskFree dir
<*> pure (T.pack dir)
<*> pure (T.pack gitAnnexAssistantDefaultDir)
<*> pure (T.pack $ fromOsPath gitAnnexAssistantDefaultDir)

{- Bootstraps from first run mode to a fully running assistant in a
- repository, by running the postFirstRun callback, which returns the
- url to the new webapp. -}
startFullAssistant :: FilePath -> StandardGroup -> Maybe (Annex ())-> Handler ()
startFullAssistant :: OsPath -> StandardGroup -> Maybe (Annex ())-> Handler ()
startFullAssistant path repogroup setup = do
webapp <- getYesod
url <- liftIO $ do

@ -417,17 +417,17 @@ startFullAssistant path repogroup setup = do
-
- The directory may be in the process of being created; if so
- the parent directory is checked instead. -}
canWrite :: FilePath -> IO Bool
canWrite :: OsPath -> IO Bool
canWrite dir = do
tocheck <- ifM (doesDirectoryExist dir)
( return dir
, return $ fromRawFilePath $ parentDir $ toRawFilePath dir
, return $ parentDir dir
)
catchBoolIO $ R.fileAccess (toRawFilePath tocheck) False True False
catchBoolIO $ R.fileAccess (fromOsPath tocheck) False True False

{- Gets the UUID of the git repo at a location, which may not exist, or
- not be a git-annex repo. -}
probeUUID :: FilePath -> IO (Maybe UUID)
probeUUID :: OsPath -> IO (Maybe UUID)
probeUUID dir = catchDefaultIO Nothing $ inDir dir $ do
u <- getUUID
return $ if u == NoUUID then Nothing else Just u
@ -72,7 +72,7 @@ getPrepareWormholePairR pairingwith = do

enableTor :: Handler ()
enableTor = do
gitannex <- liftIO programPath
gitannex <- fromOsPath <$> liftIO programPath
(transcript, ok) <- liftIO $ processTranscript gitannex ["enable-tor"] Nothing
if ok
-- Reload remotedameon so it's serving the tor hidden

@ -173,7 +173,7 @@ getFinishLocalPairR = postFinishLocalPairR
postFinishLocalPairR :: PairMsg -> Handler Html
#ifdef WITH_PAIRING
postFinishLocalPairR msg = promptSecret (Just msg) $ \_ secret -> do
repodir <- liftH $ fromRawFilePath . repoPath <$> liftAnnex gitRepo
repodir <- liftH $ repoPath <$> liftAnnex gitRepo
liftIO $ setup repodir
startLocalPairing PairAck (cleanup repodir) alert uuid "" secret
where

@ -23,7 +23,6 @@ import Types.Distribution
import Assistant.Upgrade

import qualified Data.Text as T
import qualified System.FilePath.ByteString as P

data PrefsForm = PrefsForm
{ diskReserve :: Text

@ -89,7 +88,7 @@ storePrefs p = do
unsetConfig (annexConfig "numcopies") -- deprecated
setConfig (annexConfig "autoupgrade") (fromAutoUpgrade $ autoUpgrade p)
unlessM ((==) <$> pure (autoStart p) <*> inAutoStartFile) $ do
here <- fromRawFilePath <$> fromRepo Git.repoPath
here <- fromRepo Git.repoPath
liftIO $ if autoStart p
then addAutoStartFile here
else removeAutoStartFile here

@ -110,5 +109,4 @@ postPreferencesR = page "Preferences" (Just Configuration) $ do
inAutoStartFile :: Annex Bool
inAutoStartFile = do
here <- liftIO . absPath =<< fromRepo Git.repoPath
any (`P.equalFilePath` here) . map toRawFilePath
<$> liftIO readAutoStartFile
any (`equalFilePath` here) <$> liftIO readAutoStartFile
@ -76,7 +76,7 @@ mkSshData s = SshData
, sshDirectory = fromMaybe "" $ inputDirectory s
, sshRepoName = genSshRepoName
(T.unpack $ fromJust $ inputHostname s)
(maybe "" T.unpack $ inputDirectory s)
(toOsPath (maybe "" T.unpack $ inputDirectory s))
, sshPort = inputPort s
, needsPubKey = False
, sshCapabilities = [] -- untested

@ -101,7 +101,7 @@ sshInputAForm hostnamefield d = normalize <$> gen
<*> aopt check_username (bfs "User name") (Just $ inputUsername d)
<*> areq (selectFieldList authmethods) (bfs "Authenticate with") (Just $ inputAuthMethod d)
<*> aopt passwordField (bfs "Password") Nothing
<*> aopt textField (bfs "Directory") (Just $ Just $ fromMaybe (T.pack gitAnnexAssistantDefaultDir) $ inputDirectory d)
<*> aopt textField (bfs "Directory") (Just $ Just $ fromMaybe (T.pack $ fromOsPath gitAnnexAssistantDefaultDir) $ inputDirectory d)
<*> areq intField (bfs "Port") (Just $ inputPort d)

authmethods :: [(Text, AuthMethod)]

@ -389,13 +389,13 @@ sshAuthTranscript sshinput opts sshhost cmd input = case inputAuthMethod sshinpu
v <- getCachedCred login
liftIO $ case v of
Nothing -> go [passwordprompts 0] Nothing
Just pass -> withTmpFile (toOsPath "ssh") $ \passfile h -> do
Just pass -> withTmpFile (literalOsPath "ssh") $ \passfile h -> do
hClose h
writeFileProtected (fromOsPath passfile) pass
writeFileProtected passfile pass
environ <- getEnvironment
let environ' = addEntries
[ ("SSH_ASKPASS", program)
, (sshAskPassEnv, fromRawFilePath $ fromOsPath passfile)
[ ("SSH_ASKPASS", fromOsPath program)
, (sshAskPassEnv, fromOsPath passfile)
, ("DISPLAY", ":0")
] environ
go [passwordprompts 1] (Just environ')

@ -531,7 +531,7 @@ prepSsh' needsinit origsshdata sshdata keypair a
]
, if needsinit then Just (wrapCommand "git annex init") else Nothing
, if needsPubKey origsshdata
then addAuthorizedKeysCommand (hasCapability origsshdata GitAnnexShellCapable) remotedir . sshPubKey <$> keypair
then addAuthorizedKeysCommand (hasCapability origsshdata GitAnnexShellCapable) (toOsPath remotedir) . sshPubKey <$> keypair
else Nothing
]
rsynconly = onlyCapability origsshdata RsyncCapable

@ -602,7 +602,7 @@ postAddRsyncNetR = do
|]
go sshinput = do
let reponame = genSshRepoName "rsync.net"
(maybe "" T.unpack $ inputDirectory sshinput)
(toOsPath (maybe "" T.unpack $ inputDirectory sshinput))

prepRsyncNet sshinput reponame $ \sshdata -> inpage $
checkExistingGCrypt sshdata $ do

@ -51,7 +51,7 @@ postConfigUnusedR = page "Unused files" (Just Configuration) $ do
redirect ConfigurationR
_ -> do
munuseddesc <- liftAssistant describeUnused
ts <- liftAnnex $ dateUnusedLog ""
ts <- liftAnnex $ dateUnusedLog (literalOsPath "")
mlastchecked <- case ts of
Nothing -> pure Nothing
Just t -> Just <$> liftIO (durationSince t)

@ -73,6 +73,6 @@ getRestartThreadR name = do
getLogR :: Handler Html
getLogR = page "Logs" Nothing $ do
logfile <- liftAnnex $ fromRepo gitAnnexDaemonLogFile
logs <- liftIO $ listLogs (fromRawFilePath logfile)
logs <- liftIO $ listLogs (fromOsPath logfile)
logcontent <- liftIO $ concat <$> mapM readFile logs
$(widgetFile "control/log")
@ -45,7 +45,7 @@ transfersDisplay = do
transferPaused info || isNothing (startedTime info)
desc transfer info = case associatedFile info of
AssociatedFile Nothing -> serializeKey $ transferKey transfer
AssociatedFile (Just af) -> fromRawFilePath af
AssociatedFile (Just af) -> fromOsPath af

{- Simplifies a list of transfers, avoiding display of redundant
- equivalent transfers. -}

@ -118,7 +118,7 @@ getFileBrowserR = whenM openFileBrowser redirectBack
- blocking the response to the browser on it. -}
openFileBrowser :: Handler Bool
openFileBrowser = do
path <- fromRawFilePath
path <- fromOsPath
<$> (liftIO . absPath =<< liftAnnex (fromRepo Git.repoPath))
#ifdef darwin_HOST_OS
let cmd = "open"

@ -16,10 +16,10 @@ import BuildFlags

{- The full license info may be included in a file on disk that can
- be read in and displayed. -}
licenseFile :: IO (Maybe FilePath)
licenseFile :: IO (Maybe OsPath)
licenseFile = do
base <- standaloneAppBase
return $ (</> "LICENSE") <$> base
return $ (</> literalOsPath "LICENSE") <$> base

getAboutR :: Handler Html
getAboutR = page "About git-annex" (Just About) $ do

@ -34,7 +34,7 @@ getLicenseR = do
Just f -> customPage (Just About) $ do
-- no sidebar, just pages of legalese..
setTitle "License"
license <- liftIO $ readFile f
license <- liftIO $ readFile (fromOsPath f)
$(widgetFile "documentation/license")

getRepoGroupR :: Handler Html

@ -15,7 +15,6 @@ import Assistant.WebApp.Page
import Config.Files.AutoStart
import Utility.Yesod
import Assistant.Restart
import qualified Utility.RawFilePath as R

getRepositorySwitcherR :: Handler Html
getRepositorySwitcherR = page "Switch repository" Nothing $ do

@ -25,15 +24,16 @@ getRepositorySwitcherR = page "Switch repository" Nothing $ do
listOtherRepos :: IO [(String, String)]
listOtherRepos = do
dirs <- readAutoStartFile
pwd <- R.getCurrentDirectory
pwd <- getCurrentDirectory
gooddirs <- filterM isrepo $
filter (\d -> not $ toRawFilePath d `dirContains` pwd) dirs
filter (\d -> not $ d `dirContains` pwd) dirs
names <- mapM relHome gooddirs
return $ sort $ zip names gooddirs
return $ sort $ zip (map fromOsPath names) (map fromOsPath gooddirs)
where
isrepo d = doesDirectoryExist (d </> ".git")
isrepo d = doesDirectoryExist (d </> literalOsPath ".git")

getSwitchToRepositoryR :: FilePath -> Handler Html
getSwitchToRepositoryR repo = do
liftIO $ addAutoStartFile repo -- make this the new default repo
redirect =<< liftIO (newAssistantUrl repo)
let repo' = toOsPath repo
liftIO $ addAutoStartFile repo' -- make this the new default repo
redirect =<< liftIO (newAssistantUrl repo')
@ -63,11 +63,11 @@ genKey source meterupdate b = case B.genKey b of
Nothing -> giveup $ "Cannot generate a key for backend " ++
decodeBS (formatKeyVariety (B.backendVariety b))

getBackend :: FilePath -> Key -> Annex (Maybe Backend)
getBackend :: OsPath -> Key -> Annex (Maybe Backend)
getBackend file k = maybeLookupBackendVariety (fromKey keyVariety k) >>= \case
Just backend -> return $ Just backend
Nothing -> do
warning $ "skipping " <> QuotedPath (toRawFilePath file) <> " (" <>
warning $ "skipping " <> QuotedPath file <> " (" <>
UnquotedString (unknownBackendVarietyMessage (fromKey keyVariety k)) <> ")"
return Nothing

@ -78,7 +78,7 @@ unknownBackendVarietyMessage v =
{- Looks up the backend that should be used for a file.
- That can be configured on a per-file basis in the gitattributes file,
- or forced with --backend. -}
chooseBackend :: RawFilePath -> Annex Backend
chooseBackend :: OsPath -> Annex Backend
chooseBackend f = Annex.getRead Annex.forcebackend >>= go
where
go Nothing = do

@ -96,7 +96,7 @@ genKeyExternal ebname hasext ks meterupdate =
withExternalState ebname hasext $ \st ->
handleRequest st req notavail go
where
req = GENKEY (fromRawFilePath (contentLocation ks))
req = GENKEY (fromOsPath (contentLocation ks))
notavail = giveup $ "Cannot generate a key, since " ++ externalBackendProgram ebname ++ " is not available."

go (GENKEY_SUCCESS pk) = Just $ Result <$> fromProtoKey pk hasext ks

@ -107,12 +107,12 @@ genKeyExternal ebname hasext ks meterupdate =
return $ GetNextMessage go
go _ = Nothing

verifyKeyContentExternal :: ExternalBackendName -> HasExt -> MeterUpdate -> Key -> RawFilePath -> Annex Bool
verifyKeyContentExternal :: ExternalBackendName -> HasExt -> MeterUpdate -> Key -> OsPath -> Annex Bool
verifyKeyContentExternal ebname hasext meterupdate k f =
withExternalState ebname hasext $ \st ->
handleRequest st req notavail go
where
req = VERIFYKEYCONTENT (toProtoKey k) (fromRawFilePath f)
req = VERIFYKEYCONTENT (toProtoKey k) (fromOsPath f)

-- This should not be able to happen, because CANVERIFY is checked
-- before this function is enable, and so the external program

@ -75,7 +75,7 @@ sameCheckSum key s = s == expected
expected = reverse $ takeWhile (/= '-') $ reverse $
decodeBS $ S.fromShort $ fromKey keyName key

genGitBundleKey :: UUID -> RawFilePath -> MeterUpdate -> Annex Key
genGitBundleKey :: UUID -> OsPath -> MeterUpdate -> Annex Key
genGitBundleKey remoteuuid file meterupdate = do
filesize <- liftIO $ getFileSize file
s <- Hash.hashFile hash file meterupdate
@ -127,7 +127,7 @@ keyValueE hash source meterupdate =
keyValue hash source meterupdate
>>= addE source (const $ hashKeyVariety hash (HasExt True))

checkKeyChecksum :: (Key -> String -> Bool) -> Hash -> Key -> RawFilePath -> Annex Bool
checkKeyChecksum :: (Key -> String -> Bool) -> Hash -> Key -> OsPath -> Annex Bool
checkKeyChecksum issame hash key file = catchIOErrorType HardwareFault hwfault $ do
showAction (UnquotedString descChecksum)
issame key

@ -187,7 +187,7 @@ trivialMigrate' oldkey newbackend afile maxextlen maxexts
AssociatedFile Nothing -> Nothing
AssociatedFile (Just file) -> Just $ alterKey oldkey $ \d -> d
{ keyName = S.toShort $ keyHash oldkey
<> selectExtension maxextlen maxexts file
<> selectExtension maxextlen maxexts (fromOsPath file)
, keyVariety = newvariety
}
{- Upgrade to fix bad previous migration that created a

@ -205,9 +205,9 @@ trivialMigrate' oldkey newbackend afile maxextlen maxexts
oldvariety = fromKey keyVariety oldkey
newvariety = backendVariety newbackend

hashFile :: Hash -> RawFilePath -> MeterUpdate -> Annex String
hashFile :: Hash -> OsPath -> MeterUpdate -> Annex String
hashFile hash file meterupdate =
liftIO $ withMeteredFile (fromRawFilePath file) meterupdate $ \b -> do
liftIO $ withMeteredFile file meterupdate $ \b -> do
let h = (fst $ hasher hash) b
-- Force full evaluation of hash so whole file is read
-- before returning.

@ -49,7 +49,7 @@ addE source sethasext k = do
let ext = selectExtension
(annexMaxExtensionLength c)
(annexMaxExtensions c)
(keyFilename source)
(fromOsPath (keyFilename source))
return $ alterKey k $ \d -> d
{ keyName = keyName d <> S.toShort ext
, keyVariety = sethasext (keyVariety d)

@ -43,7 +43,7 @@ migrateFromVURLToURL oldkey newbackend _af _
| otherwise = return Nothing

-- The Backend must use a cryptographically secure hash.
generateEquivilantKey :: Backend -> RawFilePath -> Annex (Maybe Key)
generateEquivilantKey :: Backend -> OsPath -> Annex (Maybe Key)
generateEquivilantKey b f =
case genKey b of
Just genkey -> do

@ -42,9 +42,9 @@ backend = Backend
keyValue :: KeySource -> MeterUpdate -> Annex Key
keyValue source _ = do
let f = contentLocation source
stat <- liftIO $ R.getFileStatus f
stat <- liftIO $ R.getFileStatus (fromOsPath f)
sz <- liftIO $ getFileSize' f stat
relf <- fromRawFilePath . getTopFilePath
relf <- fromOsPath . getTopFilePath
<$> inRepo (toTopFilePath $ keyFilename source)
return $ mkKey $ \k -> k
{ keyName = genKeyName relf
@ -11,6 +11,7 @@ import Utility.SafeCommand
import Utility.Env.Basic
import qualified Git.Version
import Utility.SystemDirectory
import Utility.OsPath

import Control.Monad
import Control.Applicative

@ -91,11 +92,11 @@ getSshConnectionCaching = Config "sshconnectioncaching" . BoolConfig <$>

setup :: IO ()
setup = do
createDirectoryIfMissing True tmpDir
createDirectoryIfMissing True (toOsPath tmpDir)
writeFile testFile "test file contents"

cleanup :: IO ()
cleanup = removeDirectoryRecursive tmpDir
cleanup = removeDirectoryRecursive (toOsPath tmpDir)

run :: [TestCase] -> IO ()
run ts = do
Some files were not shown because too many files have changed in this diff.
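The conversions shown above lean on a small set of helpers (toOsPath, fromOsPath, literalOsPath) from git-annex's own Utility.OsPath module. As a rough, self-contained sketch of what such helpers can look like when written directly against System.OsPath from the filepath library: the definitions below are illustrative stand-ins, not git-annex's actual implementation, and literalOsPath here is an ordinary function rather than a compile-time construct.

{-# LANGUAGE QuasiQuotes #-}
import System.OsPath (OsPath, osp, encodeUtf, decodeUtf, (</>))

-- Illustrative stand-in: convert a String FilePath to OsPath,
-- erroring out if it is not valid Unicode.
toOsPath :: FilePath -> OsPath
toOsPath f = maybe (error ("cannot encode path: " ++ f)) id (encodeUtf f)

-- Illustrative stand-in for the reverse conversion, used at the
-- boundaries where an API still wants a String FilePath.
fromOsPath :: OsPath -> FilePath
fromOsPath p = maybe (error "cannot decode path") id (decodeUtf p)

-- Illustrative stand-in: build an OsPath from a string literal.
literalOsPath :: String -> OsPath
literalOsPath = toOsPath

main :: IO ()
main = do
    -- the osp quasiquoter is the standard way to write OsPath literals
    let tmp = [osp|/tmp|] </> literalOsPath "git-annex.upgrade"
    putStrLn (fromOsPath tmp)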