more OsPath conversion

Sponsored-by: k0ld

commit 71195cce13 (parent 474cf3bc8b)
33 changed files with 198 additions and 194 deletions
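The pattern throughout this commit is uniform: paths stay as OsPath values internally, and conversion happens only at API boundaries. A minimal sketch of that style using the System.OsPath API from the filepath library (git-annex's own toOsPath/fromOsPath/literalOsPath helpers seen in the hunks below are thin wrappers around it; the paths here are illustrative):

    {-# LANGUAGE QuasiQuotes #-}

    import System.OsPath (OsPath, osp, (</>))
    import qualified System.OsPath as OP

    -- Append a literal suffix without leaving OsPath, as the
    -- prepareModifyIndex hunk below does with literalOsPath ".lock".
    lockPath :: OsPath -> OsPath
    lockPath index = index <> [osp|.lock|]

    main :: IO ()
    main = do
    	dir <- OP.encodeUtf "/tmp/repo"           -- String -> OsPath boundary
    	let p = lockPath (dir </> [osp|index|])
    	putStrLn =<< OP.decodeUtf p               -- OsPath -> String boundary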
Annex.hs | 2
@@ -221,7 +221,7 @@ data AnnexState = AnnexState
 	, existinghooks :: M.Map Git.Hook.Hook Bool
 	, workers :: Maybe (TMVar (WorkerPool (AnnexState, AnnexRead)))
 	, cachedcurrentbranch :: (Maybe (Maybe Git.Branch, Maybe Adjustment))
-	, cachedgitenv :: Maybe (AltIndexFile, FilePath, [(String, String)])
+	, cachedgitenv :: Maybe (AltIndexFile, OsPath, [(String, String)])
 	, urloptions :: Maybe UrlOptions
 	, insmudgecleanfilter :: Bool
 	, getvectorclock :: IO CandidateVectorClock
@@ -54,7 +54,6 @@ import Data.Char
 import Data.ByteString.Builder
 import Control.Concurrent (threadDelay)
 import Control.Concurrent.MVar
-import qualified System.FilePath.ByteString as P
 import System.PosixCompat.Files (isRegularFile)
 
 import Annex.Common
@@ -644,7 +643,7 @@ branchFiles :: Annex ([OsPath], IO Bool)
 branchFiles = withIndex $ inRepo branchFiles'
 
 branchFiles' :: Git.Repo -> IO ([OsPath], IO Bool)
-branchFiles' = Git.Command.pipeNullSplit' $
+branchFiles' = Git.Command.pipeNullSplit'' toOsPath $
 	lsTreeParams Git.LsTree.LsTreeRecursive (Git.LsTree.LsTreeLong False)
 	fullname
 	[Param "--name-only"]
@@ -681,7 +680,8 @@ mergeIndex jl branches = do
 prepareModifyIndex :: JournalLocked -> Annex ()
 prepareModifyIndex _jl = do
 	index <- fromRepo gitAnnexIndex
-	void $ liftIO $ tryIO $ R.removeLink (index <> ".lock")
+	void $ liftIO $ tryIO $
+		removeFile (index <> literalOsPath ".lock")
 
 {- Runs an action using the branch's index file. -}
 withIndex :: Annex a -> Annex a
@@ -690,7 +690,7 @@ withIndex' :: Bool -> Annex a -> Annex a
 withIndex' bootstrapping a = withIndexFile AnnexIndexFile $ \f -> do
 	checkIndexOnce $ unlessM (liftIO $ doesFileExist f) $ do
 		unless bootstrapping create
-		createAnnexDirectory $ toOsPath $ takeDirectory f
+		createAnnexDirectory $ takeDirectory f
 		unless bootstrapping $ inRepo genIndex
 	a
 
@@ -712,7 +712,7 @@ forceUpdateIndex jl branchref = do
 {- Checks if the index needs to be updated. -}
 needUpdateIndex :: Git.Ref -> Annex Bool
 needUpdateIndex branchref = do
-	f <- toOsPath <$> fromRepo gitAnnexIndexStatus
+	f <- fromRepo gitAnnexIndexStatus
 	committedref <- Git.Ref . firstLine' <$>
 		liftIO (catchDefaultIO mempty $ F.readFile' f)
 	return (committedref /= branchref)
@@ -748,19 +748,20 @@ stageJournal jl commitindex = withIndex $ withOtherTmp $ \tmpdir -> do
 	Git.UpdateIndex.streamUpdateIndex g
 		[genstream dir h jh jlogh]
 	commitindex
-	liftIO $ cleanup (fromOsPath dir) jlogh jlogf
+	liftIO $ cleanup dir jlogh jlogf
   where
 	genstream dir h jh jlogh streamer = readDirectory jh >>= \case
 		Nothing -> return ()
 		Just file -> do
-			let path = dir P.</> file
-			unless (dirCruft file) $ whenM (isfile path) $ do
+			let file' = toOsPath file
+			let path = dir </> file'
+			unless (file' `elem` dirCruft) $ whenM (isfile path) $ do
				sha <- Git.HashObject.hashFile h path
				B.hPutStr jlogh (file <> "\n")
				streamer $ Git.UpdateIndex.updateIndexLine
-					sha TreeFile (asTopFilePath $ fileJournal file)
+					sha TreeFile (asTopFilePath $ fileJournal file')
 			genstream dir h jh jlogh streamer
-	isfile file = isRegularFile <$> R.getFileStatus file
+	isfile file = isRegularFile <$> R.getFileStatus (fromOsPath file)
 	-- Clean up the staged files, as listed in the temp log file.
 	-- The temp file is used to avoid needing to buffer all the
 	-- filenames in memory.
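In the stageJournal hunk above, dirCruft changes from being applied as a predicate (dirCruft file) to a membership test (file' `elem` dirCruft), which implies it is now a list of directory-listing entries to skip. A hedged sketch of that reading (the "." and ".." contents are an assumption, not shown in this diff):

    dirCruft :: [OsPath]
    dirCruft = [literalOsPath ".", literalOsPath ".."]  -- assumed contents

    skipEntry :: OsPath -> Bool
    skipEntry entry = entry `elem` dirCruft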
@@ -768,10 +769,10 @@ stageJournal jl commitindex = withIndex $ withOtherTmp $ \tmpdir -> do
 		hFlush jlogh
 		hSeek jlogh AbsoluteSeek 0
 		stagedfs <- lines <$> hGetContents jlogh
-		mapM_ (removeFile . (dir </>)) stagedfs
+		mapM_ (removeFile . (dir </>) . toOsPath) stagedfs
 		hClose jlogh
 		removeWhenExistsWith (R.removeLink) (fromOsPath jlogf)
-	openjlog tmpdir = liftIO $ openTmpFileIn (toOsPath tmpdir) (toOsPath "jlog")
+	openjlog tmpdir = liftIO $ openTmpFileIn tmpdir (literalOsPath "jlog")
 
 getLocalTransitions :: Annex Transitions
 getLocalTransitions =
@@ -932,7 +933,7 @@ getIgnoredRefs =
 	S.fromList . mapMaybe Git.Sha.extractSha . fileLines' <$> content
   where
 	content = do
-		f <- toOsPath <$> fromRepo gitAnnexIgnoredRefs
+		f <- fromRepo gitAnnexIgnoredRefs
 		liftIO $ catchDefaultIO mempty $ F.readFile' f
 
 addMergedRefs :: [(Git.Sha, Git.Branch)] -> Annex ()
@@ -950,7 +951,7 @@ getMergedRefs = S.fromList . map fst <$> getMergedRefs'
 
 getMergedRefs' :: Annex [(Git.Sha, Git.Branch)]
 getMergedRefs' = do
-	f <- toOsPath <$> fromRepo gitAnnexMergedRefs
+	f <- fromRepo gitAnnexMergedRefs
 	s <- liftIO $ catchDefaultIO mempty $ F.readFile' f
 	return $ map parse $ fileLines' s
   where
@@ -41,18 +41,16 @@ import Config
 import Annex.Perms
 #endif
 
-import qualified System.FilePath.ByteString as P
-
 {- Checks if a given key's content is currently present. -}
 inAnnex :: Key -> Annex Bool
-inAnnex key = inAnnexCheck key $ liftIO . R.doesPathExist
+inAnnex key = inAnnexCheck key $ liftIO . R.doesPathExist . fromOsPath
 
 {- Runs an arbitrary check on a key's content. -}
-inAnnexCheck :: Key -> (RawFilePath -> Annex Bool) -> Annex Bool
+inAnnexCheck :: Key -> (OsPath -> Annex Bool) -> Annex Bool
 inAnnexCheck key check = inAnnex' id False check key
 
 {- inAnnex that performs an arbitrary check of the key's content. -}
-inAnnex' :: (a -> Bool) -> a -> (RawFilePath -> Annex a) -> Key -> Annex a
+inAnnex' :: (a -> Bool) -> a -> (OsPath -> Annex a) -> Key -> Annex a
 inAnnex' isgood bad check key = withObjectLoc key $ \loc -> do
 	r <- check loc
 	if isgood r
@@ -75,7 +73,7 @@ inAnnex' isgood bad check key = withObjectLoc key $ \loc -> do
 objectFileExists :: Key -> Annex Bool
 objectFileExists key =
 	calcRepo (gitAnnexLocation key)
-		>>= liftIO . R.doesPathExist
+		>>= liftIO . doesFileExist
 
 {- A safer check; the key's content must not only be present, but
 - is not in the process of being removed. -}
@@ -93,7 +91,7 @@ inAnnexSafe key = inAnnex' (fromMaybe True) (Just False) go key
 	{- The content file must exist, but the lock file generally
 	 - won't exist unless a removal is in process. -}
 	checklock (Just lockfile) contentfile =
-		ifM (liftIO $ doesFileExist (fromRawFilePath contentfile))
+		ifM (liftIO $ doesFileExist contentfile)
 			( checkOr is_unlocked lockfile
 			, return is_missing
 			)
@@ -102,7 +100,7 @@ inAnnexSafe key = inAnnex' (fromMaybe True) (Just False) go key
 			Just True -> is_locked
 			Just False -> is_unlocked
 #else
-	checklock Nothing contentfile = liftIO $ ifM (doesFileExist (fromRawFilePath contentfile))
+	checklock Nothing contentfile = liftIO $ ifM (doesFileExist contentfile)
 		( lockShared contentfile >>= \case
 			Nothing -> return is_locked
 			Just lockhandle -> do
@@ -113,7 +111,7 @@ inAnnexSafe key = inAnnex' (fromMaybe True) (Just False) go key
 	{- In Windows, see if we can take a shared lock. If so,
 	 - remove the lock file to clean up after ourselves. -}
 	checklock (Just lockfile) contentfile =
-		ifM (liftIO $ doesFileExist (fromRawFilePath contentfile))
+		ifM (liftIO $ doesFileExist contentfile)
 			( modifyContentDir lockfile $ liftIO $
 				lockShared lockfile >>= \case
 					Nothing -> return is_locked
@@ -134,7 +132,7 @@ inAnnexSafe key = inAnnex' (fromMaybe True) (Just False) go key
 - content locking works, from running at the same time as content is locked
 - using the old method.
 -}
-withContentLockFile :: Key -> (Maybe RawFilePath -> Annex a) -> Annex a
+withContentLockFile :: Key -> (Maybe OsPath -> Annex a) -> Annex a
 withContentLockFile k a = do
 	v <- getVersion
 	if versionNeedsWritableContentFiles v
@@ -146,7 +144,7 @@ withContentLockFile k a = do
 		 - will switch over to v10 content lock files at the
 		 - right time. -}
 		gitdir <- fromRepo Git.localGitDir
-		let gitconfig = gitdir P.</> "config"
+		let gitconfig = gitdir </> literalOsPath "config"
 		ic <- withTSDelta (liftIO . genInodeCache gitconfig)
 		oldic <- Annex.getState Annex.gitconfiginodecache
 		v' <- if fromMaybe False (compareStrong <$> ic <*> oldic)
@@ -161,7 +159,7 @@ withContentLockFile k a = do
   where
 	go v = contentLockFile k v >>= a
 
-contentLockFile :: Key -> Maybe RepoVersion -> Annex (Maybe RawFilePath)
+contentLockFile :: Key -> Maybe RepoVersion -> Annex (Maybe OsPath)
 #ifndef mingw32_HOST_OS
 {- Older versions of git-annex locked content files themselves, but newer
 - versions use a separate lock file, to better support repos shared
@@ -177,7 +175,7 @@ contentLockFile key _ = Just <$> calcRepo (gitAnnexContentLock key)
 #endif
 
 {- Performs an action, passing it the location to use for a key's content. -}
-withObjectLoc :: Key -> (RawFilePath -> Annex a) -> Annex a
+withObjectLoc :: Key -> (OsPath -> Annex a) -> Annex a
 withObjectLoc key a = a =<< calcRepo (gitAnnexLocation key)
 
 {- Check if a file contains the unmodified content of the key.
@@ -185,7 +183,7 @@ withObjectLoc key a = a =<< calcRepo (gitAnnexLocation key)
 - The expensive way to tell is to do a verification of its content.
 - The cheaper way is to see if the InodeCache for the key matches the
 - file. -}
-isUnmodified :: Key -> RawFilePath -> Annex Bool
+isUnmodified :: Key -> OsPath -> Annex Bool
 isUnmodified key f =
 	withTSDelta (liftIO . genInodeCache f) >>= \case
 		Just fc -> do
@@ -193,7 +191,7 @@ isUnmodified key f =
 			isUnmodified' key f fc ic
 		Nothing -> return False
 
-isUnmodified' :: Key -> RawFilePath -> InodeCache -> [InodeCache] -> Annex Bool
+isUnmodified' :: Key -> OsPath -> InodeCache -> [InodeCache] -> Annex Bool
 isUnmodified' = isUnmodifiedLowLevel Database.Keys.addInodeCaches
 
 {- Cheap check if a file contains the unmodified content of the key,
@@ -206,7 +204,7 @@ isUnmodified' = isUnmodifiedLowLevel Database.Keys.addInodeCaches
 - this may report a false positive when repeated edits are made to a file
 - within a small time window (eg 1 second).
 -}
-isUnmodifiedCheap :: Key -> RawFilePath -> Annex Bool
+isUnmodifiedCheap :: Key -> OsPath -> Annex Bool
 isUnmodifiedCheap key f = maybe (pure False) (isUnmodifiedCheap' key)
 	=<< withTSDelta (liftIO . genInodeCache f)
 
@@ -12,7 +12,7 @@ import Annex.Verify
 import Annex.InodeSentinal
 import Utility.InodeCache
 
-isUnmodifiedLowLevel :: (Key -> [InodeCache] -> Annex ()) -> Key -> RawFilePath -> InodeCache -> [InodeCache] -> Annex Bool
+isUnmodifiedLowLevel :: (Key -> [InodeCache] -> Annex ()) -> Key -> OsPath -> InodeCache -> [InodeCache] -> Annex Bool
 isUnmodifiedLowLevel addinodecaches key f fc ic =
 	isUnmodifiedCheapLowLevel fc ic <||> expensivecheck
   where
@@ -23,7 +23,7 @@ import qualified Annex.Queue
 import Config.Smudge
 
 {- Runs an action using a different git index file. -}
-withIndexFile :: AltIndexFile -> (FilePath -> Annex a) -> Annex a
+withIndexFile :: AltIndexFile -> (OsPath -> Annex a) -> Annex a
 withIndexFile i = withAltRepo usecachedgitenv restoregitenv
   where
 	-- This is an optimisation. Since withIndexFile is run repeatedly,
@@ -58,7 +58,7 @@ withIndexFile i = withAltRepo usecachedgitenv restoregitenv
 		f <- indexEnvVal $ case i of
 			AnnexIndexFile -> gitAnnexIndex g
 			ViewIndexFile -> gitAnnexViewIndex g
-		g' <- addGitEnv g indexEnv f
+		g' <- addGitEnv g indexEnv (fromOsPath f)
 		return (g', f)
 
 	restoregitenv g g' = g' { gitEnv = gitEnv g }
@@ -54,7 +54,7 @@ import System.PosixCompat.Files (isSymbolicLink)
 type LinkTarget = S.ByteString
 
 {- Checks if a file is a link to a key. -}
-isAnnexLink :: RawFilePath -> Annex (Maybe Key)
+isAnnexLink :: OsPath -> Annex (Maybe Key)
 isAnnexLink file = maybe Nothing parseLinkTargetOrPointer <$> getAnnexLinkTarget file
 
 {- Gets the link target of a symlink.
@@ -65,13 +65,13 @@ isAnnexLink file = maybe Nothing parseLinkTargetOrPointer <$> getAnnexLinkTarget
 - Returns Nothing if the file is not a symlink, or not a link to annex
 - content.
 -}
-getAnnexLinkTarget :: RawFilePath -> Annex (Maybe LinkTarget)
+getAnnexLinkTarget :: OsPath -> Annex (Maybe LinkTarget)
 getAnnexLinkTarget f = getAnnexLinkTarget' f
 	=<< (coreSymlinks <$> Annex.getGitConfig)
 
 {- Pass False to force looking inside file, for when git checks out
 - symlinks as plain files. -}
-getAnnexLinkTarget' :: RawFilePath -> Bool -> Annex (Maybe S.ByteString)
+getAnnexLinkTarget' :: OsPath -> Bool -> Annex (Maybe S.ByteString)
 getAnnexLinkTarget' file coresymlinks = if coresymlinks
 	then check probesymlink $
 		return Nothing
@@ -86,9 +86,9 @@ getAnnexLinkTarget' file coresymlinks = if coresymlinks
 			| otherwise -> return Nothing
 		Nothing -> fallback
 
-	probesymlink = R.readSymbolicLink file
+	probesymlink = R.readSymbolicLink (fromOsPath file)
 
-	probefilecontent = F.withFile (toOsPath file) ReadMode $ \h -> do
+	probefilecontent = F.withFile file ReadMode $ \h -> do
 		s <- S.hGet h maxSymlinkSz
 		-- If we got the full amount, the file is too large
 		-- to be a symlink target.
@@ -241,6 +241,7 @@ restagePointerFiles r = unlessM (Annex.getState Annex.insmudgecleanfilter) $ do
 	let replaceindex = liftIO $ moveFile tmpindex realindex
 	let updatetmpindex = do
 		r' <- liftIO $ Git.Env.addGitEnv r Git.Index.indexEnv
+			. fromOsPath
 			=<< Git.Index.indexEnvVal tmpindex
 		configfilterprocess numsz $
 			runupdateindex tsd r' replaceindex
@@ -452,7 +453,7 @@ isPointerFile f = catchDefaultIO Nothing $
 			fdToHandle fd
 	in bracket open hClose readhandle
 #else
-	ifM (isSymbolicLink <$> R.getSymbolicLinkStatus (toRawFilePath f))
+	ifM (isSymbolicLink <$> R.getSymbolicLinkStatus (fromOsPath f))
 		( return Nothing
 		, F.withFile f ReadMode readhandle
 		)
@@ -38,7 +38,7 @@ import Text.Read
 -
 - Also, can generate new metadata, if configured to do so.
 -}
-genMetaData :: Key -> RawFilePath -> Maybe POSIXTime -> Annex ()
+genMetaData :: Key -> OsPath -> Maybe POSIXTime -> Annex ()
 genMetaData key file mmtime = do
 	catKeyFileHEAD file >>= \case
 		Nothing -> noop
@@ -57,8 +57,8 @@ genMetaData key file mmtime = do
 		Nothing -> noop
   where
 	warncopied = warning $ UnquotedString $
-		"Copied metadata from old version of " ++ fromRawFilePath file ++ " to new version. " ++
-		"If you don't want this copied metadata, run: git annex metadata --remove-all " ++ fromRawFilePath file
+		"Copied metadata from old version of " ++ fromOsPath file ++ " to new version. " ++
+		"If you don't want this copied metadata, run: git annex metadata --remove-all " ++ fromOsPath file
 	-- If the only fields copied were date metadata, and they'll
 	-- be overwritten with the current mtime, no need to warn about
 	-- copying.
@@ -39,13 +39,13 @@ import Utility.Metered
 import Annex.WorkerPool
 import Types.WorkerPool
 import Types.Key
+import qualified Utility.FileIO as F
 
 import Control.Concurrent.STM
 import Control.Concurrent.Async
 import qualified Data.ByteString as S
 #if WITH_INOTIFY
 import qualified System.INotify as INotify
-import qualified System.FilePath.ByteString as P
 #endif
 
 shouldVerify :: VerifyConfig -> Annex Bool
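The new import qualified Utility.FileIO as F backs the F.withBinaryFile and F.openBinaryFile calls in the hunks below, replacing withBinaryFile (fromRawFilePath f). Assuming Utility.FileIO wraps the file-io package's System.File.OsPath (which provides Handle operations keyed on OsPath), the replacement pattern is:

    import System.OsPath (OsPath)
    import qualified System.File.OsPath as F
    import System.IO (IOMode(ReadMode))
    import qualified Data.ByteString as S

    -- Open by OsPath directly; no FilePath round-trip needed.
    readPrefix :: OsPath -> IO S.ByteString
    readPrefix f = F.withBinaryFile f ReadMode $ \h -> S.hGet h 65536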
@@ -73,7 +73,7 @@ shouldVerify (RemoteVerify r) =
 - If the RetrievalSecurityPolicy requires verification and the key's
 - backend doesn't support it, the verification will fail.
 -}
-verifyKeyContentPostRetrieval :: RetrievalSecurityPolicy -> VerifyConfig -> Verification -> Key -> RawFilePath -> Annex Bool
+verifyKeyContentPostRetrieval :: RetrievalSecurityPolicy -> VerifyConfig -> Verification -> Key -> OsPath -> Annex Bool
 verifyKeyContentPostRetrieval rsp v verification k f = case (rsp, verification) of
 	(_, Verified) -> return True
 	(RetrievalVerifiableKeysSecure, _) -> ifM (isVerifiable k)
@@ -105,11 +105,11 @@ verifyKeyContentPostRetrieval rsp v verification k f = case (rsp, verification)
 -- When possible, does an incremental verification, because that can be
 -- faster. Eg, the VURL backend can need to try multiple checksums and only
 -- with an incremental verification does it avoid reading files twice.
-verifyKeyContent :: Key -> RawFilePath -> Annex Bool
+verifyKeyContent :: Key -> OsPath -> Annex Bool
 verifyKeyContent k f = verifyKeySize k f <&&> verifyKeyContent' k f
 
 -- Does not verify size.
-verifyKeyContent' :: Key -> RawFilePath -> Annex Bool
+verifyKeyContent' :: Key -> OsPath -> Annex Bool
 verifyKeyContent' k f =
 	Backend.maybeLookupBackendVariety (fromKey keyVariety k) >>= \case
 		Nothing -> return True
@@ -119,7 +119,7 @@ verifyKeyContent' k f =
 			iv <- mkiv k
 			showAction (UnquotedString (descIncrementalVerifier iv))
 			res <- liftIO $ catchDefaultIO Nothing $
-				withBinaryFile (fromRawFilePath f) ReadMode $ \h -> do
+				F.withBinaryFile f ReadMode $ \h -> do
 					feedIncrementalVerifier h iv
 					finalizeIncrementalVerifier iv
 			case res of
@@ -129,7 +129,7 @@ verifyKeyContent' k f =
 				Just verifier -> verifier k f
 		(Nothing, Just verifier) -> verifier k f
 
-resumeVerifyKeyContent :: Key -> RawFilePath -> IncrementalVerifier -> Annex Bool
+resumeVerifyKeyContent :: Key -> OsPath -> IncrementalVerifier -> Annex Bool
 resumeVerifyKeyContent k f iv = liftIO (positionIncrementalVerifier iv) >>= \case
 	Nothing -> fallback
 	Just endpos -> do
@@ -151,7 +151,7 @@ resumeVerifyKeyContent k f iv = liftIO (positionIncrementalVerifier iv) >>= \cas
 	| otherwise = do
 		showAction (UnquotedString (descIncrementalVerifier iv))
 		liftIO $ catchDefaultIO (Just False) $
-			withBinaryFile (fromRawFilePath f) ReadMode $ \h -> do
+			F.withBinaryFile f ReadMode $ \h -> do
 				hSeek h AbsoluteSeek endpos
 				feedIncrementalVerifier h iv
 				finalizeIncrementalVerifier iv
@@ -167,7 +167,7 @@ feedIncrementalVerifier h iv = do
   where
 	chunk = 65536
 
-verifyKeySize :: Key -> RawFilePath -> Annex Bool
+verifyKeySize :: Key -> OsPath -> Annex Bool
 verifyKeySize k f = case fromKey keySize k of
 	Just size -> do
 		size' <- liftIO $ catchDefaultIO 0 $ getFileSize f
@@ -295,7 +295,7 @@ resumeVerifyFromOffset o incrementalverifier meterupdate h
 -- and if the disk is slow, the reader may never catch up to the writer,
 -- and the disk cache may never speed up reads. So this should only be
 -- used when there's not a better way to incrementally verify.
-tailVerify :: Maybe IncrementalVerifier -> RawFilePath -> Annex a -> Annex a
+tailVerify :: Maybe IncrementalVerifier -> OsPath -> Annex a -> Annex a
 tailVerify (Just iv) f writer = do
 	finished <- liftIO newEmptyTMVarIO
 	t <- liftIO $ async $ tailVerify' iv f finished
@@ -305,7 +305,7 @@ tailVerify (Just iv) f writer = do
 	writer `finally` finishtail
 tailVerify Nothing _ writer = writer
 
-tailVerify' :: IncrementalVerifier -> RawFilePath -> TMVar () -> IO ()
+tailVerify' :: IncrementalVerifier -> OsPath -> TMVar () -> IO ()
 #if WITH_INOTIFY
 tailVerify' iv f finished =
 	tryNonAsync go >>= \case
@@ -318,15 +318,16 @@ tailVerify' iv f finished =
 	-- of resuming, and waiting for modification deals with such
 	-- situations.
 	inotifydirchange i cont =
-		INotify.addWatch i [INotify.Modify] dir $ \case
+		INotify.addWatch i [INotify.Modify] (fromOsPath dir) $ \case
 			-- Ignore changes to other files in the directory.
 			INotify.Modified { INotify.maybeFilePath = fn }
-				| fn == Just basef -> cont
+				| fn == Just basef' -> cont
 			_ -> noop
 	  where
-		(dir, basef) = P.splitFileName f
+		(dir, basef) = splitFileName f
+		basef' = fromOsPath basef
 
-	inotifyfilechange i = INotify.addWatch i [INotify.Modify] f . const
+	inotifyfilechange i = INotify.addWatch i [INotify.Modify] (fromOsPath f) . const
 
 	go = INotify.withINotify $ \i -> do
 		modified <- newEmptyTMVarIO
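The tailVerify' hunk above shows the boundary idiom for hinotify, whose watch API takes raw (ByteString) paths: the path is split with the OsPath splitFileName, and fromOsPath is applied only at the INotify call sites. A sketch of that split (fromOsPath as git-annex's OsPath-to-RawFilePath helper is an assumption carried over from the diff):

    import System.OsPath (OsPath, splitFileName)

    -- Split once, convert once: the directory is watched, and events
    -- are filtered against the raw base name that hinotify reports.
    inotifyTarget :: OsPath -> (OsPath, RawFilePath)
    inotifyTarget f =
    	let (dir, basef) = splitFileName f
    	in (dir, fromOsPath basef)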
@@ -354,7 +355,7 @@ tailVerify' iv f finished =
 		case v of
 			Just () -> do
 				r <- tryNonAsync $
-					tryWhenExists (openBinaryFile (fromRawFilePath f) ReadMode) >>= \case
+					tryWhenExists (F.openBinaryFile f ReadMode) >>= \case
 						Just h -> return (Just h)
 						-- File does not exist, must have been
 						-- deleted. Wait for next modification
@@ -22,11 +22,11 @@ import qualified Database.Keys
 - When in an adjusted branch that may have hidden the file, looks for a
 - pointer to a key in the original branch.
 -}
-lookupKey :: RawFilePath -> Annex (Maybe Key)
+lookupKey :: OsPath -> Annex (Maybe Key)
 lookupKey = lookupKey' catkeyfile
   where
 	catkeyfile file =
-		ifM (liftIO $ doesFileExist $ fromRawFilePath file)
+		ifM (liftIO $ doesFileExist file)
 			( catKeyFile file
 			, catKeyFileHidden file =<< getCurrentBranch
 			)
@@ -35,22 +35,22 @@ lookupKey = lookupKey' catkeyfile
 - changes in the work tree. This means it's slower, but it also has
 - consistently the same behavior for locked files as for unlocked files.
 -}
-lookupKeyStaged :: RawFilePath -> Annex (Maybe Key)
+lookupKeyStaged :: OsPath -> Annex (Maybe Key)
 lookupKeyStaged file = catKeyFile file >>= \case
 	Just k -> return (Just k)
 	Nothing -> catKeyFileHidden file =<< getCurrentBranch
 
 {- Like lookupKey, but does not find keys for hidden files. -}
-lookupKeyNotHidden :: RawFilePath -> Annex (Maybe Key)
+lookupKeyNotHidden :: OsPath -> Annex (Maybe Key)
 lookupKeyNotHidden = lookupKey' catkeyfile
   where
 	catkeyfile file =
-		ifM (liftIO $ doesFileExist $ fromRawFilePath file)
+		ifM (liftIO $ doesFileExist file)
 			( catKeyFile file
 			, return Nothing
 			)
 
-lookupKey' :: (RawFilePath -> Annex (Maybe Key)) -> RawFilePath -> Annex (Maybe Key)
+lookupKey' :: (OsPath -> Annex (Maybe Key)) -> OsPath -> Annex (Maybe Key)
 lookupKey' catkeyfile file = isAnnexLink file >>= \case
 	Just key -> return (Just key)
 	Nothing -> catkeyfile file
@@ -67,7 +67,7 @@ getBackend :: FilePath -> Key -> Annex (Maybe Backend)
 getBackend file k = maybeLookupBackendVariety (fromKey keyVariety k) >>= \case
 	Just backend -> return $ Just backend
 	Nothing -> do
-		warning $ "skipping " <> QuotedPath (toRawFilePath file) <> " (" <>
+		warning $ "skipping " <> QuotedPath (toOsPath file) <> " (" <>
 			UnquotedString (unknownBackendVarietyMessage (fromKey keyVariety k)) <> ")"
 		return Nothing
 
@@ -78,7 +78,7 @@ unknownBackendVarietyMessage v =
 {- Looks up the backend that should be used for a file.
 - That can be configured on a per-file basis in the gitattributes file,
 - or forced with --backend. -}
-chooseBackend :: RawFilePath -> Annex Backend
+chooseBackend :: OsPath -> Annex Backend
 chooseBackend f = Annex.getRead Annex.forcebackend >>= go
   where
 	go Nothing = do
@@ -43,7 +43,7 @@ migrateFromVURLToURL oldkey newbackend _af _
 	| otherwise = return Nothing
 
 -- The Backend must use a cryptographically secure hash.
-generateEquivilantKey :: Backend -> RawFilePath -> Annex (Maybe Key)
+generateEquivilantKey :: Backend -> OsPath -> Annex (Maybe Key)
 generateEquivilantKey b f =
 	case genKey b of
 		Just genkey -> do
@@ -47,11 +47,9 @@ import Git.FilePath
 import qualified Git.DiffTree as DiffTree
 import Logs
 import qualified Logs.ContentIdentifier as Log
-import qualified Utility.RawFilePath as R
 
 import Database.Persist.Sql hiding (Key)
 import Database.Persist.TH
-import qualified System.FilePath.ByteString as P
 
 #if MIN_VERSION_persistent_sqlite(2,13,3)
 import Database.RawFilePath
@@ -99,14 +97,14 @@ openDb :: Annex ContentIdentifierHandle
 openDb = do
 	dbdir <- calcRepo' gitAnnexContentIdentifierDbDir
 	let db = dbdir </> literalOsPath "db"
-	isnew <- liftIO $ not <$> doesDirectoryPathExist db
+	isnew <- liftIO $ not <$> doesDirectoryExist db
 	if isnew
 		then initDb db $ void $
 			runMigrationSilent migrateContentIdentifier
 		-- Migrate from old versions of database, which had buggy
 		-- and suboptimal uniqueness constraints.
 #if MIN_VERSION_persistent_sqlite(2,13,3)
-		else liftIO $ runSqlite' db $ void $
+		else liftIO $ runSqlite' (fromOsPath db) $ void $
			runMigrationSilent migrateContentIdentifier
 #else
 		else liftIO $ runSqlite (T.pack (fromRawFilePath db)) $ void $
@@ -58,11 +58,9 @@ import Git.Types
 import Git.Sha
 import Git.FilePath
 import qualified Git.DiffTree
-import qualified Utility.RawFilePath as R
 
 import Database.Persist.Sql hiding (Key)
 import Database.Persist.TH
-import qualified System.FilePath.ByteString as P
 
 data ExportHandle = ExportHandle H.DbQueue UUID
 
@@ -98,8 +96,8 @@ ExportTreeCurrent
 openDb :: UUID -> Annex ExportHandle
 openDb u = do
 	dbdir <- calcRepo' (gitAnnexExportDbDir u)
-	let db = dbdir P.</> "db"
-	unlessM (liftIO $ R.doesPathExist db) $ do
+	let db = dbdir </> literalOsPath "db"
+	unlessM (liftIO $ doesDirectoryExist db) $ do
 		initDb db $ void $
 			runMigrationSilent migrateExport
 	h <- liftIO $ H.openDbQueue db "exported"
@@ -136,26 +134,27 @@ addExportedLocation :: ExportHandle -> Key -> ExportLocation -> IO ()
 addExportedLocation h k el = queueDb h $ do
 	void $ insertUniqueFast $ Exported k ef
 	let edirs = map
-		(\ed -> ExportedDirectory (SByteString (fromExportDirectory ed)) ef)
+		(\ed -> ExportedDirectory (SByteString (fromOsPath (fromExportDirectory ed))) ef)
 		(exportDirectories el)
 	putMany edirs
   where
-	ef = SByteString (fromExportLocation el)
+	ef = SByteString (fromOsPath (fromExportLocation el))
 
 removeExportedLocation :: ExportHandle -> Key -> ExportLocation -> IO ()
 removeExportedLocation h k el = queueDb h $ do
 	deleteWhere [ExportedKey ==. k, ExportedFile ==. ef]
-	let subdirs = map (SByteString . fromExportDirectory)
+	let subdirs = map
+		(SByteString . fromOsPath . fromExportDirectory)
 		(exportDirectories el)
 	deleteWhere [ExportedDirectoryFile ==. ef, ExportedDirectorySubdir <-. subdirs]
   where
-	ef = SByteString (fromExportLocation el)
+	ef = SByteString (fromOsPath (fromExportLocation el))
 
 {- Note that this does not see recently queued changes. -}
 getExportedLocation :: ExportHandle -> Key -> IO [ExportLocation]
 getExportedLocation (ExportHandle h _) k = H.queryDbQueue h $ do
 	l <- selectList [ExportedKey ==. k] []
-	return $ map (mkExportLocation . (\(SByteString f) -> f) . exportedFile . entityVal) l
+	return $ map (mkExportLocation . (\(SByteString f) -> toOsPath f) . exportedFile . entityVal) l
 
 {- Note that this does not see recently queued changes. -}
 isExportDirectoryEmpty :: ExportHandle -> ExportDirectory -> IO Bool
@@ -163,13 +162,13 @@ isExportDirectoryEmpty (ExportHandle h _) d = H.queryDbQueue h $ do
 	l <- selectList [ExportedDirectorySubdir ==. ed] []
 	return $ null l
   where
-	ed = SByteString $ fromExportDirectory d
+	ed = SByteString $ fromOsPath $ fromExportDirectory d
 
 {- Get locations in the export that might contain a key. -}
 getExportTree :: ExportHandle -> Key -> IO [ExportLocation]
 getExportTree (ExportHandle h _) k = H.queryDbQueue h $ do
 	l <- selectList [ExportTreeKey ==. k] []
-	return $ map (mkExportLocation . (\(SByteString f) -> f) . exportTreeFile . entityVal) l
+	return $ map (mkExportLocation . (\(SByteString f) -> toOsPath f) . exportTreeFile . entityVal) l
 
 {- Get keys that might be currently exported to a location.
 -
@@ -180,19 +179,19 @@ getExportTreeKey (ExportHandle h _) el = H.queryDbQueue h $ do
 	map (exportTreeKey . entityVal)
 		<$> selectList [ExportTreeFile ==. ef] []
   where
-	ef = SByteString (fromExportLocation el)
+	ef = SByteString (fromOsPath (fromExportLocation el))
 
 addExportTree :: ExportHandle -> Key -> ExportLocation -> IO ()
 addExportTree h k loc = queueDb h $
 	void $ insertUniqueFast $ ExportTree k ef
   where
-	ef = SByteString (fromExportLocation loc)
+	ef = SByteString (fromOsPath (fromExportLocation loc))
 
 removeExportTree :: ExportHandle -> Key -> ExportLocation -> IO ()
 removeExportTree h k loc = queueDb h $
 	deleteWhere [ExportTreeKey ==. k, ExportTreeFile ==. ef]
   where
-	ef = SByteString (fromOsPath (fromExportLocation loc))
+	ef = SByteString (fromOsPath (fromExportLocation loc))
 
 -- An action that is passed the old and new values that were exported,
 -- and updates state.
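The Database.Export hunks above all follow one rule: the persistent schema keeps storing paths as ByteString (SByteString), so OsPath values are serialized with fromOsPath on the way in and rehydrated with toOsPath on the way out. A sketch of that round trip, types reduced to the essentials (SByteString, ExportLocation, and their helpers are the names used in the diff, not a standalone API):

    import Data.ByteString (ByteString)

    newtype SByteString = SByteString ByteString

    toColumn :: ExportLocation -> SByteString
    toColumn = SByteString . fromOsPath . fromExportLocation

    fromColumn :: SByteString -> ExportLocation
    fromColumn (SByteString b) = mkExportLocation (toOsPath b)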
@@ -40,11 +40,9 @@ import Logs.MetaData
 import Types.MetaData
 import Annex.MetaData.StandardFields
 import Annex.LockFile
-import qualified Utility.RawFilePath as R
 
 import Database.Persist.Sql hiding (Key)
 import Database.Persist.TH
-import qualified System.FilePath.ByteString as P
 import qualified Data.ByteString as B
 import qualified Data.Set as S
 
@@ -75,8 +73,8 @@ AnnexBranch
 openDb :: Annex ImportFeedDbHandle
 openDb = do
 	dbdir <- calcRepo' gitAnnexImportFeedDbDir
-	let db = dbdir P.</> "db"
-	isnew <- liftIO $ not <$> R.doesPathExist db
+	let db = dbdir </> literalOsPath "db"
+	isnew <- liftIO $ not <$> doesDirectoryExist db
 	when isnew $
 		initDb db $ void $
 			runMigrationSilent migrateImportFeed
@@ -54,11 +54,10 @@ import Git.Branch (writeTreeQuiet, update')
 import qualified Git.Ref
 import Config
 import Config.Smudge
-import qualified Utility.RawFilePath as R
+import qualified Utility.OsString as OS
 
 import qualified Data.ByteString as S
 import qualified Data.ByteString.Char8 as S8
-import qualified System.FilePath.ByteString as P
 import Control.Concurrent.Async
 
 {- Runs an action that reads from the database.
@@ -129,8 +128,8 @@ openDb forwrite _ = do
 	lck <- calcRepo' gitAnnexKeysDbLock
 	catchPermissionDenied permerr $ withExclusiveLock lck $ do
 		dbdir <- calcRepo' gitAnnexKeysDbDir
-		let db = dbdir P.</> "db"
-		dbexists <- liftIO $ R.doesPathExist db
+		let db = dbdir </> literalOsPath "db"
+		dbexists <- liftIO $ doesDirectoryExist db
 		case dbexists of
 			True -> open db False
 			False -> do
@@ -182,7 +181,7 @@ emptyWhenBare a = ifM isBareRepo
 	)
 
 {- Include a known associated file along with any recorded in the database. -}
-getAssociatedFilesIncluding :: AssociatedFile -> Key -> Annex [RawFilePath]
+getAssociatedFilesIncluding :: AssociatedFile -> Key -> Annex [OsPath]
 getAssociatedFilesIncluding afile k = emptyWhenBare $ do
 	g <- Annex.gitRepo
 	l <- map (`fromTopFilePath` g) <$> getAssociatedFiles k
@@ -201,7 +200,7 @@ removeAssociatedFile k = runWriterIO AssociatedTable .
 	SQL.removeAssociatedFile k
 
 {- Stats the files, and stores their InodeCaches. -}
-storeInodeCaches :: Key -> [RawFilePath] -> Annex ()
+storeInodeCaches :: Key -> [OsPath] -> Annex ()
 storeInodeCaches k fs = withTSDelta $ \d ->
 	addInodeCaches k . catMaybes
 		=<< liftIO (mapM (\f -> genInodeCache f d) fs)
@@ -265,7 +264,7 @@ reconcileStaged dbisnew qh = ifM isBareRepo
 	( return mempty
 	, do
 		gitindex <- inRepo currentIndexFile
-		indexcache <- fromRawFilePath <$> calcRepo' gitAnnexKeysDbIndexCache
+		indexcache <- fromOsPath <$> calcRepo' gitAnnexKeysDbIndexCache
 		withTSDelta (liftIO . genInodeCache gitindex) >>= \case
 			Just cur -> readindexcache indexcache >>= \case
 				Nothing -> go cur indexcache =<< getindextree
@@ -356,8 +355,9 @@ reconcileStaged dbisnew qh = ifM isBareRepo
 	-- be a pointer file. And a pointer file that is replaced with
 	-- a non-pointer file will match this. This is only a
 	-- prefilter so that's ok.
-	, Param $ "-G" ++ fromRawFilePath (toInternalGitPath $
-		P.pathSeparator `S.cons` objectDir)
+	, Param $ "-G" ++
+		fromOsPath (toInternalGitPath $
+			pathSeparator `OS.cons` objectDir)
 	-- Disable rename detection.
 	, Param "--no-renames"
 	-- Avoid other complications.
@@ -371,6 +371,7 @@ reconcileStaged dbisnew qh = ifM isBareRepo
 	procdiff mdfeeder (info:file:rest) conflicted
 		| ":" `S.isPrefixOf` info = case S8.words info of
 			(_colonsrcmode:dstmode:srcsha:dstsha:status:[]) -> do
+				let file' = asTopFilePath (toOsPath file)
				let conflicted' = status == "U"
				-- avoid removing associated file when
				-- there is a merge conflict
@@ -378,17 +379,15 @@ reconcileStaged dbisnew qh = ifM isBareRepo
				send mdfeeder (Ref srcsha) $ \case
					Just oldkey -> do
						liftIO $ SQL.removeAssociatedFile oldkey
-							(asTopFilePath file)
-							(SQL.WriteHandle qh)
+							file' (SQL.WriteHandle qh)
						return True
					Nothing -> return False
				send mdfeeder (Ref dstsha) $ \case
					Just key -> do
						liftIO $ addassociatedfile key
-							(asTopFilePath file)
-							(SQL.WriteHandle qh)
+							file' (SQL.WriteHandle qh)
						when (dstmode /= fmtTreeItemType TreeSymlink) $
-							reconcilepointerfile (asTopFilePath file) key
+							reconcilepointerfile file' key
						return True
					Nothing -> return False
				procdiff mdfeeder rest
@@ -403,11 +402,11 @@ reconcileStaged dbisnew qh = ifM isBareRepo
 	procmergeconflictdiff mdfeeder (info:file:rest) conflicted
 		| ":" `S.isPrefixOf` info = case S8.words info of
			(_colonmode:_mode:sha:_sha:status:[]) -> do
+				let file' = asTopFilePath (toOsPath file)
				send mdfeeder (Ref sha) $ \case
					Just key -> do
						liftIO $ SQL.addAssociatedFile key
-							(asTopFilePath file)
-							(SQL.WriteHandle qh)
+							file' (SQL.WriteHandle qh)
						return True
					Nothing -> return False
				let conflicted' = status == "U"
@@ -123,9 +123,12 @@ pipeNullSplit params repo = do
 - convenience.
 -}
 pipeNullSplit' :: [CommandParam] -> Repo -> IO ([S.ByteString], IO Bool)
-pipeNullSplit' params repo = do
+pipeNullSplit' = pipeNullSplit'' id
+
+pipeNullSplit'' :: (S.ByteString -> t) -> [CommandParam] -> Repo -> IO ([t], IO Bool)
+pipeNullSplit'' f params repo = do
 	(s, cleanup) <- pipeNullSplit params repo
-	return (map L.toStrict s, cleanup)
+	return (map (f . L.toStrict) s, cleanup)
 
 pipeNullSplitStrict :: [CommandParam] -> Repo -> IO [S.ByteString]
 pipeNullSplitStrict params repo = do
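pipeNullSplit'' factors the element conversion out of pipeNullSplit', so each caller chooses its result type: pipeNullSplit' is recovered as the f = id instance, and branchFiles' (in an earlier hunk) passes toOsPath to get OsPath results directly. Hypothetical call sites:

    -- raw ByteStrings, as before:
    (names, ok)  <- pipeNullSplit' params repo
    -- OsPath results, with the conversion fused into the split:
    (paths, ok') <- pipeNullSplit'' toOsPath params repo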
@@ -28,8 +28,8 @@ indexEnv = "GIT_INDEX_FILE"
 -
 - So, an absolute path is the only safe option for this to return.
 -}
-indexEnvVal :: OsPath -> IO String
-indexEnvVal p = fromOsPath <$> absPath p
+indexEnvVal :: OsPath -> IO OsPath
+indexEnvVal p = absPath p
 
 {- Forces git to use the specified index file.
 -
@@ -42,7 +42,7 @@ override :: OsPath -> Repo -> IO (IO ())
 override index _r = do
 	res <- getEnv var
 	val <- indexEnvVal index
-	setEnv var val True
+	setEnv var (fromOsPath val) True
 	return $ reset res
   where
	var = "GIT_INDEX_FILE"
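With indexEnvVal now returning OsPath, the String conversion moves to the one place that genuinely needs it, the setEnv call; the withIndexFile caller in an earlier hunk likewise applies fromOsPath just before addGitEnv. A sketch of the boundary under plain-library assumptions (System.Environment's two-argument setEnv and System.OsPath.decodeUtf stand in for git-annex's setEnv and fromOsPath):

    import System.Environment (setEnv)
    import qualified System.OsPath as OP

    -- Keep the value as OsPath until the process-environment edge,
    -- then decode exactly once.
    overrideIndex :: OP.OsPath -> IO ()
    overrideIndex index = do
    	val <- OP.decodeUtf index
    	setEnv "GIT_INDEX_FILE" val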
Git/Log.hs | 11
@@ -19,7 +19,7 @@ import Data.Time.Clock.POSIX
 data LoggedFileChange t = LoggedFileChange
 	{ changetime :: POSIXTime
 	, changed :: t
-	, changedfile :: FilePath
+	, changedfile :: OsPath
 	, oldref :: Ref
 	, newref :: Ref
 	}
@ -34,7 +34,7 @@ getGitLog
|
||||||
-> Maybe Ref
|
-> Maybe Ref
|
||||||
-> [FilePath]
|
-> [FilePath]
|
||||||
-> [CommandParam]
|
-> [CommandParam]
|
||||||
-> (Sha -> FilePath -> Maybe t)
|
-> (Sha -> OsPath -> Maybe t)
|
||||||
-> Repo
|
-> Repo
|
||||||
-> IO ([LoggedFileChange t], IO Bool)
|
-> IO ([LoggedFileChange t], IO Bool)
|
||||||
getGitLog ref stopref fs os selector repo = do
|
getGitLog ref stopref fs os selector repo = do
|
||||||
|
@ -75,7 +75,7 @@ commitinfoFormat = "%H %ct"
|
||||||
--
|
--
|
||||||
-- The commitinfo is not included before all changelines, so
|
-- The commitinfo is not included before all changelines, so
|
||||||
-- keep track of the most recently seen commitinfo.
|
-- keep track of the most recently seen commitinfo.
|
||||||
parseGitRawLog :: (Ref -> FilePath -> Maybe t) -> [String] -> [LoggedFileChange t]
|
parseGitRawLog :: (Ref -> OsPath -> Maybe t) -> [String] -> [LoggedFileChange t]
|
||||||
parseGitRawLog selector = parse (deleteSha, epoch)
|
parseGitRawLog selector = parse (deleteSha, epoch)
|
||||||
where
|
where
|
||||||
epoch = toEnum 0 :: POSIXTime
|
epoch = toEnum 0 :: POSIXTime
|
||||||
|
@@ -91,11 +91,12 @@ parseGitRawLog selector = parse (deleteSha, epoch)
 			_ -> (oldcommitsha, oldts, cl')
 		mrc = do
 			(old, new) <- parseRawChangeLine cl
-			v <- selector commitsha c2
+			let c2' = toOsPath c2
+			v <- selector commitsha c2'
 			return $ LoggedFileChange
 				{ changetime = ts
 				, changed = v
-				, changedfile = c2
+				, changedfile = c2'
 				, oldref = old
 				, newref = new
 				}
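The `selector` given to `getGitLog` (and threaded through `parseGitRawLog`) now sees each changed path as `OsPath`. A hypothetical selector, invented for illustration, that keeps only changes to ".log" files; `takeExtension` is the OsPath version, and `literalOsPath` is assumed as used elsewhere in this commit:

-- Keep the path when it has a ".log" extension; the Sha is unused here.
logFilesOnly :: Sha -> OsPath -> Maybe OsPath
logFilesOnly _sha p
	| takeExtension p == literalOsPath ".log" = Just p
	| otherwise = Nothing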
@@ -332,7 +332,7 @@ reduceUnmerged c (i:is) = reduceUnmerged (new:c) rest
 - Note that this uses a --debug option whose output could change at some
 - point in the future. If the output is not as expected, will use Nothing.
 -}
-inodeCaches :: [OsPath] -> Repo -> IO ([(FilePath, Maybe InodeCache)], IO Bool)
+inodeCaches :: [OsPath] -> Repo -> IO ([(OsPath, Maybe InodeCache)], IO Bool)
 inodeCaches locs repo = guardSafeForLsFiles repo $ do
 	(ls, cleanup) <- pipeNullSplit params repo
 	return (parse Nothing (map decodeBL ls), cleanup)
@@ -348,11 +348,11 @@ inodeCaches locs repo = guardSafeForLsFiles repo $ do
 	parse Nothing (f:ls) = parse (Just f) ls
 	parse (Just f) (s:[]) =
 		let i = parsedebug s
-		in (f, i) : []
+		in (toOsPath f, i) : []
 	parse (Just f) (s:ls) =
 		let (d, f') = splitdebug s
 		    i = parsedebug d
-		in (f, i) : parse (Just f') ls
+		in (toOsPath f, i) : parse (Just f') ls
 	parse _ _ = []
 
 	-- First 5 lines are --debug output, remainder is the next filename.
@@ -130,7 +130,7 @@ getExportExcluded :: UUID -> Annex [Git.Tree.TreeItem]
 getExportExcluded u = do
 	logf <- fromRepo $ gitAnnexExportExcludeLog u
 	liftIO $ catchDefaultIO [] $ exportExcludedParser
-		<$> F.readFile (toOsPath logf)
+		<$> F.readFile logf
 	where
 
 exportExcludedParser :: L.ByteString -> [Git.Tree.TreeItem]
@@ -124,7 +124,7 @@ parseLoggedLocationsWithoutClusters l =
 	map (toUUID . fromLogInfo . info)
 		(filterPresent (parseLog l))
 
-getLoggedLocations :: (RawFilePath -> Annex [LogInfo]) -> Key -> Annex [UUID]
+getLoggedLocations :: (OsPath -> Annex [LogInfo]) -> Key -> Annex [UUID]
 getLoggedLocations getter key = do
 	config <- Annex.getGitConfig
 	locs <- map (toUUID . fromLogInfo) <$> getter (locationLogFile config key)
@@ -301,8 +301,8 @@ overLocationLogsJournal v branchsha keyaction mclusters =
 	changedlocs _ _ _ Nothing = pure (S.empty, S.empty)
 
 overLocationLogsHelper
-	:: ((RawFilePath -> Maybe Key) -> (Annex (FileContents Key b) -> Annex v) -> Annex a)
-	-> ((Maybe L.ByteString -> [UUID]) -> Key -> RawFilePath -> Maybe (L.ByteString, Maybe b) -> Annex u)
+	:: ((OsPath -> Maybe Key) -> (Annex (FileContents Key b) -> Annex v) -> Annex a)
+	-> ((Maybe L.ByteString -> [UUID]) -> Key -> OsPath -> Maybe (L.ByteString, Maybe b) -> Annex u)
 	-> Bool
 	-> v
 	-> (Annex (FileContents Key b) -> Annex v -> Annex v)
@@ -59,7 +59,7 @@ import qualified Data.ByteString.Lazy as L
 getCurrentMetaData :: Key -> Annex MetaData
 getCurrentMetaData = getCurrentMetaData' metaDataLogFile
 
-getCurrentMetaData' :: (GitConfig -> Key -> RawFilePath) -> Key -> Annex MetaData
+getCurrentMetaData' :: (GitConfig -> Key -> OsPath) -> Key -> Annex MetaData
 getCurrentMetaData' getlogfile k = do
 	config <- Annex.getGitConfig
 	parseCurrentMetaData <$> Annex.Branch.get (getlogfile config k)
@@ -101,7 +101,7 @@ getCurrentRemoteMetaData (RemoteStateHandle u) k = extractRemoteMetaData u <$>
 addMetaData :: Key -> MetaData -> Annex ()
 addMetaData = addMetaData' (Annex.Branch.RegardingUUID []) metaDataLogFile
 
-addMetaData' :: Annex.Branch.RegardingUUID -> (GitConfig -> Key -> RawFilePath) -> Key -> MetaData -> Annex ()
+addMetaData' :: Annex.Branch.RegardingUUID -> (GitConfig -> Key -> OsPath) -> Key -> MetaData -> Annex ()
 addMetaData' ru getlogfile k metadata =
 	addMetaDataClocked' ru getlogfile k metadata =<< currentVectorClock
 
@@ -112,7 +112,7 @@ addMetaData' ru getlogfile k metadata =
 addMetaDataClocked :: Key -> MetaData -> CandidateVectorClock -> Annex ()
 addMetaDataClocked = addMetaDataClocked' (Annex.Branch.RegardingUUID []) metaDataLogFile
 
-addMetaDataClocked' :: Annex.Branch.RegardingUUID -> (GitConfig -> Key -> RawFilePath) -> Key -> MetaData -> CandidateVectorClock -> Annex ()
+addMetaDataClocked' :: Annex.Branch.RegardingUUID -> (GitConfig -> Key -> OsPath) -> Key -> MetaData -> CandidateVectorClock -> Annex ()
 addMetaDataClocked' ru getlogfile k d@(MetaData m) c
 	| d == emptyMetaData = noop
 	| otherwise = do
@@ -160,5 +160,5 @@ copyMetaData oldkey newkey
 		(const $ buildLog l)
 	return True
 
-readLog :: RawFilePath -> Annex (Log MetaData)
+readLog :: OsPath -> Annex (Log MetaData)
 readLog = parseLog <$$> Annex.Branch.get
@@ -56,11 +56,10 @@ import Git.Log
 import Logs.File
 import Logs
 import Annex.CatFile
+import qualified Utility.OsString as OS
 
-import qualified Data.ByteString as B
 import qualified Data.ByteString.Lazy as L
 import Control.Concurrent.STM
-import System.FilePath.ByteString as P
 
 -- | What to use to record a migration. This should be the same Sha that is
 -- used to as the content of the annexed file in the HEAD branch.
@@ -95,7 +94,7 @@ commitMigration = do
 		n <- readTVar nv
 		let !n' = succ n
 		writeTVar nv n'
		return (asTopFilePath (toOsPath (show n')))
	let rec h r = liftIO $ sendMkTree h
 		(fromTreeItemType TreeFile)
 		BlobObject
@@ -110,8 +109,8 @@ commitMigration = do
 	n <- liftIO $ atomically $ readTVar nv
 	when (n > 0) $ do
 		treesha <- liftIO $ flip recordTree g $ Tree
-			[ RecordedSubTree (asTopFilePath "old") oldt []
-			, RecordedSubTree (asTopFilePath "new") newt []
+			[ RecordedSubTree (asTopFilePath (literalOsPath "old")) oldt []
+			, RecordedSubTree (asTopFilePath (literalOsPath "new")) newt []
 			]
 		commitsha <- Annex.Branch.rememberTreeish treesha
 			(asTopFilePath migrationTreeGraftPoint)
@@ -129,7 +128,7 @@ streamNewDistributedMigrations incremental a = do
 	(stoppoint, toskip) <- getPerformedMigrations
 	(l, cleanup) <- inRepo $ getGitLog branchsha
 		(if incremental then stoppoint else Nothing)
-		[fromRawFilePath migrationTreeGraftPoint]
+		[fromOsPath migrationTreeGraftPoint]
 		-- Need to follow because migrate.tree is grafted in
 		-- and then deleted, and normally git log stops when a file
 		-- gets deleted.
@@ -142,7 +141,7 @@ streamNewDistributedMigrations incremental a = do
 	go toskip c
 		| newref c `elem` nullShas = return ()
 		| changed c `elem` toskip = return ()
-		| not ("/new/" `B.isInfixOf` newfile) = return ()
+		| not (literalOsPath "/new/" `OS.isInfixOf` newfile) = return ()
 		| otherwise =
 			catKey (newref c) >>= \case
 				Nothing -> return ()
@@ -150,10 +149,10 @@ streamNewDistributedMigrations incremental a = do
 				Nothing -> return ()
 				Just oldkey -> a oldkey newkey
 	  where
-		newfile = toRawFilePath (changedfile c)
+		newfile = changedfile c
 		oldfile = migrationTreeGraftPoint
-			P.</> "old"
-			P.</> P.takeBaseName (fromInternalGitPath newfile)
+			</> literalOsPath "old"
+			</> takeBaseName (fromInternalGitPath newfile)
 		oldfileref = branchFileRef (changed c) oldfile
 
 getPerformedMigrations :: Annex (Maybe Sha, [Sha])
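The reworked `oldfile` computation maps a grafted ".../new/<name>" entry back to its ".../old/<name>" counterpart with the OsPath operators. A worked example (values illustrative; the graft point is migrate.tree per the comments above, and `fromInternalGitPath` is omitted since it only adjusts separators on Windows):

-- If newfile is "migrate.tree/new/f1", takeBaseName yields "f1",
-- so oldfile comes out as "migrate.tree/old/f1".
exampleOldFile :: OsPath
exampleOldFile = literalOsPath "migrate.tree"
	</> literalOsPath "old"
	</> takeBaseName (literalOsPath "migrate.tree/new/f1")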
@@ -32,7 +32,7 @@ requiredContentSet u expr = do
 	setLog requiredContentLog u expr
 	Annex.changeState $ \st -> st { Annex.requiredcontentmap = Nothing }
 
-setLog :: RawFilePath -> UUID -> PreferredContentExpression -> Annex ()
+setLog :: OsPath -> UUID -> PreferredContentExpression -> Annex ()
 setLog logfile uuid@(UUID _) val = do
 	c <- currentVectorClock
 	Annex.Branch.change (Annex.Branch.RegardingUUID [uuid]) logfile $
@@ -32,11 +32,11 @@ import Git.Types (RefDate)
 import qualified Data.ByteString.Lazy as L
 
 {- Adds to the log, removing any LogLines that are obsoleted. -}
-addLog :: Annex.Branch.RegardingUUID -> RawFilePath -> LogStatus -> LogInfo -> Annex ()
+addLog :: Annex.Branch.RegardingUUID -> OsPath -> LogStatus -> LogInfo -> Annex ()
 addLog ru file logstatus loginfo =
 	addLog' ru file logstatus loginfo =<< currentVectorClock
 
-addLog' :: Annex.Branch.RegardingUUID -> RawFilePath -> LogStatus -> LogInfo -> CandidateVectorClock -> Annex ()
+addLog' :: Annex.Branch.RegardingUUID -> OsPath -> LogStatus -> LogInfo -> CandidateVectorClock -> Annex ()
 addLog' ru file logstatus loginfo c =
 	Annex.Branch.changeOrAppend ru file $ \b ->
 		let old = parseLog b
@@ -53,7 +53,7 @@ addLog' ru file logstatus loginfo c =
 - When the log was changed, the onchange action is run (with the journal
 - still locked to prevent any concurrent changes) and True is returned.
 -}
-maybeAddLog :: Annex.Branch.RegardingUUID -> RawFilePath -> LogStatus -> LogInfo -> Annex () -> Annex Bool
+maybeAddLog :: Annex.Branch.RegardingUUID -> OsPath -> LogStatus -> LogInfo -> Annex () -> Annex Bool
 maybeAddLog ru file logstatus loginfo onchange = do
 	c <- currentVectorClock
 	let f = \b ->
@@ -72,15 +72,15 @@ genLine logstatus loginfo c old = LogLine c' logstatus loginfo
 
 {- Reads a log file.
 - Note that the LogLines returned may be in any order. -}
-readLog :: RawFilePath -> Annex [LogLine]
+readLog :: OsPath -> Annex [LogLine]
 readLog = parseLog <$$> Annex.Branch.get
 
 {- Reads a log and returns only the info that is still present. -}
-presentLogInfo :: RawFilePath -> Annex [LogInfo]
+presentLogInfo :: OsPath -> Annex [LogInfo]
 presentLogInfo file = map info . filterPresent <$> readLog file
 
 {- Reads a log and returns only the info that is no longer present. -}
-notPresentLogInfo :: RawFilePath -> Annex [LogInfo]
+notPresentLogInfo :: OsPath -> Annex [LogInfo]
 notPresentLogInfo file = map info . filterNotPresent <$> readLog file
 
 {- Reads a historical version of a log and returns the info that was in
@@ -88,7 +88,7 @@ notPresentLogInfo file = map info . filterNotPresent <$> readLog file
 -
 - The date is formatted as shown in gitrevisions man page.
 -}
-historicalLogInfo :: RefDate -> RawFilePath -> Annex [LogInfo]
+historicalLogInfo :: RefDate -> OsPath -> Annex [LogInfo]
 historicalLogInfo refdate file = parseLogInfo
 	<$> Annex.Branch.getHistorical refdate file
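These readers are typically partially applied to a log path derived from a key. A hypothetical composition (mirroring `getLoggedLocations` from the Logs.Location hunk above; not part of the patch):

-- All UUIDs that presently have the key, per its location log.
presentAt :: GitConfig -> Key -> Annex [UUID]
presentAt config key =
	map (toUUID . fromLogInfo)
		<$> presentLogInfo (locationLogFile config key)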
@@ -63,7 +63,7 @@ scheduleChange u a = scheduleSet u . S.toList . a =<< scheduleGet u
 
 getLastRunTimes :: Annex (M.Map ScheduledActivity LocalTime)
 getLastRunTimes = do
-	f <- fromRawFilePath <$> fromRepo gitAnnexScheduleState
+	f <- fromOsPath <$> fromRepo gitAnnexScheduleState
 	liftIO $ fromMaybe M.empty
 		<$> catchDefaultIO Nothing (readish <$> readFile f)
@@ -27,13 +27,13 @@ import Annex.VectorClock
 
 import qualified Data.Set as S
 
-readLog :: (Ord v, SingleValueSerializable v) => RawFilePath -> Annex (Log v)
+readLog :: (Ord v, SingleValueSerializable v) => OsPath -> Annex (Log v)
 readLog = parseLog <$$> Annex.Branch.get
 
-getLog :: (Ord v, SingleValueSerializable v) => RawFilePath -> Annex (Maybe v)
+getLog :: (Ord v, SingleValueSerializable v) => OsPath -> Annex (Maybe v)
 getLog = newestValue <$$> readLog
 
-setLog :: (Ord v, SingleValueSerializable v) => Annex.Branch.RegardingUUID -> RawFilePath -> v -> Annex ()
+setLog :: (Ord v, SingleValueSerializable v) => Annex.Branch.RegardingUUID -> OsPath -> v -> Annex ()
 setLog ru f v = do
 	c <- currentVectorClock
 	Annex.Branch.change ru f $ \old ->
@@ -34,6 +34,7 @@ import Backend (isStableKey)
 import Annex.SpecialRemote.Config
 import Annex.Verify
 import qualified Utility.RawFilePath as R
+import qualified Utility.FileIO as F
 
 import qualified Data.ByteString as S
 import qualified Data.ByteString.Lazy as L
@@ -120,7 +121,7 @@ storeChunks
 	-> ChunkConfig
 	-> EncKey
 	-> Key
-	-> FilePath
+	-> OsPath
 	-> MeterUpdate
 	-> Maybe (Cipher, EncKey)
 	-> encc
@@ -135,7 +136,7 @@ storeChunks u chunkconfig encryptor k f p enc encc storer checker =
 	-- possible without this check.
 	(UnpaddedChunks chunksize) -> ifM (isStableKey k)
 		( do
-			h <- liftIO $ openBinaryFile f ReadMode
+			h <- liftIO $ F.openBinaryFile f ReadMode
 			go chunksize h
 			liftIO $ hClose h
 		, storechunk k (FileContent f) p
@@ -257,7 +258,7 @@ retrieveChunks
 	-> ChunkConfig
 	-> EncKey
 	-> Key
-	-> FilePath
+	-> OsPath
 	-> MeterUpdate
 	-> Maybe (Cipher, EncKey)
 	-> encc
@@ -276,7 +277,7 @@ retrieveChunks retriever u vc chunkconfig encryptor basek dest basep enc encc
   where
 	go pe cks = do
 		let ls = map chunkKeyList cks
-		currsize <- liftIO $ catchMaybeIO $ getFileSize (toRawFilePath dest)
+		currsize <- liftIO $ catchMaybeIO $ getFileSize dest
 		let ls' = maybe ls (setupResume ls) currsize
 		if any null ls'
 			-- dest is already complete
|
@ -339,7 +340,7 @@ retrieveChunks retriever u vc chunkconfig encryptor basek dest basep enc encc
|
||||||
-- passing the whole file content to the
|
-- passing the whole file content to the
|
||||||
-- incremental verifier though.
|
-- incremental verifier though.
|
||||||
Nothing -> do
|
Nothing -> do
|
||||||
retriever (encryptor basek) basep (toRawFilePath dest) iv $
|
retriever (encryptor basek) basep dest iv $
|
||||||
retrieved iv Nothing basep
|
retrieved iv Nothing basep
|
||||||
return $ case iv of
|
return $ case iv of
|
||||||
Nothing -> Right iv
|
Nothing -> Right iv
|
||||||
|
@ -347,13 +348,13 @@ retrieveChunks retriever u vc chunkconfig encryptor basek dest basep enc encc
|
||||||
|
|
||||||
opennew = do
|
opennew = do
|
||||||
iv <- startVerifyKeyContentIncrementally vc basek
|
iv <- startVerifyKeyContentIncrementally vc basek
|
||||||
h <- liftIO $ openBinaryFile dest WriteMode
|
h <- liftIO $ F.openBinaryFile dest WriteMode
|
||||||
return (h, iv)
|
return (h, iv)
|
||||||
|
|
||||||
-- Open the file and seek to the start point in order to resume.
|
-- Open the file and seek to the start point in order to resume.
|
||||||
openresume startpoint = do
|
openresume startpoint = do
|
||||||
-- ReadWriteMode allows seeking; AppendMode does not.
|
-- ReadWriteMode allows seeking; AppendMode does not.
|
||||||
h <- liftIO $ openBinaryFile dest ReadWriteMode
|
h <- liftIO $ F.openBinaryFile dest ReadWriteMode
|
||||||
liftIO $ hSeek h AbsoluteSeek startpoint
|
liftIO $ hSeek h AbsoluteSeek startpoint
|
||||||
-- No incremental verification when resuming, since that
|
-- No incremental verification when resuming, since that
|
||||||
-- would need to read up to the startpoint.
|
-- would need to read up to the startpoint.
|
||||||
|
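Resuming hinges on the seek in `openresume`: the destination is opened read/write and positioned at the resume point, so later writes continue the partial file. A minimal standalone sketch of that shape (hypothetical helper, not part of the patch; `F.openBinaryFile` is the OsPath-taking variant imported above):

-- Open dest and position the handle so writes continue a partial download.
resumeAt :: OsPath -> Integer -> IO Handle
resumeAt dest startpoint = do
	h <- F.openBinaryFile dest ReadWriteMode
	hSeek h AbsoluteSeek startpoint
	return h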
@@ -398,7 +399,7 @@ retrieveChunks retriever u vc chunkconfig encryptor basek dest basep enc encc
 -}
 writeRetrievedContent
 	:: LensEncParams encc
-	=> FilePath
+	=> OsPath
 	-> Maybe (Cipher, EncKey)
 	-> encc
 	-> Maybe Handle
@@ -409,7 +410,7 @@ writeRetrievedContent
 writeRetrievedContent dest enc encc mh mp content miv = case (enc, mh, content) of
 	(Nothing, Nothing, FileContent f)
 		| f == dest -> noop
-		| otherwise -> liftIO $ moveFile (toRawFilePath f) (toRawFilePath dest)
+		| otherwise -> liftIO $ moveFile f dest
 	(Just (cipher, _), _, ByteContent b) -> do
 		cmd <- gpgCmd <$> Annex.getGitConfig
 		decrypt cmd encc cipher (feedBytes b) $
@@ -419,10 +420,10 @@ writeRetrievedContent dest enc encc mh mp content miv = case (enc, mh, content)
 		withBytes content $ \b ->
 			decrypt cmd encc cipher (feedBytes b) $
 				readBytes write
-		liftIO $ removeWhenExistsWith R.removeLink (toRawFilePath f)
+		liftIO $ removeWhenExistsWith R.removeLink (fromOsPath f)
 	(Nothing, _, FileContent f) -> do
 		withBytes content write
-		liftIO $ removeWhenExistsWith R.removeLink (toRawFilePath f)
+		liftIO $ removeWhenExistsWith R.removeLink (fromOsPath f)
 	(Nothing, _, ByteContent b) -> write b
   where
 	write b = case mh of
@@ -437,7 +438,7 @@ writeRetrievedContent dest enc encc mh mp content miv = case (enc, mh, content)
 				Nothing -> S.hPut h
 			in meteredWrite p writer b
 		Nothing -> L.hPut h b
-	opendest = openBinaryFile dest WriteMode
+	opendest = F.openBinaryFile dest WriteMode
 
 {- Can resume when the chunk's offset is at or before the end of
 - the dest file. -}
@@ -583,4 +584,4 @@ ensureChunksAreLogged _ _ (ChunkKeys _) = return ()
 
 withBytes :: ContentSource -> (L.ByteString -> Annex a) -> Annex a
 withBytes (ByteContent b) a = a b
-withBytes (FileContent f) a = a =<< liftIO (L.readFile f)
+withBytes (FileContent f) a = a =<< liftIO (L.readFile (fromOsPath f))
@@ -72,7 +72,7 @@ storeChunks key tmp dest storer recorder finalizer = do
 	when (null stored) $
 		giveup "no chunks were stored"
   where
-	basef = tmp ++ fromRawFilePath (keyFile key)
+	basef = tmp ++ fromOsPath (keyFile key)
 	tmpdests = map (basef ++ ) chunkStream
 
 {- Given a list of destinations to use, chunks the data according to the
@@ -23,15 +23,14 @@ import Data.Time.Clock.POSIX
 import System.PosixCompat.Files (modificationTime)
 import qualified Data.Map as M
 import qualified Data.Set as S
-import qualified System.FilePath.ByteString as P
 
 repoCheap :: Git.Repo -> Bool
 repoCheap = not . Git.repoIsUrl
 
-localpathCalc :: Git.Repo -> Maybe FilePath
+localpathCalc :: Git.Repo -> Maybe OsPath
 localpathCalc r
 	| not (Git.repoIsLocal r) && not (Git.repoIsLocalUnknown r) = Nothing
-	| otherwise = Just $ fromRawFilePath $ Git.repoPath r
+	| otherwise = Just $ Git.repoPath r
 
 {- Checks relatively inexpensively if a repository is available for use. -}
 repoAvail :: Git.Repo -> Annex Availability
@@ -63,8 +62,11 @@ guardUsable r fallback a
 gitRepoInfo :: Remote -> Annex [(String, String)]
 gitRepoInfo r = do
 	d <- fromRepo Git.localGitDir
-	mtimes <- liftIO $ mapM (\p -> modificationTime <$> R.getFileStatus p)
-		=<< emptyWhenDoesNotExist (dirContentsRecursive (d P.</> "refs" P.</> "remotes" P.</> encodeBS (Remote.name r)))
+	let refsdir = d </> literalOsPath "refs"
+		</> literalOsPath "remotes"
+		</> toOsPath (Remote.name r)
+	mtimes <- liftIO $ mapM (\p -> modificationTime <$> R.getFileStatus (fromOsPath p))
+		=<< emptyWhenDoesNotExist (dirContentsRecursive refsdir)
 	let lastsynctime = case mtimes of
 		[] -> "never"
 		_ -> show $ posixSecondsToUTCTime $ realToFrac $ maximum mtimes
@@ -18,7 +18,7 @@ import qualified Data.ByteString.Lazy as L
 
 -- A source of a Key's content.
 data ContentSource
-	= FileContent FilePath
+	= FileContent OsPath
 	| ByteContent L.ByteString
 
 isByteContent :: ContentSource -> Bool
@@ -43,7 +43,7 @@ type Storer = Key -> ContentSource -> MeterUpdate -> Annex ()
 -- content to the verifier before running the callback.
 -- This should not be done when it retrieves ByteContent.
 type Retriever = forall a.
-	Key -> MeterUpdate -> RawFilePath -> Maybe IncrementalVerifier
+	Key -> MeterUpdate -> OsPath -> Maybe IncrementalVerifier
 	-> (ContentSource -> Annex a) -> Annex a
 
 -- Action that removes a Key's content from a remote.
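With `Retriever` taking the destination as `OsPath`, an implementation can stay in OsPath terms end to end. A hypothetical minimal Retriever (illustrative only; it ignores the meter, destination, and incremental verifier, which a real retriever should honor per the comment above):

-- Hand the callback the file where a local remote stores the key.
localRetriever :: (Key -> OsPath) -> Retriever
localRetriever locate k _p _dest _iv callback =
	callback (FileContent (locate k))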
@@ -24,7 +24,6 @@ import Config
 import qualified Utility.RawFilePath as R
 import qualified Utility.FileIO as F
 
-import qualified System.FilePath.ByteString as P
 import System.PosixCompat.Files (isSymbolicLink)
 
 upgrade :: Bool -> Annex UpgradeResult
@@ -40,48 +39,52 @@ upgrade automatic = do
 	-- The old content identifier database is deleted here, but the
 	-- new database is not populated. It will be automatically
 	-- populated from the git-annex branch the next time it is used.
-	removeOldDb . fromRawFilePath =<< fromRepo gitAnnexContentIdentifierDbDirOld
-	liftIO . removeWhenExistsWith R.removeLink
+	removeOldDb =<< fromRepo gitAnnexContentIdentifierDbDirOld
+	liftIO . removeWhenExistsWith (R.removeLink . fromOsPath)
 		=<< fromRepo gitAnnexContentIdentifierLockOld
 
 	-- The export databases are deleted here. The new databases
 	-- will be populated by the next thing that needs them, the same
 	-- way as they would be in a fresh clone.
-	removeOldDb . fromRawFilePath =<< calcRepo' gitAnnexExportDir
+	removeOldDb =<< calcRepo' gitAnnexExportDir
 
 	populateKeysDb
-	removeOldDb . fromRawFilePath =<< fromRepo gitAnnexKeysDbOld
-	liftIO . removeWhenExistsWith R.removeLink
+	removeOldDb =<< fromRepo gitAnnexKeysDbOld
+	liftIO . removeWhenExistsWith (R.removeLink . fromOsPath)
 		=<< fromRepo gitAnnexKeysDbIndexCacheOld
-	liftIO . removeWhenExistsWith R.removeLink
+	liftIO . removeWhenExistsWith (R.removeLink . fromOsPath)
 		=<< fromRepo gitAnnexKeysDbLockOld
 
 	updateSmudgeFilter
 
 	return UpgradeSuccess
 
-gitAnnexKeysDbOld :: Git.Repo -> RawFilePath
-gitAnnexKeysDbOld r = gitAnnexDir r P.</> "keys"
+gitAnnexKeysDbOld :: Git.Repo -> OsPath
+gitAnnexKeysDbOld r = gitAnnexDir r </> literalOsPath "keys"
 
-gitAnnexKeysDbLockOld :: Git.Repo -> RawFilePath
-gitAnnexKeysDbLockOld r = gitAnnexKeysDbOld r <> ".lck"
+gitAnnexKeysDbLockOld :: Git.Repo -> OsPath
+gitAnnexKeysDbLockOld r =
+	gitAnnexKeysDbOld r <> literalOsPath ".lck"
 
-gitAnnexKeysDbIndexCacheOld :: Git.Repo -> RawFilePath
-gitAnnexKeysDbIndexCacheOld r = gitAnnexKeysDbOld r <> ".cache"
+gitAnnexKeysDbIndexCacheOld :: Git.Repo -> OsPath
+gitAnnexKeysDbIndexCacheOld r =
+	gitAnnexKeysDbOld r <> literalOsPath ".cache"
 
-gitAnnexContentIdentifierDbDirOld :: Git.Repo -> RawFilePath
-gitAnnexContentIdentifierDbDirOld r = gitAnnexDir r P.</> "cids"
+gitAnnexContentIdentifierDbDirOld :: Git.Repo -> OsPath
+gitAnnexContentIdentifierDbDirOld r =
+	gitAnnexDir r </> literalOsPath "cids"
 
-gitAnnexContentIdentifierLockOld :: Git.Repo -> RawFilePath
-gitAnnexContentIdentifierLockOld r = gitAnnexContentIdentifierDbDirOld r <> ".lck"
+gitAnnexContentIdentifierLockOld :: Git.Repo -> OsPath
+gitAnnexContentIdentifierLockOld r =
+	gitAnnexContentIdentifierDbDirOld r <> literalOsPath ".lck"
 
-removeOldDb :: FilePath -> Annex ()
+removeOldDb :: OsPath -> Annex ()
 removeOldDb db =
 	whenM (liftIO $ doesDirectoryExist db) $ do
 		v <- liftIO $ tryNonAsync $
 			removePathForcibly db
 		case v of
-			Left ex -> giveup $ "Failed removing old database directory " ++ db ++ " during upgrade (" ++ show ex ++ ") -- delete that and re-run git-annex to finish the upgrade."
+			Left ex -> giveup $ "Failed removing old database directory " ++ fromOsPath db ++ " during upgrade (" ++ show ex ++ ") -- delete that and re-run git-annex to finish the upgrade."
 			Right () -> return ()
 
 -- Populate the new keys database with associated files and inode caches.
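A note on the two operators used by these helpers: `</>` adds a path component, while `<>` appends to the final name, which is how the ".lck" and ".cache" files sit beside their databases. A worked example (the ".git/annex" prefix is illustrative):

exampleDb, exampleLock :: OsPath
exampleDb = literalOsPath ".git/annex" </> literalOsPath "keys"
-- exampleDb == ".git/annex/keys"
exampleLock = exampleDb <> literalOsPath ".lck"
-- exampleLock == ".git/annex/keys.lck"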
@@ -108,11 +111,11 @@ populateKeysDb = unlessM isBareRepo $ do
 	(l, cleanup) <- inRepo $ LsFiles.inodeCaches [top]
 	forM_ l $ \case
 		(_f, Nothing) -> giveup "Unable to parse git ls-files --debug output while upgrading git-annex sqlite databases."
-		(f, Just ic) -> unlessM (liftIO $ catchBoolIO $ isSymbolicLink <$> R.getSymbolicLinkStatus (toRawFilePath f)) $ do
-			catKeyFile (toRawFilePath f) >>= \case
+		(f, Just ic) -> unlessM (liftIO $ catchBoolIO $ isSymbolicLink <$> R.getSymbolicLinkStatus (fromOsPath f)) $ do
+			catKeyFile f >>= \case
 				Nothing -> noop
 				Just k -> do
-					topf <- inRepo $ toTopFilePath $ toRawFilePath f
+					topf <- inRepo $ toTopFilePath f
 					Database.Keys.runWriter AssociatedTable $ \h -> liftIO $
 						Database.Keys.SQL.addAssociatedFile k topf h
 					Database.Keys.runWriter ContentTable $ \h -> liftIO $
@@ -130,10 +133,10 @@ updateSmudgeFilter :: Annex ()
 updateSmudgeFilter = do
 	lf <- Annex.fromRepo Git.attributesLocal
 	ls <- liftIO $ map decodeBS . fileLines'
-		<$> catchDefaultIO "" (F.readFile' (toOsPath lf))
+		<$> catchDefaultIO "" (F.readFile' lf)
 	let ls' = removedotfilter ls
 	when (ls /= ls') $
-		liftIO $ writeFile (fromRawFilePath lf) (unlines ls')
+		liftIO $ writeFile (fromOsPath lf) (unlines ls')
   where
	removedotfilter ("* filter=annex":".* !filter":rest) =
		"* filter=annex" : removedotfilter rest
@@ -187,7 +187,7 @@ insertAuthToken extractAuthToken predicate webapp root pathbits params =
 - to avoid exposing the secret token when launching the web browser. -}
 writeHtmlShim :: String -> String -> FilePath -> IO ()
 writeHtmlShim title url file =
-	viaTmp (writeFileProtected . fromOsPath)
+	viaTmp (writeFileProtected)
 		(toOsPath $ toRawFilePath file)
 		(genHtmlShim title url)