bring back OsPath changes
I hope that the Windows test suite failure on AppVeyor was fixed by updating to a newer Windows image there. I have not been able to reproduce that failure in a Windows 11 VM run locally.
parent f0ab439c95
commit 84291b6014

119 changed files with 1003 additions and 647 deletions
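The change is mechanical throughout: String-based FilePath code (readFile, withFile, joinPath, bare string literals passed to withTmpFile) is replaced with RawFilePath/OsPath equivalents, with toOsPath / fromOsPath / fromRawFilePath conversions kept only at the remaining String boundaries. OsPath holds the path in the platform's native representation (bytes on POSIX, UTF-16 code units on Windows), so file access no longer round-trips through String decoding. As a rough sketch of the shape of the change, here is a minimal standalone example using the stock System.OsPath and System.File.OsPath APIs from the filepath and file-io packages; git-annex's own Utility.OsPath and Utility.FileIO wrappers seen in the hunks below differ in detail:

{-# LANGUAGE QuasiQuotes #-}

import System.OsPath (OsPath, osp, (</>), encodeUtf, decodeUtf)
import qualified System.File.OsPath as F
import qualified Data.ByteString.Char8 as B8

-- New style: keep the path as OsPath end to end, converting from
-- String only once at the boundary.
writeMarker :: OsPath -> IO ()
writeMarker dir = F.writeFile' (dir </> [osp|marker|]) (B8.pack "ok")

main :: IO ()
main = do
	dir <- encodeUtf "/tmp"  -- FilePath -> OsPath; throws on unencodable input
	writeMarker dir
	shown <- decodeUtf (dir </> [osp|marker|])  -- OsPath -> FilePath, for display only
	putStrLn ("wrote " ++ shown)

Calls like withTmpFile (toOsPath "export") and storer (fromRawFilePath (fromOsPath tmp)) in the hunks below are the transitional glue: interfaces that still traffic in String or RawFilePath get an explicit conversion at the call site.
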
@@ -200,12 +200,12 @@ checkUrl addunlockedmatcher r o si u = do
 startRemote :: AddUnlockedMatcher -> Remote -> AddUrlOptions -> SeekInput -> FilePath -> URLString -> Maybe Integer -> CommandStart
 startRemote addunlockedmatcher r o si file uri sz = do
 	pathmax <- liftIO $ fileNameLengthLimit "."
-	let file' = joinPath $ map (truncateFilePath pathmax) $
-		splitDirectories file
+	let file' = P.joinPath $ map (truncateFilePath pathmax) $
+		P.splitDirectories (toRawFilePath file)
 	startingAddUrl si uri o $ do
 		showNote $ UnquotedString $ "from " ++ Remote.name r
-		showDestinationFile (toRawFilePath file')
-		performRemote addunlockedmatcher r o uri (toRawFilePath file') sz
+		showDestinationFile file'
+		performRemote addunlockedmatcher r o uri file' sz
 
 performRemote :: AddUnlockedMatcher -> Remote -> AddUrlOptions -> URLString -> RawFilePath -> Maybe Integer -> CommandPerform
 performRemote addunlockedmatcher r o uri file sz = lookupKey file >>= \case

@@ -279,7 +279,8 @@ sanitizeOrPreserveFilePath o f
 		return f
 	| otherwise = do
 		pathmax <- liftIO $ fileNameLengthLimit "."
-		return $ truncateFilePath pathmax $ sanitizeFilePath f
+		return $ fromRawFilePath $ truncateFilePath pathmax $
+			toRawFilePath $ sanitizeFilePath f
 
 -- sanitizeFilePath avoids all these security problems
 -- (and probably others, but at least this catches the most egrarious ones).

@@ -353,7 +354,7 @@ downloadWeb addunlockedmatcher o url urlinfo file =
 	urlkey = addSizeUrlKey urlinfo $ Backend.URL.fromUrl url Nothing (verifiableOption o)
 	downloader f p = Url.withUrlOptions $ downloadUrl False urlkey p Nothing [url] f
 	go Nothing = return Nothing
-	go (Just (tmp, backend)) = ifM (useYoutubeDl o <&&> liftIO (isHtmlFile (fromRawFilePath tmp)))
+	go (Just (tmp, backend)) = ifM (useYoutubeDl o <&&> liftIO (isHtmlFile tmp))
 		( tryyoutubedl tmp backend
 		, normalfinish tmp backend
 		)

@@ -567,8 +568,8 @@ nodownloadWeb' o addunlockedmatcher url key file = checkCanAdd o file $ \canadd
 
 url2file :: URI -> Maybe Int -> Int -> FilePath
 url2file url pathdepth pathmax = case pathdepth of
-	Nothing -> truncateFilePath pathmax $ sanitizeFilePath fullurl
-	Just depth
+	Nothing -> truncatesanitize fullurl
+	Just depth
 		| depth >= length urlbits -> frombits id
 		| depth > 0 -> frombits $ drop depth
 		| depth < 0 -> frombits $ reverse . take (negate depth) . reverse

@@ -580,8 +581,12 @@ url2file url pathdepth pathmax = case pathdepth of
 		, uriQuery url
 		]
 	frombits a = intercalate "/" $ a urlbits
-	urlbits = map (truncateFilePath pathmax . sanitizeFilePath) $
+	urlbits = map truncatesanitize $
 		filter (not . null) $ splitc '/' fullurl
+	truncatesanitize = fromRawFilePath
+		. truncateFilePath pathmax
+		. toRawFilePath
+		. sanitizeFilePath
 
 urlString2file :: URLString -> Maybe Int -> Int -> FilePath
 urlString2file s pathdepth pathmax = case Url.parseURIRelaxed s of

@@ -312,12 +312,12 @@ performExport r srcrs db ek af contentsha loc allfilledvar = do
 	sent <- tryNonAsync $ if not (isGitShaKey ek)
 		then tryrenameannexobject $ sendannexobject
 		-- Sending a non-annexed file.
-		else withTmpFile "export" $ \tmp h -> do
+		else withTmpFile (toOsPath "export") $ \tmp h -> do
 			b <- catObject contentsha
 			liftIO $ L.hPut h b
 			liftIO $ hClose h
 			Remote.action $
-				storer tmp ek loc nullMeterUpdate
+				storer (fromRawFilePath (fromOsPath tmp)) ek loc nullMeterUpdate
 	let failedsend = liftIO $ modifyMVar_ allfilledvar (pure . const (AllFilled False))
 	case sent of
 		Right True -> next $ cleanupExport r db ek loc True

@@ -72,7 +72,7 @@ start fixwhat si file key = do
 
 breakHardLink :: RawFilePath -> Key -> RawFilePath -> CommandPerform
 breakHardLink file key obj = do
-	replaceWorkTreeFile (fromRawFilePath file) $ \tmp -> do
+	replaceWorkTreeFile file $ \tmp -> do
 		mode <- liftIO $ catchMaybeIO $ fileMode <$> R.getFileStatus file
 		unlessM (checkedCopyFile key obj tmp mode) $
 			giveup "unable to break hard link"

@@ -83,7 +83,7 @@ breakHardLink file key obj = do
 
 makeHardLink :: RawFilePath -> Key -> CommandPerform
 makeHardLink file key = do
-	replaceWorkTreeFile (fromRawFilePath file) $ \tmp -> do
+	replaceWorkTreeFile file $ \tmp -> do
 		mode <- liftIO $ catchMaybeIO $ fileMode <$> R.getFileStatus file
 		linkFromAnnex' key tmp mode >>= \case
 			LinkAnnexFailed -> giveup "unable to make hard link"

@@ -97,7 +97,7 @@ fixSymlink file link = do
 	mtime <- liftIO $ catchMaybeIO $ Posix.modificationTimeHiRes
 		<$> R.getSymbolicLinkStatus file
 #endif
-	replaceWorkTreeFile (fromRawFilePath file) $ \tmpfile -> do
+	replaceWorkTreeFile file $ \tmpfile -> do
 		liftIO $ R.createSymbolicLink link tmpfile
 #if ! defined(mingw32_HOST_OS)
 		liftIO $ maybe noop (\t -> touch tmpfile t False) mtime

@@ -45,6 +45,7 @@ import qualified Database.Fsck as FsckDb
 import Types.CleanupActions
 import Types.Key
 import qualified Utility.RawFilePath as R
+import qualified Utility.FileIO as F
 
 import Data.Time.Clock.POSIX
 import System.Posix.Types (EpochTime)

@@ -417,7 +418,7 @@ verifyWorkTree key file = do
 	case mk of
 		Just k | k == key -> whenM (inAnnex key) $ do
 			showNote "fixing worktree content"
-			replaceWorkTreeFile (fromRawFilePath file) $ \tmp -> do
+			replaceWorkTreeFile file $ \tmp -> do
 				mode <- liftIO $ catchMaybeIO $ fileMode <$> R.getFileStatus file
 				ifM (annexThin <$> Annex.getGitConfig)
 					( void $ linkFromAnnex' key tmp mode

@@ -678,7 +679,7 @@ recordStartTime u = do
 	f <- fromRepo (gitAnnexFsckState u)
 	createAnnexDirectory $ parentDir f
 	liftIO $ removeWhenExistsWith R.removeLink f
-	liftIO $ withFile (fromRawFilePath f) WriteMode $ \h -> do
+	liftIO $ F.withFile (toOsPath f) WriteMode $ \h -> do
 #ifndef mingw32_HOST_OS
 		t <- modificationTime <$> R.getFileStatus f
 #else

@@ -701,7 +702,7 @@ getStartTime u = do
 	liftIO $ catchDefaultIO Nothing $ do
 		timestamp <- modificationTime <$> R.getFileStatus f
 		let fromstatus = Just (realToFrac timestamp)
-		fromfile <- parsePOSIXTime <$> readFile (fromRawFilePath f)
+		fromfile <- parsePOSIXTime <$> F.readFile' (toOsPath f)
 		return $ if matchingtimestamp fromfile fromstatus
 			then Just timestamp
 			else Nothing

@@ -158,10 +158,11 @@ getFeed o url st =
 	| scrapeOption o = scrape
 	| otherwise = get
 
-	get = withTmpFile "feed" $ \tmpf h -> do
+	get = withTmpFile (toOsPath "feed") $ \tmpf h -> do
+		let tmpf' = fromRawFilePath $ fromOsPath tmpf
 		liftIO $ hClose h
-		ifM (downloadFeed url tmpf)
-			( parse tmpf
+		ifM (downloadFeed url tmpf')
+			( parse tmpf'
 			, do
 				recordfail
 				next $ feedProblem url

@@ -78,7 +78,7 @@ perform file key = do
 	breakhardlink obj = whenM (catchBoolIO $ (> 1) . linkCount <$> liftIO (R.getFileStatus obj)) $ do
 		mfc <- withTSDelta (liftIO . genInodeCache file)
 		unlessM (sameInodeCache obj (maybeToList mfc)) $ do
-			modifyContentDir obj $ replaceGitAnnexDirFile (fromRawFilePath obj) $ \tmp -> do
+			modifyContentDir obj $ replaceGitAnnexDirFile obj $ \tmp -> do
 				unlessM (checkedCopyFile key obj tmp Nothing) $
 					giveup "unable to lock file"
 				Database.Keys.storeInodeCaches key [obj]

@@ -130,7 +130,7 @@ send ups fs = do
 	-- the names of keys, and would have to be copied, which is too
 	-- expensive.
 	starting "sending files" (ActionItemOther Nothing) (SeekInput []) $
-		withTmpFile "send" $ \t h -> do
+		withTmpFile (toOsPath "send") $ \t h -> do
 			let ww = WarnUnmatchLsFiles "multicast"
 			(fs', cleanup) <- seekHelper id ww LsFiles.inRepo
 				=<< workTreeItems ww fs

@@ -163,7 +163,7 @@ send ups fs = do
 				-- only allow clients on the authlist
 				, Param "-H", Param ("@"++authlist)
 				-- pass in list of files to send
-				, Param "-i", File t
+				, Param "-i", File (fromRawFilePath (fromOsPath t))
 				] ++ ups
 			liftIO (boolSystem "uftp" ps) >>= showEndResult
 			next $ return True

@@ -178,7 +178,7 @@ receive ups = starting "receiving multicast files" ai si $ do
 	(callback, environ, statush) <- liftIO multicastCallbackEnv
 	tmpobjdir <- fromRepo gitAnnexTmpObjectDir
 	createAnnexDirectory tmpobjdir
-	withTmpDirIn (fromRawFilePath tmpobjdir) "multicast" $ \tmpdir -> withAuthList $ \authlist -> do
+	withTmpDirIn (fromRawFilePath tmpobjdir) (toOsPath "multicast") $ \tmpdir -> withAuthList $ \authlist -> do
 		abstmpdir <- liftIO $ absPath (toRawFilePath tmpdir)
 		abscallback <- liftIO $ searchPath callback
 		let ps =

@@ -245,10 +245,10 @@ uftpUID u = "0x" ++ (take 8 $ show $ sha2_256 $ B8.fromString (fromUUID u))
 withAuthList :: (FilePath -> Annex a) -> Annex a
 withAuthList a = do
 	m <- knownFingerPrints
-	withTmpFile "authlist" $ \t h -> do
+	withTmpFile (toOsPath "authlist") $ \t h -> do
 		liftIO $ hPutStr h (genAuthList m)
 		liftIO $ hClose h
-		a t
+		a (fromRawFilePath (fromOsPath t))
 
 genAuthList :: M.Map UUID Fingerprint -> String
 genAuthList = unlines . map fmt . M.toList

@@ -26,6 +26,7 @@ import Utility.FileMode
 import Utility.ThreadScheduler
 import Utility.SafeOutput
 import qualified Utility.RawFilePath as R
+import qualified Utility.FileIO as F
 import qualified Utility.MagicWormhole as Wormhole
 
 import Control.Concurrent.Async

@@ -193,12 +194,11 @@ serializePairData :: PairData -> String
 serializePairData (PairData (HalfAuthToken ha) addrs) = unlines $
 	T.unpack ha : map formatP2PAddress addrs
 
-deserializePairData :: String -> Maybe PairData
-deserializePairData s = case lines s of
-	[] -> Nothing
-	(ha:l) -> do
-		addrs <- mapM unformatP2PAddress l
-		return (PairData (HalfAuthToken (T.pack ha)) addrs)
+deserializePairData :: [String] -> Maybe PairData
+deserializePairData [] = Nothing
+deserializePairData (ha:l) = do
+	addrs <- mapM unformatP2PAddress l
+	return (PairData (HalfAuthToken (T.pack ha)) addrs)
 
 data PairingResult
 	= PairSuccess

@@ -220,7 +220,7 @@ wormholePairing remotename ouraddrs ui = do
 	-- files. Permissions of received files may allow others
 	-- to read them. So, set up a temp directory that only
 	-- we can read.
-	withTmpDir "pair" $ \tmp -> do
+	withTmpDir (toOsPath "pair") $ \tmp -> do
 		liftIO $ void $ tryIO $ modifyFileMode (toRawFilePath tmp) $
 			removeModes otherGroupModes
 		let sendf = tmp </> "send"

@@ -245,13 +245,14 @@ wormholePairing remotename ouraddrs ui = do
 		then return ReceiveFailed
 		else do
 			r <- liftIO $ tryIO $
-				readFileStrict recvf
+				map decodeBS . fileLines' <$> F.readFile'
+					(toOsPath (toRawFilePath recvf))
 			case r of
 				Left _e -> return ReceiveFailed
-				Right s -> maybe
+				Right ls -> maybe
 					(return ReceiveFailed)
 					(finishPairing 100 remotename ourhalf)
-					(deserializePairData s)
+					(deserializePairData ls)
 
 -- | Allow the peer we're pairing with to authenticate to us,
 -- using an authtoken constructed from the two HalfAuthTokens.

@@ -266,8 +266,8 @@ getAuthEnv = do
 
 findRepos :: Options -> IO [Git.Repo]
 findRepos o = do
-	files <- map toRawFilePath . concat
-		<$> mapM dirContents (directoryOption o)
+	files <- concat
+		<$> mapM (dirContents . toRawFilePath) (directoryOption o)
 	map Git.Construct.newFrom . catMaybes
 		<$> mapM Git.Construct.checkForRepo files
 
@@ -104,7 +104,7 @@ linkKey file oldkey newkey = ifM (isJust <$> isAnnexLink file)
 		st <- liftIO $ R.getFileStatus file
 		when (linkCount st > 1) $ do
 			freezeContent oldobj
-			replaceWorkTreeFile (fromRawFilePath file) $ \tmp -> do
+			replaceWorkTreeFile file $ \tmp -> do
 				unlessM (checkedCopyFile oldkey oldobj tmp Nothing) $
 					giveup "can't lock old key"
 				thawContent tmp

@@ -5,6 +5,8 @@
 - Licensed under the GNU AGPL version 3 or higher.
 -}
 
+{-# LANGUAGE OverloadedStrings #-}
+
 module Command.ResolveMerge where
 
 import Command

@@ -12,8 +14,9 @@ import qualified Git
 import Git.Sha
 import qualified Git.Branch
 import Annex.AutoMerge
+import qualified Utility.FileIO as F
 
-import qualified Data.ByteString as S
+import qualified System.FilePath.ByteString as P
 
 cmd :: Command
 cmd = command "resolvemerge" SectionPlumbing

@@ -26,10 +29,10 @@ seek = withNothing (commandAction start)
 start :: CommandStart
 start = starting "resolvemerge" (ActionItemOther Nothing) (SeekInput []) $ do
 	us <- fromMaybe nobranch <$> inRepo Git.Branch.current
-	d <- fromRawFilePath <$> fromRepo Git.localGitDir
-	let merge_head = d </> "MERGE_HEAD"
+	d <- fromRepo Git.localGitDir
+	let merge_head = toOsPath $ d P.</> "MERGE_HEAD"
 	them <- fromMaybe (giveup nomergehead) . extractSha
-		<$> liftIO (S.readFile merge_head)
+		<$> liftIO (F.readFile' merge_head)
 	ifM (resolveMerge (Just us) them False)
 		( do
 			void $ commitResolvedMerge Git.Branch.ManualCommit

@@ -32,6 +32,7 @@ import Annex.SpecialRemote.Config (exportTreeField)
 import Remote.Helper.Chunked
 import Remote.Helper.Encryptable (encryptionField, highRandomQualityField)
 import Git.Types
+import qualified Utility.FileIO as F
 
 import Test.Tasty
 import Test.Tasty.Runners

@@ -255,18 +256,18 @@ test runannex mkr mkk =
 		get r k
 	, check "fsck downloaded object" fsck
 	, check "retrieveKeyFile resume from 0" $ \r k -> do
-		tmp <- fromRawFilePath <$> prepTmp k
-		liftIO $ writeFile tmp ""
+		tmp <- toOsPath <$> prepTmp k
+		liftIO $ F.writeFile' tmp mempty
 		lockContentForRemoval k noop removeAnnex
 		get r k
 	, check "fsck downloaded object" fsck
 	, check "retrieveKeyFile resume from 33%" $ \r k -> do
 		loc <- fromRawFilePath <$> Annex.calcRepo (gitAnnexLocation k)
-		tmp <- fromRawFilePath <$> prepTmp k
+		tmp <- toOsPath <$> prepTmp k
 		partial <- liftIO $ bracket (openBinaryFile loc ReadMode) hClose $ \h -> do
 			sz <- hFileSize h
 			L.hGet h $ fromInteger $ sz `div` 3
-		liftIO $ L.writeFile tmp partial
+		liftIO $ F.writeFile tmp partial
 		lockContentForRemoval k noop removeAnnex
 		get r k
 	, check "fsck downloaded object" fsck

@@ -355,11 +356,11 @@ testExportTree runannex mkr mkk1 mkk2 =
 	storeexport ea k = do
 		loc <- fromRawFilePath <$> Annex.calcRepo (gitAnnexLocation k)
 		Remote.storeExport ea loc k testexportlocation nullMeterUpdate
-	retrieveexport ea k = withTmpFile "exported" $ \tmp h -> do
+	retrieveexport ea k = withTmpFile (toOsPath "exported") $ \tmp h -> do
 		liftIO $ hClose h
-		tryNonAsync (Remote.retrieveExport ea k testexportlocation tmp nullMeterUpdate) >>= \case
+		tryNonAsync (Remote.retrieveExport ea k testexportlocation (fromRawFilePath (fromOsPath tmp)) nullMeterUpdate) >>= \case
 			Left _ -> return False
-			Right v -> verifyKeyContentPostRetrieval RetrievalAllKeysSecure AlwaysVerify v k (toRawFilePath tmp)
+			Right v -> verifyKeyContentPostRetrieval RetrievalAllKeysSecure AlwaysVerify v k (fromOsPath tmp)
 	checkpresentexport ea k = Remote.checkPresentExport ea k testexportlocation
 	removeexport ea k = Remote.removeExport ea k testexportlocation
 	removeexportdirectory ea = case Remote.removeExportDirectory ea of

@@ -429,21 +430,21 @@ keySizes base fast = filter want
 	| otherwise = sz > 0
 
 randKey :: Int -> Annex Key
-randKey sz = withTmpFile "randkey" $ \f h -> do
+randKey sz = withTmpFile (toOsPath "randkey") $ \f h -> do
 	gen <- liftIO (newGenIO :: IO SystemRandom)
 	case genBytes sz gen of
 		Left e -> giveup $ "failed to generate random key: " ++ show e
 		Right (rand, _) -> liftIO $ B.hPut h rand
 	liftIO $ hClose h
 	let ks = KeySource
-		{ keyFilename = toRawFilePath f
-		, contentLocation = toRawFilePath f
+		{ keyFilename = fromOsPath f
+		, contentLocation = fromOsPath f
 		, inodeCache = Nothing
 		}
 	k <- case Types.Backend.genKey Backend.Hash.testKeyBackend of
 		Just a -> a ks nullMeterUpdate
 		Nothing -> giveup "failed to generate random key (backend problem)"
-	_ <- moveAnnex k (AssociatedFile Nothing) (toRawFilePath f)
+	_ <- moveAnnex k (AssociatedFile Nothing) (fromOsPath f)
 	return k
 
 getReadonlyKey :: Remote -> RawFilePath -> Annex Key

@@ -102,14 +102,14 @@ startCheckIncomplete recordnotok file key =
 removeAnnexDir :: CommandCleanup -> CommandStart
 removeAnnexDir recordok = do
 	Annex.Queue.flush
-	annexdir <- fromRawFilePath <$> fromRepo gitAnnexDir
+	annexdir <- fromRepo gitAnnexDir
 	annexobjectdir <- fromRepo gitAnnexObjectDir
 	starting ("uninit objects") (ActionItemOther Nothing) (SeekInput []) $ do
 		leftovers <- removeUnannexed =<< listKeys InAnnex
 		prepareRemoveAnnexDir annexdir
 		if null leftovers
 			then do
-				liftIO $ removeDirectoryRecursive annexdir
+				liftIO $ removeDirectoryRecursive (fromRawFilePath annexdir)
 				next recordok
 			else giveup $ unlines
 				[ "Not fully uninitialized"

@@ -134,15 +134,15 @@ removeAnnexDir recordok = do
 -
 - Also closes sqlite databases that might be in the directory,
 - to avoid later failure to write any cached changes to them. -}
-prepareRemoveAnnexDir :: FilePath -> Annex ()
+prepareRemoveAnnexDir :: RawFilePath -> Annex ()
 prepareRemoveAnnexDir annexdir = do
 	Database.Keys.closeDb
 	liftIO $ prepareRemoveAnnexDir' annexdir
 
-prepareRemoveAnnexDir' :: FilePath -> IO ()
+prepareRemoveAnnexDir' :: RawFilePath -> IO ()
 prepareRemoveAnnexDir' annexdir =
 	emptyWhenDoesNotExist (dirTreeRecursiveSkipping (const False) annexdir)
-		>>= mapM_ (void . tryIO . allowWrite . toRawFilePath)
+		>>= mapM_ (void . tryIO . allowWrite)
 
 {- Keys that were moved out of the annex have a hard link still in the
 - annex, with > 1 link count, and those can be removed.

@@ -51,7 +51,7 @@ start si file key = ifM (isJust <$> isAnnexLink file)
 perform :: RawFilePath -> Key -> CommandPerform
 perform dest key = do
 	destmode <- liftIO $ catchMaybeIO $ fileMode <$> R.getFileStatus dest
-	destic <- replaceWorkTreeFile (fromRawFilePath dest) $ \tmp -> do
+	destic <- replaceWorkTreeFile dest $ \tmp -> do
 		ifM (inAnnex key)
 			( do
 				r <- linkFromAnnex' key tmp destmode

@@ -35,6 +35,7 @@ import Remote
 import Git.Types (fromConfigKey, fromConfigValue)
 import Utility.DataUnits
 import qualified Utility.RawFilePath as R
+import qualified Utility.FileIO as F
 
 cmd :: Command
 cmd = command "vicfg" SectionSetup "edit configuration in git-annex branch"

@@ -60,7 +61,10 @@ vicfg curcfg f = do
 	-- Allow EDITOR to be processed by the shell, so it can contain options.
 	unlessM (liftIO $ boolSystem "sh" [Param "-c", Param $ unwords [vi, shellEscape f]]) $
 		giveup $ vi ++ " exited nonzero; aborting"
-	r <- parseCfg (defCfg curcfg) <$> liftIO (readFileStrict f)
+	r <- liftIO $ parseCfg (defCfg curcfg)
+		. map decodeBS
+		. fileLines'
+		<$> F.readFile' (toOsPath (toRawFilePath f))
 	liftIO $ removeWhenExistsWith R.removeLink (toRawFilePath f)
 	case r of
 		Left s -> do

@@ -278,8 +282,8 @@ lcom = map (\l -> if "#" `isPrefixOf` l then l else '#' : l)
 
 {- If there's a parse error, returns a new version of the file,
 - with the problem lines noted. -}
-parseCfg :: Cfg -> String -> Either String Cfg
-parseCfg defcfg = go [] defcfg . lines
+parseCfg :: Cfg -> [String] -> Either String Cfg
+parseCfg defcfg = go [] defcfg
   where
 	go c cfg []
 		| null (mapMaybe fst c) = Right cfg