Merge branch 'master' into sqlite

commit d4661959de
152 changed files with 2443 additions and 462 deletions
Annex.hs (2 changed lines)
@@ -114,7 +114,7 @@ data AnnexState = AnnexState
 	, fast :: Bool
 	, daemon :: Bool
 	, branchstate :: BranchState
-	, repoqueue :: Maybe Git.Queue.Queue
+	, repoqueue :: Maybe (Git.Queue.Queue Annex)
 	, catfilehandles :: M.Map FilePath CatFileHandle
 	, hashobjecthandle :: Maybe HashObjectHandle
 	, checkattrhandle :: Maybe CheckAttrHandle

@@ -224,8 +224,9 @@ adjustToCrippledFileSystem :: Annex ()
 adjustToCrippledFileSystem = do
 	warning "Entering an adjusted branch where files are unlocked as this filesystem does not support locked files."
 	checkVersionSupported
-	whenM (isNothing <$> inRepo Git.Branch.current) $
-		void $ inRepo $ Git.Branch.commitCommand Git.Branch.AutomaticCommit
+	whenM (isNothing <$> inRepo Git.Branch.current) $ do
+		cmode <- annexCommitMode <$> Annex.getGitConfig
+		void $ inRepo $ Git.Branch.commitCommand cmode
 		[ Param "--quiet"
 		, Param "--allow-empty"
 		, Param "-m"

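This hunk shows the pattern repeated throughout the merge: a hard-coded Git.Branch.AutomaticCommit is replaced by a commit mode read from git-annex's configuration, so settings such as the annex.allowsign option mentioned in the CHANGELOG below can take effect everywhere a commit is made. A minimal sketch of the idiom, assuming git-annex's Annex monad and the helpers exactly as they appear in the hunks above (the message parameter is illustrative):

    -- Sketch only; assumes Annex.getGitConfig, annexCommitMode, inRepo,
    -- and Git.Branch.commitCommand as used in the diff above.
    commitEmpty :: String -> Annex ()
    commitEmpty msg = do
        -- look up annex.commitmode instead of hard-coding AutomaticCommit
        cmode <- annexCommitMode <$> Annex.getGitConfig
        void $ inRepo $ Git.Branch.commitCommand cmode
            [ Param "--quiet"
            , Param "--allow-empty"
            , Param "-m", Param msg
            ]
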
@@ -310,12 +311,16 @@ commitAdjustedTree' :: Sha -> BasisBranch -> [Ref] -> Annex Sha
 commitAdjustedTree' treesha (BasisBranch basis) parents =
 	go =<< catCommit basis
   where
-	go Nothing = inRepo mkcommit
-	go (Just basiscommit) = inRepo $ commitWithMetaData
-		(commitAuthorMetaData basiscommit)
-		(commitCommitterMetaData basiscommit)
-		mkcommit
-	mkcommit = Git.Branch.commitTree Git.Branch.AutomaticCommit
+	go Nothing = do
+		cmode <- annexCommitMode <$> Annex.getGitConfig
+		inRepo $ mkcommit cmode
+	go (Just basiscommit) = do
+		cmode <- annexCommitMode <$> Annex.getGitConfig
+		inRepo $ commitWithMetaData
+			(commitAuthorMetaData basiscommit)
+			(commitCommitterMetaData basiscommit)
+			(mkcommit cmode)
+	mkcommit cmode = Git.Branch.commitTree cmode
 		adjustedBranchCommitMessage parents treesha

 {- This message should never be changed. -}

@@ -444,7 +449,8 @@ mergeToAdjustedBranch tomerge (origbranch, adj) mergeconfig canresolvemerge comm
 	reparent adjtree adjmergecommit (Just currentcommit) = do
 		if (commitTree currentcommit /= adjtree)
 			then do
-				c <- inRepo $ Git.Branch.commitTree Git.Branch.AutomaticCommit
+				cmode <- annexCommitMode <$> Annex.getGitConfig
+				c <- inRepo $ Git.Branch.commitTree cmode
 					("Merged " ++ fromRef tomerge) [adjmergecommit]
 					(commitTree currentcommit)
 				inRepo $ Git.Branch.update "updating adjusted branch" currbranch c

@@ -534,12 +540,14 @@ reverseAdjustedCommit commitparent adj (csha, basiscommit) origbranch
 	| length (commitParent basiscommit) > 1 = return $
 		Left $ "unable to propigate merge commit " ++ show csha ++ " back to " ++ show origbranch
 	| otherwise = do
+		cmode <- annexCommitMode <$> Annex.getGitConfig
 		treesha <- reverseAdjustedTree commitparent adj csha
 		revadjcommit <- inRepo $ commitWithMetaData
 			(commitAuthorMetaData basiscommit)
 			(commitCommitterMetaData basiscommit) $
-			Git.Branch.commitTree Git.Branch.AutomaticCommit
-				(commitMessage basiscommit) [commitparent] treesha
+			Git.Branch.commitTree cmode
+				(commitMessage basiscommit)
+				[commitparent] treesha
 		return (Right revadjcommit)

 {- Adjusts the tree of the basis, changing only the files that the

@@ -109,8 +109,9 @@ getBranch = maybe (hasOrigin >>= go >>= use) return =<< branchsha
 		[Param "branch", Param $ fromRef name, Param $ fromRef originname]
 		fromMaybe (error $ "failed to create " ++ fromRef name)
 			<$> branchsha
-	go False = withIndex' True $
-		inRepo $ Git.Branch.commitAlways Git.Branch.AutomaticCommit "branch created" fullname []
+	go False = withIndex' True $ do
+		cmode <- annexCommitMode <$> Annex.getGitConfig
+		inRepo $ Git.Branch.commitAlways cmode "branch created" fullname []
 	use sha = do
 		setIndexSha sha
 		return sha

@@ -317,7 +318,8 @@ commitIndex jl branchref message parents = do
 commitIndex' :: JournalLocked -> Git.Ref -> String -> String -> Integer -> [Git.Ref] -> Annex ()
 commitIndex' jl branchref message basemessage retrynum parents = do
 	updateIndex jl branchref
-	committedref <- inRepo $ Git.Branch.commitAlways Git.Branch.AutomaticCommit message fullname parents
+	cmode <- annexCommitMode <$> Annex.getGitConfig
+	committedref <- inRepo $ Git.Branch.commitAlways cmode message fullname parents
 	setIndexSha committedref
 	parentrefs <- commitparents <$> catObject committedref
 	when (racedetected branchref parentrefs) $

@@ -551,7 +553,8 @@ performTransitionsLocked jl ts neednewlocalbranch transitionedrefs = do
 	Annex.Queue.flush
 	if neednewlocalbranch
 		then do
-			committedref <- inRepo $ Git.Branch.commitAlways Git.Branch.AutomaticCommit message fullname transitionedrefs
+			cmode <- annexCommitMode <$> Annex.getGitConfig
+			committedref <- inRepo $ Git.Branch.commitAlways cmode message fullname transitionedrefs
 			setIndexSha committedref
 		else do
 			ref <- getBranch

@@ -657,9 +660,10 @@ rememberTreeish treeish graftpoint = lockJournal $ \jl -> do
 	origtree <- fromMaybe (giveup "unable to determine git-annex branch tree") <$>
 		inRepo (Git.Ref.tree branchref)
 	addedt <- inRepo $ Git.Tree.graftTree treeish graftpoint origtree
-	c <- inRepo $ Git.Branch.commitTree Git.Branch.AutomaticCommit
+	cmode <- annexCommitMode <$> Annex.getGitConfig
+	c <- inRepo $ Git.Branch.commitTree cmode
 		"graft" [branchref] addedt
-	c' <- inRepo $ Git.Branch.commitTree Git.Branch.AutomaticCommit
+	c' <- inRepo $ Git.Branch.commitTree cmode
 		"graft cleanup" [c] origtree
 	inRepo $ Git.Branch.update' fullname c'
 	-- The tree in c' is the same as the tree in branchref,

@@ -90,10 +90,20 @@ enteringStage newstage a = Annex.getState Annex.workers >>= \case
 	Nothing -> a
 	Just tv -> do
 		mytid <- liftIO myThreadId
-		let set = changeStageTo mytid tv newstage
-		let restore = maybe noop (void . changeStageTo mytid tv)
+		let set = changeStageTo mytid tv (const newstage)
+		let restore = maybe noop (void . changeStageTo mytid tv . const)
 		bracket set restore (const a)

+{- Transition the current thread to the initial stage.
+ - This is done once the thread is ready to begin work.
+ -}
+enteringInitialStage :: Annex ()
+enteringInitialStage = Annex.getState Annex.workers >>= \case
+	Nothing -> noop
+	Just tv -> do
+		mytid <- liftIO myThreadId
+		void $ changeStageTo mytid tv initialStage
+
 {- This needs to leave the WorkerPool with the same number of
  - idle and active threads, and with the same number of threads for each
  - WorkerStage. So, all it can do is swap the WorkerStage of our thread's

@@ -110,14 +120,15 @@ enteringStage newstage a = Annex.getState Annex.workers >>= \case
 - in the pool than spareVals. That does not prevent other threads that call
 - this from using them though, so it's fine.
 -}
-changeStageTo :: ThreadId -> TMVar (WorkerPool AnnexState) -> WorkerStage -> Annex (Maybe WorkerStage)
-changeStageTo mytid tv newstage = liftIO $
+changeStageTo :: ThreadId -> TMVar (WorkerPool AnnexState) -> (UsedStages -> WorkerStage) -> Annex (Maybe WorkerStage)
+changeStageTo mytid tv getnewstage = liftIO $
 	replaceidle >>= maybe
 		(return Nothing)
 		(either waitidle (return . Just))
   where
 	replaceidle = atomically $ do
 		pool <- takeTMVar tv
+		let newstage = getnewstage (usedStages pool)
 		let notchanging = do
 			putTMVar tv pool
 			return Nothing

@@ -128,7 +139,7 @@ changeStageTo mytid tv getnewstage = liftIO $
 			Nothing -> do
 				putTMVar tv $
 					addWorkerPool (IdleWorker oldstage) pool'
-				return $ Just $ Left (myaid, oldstage)
+				return $ Just $ Left (myaid, newstage, oldstage)
 			Just pool'' -> do
 				-- optimisation
 				putTMVar tv $

@@ -139,27 +150,26 @@ changeStageTo mytid tv getnewstage = liftIO $
 				_ -> notchanging
 			else notchanging

-	waitidle (myaid, oldstage) = atomically $ do
+	waitidle (myaid, newstage, oldstage) = atomically $ do
 		pool <- waitIdleWorkerSlot newstage =<< takeTMVar tv
 		putTMVar tv $ addWorkerPool (ActiveWorker myaid newstage) pool
 		return (Just oldstage)

--- | Waits until there's an idle worker in the worker pool
--- for its initial stage, removes it from the pool, and returns its state.
+-- | Waits until there's an idle StartStage worker in the worker pool,
+-- removes it from the pool, and returns its state.
 --
 -- If the worker pool is not already allocated, returns Nothing.
-waitInitialWorkerSlot :: TMVar (WorkerPool Annex.AnnexState) -> STM (Maybe (Annex.AnnexState, WorkerStage))
-waitInitialWorkerSlot tv = do
+waitStartWorkerSlot :: TMVar (WorkerPool Annex.AnnexState) -> STM (Maybe (Annex.AnnexState, WorkerStage))
+waitStartWorkerSlot tv = do
 	pool <- takeTMVar tv
-	let stage = initialStage (usedStages pool)
-	st <- go stage pool
-	return $ Just (st, stage)
+	st <- go pool
+	return $ Just (st, StartStage)
   where
-	go wantstage pool = case spareVals pool of
+	go pool = case spareVals pool of
 		[] -> retry
 		(v:vs) -> do
 			let pool' = pool { spareVals = vs }
-			putTMVar tv =<< waitIdleWorkerSlot wantstage pool'
+			putTMVar tv =<< waitIdleWorkerSlot StartStage pool'
 			return v

 waitIdleWorkerSlot :: WorkerStage -> WorkerPool Annex.AnnexState -> STM (WorkerPool Annex.AnnexState)

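The signature change is the crux of this section: changeStageTo now takes a selector of type UsedStages -> WorkerStage rather than a fixed stage, since the stage to enter can depend on how the worker pool was allocated. A hedged sketch of the two ways it ends up being called in the hunks above (initialStage :: UsedStages -> WorkerStage is assumed from its use in the old waitInitialWorkerSlot):

    -- Sketch: a fixed stage ignores the pool's UsedStages via const,
    -- while entering the initial stage consults it.
    enterFixed :: ThreadId -> TMVar (WorkerPool AnnexState) -> WorkerStage -> Annex (Maybe WorkerStage)
    enterFixed mytid tv newstage = changeStageTo mytid tv (const newstage)

    enterInitial :: ThreadId -> TMVar (WorkerPool AnnexState) -> Annex (Maybe WorkerStage)
    enterInitial mytid tv = changeStageTo mytid tv initialStage
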
@@ -776,7 +776,7 @@ downloadUrl k p urls file =
 	-- download command is used.
 	meteredFile file (Just p) k $
 		Url.withUrlOptions $ \uo ->
-			liftIO $ anyM (\u -> Url.download p u file uo) urls
+			anyM (\u -> Url.download p u file uo) urls

 {- Copies a key's content, when present, to a temp file.
  - This is used to speed up some rsyncs. -}

@@ -108,9 +108,9 @@ initialize' mversion = checkCanInitialize $ do
 	unlessM (isJust <$> getVersion) $
 		setVersion (fromMaybe defaultVersion mversion)
 	configureSmudgeFilter
-	showSideAction "scanning for unlocked files"
-	scanUnlockedFiles
 	unlessM isBareRepo $ do
+		showSideAction "scanning for unlocked files"
+		scanUnlockedFiles
 		hookWrite postCheckoutHook
 		hookWrite postMergeHook
 	AdjustedBranch.checkAdjustedClone >>= \case

@@ -192,12 +192,13 @@ restagePointerFile (Restage True) f orig = withTSDelta $ \tsd -> do
 	-- on all still-unmodified files, using a copy of the index file,
 	-- to bypass the lock. Then replace the old index file with the new
 	-- updated index file.
+	runner :: Git.Queue.InternalActionRunner Annex
 	runner = Git.Queue.InternalActionRunner "restagePointerFile" $ \r l -> do
-		realindex <- Git.Index.currentIndexFile r
+		realindex <- liftIO $ Git.Index.currentIndexFile r
 		let lock = Git.Index.indexFileLock realindex
-		    lockindex = catchMaybeIO $ Git.LockFile.openLock' lock
-		    unlockindex = maybe noop Git.LockFile.closeLock
-		    showwarning = warningIO $ unableToRestage Nothing
+		    lockindex = liftIO $ catchMaybeIO $ Git.LockFile.openLock' lock
+		    unlockindex = liftIO . maybe noop Git.LockFile.closeLock
+		    showwarning = warning $ unableToRestage Nothing
 		go Nothing = showwarning
 		go (Just _) = withTmpDirIn (Git.localGitDir r) "annexindex" $ \tmpdir -> do
 			let tmpindex = tmpdir </> "index"

@@ -216,7 +217,7 @@ restagePointerFile (Restage True) f orig = withTSDelta $ \tsd -> do
 			let replaceindex = catchBoolIO $ do
 				moveFile tmpindex realindex
 				return True
-			ok <- createLinkOrCopy realindex tmpindex
+			ok <- liftIO $ createLinkOrCopy realindex tmpindex
 				<&&> updatetmpindex
 				<&&> replaceindex
 			unless ok showwarning

@@ -28,24 +28,24 @@ import qualified Git.UpdateIndex
 addCommand :: String -> [CommandParam] -> [FilePath] -> Annex ()
 addCommand command params files = do
 	q <- get
-	store <=< flushWhenFull <=< inRepo $
-		Git.Queue.addCommand command params files q
+	store =<< flushWhenFull =<<
+		(Git.Queue.addCommand command params files q =<< gitRepo)

-addInternalAction :: Git.Queue.InternalActionRunner -> [(FilePath, IO Bool)] -> Annex ()
+addInternalAction :: Git.Queue.InternalActionRunner Annex -> [(FilePath, IO Bool)] -> Annex ()
 addInternalAction runner files = do
 	q <- get
-	store <=< flushWhenFull <=< inRepo $
-		Git.Queue.addInternalAction runner files q
+	store =<< flushWhenFull =<<
+		(Git.Queue.addInternalAction runner files q =<< gitRepo)

 {- Adds an update-index stream to the queue. -}
 addUpdateIndex :: Git.UpdateIndex.Streamer -> Annex ()
 addUpdateIndex streamer = do
 	q <- get
-	store <=< flushWhenFull <=< inRepo $
-		Git.Queue.addUpdateIndex streamer q
+	store =<< flushWhenFull =<<
+		(Git.Queue.addUpdateIndex streamer q =<< gitRepo)

 {- Runs the queue if it is full. -}
-flushWhenFull :: Git.Queue.Queue -> Annex Git.Queue.Queue
+flushWhenFull :: Git.Queue.Queue Annex -> Annex (Git.Queue.Queue Annex)
 flushWhenFull q
 	| Git.Queue.full q = flush' q
 	| otherwise = return q

@@ -64,25 +64,25 @@ flush = do
 - But, flushing two queues at the same time could lead to failures due to
 - git locking files. So, only one queue is allowed to flush at a time.
 -}
-flush' :: Git.Queue.Queue -> Annex Git.Queue.Queue
+flush' :: Git.Queue.Queue Annex -> Annex (Git.Queue.Queue Annex)
 flush' q = withExclusiveLock gitAnnexGitQueueLock $ do
 	showStoringStateAction
-	inRepo $ Git.Queue.flush q
+	Git.Queue.flush q =<< gitRepo

 {- Gets the size of the queue. -}
 size :: Annex Int
 size = Git.Queue.size <$> get

-get :: Annex Git.Queue.Queue
+get :: Annex (Git.Queue.Queue Annex)
 get = maybe new return =<< getState repoqueue

-new :: Annex Git.Queue.Queue
+new :: Annex (Git.Queue.Queue Annex)
 new = do
 	q <- Git.Queue.new . annexQueueSize <$> getGitConfig
 	store q
 	return q

-store :: Git.Queue.Queue -> Annex ()
+store :: Git.Queue.Queue Annex -> Annex ()
 store q = changeState $ \s -> s { repoqueue = Just q }

 mergeFrom :: AnnexState -> Annex ()

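The rewrite from store <=< flushWhenFull <=< inRepo to the =<< chain follows from Git.Queue's type change later in this diff: the queue operations now run in any MonadIO monad (here Annex) instead of IO, so they can no longer be handed to inRepo, which expects a Repo -> IO a function. A sketch of the new shape, with gitRepo :: Annex Repo supplying the repository:

    -- Sketch: the whole pipeline stays inside Annex instead of
    -- dropping into IO via inRepo.
    queueCommand :: String -> [CommandParam] -> [FilePath] -> Annex ()
    queueCommand subcommand params files = do
        q <- get
        store =<< flushWhenFull
            =<< (Git.Queue.addCommand subcommand params files q =<< gitRepo)
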
@@ -17,6 +17,7 @@ module Annex.RemoteTrackingBranch

 import Annex.Common
 import Annex.CatFile
+import qualified Annex
 import Git.Types
 import qualified Git.Ref
 import qualified Git.Branch

@@ -72,9 +73,10 @@ makeRemoteTrackingBranchMergeCommit tb commitsha =
 		_ -> return commitsha

 makeRemoteTrackingBranchMergeCommit' :: Sha -> Sha -> Sha -> Annex Sha
-makeRemoteTrackingBranchMergeCommit' commitsha importedhistory treesha =
+makeRemoteTrackingBranchMergeCommit' commitsha importedhistory treesha = do
+	cmode <- annexCommitMode <$> Annex.getGitConfig
 	inRepo $ Git.Branch.commitTree
-		Git.Branch.AutomaticCommit
+		cmode
 		"remote tracking branch"
 		[commitsha, importedhistory]
 		treesha

@@ -34,21 +34,9 @@ findExisting name = do
 	t <- trustMap
 	headMaybe
 		. sortBy (comparing $ \(u, _, _) -> Down $ M.lookup u t)
-		. findByName name
+		. findByRemoteConfig (\c -> lookupName c == Just name)
 		<$> Logs.Remote.readRemoteLog

-findByName :: RemoteName -> M.Map UUID RemoteConfig -> [(UUID, RemoteConfig, Maybe (ConfigFrom UUID))]
-findByName n = map sameasuuid . filter (matching . snd) . M.toList
-  where
-	matching c = case lookupName c of
-		Nothing -> False
-		Just n'
-			| n' == n -> True
-			| otherwise -> False
-	sameasuuid (u, c) = case M.lookup sameasUUIDField c of
-		Nothing -> (u, c, Nothing)
-		Just u' -> (toUUID u', c, Just (ConfigFrom u))
-
 newConfig
 	:: RemoteName
 	-> Maybe (Sameas UUID)

@@ -101,3 +101,11 @@ removeSameasInherited :: RemoteConfig -> RemoteConfig
 removeSameasInherited c = case M.lookup sameasUUIDField c of
 	Nothing -> c
 	Just _ -> M.withoutKeys c sameasInherits
+
+{- Finds remote uuids with matching RemoteConfig. -}
+findByRemoteConfig :: (RemoteConfig -> Bool) -> M.Map UUID RemoteConfig -> [(UUID, RemoteConfig, Maybe (ConfigFrom UUID))]
+findByRemoteConfig matching = map sameasuuid . filter (matching . snd) . M.toList
+  where
+	sameasuuid (u, c) = case M.lookup sameasUUIDField c of
+		Nothing -> (u, c, Nothing)
+		Just u' -> (toUUID u', c, Just (ConfigFrom u))

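findByName is generalized into findByRemoteConfig, which takes an arbitrary predicate over a remote's stored RemoteConfig; name lookup becomes one instance of it. A hypothetical second instance, matching on a stored url the way the git-lfs auto-enabling later in this diff needs to (the "url" key name here is illustrative, not confirmed by this diff):

    -- Sketch: two predicates over the same generalized lookup.
    byName :: RemoteName -> M.Map UUID RemoteConfig -> [(UUID, RemoteConfig, Maybe (ConfigFrom UUID))]
    byName name = findByRemoteConfig (\c -> lookupName c == Just name)

    byUrl :: String -> M.Map UUID RemoteConfig -> [(UUID, RemoteConfig, Maybe (ConfigFrom UUID))]
    byUrl u = findByRemoteConfig (\c -> M.lookup "url" c == Just u)
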
Annex/Url.hs (47 changed lines)
@@ -1,24 +1,39 @@
 {- Url downloading, with git-annex user agent and configured http
  - headers, security restrictions, etc.
  -
- - Copyright 2013-2018 Joey Hess <id@joeyh.name>
+ - Copyright 2013-2019 Joey Hess <id@joeyh.name>
  -
  - Licensed under the GNU AGPL version 3 or higher.
  -}

 module Annex.Url (
-	module U,
 	withUrlOptions,
 	getUrlOptions,
 	getUserAgent,
 	ipAddressesUnlimited,
+	checkBoth,
+	download,
+	exists,
+	getUrlInfo,
+	U.downloadQuiet,
+	U.URLString,
+	U.UrlOptions(..),
+	U.UrlInfo(..),
+	U.sinkResponseFile,
+	U.matchStatusCodeException,
+	U.downloadConduit,
+	U.downloadPartial,
+	U.parseURIRelaxed,
+	U.allowedScheme,
+	U.assumeUrlExists,
 ) where

 import Annex.Common
 import qualified Annex
-import Utility.Url as U
+import qualified Utility.Url as U
 import Utility.IPAddress
 import Utility.HttpManagerRestricted
+import Utility.Metered
 import qualified BuildInfo

 import Network.Socket

@@ -43,7 +58,7 @@ getUrlOptions = Annex.getState Annex.urloptions >>= \case
   where
 	mk = do
 		(urldownloader, manager) <- checkallowedaddr
-		mkUrlOptions
+		U.mkUrlOptions
 			<$> (Just <$> getUserAgent)
 			<*> headers
 			<*> pure urldownloader

@@ -108,3 +123,27 @@ ipAddressesUnlimited =

 withUrlOptions :: (U.UrlOptions -> Annex a) -> Annex a
 withUrlOptions a = a =<< getUrlOptions
+
+checkBoth :: U.URLString -> Maybe Integer -> U.UrlOptions -> Annex Bool
+checkBoth url expected_size uo =
+	liftIO (U.checkBoth url expected_size uo) >>= \case
+		Right r -> return r
+		Left err -> warning err >> return False
+
+download :: MeterUpdate -> U.URLString -> FilePath -> U.UrlOptions -> Annex Bool
+download meterupdate url file uo =
+	liftIO (U.download meterupdate url file uo) >>= \case
+		Right () -> return True
+		Left err -> warning err >> return False
+
+exists :: U.URLString -> U.UrlOptions -> Annex Bool
+exists url uo = liftIO (U.exists url uo) >>= \case
+	Right b -> return b
+	Left err -> warning err >> return False
+
+getUrlInfo :: U.URLString -> U.UrlOptions -> Annex U.UrlInfo
+getUrlInfo url uo = liftIO (U.getUrlInfo url uo) >>= \case
+	Right i -> return i
+	Left err -> do
+		warning err
+		return $ U.UrlInfo False Nothing Nothing

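These new wrappers absorb the Either String errors that Utility.Url's operations now return, converting a Left into a warning (which --json-error-messages can capture, per the CHANGELOG below) plus a Bool or default value. Callers can then drop their liftIO plumbing, as the later hunks in the addurl and importfeed commands show; a before/after sketch under those assumptions:

    -- Before (assumed): Utility.Url.getUrlInfo ran in IO.
    -- urlinfo <- Url.withUrlOptions $ liftIO . Url.getUrlInfo url
    -- After: the Annex-level wrapper handles IO and the Left case itself.
    fetchInfo :: U.URLString -> Annex U.UrlInfo
    fetchInfo url = withUrlOptions $ getUrlInfo url
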
@@ -12,6 +12,7 @@ import Annex.View.ViewedFile
 import Types.View
 import Types.MetaData
 import Annex.MetaData
+import qualified Annex
 import qualified Git
 import qualified Git.DiffTree as DiffTree
 import qualified Git.Branch

@@ -418,7 +419,8 @@ withViewIndex a = do
 genViewBranch :: View -> Annex Git.Branch
 genViewBranch view = withViewIndex $ do
 	let branch = branchView view
-	void $ inRepo $ Git.Branch.commit Git.Branch.AutomaticCommit True (fromRef branch) branch []
+	cmode <- annexCommitMode <$> Annex.getGitConfig
+	void $ inRepo $ Git.Branch.commit cmode True (fromRef branch) branch []
 	return branch

 withCurrentView :: (View -> Annex a) -> Annex a

@@ -21,6 +21,7 @@ import qualified Git.LsTree
 import qualified Git.Types
 import qualified Database.Keys
 import qualified Database.Keys.SQL
+import Config

 {- Looks up the key corresponding to an annexed file in the work tree,
  - by examining what the file links to.

@@ -74,7 +75,7 @@ ifAnnexed file yes no = maybe no yes =<< lookupFile file
 - as-is.
 -}
 scanUnlockedFiles :: Annex ()
-scanUnlockedFiles = whenM (inRepo Git.Ref.headExists) $ do
+scanUnlockedFiles = whenM (inRepo Git.Ref.headExists <&&> not <$> isBareRepo) $ do
 	Database.Keys.runWriter $
 		liftIO . Database.Keys.SQL.dropAllAssociatedFiles
 	(l, cleanup) <- inRepo $ Git.LsTree.lsTree Git.LsTree.LsTreeRecursive Git.Ref.headRef

@@ -18,7 +18,6 @@ import Annex.Common
 import qualified Annex
 import Annex.Content
 import Annex.Url
-import Utility.Url (URLString)
 import Utility.DiskFree
 import Utility.HtmlDetect
 import Utility.Process.Transcript

@@ -53,8 +53,9 @@ initRepo True primary_assistant_repo dir desc mgroup = inDir dir $ do
 	initRepo' desc mgroup
 	{- Initialize the master branch, so things that expect
 	 - to have it will work, before any files are added. -}
-	unlessM (Git.Config.isBare <$> gitRepo) $
-		void $ inRepo $ Git.Branch.commitCommand Git.Branch.AutomaticCommit
+	unlessM (Git.Config.isBare <$> gitRepo) $ do
+		cmode <- annexCommitMode <$> Annex.getGitConfig
+		void $ inRepo $ Git.Branch.commitCommand cmode
 		[ Param "--quiet"
 		, Param "--allow-empty"
 		, Param "-m"

@@ -95,7 +95,9 @@ newAssistantUrl repo = do
 - warp-tls listens to http, in order to show an error page, so this works.
 -}
 assistantListening :: URLString -> IO Bool
-assistantListening url = catchBoolIO $ exists url' =<< defUrlOptions
+assistantListening url = catchBoolIO $ do
+	uo <- defUrlOptions
+	(== Right True) <$> exists url' uo
   where
 	url' = case parseURI url of
 		Nothing -> url

@@ -36,7 +36,6 @@ import qualified Annex
 import Utility.InodeCache
 import qualified Database.Keys
 import qualified Command.Sync
-import qualified Git.Branch
 import Utility.Tuple
 import Utility.Metered

@@ -231,7 +230,8 @@ commitStaged msg = do
 	case v of
 		Left _ -> return False
 		Right _ -> do
-			ok <- Command.Sync.commitStaged Git.Branch.AutomaticCommit msg
+			cmode <- annexCommitMode <$> Annex.getGitConfig
+			ok <- Command.Sync.commitStaged cmode msg
 			when ok $
 				Command.Sync.updateBranches =<< getCurrentBranch
 			return ok

@@ -14,6 +14,7 @@ import Assistant.Sync
 import Utility.DirWatcher
 import Utility.DirWatcher.Types
 import Annex.CurrentBranch
+import qualified Annex
 import qualified Annex.Branch
 import qualified Git
 import qualified Git.Branch

@@ -80,11 +81,13 @@ onChange file
 				[ "merging", Git.fromRef changedbranch
 				, "into", Git.fromRef b
 				]
-			void $ liftAnnex $ Command.Sync.merge
-				currbranch Command.Sync.mergeConfig
-				def
-				Git.Branch.AutomaticCommit
-				changedbranch
+			void $ liftAnnex $ do
+				cmode <- annexCommitMode <$> Annex.getGitConfig
+				Command.Sync.merge
+					currbranch Command.Sync.mergeConfig
+					def
+					cmode
+					changedbranch
 	mergecurrent' _ = noop

 {- Is the first branch a synced branch or remote tracking branch related

@@ -183,7 +183,7 @@ runHandler :: Handler -> FilePath -> Maybe FileStatus -> Assistant ()
 runHandler handler file filestatus = void $ do
 	r <- tryIO <~> handler (normalize file) filestatus
 	case r of
-		Left e -> liftIO $ warningIO $ show e
+		Left e -> liftAnnex $ warning $ show e
 		Right Nothing -> noop
 		Right (Just change) -> recordChange change
   where

@@ -40,9 +40,10 @@ import Utility.Metered
 import qualified Utility.Lsof as Lsof
 import qualified BuildInfo
 import qualified Utility.Url as Url
-import qualified Annex.Url as Url
+import qualified Annex.Url as Url hiding (download)
 import Utility.Tuple

+import Data.Either
 import qualified Data.Map as M

 {- Upgrade without interaction in the webapp. -}

@@ -323,8 +324,8 @@ downloadDistributionInfo = do
 	liftIO $ withTmpDir "git-annex.tmp" $ \tmpdir -> do
 		let infof = tmpdir </> "info"
 		let sigf = infof ++ ".sig"
-		ifM (Url.download nullMeterUpdate distributionInfoUrl infof uo
-			<&&> Url.download nullMeterUpdate distributionInfoSigUrl sigf uo
+		ifM (isRight <$> Url.download nullMeterUpdate distributionInfoUrl infof uo
+			<&&> (isRight <$> Url.download nullMeterUpdate distributionInfoSigUrl sigf uo)
 			<&&> verifyDistributionSig gpgcmd sigf)
 			( parseInfoFile <$> readFileStrict infof
 			, return Nothing

@@ -192,7 +192,7 @@ escapeHeader = escapeURIString (\c -> isUnescapedInURI c && c /= ' ')
 getRepoInfo :: RemoteConfig -> Widget
 getRepoInfo c = do
 	uo <- liftAnnex Url.getUrlOptions
-	exists <- liftIO $ catchDefaultIO False $ Url.exists url uo
+	exists <- liftAnnex $ catchDefaultIO False $ Url.exists url uo
 	[whamlet|
 <a href="#{url}">
   Internet Archive item

@@ -26,6 +26,7 @@ import Git.Command

 import Data.Time.Clock
 import Data.Char
+import Data.Either
 import System.Posix.Directory

 -- git-annex distribution signing key (for Joey Hess)

@@ -86,7 +87,7 @@ getbuild repodir (url, f) = do
 		putStrLn $ "*** " ++ s
 		return Nothing
 	uo <- defUrlOptions
-	ifM (download nullMeterUpdate url tmp uo)
+	ifM (isRight <$> download nullMeterUpdate url tmp uo)
 		( ifM (liftIO $ virusFree tmp)
 			( do
 				bv2 <- getbv

@@ -50,8 +50,12 @@ installLibs appbase replacement_libs libmap = do
 		let symdest = appbase </> shortlib
 		-- This is a hack; libraries need to be in the same
 		-- directory as the program, so also link them into the
-		-- extra directory.
-		let symdestextra = appbase </> "extra" </> shortlib
+		-- extra and git-core directories so programs in those will
+		-- find them.
+		let symdestextra =
+			[ appbase </> "extra" </> shortlib
+			, appbase </> "git-core" </> shortlib
+			]
 		ifM (doesFileExist dest)
 			( return Nothing
 			, do

@@ -59,9 +63,11 @@ installLibs appbase replacement_libs libmap = do
 				putStrLn $ "installing " ++ pathlib ++ " as " ++ shortlib
 				unlessM (boolSystem "cp" [File pathlib, File dest]
 					<&&> boolSystem "chmod" [Param "644", File dest]
-					<&&> boolSystem "ln" [Param "-s", File fulllib, File symdest]
-					<&&> boolSystem "ln" [Param "-s", File (".." </> fulllib), File symdestextra]) $
+					<&&> boolSystem "ln" [Param "-s", File fulllib, File symdest]) $
 					error "library install failed"
+				forM_ symdestextra $ \d ->
+					unlessM (boolSystem "ln" [Param "-s", File (".." </> fulllib), File d]) $
+						error "library linking failed"
 				return $ Just appbase
 			)
 	return (catMaybes libs, replacement_libs', libmap')

@@ -56,16 +56,26 @@ installGitLibs topdir = do
 		if issymlink
 			then do
 				-- many git-core files may symlink to eg
-				-- ../../git. The link targets are put
-				-- into a subdirectory so all links to
-				-- .../git get the same binary.
+				-- ../../bin/git, which is located outside
+				-- the git-core directory. The target of
+				-- such links is installed into the progDir
+				-- (if not already there), and the links
+				-- repointed to it.
+				--
+				-- Other git-core files symlink to a file
+				-- beside them in the directory. Those
+				-- links can be copied as-is.
 				linktarget <- readSymbolicLink f
-				let linktarget' = gitcoredestdir </> "bin" </> takeFileName linktarget
-				createDirectoryIfMissing True (takeDirectory linktarget')
-				L.readFile f >>= L.writeFile linktarget'
-				nukeFile destf
-				rellinktarget <- relPathDirToFile (takeDirectory destf) linktarget'
-				createSymbolicLink rellinktarget destf
+				if takeFileName linktarget == linktarget
+					then cp f destf
+					else do
+						let linktarget' = progDir topdir </> takeFileName linktarget
+						unlessM (doesFileExist linktarget') $ do
+							createDirectoryIfMissing True (takeDirectory linktarget')
+							L.readFile f >>= L.writeFile linktarget'
+						nukeFile destf
+						rellinktarget <- relPathDirToFile (takeDirectory destf) linktarget'
+						createSymbolicLink rellinktarget destf
			else cp f destf

 		-- install git's template files

CHANGELOG (35 changed lines)
@@ -13,11 +13,42 @@ git-annex (8.20191107) UNRELEASED; urgency=medium
     Microbenchmarks show around 10-25% speedup of sqlite database operations.
   * Improved serialization of filenames and keys to the sqlite databases,
     avoiding encoding problems.
+  * Windows: Fix handling of changes to time zone. (Used to work but was
+    broken in version 7.20181031.)

  -- Joey Hess <id@joeyh.name>  Tue, 29 Oct 2019 15:13:03 -0400

+git-annex (7.20191115) UNRELEASED; urgency=medium
+
+  * Stop displaying rsync progress, and use git-annex's own progress display
+    for local-to-local repo transfers.
+  * git-lfs: The url provided to initremote/enableremote will now be
+    stored in the git-annex branch, allowing enableremote to be used without
+    an url. initremote --sameas can be used to add additional urls.
+  * git-lfs: When there's a git remote with an url that's known to be
+    used for git-lfs, automatically enable the special remote.
+  * sync, assistant: Pull and push from git-lfs remotes.
+  * Fix bug that made bare repos be treated as non-bare when --git-dir
+    was used.
+  * benchmark: Changed --databases to take a parameter specifiying the size
+    of the database to benchmark.
+  * benchmark --databases: Display size of the populated database.
+  * benchmark --databases: Improve the "addAssociatedFile to (new)"
+    benchmark to really add new values, not overwriting old values.
+
+ -- Joey Hess <id@joeyh.name>  Fri, 15 Nov 2019 11:57:19 -0400
+
+git-annex (7.20191114) upstream; urgency=medium
+
+  * Added annex.allowsign option.
+  * Make --json-error-messages capture more errors,
+    particularly url download errors.
+  * Fix a crash (STM deadlock) when -J is used with multiple files
+    that point to the same key.
+  * linuxstandalone: Fix a regression that broke git-remote-https.
+  * OSX git-annex.app: Fix a problem that prevented using the bundled
+    git-remote-https, git-remote-http, and git-shell.
+
+ -- Joey Hess <id@joeyh.name>  Thu, 14 Nov 2019 21:57:59 -0400
+
 git-annex (7.20191106) upstream; urgency=medium

   * init: Fix bug that lost modifications to unlocked files when init is

@@ -63,7 +63,7 @@ commandAction start = Annex.getState Annex.concurrency >>= \case
 	runconcurrent = Annex.getState Annex.workers >>= \case
 		Nothing -> runnonconcurrent
 		Just tv ->
-			liftIO (atomically (waitInitialWorkerSlot tv)) >>=
+			liftIO (atomically (waitStartWorkerSlot tv)) >>=
 				maybe runnonconcurrent (runconcurrent' tv)
 	runconcurrent' tv (workerst, workerstage) = do
 		aid <- liftIO $ async $ snd <$> Annex.run workerst

@@ -99,12 +99,13 @@ commandAction start = Annex.getState Annex.concurrency >>= \case
 			case mkActionItem startmsg' of
 				OnlyActionOn k' _ | k' /= k ->
 					concurrentjob' workerst startmsg' perform'
-				_ -> mkjob workerst startmsg' perform'
+				_ -> beginjob workerst startmsg' perform'
 		Nothing -> noop
-	_ -> mkjob workerst startmsg perform
+	_ -> beginjob workerst startmsg perform

-	mkjob workerst startmsg perform =
-		inOwnConsoleRegion (Annex.output workerst) $
+	beginjob workerst startmsg perform =
+		inOwnConsoleRegion (Annex.output workerst) $ do
+			enteringInitialStage
 			void $ accountCommandAction startmsg $
 				performconcurrent startmsg perform

@@ -197,8 +197,7 @@ startWeb o urlstring = go $ fromMaybe bad $ parseURI urlstring
 	pathmax <- liftIO $ fileNameLengthLimit "."
 	urlinfo <- if relaxedOption (downloadOptions o)
 		then pure Url.assumeUrlExists
-		else Url.withUrlOptions $
-			liftIO . Url.getUrlInfo urlstring
+		else Url.withUrlOptions $ Url.getUrlInfo urlstring
 	file <- adjustFile o <$> case fileOption (downloadOptions o) of
 		Just f -> pure f
 		Nothing -> case Url.urlSuggestedFile urlinfo of

@@ -26,7 +26,7 @@ cmd generator = command "benchmark" SectionTesting

 data BenchmarkOptions
 	= BenchmarkOptions CmdParams CriterionMode
-	| BenchmarkDatabases CriterionMode
+	| BenchmarkDatabases CriterionMode Integer

 optParser :: CmdParamsDesc -> Parser BenchmarkOptions
 optParser desc = benchmarkoptions <|> benchmarkdatabases

@@ -36,10 +36,11 @@ optParser desc = benchmarkoptions <|> benchmarkdatabases
 		<*> criterionopts
 	benchmarkdatabases = BenchmarkDatabases
 		<$> criterionopts
-		<* flag' ()
-			( long "databases"
+		<*> option auto
+			( long "databases"
+			<> metavar paramNumber
 			<> help "benchmark sqlite databases"
-			)
+			)
 #ifdef WITH_BENCHMARK
 	criterionopts = parseWith defaultConfig
 #else

@@ -51,7 +52,7 @@ seek :: BenchmarkGenerator -> BenchmarkOptions -> CommandSeek
 seek generator (BenchmarkOptions ps mode) = do
 	runner <- generator ps
 	liftIO $ runMode mode [ bench (unwords ps) $ nfIO runner ]
-seek _ (BenchmarkDatabases mode) = benchmarkDbs mode
+seek _ (BenchmarkDatabases mode n) = benchmarkDbs mode n
 #else
 seek _ _ = giveup "git-annex is not built with benchmarking support"
 #endif

@@ -30,7 +30,6 @@ import Utility.InodeCache
 import Logs.Location
 import Git.FilePath
 import Git.Types
-import Git.Branch
 import Types.Import
 import Utility.Metered

@@ -40,7 +39,7 @@ cmd :: Command
 cmd = notBareRepo $
 	withGlobalOptions [jobsOption, jsonOptions, fileMatchingOptions] $
 		command "import" SectionCommon
-			"import files from elsewhere into the repository"
+			"add a tree of files to the repository"
 			(paramPaths ++ "|BRANCH[:SUBDIR]")
 			(seek <$$> optParser)

@@ -266,7 +265,8 @@ seekRemote remote branch msubdir = do
 		Nothing -> giveup $ "Unable to find base tree for branch " ++ fromRef branch

 	trackingcommit <- fromtrackingbranch Git.Ref.sha
-	let importcommitconfig = ImportCommitConfig trackingcommit AutomaticCommit importmessage
+	cmode <- annexCommitMode <$> Annex.getGitConfig
+	let importcommitconfig = ImportCommitConfig trackingcommit cmode importmessage
 	let commitimport = commitRemote remote branch tb trackingcommit importtreeconfig importcommitconfig

 	importabletvar <- liftIO $ newTVarIO Nothing

@@ -146,13 +146,12 @@ findDownloads u f = catMaybes $ map mk (feedItems f)
 downloadFeed :: URLString -> Annex (Maybe String)
 downloadFeed url
 	| Url.parseURIRelaxed url == Nothing = giveup "invalid feed url"
-	| otherwise = Url.withUrlOptions $ \uo ->
-		liftIO $ withTmpFile "feed" $ \f h -> do
-			hClose h
-			ifM (Url.download nullMeterUpdate url f uo)
-				( Just <$> readFileStrict f
-				, return Nothing
-				)
+	| otherwise = withTmpFile "feed" $ \f h -> do
+		liftIO $ hClose h
+		ifM (Url.withUrlOptions $ Url.download nullMeterUpdate url f)
+			( Just <$> liftIO (readFileStrict f)
+			, return Nothing
+			)

 performDownload :: ImportFeedOptions -> Cache -> ToDownload -> Annex Bool
 performDownload opts cache todownload = case location todownload of

@@ -164,7 +163,7 @@ performDownload opts cache todownload = case location todownload of
 		urlinfo <- if relaxedOption (downloadOptions opts)
 			then pure Url.assumeUrlExists
 			else Url.withUrlOptions $
-				liftIO . Url.getUrlInfo url
+				Url.getUrlInfo url
 		let dlopts = (downloadOptions opts)
 			-- force using the filename
 			-- chosen here

@@ -19,6 +19,7 @@ import Database.Init
 import Utility.Tmp.Dir
 import Git.FilePath
 import Types.Key
+import Utility.DataUnits

 import Criterion.Main
 import Control.Monad.IO.Class (liftIO)

@@ -26,17 +27,12 @@ import qualified Data.ByteString.Char8 as B8
 import System.Random
 #endif

-benchmarkDbs :: CriterionMode -> Annex ()
+benchmarkDbs :: CriterionMode -> Integer -> Annex ()
 #ifdef WITH_BENCHMARK
-benchmarkDbs mode = withTmpDirIn "." "benchmark" $ \tmpdir -> do
-	-- benchmark different sizes of databases
-	dbs <- mapM (benchDb tmpdir)
-		[ 1000
-		, 10000
-		-- , 100000
-		]
+benchmarkDbs mode n = withTmpDirIn "." "benchmark" $ \tmpdir -> do
+	db <- benchDb tmpdir n
 	liftIO $ runMode mode
-		[ bgroup "keys database" $ flip concatMap dbs $ \db ->
+		[ bgroup "keys database"
 			[ getAssociatedFilesHitBench db
 			, getAssociatedFilesMissBench db
 			, getAssociatedKeyHitBench db

@@ -78,22 +74,22 @@ addAssociatedFileOldBench (BenchDb h num) = bench ("addAssociatedFile to " ++ sh
 addAssociatedFileNewBench :: BenchDb -> Benchmark
 addAssociatedFileNewBench (BenchDb h num) = bench ("addAssociatedFile to " ++ show num ++ " (new)") $ nfIO $ do
 	n <- getStdRandom (randomR (1,num))
-	SQL.addAssociatedFile (keyN n) (fileN (n+1)) (SQL.WriteHandle h)
+	SQL.addAssociatedFile (keyN n) (fileN (num+n)) (SQL.WriteHandle h)
 	H.flushDbQueue h

-populateAssociatedFiles :: H.DbQueue -> Int -> IO ()
+populateAssociatedFiles :: H.DbQueue -> Integer -> IO ()
 populateAssociatedFiles h num = do
 	forM_ [1..num] $ \n ->
 		SQL.addAssociatedFile (keyN n) (fileN n) (SQL.WriteHandle h)
 	H.flushDbQueue h

-keyN :: Int -> Key
+keyN :: Integer -> Key
 keyN n = stubKey
 	{ keyName = B8.pack $ "key" ++ show n
 	, keyVariety = OtherKey "BENCH"
 	}

-fileN :: Int -> TopFilePath
+fileN :: Integer -> TopFilePath
 fileN n = asTopFilePath ("file" ++ show n)

 keyMiss :: Key

@@ -102,14 +98,17 @@ keyMiss = keyN 0 -- 0 is never stored
 fileMiss :: TopFilePath
 fileMiss = fileN 0 -- 0 is never stored

-data BenchDb = BenchDb H.DbQueue Int
+data BenchDb = BenchDb H.DbQueue Integer

-benchDb :: FilePath -> Int -> Annex BenchDb
+benchDb :: FilePath -> Integer -> Annex BenchDb
 benchDb tmpdir num = do
-	liftIO $ putStrLn $ "setting up database with " ++ show num
+	liftIO $ putStrLn $ "setting up database with " ++ show num ++ " items"
 	initDb db SQL.createTables
 	h <- liftIO $ H.openDbQueue H.MultiWriter db SQL.containedTable
 	liftIO $ populateAssociatedFiles h num
+	sz <- liftIO $ getFileSize db
+	liftIO $ putStrLn $ "size of database on disk: " ++
+		roughSize storageUnits False sz
 	return (BenchDb h num)
   where
 	db = tmpdir </> show num </> "db"

@@ -94,6 +94,14 @@ store s repo = do
 	, fullconfig = M.unionWith (++) c (fullconfig repo)
 	}

+{- Stores a single config setting in a Repo, returning the new version of
+ - the Repo. Config settings can be updated incrementally. -}
+store' :: String -> String -> Repo -> Repo
+store' k v repo = repo
+	{ config = M.singleton k v `M.union` config repo
+	, fullconfig = M.unionWith (++) (M.singleton k [v]) (fullconfig repo)
+	}
+
 {- Updates the location of a repo, based on its configuration.
 -
 - Git.Construct makes LocalUknown repos, of which only a directory is

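store' lets one setting be updated without re-reading the repository's whole config, which the remote-updating code later in this diff relies on when caching a discovered special remote's UUID into the git config. A small sketch of the intended use (the key name here is illustrative):

    -- Sketch: pure, incremental update of a single config setting.
    cacheUuid :: String -> Repo -> Repo
    cacheUuid uuid = store' "annex.uuid" uuid
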
@@ -67,8 +67,12 @@ get = do
 	configure (Just d) _ = do
 		absd <- absPath d
 		curr <- getCurrentDirectory
-		Git.Config.read $ newFrom $
+		r <- Git.Config.read $ newFrom $
 			Local { gitdir = absd, worktree = Just curr }
+		return $ if Git.Config.isBare r
+			then r { location = (location r) { worktree = Nothing } }
+			else r
+
 	configure Nothing Nothing = giveup "Not in a git repository."

 	addworktree w r = changelocation r $

Git/Queue.hs (41 changed lines)
@@ -1,6 +1,6 @@
 {- git repository command queue
 -
- - Copyright 2010-2018 Joey Hess <id@joeyh.name>
+ - Copyright 2010-2019 Joey Hess <id@joeyh.name>
 -
 - Licensed under the GNU AGPL version 3 or higher.
 -}

@@ -27,9 +27,10 @@ import Git.Command
 import qualified Git.UpdateIndex

 import qualified Data.Map.Strict as M
+import Control.Monad.IO.Class

 {- Queable actions that can be performed in a git repository. -}
-data Action
+data Action m
 	{- Updating the index file, using a list of streamers that can
 	 - be added to as the queue grows. -}
 	= UpdateIndexAction [Git.UpdateIndex.Streamer] -- in reverse order

@@ -43,21 +44,21 @@ data Action m
 	{- An internal action to run, on a list of files that can be added
 	 - to as the queue grows. -}
 	| InternalAction
-		{ getRunner :: InternalActionRunner
+		{ getRunner :: InternalActionRunner m
 		, getInternalFiles :: [(FilePath, IO Bool)]
 		}

 {- The String must be unique for each internal action. -}
-data InternalActionRunner = InternalActionRunner String (Repo -> [(FilePath, IO Bool)] -> IO ())
+data InternalActionRunner m = InternalActionRunner String (Repo -> [(FilePath, IO Bool)] -> m ())

-instance Eq InternalActionRunner where
+instance Eq (InternalActionRunner m) where
 	InternalActionRunner s1 _ == InternalActionRunner s2 _ = s1 == s2

 {- A key that can uniquely represent an action in a Map. -}
 data ActionKey = UpdateIndexActionKey | CommandActionKey String | InternalActionKey String
 	deriving (Eq, Ord)

-actionKey :: Action -> ActionKey
+actionKey :: Action m -> ActionKey
 actionKey (UpdateIndexAction _) = UpdateIndexActionKey
 actionKey CommandAction { getSubcommand = s } = CommandActionKey s
 actionKey InternalAction { getRunner = InternalActionRunner s _ } = InternalActionKey s

@@ -65,10 +66,10 @@ actionKey InternalAction { getRunner = InternalActionRunner s _ } = InternalActi
 {- A queue of actions to perform (in any order) on a git repository,
 - with lists of files to perform them on. This allows coalescing
 - similar git commands. -}
-data Queue = Queue
+data Queue m = Queue
 	{ size :: Int
 	, _limit :: Int
-	, items :: M.Map ActionKey Action
+	, items :: M.Map ActionKey (Action m)
 	}

 {- A recommended maximum size for the queue, after which it should be

@@ -84,7 +85,7 @@ defaultLimit :: Int
 defaultLimit = 10240

 {- Constructor for empty queue. -}
-new :: Maybe Int -> Queue
+new :: Maybe Int -> Queue m
 new lim = Queue 0 (fromMaybe defaultLimit lim) M.empty

 {- Adds an git command to the queue.

@@ -93,7 +94,7 @@ new lim = Queue 0 (fromMaybe defaultLimit lim) M.empty
 - assumed to be equivilant enough to perform in any order with the same
 - result.
 -}
-addCommand :: String -> [CommandParam] -> [FilePath] -> Queue -> Repo -> IO Queue
+addCommand :: MonadIO m => String -> [CommandParam] -> [FilePath] -> Queue m -> Repo -> m (Queue m)
 addCommand subcommand params files q repo =
 	updateQueue action different (length files) q repo
   where

@@ -107,7 +108,7 @@ addCommand subcommand params files q repo =
 	different _ = True

 {- Adds an internal action to the queue. -}
-addInternalAction :: InternalActionRunner -> [(FilePath, IO Bool)] -> Queue -> Repo -> IO Queue
+addInternalAction :: MonadIO m => InternalActionRunner m -> [(FilePath, IO Bool)] -> Queue m -> Repo -> m (Queue m)
 addInternalAction runner files q repo =
 	updateQueue action different (length files) q repo
   where

@@ -120,7 +121,7 @@ addInternalAction runner files q repo =
 	different _ = True

 {- Adds an update-index streamer to the queue. -}
-addUpdateIndex :: Git.UpdateIndex.Streamer -> Queue -> Repo -> IO Queue
+addUpdateIndex :: MonadIO m => Git.UpdateIndex.Streamer -> Queue m -> Repo -> m (Queue m)
 addUpdateIndex streamer q repo =
 	updateQueue action different 1 q repo
   where

@@ -133,7 +134,7 @@ addUpdateIndex streamer q repo =
 {- Updates or adds an action in the queue. If the queue already contains a
 - different action, it will be flushed; this is to ensure that conflicting
 - actions, like add and rm, are run in the right order.-}
-updateQueue :: Action -> (Action -> Bool) -> Int -> Queue -> Repo -> IO Queue
+updateQueue :: MonadIO m => Action m -> (Action m -> Bool) -> Int -> Queue m -> Repo -> m (Queue m)
 updateQueue !action different sizeincrease q repo
 	| null (filter different (M.elems (items q))) = return $ go q
 	| otherwise = go <$> flush q repo

@@ -150,7 +151,7 @@ updateQueue !action different sizeincrease q repo
 {- The new value comes first. It probably has a smaller list of files than
 - the old value. So, the list append of the new value first is more
 - efficient. -}
-combineNewOld :: Action -> Action -> Action
+combineNewOld :: Action m -> Action m -> Action m
 combineNewOld (CommandAction _sc1 _ps1 fs1) (CommandAction sc2 ps2 fs2) =
 	CommandAction sc2 ps2 (fs1++fs2)
 combineNewOld (UpdateIndexAction s1) (UpdateIndexAction s2) =

@@ -162,18 +163,18 @@ combineNewOld anew _aold = anew
 {- Merges the contents of the second queue into the first.
 - This should only be used when the two queues are known to contain
 - non-conflicting actions. -}
-merge :: Queue -> Queue -> Queue
+merge :: Queue m -> Queue m -> Queue m
 merge origq newq = origq
 	{ size = size origq + size newq
 	, items = M.unionWith combineNewOld (items newq) (items origq)
 	}

 {- Is a queue large enough that it should be flushed? -}
-full :: Queue -> Bool
+full :: Queue m -> Bool
 full (Queue cur lim _) = cur >= lim

 {- Runs a queue on a git repository. -}
-flush :: Queue -> Repo -> IO Queue
+flush :: MonadIO m => Queue m -> Repo -> m (Queue m)
 flush (Queue _ lim m) repo = do
 	forM_ (M.elems m) $ runAction repo
 	return $ Queue 0 lim M.empty

@@ -184,11 +185,11 @@ flush (Queue _ lim m) repo = do
 -
 - Intentionally runs the command even if the list of files is empty;
 - this allows queueing commands that do not need a list of files. -}
-runAction :: Repo -> Action -> IO ()
+runAction :: MonadIO m => Repo -> Action m -> m ()
 runAction repo (UpdateIndexAction streamers) =
 	-- list is stored in reverse order
-	Git.UpdateIndex.streamUpdateIndex repo $ reverse streamers
-runAction repo action@(CommandAction {}) = do
+	liftIO $ Git.UpdateIndex.streamUpdateIndex repo $ reverse streamers
+runAction repo action@(CommandAction {}) = liftIO $ do
 #ifndef mingw32_HOST_OS
 	let p = (proc "xargs" $ "-0":"git":toCommand gitparams) { env = gitEnv repo }
 	withHandle StdinHandle createProcessSuccess p $ \h -> do

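With Action, InternalActionRunner, and Queue parameterized over a MonadIO monad, internal actions can now run directly in Annex (as the restagePointerFile runner earlier in this diff does), while plain command actions simply liftIO as before. A self-contained sketch of the generalized API in a custom monad, assuming Git.Queue exports the constructors and functions shown above:

    import Control.Monad.Reader

    -- Sketch: a queue whose internal action runs in ReaderT Int IO,
    -- demonstrating that Queue m is no longer tied to IO.
    demoRunner :: InternalActionRunner (ReaderT Int IO)
    demoRunner = InternalActionRunner "demo" $ \_repo files -> do
        n <- ask
        liftIO $ putStrLn ("flushing " ++ show (length files) ++ " files; env = " ++ show n)

    demoFlush :: Repo -> ReaderT Int IO ()
    demoFlush repo = do
        q <- addInternalAction demoRunner [("some-file", return True)] (new Nothing) repo
        _ <- flush q repo
        return ()
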
@@ -51,6 +51,7 @@ makeLegalName s = case filter legal $ replace "/" "_" s of
 	legal c = isAlphaNum c

 data RemoteLocation = RemoteUrl String | RemotePath FilePath
+	deriving (Eq)

 remoteLocationIsUrl :: RemoteLocation -> Bool
 remoteLocationIsUrl (RemoteUrl _) = True

|
@ -206,7 +206,7 @@ downloadTorrentFile u = do
|
|||
withTmpFileIn othertmp "torrent" $ \f h -> do
|
||||
liftIO $ hClose h
|
||||
ok <- Url.withUrlOptions $
|
||||
liftIO . Url.download nullMeterUpdate u f
|
||||
Url.download nullMeterUpdate u f
|
||||
when ok $
|
||||
liftIO $ renameFile f torrent
|
||||
return ok
|
||||
|
|
|
@ -716,7 +716,7 @@ checkKeyUrl :: Git.Repo -> CheckPresent
|
|||
checkKeyUrl r k = do
|
||||
showChecking r
|
||||
us <- getWebUrls k
|
||||
anyM (\u -> withUrlOptions $ liftIO . checkBoth u (keySize k)) us
|
||||
anyM (\u -> withUrlOptions $ checkBoth u (keySize k)) us
|
||||
|
||||
getWebUrls :: Key -> Annex [URLString]
|
||||
getWebUrls key = filter supported <$> getUrls key
|
||||
|
|
|
@ -286,7 +286,7 @@ setupRepo gcryptid r
|
|||
{- Ask git-annex-shell to configure the repository as a gcrypt
|
||||
- repository. May fail if it is too old. -}
|
||||
gitannexshellsetup = Ssh.onRemote NoConsumeStdin r
|
||||
(boolSystem, return False)
|
||||
(\f p -> liftIO (boolSystem f p), return False)
|
||||
"gcryptsetup" [ Param gcryptid ] []
|
||||
|
||||
denyNonFastForwards = "receive.denyNonFastForwards"
|
||||
|
@ -451,7 +451,7 @@ getGCryptId fast r gc
|
|||
| Git.repoIsLocal r || Git.repoIsLocalUnknown r = extract <$>
|
||||
liftIO (catchMaybeIO $ Git.Config.read r)
|
||||
| not fast = extract . liftM fst <$> getM (eitherToMaybe <$>)
|
||||
[ Ssh.onRemote NoConsumeStdin r (Git.Config.fromPipe r, return (Left $ error "configlist failed")) "configlist" [] []
|
||||
[ Ssh.onRemote NoConsumeStdin r (\f p -> liftIO (Git.Config.fromPipe r f p), return (Left $ error "configlist failed")) "configlist" [] []
|
||||
, getConfigViaRsync r gc
|
||||
]
|
||||
| otherwise = return (Nothing, r)
|
||||
|
|
|
@ -143,7 +143,9 @@ configRead autoinit r = do
|
|||
(True, _, _)
|
||||
| remoteAnnexCheckUUID gc -> tryGitConfigRead autoinit r
|
||||
| otherwise -> return r
|
||||
(False, _, NoUUID) -> tryGitConfigRead autoinit r
|
||||
(False, _, NoUUID) -> configSpecialGitRemotes r >>= \case
|
||||
Nothing -> tryGitConfigRead autoinit r
|
||||
Just r' -> return r'
|
||||
_ -> return r
|
||||
|
||||
gen :: Git.Repo -> UUID -> RemoteConfig -> RemoteGitConfig -> RemoteStateHandle -> Annex (Maybe Remote)
|
||||
|
@ -231,7 +233,7 @@ repoAvail r
|
|||
tryGitConfigRead :: Bool -> Git.Repo -> Annex Git.Repo
|
||||
tryGitConfigRead autoinit r
|
||||
| haveconfig r = return r -- already read
|
||||
| Git.repoIsSsh r = store $ do
|
||||
| Git.repoIsSsh r = storeUpdatedRemote $ do
|
||||
v <- Ssh.onRemote NoConsumeStdin r
|
||||
(pipedconfig, return (Left $ giveup "configlist failed"))
|
||||
"configlist" [] configlistfields
|
||||
|
@ -240,30 +242,30 @@ tryGitConfigRead autoinit r
|
|||
| haveconfig r' -> return r'
|
||||
| otherwise -> configlist_failed
|
||||
Left _ -> configlist_failed
|
||||
| Git.repoIsHttp r = store geturlconfig
|
||||
| Git.repoIsHttp r = storeUpdatedRemote geturlconfig
|
||||
| Git.GCrypt.isEncrypted r = handlegcrypt =<< getConfigMaybe (remoteConfig r "uuid")
|
||||
| Git.repoIsUrl r = return r
|
||||
| otherwise = store $ liftIO $
|
||||
| otherwise = storeUpdatedRemote $ liftIO $
|
||||
readlocalannexconfig `catchNonAsync` (const $ return r)
|
||||
where
|
||||
haveconfig = not . M.null . Git.config
|
||||
|
||||
pipedconfig cmd params = do
|
||||
v <- Git.Config.fromPipe r cmd params
|
||||
v <- liftIO $ Git.Config.fromPipe r cmd params
|
||||
case v of
|
||||
Right (r', val) -> do
|
||||
unless (isUUIDConfigured r' || null val) $ do
|
||||
warningIO $ "Failed to get annex.uuid configuration of repository " ++ Git.repoDescribe r
|
||||
warningIO $ "Instead, got: " ++ show val
|
||||
warningIO $ "This is unexpected; please check the network transport!"
|
||||
warning $ "Failed to get annex.uuid configuration of repository " ++ Git.repoDescribe r
|
||||
warning $ "Instead, got: " ++ show val
|
||||
warning $ "This is unexpected; please check the network transport!"
|
||||
return $ Right r'
|
||||
Left l -> return $ Left l
|
||||
|
||||
geturlconfig = Url.withUrlOptions $ \uo -> do
|
||||
v <- liftIO $ withTmpFile "git-annex.tmp" $ \tmpfile h -> do
|
||||
hClose h
|
||||
v <- withTmpFile "git-annex.tmp" $ \tmpfile h -> do
|
||||
liftIO $ hClose h
|
||||
let url = Git.repoLocation r ++ "/config"
|
||||
ifM (Url.downloadQuiet nullMeterUpdate url tmpfile uo)
|
||||
ifM (liftIO $ Url.downloadQuiet nullMeterUpdate url tmpfile uo)
|
||||
( Just <$> pipedconfig "git" [Param "config", Param "--null", Param "--list", Param "--file", File tmpfile]
|
||||
, return Nothing
|
||||
)
|
||||
|
@ -278,18 +280,6 @@ tryGitConfigRead autoinit r
|
|||
set_ignore "not usable by git-annex" False
|
||||
return r
|
||||
|
||||
store = observe $ \r' -> do
|
||||
l <- Annex.getGitRemotes
|
||||
let rs = exchange l r'
|
||||
Annex.changeState $ \s -> s { Annex.gitremotes = Just rs }
|
||||
|
||||
exchange [] _ = []
|
||||
exchange (old:ls) new
|
||||
| Git.remoteName old == Git.remoteName new =
|
||||
new : exchange ls new
|
||||
| otherwise =
|
||||
old : exchange ls new
|
||||
|
||||
{- Is this remote just not available, or does
|
||||
- it not have git-annex-shell?
|
||||
- Find out by trying to fetch from the remote. -}
|
||||
|
@ -319,7 +309,7 @@ tryGitConfigRead autoinit r
|
|||
g <- gitRepo
|
||||
case Git.GCrypt.remoteRepoId g (Git.remoteName r) of
|
||||
Nothing -> return r
|
||||
Just v -> store $ liftIO $ setUUID r $
|
||||
Just v -> storeUpdatedRemote $ liftIO $ setUUID r $
|
||||
genUUIDInNameSpace gCryptNameSpace v
|
||||
|
||||
{- The local repo may not yet be initialized, so try to initialize
|
||||
|
@ -337,6 +327,31 @@ tryGitConfigRead autoinit r
|
|||
then [(Fields.autoInit, "1")]
|
||||
else []
|
||||
|
||||
{- Handles special remotes that can be enabled by the presence of
|
||||
- regular git remotes.
|
||||
-
|
||||
- When a remote repo is found to be such a special remote, its
|
||||
- UUID is cached in the git config, and the repo returned with
|
||||
- the UUID set.
|
||||
-}
|
||||
configSpecialGitRemotes :: Git.Repo -> Annex (Maybe Git.Repo)
|
||||
configSpecialGitRemotes r = Remote.GitLFS.configKnownUrl r >>= \case
|
||||
Nothing -> return Nothing
|
||||
Just r' -> Just <$> storeUpdatedRemote (return r')
|
||||
|
||||
storeUpdatedRemote :: Annex Git.Repo -> Annex Git.Repo
|
||||
storeUpdatedRemote = observe $ \r' -> do
|
||||
l <- Annex.getGitRemotes
|
||||
let rs = exchange l r'
|
||||
Annex.changeState $ \s -> s { Annex.gitremotes = Just rs }
|
||||
where
|
||||
exchange [] _ = []
|
||||
exchange (old:ls) new
|
||||
| Git.remoteName old == Git.remoteName new =
|
||||
new : exchange ls new
|
||||
| otherwise =
|
||||
old : exchange ls new
|
||||
|
||||
{- Checks if a given remote has the content for a key in its annex. -}
|
||||
inAnnex :: Remote -> State -> Key -> Annex Bool
|
||||
inAnnex rmt st key = do
|
||||
|
@ -352,11 +367,10 @@ inAnnex' repo rmt (State connpool duc _ _) key
|
|||
checkhttp = do
|
||||
showChecking repo
|
||||
gc <- Annex.getGitConfig
|
||||
ifM (Url.withUrlOptions $ \uo -> liftIO $
|
||||
anyM (\u -> Url.checkBoth u (keySize key) uo) (keyUrls gc repo rmt key))
|
||||
( return True
|
||||
, giveup "not found"
|
||||
)
|
||||
ifM (Url.withUrlOptions $ \uo -> anyM (\u -> Url.checkBoth u (keySize key) uo) (keyUrls gc repo rmt key))
|
||||
( return True
|
||||
, giveup "not found"
|
||||
)
|
||||
checkremote =
|
||||
let fallback = Ssh.inAnnex repo key
|
||||
in P2PHelper.checkpresent (Ssh.runProto rmt connpool (cantCheck rmt) fallback) key
|
||||
|
@ -498,8 +512,9 @@ copyFromRemote'' repo forcersync r st@(State connpool _ _ _) key file dest meter
Just (object, checksuccess) -> do
copier <- mkCopier hardlink st params
runTransfer (Transfer Download u key)
file stdRetry
(\p -> copier object dest (combineMeterUpdate p meterupdate) checksuccess)
file stdRetry $ \p ->
metered (Just (combineMeterUpdate p meterupdate)) key $ \_ p' ->
copier object dest p' checksuccess
| Git.repoIsSsh repo = if forcersync
then fallback meterupdate
else P2PHelper.retrieve
@ -632,15 +647,15 @@ copyToRemote' repo r st@(State connpool duc _ _) key file meterupdate
-- run copy from perspective of remote
onLocalFast repo r $ ifM (Annex.Content.inAnnex key)
( return True
, do
, runTransfer (Transfer Download u key) file stdRetry $ \p -> do
copier <- mkCopier hardlink st params
let verify = Annex.Content.RemoteVerify r
let rsp = RetrievalAllKeysSecure
runTransfer (Transfer Download u key) file stdRetry $ \p ->
let p' = combineMeterUpdate meterupdate p
in Annex.Content.saveState True `after`
Annex.Content.getViaTmp rsp verify key
(\dest -> copier object dest p' (liftIO checksuccessio))
res <- Annex.Content.getViaTmp rsp verify key $ \dest ->
metered (Just (combineMeterUpdate meterupdate p)) key $ \_ p' ->
copier object dest p' (liftIO checksuccessio)
Annex.Content.saveState True
return res
)
copyremotefallback p = Annex.Content.sendAnnex key noop $ \object -> do
-- This is too broad really, but recvkey normally
@ -750,7 +765,7 @@ rsyncOrCopyFile st rsyncparams src dest p =
dorsync = do
-- dest may already exist, so make sure rsync can write to it
void $ liftIO $ tryIO $ allowWrite dest
oh <- mkOutputHandler
oh <- mkOutputHandlerQuiet
Ssh.rsyncHelper oh (Just p) $
rsyncparams ++ [File src, File dest]
docopycow = docopywith copyCoW
@ -5,7 +5,7 @@
- Licensed under the GNU AGPL version 3 or higher.
-}

module Remote.GitLFS (remote, gen) where
module Remote.GitLFS (remote, gen, configKnownUrl) where

import Annex.Common
import Types.Remote
@ -13,9 +13,11 @@ import Annex.Url
import Types.Key
import Types.Creds
import qualified Annex
import qualified Annex.SpecialRemote.Config
import qualified Git
import qualified Git.Types as Git
import qualified Git.Url
import qualified Git.Remote
import qualified Git.GCrypt
import qualified Git.Credential as Git
import Config
@ -31,8 +33,10 @@ import Crypto
import Backend.Hash
import Utility.Hash
import Utility.SshHost
import Logs.Remote
import Logs.RemoteState
import qualified Utility.GitLFS as LFS
import qualified Git.Config

import Control.Concurrent.STM
import Data.String
@ -145,21 +149,46 @@ mySetup _ mu _ c gc = do
, "likely insecure configuration.)"
]

-- The url is not stored in the remote log, because the same
-- git-lfs repo can be accessed using different urls by different
-- people (eg over ssh or http).
--
-- Instead, set up remote.name.url to point to the repo,
-- Set up remote.name.url to point to the repo,
-- (so it's also usable by git as a non-special remote),
-- and set remote.name.git-lfs = true
let c'' = M.delete "url" c'
gitConfigSpecialRemote u c'' [("git-lfs", "true")]
-- and set remote.name.annex-git-lfs = true
gitConfigSpecialRemote u c' [("git-lfs", "true")]
setConfig (ConfigKey ("remote." ++ getRemoteName c ++ ".url")) url
return (c'', u)
return (c', u)
where
url = fromMaybe (giveup "Specify url=") (M.lookup "url" c)
remotename = fromJust (lookupName c)

{- Check if a remote's url is one known to belong to a git-lfs repository.
- If so, set the necessary configuration to enable using the remote
- with git-lfs. -}
configKnownUrl :: Git.Repo -> Annex (Maybe Git.Repo)
configKnownUrl r
| Git.repoIsUrl r = do
l <- readRemoteLog
g <- Annex.gitRepo
case Annex.SpecialRemote.Config.findByRemoteConfig (match g) l of
((u, _, mcu):[]) -> Just <$> go u mcu
_ -> return Nothing
| otherwise = return Nothing
where
match g c = fromMaybe False $ do
t <- M.lookup Annex.SpecialRemote.Config.typeField c
u <- M.lookup "url" c
let u' = Git.Remote.parseRemoteLocation u g
return $ Git.Remote.RemoteUrl (Git.repoLocation r) == u'
&& t == typename remote
go u mcu = do
r' <- set "uuid" (fromUUID u) =<< set "git-lfs" "true" r
case mcu of
Just (Annex.SpecialRemote.Config.ConfigFrom cu) ->
set "config-uuid" (fromUUID cu) r'
Nothing -> return r'
set k v r' = do
let ck@(ConfigKey k') = remoteConfig r' k
setConfig ck v
return $ Git.Config.store' k' v r'

data LFSHandle = LFSHandle
{ downloadEndpoint :: Maybe LFS.Endpoint
, uploadEndpoint :: Maybe LFS.Endpoint
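An illustrative restatement of the match predicate used by configKnownUrl above: a remote-log config matches when its type field names git-lfs and its url equals the repo's location. Stand-in String types only; the real code parses the url with Git.Remote.parseRemoteLocation first.

```
import qualified Data.Map as M
import Data.Maybe (fromMaybe)

-- Hypothetical simplification of the match predicate above.
matchesGitLFS :: String -> M.Map String String -> Bool
matchesGitLFS repolocation c = fromMaybe False $ do
    t <- M.lookup "type" c
    u <- M.lookup "url" c
    return (t == "git-lfs" && u == repolocation)
```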
@ -83,7 +83,7 @@ git_annex_shell cs r command params fields
onRemote
:: ConsumeStdin
-> Git.Repo
-> (FilePath -> [CommandParam] -> IO a, Annex a)
-> (FilePath -> [CommandParam] -> Annex a, Annex a)
-> String
-> [CommandParam]
-> [(Field, String)]
@ -91,7 +91,7 @@ onRemote
onRemote cs r (with, errorval) command params fields = do
s <- git_annex_shell cs r command params fields
case s of
Just (c, ps) -> liftIO $ with c ps
Just (c, ps) -> with c ps
Nothing -> errorval

{- Checks if a remote contains a key. -}
@ -100,14 +100,14 @@ inAnnex r k = do
showChecking r
onRemote NoConsumeStdin r (runcheck, cantCheck r) "inannex" [Param $ serializeKey k] []
where
runcheck c p = dispatch =<< safeSystem c p
runcheck c p = liftIO $ dispatch =<< safeSystem c p
dispatch ExitSuccess = return True
dispatch (ExitFailure 1) = return False
dispatch _ = cantCheck r

{- Removes a key from a remote. -}
dropKey :: Git.Repo -> Key -> Annex Bool
dropKey r key = onRemote NoConsumeStdin r (boolSystem, return False) "dropkey"
dropKey r key = onRemote NoConsumeStdin r (\f p -> liftIO (boolSystem f p), return False) "dropkey"
[ Param "--quiet", Param "--force"
, Param $ serializeKey key
]
@ -128,4 +128,8 @@ updateRemote remote = do
{- Checks if a remote is syncable using git. -}
gitSyncableRemote :: Remote -> Bool
gitSyncableRemote r = remotetype r `elem`
[ Remote.Git.remote, Remote.GCrypt.remote, Remote.P2P.remote ]
[ Remote.Git.remote
, Remote.GCrypt.remote
, Remote.P2P.remote
, Remote.GitLFS.remote
]

13
Remote/S3.hs
13
Remote/S3.hs
@ -58,11 +58,10 @@ import Logs.Web
import Logs.MetaData
import Types.MetaData
import Utility.Metered
import qualified Annex.Url as Url
import Utility.DataUnits
import Annex.Content
import Annex.Url (getUrlOptions, withUrlOptions)
import Utility.Url (checkBoth, UrlOptions(..))
import qualified Annex.Url as Url
import Annex.Url (getUrlOptions, withUrlOptions, UrlOptions(..))
import Utility.Env

type BucketName = String
@ -348,7 +347,7 @@ checkKey hv r rs c info k = withS3Handle hv $ \case
Right us -> do
showChecking r
let check u = withUrlOptions $
liftIO . checkBoth u (keySize k)
Url.checkBoth u (keySize k)
anyM check us

checkKeyHelper :: S3Info -> S3Handle -> (Either S3.Object S3VersionID) -> Annex Bool
@ -397,7 +396,7 @@ retrieveExportS3 hv r info _k loc f p =
warning $ needS3Creds (uuid r)
return False
Just geturl -> Url.withUrlOptions $
liftIO . Url.download p (geturl exportloc) f
Url.download p (geturl exportloc) f
exportloc = bucketExportLocation info loc

removeExportS3 :: S3HandleVar -> Remote -> RemoteStateHandle -> S3Info -> Key -> ExportLocation -> Annex Bool
@ -417,8 +416,8 @@ checkPresentExportS3 :: S3HandleVar -> Remote -> S3Info -> Key -> ExportLocation
checkPresentExportS3 hv r info k loc = withS3Handle hv $ \case
Just h -> checkKeyHelper info h (Left (T.pack $ bucketExportLocation info loc))
Nothing -> case getPublicUrlMaker info of
Just geturl -> withUrlOptions $ liftIO .
checkBoth (geturl $ bucketExportLocation info loc) (keySize k)
Just geturl -> withUrlOptions $
Url.checkBoth (geturl $ bucketExportLocation info loc) (keySize k)
Nothing -> do
warning $ needS3Creds (uuid r)
giveup "No S3 credentials configured"
@ -116,9 +116,8 @@ checkKey' key us = firsthit us (Right False) $ \u -> do
showChecking u'
case downloader of
YoutubeDownloader -> youtubeDlCheck u'
_ -> do
Url.withUrlOptions $ liftIO . catchMsgIO .
Url.checkBoth u' (keySize key)
_ -> catchMsgIO $
Url.withUrlOptions $ Url.checkBoth u' (keySize key)
where
firsthit [] miss _ = return miss
firsthit (u:rest) _ a = do
@ -170,5 +170,5 @@ updateTransportHandle :: TransportHandle -> IO TransportHandle
updateTransportHandle h@(TransportHandle _g annexstate) = do
g' <- liftAnnex h $ do
reloadConfig
Annex.fromRepo id
Annex.gitRepo
return (TransportHandle (LocalRepo g') annexstate)
@ -69,7 +69,7 @@ server ichan th@(TransportHandle (LocalRepo r) _) = go
)
unless ok $ do
hClose conn
warningIO "dropped Tor connection, too busy"
liftAnnex th $ warning "dropped Tor connection, too busy"

handlecontrol servicerunning = do
msg <- atomically $ readTChan ichan

27
Test.hs
27
Test.hs
@ -84,6 +84,7 @@ import qualified Utility.Base64
import qualified Utility.Tmp.Dir
import qualified Utility.FileSystemEncoding
import qualified Utility.Aeson
import qualified Utility.CopyFile
#ifndef mingw32_HOST_OS
import qualified Remote.Helper.Encryptable
import qualified Types.Crypto
@ -248,6 +249,7 @@ unitTests note = testGroup ("Unit Tests " ++ note)
, testCase "info" test_info
, testCase "version" test_version
, testCase "sync" test_sync
, testCase "concurrent get of dup key regression" test_concurrent_get_of_dup_key_regression
, testCase "union merge regression" test_union_merge_regression
, testCase "adjusted branch merge regression" test_adjusted_branch_merge_regression
, testCase "adjusted branch subtree regression" test_adjusted_branch_subtree_regression
@ -951,6 +953,31 @@ test_sync = intmpclonerepo $ do
git_annex "sync" ["--content"] @? "sync failed"
git_annex_expectoutput "find" ["--in", "."] []

{- Regression test for the concurrency bug fixed in
- 667d38a8f11c1ee8f256cdbd80e225c2bae06595 -}
test_concurrent_get_of_dup_key_regression :: Assertion
test_concurrent_get_of_dup_key_regression = intmpclonerepo $ do
makedup dupfile
-- This was sufficient concurrency to trigger the bug.
git_annex "get" ["-J1", annexedfile, dupfile]
@? "concurrent get -J1 with dup failed"
git_annex "drop" ["-J1"]
@? "drop with dup failed"
-- With -J2, one more dup file was needed to trigger the bug.
makedup dupfile2
git_annex "get" ["-J2", annexedfile, dupfile, dupfile2]
@? "concurrent get -J2 with dup failed"
git_annex "drop" ["-J2"]
@? "drop with dup failed"
where
dupfile = annexedfile ++ "2"
dupfile2 = annexedfile ++ "3"
makedup f = do
Utility.CopyFile.copyFileExternal Utility.CopyFile.CopyAllMetaData annexedfile f
@? "copying annexed file failed"
boolSystem "git" [Param "add", File f]
@? "git add failed"

{- Regression test for union merge bug fixed in
- 0214e0fb175a608a49b812d81b4632c081f63027 -}
test_union_merge_regression :: Assertion
@ -1,6 +1,6 @@
{- git-annex configuration
-
- Copyright 2012-2015 Joey Hess <id@joeyh.name>
- Copyright 2012-2019 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU AGPL version 3 or higher.
-}
@ -21,6 +21,7 @@ import qualified Git.Config
import qualified Git.Construct
import Git.Types
import Git.ConfigTypes
import Git.Branch (CommitMode(..))
import Utility.DataUnits
import Config.Cost
import Types.UUID
@ -105,6 +106,7 @@ data GitConfig = GitConfig
, annexJobs :: Concurrency
, annexCacheCreds :: Bool
, annexAutoUpgradeRepository :: Bool
, annexCommitMode :: CommitMode
, coreSymlinks :: Bool
, coreSharedRepository :: SharedRepository
, receiveDenyCurrentBranch :: DenyCurrentBranch
@ -186,6 +188,9 @@ extractGitConfig r = GitConfig
parseConcurrency =<< getmaybe (annex "jobs")
, annexCacheCreds = getbool (annex "cachecreds") True
, annexAutoUpgradeRepository = getbool (annex "autoupgraderepository") True
, annexCommitMode = if getbool (annex "allowsign") False
then ManualCommit
else AutomaticCommit
, coreSymlinks = getbool "core.symlinks" True
, coreSharedRepository = getSharedRepository r
, receiveDenyCurrentBranch = getDenyCurrentBranch r
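The new annexCommitMode field maps the annex.allowsign git config onto a CommitMode, exactly as the hunk above shows. A standalone sketch of that mapping, with a local stand-in type rather than the real Git.Branch module:

```
-- Stand-in for Git.Branch.CommitMode.
data CommitMode = ManualCommit | AutomaticCommit
    deriving (Show, Eq)

-- annex.allowsign = true selects ManualCommit; the default stays
-- AutomaticCommit, matching the extractGitConfig hunk above.
commitModeFor :: Bool -> CommitMode
commitModeFor allowsign
    | allowsign = ManualCommit
    | otherwise = AutomaticCommit

-- ghci> commitModeFor False
-- AutomaticCommit
```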
@ -20,7 +20,14 @@ data WorkerPool t = WorkerPool
-- but there can temporarily be fewer values, when a thread is
-- changing between stages.
}
deriving (Show)

instance Show (WorkerPool t) where
show p = unwords
[ "WorkerPool"
, show (usedStages p)
, show (workerList p)
, show (length (spareVals p))
]

-- | A worker can either be idle or running an Async action.
-- And it is used for some stage.
@ -33,7 +40,12 @@ instance Show (Worker t) where
show (ActiveWorker _ s) = "ActiveWorker " ++ show s

data WorkerStage
= PerformStage
= StartStage
-- ^ All threads start in this stage, and then transition away from
-- it to the initialStage when they begin doing work. This should
-- never be included in UsedStages, because transition from some
-- other stage back to this one could result in a deadlock.
| PerformStage
-- ^ Running a CommandPerform action.
| CleanupStage
-- ^ Running a CommandCleanup action.
@ -95,12 +107,13 @@ workerAsync (ActiveWorker aid _) = Just aid
allocateWorkerPool :: t -> Int -> UsedStages -> WorkerPool t
allocateWorkerPool t n u = WorkerPool
{ usedStages = u
, workerList = take totalthreads $ map IdleWorker stages
, workerList = map IdleWorker $
take totalthreads $ concat $ repeat stages
, spareVals = replicate totalthreads t
}
where
stages = concat $ repeat $ S.toList $ stageSet u
totalthreads = n * S.size (stageSet u)
stages = StartStage : S.toList (stageSet u)
totalthreads = n * length stages

addWorkerPool :: Worker t -> WorkerPool t -> WorkerPool t
addWorkerPool w pool = pool { workerList = w : workerList pool }
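allocateWorkerPool now includes StartStage in the stage rotation and sizes the pool at n workers per stage. A standalone sketch of that round-robin allocation, with a stand-in Stage type:

```
data Stage = StartStage | PerformStage | CleanupStage
    deriving (Show, Eq)

-- n workers per stage, cycling through the stage list so every
-- stage gets an equal share of idle workers.
-- (cycle xs is equivalent to concat (repeat xs).)
allocate :: Int -> [Stage] -> [Stage]
allocate n stages = take totalthreads (cycle stages)
  where
    totalthreads = n * length stages

-- ghci> allocate 2 [StartStage, PerformStage]
-- [StartStage,PerformStage,StartStage,PerformStage]
```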
@ -7,7 +7,9 @@
- Licensed under the GNU AGPL version 3 or higher.
-}

module Utility.Android where
module Utility.Android (
osAndroid
) where

#ifdef linux_HOST_OS
import Common
@ -5,7 +5,9 @@
- License: BSD-2-clause
-}

module Utility.Applicative where
module Utility.Applicative (
(<$$>),
) where

{- Like <$> , but supports one level of currying.
-
@ -7,7 +7,14 @@
{-# LANGUAGE CPP #-}

module Utility.Batch where
module Utility.Batch (
batch,
BatchCommandMaker,
getBatchCommandMaker,
toBatchCommand,
batchCommand,
batchCommandEnv,
) where

import Common
@ -7,7 +7,13 @@
{-# LANGUAGE OverloadedStrings, ScopedTypeVariables #-}

module Utility.DBus where
module Utility.DBus (
ServiceName,
listServiceNames,
callDBus,
runClient,
persistentClient,
) where

import Utility.PartialPrelude
import Utility.Exception
@ -7,7 +7,12 @@
{-# LANGUAGE CPP #-}

module Utility.Daemon where
module Utility.Daemon (
daemonize,
foreground,
checkDaemon,
stopDaemon,
) where

import Common
import Utility.PID
@ -7,7 +7,10 @@
{-# OPTIONS_GHC -fno-warn-tabs #-}

module Utility.Data where
module Utility.Data (
firstJust,
eitherToMaybe,
) where

{- First item in the list that is not Nothing. -}
firstJust :: Eq a => [Maybe a] -> Maybe a
@ -8,7 +8,7 @@
{-# LANGUAGE CPP #-}
{-# LANGUAGE ScopedTypeVariables #-}

module Utility.DebugLocks where
module Utility.DebugLocks (debugLocks) where

import Control.Monad.Catch
import Control.Monad.IO.Class
@ -11,7 +11,15 @@
{-# LANGUAGE CPP #-}

module Utility.DirWatcher where
module Utility.DirWatcher (
canWatch,
eventsCoalesce,
closingTracked,
modifyTracked,
DirWatcherHandle,
watchDir,
stopWatchDir,
) where

import Utility.DirWatcher.Types
@ -5,7 +5,7 @@
- License: BSD-2-clause
-}

module Utility.DirWatcher.FSEvents where
module Utility.DirWatcher.FSEvents (watchDir) where

import Common hiding (isDirectory)
import Utility.DirWatcher.Types
@ -5,7 +5,7 @@
- License: BSD-2-clause
-}

module Utility.DirWatcher.INotify where
module Utility.DirWatcher.INotify (watchDir) where

import Common hiding (isDirectory)
import Utility.ThreadLock
@ -5,7 +5,11 @@
- License: BSD-2-clause
-}

module Utility.DirWatcher.Types where
module Utility.DirWatcher.Types (
Hook,
WatchHooks(..),
mkWatchHooks,
) where

import Common
@ -5,7 +5,7 @@
- License: BSD-2-clause
-}

module Utility.DirWatcher.Win32Notify where
module Utility.DirWatcher.Win32Notify (watchDir) where

import Common hiding (isDirectory)
import Utility.DirWatcher.Types
@ -9,11 +9,16 @@
{-# LANGUAGE LambdaCase #-}
{-# OPTIONS_GHC -fno-warn-tabs #-}

module Utility.Directory.Stream where
module Utility.Directory.Stream (
DirectoryHandle,
openDirectory,
closeDirectory,
readDirectory,
isDirectoryEmpty,
) where

import Control.Monad
import System.FilePath
import System.IO.Unsafe (unsafeInterleaveIO)
import Control.Concurrent
import Data.Maybe
import Prelude
@ -100,22 +105,6 @@ readDirectory hdl@(DirectoryHandle _ h fdat mv) = do
return (Just filename)
#endif

-- | Like getDirectoryContents, but rather than buffering the whole
-- directory content in memory, lazily streams.
--
-- This is like lazy readFile in that the handle to the directory remains
-- open until the whole list is consumed, or until the list is garbage
-- collected. So use with caution particularly when traversing directory
-- trees.
streamDirectoryContents :: FilePath -> IO [FilePath]
streamDirectoryContents d = openDirectory d >>= collect
where
collect hdl = readDirectory hdl >>= \case
Nothing -> return []
Just f -> do
rest <- unsafeInterleaveIO (collect hdl)
return (f:rest)

-- | True only when directory exists and contains nothing.
-- Throws exception if directory does not exist.
isDirectoryEmpty :: FilePath -> IO Bool
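The comment above documents the laziness hazard of the removed streamDirectoryContents: the directory handle stays open until the list is fully consumed or garbage collected. A tiny illustration of why that is risky, assuming the definition shown in the hunk above:

```
-- Illustration of the laziness hazard documented above. Only the
-- consumed prefix of the list has actually been read; the directory
-- handle remains open here until the rest is demanded or GC'd.
firstEntry :: FilePath -> IO FilePath
firstEntry d = do
    (f:_) <- streamDirectoryContents d
    return f
```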
@ -1,11 +1,23 @@
{- a simple graphviz / dot(1) digraph description generator library
-
- import qualified
-
- Copyright 2010 Joey Hess <id@joeyh.name>
-
- License: BSD-2-clause
-}

module Utility.Dot where -- import qualified
module Utility.Dot (
graph,
graphNode,
graphEdge,
label,
attr,
fillColor,
subGraph,
indent,
quote,
) where

{- generates a graph description from a list of lines -}
graph :: [String] -> String
@ -7,7 +7,11 @@
{-# OPTIONS_GHC -fno-warn-tabs #-}

module Utility.DottedVersion where
module Utility.DottedVersion (
DottedVersion,
fromDottedVersion,
normalize,
) where

import Common
@ -8,7 +8,14 @@
{-# LANGUAGE CPP #-}
{-# OPTIONS_GHC -fno-warn-tabs #-}

module Utility.Env where
module Utility.Env (
getEnv,
getEnvDefault,
getEnvironment,
addEntry,
addEntries,
delEntry,
) where

#ifdef mingw32_HOST_OS
import Utility.Exception
|
@ -7,7 +7,10 @@
|
|||
|
||||
{-# OPTIONS_GHC -fno-warn-tabs #-}
|
||||
|
||||
module Utility.Env.Basic where
|
||||
module Utility.Env.Basic (
|
||||
getEnv,
|
||||
getEnvDefault,
|
||||
) where
|
||||
|
||||
import Utility.Exception
|
||||
import Control.Applicative
|
||||
|
|
|
@ -7,7 +7,10 @@
{-# LANGUAGE CPP #-}

module Utility.Env.Set where
module Utility.Env.Set (
setEnv,
unsetEnv,
) where

#ifdef mingw32_HOST_OS
import qualified System.SetEnv
@ -5,7 +5,11 @@
{-# LANGUAGE CPP #-}

module Utility.FileSize where
module Utility.FileSize (
FileSize,
getFileSize,
getFileSize',
) where

import System.PosixCompat.Files
#ifdef mingw32_HOST_OS
@ -7,7 +7,32 @@
{-# LANGUAGE CPP #-}

module Utility.Gpg where
module Utility.Gpg (
KeyId,
KeyIds(..),
GpgCmd(..),
mkGpgCmd,
boolGpgCmd,
pkEncTo,
stdEncryptionParams,
pipeStrict,
feedRead,
pipeLazy,
findPubKeys,
UserId,
secretKeys,
KeyType(..),
maxRecommendedKeySize,
genSecretKey,
genRandom,
testKeyId,
#ifndef mingw32_HOST_OS
testHarness,
testTestHarness,
checkEncryptionFile,
checkEncryptionStream,
#endif
) where

import Common
import qualified BuildInfo
@ -279,6 +304,7 @@ genRandom cmd highQuality size = checksize <$> readStrict cmd params
- It has an empty passphrase. -}
testKeyId :: String
testKeyId = "129D6E0AC537B9C7"

testKey :: String
testKey = keyBlock True
[ "mI0ETvFAZgEEAKnqwWgZqznMhi1RQExem2H8t3OyKDxaNN3rBN8T6LWGGqAYV4wT"
@ -299,6 +325,7 @@ testKey = keyBlock True
, "+gQkDF9/"
, "=1k11"
]

testSecretKey :: String
testSecretKey = keyBlock False
[ "lQHYBE7xQGYBBACp6sFoGas5zIYtUUBMXpth/Ldzsig8WjTd6wTfE+i1hhqgGFeM"
@ -332,6 +359,7 @@ testSecretKey = keyBlock False
, "IJf+/dFjxEmflWpbxw/36pEd/EReLX8b8qDIYadK6BpiWN9xgEiBv/oEJAxffw=="
, "=LDsg"
]

keyBlock :: Bool -> [String] -> String
keyBlock public ls = unlines
[ "-----BEGIN PGP "++t++" KEY BLOCK-----"
@ -381,9 +409,7 @@ testTestHarness :: FilePath -> GpgCmd -> IO Bool
testTestHarness tmpdir cmd = do
keys <- testHarness tmpdir cmd $ findPubKeys cmd testKeyId
return $ KeyIds [testKeyId] == keys
#endif

#ifndef mingw32_HOST_OS
checkEncryptionFile :: GpgCmd -> FilePath -> Maybe KeyIds -> IO Bool
checkEncryptionFile cmd filename keys =
checkGpgPackets cmd keys =<< readStrict cmd params
@ -5,7 +5,11 @@
- License: BSD-2-clause
-}

module Utility.HtmlDetect where
module Utility.HtmlDetect (
isHtml,
isHtmlBs,
htmlPrefixLength,
) where

import Text.HTML.TagSoup
import Data.Char
@ -5,7 +5,7 @@
- License: BSD-2-clause
-}

module Utility.HumanNumber where
module Utility.HumanNumber (showImprecise) where

{- Displays a fractional value as a string with a limited number
- of decimal digits. -}
@ -5,7 +5,12 @@
- License: BSD-2-clause
-}

module Utility.IPAddress where
module Utility.IPAddress (
extractIPAddress,
isLoopbackAddress,
isPrivateAddress,
makeAddressMatcher,
) where

import Utility.Exception
@ -5,7 +5,11 @@
- License: BSD-2-clause
-}

module Utility.LinuxMkLibs where
module Utility.LinuxMkLibs (
installLib,
parseLdd,
glibcLibs,
) where

import Utility.PartialPrelude
import Utility.Directory
@ -5,7 +5,7 @@
- License: BSD-2-clause
-}

module Utility.LockFile.LockStatus where
module Utility.LockFile.LockStatus (LockStatus(..)) where

import System.Posix
@ -7,7 +7,15 @@
{-# LANGUAGE CPP #-}

module Utility.LogFile where
module Utility.LogFile (
openLog,
listLogs,
maxLogs,
#ifndef mingw32_HOST_OS
redirLog,
redir,
#endif
) where

import Common
@ -5,7 +5,12 @@
- License: BSD-2-clause
-}

module Utility.Lsof where
module Utility.Lsof (
LsofOpenMode(..),
setup,
queryDir,
query,
) where

import Common
import BuildInfo
@ -7,7 +7,40 @@
{-# LANGUAGE TypeSynonymInstances, BangPatterns #-}

module Utility.Metered where
module Utility.Metered (
MeterUpdate,
nullMeterUpdate,
combineMeterUpdate,
BytesProcessed(..),
toBytesProcessed,
fromBytesProcessed,
addBytesProcessed,
zeroBytesProcessed,
withMeteredFile,
meteredWrite,
meteredWrite',
meteredWriteFile,
offsetMeterUpdate,
hGetContentsMetered,
hGetMetered,
defaultChunkSize,
watchFileSize,
OutputHandler(..),
ProgressParser,
commandMeter,
commandMeter',
demeterCommand,
demeterCommandEnv,
avoidProgress,
rateLimitMeterUpdate,
Meter,
mkMeter,
setMeterTotalSize,
updateMeter,
displayMeterHandle,
clearMeterHandle,
bandwidthMeter,
) where

import Common
import Utility.Percentage
@ -80,11 +113,6 @@ withMeteredFile :: FilePath -> MeterUpdate -> (L.ByteString -> IO a) -> IO a
withMeteredFile f meterupdate a = withBinaryFile f ReadMode $ \h ->
hGetContentsMetered h meterupdate >>= a

{- Sends the content of a file to a Handle, updating the meter as it's
- written. -}
streamMeteredFile :: FilePath -> MeterUpdate -> Handle -> IO ()
streamMeteredFile f meterupdate h = withMeteredFile f meterupdate $ L.hPut h

{- Writes a ByteString to a Handle, updating a meter as it's written. -}
meteredWrite :: MeterUpdate -> Handle -> L.ByteString -> IO ()
meteredWrite meterupdate h = void . meteredWrite' meterupdate h
@ -7,7 +7,19 @@
{-# OPTIONS_GHC -fno-warn-tabs #-}

module Utility.Misc where
module Utility.Misc (
hGetContentsStrict,
readFileStrict,
separate,
firstLine,
segment,
segmentDelim,
massReplace,
hGetSomeString,
exitBool,

prop_segment_regressionTest,
) where

import System.IO
import Control.Monad
@ -7,7 +7,19 @@
{-# OPTIONS_GHC -fno-warn-tabs #-}

module Utility.Monad where
module Utility.Monad (
firstM,
getM,
anyM,
allM,
untilTrue,
ifM,
(<||>),
(<&&>),
observe,
after,
noop,
) where

import Data.Maybe
import Control.Monad
@ -7,7 +7,7 @@
{-# OPTIONS_GHC -fno-warn-tabs #-}

module Utility.Network where
module Utility.Network (getHostname) where

import Utility.Process
import Utility.Exception
@ -7,7 +7,12 @@
{-# OPTIONS_GHC -fno-warn-tabs #-}

module Utility.OSX where
module Utility.OSX (
autoStartBase,
systemAutoStart,
userAutoStart,
genOSXAutoStartFile,
) where

import Utility.UserInfo
@ -5,7 +5,10 @@
- License: BSD-2-clause
-}

module Utility.OptParse where
module Utility.OptParse (
invertableSwitch,
invertableSwitch',
) where

import Options.Applicative
import Data.Monoid
@ -7,7 +7,7 @@
{-# LANGUAGE CPP #-}

module Utility.PID where
module Utility.PID (PID, getPID) where

#ifndef mingw32_HOST_OS
import System.Posix.Types (ProcessID)
@ -5,7 +5,7 @@
- License: BSD-2-clause
-}

module Utility.Parallel where
module Utility.Parallel (inParallel) where

import Common
@ -7,7 +7,18 @@
{-# OPTIONS_GHC -fno-warn-tabs #-}

module Utility.PartialPrelude where
module Utility.PartialPrelude (
Utility.PartialPrelude.read,
Utility.PartialPrelude.head,
Utility.PartialPrelude.tail,
Utility.PartialPrelude.init,
Utility.PartialPrelude.last,
Utility.PartialPrelude.readish,
Utility.PartialPrelude.headMaybe,
Utility.PartialPrelude.lastMaybe,
Utility.PartialPrelude.beginning,
Utility.PartialPrelude.end,
) where

import qualified Data.Maybe
@ -8,7 +8,29 @@
{-# LANGUAGE CPP #-}
{-# OPTIONS_GHC -fno-warn-tabs #-}

module Utility.Path where
module Utility.Path (
simplifyPath,
absPathFrom,
parentDir,
upFrom,
dirContains,
absPath,
relPathCwdToFile,
relPathDirToFile,
relPathDirToFileAbs,
segmentPaths,
runSegmentPaths,
relHome,
inPath,
searchPath,
dotfile,
sanitizeFilePath,
splitShortExtensions,

prop_upFrom_basics,
prop_relPathDirToFile_basics,
prop_relPathDirToFile_regressionTest,
) where

import System.FilePath
import Data.List
@ -8,7 +8,7 @@
{-# LANGUAGE CPP #-}
{-# OPTIONS_GHC -fno-warn-tabs #-}

module Utility.Path.Max where
module Utility.Path.Max (fileNameLengthLimit) where

#ifndef mingw32_HOST_OS
import Utility.Exception
@ -8,7 +8,11 @@
{-# LANGUAGE CPP #-}
{-# OPTIONS_GHC -fno-warn-tabs #-}

module Utility.Process.Transcript where
module Utility.Process.Transcript (
processTranscript,
processTranscript',
processTranscript'',
) where

import Utility.Process
import Utility.Misc
@ -7,7 +7,17 @@
{-# LANGUAGE CPP #-}

module Utility.Rsync where
module Utility.Rsync (
rsyncShell,
rsyncServerSend,
rsyncServerReceive,
rsyncUseDestinationPermissions,
rsync,
rsyncUrlIsShell,
rsyncUrlIsPath,
rsyncProgress,
filterRsyncSafeOptions,
) where

import Common
import Utility.Metered
@ -161,10 +171,8 @@ filterRsyncSafeOptions = fst3 . getOpt Permute
- The virtual filesystem contains:
- /c, /d, ... mount points for Windows drives
-}
#ifdef mingw32_HOST_OS
toMSYS2Path :: FilePath -> FilePath
#ifndef mingw32_HOST_OS
toMSYS2Path = id
#else
toMSYS2Path p
| null drive = recombine parts
| otherwise = recombine $ "/" : driveletter drive : parts
@ -7,7 +7,15 @@
{-# LANGUAGE CPP #-}

module Utility.Su where
module Utility.Su (
WhosePassword(..),
PasswordPrompt(..),
describePasswordPrompt,
describePasswordPrompt',
SuCommand,
runSuCommand,
mkSuCommand,
) where

import Common

165
Utility/Url.hs
165
Utility/Url.hs
@ -138,22 +138,14 @@ curlParams uo ps = ps ++ uaparams ++ headerparams ++ addedparams ++ schemeparams
]
schemelist = map fromScheme $ S.toList $ allowedSchemes uo

checkPolicy :: UrlOptions -> URI -> a -> (String -> IO b) -> IO a -> IO a
checkPolicy uo u onerr displayerror a
checkPolicy :: UrlOptions -> URI -> IO (Either String a) -> IO (Either String a)
checkPolicy uo u a
| allowedScheme uo u = a
| otherwise = do
void $ displayerror $
"Configuration does not allow accessing " ++ show u
return onerr
| otherwise = return $ Left $
"Configuration does not allow accessing " ++ show u

unsupportedUrlScheme :: URI -> (String -> IO a) -> IO a
unsupportedUrlScheme u displayerror =
displayerror $ "Unsupported url scheme " ++ show u

warnError :: String -> IO ()
warnError msg = do
hPutStrLn stderr msg
hFlush stderr
unsupportedUrlScheme :: URI -> String
unsupportedUrlScheme u = "Unsupported url scheme " ++ show u

allowedScheme :: UrlOptions -> URI -> Bool
allowedScheme uo u = uscheme `S.member` allowedSchemes uo
@ -161,14 +153,18 @@ allowedScheme uo u = uscheme `S.member` allowedSchemes uo
uscheme = mkScheme $ takeWhile (/=':') (uriScheme u)

{- Checks that an url exists and could be successfully downloaded,
- also checking that its size, if available, matches a specified size. -}
checkBoth :: URLString -> Maybe Integer -> UrlOptions -> IO Bool
checkBoth url expected_size uo = do
v <- check url expected_size uo
return (fst v && snd v)
- also checking that its size, if available, matches a specified size.
-
- The Left error is returned if policy does not allow accessing the url
- or the url scheme is not supported.
-}
checkBoth :: URLString -> Maybe Integer -> UrlOptions -> IO (Either String Bool)
checkBoth url expected_size uo = fmap go <$> check url expected_size uo
where
go v = fst v && snd v

check :: URLString -> Maybe Integer -> UrlOptions -> IO (Bool, Bool)
check url expected_size uo = go <$> getUrlInfo url uo
check :: URLString -> Maybe Integer -> UrlOptions -> IO (Either String (Bool, Bool))
check url expected_size uo = fmap go <$> getUrlInfo url uo
where
go (UrlInfo False _ _) = (False, False)
go (UrlInfo True Nothing _) = (True, True)
@ -176,8 +172,8 @@ check url expected_size uo = go <$> getUrlInfo url uo
Just _ -> (True, expected_size == s)
Nothing -> (True, True)

exists :: URLString -> UrlOptions -> IO Bool
exists url uo = urlExists <$> getUrlInfo url uo
exists :: URLString -> UrlOptions -> IO (Either String Bool)
exists url uo = fmap urlExists <$> getUrlInfo url uo

data UrlInfo = UrlInfo
{ urlExists :: Bool
@ -190,32 +186,36 @@ assumeUrlExists :: UrlInfo
assumeUrlExists = UrlInfo True Nothing Nothing

{- Checks that an url exists and could be successfully downloaded,
- also returning its size and suggested filename if available. -}
getUrlInfo :: URLString -> UrlOptions -> IO UrlInfo
- also returning its size and suggested filename if available.
-
- The Left error is returned if policy does not allow accessing the url
- or the url scheme is not supported.
-}
getUrlInfo :: URLString -> UrlOptions -> IO (Either String UrlInfo)
getUrlInfo url uo = case parseURIRelaxed url of
Just u -> checkPolicy uo u dne warnError $
case (urlDownloader uo, parseUrlRequest (show u)) of
(DownloadWithConduit (DownloadWithCurlRestricted r), Just req) -> catchJust
-- When http redirects to a protocol which
-- conduit does not support, it will throw
-- a StatusCodeException with found302
-- and a Response with the redir Location.
(matchStatusCodeException (== found302))
(existsconduit req)
(followredir r)
`catchNonAsync` (const $ return dne)
(DownloadWithConduit (DownloadWithCurlRestricted r), Nothing)
| isfileurl u -> existsfile u
| isftpurl u -> existscurlrestricted r u url ftpport
`catchNonAsync` (const $ return dne)
| otherwise -> do
unsupportedUrlScheme u warnError
return dne
(DownloadWithCurl _, _)
| isfileurl u -> existsfile u
| otherwise -> existscurl u (basecurlparams url)
Nothing -> return dne
where
Just u -> checkPolicy uo u (go u)
Nothing -> return (Right dne)
where
go :: URI -> IO (Either String UrlInfo)
go u = case (urlDownloader uo, parseUrlRequest (show u)) of
(DownloadWithConduit (DownloadWithCurlRestricted r), Just req) -> catchJust
-- When http redirects to a protocol which
-- conduit does not support, it will throw
-- a StatusCodeException with found302
-- and a Response with the redir Location.
(matchStatusCodeException (== found302))
(Right <$> existsconduit req)
(followredir r)
`catchNonAsync` (const $ return $ Right dne)
(DownloadWithConduit (DownloadWithCurlRestricted r), Nothing)
| isfileurl u -> Right <$> existsfile u
| isftpurl u -> (Right <$> existscurlrestricted r u url ftpport)
`catchNonAsync` (const $ return $ Right dne)
| otherwise -> return $ Left $ unsupportedUrlScheme u
(DownloadWithCurl _, _)
| isfileurl u -> Right <$> existsfile u
| otherwise -> Right <$> existscurl u (basecurlparams url)

dne = UrlInfo False Nothing Nothing
found sz f = return $ UrlInfo True sz f
@ -291,11 +291,11 @@ getUrlInfo url uo = case parseURIRelaxed url of
-- http to file redirect would not be secure,
-- and http-conduit follows http to http.
Just u' | isftpurl u' ->
checkPolicy uo u' dne warnError $
checkPolicy uo u' $ Right <$>
existscurlrestricted r u' url' ftpport
_ -> return dne
Nothing -> return dne
followredir _ _ = return dne
_ -> return (Right dne)
Nothing -> return (Right dne)
followredir _ _ = return (Right dne)

-- Parse eg: attachment; filename="fname.ext"
-- per RFC 2616
@ -317,31 +317,32 @@ headRequest r = r
{- Download a perhaps large file, with auto-resume of incomplete downloads.
-
- Displays error message on stderr when download failed.
- When the download fails, returns an error message.
-}
download :: MeterUpdate -> URLString -> FilePath -> UrlOptions -> IO Bool
download :: MeterUpdate -> URLString -> FilePath -> UrlOptions -> IO (Either String ())
download = download' False

{- Avoids displaying any error message. -}
{- Avoids displaying any error message, including silencing curl errors. -}
downloadQuiet :: MeterUpdate -> URLString -> FilePath -> UrlOptions -> IO Bool
downloadQuiet = download' True
downloadQuiet meterupdate url file uo = isRight
<$> download' True meterupdate url file uo

download' :: Bool -> MeterUpdate -> URLString -> FilePath -> UrlOptions -> IO Bool
download' noerror meterupdate url file uo =
download' :: Bool -> MeterUpdate -> URLString -> FilePath -> UrlOptions -> IO (Either String ())
download' nocurlerror meterupdate url file uo =
catchJust matchHttpException go showhttpexception
`catchNonAsync` (dlfailed . show)
where
go = case parseURIRelaxed url of
Just u -> checkPolicy uo u False dlfailed $
Just u -> checkPolicy uo u $
case (urlDownloader uo, parseUrlRequest (show u)) of
(DownloadWithConduit (DownloadWithCurlRestricted r), Just req) -> catchJust
(matchStatusCodeException (== found302))
(downloadConduit meterupdate req file uo >> return True)
(downloadConduit meterupdate req file uo >> return (Right ()))
(followredir r)
(DownloadWithConduit (DownloadWithCurlRestricted r), Nothing)
| isfileurl u -> downloadfile u
| isftpurl u -> downloadcurlrestricted r u url ftpport
| otherwise -> unsupportedUrlScheme u dlfailed
| otherwise -> dlfailed $ unsupportedUrlScheme u
(DownloadWithCurl _, _)
| isfileurl u -> downloadfile u
| otherwise -> downloadcurl url basecurlparams
@ -354,27 +355,20 @@ download' noerror meterupdate url file uo =
ftpport = 21

showhttpexception he = do
let msg = case he of
HttpExceptionRequest _ (StatusCodeException r _) ->
B8.toString $ statusMessage $ responseStatus r
HttpExceptionRequest _ (InternalException ie) ->
case fromException ie of
Nothing -> show ie
Just (ConnectionRestricted why) -> why
HttpExceptionRequest _ other -> show other
_ -> show he
dlfailed msg

dlfailed msg
| noerror = return False
| otherwise = do
hPutStrLn stderr $ "download failed: " ++ msg
hFlush stderr
return False
showhttpexception he = dlfailed $ case he of
HttpExceptionRequest _ (StatusCodeException r _) ->
B8.toString $ statusMessage $ responseStatus r
HttpExceptionRequest _ (InternalException ie) ->
case fromException ie of
Nothing -> show ie
Just (ConnectionRestricted why) -> why
HttpExceptionRequest _ other -> show other
_ -> show he

dlfailed msg = return $ Left $ "download failed: " ++ msg

basecurlparams = curlParams uo
[ if noerror
[ if nocurlerror
then Param "-S"
else Param "-sS"
, Param "-f"
@ -387,7 +381,10 @@ download' noerror meterupdate url file uo =
-- if the url happens to be empty, so pre-create.
unlessM (doesFileExist file) $
writeFile file ""
boolSystem "curl" (curlparams ++ [Param "-o", File file, File rawurl])
ifM (boolSystem "curl" (curlparams ++ [Param "-o", File file, File rawurl]))
( return $ Right ()
, return $ Left "download failed"
)

downloadcurlrestricted r u rawurl defport =
downloadcurl rawurl =<< curlRestrictedParams r u defport basecurlparams
@ -396,7 +393,7 @@ download' noerror meterupdate url file uo =
let src = unEscapeString (uriPath u)
withMeteredFile src meterupdate $
L.writeFile file
return True
return $ Right ()

-- Conduit does not support ftp, so will throw an exception on a
-- redirect to a ftp url; fall back to curl.
@ -404,7 +401,7 @@ download' noerror meterupdate url file uo =
case headMaybe $ map decodeBS $ getResponseHeader hLocation resp of
Just url' -> case parseURIRelaxed url' of
Just u' | isftpurl u' ->
checkPolicy uo u' False dlfailed $
checkPolicy uo u' $
downloadcurlrestricted r u' url' ftpport
_ -> throwIO ex
Nothing -> throwIO ex
@ -448,7 +445,7 @@ downloadConduit meterupdate req file uo =
liftIO $ debugM "url" (show req'')
resp <- http req'' (httpManager uo)
if responseStatus resp == partialContent206
then store (BytesProcessed sz) AppendMode resp
then store (toBytesProcessed sz) AppendMode resp
else if responseStatus resp == ok200
then store zeroBytesProcessed WriteMode resp
else respfailure resp
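A note on the Utility/Url.hs change as a whole: checkBoth, check, exists, getUrlInfo and download now return Either String results instead of printing to stderr, so callers decide how to surface errors. A hedged sketch of how a caller might degrade to the old Bool behavior (warnOnLeft is a hypothetical adapter, not git-annex code):

```
{-# LANGUAGE LambdaCase #-}
import System.IO (hPutStrLn, stderr)

-- Hypothetical adapter: run one of the new Either-returning actions
-- (e.g. checkBoth url size uo) and fall back to the pre-patch
-- behavior of printing the error and returning False.
warnOnLeft :: IO (Either String Bool) -> IO Bool
warnOnLeft a = a >>= \case
    Left err -> hPutStrLn stderr err >> return False
    Right ok -> return ok
```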
12
doc/bugs/OSX_dmg_git-core_binaries_do_not_link.mdwn
Normal file
12
doc/bugs/OSX_dmg_git-core_binaries_do_not_link.mdwn
Normal file
@ -0,0 +1,12 @@
The OSX .dmg contains a few binaries in git-core like git-remote-http.
They have been adjusted by otool to link to libraries in the same directory
as the binary. However, the libraries are not located in the git-core
directory, but in its parent directory, and so the git-core binaries don't
link.

I don't think this is a new regression, but not entirely sure.

Seems that OSXMkLibs could symlink ../lib into git-core.
--[[Joey]]

> [[fixed|done]] --[[Joey]]
@ -0,0 +1,52 @@
[[!comment format=mdwn
username="xwvvvvwx"
avatar="http://cdn.libravatar.org/avatar/7198160b33539b5b1b2d56ca85c562d9"
subject="comment 14"
date="2019-11-21T17:32:31Z"
content="""
I just reproduced this when pushing to a gcrypt remote on rsync.net using the assistant. There is only one client pushing to the gcrypt remote.

It was during the initial sync of a moderately large amount of data (~22G), perhaps this has something to do with it?

I could reproduce the issue by cloning with gcrypt directly (`git clone gcrypt::ssh://....`).

I was able to recover by following the steps outlined in Schnouki's comment (#12), but this is obviously quite an unsatisfactory fix.

I am using annex to replicate important personal data, and I find this issue highly concerning.

Foolishly, I did not keep a copy of the bad repo before I forced pushed over it on the remote, so I do not have a copy available to experiment with :(

---

## logs

`daemon.log` excerpt: [https://ipfs.io/ipfs/QmcoPuTLY2v5FWPABQLVwgyqW5WdsvkBbVS33cJh6zjzi4](https://ipfs.io/ipfs/QmcoPuTLY2v5FWPABQLVwgyqW5WdsvkBbVS33cJh6zjzi4)

`git clone` output:

```
[annex@xwvvvvwx:~]$ git clone gcrypt::ssh://<URL> remote
Cloning into 'remote'...
gcrypt: Decrypting manifest
gpg: Signature made Thu 21 Nov 2019 04:02:40 PM CET
gpg: using RSA key 92E9F58E9F8C6845423C251AACD9A98951774194
gpg: Good signature from \"git-annex <annex@xwvvvvwx.com>\" [ultimate]
gcrypt: Remote ID is :id:tWrcOFKu2yX7y+jLDLxm
gcrypt: Packfile e7b619864585f3c921b491fd041127cf0ae33c4480810610dcb2e37ec46a82be does not match digest!
fatal: early EOF
```

`git annex version`:

```
git-annex version: 7.20191114
build flags: Assistant Webapp Pairing S3 WebDAV Inotify DBus DesktopNotify TorrentParser MagicMime Feeds Testsuite
dependency versions: aws-0.21.1 bloomfilter-2.0.1.0 cryptonite-0.25 DAV-1.3.3 feed-1.2.0.1 ghc-8.6.5 http-client-0.6.4 persistent-sqlite-2.9.3 torrent-10000.1.1 uuid-1.3.13 yesod-1.6.0
key/value backends: SHA256E SHA256 SHA512E SHA512 SHA224E SHA224 SHA384E SHA384 SHA3_256E SHA3_256 SHA3_512E SHA3_512 SHA3_224E SHA3_224 SHA3_384E SHA3_384 SKEIN256E SKEIN256 SKEIN512E SKEIN512 BLAKE2B256E BLAKE2B256 BLAKE2B512E BLAKE2B512 BLAKE2B160E BLAKE2B160 BLAKE2B224E BLAKE2B224 BLAKE2B384E BLAKE2B384 BLAKE2BP512E BLAKE2BP512 BLAKE2S256E BLAKE2S256 BLAKE2S160E BLAKE2S160 BLAKE2S224E BLAKE2S224 BLAKE2SP256E BLAKE2SP256 BLAKE2SP224E BLAKE2SP224 SHA1E SHA1 MD5E MD5 WORM URL
remote types: git gcrypt p2p S3 bup directory rsync web bittorrent webdav adb tahoe glacier ddar git-lfs hook external
operating system: linux x86_64
supported repository versions: 7
upgrade supported from repository versions: 0 1 2 3 4 5 6
```
"""]]
@ -0,0 +1,24 @@
[[!comment format=mdwn
username="joey"
subject="""comment 3"""
date="2019-11-13T19:29:34Z"
content="""
--debug might provide some clue in its http dump.

The ParseError comes from attoparsec. Seems likely that aeson/aws is what's
using it there, and that it is failing to parse something from S3.

Of course, the malloc error suggests a low-level memory problem, probably
from C code. I don't think git-annex contains anything like that, so it
must be from a dependency.

The S3 signature being wrong again points to the aws library, or something
lower level. And then the following double free is another low-level memory
problem.

So there's a pattern, and it seems to extend across linux and OSX.

Kind of wondering if something in the library stack is somehow failing to
be concurrency safe. If two http requests end up using the same memory,
it would kind of explain all of this.
"""]]
406
doc/bugs/cygwin.mdwn
Normal file
406
doc/bugs/cygwin.mdwn
Normal file
@ -0,0 +1,406 @@
Cygwin does not work with the git-annex Windows installed version.


### What steps will reproduce the problem?
* Install the git-annex Windows version.
* Run git annex test under Cygwin: 65 of 101 tests fail.
* Run git annex test under Git Bash on Windows: all 101 tests pass.
* NOTE: the git-lfs Windows installed version works fine under both Cygwin and Git Bash.


### What version of git-annex are you using? On what operating system?
git-annex version: 7.20191106-ge486fd5e0
build flags: Assistant Webapp Pairing S3 WebDAV TorrentParser Feeds Testsuite
dependency versions: aws-0.21.1 bloomfilter-2.0.1.0 cryptonite-0.25 DAV-1.3.3 feed-1.0.1.0 ghc-8.6.5 http-client-0.5.14 persistent-sqlite-2.9.3 torrent-10000.1.1 uuid-1.3.13 yesod-1.6.0
key/value backends: SHA256E SHA256 SHA512E SHA512 SHA224E SHA224 SHA384E SHA384 SHA3_256E SHA3_256 SHA3_512E SHA3_512 SHA3_224E SHA3_224 SHA3_384E SHA3_384 SKEIN256E SKEIN256 SKEIN512E SKEIN512 BLAKE2B256E BLAKE2B256 BLAKE2B512E BLAKE2B512 BLAKE2B160E BLAKE2B160 BLAKE2B224E BLAKE2B224 BLAKE2B384E BLAKE2B384 BLAKE2BP512E BLAKE2BP512 BLAKE2S256E BLAKE2S256 BLAKE2S160E BLAKE2S160 BLAKE2S224E BLAKE2S224 BLAKE2SP256E BLAKE2SP256 BLAKE2SP224E BLAKE2SP224 SHA1E SHA1 MD5E MD5 WORM URL
remote types: git gcrypt p2p S3 bup directory rsync web bittorrent webdav adb tahoe glacier ddar git-lfs hook external
operating system: mingw32 x86_64
supported repository versions: 7
upgrade supported from repository versions: 2 3 4 5 6

### Please provide any additional information below.

[[!format sh """
# If you can, paste a complete transcript of the problem occurring here.
# If the problem is with the git-annex assistant, paste in .git/annex/daemon.log
Cygwin ~/git..t_tools/wget/cache (test)
(506)$ git-annex test
Tests
QuickCheck
prop_encode_decode_roundtrip: OK (0.04s)
+++ OK, passed 1000 tests.
prop_encode_c_decode_c_roundtrip: OK (0.03s)
+++ OK, passed 1000 tests.
prop_isomorphic_key_encode: OK
+++ OK, passed 1000 tests.
prop_isomorphic_shellEscape: OK (0.02s)
+++ OK, passed 1000 tests.
prop_isomorphic_shellEscape_multiword: OK (0.70s)
+++ OK, passed 1000 tests.
prop_isomorphic_configEscape: OK (0.02s)
+++ OK, passed 1000 tests.
prop_parse_show_Config: OK (0.04s)
+++ OK, passed 1000 tests.
prop_upFrom_basics: OK (0.02s)
+++ OK, passed 1000 tests.
prop_relPathDirToFile_basics: OK (0.03s)
+++ OK, passed 1000 tests.
prop_relPathDirToFile_regressionTest: OK
+++ OK, passed 1 test.
prop_cost_sane: OK
+++ OK, passed 1 test.
prop_matcher_sane: OK
+++ OK, passed 1 test.
prop_HmacSha1WithCipher_sane: OK
+++ OK, passed 1 test.
prop_VectorClock_sane: OK
+++ OK, passed 1 test.
prop_addMapLog_sane: OK
+++ OK, passed 1 test.
prop_verifiable_sane: OK (0.07s)
+++ OK, passed 1000 tests.
prop_segment_regressionTest: OK
+++ OK, passed 1 test.
prop_read_write_transferinfo: OK (0.04s)
+++ OK, passed 1000 tests.
prop_read_show_inodecache: OK (0.02s)
+++ OK, passed 1000 tests.
prop_parse_build_presence_log: OK (1.27s)
+++ OK, passed 1000 tests.
prop_parse_build_contentidentifier_log: OK (1.23s)
+++ OK, passed 1000 tests.
prop_read_show_TrustLevel: OK
+++ OK, passed 1 test.
prop_parse_build_TrustLevelLog: OK
+++ OK, passed 1 test.
prop_hashes_stable: OK
+++ OK, passed 1 test.
prop_mac_stable: OK
+++ OK, passed 1 test.
prop_schedule_roundtrips: OK (0.01s)
+++ OK, passed 1000 tests.
prop_past_sane: OK
+++ OK, passed 1 test.
prop_duration_roundtrips: OK
+++ OK, passed 1000 tests.
prop_metadata_sane: OK (0.86s)
+++ OK, passed 1000 tests.
prop_metadata_serialize: OK (0.84s)
+++ OK, passed 1000 tests.
prop_branchView_legal: OK (0.77s)
+++ OK, passed 1000 tests.
prop_viewPath_roundtrips: OK (0.03s)
+++ OK, passed 1000 tests.
prop_view_roundtrips: OK (0.52s)
+++ OK, passed 1000 tests.
prop_viewedFile_rountrips: OK (0.02s)
+++ OK, passed 1000 tests.
prop_b64_roundtrips: OK
+++ OK, passed 1000 tests.
prop_standardGroups_parse: OK
+++ OK, passed 1 test.
Unit Tests v7 adjusted unlocked branch
add dup: Init Tests
init: init test repo
Detected a filesystem without fifo support.

Disabling ssh connection caching.

Detected a crippled filesystem.

Disabling core.symlinks.
(scanning for unlocked files...)

Entering an adjusted branch where files are unlocked as this filesystem does not support locked files.
not found .
git-annex.exe: pre-commit: 1 failed

Failed to enter adjusted branch!
ok
(recording state in git...)
not found .
git-annex.exe: pre-commit: 1 failed
FAIL (6.92s)
.\\Test\\Framework.hs:469:
git commit failed
add: add foo
ok
(recording state in git...)
add sha1foo
ok
(recording state in git...)
not found .
git-annex.exe: pre-commit: 1 failed
FAIL (8.10s)
Test.hs:303:
git commit failed

2 out of 2 tests failed (15.02s)
FAIL
Exception: init tests failed! cannot continue
CallStack (from HasCallStack):
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
add extras: FAIL
Exception: init tests failed! cannot continue
CallStack (from HasCallStack):
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
export_import: FAIL
Exception: init tests failed! cannot continue
CallStack (from HasCallStack):
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
export_import_subdir: FAIL
Exception: init tests failed! cannot continue
CallStack (from HasCallStack):
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
shared clone: FAIL
Exception: init tests failed! cannot continue
CallStack (from HasCallStack):
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
log: FAIL
Exception: init tests failed! cannot continue
CallStack (from HasCallStack):
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
import: FAIL
Exception: init tests failed! cannot continue
CallStack (from HasCallStack):
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
reinject: FAIL
Exception: init tests failed! cannot continue
CallStack (from HasCallStack):
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
unannex (no copy): FAIL
Exception: init tests failed! cannot continue
CallStack (from HasCallStack):
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
unannex (with copy): FAIL
Exception: init tests failed! cannot continue
CallStack (from HasCallStack):
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
drop (no remote): FAIL
Exception: init tests failed! cannot continue
CallStack (from HasCallStack):
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
drop (with remote): FAIL
Exception: init tests failed! cannot continue
CallStack (from HasCallStack):
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
drop (untrusted remote): FAIL
Exception: init tests failed! cannot continue
CallStack (from HasCallStack):
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
get: FAIL
Exception: init tests failed! cannot continue
CallStack (from HasCallStack):
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
get (ssh remote): FAIL
Exception: init tests failed! cannot continue
CallStack (from HasCallStack):
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
move: FAIL
Exception: init tests failed! cannot continue
CallStack (from HasCallStack):
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
move (ssh remote): FAIL
Exception: init tests failed! cannot continue
CallStack (from HasCallStack):
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
copy: FAIL
Exception: init tests failed! cannot continue
CallStack (from HasCallStack):
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
lock: FAIL
Exception: init tests failed! cannot continue
CallStack (from HasCallStack):
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
lock --force: FAIL
Exception: init tests failed! cannot continue
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
edit (no pre-commit): FAIL
|
||||
Exception: init tests failed! cannot continue
|
||||
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
edit (pre-commit): FAIL
|
||||
Exception: init tests failed! cannot continue
|
||||
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
partial commit: FAIL
|
||||
Exception: init tests failed! cannot continue
|
||||
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
fix: FAIL
|
||||
Exception: init tests failed! cannot continue
|
||||
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
trust: FAIL
|
||||
Exception: init tests failed! cannot continue
|
||||
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
fsck (basics): FAIL
|
||||
Exception: init tests failed! cannot continue
|
||||
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
fsck (bare): FAIL
|
||||
Exception: init tests failed! cannot continue
|
||||
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
fsck (local untrusted): FAIL
|
||||
Exception: init tests failed! cannot continue
|
||||
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
fsck (remote untrusted): FAIL
|
||||
Exception: init tests failed! cannot continue
|
||||
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
fsck --from remote: FAIL
|
||||
Exception: init tests failed! cannot continue
|
||||
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
migrate: FAIL
|
||||
Exception: init tests failed! cannot continue
|
||||
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
migrate (via gitattributes): FAIL
|
||||
Exception: init tests failed! cannot continue
|
||||
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
unused: FAIL
|
||||
Exception: init tests failed! cannot continue
|
||||
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
describe: FAIL
|
||||
Exception: init tests failed! cannot continue
|
||||
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
find: FAIL
|
||||
Exception: init tests failed! cannot continue
|
||||
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
merge: FAIL
|
||||
Exception: init tests failed! cannot continue
|
||||
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
info: FAIL
|
||||
Exception: init tests failed! cannot continue
|
||||
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
version: FAIL
|
||||
Exception: init tests failed! cannot continue
|
||||
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
sync: FAIL
|
||||
Exception: init tests failed! cannot continue
|
||||
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
union merge regression: FAIL
|
||||
Exception: init tests failed! cannot continue
|
||||
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
adjusted branch merge regression: FAIL
|
||||
Exception: init tests failed! cannot continue
|
||||
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
adjusted branch subtree regression: FAIL
|
||||
Exception: init tests failed! cannot continue
|
||||
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
conflict resolution: FAIL
|
||||
Exception: init tests failed! cannot continue
|
||||
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
conflict resolution (adjusted branch): FAIL
|
||||
Exception: init tests failed! cannot continue
|
||||
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
conflict resolution movein regression: FAIL
|
||||
Exception: init tests failed! cannot continue
|
||||
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
conflict resolution (mixed directory and file): FAIL
|
||||
Exception: init tests failed! cannot continue
|
||||
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
conflict resolution symlink bit: FAIL
|
||||
Exception: init tests failed! cannot continue
|
||||
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
conflict resolution (uncommitted local file): FAIL
|
||||
Exception: init tests failed! cannot continue
|
||||
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
conflict resolution (removed file): FAIL
|
||||
Exception: init tests failed! cannot continue
|
||||
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
conflict resolution (nonannexed file): FAIL
|
||||
Exception: init tests failed! cannot continue
|
||||
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
conflict resolution (nonannexed symlink): FAIL
|
||||
Exception: init tests failed! cannot continue
|
||||
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
conflict resolution (mixed locked and unlocked file): FAIL
|
||||
Exception: init tests failed! cannot continue
|
||||
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
map: FAIL
|
||||
Exception: init tests failed! cannot continue
|
||||
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
uninit: FAIL
|
||||
Exception: init tests failed! cannot continue
|
||||
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
uninit (in git-annex branch): FAIL
|
||||
Exception: init tests failed! cannot continue
|
||||
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
upgrade: FAIL
|
||||
Exception: init tests failed! cannot continue
|
||||
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
whereis: FAIL
|
||||
Exception: init tests failed! cannot continue
|
||||
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
hook remote: FAIL
|
||||
Exception: init tests failed! cannot continue
|
||||
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
directory remote: FAIL
|
||||
Exception: init tests failed! cannot continue
|
||||
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
rsync remote: FAIL
|
||||
Exception: init tests failed! cannot continue
|
||||
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
bup remote: FAIL
|
||||
Exception: init tests failed! cannot continue
|
||||
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
crypto: FAIL
|
||||
Exception: init tests failed! cannot continue
|
||||
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
preferred content: FAIL
|
||||
Exception: init tests failed! cannot continue
|
||||
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
add subdirs: FAIL
|
||||
Exception: init tests failed! cannot continue
|
||||
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
addurl: FAIL
|
||||
Exception: init tests failed! cannot continue
|
||||
CallStack (from HasCallStack):
|
||||
error, called at .\\Test\\Framework.hs:427:33 in main:Test.Framework
|
||||
|
||||
65 out of 101 tests failed (21.64s)
|
||||
(Failures above could be due to a bug in git-annex, or an incompatibility
|
||||
with utilities, such as git, installed on this system.)
|
||||
# End of transcript or log.
|
||||
"""]]
@ -22,3 +22,8 @@ If user convenience was something to strive for here, it should technically be p
[[!meta author=yoh]]
[[!tag projects/datalad]]

> [[fixed|done]], and I also converted a number of other places
> where an error could leak through to stderr, although there are still
> some places where direct writes to stderr happen -- I'll probably never
> be able to guarantee --json-error-messages catches every possible stderr
> output. --[[Joey]]
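
A minimal sketch of the option in action (the file name `nosuchfile` is
hypothetical, and the `error-messages` field name reflects my reading of
git-annex's JSON output):

[[!format sh """
# route error text into the JSON records instead of stderr
git annex get nosuchfile --json --json-error-messages
# each emitted JSON object should then carry an "error-messages" list
"""]]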
@ -0,0 +1,13 @@
[[!comment format=mdwn
 username="joey"
 subject="""comment 1"""
 date="2019-11-19T17:12:41Z"
 content="""
I think that you can accomplish what you want by making the directory
you're importing from be a directory special remote with exporttree=yes
importtree=yes and use the new `git annex import master --from remote`.

If that does not do what you want, I'd prefer to look at making it be able
to do so. I hope to eventually remove the legacy git-annex import from
directory, since we have this new more general interface.
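
For example, a minimal sketch (the remote name "srcdir" and the directory
path are hypothetical; the initremote parameters are the standard ones for
an import/export directory special remote):

    # hypothetical remote pointing at the directory to import from
    git annex initremote srcdir type=directory directory=/path/to/dir \
        encryption=none exporttree=yes importtree=yes
    # import that directory's current contents into the master branch
    git annex import master --from srcdir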
"""]]
@ -0,0 +1,11 @@
[[!comment format=mdwn
 username="joey"
 subject="""comment 2"""
 date="2019-11-13T19:37:16Z"
 content="""
The signal 11 is very significant. It points to a problem in a lower-level
library (or ghc runtime), or perhaps a bad memory problem. git-annex does
not itself contain any code that can segfault, afaik.

Almost certainly the same as the other bug.
"""]]
@ -0,0 +1,73 @@
Originally I was trying to reproduce [datalad/issues/3653](https://github.com/datalad/datalad/issues/3653), assuming that multiple files pointed to the same key.
It was not the case, and my attempt revealed another bug: git-annex's inability to "obtain" files in parallel when multiple of them point to the same key:

<details>
<summary>setup of original repo (click to expand)</summary>

[[!format sh """
/tmp > mkdir src; (cd src; git init; git annex init; dd if=/dev/zero of=1 count=1024 bs=1024; for f in {2..10}; do cp 1 $f; done ; git annex add *; git commit -m added; )
Initialized empty Git repository in /tmp/src/.git/
init (scanning for unlocked files...)
ok
(recording state in git...)
1024+0 records in
1024+0 records out
1048576 bytes (1.0 MB, 1.0 MiB) copied, 0.00106651 s, 983 MB/s
add 1
ok
add 10
ok
add 2
ok
add 3
ok
add 4
ok
add 5
ok
add 6
ok
add 7
ok
add 8
ok
add 9
ok
(recording state in git...)
[master (root-commit) 63b1163] added
 10 files changed, 10 insertions(+)
 create mode 120000 1
 create mode 120000 10
 create mode 120000 2
 create mode 120000 3
 create mode 120000 4
 create mode 120000 5
 create mode 120000 6
 create mode 120000 7
 create mode 120000 8
 create mode 120000 9
"""]]
</details>

And this is what happens when we try to get the same key in parallel:
[[!format sh """

/tmp > git clone src dst; (cd dst; git annex get -J 5 *; )
Cloning into 'dst'...
done.
(merging origin/git-annex into git-annex...)
(recording state in git...)
(scanning for unlocked files...)
get 2 (from origin...) (checksum...)
git-annex: thread blocked indefinitely in an STM transaction
failed
git-annex: thread blocked indefinitely in an MVar operation

"""]]

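For completeness, one way to confirm that all ten files indeed resolve to a
single key (a sketch using git-annex's standard `find --format` variables,
run in the source repo):

[[!format sh """
# prints "<key> <file>" per present file; all ten lines should show the same key
/tmp/src > git annex find --format='${key} ${file}\n'
"""]]
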
I felt like it was an old issue, but I failed to find a trace of it in a quick search.

[[!meta author=yoh]]
[[!tag projects/datalad]]

> [[fixed|done]] --[[Joey]]
Some files were not shown because too many files have changed in this diff.