Merge branch 'master' into database

Joey Hess 2015-02-15 14:16:48 -04:00
commit bb242bdd82
4982 changed files with 89117 additions and 85285 deletions

4
.gitignore vendored

@ -11,6 +11,7 @@ Build/EvilSplicer
Build/Standalone
Build/OSXMkLibs
Build/LinuxMkLibs
Build/BuildVersion
git-annex
git-annex.1
git-annex-shell.1
@ -23,6 +24,9 @@ html
dist
# Sandboxed builds
cabal-dev
.cabal-sandbox
cabal.sandbox.config
cabal.config
# Project-local emacs configuration
.dir-locals.el
# OSX related


@ -1,6 +1,7 @@
Joey Hess <joey@kitenet.net> http://joey.kitenet.net/ <joey@web>
Joey Hess <joey@kitenet.net> http://joeyh.name/ <joey@web>
Joey Hess <joey@kitenet.net> http://joeyh.name/ <http://joeyh.name/@web>
Joey Hess <id@joeyh.name> http://joey.kitenet.net/ <joey@web>
Joey Hess <id@joeyh.name> http://joeyh.name/ <joey@web>
Joey Hess <id@joeyh.name> http://joeyh.name/ <http://joeyh.name/@web>
Yaroslav Halchenko <debian@onerussian.com>
Yaroslav Halchenko <debian@onerussian.com> http://yarikoptic.myopenid.com/ <site-myopenid@web>
Yaroslav Halchenko <debian@onerussian.com> https://www.google.com/accounts/o8/id?id=AItOawnx8kHW66N3BqmkVpgtXDlYMvr8TJ5VvfY <Yaroslav@web>
Richard Hartmann <richih@debian.org> https://www.google.com/accounts/o8/id?id=AItOawl9sYlePmv1xK-VvjBdN-5doOa_Xw-jH4U <Richard@web>


@ -1,21 +1,21 @@
{- git-annex monad
-
- Copyright 2010-2013 Joey Hess <joey@kitenet.net>
- Copyright 2010-2013 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
{-# LANGUAGE GeneralizedNewtypeDeriving, PackageImports #-}
{-# LANGUAGE CPP, GeneralizedNewtypeDeriving, PackageImports #-}
module Annex (
Annex,
AnnexState(..),
PreferredContentMap,
new,
run,
eval,
getState,
changeState,
withState,
setFlag,
setField,
setOutput,
@ -29,13 +29,11 @@ module Annex (
getGitConfig,
changeGitConfig,
changeGitRepo,
getRemoteGitConfig,
withCurrentState,
changeDirectory,
) where
import "mtl" Control.Monad.Reader
import "MonadCatchIO-transformers" Control.Monad.CatchIO
import Control.Concurrent
import Common
import qualified Git
import qualified Git.Config
@ -60,29 +58,38 @@ import Types.FileMatcher
import Types.NumCopies
import Types.LockPool
import Types.MetaData
import Types.DesktopNotify
import Types.CleanupActions
import qualified Utility.Matcher
#ifdef WITH_QUVI
import Utility.Quvi (QuviVersion)
#endif
import Utility.InodeCache
import Utility.Url
import "mtl" Control.Monad.Reader
import Control.Concurrent
import qualified Data.Map as M
import qualified Data.Set as S
import Utility.Quvi (QuviVersion)
{- git-annex's monad is a ReaderT around an AnnexState stored in a MVar.
- This allows modifying the state in an exception-safe fashion.
- The MVar is not exposed outside this module.
-
- Note that when an Annex action fails and the exception is caught,
- any changes the action has made to the AnnexState are retained,
- due to the use of the MVar to store the state.
-}
newtype Annex a = Annex { runAnnex :: ReaderT (MVar AnnexState) IO a }
deriving (
Monad,
MonadIO,
MonadReader (MVar AnnexState),
MonadCatchIO,
MonadCatch,
MonadThrow,
MonadMask,
Functor,
Applicative
)
type Matcher a = Either [Utility.Matcher.Token a] (Utility.Matcher.Matcher a)
type PreferredContentMap = M.Map UUID (Utility.Matcher.Matcher (S.Set UUID -> MatchInfo -> Annex Bool))
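The ReaderT-around-MVar design described above can be shown with a small standalone sketch (illustrative only, not code from this commit; the App type and helper names are made up). It demonstrates the behaviour the comment notes: a state change made before an exception is retained when the exception is caught, because the state lives in the MVar rather than in the monadic return value.

{-# LANGUAGE GeneralizedNewtypeDeriving #-}
import Control.Concurrent.MVar
import Control.Exception
import Control.Monad.Reader

newtype App a = App { runApp :: ReaderT (MVar Int) IO a }
    deriving (Functor, Applicative, Monad, MonadIO, MonadReader (MVar Int))

-- The changeState analogue: modify the state held in the MVar.
bump :: App ()
bump = do
    mv <- ask
    liftIO $ modifyMVar_ mv (return . succ)

main :: IO ()
main = do
    mv <- newMVar (0 :: Int)
    r <- try (runReaderT (runApp (bump >> error "boom")) mv)
    print (r :: Either SomeException ())
    print =<< readMVar mv  -- prints 1: the bump survived the exception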
-- internal state storage
data AnnexState = AnnexState
{ repo :: Git.Repo
@ -103,9 +110,10 @@ data AnnexState = AnnexState
, forcebackend :: Maybe String
, globalnumcopies :: Maybe NumCopies
, forcenumcopies :: Maybe NumCopies
, limit :: Matcher (MatchInfo -> Annex Bool)
, limit :: ExpandableMatcher Annex
, uuidmap :: Maybe UUIDMap
, preferredcontentmap :: Maybe PreferredContentMap
, preferredcontentmap :: Maybe (FileMatcherMap Annex)
, requiredcontentmap :: Maybe (FileMatcherMap Annex)
, shared :: Maybe SharedRepository
, forcetrust :: TrustMap
, trustmap :: Maybe TrustMap
@ -116,12 +124,16 @@ data AnnexState = AnnexState
, fields :: M.Map String String
, modmeta :: [ModMeta]
, cleanup :: M.Map CleanupAction (Annex ())
, inodeschanged :: Maybe Bool
, sentinalstatus :: Maybe SentinalStatus
, useragent :: Maybe String
, errcounter :: Integer
, unusedkeys :: Maybe (S.Set Key)
, tempurls :: M.Map Key URLString
#ifdef WITH_QUVI
, quviversion :: Maybe QuviVersion
#endif
, existinghooks :: M.Map Git.Hook.Hook Bool
, desktopnotify :: DesktopNotify
}
newState :: GitConfig -> Git.Repo -> AnnexState
@ -144,9 +156,10 @@ newState c r = AnnexState
, forcebackend = Nothing
, globalnumcopies = Nothing
, forcenumcopies = Nothing
, limit = Left []
, limit = BuildingMatcher []
, uuidmap = Nothing
, preferredcontentmap = Nothing
, requiredcontentmap = Nothing
, shared = Nothing
, forcetrust = M.empty
, trustmap = Nothing
@ -157,19 +170,23 @@ newState c r = AnnexState
, fields = M.empty
, modmeta = []
, cleanup = M.empty
, inodeschanged = Nothing
, sentinalstatus = Nothing
, useragent = Nothing
, errcounter = 0
, unusedkeys = Nothing
, tempurls = M.empty
#ifdef WITH_QUVI
, quviversion = Nothing
#endif
, existinghooks = M.empty
, desktopnotify = mempty
}
{- Makes an Annex state object for the specified git repo.
- Ensures the config is read, if it was not already. -}
new :: Git.Repo -> IO AnnexState
new r = do
r' <- Git.Config.read r
r' <- Git.Config.read =<< Git.relPath r
let c = extractGitConfig r'
newState c <$> if annexDirect c then fixupDirect r' else return r'
@ -200,6 +217,11 @@ changeState modifier = do
mvar <- ask
liftIO $ modifyMVar_ mvar $ return . modifier
withState :: (AnnexState -> (AnnexState, b)) -> Annex b
withState modifier = do
mvar <- ask
liftIO $ modifyMVar mvar $ return . modifier
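withState differs from changeState in that the modifier also returns a value computed under the same MVar lock as the state update. A tiny standalone illustration of that pattern (hypothetical names); catFileStop later in this commit uses exactly this shape to swap in an empty handle map while collecting the old handles for shutdown:

import Control.Concurrent.MVar
import qualified Data.Map as M

-- Atomically replace the map held in the MVar with an empty one and hand
-- back the old contents, so the caller can clean them up outside the lock.
drainHandles :: MVar (M.Map String Int) -> IO (M.Map String Int)
drainHandles mv = modifyMVar mv $ \m -> return (M.empty, m)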
{- Sets a flag to True -}
setFlag :: String -> Annex ()
setFlag flag = changeState $ \s ->
@ -261,6 +283,13 @@ changeGitRepo r = changeState $ \s -> s
, gitconfig = extractGitConfig r
}
{- Gets the RemoteGitConfig from a remote, given the Git.Repo for that
- remote. -}
getRemoteGitConfig :: Git.Repo -> Annex RemoteGitConfig
getRemoteGitConfig r = do
g <- gitRepo
return $ extractRemoteGitConfig g (Git.repoDescribe r)
{- Converts an Annex action into an IO action, that runs with a copy
- of the current Annex state.
-
@ -270,3 +299,14 @@ withCurrentState :: Annex a -> Annex (IO a)
withCurrentState a = do
s <- getState id
return $ eval s a
{- It's not safe to use setCurrentDirectory in the Annex monad,
- because the git repo paths are stored relative.
- Instead, use this.
-}
changeDirectory :: FilePath -> Annex ()
changeDirectory d = do
r <- liftIO . Git.adjustPath absPath =<< gitRepo
liftIO $ setCurrentDirectory d
r' <- liftIO $ Git.relPath r
changeState $ \s -> s { repo = r' }


@ -1,24 +1,27 @@
{- git-annex automatic merge conflict resolution
-
- Copyright 2012-2014 Joey Hess <joey@kitenet.net>
- Copyright 2012-2014 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
module Annex.AutoMerge (autoMergeFrom) where
module Annex.AutoMerge
( autoMergeFrom
, resolveMerge
, commitResolvedMerge
) where
import Common.Annex
import qualified Annex.Queue
import Annex.Direct
import Annex.CatFile
import Annex.Link
import qualified Git.Command
import qualified Git.LsFiles as LsFiles
import qualified Git.UpdateIndex as UpdateIndex
import qualified Git.Merge
import qualified Git.Ref
import qualified Git.Sha
import qualified Git
import qualified Git.Branch
import Git.Types (BlobType(..))
import Config
import Annex.ReplaceFile
@ -29,23 +32,22 @@ import qualified Data.Set as S
{- Merges from a branch into the current branch
- (which may not exist yet),
- with automatic merge conflict resolution. -}
autoMergeFrom :: Git.Ref -> (Maybe Git.Ref) -> Annex Bool
autoMergeFrom branch currbranch = do
- with automatic merge conflict resolution.
-
- Callers should use Git.Branch.changed first, to make sure that
- there are changes from the current branch to the branch being merged in.
-}
autoMergeFrom :: Git.Ref -> (Maybe Git.Ref) -> Git.Branch.CommitMode -> Annex Bool
autoMergeFrom branch currbranch commitmode = do
showOutput
case currbranch of
Nothing -> go Nothing
Just b -> go =<< inRepo (Git.Ref.sha b)
where
go old = ifM isDirect
( do
d <- fromRepo gitAnnexMergeDir
r <- inRepo (mergeDirect d branch)
<||> resolveMerge old branch
mergeDirectCleanup d (fromMaybe Git.Sha.emptyTree old) Git.Ref.headRef
return r
, inRepo (Git.Merge.mergeNonInteractive branch)
<||> resolveMerge old branch
( mergeDirect currbranch old branch (resolveMerge old branch) commitmode
, inRepo (Git.Merge.mergeNonInteractive branch commitmode)
<||> (resolveMerge old branch <&&> commitResolvedMerge commitmode)
)
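A sketch of the pre-check suggested in the comment above, using plain git rather than Git.Branch.changed (the helper git-annex itself uses); it simply asks whether the branch being merged in has any commits that the current branch lacks:

import System.Process (readProcess)

-- True when 'other' has at least one commit not reachable from 'current',
-- ie, a merge from 'other' would actually change something.
hasNewCommits :: String -> String -> IO Bool
hasNewCommits current other = do
    out <- readProcess "git" ["rev-list", "-n1", other, "--not", current] ""
    return (not (null out))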
{- Resolves a conflicted merge. It's important that any conflicts be
@ -70,9 +72,11 @@ autoMergeFrom branch currbranch = do
-
- In indirect mode, the merge is resolved in the work tree and files
- staged, to clean up from a conflicted merge that was run in the work
- tree. In direct mode, the work tree is not touched here; files are
- staged to the index, and written to the gitAnnexMergeDir, and later
- mergeDirectCleanup handles updating the work tree.
- tree.
-
- In direct mode, the work tree is not touched here; files are staged to
- the index, and written to the gitAnnexMergeDir, for later handling by
- the direct mode merge code.
-}
resolveMerge :: Maybe Git.Ref -> Git.Ref -> Annex Bool
resolveMerge us them = do
@ -92,14 +96,6 @@ resolveMerge us them = do
unlessM isDirect $
cleanConflictCruft mergedfs top
Annex.Queue.flush
whenM isDirect $
void preCommitDirect
void $ inRepo $ Git.Command.runBool
[ Param "commit"
, Param "--no-verify"
, Param "-m"
, Param "git-annex automatic merge conflict fix"
]
showLongNote "Merge conflict was automatically resolved; you may want to examine the result."
return merged
@ -118,11 +114,11 @@ resolveMerge' (Just us) them u = do
makelink keyUs
-- Our side is annexed file, other side is not.
(Just keyUs, Nothing) -> resolveby $ do
graftin them file
graftin them file LsFiles.valThem LsFiles.valThem
makelink keyUs
-- Our side is not annexed file, other side is.
(Nothing, Just keyThem) -> resolveby $ do
graftin us file
graftin us file LsFiles.valUs LsFiles.valUs
makelink keyThem
-- Neither side is annexed file; cannot resolve.
(Nothing, Nothing) -> return Nothing
@ -138,18 +134,42 @@ resolveMerge' (Just us) them u = do
makelink key = do
let dest = variantFile file key
l <- inRepo $ gitAnnexLink dest key
ifM isDirect
( do
d <- fromRepo gitAnnexMergeDir
replaceFile (d </> dest) $ makeAnnexLink l
, replaceFile dest $ makeAnnexLink l
)
l <- calcRepo $ gitAnnexLink dest key
replacewithlink dest l
stageSymlink dest =<< hashSymlink l
{- stage a graft of a directory or file from a branch -}
graftin b item = Annex.Queue.addUpdateIndex
=<< fromRepo (UpdateIndex.lsSubTree b item)
replacewithlink dest link = ifM isDirect
( do
d <- fromRepo gitAnnexMergeDir
replaceFile (d </> dest) $ makeGitLink link
, replaceFile dest $ makeGitLink link
)
{- Stage a graft of a directory or file from a branch.
-
- When there is a conflicted merge where one side is a directory
- or file, and the other side is a symlink, git merge always
- updates the work tree to contain the non-symlink. So, the
- directory or file will already be in the work tree correctly,
- and they just need to be staged into place. Do so by copying the
- index. (Note that this is also better than calling git-add
- because on a crippled filesystem, it preserves any symlink
- bits.)
-
- It's also possible for the branch to have a symlink in it,
- which is not a git-annex symlink. In this special case,
- git merge does not update the work tree to contain the symlink
- from the branch, so we have to do so manually.
-}
graftin b item select select' = do
Annex.Queue.addUpdateIndex
=<< fromRepo (UpdateIndex.lsSubTree b item)
when (select (LsFiles.unmergedBlobType u) == Just SymlinkBlob) $
case select' (LsFiles.unmergedSha u) of
Nothing -> noop
Just sha -> do
link <- catLink True sha
replacewithlink item link
resolveby a = do
{- Remove conflicted file from index so merge can be resolved. -}
@ -158,7 +178,7 @@ resolveMerge' (Just us) them u = do
return (Just file)
{- git-merge moves conflicting files away to files
- named something like f~HEAD or f~branch, but the
- named something like f~HEAD or f~branch or just f, but the
- exact name chosen can vary. Once the conflict is resolved,
- this cruft can be deleted. To avoid deleting legitimate
- files that look like this, only delete files that are
@ -175,5 +195,12 @@ cleanConflictCruft resolvedfs top = do
liftIO $ nukeFile f
| otherwise = noop
s = S.fromList resolvedfs
matchesresolved f = S.member (base f) s
matchesresolved f = S.member f s || S.member (base f) s
base f = reverse $ drop 1 $ dropWhile (/= '~') $ reverse f
commitResolvedMerge :: Git.Branch.CommitMode -> Annex Bool
commitResolvedMerge commitmode = inRepo $ Git.Branch.commitCommand commitmode
[ Param "--no-verify"
, Param "-m"
, Param "git-annex automatic merge conflict fix"
]


@ -1,6 +1,6 @@
{- management of the git-annex branch
-
- Copyright 2011-2013 Joey Hess <joey@kitenet.net>
- Copyright 2011-2013 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -25,9 +25,11 @@ module Annex.Branch (
performTransitions,
) where
import qualified Data.ByteString.Lazy.Char8 as L
import qualified Data.ByteString.Lazy as L
import qualified Data.Set as S
import qualified Data.Map as M
import Data.Bits.Utils
import Control.Concurrent (threadDelay)
import Common.Annex
import Annex.BranchState
@ -48,9 +50,11 @@ import Annex.Perms
import Logs
import Logs.Transitions
import Logs.Trust.Pure
import Logs.Difference.Pure
import Annex.ReplaceFile
import qualified Annex.Queue
import Annex.Branch.Transitions
import qualified Annex
{- Name of the branch that is used to store git-annex's information. -}
name :: Git.Ref
@ -91,7 +95,7 @@ getBranch = maybe (hasOrigin >>= go >>= use) return =<< branchsha
fromMaybe (error $ "failed to create " ++ fromRef name)
<$> branchsha
go False = withIndex' True $
inRepo $ Git.Branch.commitAlways "branch created" fullname []
inRepo $ Git.Branch.commitAlways Git.Branch.AutomaticCommit "branch created" fullname []
use sha = do
setIndexSha sha
return sha
@ -159,6 +163,7 @@ updateTo pairs = do
<$> getLocal transitionsLog
unless (null branches) $ do
showSideAction merge_desc
mapM_ checkBranchDifferences refs
mergeIndex jl refs
let commitrefs = nub $ fullname:refs
unlessM (handleTransitions jl localtransitions commitrefs) $ do
@ -199,7 +204,7 @@ getHistorical :: RefDate -> FilePath -> Annex String
getHistorical date = getRef (Git.Ref.dateRef fullname date)
getRef :: Ref -> FilePath -> Annex String
getRef ref file = withIndex $ L.unpack <$> catFile ref file
getRef ref file = withIndex $ decodeBS <$> catFile ref file
{- Applies a function to modifiy the content of a file.
-
@ -217,7 +222,7 @@ set = setJournalFile
commit :: String -> Annex ()
commit = whenM journalDirty . forceCommit
{- Commits the current index to the branch even without any journalleda
{- Commits the current index to the branch even without any journalled
- changes. -}
forceCommit :: String -> Annex ()
forceCommit message = lockJournal $ \jl -> do
@ -228,30 +233,34 @@ forceCommit message = lockJournal $ \jl -> do
{- Commits the staged changes in the index to the branch.
-
- Ensures that the branch's index file is first updated to the state
- Ensures that the branch's index file is first updated to merge the state
- of the branch at branchref, before running the commit action. This
- is needed because the branch may have had changes pushed to it, that
- are not yet reflected in the index.
-
- Also safely handles a race that can occur if a change is being pushed
- into the branch at the same time. When the race happens, the commit will
- be made on top of the newly pushed change, but without the index file
- being updated to include it. The result is that the newly pushed
- change is reverted. This race is detected and another commit made
- to fix it.
-
- The branchref value can have been obtained using getBranch at any
- previous point, though getting it a long time ago makes the race
- more likely to occur.
-
- Note that changes may be pushed to the branch at any point in time!
- So, there's a race. If the commit is made using the newly pushed tip of
- the branch as its parent, and that ref has not yet been merged into the
- index, then the result is that the commit will revert the pushed
- changes, since they have not been merged into the index. This race
- is detected and another commit made to fix it.
-
- (It's also possible for the branch to be overwritten,
- losing the commit made here. But that's ok; the data is still in the
- index and will get committed again later.)
-}
commitIndex :: JournalLocked -> Git.Ref -> String -> [Git.Ref] -> Annex ()
commitIndex jl branchref message parents = do
showStoringStateAction
commitIndex' jl branchref message parents
commitIndex' :: JournalLocked -> Git.Ref -> String -> [Git.Ref] -> Annex ()
commitIndex' jl branchref message parents = do
commitIndex' jl branchref message message 0 parents
commitIndex' :: JournalLocked -> Git.Ref -> String -> String -> Integer -> [Git.Ref] -> Annex ()
commitIndex' jl branchref message basemessage retrynum parents = do
updateIndex jl branchref
committedref <- inRepo $ Git.Branch.commitAlways message fullname parents
committedref <- inRepo $ Git.Branch.commitAlways Git.Branch.AutomaticCommit message fullname parents
setIndexSha committedref
parentrefs <- commitparents <$> catObject committedref
when (racedetected branchref parentrefs) $
@ -259,7 +268,8 @@ commitIndex' jl branchref message parents = do
where
-- look for "parent ref" lines and return the refs
commitparents = map (Git.Ref . snd) . filter isparent .
map (toassoc . L.unpack) . L.lines
map (toassoc . decodeBS) . L.split newline
newline = c2w8 '\n'
toassoc = separate (== ' ')
isparent (k,_) = k == "parent"
@ -271,12 +281,16 @@ commitIndex' jl branchref message parents = do
| otherwise = True -- race!
{- To recover from the race, union merge the lost refs
- into the index, and recommit on top of the bad commit. -}
- into the index. -}
fixrace committedref lostrefs = do
showSideAction "recovering from race"
let retrynum' = retrynum+1
-- small sleep to let any activity that caused
-- the race settle down
liftIO $ threadDelay (100000 + fromInteger retrynum')
mergeIndex jl lostrefs
commitIndex jl committedref racemessage [committedref]
racemessage = message ++ " (recovery from race)"
let racemessage = basemessage ++ " (recovery from race #" ++ show retrynum' ++ "; expected commit parent " ++ show branchref ++ " but found " ++ show lostrefs ++ " )"
commitIndex' jl committedref racemessage basemessage retrynum' [committedref]
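Schematically, the race recovery above is a retry loop: commit, read back the commit's recorded parents, and if a concurrently pushed ref is missing from them, merge it into the index and commit again, with an increasing retry number and a short sleep between attempts. A generic standalone sketch of that loop shape (not the real git plumbing):

import Control.Concurrent (threadDelay)

-- 'attempt' is given the retry number and returns Nothing on success, or
-- Just a recovery action (eg, merging lost refs back into the index) to
-- run before trying again.
retryOnRace :: (Integer -> IO (Maybe (IO ()))) -> IO ()
retryOnRace attempt = go 0
  where
    go n = attempt n >>= \r -> case r of
        Nothing -> return ()
        Just recover -> do
            threadDelay (100000 + fromInteger n)  -- let the racing pusher settle
            recover
            go (n + 1)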
{- Lists all files on the branch. There may be duplicates in the list. -}
files :: Annex [FilePath]
@ -332,7 +346,7 @@ withIndex :: Annex a -> Annex a
withIndex = withIndex' False
withIndex' :: Bool -> Annex a -> Annex a
withIndex' bootstrapping a = do
f <- fromRepo gitAnnexIndex
f <- liftIO . absPath =<< fromRepo gitAnnexIndex
withIndexFile f $ do
checkIndexOnce $ unlessM (liftIO $ doesFileExist f) $ do
unless bootstrapping create
@ -387,19 +401,40 @@ stageJournal jl = withIndex $ do
prepareModifyIndex jl
g <- gitRepo
let dir = gitAnnexJournalDir g
fs <- getJournalFiles jl
liftIO $ do
(jlogf, jlogh) <- openjlog
withJournalHandle $ \jh -> do
h <- hashObjectStart g
Git.UpdateIndex.streamUpdateIndex g
[genstream dir h fs]
[genstream dir h jh jlogh]
hashObjectStop h
return $ liftIO $ mapM_ (removeFile . (dir </>)) fs
return $ cleanup dir jlogh jlogf
where
genstream dir h fs streamer = forM_ fs $ \file -> do
let path = dir </> file
sha <- hashFile h path
streamer $ Git.UpdateIndex.updateIndexLine
sha FileBlob (asTopFilePath $ fileJournal file)
genstream dir h jh jlogh streamer = do
v <- readDirectory jh
case v of
Nothing -> return ()
Just file -> do
unless (dirCruft file) $ do
let path = dir </> file
sha <- hashFile h path
hPutStrLn jlogh file
streamer $ Git.UpdateIndex.updateIndexLine
sha FileBlob (asTopFilePath $ fileJournal file)
genstream dir h jh jlogh streamer
-- Clean up the staged files, as listed in the temp log file.
-- The temp file is used to avoid needing to buffer all the
-- filenames in memory.
cleanup dir jlogh jlogf = do
hFlush jlogh
hSeek jlogh AbsoluteSeek 0
stagedfs <- lines <$> hGetContents jlogh
mapM_ (removeFile . (dir </>)) stagedfs
hClose jlogh
nukeFile jlogf
openjlog = do
tmpdir <- fromRepo gitAnnexTmpMiscDir
createAnnexDirectory tmpdir
liftIO $ openTempFile tmpdir "jlog"
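The jlog handling above spools the staged filenames into a temp file instead of holding them all in memory, then seeks back to the start of that file to drive the cleanup. The same pattern in a self-contained form (file names are illustrative):

import System.IO
import System.Directory (removeFile)

main :: IO ()
main = do
    (logf, logh) <- openTempFile "." "jlog"
    mapM_ (hPutStrLn logh) ["uuid.log", "trust.log", "remote.log"]  -- spool while streaming
    hFlush logh
    hSeek logh AbsoluteSeek 0          -- rewind rather than buffering the names in memory
    staged <- lines <$> hGetContents logh
    mapM_ putStrLn staged              -- stands in for the per-file cleanup
    hClose logh
    removeFile logf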
{- This is run after the refs have been merged into the index,
- but before the result is committed to the branch.
@ -431,8 +466,8 @@ handleTransitions jl localts refs = do
ignoreRefs untransitionedrefs
return True
where
getreftransition ref = do
ts <- parseTransitionsStrictly "remote" . L.unpack
getreftransition ref = do
ts <- parseTransitionsStrictly "remote" . decodeBS
<$> catFile ref transitionsLog
return (ref, ts)
@ -447,7 +482,7 @@ ignoreRefs rs = do
getIgnoredRefs :: Annex (S.Set Git.Ref)
getIgnoredRefs = S.fromList . mapMaybe Git.Sha.extractSha . lines <$> content
where
content = do
content = do
f <- fromRepo gitAnnexIgnoredRefs
liftIO $ catchDefaultIO "" $ readFile f
@ -469,13 +504,13 @@ performTransitionsLocked jl ts neednewlocalbranch transitionedrefs = do
Annex.Queue.flush
if neednewlocalbranch
then do
committedref <- inRepo $ Git.Branch.commitAlways message fullname transitionedrefs
committedref <- inRepo $ Git.Branch.commitAlways Git.Branch.AutomaticCommit message fullname transitionedrefs
setIndexSha committedref
else do
ref <- getBranch
commitIndex jl ref message (nub $ fullname:transitionedrefs)
where
message
message
| neednewlocalbranch && null transitionedrefs = "new branch for transition " ++ tdesc
| otherwise = "continuing transition " ++ tdesc
tdesc = show $ map describeTransition $ transitionList ts
@ -514,3 +549,11 @@ performTransitionsLocked jl ts neednewlocalbranch transitionedrefs = do
apply rest hasher file content' trustmap
PreserveFile ->
apply rest hasher file content trustmap
checkBranchDifferences :: Git.Ref -> Annex ()
checkBranchDifferences ref = do
theirdiffs <- allDifferences . parseDifferencesLog . decodeBS
<$> catFile ref differenceLog
mydiffs <- annexDifferences <$> Annex.getGitConfig
when (theirdiffs /= mydiffs) $
error "Remote repository is tuned in incompatible way; cannot be merged with local repository."


@ -1,6 +1,6 @@
{- git-annex branch transitions
-
- Copyright 2013 Joey Hess <joey@kitenet.net>
- Copyright 2013 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -12,12 +12,14 @@ module Annex.Branch.Transitions (
import Logs
import Logs.Transitions
import Logs.UUIDBased as UUIDBased
import Logs.Presence.Pure as Presence
import qualified Logs.UUIDBased as UUIDBased
import qualified Logs.Presence.Pure as Presence
import qualified Logs.Chunk.Pure as Chunk
import Types.TrustLevel
import Types.UUID
import qualified Data.Map as M
import Data.Default
data FileTransition
= ChangeFile String
@ -32,10 +34,16 @@ getTransitionCalculator ForgetDeadRemotes = Just dropDead
dropDead :: FilePath -> String -> TrustMap -> FileTransition
dropDead f content trustmap = case getLogVariety f of
Just UUIDBasedLog -> ChangeFile $
UUIDBased.showLog id $ dropDeadFromUUIDBasedLog trustmap $ UUIDBased.parseLog Just content
Just UUIDBasedLog
-- Don't remove the dead repo from the trust log,
-- because git remotes may still exist, and they need
-- to still know it's dead.
| f == trustLog -> PreserveFile
| otherwise -> ChangeFile $ UUIDBased.showLog id $ dropDeadFromMapLog trustmap id $ UUIDBased.parseLog Just content
Just NewUUIDBasedLog -> ChangeFile $
UUIDBased.showLogNew id $ dropDeadFromUUIDBasedLog trustmap $ UUIDBased.parseLogNew Just content
UUIDBased.showLogNew id $ dropDeadFromMapLog trustmap id $ UUIDBased.parseLogNew Just content
Just (ChunkLog _) -> ChangeFile $
Chunk.showLog $ dropDeadFromMapLog trustmap fst $ Chunk.parseLog content
Just (PresenceLog _) ->
let newlog = Presence.compactLog $ dropDeadFromPresenceLog trustmap $ Presence.parseLog content
in if null newlog
@ -44,8 +52,8 @@ dropDead f content trustmap = case getLogVariety f of
Just OtherLog -> PreserveFile
Nothing -> PreserveFile
dropDeadFromUUIDBasedLog :: TrustMap -> UUIDBased.Log String -> UUIDBased.Log String
dropDeadFromUUIDBasedLog trustmap = M.filterWithKey $ notDead trustmap . const
dropDeadFromMapLog :: Ord k => TrustMap -> (k -> UUID) -> M.Map k v -> M.Map k v
dropDeadFromMapLog trustmap getuuid = M.filterWithKey $ \k _v -> notDead trustmap getuuid k
{- Presence logs can contain UUIDs or other values. Any line that matches
- a dead uuid is dropped; any other values are passed through. -}
@ -53,4 +61,4 @@ dropDeadFromPresenceLog :: TrustMap -> [Presence.LogLine] -> [Presence.LogLine]
dropDeadFromPresenceLog trustmap = filter $ notDead trustmap (toUUID . Presence.info)
notDead :: TrustMap -> (v -> UUID) -> v -> Bool
notDead trustmap a v = M.findWithDefault SemiTrusted (a v) trustmap /= DeadTrusted
notDead trustmap a v = M.findWithDefault def (a v) trustmap /= DeadTrusted


@ -2,7 +2,7 @@
-
- Runtime state about the git-annex branch.
-
- Copyright 2011-2012 Joey Hess <joey@kitenet.net>
- Copyright 2011-2012 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}


@ -1,6 +1,6 @@
{- git cat-file interface, with handle automatically stored in the Annex monad
-
- Copyright 2011-2013 Joey Hess <joey@kitenet.net>
- Copyright 2011-2013 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -12,9 +12,11 @@ module Annex.CatFile (
catTree,
catObjectDetails,
catFileHandle,
catFileStop,
catKey,
catKeyFile,
catKeyFileHEAD,
catLink,
) where
import qualified Data.ByteString.Lazy as L
@ -70,6 +72,14 @@ catFileHandle = do
Annex.changeState $ \s -> s { Annex.catfilehandles = m' }
return h
{- Stops all running cat-files. Should only be run when it's known that
- nothing is using the handles, eg at shutdown. -}
catFileStop :: Annex ()
catFileStop = do
m <- Annex.withState $ \s ->
(s { Annex.catfilehandles = M.empty }, Annex.catfilehandles s)
liftIO $ mapM_ Git.CatFile.catFileStop (M.elems m)
{- From the Sha or Ref of a symlink back to the key.
-
- Requires a mode witness, to guarantee that the file is a symlink.
@ -77,21 +87,25 @@ catFileHandle = do
catKey :: Ref -> FileMode -> Annex (Maybe Key)
catKey = catKey' True
catKey' :: Bool -> Ref -> FileMode -> Annex (Maybe Key)
catKey' modeguaranteed ref mode
catKey' :: Bool -> Sha -> FileMode -> Annex (Maybe Key)
catKey' modeguaranteed sha mode
| isSymLink mode = do
l <- fromInternalGitPath . decodeBS <$> get
l <- catLink modeguaranteed sha
return $ if isLinkToAnnex l
then fileKey $ takeFileName l
else Nothing
| otherwise = return Nothing
{- Gets a symlink target. -}
catLink :: Bool -> Sha -> Annex String
catLink modeguaranteed sha = fromInternalGitPath . decodeBS <$> get
where
-- If the mode is not guaranteed to be correct, avoid
-- If the mode is not guaranteed to be correct, avoid
-- buffering the whole file content, which might be large.
-- 8192 is enough if it really is a symlink.
get
| modeguaranteed = catObject ref
| otherwise = L.take 8192 <$> catObject ref
get
| modeguaranteed = catObject sha
| otherwise = L.take 8192 <$> catObject sha
{- Looks up the key corresponding to the Ref using the running cat-file.
-
@ -106,7 +120,7 @@ catKeyChecked :: Bool -> Ref -> Annex (Maybe Key)
catKeyChecked needhead ref@(Ref r) =
catKey' False ref =<< findmode <$> catTree treeref
where
pathparts = split "/" r
pathparts = split "/" r
dir = intercalate "/" $ take (length pathparts - 1) pathparts
file = fromMaybe "" $ lastMaybe pathparts
treeref = Ref $ if needhead then "HEAD" ++ dir ++ "/" else dir ++ "/"


@ -1,6 +1,6 @@
{- git check-attr interface, with handle automatically stored in the Annex monad
-
- Copyright 2012 Joey Hess <joey@kitenet.net>
- Copyright 2012 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}


@ -1,7 +1,7 @@
{- git check-ignore interface, with handle automatically stored in
- the Annex monad
-
- Copyright 2013 Joey Hess <joey@kitenet.net>
- Copyright 2013 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -18,7 +18,7 @@ import qualified Annex
checkIgnored :: FilePath -> Annex Bool
checkIgnored file = go =<< checkIgnoreHandle
where
go Nothing = return False
go Nothing = return False
go (Just h) = liftIO $ Git.checkIgnored h file
checkIgnoreHandle :: Annex (Maybe Git.CheckIgnoreHandle)


@ -1,6 +1,6 @@
{- git-annex file content managing
-
- Copyright 2010-2014 Joey Hess <joey@kitenet.net>
- Copyright 2010-2014 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -16,6 +16,7 @@ module Annex.Content (
getViaTmpChecked,
getViaTmpUnchecked,
prepGetViaTmpChecked,
prepTmp,
withTmp,
checkDiskSpace,
moveAnnex,
@ -55,11 +56,7 @@ import Annex.Perms
import Annex.Link
import Annex.Content.Direct
import Annex.ReplaceFile
import Annex.Exception
#ifdef mingw32_HOST_OS
import Utility.WinLock
#endif
import Utility.LockFile
{- Checks if a given key's content is currently present. -}
inAnnex :: Key -> Annex Bool
@ -104,27 +101,32 @@ inAnnexSafe key = inAnnex' (fromMaybe False) (Just False) go key
=<< contentLockFile key
#ifndef mingw32_HOST_OS
checkindirect f = liftIO $ openforlock f >>= check is_missing
checkindirect contentfile = liftIO $ checkOr is_missing contentfile
{- In direct mode, the content file must exist, but
- the lock file often generally won't exist unless a removal is in
- process. This does not create the lock file, it only checks for
- it. -}
- the lock file generally won't exist unless a removal is in
- process. -}
checkdirect contentfile lockfile = liftIO $
ifM (doesFileExist contentfile)
( openforlock lockfile >>= check is_unlocked
( checkOr is_unlocked lockfile
, return is_missing
)
openforlock f = catchMaybeIO $
openFd f ReadOnly Nothing defaultFileFlags
check _ (Just h) = do
v <- getLock h (ReadLock, AbsoluteSeek, 0, 0)
closeFd h
checkOr d lockfile = do
v <- checkLocked lockfile
return $ case v of
Just _ -> is_locked
Nothing -> is_unlocked
check def Nothing = return def
Nothing -> d
Just True -> is_locked
Just False -> is_unlocked
#else
checkindirect _ = return is_missing
checkindirect f = liftIO $ ifM (doesFileExist f)
( do
v <- lockShared f
case v of
Nothing -> return is_locked
Just lockhandle -> do
dropLock lockhandle
return is_unlocked
, return is_missing
)
{- In Windows, see if we can take a shared lock. If so,
- remove the lock file to clean up after ourselves. -}
checkdirect contentfile lockfile =
@ -150,14 +152,20 @@ contentLockFile key = ifM isDirect
, return Nothing
)
newtype ContentLock = ContentLock Key
{- Content is exclusively locked while running an action that might remove
- it. (If the content is not present, no locking is done.) -}
lockContent :: Key -> Annex a -> Annex a
- it. (If the content is not present, no locking is done.)
-}
lockContent :: Key -> (ContentLock -> Annex a) -> Annex a
lockContent key a = do
contentfile <- calcRepo $ gitAnnexLocation key
lockfile <- contentLockFile key
maybe noop setuplockfile lockfile
bracketAnnex (liftIO $ lock contentfile lockfile) (unlock lockfile) (const a)
bracket
(lock contentfile lockfile)
(unlock lockfile)
(const $ a $ ContentLock key)
where
alreadylocked = error "content is locked"
setuplockfile lockfile = modifyContent lockfile $
@ -167,17 +175,17 @@ lockContent key a = do
void $ liftIO $ tryIO $
nukeFile lockfile
#ifndef mingw32_HOST_OS
lock contentfile Nothing = opencontentforlock contentfile >>= dolock
lock _ (Just lockfile) = openforlock lockfile >>= dolock . Just
lock contentfile Nothing = liftIO $
opencontentforlock contentfile >>= dolock
lock _ (Just lockfile) = do
mode <- annexFileMode
liftIO $ createLockFile mode lockfile >>= dolock . Just
{- Since content files are stored with the write bit disabled, have
- to fiddle with permissions to open for an exclusive lock. -}
opencontentforlock f = catchMaybeIO $ ifM (doesFileExist f)
( withModifiedFileMode f
opencontentforlock f = catchDefaultIO Nothing $
withModifiedFileMode f
(`unionFileModes` ownerWriteMode)
(openforlock f)
, openforlock f
)
openforlock f = openFd f ReadWrite Nothing defaultFileFlags
(openExistingLockFile f)
dolock Nothing = return Nothing
dolock (Just fd) = do
v <- tryIO $ setLock fd (WriteLock, AbsoluteSeek, 0, 0)
@ -188,7 +196,8 @@ lockContent key a = do
maybe noop cleanuplockfile mlockfile
liftIO $ maybe noop closeFd mfd
#else
lock _ (Just lockfile) = maybe alreadylocked (return . Just) =<< lockExclusive lockfile
lock _ (Just lockfile) = liftIO $
maybe alreadylocked (return . Just) =<< lockExclusive lockfile
lock _ Nothing = return Nothing
unlock mlockfile mlockhandle = do
liftIO $ maybe noop dropLock mlockhandle
@ -209,7 +218,7 @@ getViaTmpUnchecked = finishGetViaTmp (return True)
getViaTmpChecked :: Annex Bool -> Key -> (FilePath -> Annex Bool) -> Annex Bool
getViaTmpChecked check key action =
prepGetViaTmpChecked key $
prepGetViaTmpChecked key False $
finishGetViaTmp check key action
{- Prepares to download a key via a tmp file, and checks that there is
@ -220,20 +229,20 @@ getViaTmpChecked check key action =
-
- When there's enough free space, runs the download action.
-}
prepGetViaTmpChecked :: Key -> Annex Bool -> Annex Bool
prepGetViaTmpChecked key getkey = do
prepGetViaTmpChecked :: Key -> a -> Annex a -> Annex a
prepGetViaTmpChecked key unabletoget getkey = do
tmp <- fromRepo $ gitAnnexTmpObjectLocation key
e <- liftIO $ doesFileExist tmp
alreadythere <- if e
then fromIntegral . fileSize <$> liftIO (getFileStatus tmp)
alreadythere <- liftIO $ if e
then getFileSize tmp
else return 0
ifM (checkDiskSpace Nothing key alreadythere)
( do
-- The tmp file may not have been left writable
when e $ thawContent tmp
getkey
, return False
, return unabletoget
)
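The disk-space check above counts any existing partial tmp file toward what is already available, which is what lets an interrupted download be resumed without requiring the key's full size in free space again. A minimal sketch of that arithmetic (standalone; uses System.Directory's getFileSize rather than git-annex's own helper):

import System.Directory (doesFileExist, getFileSize)

-- How much more disk space a download still needs, given the expected
-- object size and whatever partial tmp file is already on disk.
stillNeeded :: Integer -> FilePath -> IO Integer
stillNeeded keysize tmp = do
    e <- doesFileExist tmp
    alreadythere <- if e then getFileSize tmp else return 0
    return (max 0 (keysize - alreadythere))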
finishGetViaTmp :: Annex Bool -> Key -> (FilePath -> Annex Bool) -> Annex Bool
@ -255,7 +264,10 @@ prepTmp key = do
createAnnexDirectory (parentDir tmp)
return tmp
{- Creates a temp file, runs an action on it, and cleans up the temp file. -}
{- Creates a temp file for a key, runs an action on it, and cleans up
- the temp file. If the action throws an exception, the temp file is
- left behind, which allows for resuming.
-}
withTmp :: Key -> (FilePath -> Annex a) -> Annex a
withTmp key action = do
tmp <- prepTmp key
@ -365,7 +377,7 @@ sendAnnex key rollback sendobject = go =<< prepSendAnnex key
)
{- Returns a file that contains an object's content,
- and an check to run after the transfer is complete.
- and a check to run after the transfer is complete.
-
- In direct mode, it's possible for the file to change as it's being sent,
- and the check detects this case and returns False.
@ -407,7 +419,7 @@ withObjectLoc key indirect direct = ifM isDirect
cleanObjectLoc :: Key -> Annex () -> Annex ()
cleanObjectLoc key cleaner = do
file <- calcRepo $ gitAnnexLocation key
void $ tryAnnexIO $ thawContentDir file
void $ tryIO $ thawContentDir file
cleaner
liftIO $ removeparents file (3 :: Int)
where
@ -420,9 +432,10 @@ cleanObjectLoc key cleaner = do
{- Removes a key's file from .git/annex/objects/
-
- In direct mode, deletes the associated file or files, and replaces
- them with symlinks. -}
removeAnnex :: Key -> Annex ()
removeAnnex key = withObjectLoc key remove removedirect
- them with symlinks.
-}
removeAnnex :: ContentLock -> Annex ()
removeAnnex (ContentLock key) = withObjectLoc key remove removedirect
where
remove file = cleanObjectLoc key $ do
secureErase file
@ -433,7 +446,7 @@ removeAnnex key = withObjectLoc key remove removedirect
removeInodeCache key
mapM_ (resetfile cache) fs
resetfile cache f = whenM (sameInodeCache f cache) $ do
l <- inRepo $ gitAnnexLink f key
l <- calcRepo $ gitAnnexLink f key
secureErase f
replaceFile f $ makeAnnexLink l
@ -443,7 +456,7 @@ removeAnnex key = withObjectLoc key remove removedirect
secureErase :: FilePath -> Annex ()
secureErase file = maybe noop go =<< annexSecureEraseCommand <$> Annex.getGitConfig
where
go basecmd = void $ liftIO $
go basecmd = void $ liftIO $
boolSystem "sh" [Param "-c", Param $ gencmd basecmd]
gencmd = massReplace [ ("%file", shellEscape file) ]
@ -542,7 +555,7 @@ saveState nocommit = doSideAction $ do
downloadUrl :: [Url.URLString] -> FilePath -> Annex Bool
downloadUrl urls file = go =<< annexWebDownloadCommand <$> Annex.getGitConfig
where
go Nothing = Url.withUrlOptions $ \uo ->
go Nothing = Url.withUrlOptions $ \uo ->
anyM (\u -> Url.download u file uo) urls
go (Just basecmd) = liftIO $ anyM (downloadcmd basecmd) urls
downloadcmd basecmd url =
@ -567,7 +580,7 @@ preseedTmp key file = go =<< inAnnex key
( return True
, do
s <- calcRepo $ gitAnnexLocation key
liftIO $ copyFileExternal s file
liftIO $ copyFileExternal CopyTimeStamps s file
)
{- Blocks writing to an annexed file, and modifies file permissions to


@ -1,10 +1,12 @@
{- git-annex file content managing for direct mode
-
- Copyright 2012-2013 Joey Hess <joey@kitenet.net>
- Copyright 2012-2014 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
{-# LANGUAGE CPP #-}
module Annex.Content.Direct (
associatedFiles,
associatedFilesRelative,
@ -27,6 +29,8 @@ module Annex.Content.Direct (
inodesChanged,
createInodeSentinalFile,
addContentWhenNotPresent,
withTSDelta,
getTSDelta,
) where
import Common.Annex
@ -110,7 +114,7 @@ addAssociatedFile key file = do
normaliseAssociatedFile :: FilePath -> Annex FilePath
normaliseAssociatedFile file = do
top <- fromRepo Git.repoPath
liftIO $ relPathDirToFile top <$> absPath file
liftIO $ relPathDirToFile top file
{- Checks if a file in the tree, associated with a key, has not been modified.
-
@ -136,7 +140,7 @@ recordedInodeCache key = withInodeCacheFile key $ \f ->
-}
updateInodeCache :: Key -> FilePath -> Annex ()
updateInodeCache key file = maybe noop (addInodeCache key)
=<< liftIO (genInodeCache file)
=<< withTSDelta (liftIO . genInodeCache file)
{- Adds another inode to the cache for a key. -}
addInodeCache :: Key -> InodeCache -> Annex ()
@ -164,16 +168,16 @@ withInodeCacheFile key a = a =<< calcRepo (gitAnnexInodeCache key)
{- Checks if a InodeCache matches the current version of a file. -}
sameInodeCache :: FilePath -> [InodeCache] -> Annex Bool
sameInodeCache _ [] = return False
sameInodeCache file old = go =<< liftIO (genInodeCache file)
sameInodeCache file old = go =<< withTSDelta (liftIO . genInodeCache file)
where
go Nothing = return False
go (Just curr) = elemInodeCaches curr old
{- Checks if a FileStatus matches the recorded InodeCache of a file. -}
sameFileStatus :: Key -> FileStatus -> Annex Bool
sameFileStatus key status = do
sameFileStatus :: Key -> FilePath -> FileStatus -> Annex Bool
sameFileStatus key f status = do
old <- recordedInodeCache key
let curr = toInodeCache status
curr <- withTSDelta $ \delta -> liftIO $ toInodeCache delta f status
case (old, curr) of
(_, Just c) -> elemInodeCaches c old
([], Nothing) -> return True
@ -206,7 +210,7 @@ addContentWhenNotPresent key contentfile associatedfile = do
v <- isAnnexLink associatedfile
when (Just key == v) $
replaceFile associatedfile $
liftIO . void . copyFileExternal contentfile
liftIO . void . copyFileExternal CopyAllMetaData contentfile
updateInodeCache key associatedfile
{- Some filesystems get new inodes each time they are mounted.
@ -217,40 +221,43 @@ addContentWhenNotPresent key contentfile associatedfile = do
- inodes have changed.
-}
inodesChanged :: Annex Bool
inodesChanged = maybe calc return =<< Annex.getState Annex.inodeschanged
inodesChanged = sentinalInodesChanged <$> sentinalStatus
withTSDelta :: (TSDelta -> Annex a) -> Annex a
withTSDelta a = a =<< getTSDelta
getTSDelta :: Annex TSDelta
#ifdef mingw32_HOST_OS
getTSDelta = sentinalTSDelta <$> sentinalStatus
#else
getTSDelta = pure noTSDelta -- optimisation
#endif
sentinalStatus :: Annex SentinalStatus
sentinalStatus = maybe check return =<< Annex.getState Annex.sentinalstatus
where
calc = do
scache <- liftIO . genInodeCache
=<< fromRepo gitAnnexInodeSentinal
scached <- readInodeSentinalFile
let changed = case (scache, scached) of
(Just c1, Just c2) -> not $ compareStrong c1 c2
_ -> True
Annex.changeState $ \s -> s { Annex.inodeschanged = Just changed }
return changed
readInodeSentinalFile :: Annex (Maybe InodeCache)
readInodeSentinalFile = do
sentinalcachefile <- fromRepo gitAnnexInodeSentinalCache
liftIO $ catchDefaultIO Nothing $
readInodeCache <$> readFile sentinalcachefile
writeInodeSentinalFile :: Annex ()
writeInodeSentinalFile = do
sentinalfile <- fromRepo gitAnnexInodeSentinal
createAnnexDirectory (parentDir sentinalfile)
sentinalcachefile <- fromRepo gitAnnexInodeSentinalCache
liftIO $ writeFile sentinalfile ""
liftIO $ maybe noop (writeFile sentinalcachefile . showInodeCache)
=<< genInodeCache sentinalfile
check = do
sc <- liftIO . checkSentinalFile =<< annexSentinalFile
Annex.changeState $ \s -> s { Annex.sentinalstatus = Just sc }
return sc
{- The sentinal file is only created when first initializing a repository.
- If there are any annexed objects in the repository already, creating
- the file would invalidate their inode caches. -}
createInodeSentinalFile :: Annex ()
createInodeSentinalFile =
unlessM (alreadyexists <||> hasobjects)
writeInodeSentinalFile
createInodeSentinalFile = unlessM (alreadyexists <||> hasobjects) $ do
s <- annexSentinalFile
createAnnexDirectory (parentDir (sentinalFile s))
liftIO $ writeSentinalFile s
where
alreadyexists = isJust <$> readInodeSentinalFile
alreadyexists = liftIO . sentinalFileExists =<< annexSentinalFile
hasobjects = liftIO . doesDirectoryExist =<< fromRepo gitAnnexObjectDir
annexSentinalFile :: Annex SentinalFile
annexSentinalFile = do
sentinalfile <- fromRepo gitAnnexInodeSentinal
sentinalcachefile <- fromRepo gitAnnexInodeSentinalCache
return $ SentinalFile
{ sentinalFile = sentinalfile
, sentinalCacheFile = sentinalcachefile
}
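The sentinel mechanism amounts to: stat a file that was written once at init time and never touched since, and compare with the stat recorded back then; if the inode of that file has changed, every cached inode in the repository is suspect. A rough standalone sketch of the comparison (POSIX-only; the recorded value's serialisation is made up):

import System.Posix.Files (getFileStatus, fileID)

-- True when the sentinel file's inode no longer matches what was recorded
-- at repository init time, meaning the inode caches cannot be trusted.
sentinelInodeChanged :: FilePath -> String -> IO Bool
sentinelInodeChanged sentinelfile recordedinode = do
    s <- getFileStatus sentinelfile
    return (show (fileID s) /= recordedinode)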

58
Annex/Difference.hs Normal file

@ -0,0 +1,58 @@
{- git-annex repository differences
-
- Copyright 2015 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
module Annex.Difference (
module Types.Difference,
setDifferences,
) where
import Common.Annex
import Types.Difference
import Logs.Difference
import Config
import Annex.UUID
import Logs.UUID
import Annex.Version
import qualified Annex
import qualified Data.Map as M
-- Differences are only allowed to be tweaked when initializing a
-- repository for the first time, and then only if there is not another
-- known uuid. If the repository was cloned from elsewhere, it inherits
-- the existing settings.
--
-- Must be called before setVersion, so it can check if this is the first
-- time the repository is being initialized.
setDifferences :: Annex ()
setDifferences = do
u <- getUUID
otherds <- allDifferences <$> recordedDifferences
ds <- mappend otherds . annexDifferences <$> Annex.getGitConfig
when (ds /= mempty) $ do
ds' <- ifM (isJust <$> getVersion)
( do
oldds <- recordedDifferencesFor u
when (ds /= oldds) $
warning $ "Cannot change tunable parameters in already initialized repository."
return oldds
, if otherds == mempty
then ifM (not . null . filter (/= u) . M.keys <$> uuidMap)
( do
warning "Cannot change tunable parameters in a clone of an existing repository."
return mempty
, return ds
)
else if otherds /= ds
then do
warning "The specified tunable parameters differ from values being used in other clones of this repository."
return otherds
else return ds
)
forM_ (listDifferences ds') $ \d ->
setConfig (ConfigKey $ differenceConfigKey d) (differenceConfigVal d)
recordDifferences ds' u

86
Annex/DirHashes.hs Normal file

@ -0,0 +1,86 @@
{- git-annex file locations
-
- Copyright 2010-2015 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
module Annex.DirHashes (
Hasher,
HashLevels(..),
objectHashLevels,
branchHashLevels,
branchHashDir,
dirHashes,
hashDirMixed,
hashDirLower,
) where
import Data.Bits
import Data.Word
import Data.Hash.MD5
import Data.Default
import Common
import Types.Key
import Types.GitConfig
import Types.Difference
type Hasher = Key -> FilePath
-- Number of hash levels to use. 2 is the default.
newtype HashLevels = HashLevels Int
instance Default HashLevels where
def = HashLevels 2
objectHashLevels :: GitConfig -> HashLevels
objectHashLevels = configHashLevels OneLevelObjectHash
branchHashLevels :: GitConfig -> HashLevels
branchHashLevels = configHashLevels OneLevelBranchHash
configHashLevels :: Difference -> GitConfig -> HashLevels
configHashLevels d config
| hasDifference d (annexDifferences config) = HashLevels 1
| otherwise = def
branchHashDir :: GitConfig -> Key -> String
branchHashDir config key = hashDirLower (branchHashLevels config) key
{- Two different directory hashes may be used. The mixed case hash
- came first, and is fine, except for the problem of case-strict
- filesystems such as Linux VFAT (mounted with shortname=mixed),
- which do not allow using a directory "XX" when "xx" already exists.
- To support that, most repositories use the lower case hash for new data. -}
dirHashes :: [HashLevels -> Hasher]
dirHashes = [hashDirLower, hashDirMixed]
hashDirs :: HashLevels -> Int -> String -> FilePath
hashDirs (HashLevels 1) sz s = addTrailingPathSeparator $ take sz s
hashDirs _ sz s = addTrailingPathSeparator $ take sz s </> drop sz s
hashDirMixed :: HashLevels -> Hasher
hashDirMixed n k = hashDirs n 2 $ take 4 $ display_32bits_as_dir =<< [a,b,c,d]
where
ABCD (a,b,c,d) = md5 $ md5FilePath $ key2file $ nonChunkKey k
hashDirLower :: HashLevels -> Hasher
hashDirLower n k = hashDirs n 3 $ take 6 $ md5s $ md5FilePath $ key2file $ nonChunkKey k
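To make the two layouts concrete, the splitting logic of hashDirs can be run standalone on a stand-in string ("abcdef" representing the first six characters of the key's md5; real values come from md5s/md5FilePath above):

import System.FilePath (addTrailingPathSeparator, (</>))

newtype HashLevels = HashLevels Int

hashDirs :: HashLevels -> Int -> String -> FilePath
hashDirs (HashLevels 1) sz s = addTrailingPathSeparator $ take sz s
hashDirs _ sz s = addTrailingPathSeparator $ take sz s </> drop sz s

main :: IO ()
main = do
    putStrLn $ hashDirs (HashLevels 2) 3 "abcdef"  -- prints "abc/def/"
    putStrLn $ hashDirs (HashLevels 1) 3 "abcdef"  -- prints "abc/"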
{- modified version of display_32bits_as_hex from Data.Hash.MD5
- Copyright (C) 2001 Ian Lynagh
- License: Either BSD or GPL
-}
display_32bits_as_dir :: Word32 -> String
display_32bits_as_dir w = trim $ swap_pairs cs
where
-- Need 32 characters to use. To avoid inadvertently making
-- a real word, use letters that appear less frequently.
chars = ['0'..'9'] ++ "zqjxkmvwgpfZQJXKMVWGPF"
cs = map (\x -> getc $ (shiftR w (6*x)) .&. 31) [0..7]
getc n = chars !! fromIntegral n
swap_pairs (x1:x2:xs) = x2:x1:swap_pairs xs
swap_pairs _ = []
-- Last 2 will always be 00, so omit.
trim = take 6


@ -1,6 +1,6 @@
{- git-annex direct mode
-
- Copyright 2012, 2013 Joey Hess <joey@kitenet.net>
- Copyright 2012-2014 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -32,8 +32,10 @@ import Utility.InodeCache
import Utility.CopyFile
import Annex.Perms
import Annex.ReplaceFile
import Annex.Exception
import Annex.VariantFile
import Git.Index
import Annex.Index
import Annex.LockFile
{- Uses git ls-files to find files that need to be committed, and stages
- them into the index. Returns True if some changes were staged. -}
@ -51,11 +53,12 @@ stageDirect = do
{- Determine what kind of modified or deleted file this is, as
- efficiently as we can, by getting any key that's associated
- with it in git, as well as its stat info. -}
go (file, Just sha, Just mode) = do
go (file, Just sha, Just mode) = withTSDelta $ \delta -> do
shakey <- catKey sha mode
mstat <- liftIO $ catchMaybeIO $ getSymbolicLinkStatus file
mcache <- liftIO $ maybe (pure Nothing) (toInodeCache delta file) mstat
filekey <- isAnnexLink file
case (shakey, filekey, mstat, toInodeCache =<< mstat) of
case (shakey, filekey, mstat, mcache) of
(_, Just key, _, _)
| shakey == filekey -> noop
{- A changed symlink. -}
@ -83,7 +86,7 @@ stageDirect = do
deletegit file
stageannexlink file key = do
l <- inRepo $ gitAnnexLink file key
l <- calcRepo $ gitAnnexLink file key
stageSymlink file =<< hashSymlink l
void $ addAssociatedFile key file
@ -128,7 +131,7 @@ addDirect file cache = do
return False
got (Just (key, _)) = ifM (sameInodeCache file [cache])
( do
l <- inRepo $ gitAnnexLink file key
l <- calcRepo $ gitAnnexLink file key
stageSymlink file =<< hashSymlink l
addInodeCache key cache
void $ addAssociatedFile key file
@ -141,21 +144,101 @@ addDirect file cache = do
)
{- In direct mode, git merge would usually refuse to do anything, since it
- sees present direct mode files as type changed files. To avoid this,
- merge is run with the work tree set to a temp directory.
- sees present direct mode files as type changed files.
-
- So, to handle a merge, it's run with the work tree set to a temp
- directory, and the merge is staged into a copy of the index.
- Then the work tree is updated to reflect the merge, and
- finally, the merge is committed and the real index updated.
-
- A lock file is used to avoid races with any other caller of mergeDirect.
-
- To prevent other git processes from making changes to the index while our
- merge is in progress, the index lock file is used as the temp index
- file. This is the same as what git does when updating the index
- normally.
-}
mergeDirect :: FilePath -> Git.Ref -> Git.Repo -> IO Bool
mergeDirect d branch g = do
whenM (doesDirectoryExist d) $
removeDirectoryRecursive d
createDirectoryIfMissing True d
let g' = g { location = Local { gitdir = Git.localGitDir g, worktree = Just d } }
Git.Merge.mergeNonInteractive branch g'
mergeDirect :: Maybe Git.Ref -> Maybe Git.Ref -> Git.Branch -> Annex Bool -> Git.Branch.CommitMode -> Annex Bool
mergeDirect startbranch oldref branch resolvemerge commitmode = exclusively $ do
reali <- liftIO . absPath =<< fromRepo indexFile
tmpi <- liftIO . absPath =<< fromRepo indexFileLock
liftIO $ copyFile reali tmpi
{- Cleans up after a direct mode merge. The merge must have been committed,
- and the commit sha passed in, along with the old sha of the tree
- before the merge. Uses git diff-tree to find files that changed between
- the two shas, and applies those changes to the work tree.
d <- fromRepo gitAnnexMergeDir
liftIO $ do
whenM (doesDirectoryExist d) $
removeDirectoryRecursive d
createDirectoryIfMissing True d
withIndexFile tmpi $ do
merged <- stageMerge d branch commitmode
r <- if merged
then return True
else resolvemerge
mergeDirectCleanup d (fromMaybe Git.Sha.emptyTree oldref)
mergeDirectCommit merged startbranch branch commitmode
liftIO $ rename tmpi reali
return r
where
exclusively = withExclusiveLock gitAnnexMergeLock
{- Stage a merge into the index, avoiding changing HEAD or the current
- branch. -}
stageMerge :: FilePath -> Git.Branch -> Git.Branch.CommitMode -> Annex Bool
stageMerge d branch commitmode = do
-- XXX A bug in git makes stageMerge unsafe to use if the git repo
-- is configured with core.symlinks=false
-- Using mergeNonInteractive is not ideal though, since it will
-- update the current branch immediately, before the work tree
-- has been updated, which would leave things in an inconsistent
-- state if mergeDirectCleanup is interrupted.
-- <http://marc.info/?l=git&m=140262402204212&w=2>
merger <- ifM (coreSymlinks <$> Annex.getGitConfig)
( return Git.Merge.stageMerge
, return $ \ref -> Git.Merge.mergeNonInteractive ref commitmode
)
inRepo $ \g -> do
wd <- liftIO $ absPath d
gd <- liftIO $ absPath $ Git.localGitDir g
merger branch $
g { location = Local { gitdir = gd, worktree = Just (addTrailingPathSeparator wd) } }
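The mechanism described above (stage the merge into a copy of the index, with the work tree pointed at a scratch directory, so nothing visible changes until the merge is known to be good) can be sketched with plain git commands. This is illustrative only, not this commit's code, and it assumes the process is already running inside the repository:

import System.Directory (copyFile)
import System.Environment (getEnvironment)
import System.Exit (ExitCode(..))
import System.Process

-- Merge 'branch' without touching the real index, work tree, or HEAD:
-- GIT_INDEX_FILE points at a copy of the index, --work-tree at a scratch
-- directory, and --no-commit/--no-ff keep HEAD where it is.
stageMergeSketch :: FilePath -> FilePath -> FilePath -> String -> IO Bool
stageMergeSketch realindex tmpindex scratchdir branch = do
    copyFile realindex tmpindex
    environ <- getEnvironment
    (_, _, _, p) <- createProcess (proc "git"
        ["--work-tree=" ++ scratchdir, "merge", "--no-commit", "--no-ff", branch])
        { env = Just (("GIT_INDEX_FILE", tmpindex) : environ) }
    (== ExitSuccess) <$> waitForProcess p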
{- Commits after a direct mode merge is complete, and after the work
- tree has been updated by mergeDirectCleanup.
-}
mergeDirectCommit :: Bool -> Maybe Git.Ref -> Git.Branch -> Git.Branch.CommitMode -> Annex ()
mergeDirectCommit allowff old branch commitmode = do
void preCommitDirect
d <- fromRepo Git.localGitDir
let merge_head = d </> "MERGE_HEAD"
let merge_msg = d </> "MERGE_MSG"
let merge_mode = d </> "MERGE_MODE"
ifM (pure allowff <&&> canff)
( inRepo $ Git.Branch.update Git.Ref.headRef branch -- fast forward
, do
msg <- liftIO $
catchDefaultIO ("merge " ++ fromRef branch) $
readFile merge_msg
void $ inRepo $ Git.Branch.commit commitmode False msg
Git.Ref.headRef [Git.Ref.headRef, branch]
)
liftIO $ mapM_ nukeFile [merge_head, merge_msg, merge_mode]
where
canff = maybe (return False) (\o -> inRepo $ Git.Branch.fastForwardable o branch) old
mergeDirectCleanup :: FilePath -> Git.Ref -> Annex ()
mergeDirectCleanup d oldref = do
updateWorkTree d oldref
liftIO $ removeDirectoryRecursive d
{- Updates the direct mode work tree to reflect the changes staged in the
- index by a git command, that was run in a temporary work tree.
-
- Uses diff-index to compare the staged changes with provided ref
- which should be the tree before the merge, and applies those
- changes to the work tree.
-
- There are really only two types of changes: An old item can be deleted,
- or a new item added. Two passes are made, first deleting and then
@ -164,9 +247,9 @@ mergeDirect d branch g = do
- order, but we cannot add the directory until the file with the
- same name is removed.)
-}
mergeDirectCleanup :: FilePath -> Git.Ref -> Git.Ref -> Annex ()
mergeDirectCleanup d oldsha newsha = do
(items, cleanup) <- inRepo $ DiffTree.diffTreeRecursive oldsha newsha
updateWorkTree :: FilePath -> Git.Ref -> Annex ()
updateWorkTree d oldref = do
(items, cleanup) <- inRepo $ DiffTree.diffIndex oldref
makeabs <- flip fromTopFilePath <$> gitRepo
let fsitems = zip (map (makeabs . DiffTree.file) items) items
forM_ fsitems $
@ -174,12 +257,11 @@ mergeDirectCleanup d oldsha newsha = do
forM_ fsitems $
go makeabs DiffTree.dstsha DiffTree.dstmode movein movein_raw
void $ liftIO cleanup
liftIO $ removeDirectoryRecursive d
where
go makeabs getsha getmode a araw (f, item)
| getsha item == nullSha = noop
| otherwise = void $
tryAnnex . maybe (araw item makeabs f) (\k -> void $ a item makeabs k f)
tryNonAsync . maybe (araw item makeabs f) (\k -> void $ a item makeabs k f)
=<< catKey (getsha item) (getmode item)
moveout _ _ = removeDirect
@ -194,26 +276,26 @@ mergeDirectCleanup d oldsha newsha = do
- key, it's left alone.
-
- If the file is already present, and does not exist in the
- oldsha branch, preserve this local file.
- oldref, preserve this local file.
-
- Otherwise, create the symlink and then if possible, replace it
- with the content. -}
movein item makeabs k f = unlessM (goodContent k f) $ do
preserveUnannexed item makeabs f oldsha
l <- inRepo $ gitAnnexLink f k
preserveUnannexed item makeabs f oldref
l <- calcRepo $ gitAnnexLink f k
replaceFile f $ makeAnnexLink l
toDirect k f
{- Any new, modified, or renamed files were written to the temp
- directory by the merge, and are moved to the real work tree. -}
movein_raw item makeabs f = do
preserveUnannexed item makeabs f oldsha
preserveUnannexed item makeabs f oldref
liftIO $ do
createDirectoryIfMissing True $ parentDir f
void $ tryIO $ rename (d </> getTopFilePath (DiffTree.file item)) f
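The delete-then-add ordering that updateWorkTree relies on can be shown in isolation: doing all removals in a first pass means a path can turn from a file into a directory (or the reverse) between the two trees without the incoming item colliding with the outgoing one. A minimal sketch:

-- A change between the old and new tree is either an item going away or an
-- item appearing (a rename shows up as one of each).
data Change = Delete FilePath | Add FilePath

applyInTwoPasses :: (FilePath -> IO ()) -> (FilePath -> IO ()) -> [Change] -> IO ()
applyInTwoPasses del add changes = do
    mapM_ (\c -> case c of Delete f -> del f; Add _ -> return ()) changes
    mapM_ (\c -> case c of Add f -> add f; Delete _ -> return ()) changes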
{- If the file that's being moved in is already present in the work
- tree, but did not exist in the oldsha branch, preserve this
- tree, but did not exist in the oldref, preserve this
- local, unannexed file (or directory), as "variant-local".
-
- It's also possible that the file that's being moved in
@ -221,15 +303,15 @@ mergeDirectCleanup d oldsha newsha = do
- file (not a directory), which should be preserved.
-}
preserveUnannexed :: DiffTree.DiffTreeItem -> (TopFilePath -> FilePath) -> FilePath -> Ref -> Annex ()
preserveUnannexed item makeabs absf oldsha = do
preserveUnannexed item makeabs absf oldref = do
whenM (liftIO (collidingitem absf) <&&> unannexed absf) $
liftIO $ findnewname absf 0
checkdirs (DiffTree.file item)
where
checkdirs from = do
let p = parentDir (getTopFilePath from)
let d = asTopFilePath p
unless (null p) $ do
checkdirs from = case upFrom (getTopFilePath from) of
Nothing -> noop
Just p -> do
let d = asTopFilePath p
let absd = makeabs d
whenM (liftIO (colliding_nondir absd) <&&> unannexed absd) $
liftIO $ findnewname absd 0
@ -241,7 +323,7 @@ preserveUnannexed item makeabs absf oldsha = do
<$> catchMaybeIO (getSymbolicLinkStatus f)
unannexed f = (isNothing <$> isAnnexLink f)
<&&> (isNothing <$> catFileDetails oldsha f)
<&&> (isNothing <$> catFileDetails oldref f)
findnewname :: FilePath -> Int -> IO ()
findnewname f n = do
@ -275,16 +357,17 @@ toDirectGen k f = do
(dloc:_) -> return $ Just $ fromdirect dloc
)
where
fromindirect loc = do
fromindirect loc = do
{- Move content from annex to direct file. -}
updateInodeCache k loc
void $ addAssociatedFile k f
modifyContent loc $ do
thawContent loc
replaceFile f $ liftIO . moveFile loc
liftIO (replaceFileFrom loc f)
`catchIO` (\_ -> freezeContent loc)
fromdirect loc = do
replaceFile f $
liftIO . void . copyFileExternal loc
liftIO . void . copyFileExternal CopyAllMetaData loc
updateInodeCache k f
{- Removes a direct mode file, while retaining its content in the annex


@ -1,6 +1,6 @@
{- git-annex direct mode guard fixup
-
- Copyright 2013 Joey Hess <joey@kitenet.net>
- Copyright 2013 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}


@ -1,6 +1,6 @@
{- dropping of unwanted content
-
- Copyright 2012-2014 Joey Hess <joey@kitenet.net>
- Copyright 2012-2014 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -16,7 +16,6 @@ import qualified Remote
import qualified Command.Drop
import Command
import Annex.Wanted
import Annex.Exception
import Config
import Annex.Content.Direct
@ -120,5 +119,5 @@ handleDropsFrom locs rs reason fromhere key afile knownpresentremote runner = do
slocs = S.fromList locs
safely a = either (const False) id <$> tryAnnex a
safely a = either (const False) id <$> tryNonAsync a


@ -1,6 +1,6 @@
{- git-annex environment
-
- Copyright 2012, 2013 Joey Hess <joey@kitenet.net>
- Copyright 2012, 2013 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -13,11 +13,7 @@ import Common.Annex
import Utility.UserInfo
import qualified Git.Config
import Config
import Annex.Exception
#ifndef mingw32_HOST_OS
import Utility.Env
#endif
{- Checks that the system's environment allows git to function.
- Git requires a GECOS username, or suitable git configuration, or
@ -36,30 +32,27 @@ checkEnvironment = do
liftIO checkEnvironmentIO
checkEnvironmentIO :: IO ()
checkEnvironmentIO =
#ifdef mingw32_HOST_OS
noop
#else
whenM (null <$> myUserGecos) $ do
username <- myUserName
ensureEnv "GIT_AUTHOR_NAME" username
ensureEnv "GIT_COMMITTER_NAME" username
checkEnvironmentIO = whenM (isNothing <$> myUserGecos) $ do
username <- myUserName
ensureEnv "GIT_AUTHOR_NAME" username
ensureEnv "GIT_COMMITTER_NAME" username
where
#ifndef __ANDROID__
-- existing environment is not overwritten
ensureEnv var val = void $ setEnv var val False
-- existing environment is not overwritten
ensureEnv var val = setEnv var val False
#else
-- Environment setting is broken on Android, so this is dealt with
-- in runshell instead.
ensureEnv _ _ = noop
#endif
#endif
{- Runs an action that commits to the repository, and if it fails,
- sets user.email to a dummy value and tries the action again. -}
- sets user.email and user.name to a dummy value and tries the action again. -}
ensureCommit :: Annex a -> Annex a
ensureCommit a = either retry return =<< tryAnnex a
ensureCommit a = either retry return =<< tryNonAsync a
where
retry _ = do
setConfig (ConfigKey "user.email") =<< liftIO myUserName
retry _ = do
name <- liftIO myUserName
setConfig (ConfigKey "user.name") name
setConfig (ConfigKey "user.email") name
a
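
The hunk above also shows the new error-handling idiom: ensureCommit wraps any committing action and, on failure, seeds both user.name and user.email before retrying. A minimal caller sketch, not part of the commit, using only calls visible elsewhere in it (Annex.Branch.commit "update" is the same call initRepo' makes later):

import Common.Annex
import Annex.Environment (ensureCommit)
import qualified Annex.Branch

-- Commit the git-annex branch, retrying once with a dummy identity if
-- git refuses the commit because user.name/user.email are unset.
commitBranchSafely :: Annex ()
commitBranchSafely = ensureCommit $ Annex.Branch.commit "update"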


@ -1,50 +0,0 @@
{- exception handling in the git-annex monad
-
- Note that when an Annex action fails and the exception is handled
- by these functions, any changes the action has made to the
- AnnexState are retained. This works because the Annex monad
- internally stores the AnnexState in a MVar.
-
- Copyright 2011-2013 Joey Hess <joey@kitenet.net>
-
- Licensed under the GNU GPL version 3 or higher.
-}
{-# LANGUAGE PackageImports #-}
module Annex.Exception (
bracketIO,
bracketAnnex,
tryAnnex,
tryAnnexIO,
throwAnnex,
catchAnnex,
) where
import qualified "MonadCatchIO-transformers" Control.Monad.CatchIO as M
import Control.Exception
import Common.Annex
{- Runs an Annex action, with setup and cleanup both in the IO monad. -}
bracketIO :: IO v -> (v -> IO b) -> (v -> Annex a) -> Annex a
bracketIO setup cleanup = M.bracket (liftIO setup) (liftIO . cleanup)
bracketAnnex :: Annex v -> (v -> Annex b) -> (v -> Annex a) -> Annex a
bracketAnnex = M.bracket
{- try in the Annex monad -}
tryAnnex :: Annex a -> Annex (Either SomeException a)
tryAnnex = M.try
{- try in the Annex monad, but only catching IO exceptions -}
tryAnnexIO :: Annex a -> Annex (Either IOException a)
tryAnnexIO = M.try
{- throw in the Annex monad -}
throwAnnex :: Exception e => e -> Annex a
throwAnnex = M.throw
{- catch in the Annex monad -}
catchAnnex :: Exception e => Annex a -> (e -> Annex a) -> Annex a
catchAnnex = M.catch
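
With this module deleted, callers switch to the generic exception functions that the surrounding hunks already use unqualified (presumably re-exported through Common.Annex): tryAnnex becomes tryNonAsync, and throwAnnex becomes throwM, as the file-permissions hunk below shows for modifyContent. A sketch of the migrated "safely" helper from the drop-handling hunk above, generalized to any fallback value; this is an illustration, not code from the commit:

-- Run an Annex action, returning a fallback value if it throws.
safely :: Annex a -> a -> Annex a
safely a fallback = either (const (return fallback)) return =<< tryNonAsync a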


@ -1,6 +1,6 @@
{- git-annex file matching
-
- Copyright 2012-2014 Joey Hess <joey@kitenet.net>
- Copyright 2012-2014 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -13,7 +13,6 @@ import Common.Annex
import Limit
import Utility.Matcher
import Types.Group
import Types.Limit
import Logs.Group
import Logs.Remote
import Annex.UUID
@ -25,18 +24,16 @@ import Types.Remote (RemoteConfig)
import Data.Either
import qualified Data.Set as S
type FileMatcher = Matcher MatchFiles
checkFileMatcher :: FileMatcher -> FilePath -> Annex Bool
checkFileMatcher :: (FileMatcher Annex) -> FilePath -> Annex Bool
checkFileMatcher matcher file = checkMatcher matcher Nothing (Just file) S.empty True
checkMatcher :: FileMatcher -> Maybe Key -> AssociatedFile -> AssumeNotPresent -> Bool -> Annex Bool
checkMatcher matcher mkey afile notpresent def
| isEmpty matcher = return def
checkMatcher :: (FileMatcher Annex) -> Maybe Key -> AssociatedFile -> AssumeNotPresent -> Bool -> Annex Bool
checkMatcher matcher mkey afile notpresent d
| isEmpty matcher = return d
| otherwise = case (mkey, afile) of
(_, Just file) -> go =<< fileMatchInfo file
(Just key, _) -> go (MatchingKey key)
_ -> return def
_ -> return d
where
go mi = matchMrun matcher $ \a -> a notpresent mi
@ -45,18 +42,18 @@ fileMatchInfo file = do
matchfile <- getTopFilePath <$> inRepo (toTopFilePath file)
return $ MatchingFile FileInfo
{ matchFile = matchfile
, relFile = file
, currFile = file
}
matchAll :: FileMatcher
matchAll :: FileMatcher Annex
matchAll = generate []
parsedToMatcher :: [Either String (Token MatchFiles)] -> Either String FileMatcher
parsedToMatcher :: [Either String (Token (MatchFiles Annex))] -> Either String (FileMatcher Annex)
parsedToMatcher parsed = case partitionEithers parsed of
([], vs) -> Right $ generate vs
(es, _) -> Left $ unwords $ map ("Parse failure: " ++) es
exprParser :: FileMatcher -> FileMatcher -> GroupMap -> M.Map UUID RemoteConfig -> Maybe UUID -> String -> [Either String (Token MatchFiles)]
exprParser :: FileMatcher Annex -> FileMatcher Annex -> GroupMap -> M.Map UUID RemoteConfig -> Maybe UUID -> String -> [Either String (Token (MatchFiles Annex))]
exprParser matchstandard matchgroupwanted groupmap configmap mu expr =
map parse $ tokenizeMatcher expr
where
@ -69,7 +66,7 @@ exprParser matchstandard matchgroupwanted groupmap configmap mu expr =
preferreddir = fromMaybe "public" $
M.lookup "preferreddir" =<< (`M.lookup` configmap) =<< mu
parseToken :: FileMatcher -> FileMatcher -> MkLimit -> MkLimit -> GroupMap -> String -> Either String (Token MatchFiles)
parseToken :: FileMatcher Annex -> FileMatcher Annex -> MkLimit Annex -> MkLimit Annex -> GroupMap -> String -> Either String (Token (MatchFiles Annex))
parseToken matchstandard matchgroupwanted checkpresent checkpreferreddir groupmap t
| t `elem` tokens = Right $ token t
| t == "standard" = call matchstandard
@ -106,10 +103,10 @@ tokenizeMatcher = filter (not . null ) . concatMap splitparens . words
{- Generates a matcher for files large enough (or meeting other criteria)
- to be added to the annex, rather than directly to git. -}
largeFilesMatcher :: Annex FileMatcher
largeFilesMatcher :: Annex (FileMatcher Annex)
largeFilesMatcher = go =<< annexLargeFiles <$> Annex.getGitConfig
where
go Nothing = return matchAll
go Nothing = return matchAll
go (Just expr) = do
gm <- groupMap
rc <- readRemoteLog


@ -4,7 +4,7 @@
- not change, otherwise removing old hooks using an old version of
- the script would fail.
-
- Copyright 2013-2014 Joey Hess <joey@kitenet.net>
- Copyright 2013-2014 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -16,7 +16,6 @@ import qualified Git.Hook as Git
import Config
import qualified Annex
import Utility.Shell
import Utility.FileMode
import qualified Data.Map as M
@ -53,19 +52,16 @@ hookWarning h msg = do
- the existing hooks are cached. -}
runAnnexHook :: Git.Hook -> Annex ()
runAnnexHook hook = do
cmd <- fromRepo $ Git.hookFile hook
m <- Annex.getState Annex.existinghooks
case M.lookup hook m of
Just True -> run cmd
Just True -> run
Just False -> noop
Nothing -> do
exists <- hookexists cmd
exists <- inRepo $ Git.hookExists hook
Annex.changeState $ \s -> s
{ Annex.existinghooks = M.insert hook exists m }
when exists $
run cmd
when exists run
where
hookexists f = liftIO $ catchBoolIO $
isExecutable . fileMode <$> getFileStatus f
run cmd = unlessM (liftIO $ boolSystem cmd []) $
warning $ cmd ++ " failed"
run = unlessM (inRepo $ Git.runHook hook) $ do
h <- fromRepo $ Git.hookFile hook
warning $ h ++ " failed"
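
runAnnexHook now defers both the existence check and the execution to Git.hookExists and Git.runHook. A one-line caller sketch, assuming preCommitHook is exported by this module (the repository-initialization hunk later in this commit imports Annex.Hook and passes preCommitHook to hookWrite):

-- Run the repository's pre-commit hook if it is installed, warning on failure.
runPreCommit :: Annex ()
runPreCommit = runAnnexHook preCommitHook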


@ -1,6 +1,6 @@
{- Using other git index files
-
- Copyright 2014 Joey Hess <joey@kitenet.net>
- Copyright 2014 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -9,6 +9,7 @@
module Annex.Index (
withIndexFile,
addGitEnv,
) where
import qualified Control.Exception as E
@ -17,30 +18,35 @@ import Common.Annex
import Git.Types
import qualified Annex
import Utility.Env
import Annex.Exception
{- Runs an action using a different git index file. -}
withIndexFile :: FilePath -> Annex a -> Annex a
withIndexFile f a = do
g <- gitRepo
#ifdef __ANDROID__
{- This should not be necessary on Android, but there is some
- weird getEnvironment breakage. See
- https://github.com/neurocyte/ghc-android/issues/7
- Use getEnv to get some key environment variables that
- git expects to have. -}
let keyenv = words "USER PATH GIT_EXEC_PATH HOSTNAME HOME"
let getEnvPair k = maybe Nothing (\v -> Just (k, v)) <$> getEnv k
e <- liftIO $ catMaybes <$> forM keyenv getEnvPair
let e' = ("GIT_INDEX_FILE", f):e
#else
e <- liftIO getEnvironment
let e' = addEntry "GIT_INDEX_FILE" f e
#endif
let g' = g { gitEnv = Just e' }
g' <- liftIO $ addGitEnv g "GIT_INDEX_FILE" f
r <- tryAnnex $ do
r <- tryNonAsync $ do
Annex.changeState $ \s -> s { Annex.repo = g' }
a
Annex.changeState $ \s -> s { Annex.repo = (Annex.repo s) { gitEnv = gitEnv g} }
either E.throw return r
addGitEnv :: Repo -> String -> String -> IO Repo
addGitEnv g var val = do
e <- maybe copyenv return (gitEnv g)
let e' = addEntry var val e
return $ g { gitEnv = Just e' }
where
copyenv = do
#ifdef __ANDROID__
{- This should not be necessary on Android, but there is some
- weird getEnvironment breakage. See
- https://github.com/neurocyte/ghc-android/issues/7
- Use getEnv to get some key environment variables that
- git expects to have. -}
let keyenv = words "USER PATH GIT_EXEC_PATH HOSTNAME HOME"
let getEnvPair k = maybe Nothing (\v -> Just (k, v)) <$> getEnv k
liftIO $ catMaybes <$> forM keyenv getEnvPair
#else
liftIO getEnvironment
#endif
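
addGitEnv is factored out so other code (notably the ssh-options plumbing later in this commit) can extend a repo's environment one variable at a time. A small sketch based on the signature above; the variable and value are illustrative:

import qualified Git
import Annex.Index (addGitEnv)

-- Return a copy of the repo whose git commands will additionally see
-- GIT_TRACE=1; the original Repo value is not modified.
withTracing :: Git.Repo -> IO Git.Repo
withTracing g = addGitEnv g "GIT_TRACE" "1"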


@ -1,6 +1,6 @@
{- git-annex repository initialization
-
- Copyright 2011 Joey Hess <joey@kitenet.net>
- Copyright 2011 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -11,44 +11,41 @@ module Annex.Init (
ensureInitialized,
isInitialized,
initialize,
initialize',
uninitialize,
probeCrippledFileSystem,
) where
import Common.Annex
import Utility.Network
import qualified Annex
import qualified Git
import qualified Git.LsFiles
import qualified Git.Config
import qualified Git.Construct
import qualified Git.Types as Git
import qualified Git.Objects
import qualified Annex.Branch
import Logs.UUID
import Logs.Trust.Basic
import Types.TrustLevel
import Annex.Version
import Annex.Difference
import Annex.UUID
import Config
import Annex.Direct
import Annex.Content.Direct
import Annex.Environment
import Annex.Perms
import Backend
import Annex.Hook
import Upgrade
#ifndef mingw32_HOST_OS
import Utility.UserInfo
import Utility.FileMode
import Annex.Perms
#endif
import Annex.Hook
import Git.Hook (hookFile)
import Upgrade
import Annex.Content
import Logs.Location
import System.Log.Logger
genDescription :: Maybe String -> Annex String
genDescription (Just d) = return d
genDescription Nothing = do
reldir <- liftIO . relHome =<< fromRepo Git.repoPath
reldir <- liftIO . relHome =<< liftIO . absPath =<< fromRepo Git.repoPath
hostname <- fromMaybe "" <$> liftIO getHostname
#ifndef mingw32_HOST_OS
let at = if null hostname then "" else "@"
@ -61,10 +58,23 @@ genDescription Nothing = do
initialize :: Maybe String -> Annex ()
initialize mdescription = do
prepUUID
initialize'
u <- getUUID
{- This will make the first commit to git, so ensure git is set up
- properly to allow commits when running it. -}
ensureCommit $ do
Annex.Branch.create
describeUUID u =<< genDescription mdescription
-- Everything except for uuid setup.
initialize' :: Annex ()
initialize' = do
checkFifoSupport
checkCrippledFileSystem
unlessM isBare $
hookWrite preCommitHook
setDifferences
setVersion supportedVersion
ifM (crippledFileSystem <&&> not <$> isBare)
( do
@ -76,12 +86,7 @@ initialize mdescription = do
switchHEADBack
)
createInodeSentinalFile
u <- getUUID
{- This will make the first commit to git, so ensure git is set up
- properly to allow commits when running it. -}
ensureCommit $ do
Annex.Branch.create
describeUUID u =<< genDescription mdescription
checkSharedClone
uninitialize :: Annex ()
uninitialize = do
@ -97,9 +102,7 @@ uninitialize = do
- Checks repository version and handles upgrades too.
-}
ensureInitialized :: Annex ()
ensureInitialized = do
getVersion >>= maybe needsinit checkUpgrade
fixBadBare
ensureInitialized = getVersion >>= maybe needsinit checkUpgrade
where
needsinit = ifM Annex.Branch.hasSibling
( initialize Nothing
@ -184,56 +187,9 @@ enableDirectMode = unlessM isDirect $ do
maybe noop (`toDirect` f) =<< isAnnexLink f
void $ liftIO clean
{- Work around for git-annex version 5.20131118 - 5.20131127, which
- had a bug that unset core.bare when initializing a bare repository.
-
- This resulted in objects sent to the repository being stored in
- repo/.git/annex/objects, so move them to repo/annex/objects.
-
- This check slows down every git-annex run somewhat (by one file stat),
- so should be removed after a suitable period of time has passed.
- Since the bare repository may be on an offline USB drive, best to
- keep it for a while. However, git-annex was only buggy for a few
- weeks, so not too long.
-}
fixBadBare :: Annex ()
fixBadBare = whenM checkBadBare $ do
ks <- getKeysPresent InAnnex
liftIO $ debugM "Init" $ unwords
[ "Detected bad bare repository with"
, show (length ks)
, "objects; fixing"
]
g <- Annex.gitRepo
gc <- Annex.getGitConfig
d <- Git.repoPath <$> Annex.gitRepo
void $ liftIO $ boolSystem "git"
[ Param $ "--git-dir=" ++ d
, Param "config"
, Param Git.Config.coreBare
, Param $ Git.Config.boolConfig True
]
g' <- liftIO $ Git.Construct.fromPath d
s' <- liftIO $ Annex.new $ g' { Git.location = Git.Local { Git.gitdir = d, Git.worktree = Nothing } }
Annex.changeState $ \s -> s
{ Annex.repo = Annex.repo s'
, Annex.gitconfig = Annex.gitconfig s'
}
forM_ ks $ \k -> do
oldloc <- liftIO $ gitAnnexLocation k g gc
thawContentDir oldloc
moveAnnex k oldloc
logStatus k InfoPresent
let dotgit = d </> ".git"
liftIO $ removeDirectoryRecursive dotgit
`catchIO` const (renameDirectory dotgit (d </> "removeme"))
{- A repository with the problem won't know it's a bare repository, but will
- have no pre-commit hook (which is not set up in a bare repository),
- and will not have a HEAD file in its .git directory. -}
checkBadBare :: Annex Bool
checkBadBare = allM (not <$>)
[isBare, hasPreCommitHook, hasDotGitHEAD]
where
hasPreCommitHook = inRepo $ doesFileExist . hookFile preCommitHook
hasDotGitHEAD = inRepo $ \r -> doesFileExist $ Git.localGitDir r </> "HEAD"
checkSharedClone :: Annex ()
checkSharedClone = whenM (inRepo Git.Objects.isSharedClone) $ do
showSideAction "Repository was cloned with --shared; setting annex.hardlink=true and making repository untrusted."
u <- getUUID
trustSet u UnTrusted
setConfig (annexConfig "hardlink") (Git.Config.boolConfig True)


@ -4,7 +4,7 @@
- git-annex branch. Among other things, it ensures that if git-annex is
- interrupted, its recorded data is not lost.
-
- Copyright 2011-2013 Joey Hess <joey@kitenet.net>
- Copyright 2011-2013 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -13,16 +13,10 @@
module Annex.Journal where
import System.IO.Binary
import Common.Annex
import Annex.Exception
import qualified Git
import Annex.Perms
#ifdef mingw32_HOST_OS
import Utility.WinLock
#endif
import Annex.LockFile
{- Records content for a file in the branch to the journal.
-
@ -42,7 +36,12 @@ setJournalFile _jl file content = do
jfile <- fromRepo $ journalFile file
let tmpfile = tmp </> takeFileName jfile
liftIO $ do
writeBinaryFile tmpfile content
withFile tmpfile WriteMode $ \h -> do
fileEncoding h
#ifdef mingw32_HOST_OS
hSetNewlineMode h noNewlineTranslation
#endif
hPutStr h content
moveFile tmpfile jfile
{- Gets any journalled content for a file in the branch. -}
@ -54,7 +53,7 @@ getJournalFile _jl = getJournalFileStale
- changes. -}
getJournalFileStale :: FilePath -> Annex (Maybe String)
getJournalFileStale file = inRepo $ \g -> catchMaybeIO $
readFileStrict $ journalFile file g
readFileStrictAnyEncoding $ journalFile file g
{- List of files that have updated content in the journal. -}
getJournalledFiles :: JournalLocked -> Annex [FilePath]
@ -77,9 +76,18 @@ getJournalFilesStale = do
getDirectoryContents $ gitAnnexJournalDir g
return $ filter (`notElem` [".", ".."]) fs
withJournalHandle :: (DirectoryHandle -> IO a) -> Annex a
withJournalHandle a = do
d <- fromRepo gitAnnexJournalDir
bracketIO (openDirectory d) closeDirectory (liftIO . a)
{- Checks if there are changes in the journal. -}
journalDirty :: Annex Bool
journalDirty = not . null <$> getJournalFilesStale
journalDirty = do
d <- fromRepo gitAnnexJournalDir
liftIO $
(not <$> isDirectoryEmpty d)
`catchIO` (const $ doesDirectoryExist d)
{- Produces a filename to use in the journal for a file on the branch.
-
@ -109,19 +117,4 @@ data JournalLocked = ProduceJournalLocked
{- Runs an action that modifies the journal, using locking to avoid
- contention with other git-annex processes. -}
lockJournal :: (JournalLocked -> Annex a) -> Annex a
lockJournal a = do
lockfile <- fromRepo gitAnnexJournalLock
createAnnexDirectory $ takeDirectory lockfile
mode <- annexFileMode
bracketIO (lock lockfile mode) unlock (const $ a ProduceJournalLocked)
where
#ifndef mingw32_HOST_OS
lock lockfile mode = do
l <- noUmask mode $ createFile lockfile mode
waitToSetLock l (WriteLock, AbsoluteSeek, 0, 0)
return l
unlock = closeFd
#else
lock lockfile _mode = waitToLock $ lockExclusive lockfile
unlock = dropLock
#endif
lockJournal a = withExclusiveLock gitAnnexJournalLock $ a ProduceJournalLocked
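
lockJournal is now just withExclusiveLock on gitAnnexJournalLock, handing the ProduceJournalLocked witness to the action. A caller sketch; the journal file name and content are made up, and the argument order follows setJournalFile above:

import Common.Annex
import Annex.Journal

-- Write a single journal entry while holding the journal lock; the
-- JournalLocked witness jl can only be obtained from lockJournal.
writeOneEntry :: Annex ()
writeOneEntry = lockJournal $ \jl ->
    setJournalFile jl "uuid.log" "example content\n"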


@ -5,7 +5,7 @@
- On other filesystems, git instead stores the symlink target in a regular
- file.
-
- Copyright 2013 Joey Hess <joey@kitenet.net>
- Copyright 2013 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -35,13 +35,17 @@ isAnnexLink file = maybe Nothing (fileKey . takeFileName) <$> getAnnexLinkTarget
- content.
-}
getAnnexLinkTarget :: FilePath -> Annex (Maybe LinkTarget)
getAnnexLinkTarget file = ifM (coreSymlinks <$> Annex.getGitConfig)
( check readSymbolicLink $
getAnnexLinkTarget f = getAnnexLinkTarget' f
=<< (coreSymlinks <$> Annex.getGitConfig)
{- Pass False to force looking inside file. -}
getAnnexLinkTarget' :: FilePath -> Bool -> Annex (Maybe LinkTarget)
getAnnexLinkTarget' file coresymlinks = if coresymlinks
then check readSymbolicLink $
return Nothing
, check readSymbolicLink $
else check readSymbolicLink $
check probefilecontent $
return Nothing
)
where
check getlinktarget fallback = do
v <- liftIO $ catchMaybeIO $ getlinktarget file
@ -68,6 +72,9 @@ getAnnexLinkTarget file = ifM (coreSymlinks <$> Annex.getGitConfig)
then ""
else s
makeAnnexLink :: LinkTarget -> FilePath -> Annex ()
makeAnnexLink = makeGitLink
{- Creates a link on disk.
-
- On a filesystem that does not support symlinks, writes the link target
@ -75,8 +82,8 @@ getAnnexLinkTarget file = ifM (coreSymlinks <$> Annex.getGitConfig)
- it's staged as such, so use addAnnexLink when adding a new file or
- modified link to git.
-}
makeAnnexLink :: LinkTarget -> FilePath -> Annex ()
makeAnnexLink linktarget file = ifM (coreSymlinks <$> Annex.getGitConfig)
makeGitLink :: LinkTarget -> FilePath -> Annex ()
makeGitLink linktarget file = ifM (coreSymlinks <$> Annex.getGitConfig)
( liftIO $ do
void $ tryIO $ removeFile file
createSymbolicLink linktarget file

Annex/LockFile.hs (new file, 72 lines)

@ -0,0 +1,72 @@
{- git-annex lock files.
-
- Copyright 2012, 2014 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
{-# LANGUAGE CPP #-}
module Annex.LockFile (
lockFileShared,
unlockFile,
getLockPool,
withExclusiveLock,
) where
import Common.Annex
import Annex
import Types.LockPool
import qualified Git
import Annex.Perms
import Utility.LockFile
import qualified Data.Map as M
{- Creates the specified lock file, and takes a shared lock, which is retained
- in the pool. -}
lockFileShared :: FilePath -> Annex ()
lockFileShared file = go =<< fromLockPool file
where
go (Just _) = noop -- already locked
go Nothing = do
#ifndef mingw32_HOST_OS
mode <- annexFileMode
lockhandle <- liftIO $ noUmask mode $ lockShared (Just mode) file
#else
lockhandle <- liftIO $ waitToLock $ lockShared file
#endif
changeLockPool $ M.insert file lockhandle
unlockFile :: FilePath -> Annex ()
unlockFile file = maybe noop go =<< fromLockPool file
where
go lockhandle = do
liftIO $ dropLock lockhandle
changeLockPool $ M.delete file
getLockPool :: Annex LockPool
getLockPool = getState lockpool
fromLockPool :: FilePath -> Annex (Maybe LockHandle)
fromLockPool file = M.lookup file <$> getLockPool
changeLockPool :: (LockPool -> LockPool) -> Annex ()
changeLockPool a = do
m <- getLockPool
changeState $ \s -> s { lockpool = a m }
{- Runs an action with an exclusive lock held. If the lock is already
- held, blocks until it becomes free. -}
withExclusiveLock :: (Git.Repo -> FilePath) -> Annex a -> Annex a
withExclusiveLock getlockfile a = do
lockfile <- fromRepo getlockfile
createAnnexDirectory $ takeDirectory lockfile
mode <- annexFileMode
bracketIO (lock mode lockfile) dropLock (const a)
where
#ifndef mingw32_HOST_OS
lock mode = noUmask mode . lockExclusive (Just mode)
#else
lock _mode = waitToLock . lockExclusive
#endif
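
A minimal sketch of the two entry points this new module adds, using only names that appear above; gitAnnexJournalLock is the same lock-file selector the journal code now passes in, and the other lock file path is illustrative:

import Common.Annex
import Annex.LockFile

-- Run an action with the journal lock held exclusively (exactly what
-- lockJournal in Annex.Journal now does).
withJournalLock :: Annex a -> Annex a
withJournalLock = withExclusiveLock gitAnnexJournalLock

-- Take a shared lock that stays in the pool until explicitly dropped.
pinThenRelease :: FilePath -> Annex ()
pinThenRelease lockfile = do
    lockFileShared lockfile
    -- ... do work while the shared lock is held ...
    unlockFile lockfile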


@ -1,60 +0,0 @@
{- git-annex lock pool
-
- Copyright 2012, 2014 Joey Hess <joey@kitenet.net>
-
- Licensed under the GNU GPL version 3 or higher.
-}
{-# LANGUAGE CPP #-}
module Annex.LockPool where
import Common.Annex
import Annex
import Types.LockPool
import qualified Data.Map as M
#ifndef mingw32_HOST_OS
import Annex.Perms
#else
import Utility.WinLock
#endif
{- Create a specified lock file, and takes a shared lock. -}
lockFile :: FilePath -> Annex ()
lockFile file = go =<< fromPool file
where
go (Just _) = noop -- already locked
go Nothing = do
#ifndef mingw32_HOST_OS
mode <- annexFileMode
lockhandle <- liftIO $ noUmask mode $
openFd file ReadOnly (Just mode) defaultFileFlags
liftIO $ waitToSetLock lockhandle (ReadLock, AbsoluteSeek, 0, 0)
#else
lockhandle <- liftIO $ waitToLock $ lockShared file
#endif
changePool $ M.insert file lockhandle
unlockFile :: FilePath -> Annex ()
unlockFile file = maybe noop go =<< fromPool file
where
go lockhandle = do
#ifndef mingw32_HOST_OS
liftIO $ closeFd lockhandle
#else
liftIO $ dropLock lockhandle
#endif
changePool $ M.delete file
getPool :: Annex LockPool
getPool = getState lockpool
fromPool :: FilePath -> Annex (Maybe LockHandle)
fromPool file = M.lookup file <$> getPool
changePool :: (LockPool -> LockPool) -> Annex ()
changePool a = do
m <- getPool
changeState $ \s -> s { lockpool = a m }

Annex/MakeRepo.hs (new file, 88 lines)

@ -0,0 +1,88 @@
{- making local repositories (used by webapp mostly)
-
- Copyright 2012-2014 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
module Annex.MakeRepo where
import Assistant.WebApp.Common
import Annex.Init
import qualified Git.Construct
import qualified Git.Config
import qualified Git.Command
import qualified Git.Branch
import qualified Annex
import Annex.UUID
import Annex.Direct
import Types.StandardGroups
import Logs.PreferredContent
import qualified Annex.Branch
{- Makes a new git repository. Or, if a git repository already
- exists, returns False. -}
makeRepo :: FilePath -> Bool -> IO Bool
makeRepo path bare = ifM (probeRepoExists path)
( return False
, do
(transcript, ok) <-
processTranscript "git" (toCommand params) Nothing
unless ok $
error $ "git init failed!\nOutput:\n" ++ transcript
return True
)
where
baseparams = [Param "init", Param "--quiet"]
params
| bare = baseparams ++ [Param "--bare", File path]
| otherwise = baseparams ++ [File path]
{- Runs an action in the git repository in the specified directory. -}
inDir :: FilePath -> Annex a -> IO a
inDir dir a = do
state <- Annex.new =<< Git.Config.read =<< Git.Construct.fromPath dir
Annex.eval state a
{- Creates a new repository, and returns its UUID. -}
initRepo :: Bool -> Bool -> FilePath -> Maybe String -> Maybe StandardGroup -> IO UUID
initRepo True primary_assistant_repo dir desc mgroup = inDir dir $ do
initRepo' desc mgroup
{- Initialize the master branch, so things that expect
- to have it will work, before any files are added. -}
unlessM (Git.Config.isBare <$> gitRepo) $
void $ inRepo $ Git.Branch.commitCommand Git.Branch.AutomaticCommit
[ Param "--quiet"
, Param "--allow-empty"
, Param "-m"
, Param "created repository"
]
{- Repositories directly managed by the assistant use direct mode.
-
- Automatic gc is disabled, as it can be slow. Instead, gc is done
- once a day.
-}
when primary_assistant_repo $ do
setDirect True
inRepo $ Git.Command.run
[Param "config", Param "gc.auto", Param "0"]
getUUID
{- Repo already exists, could be a non-git-annex repo though so
- still initialize it. -}
initRepo False _ dir desc mgroup = inDir dir $ do
initRepo' desc mgroup
getUUID
initRepo' :: Maybe String -> Maybe StandardGroup -> Annex ()
initRepo' desc mgroup = unlessM isInitialized $ do
initialize desc
u <- getUUID
maybe noop (defaultStandardGroup u) mgroup
{- Ensure branch gets committed right away so it is
- available for merging immediately. -}
Annex.Branch.commit "update"
{- Checks if a git repo exists at a location. -}
probeRepoExists :: FilePath -> IO Bool
probeRepoExists dir = isJust <$>
catchDefaultIO Nothing (Git.Construct.checkForRepo dir)
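
A usage sketch for this new module, combining makeRepo and initRepo the way a caller would for a plain (non-assistant) repository; the directory name and description are illustrative, and the Common.Annex import is assumed only to supply the UUID type:

import Common.Annex
import Annex.MakeRepo

-- Create ./demo as a non-bare git repository (makeRepo returns False if
-- one already exists there) and initialize git-annex inside it, without
-- the assistant's direct-mode setup.
setUpDemo :: IO UUID
setUpDemo = do
    _ <- makeRepo "demo" False
    initRepo True False "demo" (Just "demo repository") Nothing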


@ -1,12 +1,13 @@
{- git-annex metadata
-
- Copyright 2014 Joey Hess <joey@kitenet.net>
- Copyright 2014 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
module Annex.MetaData (
genMetaData,
dateMetaData,
module X
) where
@ -37,20 +38,18 @@ genMetaData :: Key -> FilePath -> FileStatus -> Annex ()
genMetaData key file status = do
maybe noop (flip copyMetaData key) =<< catKeyFileHEAD file
whenM (annexGenMetaData <$> Annex.getGitConfig) $ do
metadata <- getCurrentMetaData key
let metadata' = genMetaData' status metadata
unless (metadata' == emptyMetaData) $
addMetaData key metadata'
curr <- getCurrentMetaData key
addMetaData key (dateMetaData mtime curr)
where
mtime = posixSecondsToUTCTime $ realToFrac $ modificationTime status
{- Generates metadata from the FileStatus.
{- Generates metadata for a file's date stamp.
- Does not overwrite any existing metadata values. -}
genMetaData' :: FileStatus -> MetaData -> MetaData
genMetaData' status old = MetaData $ M.fromList $ filter isnew
dateMetaData :: UTCTime -> MetaData -> MetaData
dateMetaData mtime old = MetaData $ M.fromList $ filter isnew
[ (yearMetaField, S.singleton $ toMetaValue $ show y)
, (monthMetaField, S.singleton $ toMetaValue $ show m)
]
where
isnew (f, _) = S.null (currentMetaDataValues f old)
(y, m, _d) = toGregorian $ utctDay $
posixSecondsToUTCTime $ realToFrac $
modificationTime status
(y, m, _d) = toGregorian $ utctDay $ mtime
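
The year/month generation is now exposed as dateMetaData, which genMetaData calls with the file's modification time. A standalone sketch with a made-up timestamp, assuming MetaData and emptyMetaData come from Types.MetaData as the hunk's usage suggests:

import Data.Time.Clock (UTCTime(..))
import Data.Time.Calendar (fromGregorian)
import Annex.MetaData
import Types.MetaData

-- Year/month fields for a 2015-02-15 mtime, starting from empty metadata
-- so neither field is considered already set.
example :: MetaData
example = dateMetaData (UTCTime (fromGregorian 2015 2 15) 0) emptyMetaData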


@ -1,6 +1,6 @@
{- git-annex metadata, standard fields
-
- Copyright 2014 Joey Hess <joey@kitenet.net>
- Copyright 2014 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}

Annex/Notification.hs (new file, 101 lines)

@ -0,0 +1,101 @@
{- git-annex desktop notifications
-
- Copyright 2014 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
{-# LANGUAGE CPP #-}
module Annex.Notification (NotifyWitness, notifyTransfer, notifyDrop) where
import Common.Annex
import Logs.Transfer
#ifdef WITH_DBUS_NOTIFICATIONS
import qualified Annex
import Types.DesktopNotify
import qualified DBus.Notify as Notify
import qualified DBus.Client
#endif
-- Witness that notification has happened.
data NotifyWitness = NotifyWitness
{- Wrap around an action that performs a transfer, which may run multiple
- attempts. Displays notification when supported and when the user asked
- for it. -}
notifyTransfer :: Direction -> Maybe FilePath -> (NotifyWitness -> Annex Bool) -> Annex Bool
notifyTransfer _ Nothing a = a NotifyWitness
#ifdef WITH_DBUS_NOTIFICATIONS
notifyTransfer direction (Just f) a = do
wanted <- Annex.getState Annex.desktopnotify
if (notifyStart wanted || notifyFinish wanted)
then do
client <- liftIO DBus.Client.connectSession
startnotification <- liftIO $ if notifyStart wanted
then Just <$> Notify.notify client (startedTransferNote direction f)
else pure Nothing
ok <- a NotifyWitness
when (notifyFinish wanted) $ liftIO $ void $ maybe
(Notify.notify client $ finishedTransferNote ok direction f)
(\n -> Notify.replace client n $ finishedTransferNote ok direction f)
startnotification
return ok
else a NotifyWitness
#else
notifyTransfer _ (Just _) a = do a NotifyWitness
#endif
notifyDrop :: Maybe FilePath -> Bool -> Annex ()
notifyDrop Nothing _ = noop
#ifdef WITH_DBUS_NOTIFICATIONS
notifyDrop (Just f) ok = do
wanted <- Annex.getState Annex.desktopnotify
when (notifyFinish wanted) $ liftIO $ do
client <- DBus.Client.connectSession
void $ Notify.notify client (droppedNote ok f)
#else
notifyDrop (Just _) _ = noop
#endif
#ifdef WITH_DBUS_NOTIFICATIONS
startedTransferNote :: Direction -> FilePath -> Notify.Note
startedTransferNote Upload = mkNote Notify.Transfer Notify.Low iconUpload
"Uploading"
startedTransferNote Download = mkNote Notify.Transfer Notify.Low iconDownload
"Downloading"
finishedTransferNote :: Bool -> Direction -> FilePath -> Notify.Note
finishedTransferNote False Upload = mkNote Notify.TransferError Notify.Normal iconFailure
"Failed to upload"
finishedTransferNote False Download = mkNote Notify.TransferError Notify.Normal iconFailure
"Failed to download"
finishedTransferNote True Upload = mkNote Notify.TransferComplete Notify.Low iconSuccess
"Finished uploading"
finishedTransferNote True Download = mkNote Notify.TransferComplete Notify.Low iconSuccess
"Finished downloading"
droppedNote :: Bool -> FilePath -> Notify.Note
droppedNote False = mkNote Notify.TransferError Notify.Normal iconFailure
"Failed to drop"
droppedNote True = mkNote Notify.TransferComplete Notify.Low iconSuccess
"Dropped"
iconUpload, iconDownload, iconFailure, iconSuccess :: String
iconUpload = "network-transmit"
iconDownload = "network-receive"
iconFailure = "dialog-error"
iconSuccess = "git-annex" -- Is there a standard icon for success/completion?
mkNote :: Notify.Category -> Notify.UrgencyLevel -> String -> String -> FilePath -> Notify.Note
mkNote category urgency icon desc path = Notify.blankNote
{ Notify.appName = "git-annex"
, Notify.appImage = Just (Notify.Icon icon)
, Notify.summary = desc ++ " " ++ path
, Notify.hints =
[ Notify.Category category
, Notify.Urgency urgency
, Notify.SuppressSound True
]
}
#endif
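
A caller sketch for notifyTransfer, with a placeholder transfer body; the Download constructor is assumed to come from Logs.Transfer, which this module imports:

import Common.Annex
import Annex.Notification
import Logs.Transfer (Direction(..))

-- Wrap a (stubbed) download of "foo" so start/finish notifications fire
-- when the user enabled them; the witness proves notification was set up.
exampleDownload :: Annex Bool
exampleDownload = notifyTransfer Download (Just "foo") $ \_witness ->
    return True -- stand-in for the real transfer action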


@ -1,6 +1,6 @@
{- git-annex program path
-
- Copyright 2013 Joey Hess <joey@kitenet.net>
- Copyright 2013 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}


@ -1,6 +1,6 @@
{- git-annex file permissions
-
- Copyright 2012 Joey Hess <joey@kitenet.net>
- Copyright 2012 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -21,7 +21,6 @@ import Common.Annex
import Utility.FileMode
import Git.SharedRepository
import qualified Annex
import Annex.Exception
import Config
import System.Posix.Types
@ -120,6 +119,6 @@ createContentDir dest = do
modifyContent :: FilePath -> Annex a -> Annex a
modifyContent f a = do
createContentDir f -- also thaws it
v <- tryAnnex a
v <- tryNonAsync a
freezeContentDir f
either throwAnnex return v
either throwM return v


@ -1,6 +1,6 @@
{- git-annex command queue
-
- Copyright 2011, 2012 Joey Hess <joey@kitenet.net>
- Copyright 2011, 2012 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}


@ -1,6 +1,6 @@
{- quvi options for git-annex
-
- Copyright 2013 Joey Hess <joey@kitenet.net>
- Copyright 2013 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}


@ -1,6 +1,6 @@
{- git-annex file replacing
-
- Copyright 2013 Joey Hess <joey@kitenet.net>
- Copyright 2013 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -9,7 +9,6 @@ module Annex.ReplaceFile where
import Common.Annex
import Annex.Perms
import Annex.Exception
{- Replaces a possibly already existing file with a new version,
- atomically, by running an action.
@ -23,17 +22,29 @@ import Annex.Exception
- Throws an IO exception when it was unable to replace the file.
-}
replaceFile :: FilePath -> (FilePath -> Annex ()) -> Annex ()
replaceFile file a = do
replaceFile file action = replaceFileOr file action (liftIO . nukeFile)
{- If unable to replace the file with the temp file, runs the
- rollback action, which is responsible for cleaning up the temp file. -}
replaceFileOr :: FilePath -> (FilePath -> Annex ()) -> (FilePath -> Annex ()) -> Annex ()
replaceFileOr file action rollback = do
tmpdir <- fromRepo gitAnnexTmpMiscDir
void $ createAnnexDirectory tmpdir
bracketIO (setup tmpdir) nukeFile $ \tmpfile -> do
a tmpfile
liftIO $ catchIO (rename tmpfile file) (fallback tmpfile)
tmpfile <- liftIO $ setup tmpdir
go tmpfile `catchNonAsync` (const $ rollback tmpfile)
where
setup tmpdir = do
setup tmpdir = do
(tmpfile, h) <- openTempFileWithDefaultPermissions tmpdir "tmp"
hClose h
return tmpfile
fallback tmpfile _ = do
createDirectoryIfMissing True $ parentDir file
moveFile tmpfile file
go tmpfile = do
action tmpfile
liftIO $ replaceFileFrom tmpfile file
replaceFileFrom :: FilePath -> FilePath -> IO ()
replaceFileFrom src dest = go `catchIO` fallback
where
go = moveFile src dest
fallback _ = do
createDirectoryIfMissing True $ parentDir dest
go
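
replaceFile now delegates to replaceFileOr, with replaceFileFrom holding the rename-plus-mkdir fallback. A short caller sketch; the target path and content are illustrative:

import Common.Annex
import Annex.ReplaceFile

-- Atomically replace a work tree file: the action fills in a temp file,
-- which is then renamed over the target (creating the target's parent
-- directory if the first rename fails).
rewriteExample :: Annex ()
rewriteExample = replaceFile "some/file" $ \tmp ->
    liftIO $ writeFile tmp "new content\n"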


@ -1,6 +1,6 @@
{- git-annex ssh interface, with connection caching
-
- Copyright 2012,2013 Joey Hess <joey@kitenet.net>
- Copyright 2012-2015 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -8,50 +8,59 @@
{-# LANGUAGE CPP #-}
module Annex.Ssh (
sshCachingOptions,
sshOptions,
sshCacheDir,
sshReadPort,
forceSshCleanup,
sshOptionsEnv,
sshOptionsTo,
inRepoWithSshOptionsTo,
runSshOptions,
sshAskPassEnv,
runSshAskPass
) where
import qualified Data.Map as M
import Data.Hash.MD5
import System.Process (cwd)
import System.Exit
import Common.Annex
import Annex.LockPool
import Annex.LockFile
import qualified Build.SysConfig as SysConfig
import qualified Annex
import qualified Git
import qualified Git.Url
import Config
import Config.Files
import Utility.Env
import Types.CleanupActions
import Annex.Index (addGitEnv)
#ifndef mingw32_HOST_OS
import Annex.Perms
import Utility.LockFile
#endif
{- Generates parameters to ssh to a given host (or user@host) on a given
- port, with connection caching. -}
sshCachingOptions :: (String, Maybe Integer) -> [CommandParam] -> Annex [CommandParam]
sshCachingOptions (host, port) opts = do
Annex.addCleanup SshCachingCleanup sshCleanup
go =<< sshInfo (host, port)
- port. This includes connection caching parameters, and any ssh-options. -}
sshOptions :: (String, Maybe Integer) -> RemoteGitConfig -> [CommandParam] -> Annex [CommandParam]
sshOptions (host, port) gc opts = go =<< sshCachingInfo (host, port)
where
go (Nothing, params) = ret params
go (Just socketfile, params) = do
cleanstale
liftIO $ createDirectoryIfMissing True $ parentDir socketfile
lockFile $ socket2lock socketfile
prepSocket socketfile
ret params
ret ps = return $ ps ++ opts ++ portParams port ++ [Param "-T"]
-- If the lock pool is empty, this is the first ssh of this
-- run. There could be stale ssh connections hanging around
-- from a previous git-annex run that was interrupted.
cleanstale = whenM (not . any isLock . M.keys <$> getPool)
sshCleanup
ret ps = return $ concat
[ ps
, map Param (remoteAnnexSshOptions gc)
, opts
, portParams port
, [Param "-T"]
]
{- Returns a filename to use for a ssh connection caching socket, and
- parameters to enable ssh connection caching. -}
sshInfo :: (String, Maybe Integer) -> Annex (Maybe FilePath, [CommandParam])
sshInfo (host, port) = go =<< sshCacheDir
sshCachingInfo :: (String, Maybe Integer) -> Annex (Maybe FilePath, [CommandParam])
sshCachingInfo (host, port) = go =<< sshCacheDir
where
go Nothing = return (Nothing, [])
go (Just dir) = do
@ -75,10 +84,10 @@ bestSocketPath abssocketfile = do
then Just socketfile
else Nothing
where
-- ssh appends a 16 char extension to the socket when setting it
-- ssh appends a 16 char extension to the socket when setting it
-- up, which needs to be taken into account when checking
-- that a valid socket was constructed.
sshgarbage = replicate (1+16) 'X'
sshgarbage = replicate (1+16) 'X'
sshConnectionCachingParams :: FilePath -> [CommandParam]
sshConnectionCachingParams socketfile =
@ -102,55 +111,79 @@ sshCacheDir
where
gettmpdir = liftIO $ getEnv "GIT_ANNEX_TMP_DIR"
usetmpdir tmpdir = liftIO $ catchMaybeIO $ do
createDirectoryIfMissing True tmpdir
return tmpdir
let socktmp = tmpdir </> "ssh"
createDirectoryIfMissing True socktmp
return socktmp
portParams :: Maybe Integer -> [CommandParam]
portParams Nothing = []
portParams (Just port) = [Param "-p", Param $ show port]
{- Stop any unused ssh processes. -}
sshCleanup :: Annex ()
sshCleanup = go =<< sshCacheDir
{- Prepare to use a socket file. Locks a lock file to prevent
- other git-annex processes from stopping the ssh on this socket. -}
prepSocket :: FilePath -> Annex ()
prepSocket socketfile = do
-- If the lock pool is empty, this is the first ssh of this
-- run. There could be stale ssh connections hanging around
-- from a previous git-annex run that was interrupted.
whenM (not . any isLock . M.keys <$> getLockPool)
sshCleanup
-- Cleanup at end of this run.
Annex.addCleanup SshCachingCleanup sshCleanup
liftIO $ createDirectoryIfMissing True $ parentDir socketfile
lockFileShared $ socket2lock socketfile
enumSocketFiles :: Annex [FilePath]
enumSocketFiles = go =<< sshCacheDir
where
go Nothing = return []
go (Just dir) = liftIO $ filter (not . isLock)
<$> catchDefaultIO [] (dirContents dir)
{- Stop any unused ssh connection caching processes. -}
sshCleanup :: Annex ()
sshCleanup = mapM_ cleanup =<< enumSocketFiles
where
go Nothing = noop
go (Just dir) = do
sockets <- liftIO $ filter (not . isLock)
<$> catchDefaultIO [] (dirContents dir)
forM_ sockets cleanup
cleanup socketfile = do
#ifndef mingw32_HOST_OS
-- Drop any shared lock we have, and take an
-- exclusive lock, without blocking. If the lock
-- succeeds, nothing is using this ssh, and it can
-- be stopped.
--
-- After ssh is stopped, the lock file cannot be removed;
-- other processes may be waiting on our exclusive
-- lock to use it.
let lockfile = socket2lock socketfile
unlockFile lockfile
mode <- annexFileMode
fd <- liftIO $ noUmask mode $
openFd lockfile ReadWrite (Just mode) defaultFileFlags
v <- liftIO $ tryIO $
setLock fd (WriteLock, AbsoluteSeek, 0, 0)
v <- liftIO $ noUmask mode $ tryLockExclusive (Just mode) lockfile
case v of
Left _ -> noop
Right _ -> stopssh socketfile
liftIO $ closeFd fd
Nothing -> noop
Just lck -> do
forceStopSsh socketfile
liftIO $ dropLock lck
#else
stopssh socketfile
forceStopSsh socketfile
#endif
stopssh socketfile = do
let (dir, base) = splitFileName socketfile
let params = sshConnectionCachingParams base
-- "ssh -O stop" is noisy on stderr even with -q
void $ liftIO $ catchMaybeIO $
withQuietOutput createProcessSuccess $
(proc "ssh" $ toCommand $
[ Params "-O stop"
] ++ params ++ [Param "localhost"])
{ cwd = Just dir }
liftIO $ nukeFile socketfile
-- Cannot remove the lock file; other processes may
-- be waiting on our exclusive lock to use it.
{- Stop all ssh connection caching processes, even when they're in use. -}
forceSshCleanup :: Annex ()
forceSshCleanup = mapM_ forceStopSsh =<< enumSocketFiles
forceStopSsh :: FilePath -> Annex ()
forceStopSsh socketfile = do
let (dir, base) = splitFileName socketfile
let params = sshConnectionCachingParams base
-- "ssh -O stop" is noisy on stderr even with -q
void $ liftIO $ catchMaybeIO $
withQuietOutput createProcessSuccess $
(proc "ssh" $ toCommand $
[ Params "-O stop"
] ++ params ++ [Param "localhost"])
{ cwd = Just dir }
liftIO $ nukeFile socketfile
{- This needs to be as short as possible, due to limitations on the length
- of the path to a socket file. At the same time, it needs to be unique
@ -199,3 +232,70 @@ sshReadPort params = (port, reverse args)
aux (p,ps) (q:rest) | "-p" `isPrefixOf` q = aux (readPort $ drop 2 q, ps) rest
| otherwise = aux (p,q:ps) rest
readPort p = fmap fst $ listToMaybe $ reads p
{- When this env var is set, git-annex runs ssh with the specified
- options. (The options are separated by newlines.)
-
- This is a workaround for GIT_SSH not being able to contain
- additional parameters to pass to ssh. -}
sshOptionsEnv :: String
sshOptionsEnv = "GIT_ANNEX_SSHOPTION"
toSshOptionsEnv :: [CommandParam] -> String
toSshOptionsEnv = unlines . toCommand
fromSshOptionsEnv :: String -> [CommandParam]
fromSshOptionsEnv = map Param . lines
{- Enables ssh caching for git push/pull to a particular
- remote git repo. (Can safely be used on non-ssh remotes.)
-
- Also propagates any configured ssh-options.
-
- Like inRepo, the action is run with the local git repo.
- But here it's a modified version, with gitEnv to set GIT_SSH=git-annex,
- and sshOptionsEnv set so that git-annex will know what socket
- file to use. -}
inRepoWithSshOptionsTo :: Git.Repo -> RemoteGitConfig -> (Git.Repo -> IO a) -> Annex a
inRepoWithSshOptionsTo remote gc a =
liftIO . a =<< sshOptionsTo remote gc =<< gitRepo
{- To make any git commands be run with ssh caching enabled,
- and with the configured ssh-options, this alters the local Git.Repo's
- gitEnv to set GIT_SSH=git-annex, and sets sshOptionsEnv. -}
sshOptionsTo :: Git.Repo -> RemoteGitConfig -> Git.Repo -> Annex Git.Repo
sshOptionsTo remote gc g
| not (Git.repoIsUrl remote) || Git.repoIsHttp remote = uncached
| otherwise = case Git.Url.hostuser remote of
Nothing -> uncached
Just host -> do
(msockfile, _) <- sshCachingInfo (host, Git.Url.port remote)
case msockfile of
Nothing -> return g
Just sockfile -> do
command <- liftIO readProgramFile
prepSocket sockfile
let val = toSshOptionsEnv $ concat
[ sshConnectionCachingParams sockfile
, map Param (remoteAnnexSshOptions gc)
]
liftIO $ do
g' <- addGitEnv g sshOptionsEnv val
addGitEnv g' "GIT_SSH" command
where
uncached = return g
runSshOptions :: [String] -> String -> IO ()
runSshOptions args s = do
let args' = toCommand (fromSshOptionsEnv s) ++ args
let p = proc "ssh" args'
exitWith =<< waitForProcess . processHandle =<< createProcess p
{- When this env var is set, git-annex is being used as a ssh-askpass
- program, and should read the password from the specified location,
- and output it for ssh to read. -}
sshAskPassEnv :: String
sshAskPassEnv = "GIT_ANNEX_SSHASKPASS"
runSshAskPass :: FilePath -> IO ()
runSshAskPass passfile = putStrLn =<< readFile passfile
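
inRepoWithSshOptionsTo ties the pieces together: it primes the connection-caching socket, sets GIT_SSH=git-annex plus sshOptionsEnv, and then runs an ordinary git command in the local repository. A sketch of fetching over such a connection; the remote name is illustrative and RemoteGitConfig is assumed to live in Types.GitConfig:

import Common.Annex
import Annex.Ssh (inRepoWithSshOptionsTo)
import qualified Git
import qualified Git.Command
import Types.GitConfig (RemoteGitConfig)

-- Fetch from a remote (here called "origin" for illustration), with ssh
-- connection caching and the remote's annex-ssh-options applied; rmt is
-- the remote's repo, used only to pick the caching socket.
fetchCached :: Git.Repo -> RemoteGitConfig -> Annex Bool
fetchCached rmt gc = inRepoWithSshOptionsTo rmt gc $
    Git.Command.runBool [Param "fetch", Param "origin"]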


@ -1,6 +1,6 @@
{- git-annex tagged pushes
-
- Copyright 2012 Joey Hess <joey@kitenet.net>
- Copyright 2012 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -49,13 +49,13 @@ fromTaggedBranch b = case split "/" $ Git.fromRef b of
taggedPush :: UUID -> Maybe String -> Git.Ref -> Remote -> Git.Repo -> IO Bool
taggedPush u info branch remote = Git.Command.runBool
[ Param "push"
, Param $ Remote.name remote
[ Param "push"
, Param $ Remote.name remote
{- Using forcePush here is safe because we "own" the tagged branch
- we're pushing; it has no other writers. Ensures it is pushed
- even if it has been rewritten by a transition. -}
, Param $ Git.Branch.forcePush $ refspec Annex.Branch.name
, Param $ refspec branch
]
, Param $ Git.Branch.forcePush $ refspec Annex.Branch.name
, Param $ refspec branch
]
where
refspec b = Git.fromRef b ++ ":" ++ Git.fromRef (toTaggedBranch u info b)

Annex/Transfer.hs (new file, 145 lines)

@ -0,0 +1,145 @@
{- git-annex transfers
-
- Copyright 2012-2014 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
{-# LANGUAGE CPP #-}
module Annex.Transfer (
module X,
upload,
download,
runTransfer,
alwaysRunTransfer,
noRetry,
forwardRetry,
) where
import Common.Annex
import Logs.Transfer as X
import Annex.Notification as X
import Annex.Perms
import Utility.Metered
#ifdef mingw32_HOST_OS
import Utility.LockFile
#endif
import Control.Concurrent
upload :: UUID -> Key -> AssociatedFile -> RetryDecider -> (MeterUpdate -> Annex Bool) -> NotifyWitness -> Annex Bool
upload u key f d a _witness = runTransfer (Transfer Upload u key) f d a
download :: UUID -> Key -> AssociatedFile -> RetryDecider -> (MeterUpdate -> Annex Bool) -> NotifyWitness -> Annex Bool
download u key f d a _witness = runTransfer (Transfer Download u key) f d a
{- Runs a transfer action. Creates and locks the lock file while the
- action is running, and stores info in the transfer information
- file.
-
- If the transfer action returns False, the transfer info is
- left in the failedTransferDir.
-
- If the transfer is already in progress, returns False.
-
- An upload can be run from a read-only filesystem, and in this case
- no transfer information or lock file is used.
-}
runTransfer :: Transfer -> Maybe FilePath -> RetryDecider -> (MeterUpdate -> Annex Bool) -> Annex Bool
runTransfer = runTransfer' False
{- Like runTransfer, but ignores any existing transfer lock file for the
- transfer, allowing re-running a transfer that is already in progress.
-
- Note that this may result in confusing progress meter display in the
- webapp, if multiple processes are writing to the transfer info file. -}
alwaysRunTransfer :: Transfer -> Maybe FilePath -> RetryDecider -> (MeterUpdate -> Annex Bool) -> Annex Bool
alwaysRunTransfer = runTransfer' True
runTransfer' :: Bool -> Transfer -> Maybe FilePath -> RetryDecider -> (MeterUpdate -> Annex Bool) -> Annex Bool
runTransfer' ignorelock t file shouldretry a = do
info <- liftIO $ startTransferInfo file
(meter, tfile, metervar) <- mkProgressUpdater t info
mode <- annexFileMode
(fd, inprogress) <- liftIO $ prep tfile mode info
if inprogress && not ignorelock
then do
showNote "transfer already in progress"
return False
else do
ok <- retry info metervar $
bracketIO (return fd) (cleanup tfile) (const $ a meter)
unless ok $ recordFailedTransfer t info
return ok
where
#ifndef mingw32_HOST_OS
prep tfile mode info = do
mfd <- catchMaybeIO $
openFd (transferLockFile tfile) ReadWrite (Just mode)
defaultFileFlags { trunc = True }
case mfd of
Nothing -> return (Nothing, False)
Just fd -> do
setFdOption fd CloseOnExec True
locked <- catchMaybeIO $
setLock fd (WriteLock, AbsoluteSeek, 0, 0)
if isNothing locked
then do
closeFd fd
return (Nothing, True)
else do
void $ tryIO $ writeTransferInfoFile info tfile
return (mfd, False)
#else
prep tfile _mode info = do
v <- catchMaybeIO $ lockExclusive (transferLockFile tfile)
case v of
Nothing -> return (Nothing, False)
Just Nothing -> return (Nothing, True)
Just (Just lockhandle) -> do
void $ tryIO $ writeTransferInfoFile info tfile
return (Just lockhandle, False)
#endif
cleanup _ Nothing = noop
cleanup tfile (Just lockhandle) = do
void $ tryIO $ removeFile tfile
#ifndef mingw32_HOST_OS
void $ tryIO $ removeFile $ transferLockFile tfile
closeFd lockhandle
#else
{- Windows cannot delete the lockfile until the lock
- is closed. So it's possible to race with another
- process that takes the lock before it's removed,
- so ignore failure to remove.
-}
dropLock lockhandle
void $ tryIO $ removeFile $ transferLockFile tfile
#endif
retry oldinfo metervar run = do
v <- tryNonAsync run
case v of
Right b -> return b
Left e -> do
warning (show e)
b <- getbytescomplete metervar
let newinfo = oldinfo { bytesComplete = Just b }
if shouldretry oldinfo newinfo
then retry newinfo metervar run
else return False
getbytescomplete metervar
| transferDirection t == Upload =
liftIO $ readMVar metervar
| otherwise = do
f <- fromRepo $ gitAnnexTmpObjectLocation (transferKey t)
liftIO $ catchDefaultIO 0 $ getFileSize f
type RetryDecider = TransferInfo -> TransferInfo -> Bool
noRetry :: RetryDecider
noRetry _ _ = False
{- Retries a transfer when it fails, as long as the failed transfer managed
- to send some data. -}
forwardRetry :: RetryDecider
forwardRetry old new = bytesComplete old < bytesComplete new
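
upload and download require a NotifyWitness, which forces callers to go through notifyTransfer, and take a RetryDecider such as forwardRetry. A combined caller sketch; MeterUpdate is assumed to come from Utility.Metered as imported above, and the transfer action itself is supplied by the caller:

import Common.Annex
import Annex.Transfer
import Utility.Metered (MeterUpdate)

-- Upload a key to the remote with the given UUID, retrying while each
-- failed attempt still made forward progress; notifyTransfer supplies the
-- witness and handles desktop notifications.
uploadWithRetry :: UUID -> Key -> FilePath -> (MeterUpdate -> Annex Bool) -> Annex Bool
uploadWithRetry u key file sender =
    notifyTransfer Upload (Just file) $
        upload u key (Just file) forwardRetry sender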


@ -6,7 +6,7 @@
- UUIDs of remotes are cached in git config, using keys named
- remote.<name>.annex-uuid
-
- Copyright 2010-2013 Joey Hess <joey@kitenet.net>
- Copyright 2010-2013 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -21,7 +21,10 @@ module Annex.UUID (
gCryptNameSpace,
removeRepoUUID,
storeUUID,
storeUUIDIn,
setUUID,
webUUID,
bitTorrentUUID,
) where
import Common.Annex
@ -70,7 +73,7 @@ getRepoUUID r = do
where
updatecache u = do
g <- gitRepo
when (g /= r) $ storeUUID cachekey u
when (g /= r) $ storeUUIDIn cachekey u
cachekey = remoteConfig r "uuid"
removeRepoUUID :: Annex ()
@ -84,13 +87,24 @@ getUncachedUUID = toUUID . Git.Config.get key ""
{- Make sure that the repo has an annex.uuid setting. -}
prepUUID :: Annex ()
prepUUID = whenM ((==) NoUUID <$> getUUID) $
storeUUID configkey =<< liftIO genUUID
storeUUID =<< liftIO genUUID
storeUUID :: ConfigKey -> UUID -> Annex ()
storeUUID configfield = setConfig configfield . fromUUID
storeUUID :: UUID -> Annex ()
storeUUID = storeUUIDIn configkey
storeUUIDIn :: ConfigKey -> UUID -> Annex ()
storeUUIDIn configfield = setConfig configfield . fromUUID
{- Only sets the configkey in the Repo; does not change .git/config -}
setUUID :: Git.Repo -> UUID -> IO Git.Repo
setUUID r u = do
let s = show configkey ++ "=" ++ fromUUID u
Git.Config.store s r
-- Dummy uuid for the whole web. Do not alter.
webUUID :: UUID
webUUID = UUID "00000000-0000-0000-0000-000000000001"
-- Dummy uuid for bittorrent. Do not alter.
bitTorrentUUID :: UUID
bitTorrentUUID = UUID "00000000-0000-0000-0000-000000000002"


@ -1,7 +1,7 @@
{- Url downloading, with git-annex user agent and configured http
- headers and wget/curl options.
-
- Copyright 2013-2014 Joey Hess <joey@kitenet.net>
- Copyright 2013-2014 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -26,7 +26,7 @@ getUserAgent = Annex.getState $
Just . fromMaybe defaultUserAgent . Annex.useragent
getUrlOptions :: Annex U.UrlOptions
getUrlOptions = U.UrlOptions
getUrlOptions = mkUrlOptions
<$> getUserAgent
<*> headers
<*> options


@ -1,6 +1,6 @@
{- git-annex .variant files for automatic merge conflict resolution
-
- Copyright 2014 Joey Hess <joey@kitenet.net>
- Copyright 2014 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}


@ -1,6 +1,6 @@
{- git-annex repository versioning
-
- Copyright 2010,2013 Joey Hess <joey@kitenet.net>
- Copyright 2010,2013 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}


@ -1,6 +1,6 @@
{- metadata based branch views
-
- Copyright 2014 Joey Hess <joey@kitenet.net>
- Copyright 2014 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -102,7 +102,7 @@ refineView origview = checksize . calc Unchanged origview
let (components', viewchanges) = runWriter $
mapM (\c -> updateViewComponent c field vf) (viewComponents view)
viewchange = if field `elem` map viewField (viewComponents origview)
then maximum viewchanges
then maximum viewchanges
else Narrowing
in (view { viewComponents = components' }, viewchange)
| otherwise =
@ -207,7 +207,7 @@ viewComponentMatcher :: ViewComponent -> (MetaData -> Maybe [MetaValue])
viewComponentMatcher viewcomponent = \metadata ->
matcher (currentMetaDataValues metafield metadata)
where
metafield = viewField viewcomponent
metafield = viewField viewcomponent
matcher = case viewFilter viewcomponent of
FilterValues s -> \values -> setmatches $
S.intersection s values
@ -236,8 +236,8 @@ toViewPath = concatMap escapeslash . fromMetaValue
fromViewPath :: FilePath -> MetaValue
fromViewPath = toMetaValue . deescapeslash []
where
deescapeslash s [] = reverse s
deescapeslash s (c:cs)
deescapeslash s [] = reverse s
deescapeslash s (c:cs)
| c == pseudoSlash = case cs of
(c':cs')
| c' == pseudoSlash -> deescapeslash (pseudoSlash:s) cs'
@ -340,19 +340,21 @@ applyView' mkviewedfile getfilemetadata view = do
genViewBranch view $ do
uh <- inRepo Git.UpdateIndex.startUpdateIndex
hasher <- inRepo hashObjectStart
forM_ l $ \f ->
go uh hasher f =<< Backend.lookupFile f
forM_ l $ \f -> do
relf <- getTopFilePath <$> inRepo (toTopFilePath f)
go uh hasher relf =<< Backend.lookupFile f
liftIO $ do
hashObjectStop hasher
void $ stopUpdateIndex uh
void clean
where
genviewedfiles = viewedFiles view mkviewedfile -- enables memoization
go uh hasher f (Just (k, _)) = do
go uh hasher f (Just k) = do
metadata <- getCurrentMetaData k
let metadata' = getfilemetadata f `unionMetaData` metadata
forM_ (genviewedfiles f metadata') $ \fv -> do
stagesymlink uh hasher fv =<< inRepo (gitAnnexLink fv k)
f' <- fromRepo $ fromTopFilePath $ asTopFilePath fv
stagesymlink uh hasher f' =<< calcRepo (gitAnnexLink f' k)
go uh hasher f Nothing
| "." `isPrefixOf` f = do
s <- liftIO $ getSymbolicLinkStatus f
@ -410,19 +412,19 @@ withViewChanges addmeta removemeta = do
where
handleremovals item
| DiffTree.srcsha item /= nullSha =
handle item removemeta
handlechange item removemeta
=<< catKey (DiffTree.srcsha item) (DiffTree.srcmode item)
| otherwise = noop
handleadds makeabs item
| DiffTree.dstsha item /= nullSha =
handle item addmeta
handlechange item addmeta
=<< ifM isDirect
( catKey (DiffTree.dstsha item) (DiffTree.dstmode item)
-- optimisation
, isAnnexLink $ makeabs $ DiffTree.file item
)
| otherwise = noop
handle item a = maybe noop
handlechange item a = maybe noop
(void . commandAction . a (getTopFilePath $ DiffTree.file item))
{- Generates a branch for a view. This is done using a different index
@ -433,7 +435,7 @@ genViewBranch :: View -> Annex () -> Annex Git.Branch
genViewBranch view a = withIndex $ do
a
let branch = branchView view
void $ inRepo $ Git.Branch.commit True (fromRef branch) branch []
void $ inRepo $ Git.Branch.commit Git.Branch.AutomaticCommit True (fromRef branch) branch []
return branch
{- Runs an action using the view index file.


@ -1,10 +1,12 @@
{- filenames (not paths) used in views
-
- Copyright 2014 Joey Hess <joey@kitenet.net>
- Copyright 2014 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
{-# LANGUAGE CPP #-}
module Annex.View.ViewedFile (
ViewedFile,
MkViewedFile,
@ -43,10 +45,18 @@ viewedFileFromReference f = concat
{- To avoid collisions with filenames or directories that contain
- '%', and to allow the original directories to be extracted
- from the ViewedFile, '%' is escaped to '\%' (and '\' to '\\').
- from the ViewedFile, '%' is escaped.
-}
escape :: String -> String
escape = replace "%" "\\%" . replace "\\" "\\\\"
escape = replace "%" (escchar:'%':[]) . replace [escchar] [escchar, escchar]
escchar :: Char
#ifndef mingw32_HOST_OS
escchar = '\\'
#else
-- \ is path separator on Windows, so instead use !
escchar = '!'
#endif
{- For use when operating already within a view, so whatever filepath
- is present in the work tree is already a ViewedFile. -}
@ -58,10 +68,10 @@ viewedFileReuse = takeFileName
dirFromViewedFile :: ViewedFile -> FilePath
dirFromViewedFile = joinPath . drop 1 . sep [] ""
where
sep l _ [] = reverse l
sep l _ [] = reverse l
sep l curr (c:cs)
| c == '%' = sep (reverse curr:l) "" cs
| c == '\\' = case cs of
| c == escchar = case cs of
(c':cs') -> sep l (c':curr) cs'
[] -> sep l curr cs
| otherwise = sep l (c:curr) cs
@ -70,6 +80,7 @@ prop_viewedFile_roundtrips :: FilePath -> Bool
prop_viewedFile_roundtrips f
-- Relative filenames wanted, not directories.
| any (isPathSeparator) (end f ++ beginning f) = True
| isAbsolute f = True
| otherwise = dir == dirFromViewedFile (viewedFileFromReference f)
where
dir = joinPath $ beginning $ splitDirectories f
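
Since escchar now differs between POSIX ('\') and Windows ('!'), the roundtrip property above is the simplest way to sanity-check the escaping on either platform. A one-line spot check for a sample (illustrative) path:

import Annex.View.ViewedFile

-- Evaluates the roundtrip property for one concrete path.
sampleRoundtrip :: Bool
sampleRoundtrip = prop_viewedFile_roundtrips "foo/bar/baz.jpg"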


@ -1,6 +1,6 @@
{- git-annex checking whether content is wanted
-
- Copyright 2012 Joey Hess <joey@kitenet.net>
- Copyright 2012 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -15,15 +15,15 @@ import qualified Data.Set as S
{- Check if a file is preferred content for the local repository. -}
wantGet :: Bool -> Maybe Key -> AssociatedFile -> Annex Bool
wantGet def key file = isPreferredContent Nothing S.empty key file def
wantGet d key file = isPreferredContent Nothing S.empty key file d
{- Check if a file is preferred content for a remote. -}
wantSend :: Bool -> Maybe Key -> AssociatedFile -> UUID -> Annex Bool
wantSend def key file to = isPreferredContent (Just to) S.empty key file def
wantSend d key file to = isPreferredContent (Just to) S.empty key file d
{- Check if a file can be dropped, maybe from a remote.
- Don't drop files that are preferred content. -}
wantDrop :: Bool -> Maybe UUID -> Maybe Key -> AssociatedFile -> Annex Bool
wantDrop def from key file = do
wantDrop d from key file = do
u <- maybe getUUID (return . id) from
not <$> isPreferredContent (Just u) (S.singleton u) key file def
not <$> isPreferredContent (Just u) (S.singleton u) key file d

View file

@ -1,6 +1,6 @@
{- git-annex assistant daemon
-
- Copyright 2012-2013 Joey Hess <joey@kitenet.net>
- Copyright 2012-2013 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -21,6 +21,7 @@ import Assistant.Threads.Pusher
import Assistant.Threads.Merger
import Assistant.Threads.TransferWatcher
import Assistant.Threads.Transferrer
import Assistant.Threads.RemoteControl
import Assistant.Threads.SanityChecker
import Assistant.Threads.Cronner
import Assistant.Threads.ProblemFixer
@ -51,9 +52,12 @@ import qualified Utility.Daemon
import Utility.ThreadScheduler
import Utility.HumanTime
import qualified Build.SysConfig as SysConfig
#ifndef mingw32_HOST_OS
import Utility.LogFile
import Annex.Perms
import Utility.LogFile
#ifdef mingw32_HOST_OS
import Utility.Env
import Config.Files
import System.Environment (getArgs)
#endif
import System.Log.Logger
@ -69,23 +73,21 @@ stopDaemon = liftIO . Utility.Daemon.stopDaemon =<< fromRepo gitAnnexPidFile
- stdout and stderr descriptors. -}
startDaemon :: Bool -> Bool -> Maybe Duration -> Maybe String -> Maybe HostName -> Maybe (Maybe Handle -> Maybe Handle -> String -> FilePath -> IO ()) -> Annex ()
startDaemon assistant foreground startdelay cannotrun listenhost startbrowser = do
Annex.changeState $ \s -> s { Annex.daemon = True }
pidfile <- fromRepo gitAnnexPidFile
#ifndef mingw32_HOST_OS
logfile <- fromRepo gitAnnexLogFile
liftIO $ debugM desc $ "logging to " ++ logfile
#ifndef mingw32_HOST_OS
createAnnexDirectory (parentDir logfile)
logfd <- liftIO $ openLog logfile
logfd <- liftIO $ handleToFd =<< openLog logfile
if foreground
then do
origout <- liftIO $ catchMaybeIO $
fdToHandle =<< dup stdOutput
origerr <- liftIO $ catchMaybeIO $
fdToHandle =<< dup stdError
let undaemonize a = do
debugM desc $ "logging to " ++ logfile
Utility.Daemon.lockPidFile pidfile
Utility.LogFile.redirLog logfd
a
let undaemonize = Utility.Daemon.foreground logfd (Just pidfile)
start undaemonize $
case startbrowser of
Nothing -> Nothing
@ -93,16 +95,32 @@ startDaemon assistant foreground startdelay cannotrun listenhost startbrowser =
else
start (Utility.Daemon.daemonize logfd (Just pidfile) False) Nothing
#else
-- Windows is always foreground, and has no log file.
-- Windows doesn't daemonize, but does redirect output to the
-- log file. The only way to do so is to restart the program.
when (foreground || not foreground) $ do
liftIO $ Utility.Daemon.lockPidFile pidfile
start id $ do
case startbrowser of
Nothing -> Nothing
Just a -> Just $ a Nothing Nothing
let flag = "GIT_ANNEX_OUTPUT_REDIR"
createAnnexDirectory (parentDir logfile)
ifM (liftIO $ isNothing <$> getEnv flag)
( liftIO $ withFile devNull WriteMode $ \nullh -> do
loghandle <- openLog logfile
e <- getEnvironment
cmd <- readProgramFile
ps <- getArgs
(_, _, _, pid) <- createProcess (proc cmd ps)
{ env = Just (addEntry flag "1" e)
, std_in = UseHandle nullh
, std_out = UseHandle loghandle
, std_err = UseHandle loghandle
}
exitWith =<< waitForProcess pid
, start (Utility.Daemon.foreground (Just pidfile)) $
case startbrowser of
Nothing -> Nothing
Just a -> Just $ a Nothing Nothing
)
#endif
where
desc
desc
| assistant = "assistant"
| otherwise = "watch"
start daemonize webappwaiter = withThreadState $ \st -> do
@ -130,7 +148,7 @@ startDaemon assistant foreground startdelay cannotrun listenhost startbrowser =
let threads = if isJust cannotrun
then webappthread
else webappthread ++
[ watch $ commitThread
[ watch commitThread
#ifdef WITH_WEBAPP
#ifdef WITH_PAIRING
, assist $ pairListenerThread urlrenderer
@ -141,28 +159,29 @@ startDaemon assistant foreground startdelay cannotrun listenhost startbrowser =
, assist $ xmppReceivePackThread urlrenderer
#endif
#endif
, assist $ pushThread
, assist $ pushRetryThread
, assist $ mergeThread
, assist $ transferWatcherThread
, assist $ transferPollerThread
, assist $ transfererThread
, assist $ daemonStatusThread
, assist pushThread
, assist pushRetryThread
, assist mergeThread
, assist transferWatcherThread
, assist transferPollerThread
, assist transfererThread
, assist remoteControlThread
, assist daemonStatusThread
, assist $ sanityCheckerDailyThread urlrenderer
, assist $ sanityCheckerHourlyThread
, assist sanityCheckerHourlyThread
, assist $ problemFixerThread urlrenderer
#ifdef WITH_CLIBS
, assist $ mountWatcherThread urlrenderer
#endif
, assist $ netWatcherThread
, assist netWatcherThread
, assist $ upgraderThread urlrenderer
, assist $ upgradeWatcherThread urlrenderer
, assist $ netWatcherFallbackThread
, assist netWatcherFallbackThread
, assist $ transferScannerThread urlrenderer
, assist $ cronnerThread urlrenderer
, assist $ configMonitorThread
, assist $ glacierThread
, watch $ watchThread
, assist configMonitorThread
, assist glacierThread
, watch watchThread
-- must come last so that all threads that wait
-- on it have already started waiting
, watch $ sanityCheckerStartupThread startdelay
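Earlier in this file, the Windows branch cannot daemonize, so it redirects output by re-running the program with stdout and stderr pointed at the log file, using an environment variable as a marker so the child does not restart again. Below is a standalone sketch of that restart-to-redirect pattern; the flag name and log path are illustrative, not git-annex's real helpers.

import Data.Maybe (isJust)
import System.Environment (getArgs, getEnvironment, getExecutablePath, lookupEnv)
import System.Exit (exitWith)
import System.IO (IOMode(WriteMode), openFile)
import System.Process

main :: IO ()
main = do
    redirected <- isJust <$> lookupEnv flag
    if redirected
        then putStrLn "running with output going to the log file"
        else do
            logh <- openFile "daemon.log" WriteMode
            environ <- getEnvironment
            self <- getExecutablePath
            args <- getArgs
            -- Re-run ourselves with the marker set and output redirected,
            -- then exit with whatever status the child exits with.
            (_, _, _, pid) <- createProcess (proc self args)
                { env = Just ((flag, "1") : environ)
                , std_out = UseHandle logh
                , std_err = UseHandle logh
                }
            exitWith =<< waitForProcess pid
  where
    flag = "DEMO_OUTPUT_REDIR"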

View file

@ -1,6 +1,6 @@
{- git-annex assistant alerts
-
- Copyright 2012-2014 Joey Hess <joey@kitenet.net>
- Copyright 2012-2014 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -16,6 +16,7 @@ import qualified Remote
import Utility.Tense
import Logs.Transfer
import Types.Distribution
import Git.Types (RemoteName)
import Data.String
import qualified Data.Text as T
@ -117,11 +118,14 @@ commitAlert :: Alert
commitAlert = activityAlert Nothing
[Tensed "Committing" "Committed", "changes to git"]
showRemotes :: [Remote] -> TenseChunk
showRemotes = UnTensed . T.intercalate ", " . map (T.pack . Remote.name)
showRemotes :: [RemoteName] -> TenseChunk
showRemotes = UnTensed . T.intercalate ", " . map T.pack
syncAlert :: [Remote] -> Alert
syncAlert rs = baseActivityAlert
syncAlert = syncAlert' . map Remote.name
syncAlert' :: [RemoteName] -> Alert
syncAlert' rs = baseActivityAlert
{ alertName = Just SyncAlert
, alertHeader = Just $ tenseWords
[Tensed "Syncing" "Synced", "with", showRemotes rs]
@ -130,13 +134,18 @@ syncAlert rs = baseActivityAlert
}
syncResultAlert :: [Remote] -> [Remote] -> Alert
syncResultAlert succeeded failed = makeAlertFiller (not $ null succeeded) $
syncResultAlert succeeded failed = syncResultAlert'
(map Remote.name succeeded)
(map Remote.name failed)
syncResultAlert' :: [RemoteName] -> [RemoteName] -> Alert
syncResultAlert' succeeded failed = makeAlertFiller (not $ null succeeded) $
baseActivityAlert
{ alertName = Just SyncAlert
, alertHeader = Just $ tenseWords msg
}
where
msg
msg
| null succeeded = ["Failed to sync with", showRemotes failed]
| null failed = ["Synced with", showRemotes succeeded]
| otherwise =
@ -320,10 +329,10 @@ pairRequestAcknowledgedAlert who button = baseActivityAlert
, alertButtons = maybeToList button
}
xmppNeededAlert :: AlertButton -> Alert
xmppNeededAlert button = Alert
connectionNeededAlert :: AlertButton -> Alert
connectionNeededAlert button = Alert
{ alertHeader = Just "Share with friends, and keep your devices in sync across the cloud."
, alertIcon = Just TheCloud
, alertIcon = Just ConnectionIcon
, alertPriority = High
, alertButtons = [button]
, alertClosable = True
@ -331,7 +340,7 @@ xmppNeededAlert button = Alert
, alertMessageRender = renderData
, alertCounter = 0
, alertBlockDisplay = True
, alertName = Just $ XMPPNeededAlert
, alertName = Just ConnectionNeededAlert
, alertCombiner = Just $ dataCombiner $ \_old new -> new
, alertData = []
}

View file

@ -1,6 +1,6 @@
{- git-annex assistant alert utilities
-
- Copyright 2012, 2013 Joey Hess <joey@kitenet.net>
- Copyright 2012, 2013 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -14,7 +14,6 @@ import Utility.Tense
import qualified Data.Text as T
import Data.Text (Text)
import qualified Data.Map as M
import Data.Monoid
{- This is as many alerts as it makes sense to display at a time.
- A display might be smaller, or larger, the point is to not overwhelm the
@ -120,7 +119,7 @@ mergeAlert i al m = maybe updatePrune updateCombine (alertCombiner al)
where
bloat = M.size m' - maxAlerts
pruneold l =
let (f, rest) = partition (\(_, a) -> isFiller a) l
let (f, rest) = partition (\(_, a) -> isFiller a) l
in drop bloat f ++ rest
updatePrune = pruneBloat $ M.filterWithKey pruneSame $
M.insertWith' const i al m

View file

@ -1,6 +1,6 @@
{- git-annex assistant git-annex branch change tracking
-
- Copyright 2012 Joey Hess <joey@kitenet.net>
- Copyright 2012 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}

View file

@ -1,6 +1,6 @@
{- git-annex assistant change tracking
-
- Copyright 2012-2013 Joey Hess <joey@kitenet.net>
- Copyright 2012-2013 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}

View file

@ -1,6 +1,6 @@
{- git-annex assistant commit tracking
-
- Copyright 2012 Joey Hess <joey@kitenet.net>
- Copyright 2012 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}

View file

@ -1,6 +1,6 @@
{- Common infrastructure for the git-annex assistant.
-
- Copyright 2012 Joey Hess <joey@kitenet.net>
- Copyright 2012 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}

View file

@ -0,0 +1,53 @@
{- git-annex assistant CredPair cache.
-
- Copyright 2014 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
{-# LANGUAGE BangPatterns #-}
module Assistant.CredPairCache (
cacheCred,
getCachedCred,
expireCachedCred,
) where
import Assistant.Types.CredPairCache
import Types.Creds
import Assistant.Common
import Utility.ThreadScheduler
import qualified Data.Map as M
import Control.Concurrent
{- Caches a CredPair, but only for a limited time, after which it
- will expire.
-
- Note that repeatedly caching the same CredPair
- does not reset its expiry time.
-}
cacheCred :: CredPair -> Seconds -> Assistant ()
cacheCred (login, password) expireafter = do
cache <- getAssistant credPairCache
liftIO $ do
changeStrict cache $ M.insert login password
void $ forkIO $ do
threadDelaySeconds expireafter
changeStrict cache $ M.delete login
getCachedCred :: Login -> Assistant (Maybe Password)
getCachedCred login = do
cache <- getAssistant credPairCache
liftIO $ M.lookup login <$> readMVar cache
expireCachedCred :: Login -> Assistant ()
expireCachedCred login = do
cache <- getAssistant credPairCache
liftIO $ changeStrict cache $ M.delete login
{- Update map strictly to avoid keeping references to old creds in memory. -}
changeStrict :: CredPairCache -> (M.Map Login Password -> M.Map Login Password) -> IO ()
changeStrict cache a = modifyMVar_ cache $ \m -> do
let !m' = a m
return m'
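The new CredPairCache module above keeps credentials in an MVar-held map and forks a thread to expire each entry after a delay, updating the map strictly so old credentials are not retained. A standalone sketch of the same pattern in plain IO (names and the 2-second expiry are illustrative, not the Assistant monad code):

{-# LANGUAGE BangPatterns #-}
import Control.Concurrent
import qualified Data.Map as M

type Login = String
type Password = String
type Cache = MVar (M.Map Login Password)

-- Update the map strictly, so no references to old credentials are retained.
changeStrict :: Cache -> (M.Map Login Password -> M.Map Login Password) -> IO ()
changeStrict cache f = modifyMVar_ cache $ \m -> let !m' = f m in return m'

-- Cache a credential pair; a forked thread expires it after the given
-- number of seconds.
cacheCred :: Cache -> (Login, Password) -> Int -> IO ()
cacheCred cache (login, password) expireafter = do
    changeStrict cache (M.insert login password)
    _ <- forkIO $ do
        threadDelay (expireafter * 1000000)
        changeStrict cache (M.delete login)
    return ()

getCachedCred :: Cache -> Login -> IO (Maybe Password)
getCachedCred cache login = M.lookup login <$> readMVar cache

main :: IO ()
main = do
    cache <- newMVar M.empty
    cacheCred cache ("joey", "s3cret") 2
    print =<< getCachedCred cache "joey"   -- Just "s3cret"
    threadDelay 3000000
    print =<< getCachedCred cache "joey"   -- Nothing, the entry expired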

View file

@ -1,6 +1,6 @@
{- git-annex assistant daemon status
-
- Copyright 2012 Joey Hess <joey@kitenet.net>
- Copyright 2012 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -26,6 +26,7 @@ import Data.Time.Clock.POSIX
import Data.Time
import System.Locale
import qualified Data.Map as M
import qualified Data.Set as S
import qualified Data.Text as T
getDaemonStatus :: Assistant DaemonStatus
@ -64,7 +65,7 @@ calcSyncRemotes = do
, syncingToCloudRemote = any iscloud syncdata
}
where
iscloud r = not (Remote.readonly r) && Remote.availability r == Remote.GloballyAvailable
iscloud r = not (Remote.readonly r) && Remote.availability r == Remote.GloballyAvailable
{- Updates the syncRemotes list from the list of all remotes in Annex state. -}
updateSyncRemotes :: Assistant ()
@ -78,6 +79,15 @@ updateSyncRemotes = do
M.filter $ \alert ->
alertName alert /= Just CloudRepoNeededAlert
changeCurrentlyConnected :: (S.Set UUID -> S.Set UUID) -> Assistant ()
changeCurrentlyConnected sm = do
modifyDaemonStatus_ $ \ds -> ds
{ currentlyConnectedRemotes = sm (currentlyConnectedRemotes ds)
}
v <- currentlyConnectedRemotes <$> getDaemonStatus
debug [show v]
liftIO . sendNotification =<< syncRemotesNotifier <$> getDaemonStatus
updateScheduleLog :: Assistant ()
updateScheduleLog =
liftIO . sendNotification =<< scheduleLogNotifier <$> getDaemonStatus

View file

@ -1,6 +1,6 @@
{- git-annex assistant remote deletion utilities
-
- Copyright 2012 Joey Hess <joey@kitenet.net>
- Copyright 2012 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -17,7 +17,7 @@ import Logs.Location
import Assistant.DaemonStatus
import qualified Remote
import Remote.List
import qualified Git.Remote
import qualified Git.Remote.Remove
import Logs.Trust
import qualified Annex
@ -34,7 +34,7 @@ disableRemote uuid = do
remote <- fromMaybe (error "unknown remote")
<$> liftAnnex (Remote.remoteFromUUID uuid)
liftAnnex $ do
inRepo $ Git.Remote.remove (Remote.name remote)
inRepo $ Git.Remote.Remove.remove (Remote.name remote)
void $ remoteListRefresh
updateSyncRemotes
return remote
@ -62,7 +62,7 @@ removableRemote urlrenderer uuid = do
<$> liftAnnex (Remote.remoteFromUUID uuid)
mapM_ (queueremaining r) keys
where
queueremaining r k =
queueremaining r k =
queueTransferWhenSmall "remaining object in unwanted remote"
Nothing (Transfer Download uuid k) r
{- Scanning for keys can take a long time; do not tie up

View file

@ -1,6 +1,6 @@
{- git-annex assistant dropping of unwanted content
-
- Copyright 2012 Joey Hess <joey@kitenet.net>
- Copyright 2012 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}

View file

@ -1,6 +1,6 @@
{- git-annex assistant fscking
-
- Copyright 2013 Joey Hess <joey@kitenet.net>
- Copyright 2013 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU AGPL version 3 or higher.
-}

View file

@ -1,6 +1,6 @@
{- git-annex assistant gpg stuff
-
- Copyright 2013 Joey Hess <joey@kitenet.net>
- Copyright 2013 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU AGPL version 3 or higher.
-}
@ -20,7 +20,7 @@ newUserId :: IO UserId
newUserId = do
oldkeys <- secretKeys
username <- myUserName
let basekeyname = username ++ "'s git-annex encryption key"
let basekeyname = username ++ "'s git-annex encryption key"
return $ Prelude.head $ filter (\n -> M.null $ M.filter (== n) oldkeys)
( basekeyname
: map (\n -> basekeyname ++ show n) ([2..] :: [Int])

View file

@ -1,6 +1,6 @@
{- Assistant installation
-
- Copyright 2012 Joey Hess <joey@kitenet.net>
- Copyright 2012 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -22,6 +22,9 @@ import Utility.SshConfig
import Utility.OSX
#else
import Utility.FreeDesktop
#ifdef linux_HOST_OS
import Utility.UserInfo
#endif
import Assistant.Install.Menu
#endif
@ -30,16 +33,19 @@ standaloneAppBase = getEnv "GIT_ANNEX_APP_BASE"
{- The standalone app does not have an installation process.
- So when it's run, it needs to set up autostarting of the assistant
- daemon, as well as writing the programFile, and putting a
- git-annex-shell wrapper into ~/.ssh
- daemon, as well as writing the programFile, and putting the
- git-annex-shell and git-annex-wrapper wrapper scripts into ~/.ssh
-
- Note that this is done every time it's started, so if the user moves
- it around, the paths this sets up won't break.
-
- File manager hook script installation is done even for
- packaged apps, since it has to go into the user's home directory.
-}
ensureInstalled :: IO ()
ensureInstalled = go =<< standaloneAppBase
where
go Nothing = noop
go Nothing = installFileManagerHooks "git-annex"
go (Just base) = do
let program = base </> "git-annex"
programfile <- programFile
@ -56,27 +62,98 @@ ensureInstalled = go =<< standaloneAppBase
#endif
installAutoStart program autostartfile
{- This shim is only updated if it doesn't
- already exist with the right content. -}
sshdir <- sshDir
let shim = sshdir </> "git-annex-shell"
let runshell var = "exec " ++ base </> "runshell" ++
" git-annex-shell -c \"" ++ var ++ "\""
let content = unlines
let runshell var = "exec " ++ base </> "runshell " ++ var
let rungitannexshell var = runshell $ "git-annex-shell -c \"" ++ var ++ "\""
installWrapper (sshdir </> "git-annex-shell") $ unlines
[ shebang_local
, "set -e"
, "if [ \"x$SSH_ORIGINAL_COMMAND\" != \"x\" ]; then"
, runshell "$SSH_ORIGINAL_COMMAND"
, rungitannexshell "$SSH_ORIGINAL_COMMAND"
, "else"
, runshell "$@"
, rungitannexshell "$@"
, "fi"
]
installWrapper (sshdir </> "git-annex-wrapper") $ unlines
[ shebang_local
, "set -e"
, runshell "\"$@\""
]
curr <- catchDefaultIO "" $ readFileStrict shim
when (curr /= content) $ do
createDirectoryIfMissing True (parentDir shim)
viaTmp writeFile shim content
modifyFileMode shim $ addModes [ownerExecuteMode]
installFileManagerHooks program
installWrapper :: FilePath -> String -> IO ()
installWrapper file content = do
curr <- catchDefaultIO "" $ readFileStrict file
when (curr /= content) $ do
createDirectoryIfMissing True (parentDir file)
viaTmp writeFile file content
modifyFileMode file $ addModes [ownerExecuteMode]
installFileManagerHooks :: FilePath -> IO ()
#ifdef linux_HOST_OS
installFileManagerHooks program = do
let actions = ["get", "drop", "undo"]
-- Gnome
nautilusScriptdir <- (\d -> d </> "nautilus" </> "scripts") <$> userDataDir
createDirectoryIfMissing True nautilusScriptdir
forM_ actions $
genNautilusScript nautilusScriptdir
-- KDE
home <- myHomeDir
let kdeServiceMenusdir = home </> ".kde" </> "share" </> "kde4" </> "services" </> "ServiceMenus"
createDirectoryIfMissing True kdeServiceMenusdir
writeFile (kdeServiceMenusdir </> "git-annex.desktop")
(kdeDesktopFile actions)
where
genNautilusScript scriptdir action =
installscript (scriptdir </> scriptname action) $ unlines
[ shebang_local
, autoaddedcomment
, "exec " ++ program ++ " " ++ action ++ " --notify-start --notify-finish -- \"$@\""
]
scriptname action = "git-annex " ++ action
installscript f c = whenM (safetoinstallscript f) $ do
writeFile f c
modifyFileMode f $ addModes [ownerExecuteMode]
safetoinstallscript f = catchDefaultIO True $
elem autoaddedcomment . lines <$> readFileStrict f
autoaddedcomment = "# " ++ autoaddedmsg ++ " (To disable, chmod 600 this file.)"
autoaddedmsg = "Automatically added by git-annex, do not edit."
kdeDesktopFile actions = unlines $ concat $
kdeDesktopHeader actions : map kdeDesktopAction actions
kdeDesktopHeader actions =
[ "# " ++ autoaddedmsg
, "[Desktop Entry]"
, "Type=Service"
, "ServiceTypes=all/allfiles"
, "MimeType=all/all;"
, "Actions=" ++ intercalate ";" (map kdeDesktopSection actions)
, "X-KDE-Priority=TopLevel"
, "X-KDE-Submenu=Git-Annex"
, "X-KDE-Icon=git-annex"
, "X-KDE-ServiceTypes=KonqPopupMenu/Plugin"
]
kdeDesktopSection command = "GitAnnex" ++ command
kdeDesktopAction command =
[ ""
, "[Desktop Action " ++ kdeDesktopSection command ++ "]"
, "Name=" ++ command
, "Icon=git-annex"
, unwords
[ "Exec=sh -c 'cd \"$(dirname '%U')\" &&"
, program
, command
, "--notify-start --notify-finish -- %U'"
]
]
#else
installFileManagerHooks _ = noop
#endif
{- Returns a cleaned up environment that lacks settings used to make the
- standalone builds use their bundled libraries and programs.
@ -87,15 +164,15 @@ ensureInstalled = go =<< standaloneAppBase
cleanEnvironment :: IO (Maybe [(String, String)])
cleanEnvironment = clean <$> getEnvironment
where
clean env
clean environ
| null vars = Nothing
| otherwise = Just $ catMaybes $ map (restoreorig env) env
| otherwise = Just $ catMaybes $ map (restoreorig environ) environ
| otherwise = Nothing
where
vars = words $ fromMaybe "" $
lookup "GIT_ANNEX_STANDLONE_ENV" env
restoreorig oldenv p@(k, _v)
| k `elem` vars = case lookup ("ORIG_" ++ k) oldenv of
lookup "GIT_ANNEX_STANDLONE_ENV" environ
restoreorig oldenviron p@(k, _v)
| k `elem` vars = case lookup ("ORIG_" ++ k) oldenviron of
(Just v')
| not (null v') -> Just (k, v')
_ -> Nothing

View file

@ -1,6 +1,6 @@
{- Assistant autostart file installation
-
- Copyright 2012 Joey Hess <joey@kitenet.net>
- Copyright 2012 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}

View file

@ -1,6 +1,6 @@
{- Assistant menu installation.
-
- Copyright 2013 Joey Hess <joey@kitenet.net>
- Copyright 2013 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}

View file

@ -1,6 +1,6 @@
{- git-annex assistant remote creation utilities
-
- Copyright 2012, 2013 Joey Hess <joey@kitenet.net>
- Copyright 2012, 2013 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -48,7 +48,7 @@ makeRsyncRemote :: RemoteName -> String -> Annex String
makeRsyncRemote name location = makeRemote name location $ const $ void $
go =<< Command.InitRemote.findExisting name
where
go Nothing = setupSpecialRemote name Rsync.remote config Nothing
go Nothing = setupSpecialRemote name Rsync.remote config Nothing
(Nothing, Command.InitRemote.newConfig name)
go (Just (u, c)) = setupSpecialRemote name Rsync.remote config Nothing
(Just u, c)
@ -90,18 +90,23 @@ enableSpecialRemote name remotetype mcreds config = do
r <- Command.InitRemote.findExisting name
case r of
Nothing -> error $ "Cannot find a special remote named " ++ name
Just (u, c) -> setupSpecialRemote name remotetype config mcreds (Just u, c)
Just (u, c) -> setupSpecialRemote' False name remotetype config mcreds (Just u, c)
setupSpecialRemote :: RemoteName -> RemoteType -> R.RemoteConfig -> Maybe CredPair -> (Maybe UUID, R.RemoteConfig) -> Annex RemoteName
setupSpecialRemote name remotetype config mcreds (mu, c) = do
setupSpecialRemote = setupSpecialRemote' True
setupSpecialRemote' :: Bool -> RemoteName -> RemoteType -> R.RemoteConfig -> Maybe CredPair -> (Maybe UUID, R.RemoteConfig) -> Annex RemoteName
setupSpecialRemote' setdesc name remotetype config mcreds (mu, c) = do
{- Currently, only 'weak' ciphers can be generated from the
- assistant, because otherwise GnuPG may block once the entropy
- pool is drained, and as of now there's no way to tell the user
- to perform IO actions to refill the pool. -}
(c', u) <- R.setup remotetype mu mcreds $
M.insert "highRandomQuality" "false" $ M.union config c
describeUUID u name
configSet u c'
when setdesc $
whenM (isNothing . M.lookup u <$> uuidMap) $
describeUUID u name
return name
{- Returns the name of the git remote it created. If there's already a

View file

@ -1,6 +1,6 @@
{- git-annex assistant monad
-
- Copyright 2012 Joey Hess <joey@kitenet.net>
- Copyright 2012 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -43,6 +43,8 @@ import Assistant.Types.RepoProblem
import Assistant.Types.Buddies
import Assistant.Types.NetMessager
import Assistant.Types.ThreadName
import Assistant.Types.RemoteControl
import Assistant.Types.CredPairCache
newtype Assistant a = Assistant { mkAssistant :: ReaderT AssistantData IO a }
deriving (
@ -68,6 +70,8 @@ data AssistantData = AssistantData
, branchChangeHandle :: BranchChangeHandle
, buddyList :: BuddyList
, netMessager :: NetMessager
, remoteControl :: RemoteControl
, credPairCache :: CredPairCache
}
newAssistantData :: ThreadState -> DaemonStatusHandle -> IO AssistantData
@ -86,6 +90,8 @@ newAssistantData st dstatus = AssistantData
<*> newBranchChangeHandle
<*> newBuddyList
<*> newNetMessager
<*> newRemoteControl
<*> newCredPairCache
runAssistant :: AssistantData -> Assistant a -> IO a
runAssistant d a = runReaderT (mkAssistant a) d

View file

@ -1,6 +1,6 @@
{- git-annex assistant named threads.
-
- Copyright 2012 Joey Hess <joey@kitenet.net>
- Copyright 2012 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}

View file

@ -1,6 +1,6 @@
{- git-annex assistant out of band network messager interface
-
- Copyright 2012-2013 Joey Hess <joey@kitenet.net>
- Copyright 2012-2013 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -80,7 +80,7 @@ checkImportantNetMessages (storedclient, sentclient) = go <<~ netMessager
queuePushInitiation :: NetMessage -> Assistant ()
queuePushInitiation msg@(Pushing clientid stage) = do
tv <- getPushInitiationQueue side
liftIO $ atomically $ do
liftIO $ atomically $ do
r <- tryTakeTMVar tv
case r of
Nothing -> putTMVar tv [msg]
@ -88,7 +88,7 @@ queuePushInitiation msg@(Pushing clientid stage) = do
let !l' = msg : filter differentclient l
putTMVar tv l'
where
side = pushDestinationSide stage
side = pushDestinationSide stage
differentclient (Pushing cid _) = cid /= clientid
differentclient _ = True
queuePushInitiation _ = noop

View file

@ -1,6 +1,6 @@
{- git-annex assistant repo pairing, core data types
-
- Copyright 2012 Joey Hess <joey@kitenet.net>
- Copyright 2012 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -58,6 +58,15 @@ data PairData = PairData
}
deriving (Eq, Read, Show)
checkSane :: PairData -> Bool
checkSane p = all (not . any isControl)
[ fromMaybe "" (remoteHostName p)
, remoteUserName p
, remoteDirectory p
, remoteSshPubKey p
, fromUUID (pairUUID p)
]
type UserName = String
{- A pairing that is in progress has a secret, a thread that is
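checkSane, added above, rejects a pairing message if any field contains a control character, since such characters could be used for console poisoning when the message is displayed. A standalone sketch of the check over plain strings (the field values are illustrative):

import Data.Char (isControl)

-- A message is sane only if none of its fields contain control characters.
checkSane :: [String] -> Bool
checkSane = all (not . any isControl)

main :: IO ()
main = do
    print (checkSane ["host.example.com", "joey", "/home/joey/annex"])   -- True
    print (checkSane ["host.example.com", "joey\ESC[2J", "/home/joey"])  -- False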

View file

@ -1,6 +1,6 @@
{- git-annex assistant pairing remote creation
-
- Copyright 2012 Joey Hess <joey@kitenet.net>
- Copyright 2012 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -23,12 +23,11 @@ import qualified Data.Text as T
{- Authorized keys are set up before pairing is complete, so that the other
- side can immediately begin syncing. -}
setupAuthorizedKeys :: PairMsg -> FilePath -> IO ()
setupAuthorizedKeys msg repodir = do
validateSshPubKey pubkey
unlessM (liftIO $ addAuthorizedKeys True repodir pubkey) $
error "failed setting up ssh authorized keys"
where
pubkey = remoteSshPubKey $ pairMsgData msg
setupAuthorizedKeys msg repodir = case validateSshPubKey $ remoteSshPubKey $ pairMsgData msg of
Left err -> error err
Right pubkey ->
unlessM (liftIO $ addAuthorizedKeys True repodir pubkey) $
error "failed setting up ssh authorized keys"
{- When local pairing is complete, this is used to set up the remote for
- the host we paired with. -}

View file

@ -4,7 +4,7 @@
- each message is repeated until acknowledged. This is done using a
- thread, that gets stopped before the next message is sent.
-
- Copyright 2012 Joey Hess <joey@kitenet.net>
- Copyright 2012 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -20,7 +20,6 @@ import Utility.Verifiable
import Network.Multicast
import Network.Info
import Network.Socket
import Control.Exception (bracket)
import qualified Data.Map as M
import Control.Concurrent

View file

@ -1,6 +1,6 @@
{- git-annex assistant push tracking
-
- Copyright 2012 Joey Hess <joey@kitenet.net>
- Copyright 2012 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}

View file

@ -0,0 +1,21 @@
{- git-annex assistant RemoteDaemon control
-
- Copyright 2014 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
module Assistant.RemoteControl (
sendRemoteControl,
RemoteDaemon.Consumed(..)
) where
import Assistant.Common
import qualified RemoteDaemon.Types as RemoteDaemon
import Control.Concurrent
sendRemoteControl :: RemoteDaemon.Consumed -> Assistant ()
sendRemoteControl msg = do
clicker <- getAssistant remoteControl
liftIO $ writeChan clicker msg

View file

@ -1,6 +1,6 @@
{- git-annex assistant repository repair
-
- Copyright 2013 Joey Hess <joey@kitenet.net>
- Copyright 2013 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU AGPL version 3 or higher.
-}
@ -63,7 +63,7 @@ runRepair u mrmt destructiverepair = do
return ok
where
localrepair fsckresults = do
localrepair fsckresults = do
-- Stop the watcher from running while running repairs.
changeSyncable Nothing False
@ -140,9 +140,8 @@ repairStaleGitLocks r = do
repairStaleLocks :: [FilePath] -> Assistant ()
repairStaleLocks lockfiles = go =<< getsizes
where
getsize lf = catchMaybeIO $
(\s -> (lf, fileSize s)) <$> getFileStatus lf
getsizes = liftIO $ catMaybes <$> mapM getsize lockfiles
getsize lf = catchMaybeIO $ (\s -> (lf, s)) <$> getFileSize lf
getsizes = liftIO $ catMaybes <$> mapM getsize lockfiles
go [] = return ()
go l = ifM (liftIO $ null <$> Lsof.query ("--" : map fst l))
( do

View file

@ -1,6 +1,6 @@
{- git-annex assistant remote problem handling
-
- Copyright 2013 Joey Hess <joey@kitenet.net>
- Copyright 2013 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}

View file

@ -1,6 +1,6 @@
{- git-annex assistant restarting
-
- Copyright 2013 Joey Hess <joey@kitenet.net>
- Copyright 2013 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU AGPL version 3 or higher.
-}
@ -24,13 +24,11 @@ import qualified Annex
import qualified Git
import Control.Concurrent
import System.Process (cwd)
#ifndef mingw32_HOST_OS
import System.Posix (signalProcess, sigTERM)
#else
import Utility.WinProcess
#endif
import Data.Default
import Network.URI
{- Before the assistant can be restarted, have to remove our
@ -54,6 +52,10 @@ postRestart url = do
liftIO . sendNotification . globalRedirNotifier =<< getDaemonStatus
void $ liftIO $ forkIO $ do
threadDelaySeconds (Seconds 120)
terminateSelf
terminateSelf :: IO ()
terminateSelf =
#ifndef mingw32_HOST_OS
signalProcess sigTERM =<< getPID
#else
@ -93,7 +95,7 @@ newAssistantUrl repo = do
- warp-tls listens to http, in order to show an error page, so this works.
-}
assistantListening :: URLString -> IO Bool
assistantListening url = catchBoolIO $ fst <$> exists url' def
assistantListening url = catchBoolIO $ exists url' def
where
url' = case parseURI url of
Nothing -> url

View file

@ -1,6 +1,6 @@
{- git-annex assistant remotes needing scanning
-
- Copyright 2012 Joey Hess <joey@kitenet.net>
- Copyright 2012 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}

View file

@ -1,6 +1,6 @@
{- git-annex assistant ssh utilities
-
- Copyright 2012-2013 Joey Hess <joey@kitenet.net>
- Copyright 2012-2013 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -92,7 +92,7 @@ parseSshUrl u
, sshCapabilities = []
}
where
(user, host) = if '@' `elem` userhost
(user, host) = if '@' `elem` userhost
then separate (== '@') userhost
else ("", userhost)
fromrsync s
@ -111,34 +111,26 @@ sshTranscript :: [String] -> (Maybe String) -> IO (String, Bool)
sshTranscript opts input = processTranscript "ssh" opts input
{- Ensure that the ssh public key doesn't include any ssh options, like
- command=foo, or other weirdness -}
validateSshPubKey :: SshPubKey -> IO ()
- command=foo, or other weirdness.
-
- The returned version of the key has its comment removed.
-}
validateSshPubKey :: SshPubKey -> Either String SshPubKey
validateSshPubKey pubkey
| length (lines pubkey) == 1 =
either error return $ check $ words pubkey
| otherwise = error "too many lines in ssh public key"
| length (lines pubkey) == 1 = check $ words pubkey
| otherwise = Left "too many lines in ssh public key"
where
check [prefix, _key, comment] = do
checkprefix prefix
checkcomment comment
check [prefix, _key] =
checkprefix prefix
check (prefix:key:_) = checkprefix prefix (unwords [prefix, key])
check _ = err "wrong number of words in ssh public key"
ok = Right ()
err msg = Left $ unwords [msg, pubkey]
checkprefix prefix
| ssh == "ssh" && all isAlphaNum keytype = ok
checkprefix prefix validpubkey
| ssh == "ssh" && all isAlphaNum keytype = Right validpubkey
| otherwise = err "bad ssh public key prefix"
where
(ssh, keytype) = separate (== '-') prefix
checkcomment comment = case filter (not . safeincomment) comment of
[] -> ok
badstuff -> err $ "bad comment in ssh public key (contains: \"" ++ badstuff ++ "\")"
safeincomment c = isAlphaNum c || c == '@' || c == '-' || c == '_' || c == '.'
addAuthorizedKeys :: Bool -> FilePath -> SshPubKey -> IO Bool
addAuthorizedKeys gitannexshellonly dir pubkey = boolSystem "sh"
[ Param "-c" , Param $ addAuthorizedKeysCommand gitannexshellonly dir pubkey ]
@ -197,7 +189,7 @@ authorizedKeysLine gitannexshellonly dir pubkey
- long perl script. -}
| otherwise = pubkey
where
limitcommand = "command=\"GIT_ANNEX_SHELL_DIRECTORY="++shellEscape dir++" ~/.ssh/git-annex-shell\",no-agent-forwarding,no-port-forwarding,no-X11-forwarding,no-pty "
limitcommand = "command=\"env GIT_ANNEX_SHELL_DIRECTORY="++shellEscape dir++" ~/.ssh/git-annex-shell\",no-agent-forwarding,no-port-forwarding,no-X11-forwarding,no-pty "
{- Generates a ssh key pair. -}
genSshKeyPair :: IO SshKeyPair
@ -260,7 +252,7 @@ setupSshKeyPair sshkeypair sshdata = do
fixSshKeyPairIdentitiesOnly :: IO ()
fixSshKeyPairIdentitiesOnly = changeUserSshConfig $ unlines . go [] . lines
where
go c [] = reverse c
go c [] = reverse c
go c (l:[])
| all (`isInfixOf` l) indicators = go (fixedline l:l:c) []
| otherwise = go (l:c) []
@ -268,7 +260,7 @@ fixSshKeyPairIdentitiesOnly = changeUserSshConfig $ unlines . go [] . lines
| all (`isInfixOf` l) indicators && not ("IdentitiesOnly" `isInfixOf` next) =
go (fixedline l:l:c) (next:rest)
| otherwise = go (l:c) (next:rest)
indicators = ["IdentityFile", "key.git-annex"]
indicators = ["IdentityFile", "key.git-annex"]
fixedline tmpl = takeWhile isSpace tmpl ++ "IdentitiesOnly yes"
{- Add StrictHostKeyChecking to any ssh config stanzas that were written
@ -312,7 +304,7 @@ setSshConfig sshdata config = do
{- This hostname is specific to a given repository on the ssh host,
- so it is based on the real hostname, the username, and the directory.
-
- The mangled hostname has the form "git-annex-realhostname-username_dir".
- The mangled hostname has the form "git-annex-realhostname-username_port_dir".
- The only use of "-" is to separate the parts shown; this is necessary
- to allow unMangleSshHostName to work. Any unusual characters in the
- username or directory are url encoded, except using "." rather than "%"
@ -324,6 +316,7 @@ mangleSshHostName sshdata = "git-annex-" ++ T.unpack (sshHostName sshdata)
where
extra = intercalate "_" $ map T.unpack $ catMaybes
[ sshUserName sshdata
, Just $ T.pack $ show $ sshPort sshdata
, Just $ sshDirectory sshdata
]
safe c
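The comment above describes the mangled hostname layout: after the real hostname, the username, port and directory are joined with "_", and unusual characters are encoded with "." instead of "%". A rough standalone sketch of that layout follows; it is an approximation for illustration, not the real mangleSshHostName.

import Data.Char (isAlphaNum, ord)
import Data.List (intercalate)
import Numeric (showHex)

-- Encode unusual characters as "." plus two hex digits ("%" is not usable
-- in a host name).
encodePart :: String -> String
encodePart = concatMap safe
  where
    safe c
        | isAlphaNum c = [c]
        | otherwise = '.' : pad (showHex (ord c) "")
    pad s = if length s < 2 then '0' : s else s

-- Approximate layout: real hostname, then username, port and directory
-- joined with "_".
mangle :: String -> String -> Int -> FilePath -> String
mangle host user port dir = "git-annex-" ++ host ++ "-" ++
    intercalate "_" [encodePart user, show port, encodePart dir]

main :: IO ()
main = putStrLn (mangle "example.com" "joey" 22 "annex/work")
-- git-annex-example.com-joey_22_annex.2fwork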

View file

@ -1,6 +1,6 @@
{- git-annex assistant repo syncing
-
- Copyright 2012 Joey Hess <joey@kitenet.net>
- Copyright 2012 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -15,6 +15,7 @@ import Assistant.Alert
import Assistant.Alert.Utility
import Assistant.DaemonStatus
import Assistant.ScanRemotes
import Assistant.RemoteControl
import qualified Command.Sync
import Utility.Parallel
import qualified Git
@ -95,7 +96,7 @@ reconnectRemotes notifypushes rs = void $ do
=<< fromMaybe [] . M.lookup (Remote.uuid r) . connectRemoteNotifiers
<$> getDaemonStatus
{- Updates the local sync branch, then pushes it to all remotes, in
{- Pushes the local sync branch to all remotes, in
- parallel, along with the git-annex branch. This is the same
- as "git annex sync", except in parallel, and will co-exist with use of
- "git annex sync".
@ -147,7 +148,6 @@ pushToRemotes' now notifypushes remotes = do
go _ _ _ _ [] = return [] -- no remotes, so nothing to do
go shouldretry (Just branch) g u rs = do
debug ["pushing to", show rs]
liftIO $ Command.Sync.updateBranch (Command.Sync.syncBranch branch) g
(succeeded, failed) <- liftIO $ inParallel (push g branch) rs
updatemap succeeded []
if null failed
@ -258,6 +258,7 @@ changeSyncable Nothing enable = do
changeSyncable (Just r) True = do
liftAnnex $ changeSyncFlag r True
syncRemote r
sendRemoteControl RELOAD
changeSyncable (Just r) False = do
liftAnnex $ changeSyncFlag r False
updateSyncRemotes

View file

@ -1,6 +1,6 @@
{- git-annex assistant commit thread
-
- Copyright 2012 Joey Hess <joey@kitenet.net>
- Copyright 2012 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -27,7 +27,6 @@ import qualified Utility.Lsof as Lsof
import qualified Utility.DirWatcher as DirWatcher
import Types.KeySource
import Config
import Annex.Exception
import Annex.Content
import Annex.Link
import Annex.CatFile
@ -35,6 +34,7 @@ import qualified Annex
import Utility.InodeCache
import Annex.Content.Direct
import qualified Command.Sync
import qualified Git.Branch
import Data.Time.Clock
import Data.Tuple.Utils
@ -50,6 +50,7 @@ commitThread = namedThread "Committer" $ do
delayadd <- liftAnnex $
maybe delayaddDefault (return . Just . Seconds)
=<< annexDelayAdd <$> Annex.getGitConfig
msg <- liftAnnex Command.Sync.commitMsg
waitChangeTime $ \(changes, time) -> do
readychanges <- handleAdds havelsof delayadd changes
if shouldCommit False time (length readychanges) readychanges
@ -60,7 +61,7 @@ commitThread = namedThread "Committer" $ do
, "changes"
]
void $ alertWhile commitAlert $
liftAnnex commitStaged
liftAnnex $ commitStaged msg
recordCommit
let numchanges = length readychanges
mapM_ checkChangeContent readychanges
@ -164,8 +165,8 @@ waitChangeTime a = waitchanges 0
-}
aftermaxcommit oldchanges = loop (30 :: Int)
where
loop 0 = continue oldchanges
loop n = do
loop 0 = continue oldchanges
loop n = do
liftAnnex noop -- ensure Annex state is free
liftIO $ threadDelaySeconds (Seconds 1)
changes <- getAnyChanges
@ -212,14 +213,18 @@ shouldCommit scanning now len changes
recentchanges = filter thissecond changes
timeDelta c = now `diffUTCTime` changeTime c
commitStaged :: Annex Bool
commitStaged = do
commitStaged :: String -> Annex Bool
commitStaged msg = do
{- This could fail if there's another commit being made by
- something else. -}
v <- tryAnnex Annex.Queue.flush
v <- tryNonAsync Annex.Queue.flush
case v of
Left _ -> return False
Right _ -> Command.Sync.commitStaged ""
Right _ -> do
ok <- Command.Sync.commitStaged Git.Branch.AutomaticCommit msg
when ok $
Command.Sync.updateSyncBranch =<< inRepo Git.Branch.current
return ok
{- OSX needs a short delay after a file is added before locking it down,
- when using a non-direct mode repository, as pasting a file seems to
@ -297,7 +302,7 @@ handleAdds havelsof delayadd cs = returnWhen (null incomplete) $ do
add change@(InProcessAddChange { keySource = ks }) =
catchDefaultIO Nothing <~> doadd
where
doadd = sanitycheck ks $ do
doadd = sanitycheck ks $ do
(mkey, mcache) <- liftAnnex $ do
showStart "add" $ keyFilename ks
Command.Add.ingest $ Just ks
@ -313,10 +318,11 @@ handleAdds havelsof delayadd cs = returnWhen (null incomplete) $ do
adddirect toadd = do
ct <- liftAnnex compareInodeCachesWith
m <- liftAnnex $ removedKeysMap ct cs
delta <- liftAnnex getTSDelta
if M.null m
then forM toadd add
else forM toadd $ \c -> do
mcache <- liftIO $ genInodeCache $ changeFile c
mcache <- liftIO $ genInodeCache (changeFile c) delta
case mcache of
Nothing -> add c
Just cache ->
@ -347,7 +353,7 @@ handleAdds havelsof delayadd cs = returnWhen (null incomplete) $ do
done change mcache file key = liftAnnex $ do
logStatus key InfoPresent
link <- ifM isDirect
( inRepo $ gitAnnexLink file key
( calcRepo $ gitAnnexLink file key
, Command.Add.link file key mcache
)
whenM (pure DirWatcher.eventsCoalesce <||> isDirect) $

View file

@ -1,6 +1,6 @@
{- git-annex assistant config monitor thread
-
- Copyright 2012 Joey Hess <joey@kitenet.net>
- Copyright 2012 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -62,15 +62,17 @@ configFilesActions =
, (groupLog, void $ liftAnnex groupMapLoad)
, (numcopiesLog, void $ liftAnnex globalNumCopiesLoad)
, (scheduleLog, void updateScheduleLog)
-- Preferred content settings depend on most of the other configs,
-- so will be reloaded whenever any configs change.
-- Preferred and required content settings depend on most of the
-- other configs, so will be reloaded whenever any configs change.
, (preferredContentLog, noop)
, (requiredContentLog, noop)
, (groupPreferredContentLog, noop)
]
reloadConfigs :: Configs -> Assistant ()
reloadConfigs changedconfigs = do
sequence_ as
void $ liftAnnex preferredContentMapLoad
void $ liftAnnex preferredRequiredMapsLoad
{- Changes to the remote log, or the trust log, can affect the
- syncRemotes list. Changes to the uuid log may affect its
- display so are also included. -}

View file

@ -1,6 +1,6 @@
{- git-annex assistant scheduled jobs runner
-
- Copyright 2013 Joey Hess <joey@kitenet.net>
- Copyright 2013 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -87,7 +87,7 @@ cronnerThread urlrenderer = namedThreadUnchecked "Cronner" $ do
liftIO $ waitNotification h
debug ["reloading changed activities"]
go h amap' nmap'
startactivities as lastruntimes = forM as $ \activity ->
startactivities as lastruntimes = forM as $ \activity ->
case connectActivityUUID activity of
Nothing -> do
runner <- asIO2 (sleepingActivityThread urlrenderer)
@ -108,8 +108,8 @@ cronnerThread urlrenderer = namedThreadUnchecked "Cronner" $ do
sleepingActivityThread :: UrlRenderer -> ScheduledActivity -> Maybe LocalTime -> Assistant ()
sleepingActivityThread urlrenderer activity lasttime = go lasttime =<< getnexttime lasttime
where
getnexttime = liftIO . nextTime schedule
go _ Nothing = debug ["no scheduled events left for", desc]
getnexttime = liftIO . nextTime schedule
go _ Nothing = debug ["no scheduled events left for", desc]
go l (Just (NextTimeExactly t)) = waitrun l t Nothing
go l (Just (NextTimeWindow windowstart windowend)) =
waitrun l windowstart (Just windowend)
@ -129,7 +129,7 @@ sleepingActivityThread urlrenderer activity lasttime = go lasttime =<< getnextti
go l =<< getnexttime l
else run nowt
where
tolate nowt tz = case mmaxt of
tolate nowt tz = case mmaxt of
Just maxt -> nowt > maxt
-- allow the job to start 10 minutes late
Nothing -> diffUTCTime
@ -191,10 +191,10 @@ runActivity' urlrenderer (ScheduledSelfFsck _ d) = do
mapM_ reget =<< liftAnnex (dirKeys gitAnnexBadDir)
where
reget k = queueTransfers "fsck found bad file; redownloading" Next k Nothing Download
runActivity' urlrenderer (ScheduledRemoteFsck u s d) = handle =<< liftAnnex (remoteFromUUID u)
runActivity' urlrenderer (ScheduledRemoteFsck u s d) = dispatch =<< liftAnnex (remoteFromUUID u)
where
handle Nothing = debug ["skipping remote fsck of uuid without a configured remote", fromUUID u, fromSchedule s]
handle (Just rmt) = void $ case Remote.remoteFsck rmt of
dispatch Nothing = debug ["skipping remote fsck of uuid without a configured remote", fromUUID u, fromSchedule s]
dispatch (Just rmt) = void $ case Remote.remoteFsck rmt of
Nothing -> go rmt $ do
program <- readProgramFile
void $ batchCommand program $

View file

@ -1,6 +1,6 @@
{- git-annex assistant daemon status thread
-
- Copyright 2012 Joey Hess <joey@kitenet.net>
- Copyright 2012 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}

View file

@ -1,6 +1,6 @@
{- git-annex assistant Amazon Glacier retrieval
-
- Copyright 2012 Joey Hess <joey@kitenet.net>
- Copyright 2012 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}

View file

@ -1,6 +1,6 @@
{- git-annex assistant git merge thread
-
- Copyright 2012 Joey Hess <joey@kitenet.net>
- Copyright 2012 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -78,12 +78,13 @@ onChange file
changedbranch = fileToBranch file
mergecurrent (Just current)
| equivBranches changedbranch current = do
debug
[ "merging", Git.fromRef changedbranch
, "into", Git.fromRef current
]
void $ liftAnnex $ autoMergeFrom changedbranch (Just current)
| equivBranches changedbranch current =
whenM (liftAnnex $ inRepo $ Git.Branch.changed current changedbranch) $ do
debug
[ "merging", Git.fromRef changedbranch
, "into", Git.fromRef current
]
void $ liftAnnex $ autoMergeFrom changedbranch (Just current) Git.Branch.AutomaticCommit
mergecurrent _ = noop
handleDesynced = case fromTaggedBranch changedbranch of

View file

@ -1,6 +1,6 @@
{- git-annex assistant mount watcher, using either dbus or mtab polling
-
- Copyright 2012 Joey Hess <joey@kitenet.net>
- Copyright 2012 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -63,7 +63,11 @@ dbusThread urlrenderer = do
wasmounted <- liftIO $ swapMVar mvar nowmounted
handleMounts urlrenderer wasmounted nowmounted
liftIO $ forM_ mountChanged $ \matcher ->
#if MIN_VERSION_dbus(0,10,7)
void $ addMatch client matcher handleevent
#else
listen client matcher handleevent
#endif
, do
liftAnnex $
warning "No known volume monitor available through dbus; falling back to mtab polling"

View file

@ -1,6 +1,6 @@
{- git-annex assistant network connection watcher, using dbus
-
- Copyright 2012 Joey Hess <joey@kitenet.net>
- Copyright 2012 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -18,10 +18,10 @@ import Assistant.DaemonStatus
import Utility.NotificationBroadcaster
#if WITH_DBUS
import Assistant.RemoteControl
import Utility.DBus
import DBus.Client
import DBus
import Data.Word (Word32)
import Assistant.NetMessager
#else
#ifdef linux_HOST_OS
@ -44,8 +44,9 @@ netWatcherThread = thread noop
- while (despite the local network staying up), are synced with
- periodically.
-
- Note that it does not call notifyNetMessagerRestart, because
- it doesn't know that the network has changed.
- Note that it does not call notifyNetMessagerRestart, or
- signal the RemoteControl, because it doesn't know that the
- network has changed.
-}
netWatcherFallbackThread :: NamedThread
netWatcherFallbackThread = namedThread "NetWatcherFallback" $
@ -61,16 +62,22 @@ dbusThread = do
where
go client = ifM (checkNetMonitor client)
( do
listenNMConnections client <~> handleconn
listenWicdConnections client <~> handleconn
callback <- asIO1 connchange
liftIO $ do
listenNMConnections client callback
listenWicdConnections client callback
, do
liftAnnex $
warning "No known network monitor available through dbus; falling back to polling"
)
handleconn = do
connchange False = do
debug ["detected network disconnection"]
sendRemoteControl LOSTNET
connchange True = do
debug ["detected network connection"]
notifyNetMessagerRestart
handleConnection
sendRemoteControl RESUME
onerr e _ = do
liftAnnex $
warning $ "lost dbus connection; falling back to polling (" ++ show e ++ ")"
@ -95,38 +102,75 @@ checkNetMonitor client = do
networkmanager = "org.freedesktop.NetworkManager"
wicd = "org.wicd.daemon"
{- Listens for new NetworkManager connections. -}
listenNMConnections :: Client -> IO () -> IO ()
listenNMConnections client callback =
listen client matcher $ \event ->
when (Just True == anyM activeconnection (signalBody event)) $
callback
{- Listens for NetworkManager connections and disconnections.
-
- Connection example (once fully connected):
- [Variant {"ActivatingConnection": Variant (ObjectPath "/"), "PrimaryConnection": Variant (ObjectPath "/org/freedesktop/NetworkManager/ActiveConnection/34"), "State": Variant 70}]
-
- Disconnection example:
- [Variant {"ActiveConnections": Variant []}]
-}
listenNMConnections :: Client -> (Bool -> IO ()) -> IO ()
listenNMConnections client setconnected =
#if MIN_VERSION_dbus(0,10,7)
void $ addMatch client matcher
#else
listen client matcher
#endif
$ \event -> mapM_ handleevent
(map dictionaryItems $ mapMaybe fromVariant $ signalBody event)
where
matcher = matchAny
{ matchInterface = Just "org.freedesktop.NetworkManager.Connection.Active"
{ matchInterface = Just "org.freedesktop.NetworkManager"
, matchMember = Just "PropertiesChanged"
}
nm_connection_activated = toVariant (2 :: Word32)
nm_state_key = toVariant ("State" :: String)
activeconnection v = do
m <- fromVariant v
vstate <- lookup nm_state_key $ dictionaryItems m
state <- fromVariant vstate
return $ state == nm_connection_activated
nm_active_connections_key = toVariant ("ActiveConnections" :: String)
nm_activatingconnection_key = toVariant ("ActivatingConnection" :: String)
noconnections = Just $ toVariant $ toVariant ([] :: [ObjectPath])
rootconnection = Just $ toVariant $ toVariant $ objectPath_ "/"
handleevent m
| lookup nm_active_connections_key m == noconnections =
setconnected False
| lookup nm_activatingconnection_key m == rootconnection =
setconnected True
| otherwise = noop
{- Listens for new Wicd connections. -}
listenWicdConnections :: Client -> IO () -> IO ()
listenWicdConnections client callback =
listen client matcher $ \event ->
{- Listens for Wicd connections and disconnections.
-
- Connection example:
- ConnectResultsSent:
- Variant "success"
-
- Disconnection example:
- StatusChanged
- [Variant 0, Variant [Variant ""]]
-}
listenWicdConnections :: Client -> (Bool -> IO ()) -> IO ()
listenWicdConnections client setconnected = do
match connmatcher $ \event ->
when (any (== wicd_success) (signalBody event)) $
callback
setconnected True
match statusmatcher $ \event -> handleevent (signalBody event)
where
matcher = matchAny
connmatcher = matchAny
{ matchInterface = Just "org.wicd.daemon"
, matchMember = Just "ConnectResultsSent"
}
statusmatcher = matchAny
{ matchInterface = Just "org.wicd.daemon"
, matchMember = Just "StatusChanged"
}
wicd_success = toVariant ("success" :: String)
wicd_disconnected = toVariant [toVariant ("" :: String)]
handleevent status
| any (== wicd_disconnected) status = setconnected False
| otherwise = noop
match matcher a =
#if MIN_VERSION_dbus(0,10,7)
void $ addMatch client matcher a
#else
listen client matcher a
#endif
#endif
handleConnection :: Assistant ()

View file

@ -1,6 +1,6 @@
{- git-annex assistant thread to listen for incoming pairing traffic
-
- Copyright 2012 Joey Hess <joey@kitenet.net>
- Copyright 2012 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -16,13 +16,11 @@ import Assistant.WebApp.Types
import Assistant.Alert
import Assistant.DaemonStatus
import Utility.ThreadScheduler
import Utility.Format
import Git
import Network.Multicast
import Network.Socket
import qualified Data.Text as T
import Data.Char
pairListenerThread :: UrlRenderer -> NamedThread
pairListenerThread urlrenderer = namedThread "PairListener" $ do
@ -39,16 +37,18 @@ pairListenerThread urlrenderer = namedThread "PairListener" $ do
Nothing -> go reqs cache sock
Just m -> do
debug ["received", show msg]
sane <- checkSane msg
(pip, verified) <- verificationCheck m
=<< (pairingInProgress <$> getDaemonStatus)
let wrongstage = maybe False (\p -> pairMsgStage m <= inProgressPairStage p) pip
let fromus = maybe False (\p -> remoteSshPubKey (pairMsgData m) == remoteSshPubKey (inProgressPairData p)) pip
case (wrongstage, fromus, sane, pairMsgStage m) of
case (wrongstage, fromus, checkSane (pairMsgData m), pairMsgStage m) of
(_, True, _, _) -> do
debug ["ignoring message that looped back"]
go reqs cache sock
(_, _, False, _) -> go reqs cache sock
(_, _, False, _) -> do
liftAnnex $ warning
"illegal control characters in pairing message; ignoring"
go reqs cache sock
-- PairReq starts a pairing process, so a
-- new one is always heeded, even if
-- some other pairing is in process.
@ -83,19 +83,10 @@ pairListenerThread urlrenderer = namedThread "PairListener" $ do
"detected possible pairing brute force attempt; disabled pairing"
stopSending pip
return (Nothing, False)
|otherwise = return (Just pip, verified && sameuuid)
| otherwise = return (Just pip, verified && sameuuid)
where
verified = verifiedPairMsg m pip
sameuuid = pairUUID (inProgressPairData pip) == pairUUID (pairMsgData m)
checkSane msg
{- Control characters could be used in a
- console poisoning attack. -}
| any isControl (filter (/= '\n') (decode_c msg)) = do
liftAnnex $ warning
"illegal control characters in pairing message; ignoring"
return False
| otherwise = return True
{- PairReqs invalidate the cache of recently finished pairings.
- This is so that, if a new pairing is started with the

View file

@ -1,6 +1,6 @@
{- git-annex assistant thread to handle fixing problems with repositories
-
- Copyright 2013 Joey Hess <joey@kitenet.net>
- Copyright 2013 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}

View file

@ -1,6 +1,6 @@
{- git-annex assistant git pushing thread
-
- Copyright 2012 Joey Hess <joey@kitenet.net>
- Copyright 2012 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}

View file

@ -0,0 +1,121 @@
{- git-annex assistant communication with remotedaemon
-
- Copyright 2014 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
module Assistant.Threads.RemoteControl where
import Assistant.Common
import RemoteDaemon.Types
import Config.Files
import Utility.Batch
import Utility.SimpleProtocol
import Assistant.Alert
import Assistant.Alert.Utility
import Assistant.DaemonStatus
import qualified Git
import qualified Git.Types as Git
import qualified Remote
import qualified Types.Remote as Remote
import Control.Concurrent
import Control.Concurrent.Async
import Network.URI
import qualified Data.Map as M
import qualified Data.Set as S
remoteControlThread :: NamedThread
remoteControlThread = namedThread "RemoteControl" $ do
program <- liftIO readProgramFile
(cmd, params) <- liftIO $ toBatchCommand
(program, [Param "remotedaemon"])
let p = proc cmd (toCommand params)
(Just toh, Just fromh, _, pid) <- liftIO $ createProcess p
{ std_in = CreatePipe
, std_out = CreatePipe
}
urimap <- liftIO . newMVar =<< liftAnnex getURIMap
controller <- asIO $ remoteControllerThread toh
responder <- asIO $ remoteResponderThread fromh urimap
-- run controller and responder until the remotedaemon dies
liftIO $ void $ tryNonAsync $ controller `concurrently` responder
debug ["remotedaemon exited"]
liftIO $ forceSuccessProcess p pid
-- feed from the remoteControl channel into the remotedaemon
remoteControllerThread :: Handle -> Assistant ()
remoteControllerThread toh = do
clicker <- getAssistant remoteControl
forever $ do
msg <- liftIO $ readChan clicker
debug [show msg]
liftIO $ do
hPutStrLn toh $ unwords $ formatMessage msg
hFlush toh
-- read status messages emitted by the remotedaemon and handle them
remoteResponderThread :: Handle -> MVar (M.Map URI Remote) -> Assistant ()
remoteResponderThread fromh urimap = go M.empty
where
go syncalerts = do
l <- liftIO $ hGetLine fromh
debug [l]
case parseMessage l of
Just (CONNECTED uri) -> changeconnected S.insert uri
Just (DISCONNECTED uri) -> changeconnected S.delete uri
Just (SYNCING uri) -> withr uri $ \r ->
if M.member (Remote.uuid r) syncalerts
then go syncalerts
else do
i <- addAlert $ syncAlert [r]
go (M.insert (Remote.uuid r) i syncalerts)
Just (DONESYNCING uri status) -> withr uri $ \r ->
case M.lookup (Remote.uuid r) syncalerts of
Nothing -> cont
Just i -> do
let (succeeded, failed) = if status
then ([r], [])
else ([], [r])
updateAlertMap $ mergeAlert i $
syncResultAlert succeeded failed
go (M.delete (Remote.uuid r) syncalerts)
Just (WARNING (RemoteURI uri) msg) -> do
void $ addAlert $
warningAlert ("RemoteControl "++ show uri) msg
cont
Nothing -> do
debug ["protocol error from remotedaemon: ", l]
cont
where
cont = go syncalerts
withr uri = withRemote uri urimap cont
changeconnected sm uri = withr uri $ \r -> do
changeCurrentlyConnected $ sm $ Remote.uuid r
cont
getURIMap :: Annex (M.Map URI Remote)
getURIMap = Remote.remoteMap' id (mkk . Git.location . Remote.repo)
where
mkk (Git.Url u) = Just u
mkk _ = Nothing
withRemote
:: RemoteURI
-> MVar (M.Map URI Remote)
-> Assistant a
-> (Remote -> Assistant a)
-> Assistant a
withRemote (RemoteURI uri) remotemap noremote a = do
m <- liftIO $ readMVar remotemap
case M.lookup uri m of
Just r -> a r
Nothing -> do
{- Reload map, in case a new remote has been added. -}
m' <- liftAnnex getURIMap
void $ liftIO $ swapMVar remotemap $ m'
maybe noremote a (M.lookup uri m')
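The responder thread above reads line-based status messages from the remotedaemon and turns them into alerts and connection-state updates. Below is a standalone sketch of parsing such a status line; the wire format shown (a keyword plus space-separated fields) is an assumption for illustration, since the real definitions live in Utility.SimpleProtocol and RemoteDaemon.Types.

-- Status messages the responder reacts to, reduced to the fields used above.
data Status
    = Connected String
    | Disconnected String
    | Syncing String
    | DoneSyncing String Bool
    deriving (Show)

-- Parse one line of the (assumed) keyword-plus-fields wire format.
parseStatus :: String -> Maybe Status
parseStatus l = case words l of
    ["CONNECTED", uri] -> Just (Connected uri)
    ["DISCONNECTED", uri] -> Just (Disconnected uri)
    ["SYNCING", uri] -> Just (Syncing uri)
    ["DONESYNCING", uri, ok] -> DoneSyncing uri <$> parseBool ok
    _ -> Nothing
  where
    parseBool "1" = Just True
    parseBool "0" = Just False
    parseBool _ = Nothing

main :: IO ()
main = mapM_ (print . parseStatus)
    [ "CONNECTED ssh://example.com/annex"
    , "DONESYNCING ssh://example.com/annex 1"
    , "bogus line"
    ]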

View file

@ -1,6 +1,6 @@
{- git-annex assistant sanity checker
-
- Copyright 2012, 2013 Joey Hess <joey@kitenet.net>
- Copyright 2012, 2013 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -21,9 +21,11 @@ import Assistant.Drop
import Assistant.Ssh
import Assistant.TransferQueue
import Assistant.Types.UrlRenderer
import Assistant.Restart
import qualified Annex.Branch
import qualified Git
import qualified Git.LsFiles
import qualified Git.Command
import qualified Git.Command.Batch
import qualified Git.Config
import Utility.ThreadScheduler
import qualified Assistant.Threads.Watcher as Watcher
@ -38,13 +40,14 @@ import Assistant.Unused
import Logs.Unused
import Logs.Transfer
import Config.Files
import Utility.DiskFree
import Types.Key (keyBackendName)
import qualified Annex
#ifdef WITH_WEBAPP
import Assistant.WebApp.Types
#endif
#ifndef mingw32_HOST_OS
import Utility.LogFile
import Utility.DiskFree
#endif
import Data.Time.Clock.POSIX
@ -82,6 +85,11 @@ sanityCheckerStartupThread startupdelay = namedThreadUnchecked "SanityCheckerSta
{- Fix up ssh remotes set up by past versions of the assistant. -}
liftIO $ fixUpSshRemotes
{- Clean up old temp files. -}
void $ liftAnnex $ tryNonAsync $ do
cleanOldTmpMisc
cleanReallyOldTmp
{- If there's a startup delay, it's done here. -}
liftIO $ maybe noop (threadDelaySeconds . Seconds . fromIntegral . durationSeconds) startupdelay
@ -140,6 +148,8 @@ waitForNextCheck = do
- will block the watcher. -}
dailyCheck :: UrlRenderer -> Assistant Bool
dailyCheck urlrenderer = do
checkRepoExists
g <- liftAnnex gitRepo
batchmaker <- liftIO getBatchCommandMaker
@ -160,7 +170,7 @@ dailyCheck urlrenderer = do
- to have a lot of small objects and they should not be a
- significant size. -}
when (Git.Config.getMaybe "gc.auto" g == Just "0") $
liftIO $ void $ Git.Command.runBatch batchmaker
liftIO $ void $ Git.Command.Batch.run batchmaker
[ Param "-c", Param "gc.auto=670000"
, Param "gc"
, Param "--auto"
@ -197,6 +207,7 @@ dailyCheck urlrenderer = do
hourlyCheck :: Assistant ()
hourlyCheck = do
checkRepoExists
#ifndef mingw32_HOST_OS
checkLogSize 0
#else
@ -214,10 +225,10 @@ checkLogSize :: Int -> Assistant ()
checkLogSize n = do
f <- liftAnnex $ fromRepo gitAnnexLogFile
logs <- liftIO $ listLogs f
totalsize <- liftIO $ sum <$> mapM filesize logs
totalsize <- liftIO $ sum <$> mapM getFileSize logs
when (totalsize > 2 * oneMegabyte) $ do
notice ["Rotated logs due to size:", show totalsize]
liftIO $ openLog f >>= redirLog
liftIO $ openLog f >>= handleToFd >>= redirLog
when (n < maxLogs + 1) $ do
df <- liftIO $ getDiskFree $ takeDirectory f
case df of
@ -226,9 +237,7 @@ checkLogSize n = do
checkLogSize (n + 1)
_ -> noop
where
filesize f = fromIntegral . fileSize <$> liftIO (getFileStatus f)
oneMegabyte :: Int
oneMegabyte :: Integer
oneMegabyte = 1000000
#endif
@ -247,7 +256,7 @@ checkOldUnused :: UrlRenderer -> Assistant ()
checkOldUnused urlrenderer = go =<< annexExpireUnused <$> liftAnnex Annex.getGitConfig
where
go (Just Nothing) = noop
go (Just (Just expireunused)) = expireUnused (Just expireunused)
go Nothing = maybe noop prompt =<< describeUnusedWhenBig
prompt msg =
@ -258,3 +267,61 @@ checkOldUnused urlrenderer = go =<< annexExpireUnused <$> liftAnnex Annex.getGit
#else
debug [show $ renderTense Past msg]
#endif
{- Files may be left in misctmp by eg, an interrupted add of files
- by the assistant, which hard links files there as part of lockdown
- checks. Delete these files if they're more than a day old.
-
- Note that this is not safe to run after the Watcher starts up, since it
- will create such files, and due to hard linking they may have old
- mtimes. So, this should only be called from the
- sanityCheckerStartupThread, which runs before the Watcher starts up.
-
- Also, if a git-annex add is being run at the same time the assistant
- starts up, its tmp files could be deleted. However, the watcher will
- come along and add everything once it starts up anyway, so at worst
- this would make the git-annex add fail unexpectedly.
-}
cleanOldTmpMisc :: Annex ()
cleanOldTmpMisc = do
now <- liftIO getPOSIXTime
let oldenough = now - (60 * 60 * 24)
tmp <- fromRepo gitAnnexTmpMiscDir
liftIO $ mapM_ (cleanOld (<= oldenough)) =<< dirContentsRecursive tmp
{- While .git/annex/tmp is now only used for storing partially transferred
- objects, older versions of git-annex used it for misctemp. Clean up any
- files that might be left from that, by looking for files whose names
cannot be the key of an annexed object. Only delete files more than
a week old.
-
- Also, some remotes such as rsync may use this temp directory for storing
- eg, encrypted objects that are being transferred. So, delete old
- objects that use a GPGHMAC backend.
-}
cleanReallyOldTmp :: Annex ()
cleanReallyOldTmp = do
now <- liftIO getPOSIXTime
let oldenough = now - (60 * 60 * 24 * 7)
tmp <- fromRepo gitAnnexTmpObjectDir
liftIO $ mapM_ (cleanjunk (<= oldenough)) =<< dirContentsRecursive tmp
where
cleanjunk check f = case fileKey (takeFileName f) of
Nothing -> cleanOld check f
Just k
| "GPGHMAC" `isPrefixOf` keyBackendName k ->
cleanOld check f
| otherwise -> noop
cleanOld :: (POSIXTime -> Bool) -> FilePath -> IO ()
cleanOld check f = go =<< catchMaybeIO getmtime
where
getmtime = realToFrac . modificationTime <$> getSymbolicLinkStatus f
go (Just mtime) | check mtime = nukeFile f
go _ = noop
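-- The assistant cannot do anything useful if its repository has been
-- deleted out from under it, so terminate.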
checkRepoExists :: Assistant ()
checkRepoExists = do
g <- liftAnnex gitRepo
liftIO $ unlessM (doesDirectoryExist $ Git.repoPath g) $
terminateSelf

View file

@ -1,6 +1,6 @@
{- git-annex assistant transfer polling thread
-
- Copyright 2012 Joey Hess <joey@kitenet.net>
- Copyright 2012 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -36,8 +36,7 @@ transferPollerThread = namedThread "TransferPoller" $ do
- temp file being used for the transfer. -}
| transferDirection t == Download = do
let f = gitAnnexTmpObjectLocation (transferKey t) g
sz <- liftIO $ catchMaybeIO $
fromIntegral . fileSize <$> getFileStatus f
sz <- liftIO $ catchMaybeIO $ getFileSize f
newsize t info sz
{- Uploads don't need to be polled for when the TransferWatcher
- thread can track file modifications. -}

View file

@ -1,6 +1,6 @@
{- git-annex assistant thread to scan remotes to find needed transfers
-
- Copyright 2012 Joey Hess <joey@kitenet.net>
- Copyright 2012 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -19,7 +19,6 @@ import Assistant.Types.UrlRenderer
import Logs.Transfer
import Logs.Location
import Logs.Group
import Logs.Web (webUUID)
import qualified Remote
import qualified Types.Remote as Remote
import Utility.ThreadScheduler
@ -115,7 +114,7 @@ failedTransferScan r = do
- since we need to look at the locations of all keys anyway.
-}
expensiveScan :: UrlRenderer -> [Remote] -> Assistant ()
expensiveScan urlrenderer rs = unless onlyweb $ batch <~> do
expensiveScan urlrenderer rs = batch <~> do
debug ["starting scan of", show visiblers]
let us = map Remote.uuid rs
@ -135,7 +134,6 @@ expensiveScan urlrenderer rs = unless onlyweb $ batch <~> do
remove <- asIO1 $ removableRemote urlrenderer
liftIO $ mapM_ (void . tryNonAsync . remove) $ S.toList removablers
where
onlyweb = all (== webUUID) $ map Remote.uuid rs
visiblers = let rs' = filter (not . Remote.readonly) rs
in if null rs' then rs else rs'
@ -151,7 +149,7 @@ expensiveScan urlrenderer rs = unless onlyweb $ batch <~> do
enqueue f (r, t) =
queueTransferWhenSmall "expensive scan found missing object"
(Just f) t r
findtransfers f unwanted (key, _) = do
findtransfers f unwanted key = do
{- The syncable remotes may have changed since this
- scan began. -}
syncrs <- syncDataRemotes <$> getDaemonStatus

View file

@ -1,6 +1,6 @@
{- git-annex assistant transfer watching thread
-
- Copyright 2012 Joey Hess <joey@kitenet.net>
- Copyright 2012 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}

View file

@ -1,6 +1,6 @@
{- git-annex assistant data transferrer thread
-
- Copyright 2012 Joey Hess <joey@kitenet.net>
- Copyright 2012 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}

View file

@ -1,6 +1,6 @@
{- git-annex assistant thread to detect when git-annex is upgraded
-
- Copyright 2013 Joey Hess <joey@kitenet.net>
- Copyright 2013 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -51,9 +51,9 @@ upgradeWatcherThread urlrenderer = namedThread "UpgradeWatcher" $ do
let depth = length (splitPath dir) + 1
let nosubdirs f = length (splitPath f) == depth
void $ liftIO $ watchDir dir nosubdirs False hooks (startup mvar)
-- Ignore bogus events generated during the startup scan.
-- We ask the watcher to not generate them, but just to be safe..
startup mvar scanner = do
r <- scanner
void $ swapMVar mvar Started
return r

View file

@ -1,6 +1,6 @@
{- git-annex assistant thread to detect when upgrade is available
-
- Copyright 2013 Joey Hess <joey@kitenet.net>
- Copyright 2013 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -18,11 +18,8 @@ import Assistant.Types.UrlRenderer
import Assistant.DaemonStatus
import Assistant.Alert
import Utility.NotificationBroadcaster
import Utility.Tmp
import qualified Annex
import qualified Build.SysConfig
import qualified Utility.Url as Url
import qualified Annex.Url as Url
import qualified Git.Version
import Types.Distribution
#ifdef WITH_WEBAPP
@ -42,7 +39,7 @@ upgraderThread urlrenderer = namedThread "Upgrader" $
h <- liftIO . newNotificationHandle False . networkConnectedNotifier =<< getDaemonStatus
go h =<< liftIO getCurrentTime
where
{- Wait for a network connection event. Then see if it's been
- half a day since the last upgrade check. If so, proceed with
- the check. -}
go h lastchecked = do
@ -62,7 +59,7 @@ upgraderThread urlrenderer = namedThread "Upgrader" $
checkUpgrade :: UrlRenderer -> Assistant ()
checkUpgrade urlrenderer = do
debug [ "Checking if an upgrade is available." ]
go =<< getDistributionInfo
go =<< downloadDistributionInfo
where
go Nothing = debug [ "Failed to check if upgrade is available." ]
go (Just d) = do
@ -86,16 +83,3 @@ canUpgrade urgency urlrenderer d = ifM autoUpgradeEnabled
noop
#endif
)
getDistributionInfo :: Assistant (Maybe GitAnnexDistribution)
getDistributionInfo = do
uo <- liftAnnex Url.getUrlOptions
liftIO $ withTmpFile "git-annex.tmp" $ \tmpfile h -> do
hClose h
ifM (Url.downloadQuiet distributionInfoUrl tmpfile uo)
( readish <$> readFileStrict tmpfile
, return Nothing
)
distributionInfoUrl :: String
distributionInfoUrl = fromJust Build.SysConfig.upgradelocation ++ ".info"

View file

@ -1,6 +1,6 @@
{- git-annex assistant tree watcher
-
- Copyright 2012-2013 Joey Hess <joey@kitenet.net>
- Copyright 2012-2013 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -35,6 +35,7 @@ import Annex.CatFile
import Annex.CheckIgnore
import Annex.Link
import Annex.FileMatcher
import Types.FileMatcher
import Annex.ReplaceFile
import Git.Types
import Config
@ -71,7 +72,7 @@ needLsof = error $ unlines
{- A special exception that can be thrown to pause or resume the watcher. -}
data WatcherControl = PauseWatcher | ResumeWatcher
deriving (Show, Eq, Typeable)
instance E.Exception WatcherControl
@ -103,13 +104,13 @@ runWatcher = do
, errHook = errhook
}
scanevents <- liftAnnex $ annexStartupScan <$> Annex.getGitConfig
handle <- liftIO $ watchDir "." ignored scanevents hooks startup
h <- liftIO $ watchDir "." ignored scanevents hooks startup
debug [ "watching", "."]
{- Let the DirWatcher thread run until signalled to pause it,
- then wait for a resume signal, and restart. -}
waitFor PauseWatcher $ do
liftIO $ stopWatchDir handle
liftIO $ stopWatchDir h
waitFor ResumeWatcher runWatcher
where
hook a = Just <$> asIO2 (runHandler a)
@ -183,7 +184,7 @@ runHandler :: Handler -> FilePath -> Maybe FileStatus -> Assistant ()
runHandler handler file filestatus = void $ do
r <- tryIO <~> handler (normalize file) filestatus
case r of
Left e -> liftIO $ print e
Left e -> liftIO $ warningIO $ show e
Right Nothing -> noop
Right (Just change) -> do
-- Just in case the commit thread is not
@ -191,12 +192,12 @@ runHandler handler file filestatus = void $ do
liftAnnex Annex.Queue.flushWhenFull
recordChange change
where
normalize f
| "./" `isPrefixOf` file = drop 2 f
| otherwise = f
{- Small files are added to git as-is, while large ones go into the annex. -}
add :: FileMatcher -> FilePath -> Assistant (Maybe Change)
add :: FileMatcher Annex -> FilePath -> Assistant (Maybe Change)
add bigfilematcher file = ifM (liftAnnex $ checkFileMatcher bigfilematcher file)
( pendingAddChange file
, do
@ -205,7 +206,7 @@ add bigfilematcher file = ifM (liftAnnex $ checkFileMatcher bigfilematcher file)
madeChange file AddFileChange
)
onAdd :: FileMatcher -> Handler
onAdd :: FileMatcher Annex -> Handler
onAdd matcher file filestatus
| maybe False isRegularFile filestatus =
unlessIgnored file $
@ -218,12 +219,12 @@ shouldRestage ds = scanComplete ds || forceRestage ds
{- In direct mode, add events are received for both new files, and
- modified existing files.
-}
onAddDirect :: Bool -> FileMatcher -> Handler
onAddDirect :: Bool -> FileMatcher Annex -> Handler
onAddDirect symlinkssupported matcher file fs = do
v <- liftAnnex $ catKeyFile file
case (v, fs) of
(Just key, Just filestatus) ->
ifM (liftAnnex $ sameFileStatus key filestatus)
ifM (liftAnnex $ sameFileStatus key file filestatus)
{- It's possible to get an add event for
- an existing file that is not
- really modified, but it might have
@ -231,7 +232,7 @@ onAddDirect symlinkssupported matcher file fs = do
- so its symlink is restaged to make sure. -}
( ifM (shouldRestage <$> getDaemonStatus)
( do
link <- liftAnnex $ inRepo $ gitAnnexLink file key
link <- liftAnnex $ calcRepo $ gitAnnexLink file key
addLink file link (Just key)
, noChange
)
@ -245,7 +246,7 @@ onAddDirect symlinkssupported matcher file fs = do
debug ["add direct", file]
add matcher file
where
{- On a filesystem without symlinks, we'll get changes for regular
- files that git uses to stand-in for symlinks. Detect when
- this happens, and stage the symlink, rather than annexing the
- file. -}
@ -270,15 +271,15 @@ onAddSymlink :: Bool -> Handler
onAddSymlink isdirect file filestatus = unlessIgnored file $ do
linktarget <- liftIO (catchMaybeIO $ readSymbolicLink file)
kv <- liftAnnex (Backend.lookupFile file)
onAddSymlink' linktarget (fmap fst kv) isdirect file filestatus
onAddSymlink' linktarget kv isdirect file filestatus
onAddSymlink' :: Maybe String -> Maybe Key -> Bool -> Handler
onAddSymlink' linktarget mk isdirect file filestatus = go mk
where
go (Just key) = do
when isdirect $
liftAnnex $ void $ addAssociatedFile key file
link <- liftAnnex $ inRepo $ gitAnnexLink file key
link <- liftAnnex $ calcRepo $ gitAnnexLink file key
if linktarget == Just link
then ensurestaged (Just link) =<< getDaemonStatus
else do

View file

@ -1,11 +1,12 @@
{- git-annex assistant webapp thread
-
- Copyright 2012-2014 Joey Hess <joey@kitenet.net>
- Copyright 2012-2014 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
{-# LANGUAGE TemplateHaskell, MultiParamTypeClasses #-}
{-# LANGUAGE ViewPatterns, OverloadedStrings #-}
{-# LANGUAGE CPP #-}
{-# OPTIONS_GHC -fno-warn-orphans #-}
@ -47,6 +48,8 @@ import Yesod
import Network.Socket (SockAddr, HostName)
import Data.Text (pack, unpack)
import qualified Network.Wai.Handler.WarpTLS as TLS
import Network.Wai.Middleware.RequestLogger
import System.Log.Logger
mkYesodDispatch "WebApp" $(parseRoutesFile "Assistant/WebApp/routes")
@ -83,7 +86,7 @@ webAppThread assistantdata urlrenderer noannex cannotrun postfirstrun listenhost
setUrlRenderer urlrenderer $ yesodRender webapp (pack "")
app <- toWaiAppPlain webapp
app' <- ifM debugEnabled
( return $ httpDebugLogger app
( return $ logStdout app
, return app
)
runWebApp tlssettings listenhost' app' $ \addr -> if noannex
@ -95,7 +98,7 @@ webAppThread assistantdata urlrenderer noannex cannotrun postfirstrun listenhost
urlfile <- getAnnex' $ fromRepo gitAnnexUrlFile
go tlssettings addr webapp htmlshim (Just urlfile)
where
-- The webapp thread does not wait for the startupSanityCheckThread
-- to finish, so that the user interface remains responsive while
-- that's going on.
thread = namedThreadUnchecked "WebApp"
@ -135,3 +138,9 @@ getTlsSettings = do
#else
return Nothing
#endif
{- Checks if debugging is actually enabled. -}
debugEnabled :: IO Bool
debugEnabled = do
l <- getRootLogger
return $ getLevel l <= Just DEBUG

View file

@ -1,6 +1,6 @@
{- git-annex XMPP client
-
- Copyright 2012, 2013 Joey Hess <joey@kitenet.net>
- Copyright 2012, 2013 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -42,17 +42,20 @@ xmppClientThread urlrenderer = namedThread "XMPPClient" $
restartableClient . xmppClient urlrenderer =<< getAssistant id
{- Runs the client, handling restart events. -}
restartableClient :: (XMPPCreds -> IO ()) -> Assistant ()
restartableClient :: (XMPPCreds -> UUID -> IO ()) -> Assistant ()
restartableClient a = forever $ go =<< liftAnnex getXMPPCreds
where
go Nothing = waitNetMessagerRestart
go (Just creds) = do
tid <- liftIO $ forkIO $ a creds
xmppuuid <- maybe NoUUID Remote.uuid . headMaybe
. filter Remote.isXMPPRemote . syncRemotes
<$> getDaemonStatus
tid <- liftIO $ forkIO $ a creds xmppuuid
waitNetMessagerRestart
liftIO $ killThread tid
xmppClient :: UrlRenderer -> AssistantData -> XMPPCreds -> IO ()
xmppClient urlrenderer d creds =
xmppClient :: UrlRenderer -> AssistantData -> XMPPCreds -> UUID -> IO ()
xmppClient urlrenderer d creds xmppuuid =
retry (runclient creds) =<< getCurrentTime
where
liftAssistant = runAssistant d
@ -68,8 +71,11 @@ xmppClient urlrenderer d creds =
liftAssistant $
updateBuddyList (const noBuddies) <<~ buddyList
void client
liftAssistant $ modifyDaemonStatus_ $ \s -> s
{ xmppClientID = Nothing }
liftAssistant $ do
modifyDaemonStatus_ $ \s -> s
{ xmppClientID = Nothing }
changeCurrentlyConnected $ S.delete xmppuuid
now <- getCurrentTime
if diffUTCTime now starttime > 300
then do
@ -87,6 +93,7 @@ xmppClient urlrenderer d creds =
inAssistant $ do
modifyDaemonStatus_ $ \s -> s
{ xmppClientID = Just $ xmppJID creds }
changeCurrentlyConnected $ S.insert xmppuuid
debug ["connected", logJid selfjid]
lasttraffic <- liftIO $ atomically . newTMVar =<< getCurrentTime
@ -110,7 +117,7 @@ xmppClient urlrenderer d creds =
void $ liftIO $ atomically . swapTMVar lasttraffic =<< getCurrentTime
inAssistant $ debug
["received:", show $ map logXMPPEvent l]
mapM_ (handle selfjid) l
mapM_ (handlemsg selfjid) l
sendpings selfjid lasttraffic = forever $ do
putStanza pingstanza
@ -124,23 +131,23 @@ xmppClient urlrenderer d creds =
{- XEP-0199 says that the server will respond with either
- a ping response or an error message. Either will
- cause traffic, so good enough. -}
pingstanza = xmppPing selfjid
handle selfjid (PresenceMessage p) = do
handlemsg selfjid (PresenceMessage p) = do
void $ inAssistant $
updateBuddyList (updateBuddies p) <<~ buddyList
resendImportantMessages selfjid p
handle _ (GotNetMessage QueryPresence) = putStanza gitAnnexSignature
handle _ (GotNetMessage (NotifyPush us)) = void $ inAssistant $ pull us
handle selfjid (GotNetMessage (PairingNotification stage c u)) =
handlemsg _ (GotNetMessage QueryPresence) = putStanza gitAnnexSignature
handlemsg _ (GotNetMessage (NotifyPush us)) = void $ inAssistant $ pull us
handlemsg selfjid (GotNetMessage (PairingNotification stage c u)) =
maybe noop (inAssistant . pairMsgReceived urlrenderer stage u selfjid) (parseJID c)
handle _ (GotNetMessage m@(Pushing _ pushstage))
handlemsg _ (GotNetMessage m@(Pushing _ pushstage))
| isPushNotice pushstage = inAssistant $ handlePushNotice m
| isPushInitiation pushstage = inAssistant $ queuePushInitiation m
| otherwise = inAssistant $ storeInbox m
handle _ (Ignorable _) = noop
handle _ (Unknown _) = noop
handle _ (ProtocolError _) = noop
handlemsg _ (Ignorable _) = noop
handlemsg _ (Unknown _) = noop
handlemsg _ (ProtocolError _) = noop
resendImportantMessages selfjid (Presence { presenceFrom = Just jid }) = do
let c = formatJID jid

View file

@ -9,7 +9,7 @@
- they would deadlock with only one thread. For larger numbers of
- clients, the two threads are also sufficient.
-
- Copyright 2013 Joey Hess <joey@kitenet.net>
- Copyright 2013 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -34,7 +34,7 @@ xmppReceivePackThread = pusherThread "XMPPReceivePack" ReceivePack
pusherThread :: String -> PushSide -> UrlRenderer -> NamedThread
pusherThread threadname side urlrenderer = namedThread threadname $ go Nothing
where
go lastpushedto = do
msg <- waitPushInitiation side $ selectNextPush lastpushedto
debug ["started running push", logNetMessage msg]
@ -78,4 +78,4 @@ selectNextPush lastpushedto l = go [] l
(Pushing clientid _)
| Just clientid /= lastpushedto -> (m, rejected ++ ms)
_ -> go (m:rejected) ms
go [] [] = undefined

Some files were not shown because too many files have changed in this diff