Merge branch 'master' into tasty-tests

Conflicts:
	Test.hs
Joey Hess 2013-11-14 17:04:58 -04:00
commit 2755c7f558
1642 changed files with 41666 additions and 10930 deletions

.gitignore

@ -1,3 +1,7 @@
tags
Setup
*.hi
*.o
tmp
test
build-stamp
@ -9,7 +13,10 @@ Build/OSXMkLibs
git-annex
git-annex.1
git-annex-shell.1
git-union-merge
git-union-merge.1
git-recover-repository
git-recover-repository.1
doc/.ikiwiki
html
*.tix
@ -22,7 +29,3 @@ cabal-dev
# OSX related
.DS_Store
.virthualenv
tags
Setup
*.hi
*.o

Annex.hs

@ -5,14 +5,13 @@
- Licensed under the GNU GPL version 3 or higher.
-}
{-# LANGUAGE GeneralizedNewtypeDeriving, TypeFamilies, MultiParamTypeClasses #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
module Annex (
Annex,
AnnexState(..),
PreferredContentMap,
new,
newState,
run,
eval,
getState,
@ -41,10 +40,12 @@ import Control.Concurrent
import Common
import qualified Git
import qualified Git.Config
import Git.Types hiding (remotes)
import Git.CatFile
import Git.CheckAttr
import Git.CheckIgnore
import Git.SharedRepository
import Git.Config
import qualified Git.Queue
import Types.Backend
import Types.GitConfig
@ -108,12 +109,13 @@ data AnnexState = AnnexState
, fields :: M.Map String String
, cleanup :: M.Map String (Annex ())
, inodeschanged :: Maybe Bool
, useragent :: Maybe String
}
newState :: Git.Repo -> AnnexState
newState gitrepo = AnnexState
{ repo = gitrepo
, gitconfig = extractGitConfig gitrepo
newState r = AnnexState
{ repo = if annexDirect c then fixupDirect r else r
, gitconfig = c
, backends = []
, remotes = []
, output = defaultMessageState
@ -141,7 +143,10 @@ newState gitrepo = AnnexState
, fields = M.empty
, cleanup = M.empty
, inodeschanged = Nothing
, useragent = Nothing
}
where
c = extractGitConfig r
{- Makes an Annex state object for the specified git repo.
- Ensures the config is read, if it was not already. -}
@ -245,3 +250,17 @@ withCurrentState :: Annex a -> Annex (IO a)
withCurrentState a = do
s <- getState id
return $ eval s a
{- Direct mode repos have core.bare=true, but are not really bare.
- Fix up the Repo to be a non-bare repo, and arrange for git commands
- run by git-annex to be passed parameters that override this setting. -}
fixupDirect :: Git.Repo -> Git.Repo
fixupDirect r@(Repo { location = Local { gitdir = d, worktree = Nothing } }) =
r
{ location = Local { gitdir = d </> ".git", worktree = Just d }
, gitGlobalOpts = gitGlobalOpts r ++
[ Param "-c"
, Param $ coreBare ++ "=" ++ boolConfig False
]
}
fixupDirect r = r
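Note on the mechanism: fixupDirect does not rewrite .git/config; it only prepends "-c core.bare=false" to every git invocation for this repo via gitGlobalOpts. A minimal sketch of that idea outside the Annex monad (CommandParam mirrors Utility.SafeCommand; buildGitArgs is a hypothetical helper for illustration):

data CommandParam = Param String

-- Global options go to git before the subcommand, so
-- "-c core.bare=false" overrides the repo-level core.bare=true.
buildGitArgs :: [CommandParam] -> [String] -> [String]
buildGitArgs globalopts args = concatMap unwrap globalopts ++ args
  where
    unwrap (Param s) = [s]

-- buildGitArgs [Param "-c", Param "core.bare=false"] ["status"]
--   == ["-c", "core.bare=false", "status"]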

Annex/Branch.hs

@ -1,6 +1,6 @@
{- management of the git-annex branch
-
- Copyright 2011-2012 Joey Hess <joey@kitenet.net>
- Copyright 2011-2013 Joey Hess <joey@kitenet.net>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -20,11 +20,16 @@ module Annex.Branch (
get,
change,
commit,
forceCommit,
files,
withIndex,
performTransitions,
) where
import qualified Data.ByteString.Lazy.Char8 as L
import qualified Data.Set as S
import qualified Data.Map as M
import qualified Control.Exception as E
import Common.Annex
import Annex.BranchState
@ -32,6 +37,7 @@ import Annex.Journal
import qualified Git
import qualified Git.Command
import qualified Git.Ref
import qualified Git.Sha
import qualified Git.Branch
import qualified Git.UnionMerge
import qualified Git.UpdateIndex
@ -42,6 +48,13 @@ import Annex.CatFile
import Annex.Perms
import qualified Annex
import Utility.Env
import Logs
import Logs.Transitions
import Logs.Trust.Pure
import Annex.ReplaceFile
import qualified Annex.Queue
import Annex.Branch.Transitions
import Annex.Exception
{- Name of the branch that is used to store git-annex's information. -}
name :: Git.Ref
@ -110,6 +123,9 @@ forceUpdate = updateTo =<< siblingBranches
- later get staged, and might overwrite changes made during the merge.
- This is only done if some of the Refs do need to be merged.
-
- Also handles performing any Transitions that have not yet been
- performed, in either the local branch, or the Refs.
-
- Returns True if any refs were merged in, False otherwise.
-}
updateTo :: [(Git.Ref, Git.Branch)] -> Annex Bool
@ -117,65 +133,71 @@ updateTo pairs = do
-- ensure branch exists, and get its current ref
branchref <- getBranch
dirty <- journalDirty
(refs, branches) <- unzip <$> filterM isnewer pairs
ignoredrefs <- getIgnoredRefs
(refs, branches) <- unzip <$> filterM (isnewer ignoredrefs) pairs
if null refs
{- Even when no refs need to be merged, the index
- may still be updated if the branch has gotten ahead
- of the index. -}
then whenM (needUpdateIndex branchref) $ lockJournal $ do
forceUpdateIndex branchref
then whenM (needUpdateIndex branchref) $ lockJournal $ \jl -> do
forceUpdateIndex jl branchref
{- When there are journalled changes
- as well as the branch being updated,
- a commit needs to be done. -}
when dirty $
go branchref True [] []
go branchref True [] [] jl
else lockJournal $ go branchref dirty refs branches
return $ not $ null refs
where
isnewer (r, _) = inRepo $ Git.Branch.changed fullname r
go branchref dirty refs branches = withIndex $ do
cleanjournal <- if dirty then stageJournal else return noop
isnewer ignoredrefs (r, _)
| S.member r ignoredrefs = return False
| otherwise = inRepo $ Git.Branch.changed fullname r
go branchref dirty refs branches jl = withIndex $ do
cleanjournal <- if dirty then stageJournal jl else return noop
let merge_desc = if null branches
then "update"
else "merging " ++
unwords (map Git.Ref.describe branches) ++
" into " ++ show name
localtransitions <- parseTransitionsStrictly "local"
<$> getLocal transitionsLog
unless (null branches) $ do
showSideAction merge_desc
mergeIndex refs
ff <- if dirty
then return False
else inRepo $ Git.Branch.fastForward fullname refs
if ff
then updateIndex branchref
else commitBranch branchref merge_desc
(nub $ fullname:refs)
mergeIndex jl refs
let commitrefs = nub $ fullname:refs
unlessM (handleTransitions jl localtransitions commitrefs) $ do
ff <- if dirty
then return False
else inRepo $ Git.Branch.fastForward fullname refs
if ff
then updateIndex jl branchref
else commitIndex jl branchref merge_desc commitrefs
liftIO cleanjournal
{- Gets the content of a file, which may be in the journal, or in the index
- (and committed to the branch).
-
- Updates the branch if necessary, to ensure the most up-to-date available
- content is available.
- content is returned.
-
- Returns an empty string if the file doesn't exist yet. -}
get :: FilePath -> Annex String
get file = do
update
get' file
getLocal file
{- Like get, but does not merge the branch, so the info returned may not
- reflect changes in remotes.
- (Changing the value this returns, and then merging is always the
- same as using get, and then changing its value.) -}
getStale :: FilePath -> Annex String
getStale = get'
get' :: FilePath -> Annex String
get' file = go =<< getJournalFile file
getLocal :: FilePath -> Annex String
getLocal file = go =<< getJournalFileStale file
where
go (Just journalcontent) = return journalcontent
go Nothing = withIndex $ L.unpack <$> catFile fullname file
go Nothing = getRaw file
getRaw :: FilePath -> Annex String
getRaw file = withIndex $ L.unpack <$> catFile fullname file
{- Applies a function to modifiy the content of a file.
-
@ -183,18 +205,23 @@ get' file = go =<< getJournalFile file
- modifes the current content of the file on the branch.
-}
change :: FilePath -> (String -> String) -> Annex ()
change file a = lockJournal $ a <$> getStale file >>= set file
change file a = lockJournal $ \jl -> a <$> getLocal file >>= set jl file
{- Records new content of a file into the journal -}
set :: FilePath -> String -> Annex ()
set :: JournalLocked -> FilePath -> String -> Annex ()
set = setJournalFile
{- Stages the journal, and commits staged changes to the branch. -}
commit :: String -> Annex ()
commit message = whenM journalDirty $ lockJournal $ do
cleanjournal <- stageJournal
commit = whenM journalDirty . forceCommit
{- Commits the current index to the branch even without any journalled
- changes. -}
forceCommit :: String -> Annex ()
forceCommit message = lockJournal $ \jl -> do
cleanjournal <- stageJournal jl
ref <- getBranch
withIndex $ commitBranch ref message [fullname]
withIndex $ commitIndex jl ref message [fullname]
liftIO cleanjournal
{- Commits the staged changes in the index to the branch.
@ -215,17 +242,18 @@ commit message = whenM journalDirty $ lockJournal $ do
- previous point, though getting it a long time ago makes the race
- more likely to occur.
-}
commitBranch :: Git.Ref -> String -> [Git.Ref] -> Annex ()
commitBranch branchref message parents = do
commitIndex :: JournalLocked -> Git.Ref -> String -> [Git.Ref] -> Annex ()
commitIndex jl branchref message parents = do
showStoringStateAction
commitBranch' branchref message parents
commitBranch' :: Git.Ref -> String -> [Git.Ref] -> Annex ()
commitBranch' branchref message parents = do
updateIndex branchref
commitIndex' jl branchref message parents
commitIndex' :: JournalLocked -> Git.Ref -> String -> [Git.Ref] -> Annex ()
commitIndex' jl branchref message parents = do
updateIndex jl branchref
committedref <- inRepo $ Git.Branch.commit message fullname parents
setIndexSha committedref
parentrefs <- commitparents <$> catObject committedref
when (racedetected branchref parentrefs) $
when (racedetected branchref parentrefs) $ do
liftIO $ print ("race detected", branchref, parentrefs, "committing", (branchref, parents))
fixrace committedref parentrefs
where
-- look for "parent ref" lines and return the refs
@ -244,8 +272,8 @@ commitBranch' branchref message parents = do
{- To recover from the race, union merge the lost refs
- into the index, and recommit on top of the bad commit. -}
fixrace committedref lostrefs = do
mergeIndex lostrefs
commitBranch committedref racemessage [committedref]
mergeIndex jl lostrefs
commitIndex jl committedref racemessage [committedref]
racemessage = message ++ " (recovery from race)"
@ -253,13 +281,17 @@ commitBranch' branchref message parents = do
files :: Annex [FilePath]
files = do
update
withIndex $ do
bfiles <- inRepo $ Git.Command.pipeNullSplitZombie
[ Params "ls-tree --name-only -r -z"
, Param $ show fullname
]
jfiles <- getJournalledFiles
return $ jfiles ++ bfiles
(++)
<$> branchFiles
<*> getJournalledFilesStale
{- Files in the branch, not including any from journalled changes,
- and without updating the branch. -}
branchFiles :: Annex [FilePath]
branchFiles = withIndex $ inRepo $ Git.Command.pipeNullSplitZombie
[ Params "ls-tree --name-only -r -z"
, Param $ show fullname
]
{- Populates the branch's index file with the current branch contents.
-
@ -273,11 +305,27 @@ genIndex g = Git.UpdateIndex.streamUpdateIndex g
{- Merges the specified refs into the index.
- Any changes staged in the index will be preserved. -}
mergeIndex :: [Git.Ref] -> Annex ()
mergeIndex branches = do
mergeIndex :: JournalLocked -> [Git.Ref] -> Annex ()
mergeIndex jl branches = do
prepareModifyIndex jl
h <- catFileHandle
inRepo $ \g -> Git.UnionMerge.mergeIndex h g branches
{- Removes any stale git lock file, to avoid git falling over when
- updating the index.
-
- Since all modifications of the index are performed inside this module,
- and only when the journal is locked, the fact that the journal has to be
- locked when this is called ensures that no other process is currently
- modifying the index. So any index.lock file must be stale, caused
- by git running when the system crashed, or the repository's disk was
- removed, etc.
-}
prepareModifyIndex :: JournalLocked -> Annex ()
prepareModifyIndex _jl = do
index <- fromRepo gitAnnexIndex
void $ liftIO $ tryIO $ removeFile $ index ++ ".lock"
{- Runs an action using the branch's index file. -}
withIndex :: Annex a -> Annex a
withIndex = withIndex' False
@ -299,15 +347,15 @@ withIndex' bootstrapping a = do
#endif
let g' = g { gitEnv = Just $ ("GIT_INDEX_FILE", f):e }
Annex.changeState $ \s -> s { Annex.repo = g' }
checkIndexOnce $ unlessM (liftIO $ doesFileExist f) $ do
unless bootstrapping create
liftIO $ createDirectoryIfMissing True $ takeDirectory f
unless bootstrapping $ inRepo genIndex
r <- a
r <- tryAnnex $ do
Annex.changeState $ \s -> s { Annex.repo = g' }
checkIndexOnce $ unlessM (liftIO $ doesFileExist f) $ do
unless bootstrapping create
liftIO $ createDirectoryIfMissing True $ takeDirectory f
unless bootstrapping $ inRepo genIndex
a
Annex.changeState $ \s -> s { Annex.repo = (Annex.repo s) { gitEnv = gitEnv g} }
return r
either E.throw return r
{- Updates the branch's index to reflect the current contents of the branch.
- Any changes staged in the index will be preserved.
@ -315,40 +363,48 @@ withIndex' bootstrapping a = do
- Compares the ref stored in the lock file with the current
- ref of the branch to see if an update is needed.
-}
updateIndex :: Git.Ref -> Annex ()
updateIndex branchref = whenM (needUpdateIndex branchref) $
forceUpdateIndex branchref
updateIndex :: JournalLocked -> Git.Ref -> Annex ()
updateIndex jl branchref = whenM (needUpdateIndex branchref) $
forceUpdateIndex jl branchref
forceUpdateIndex :: Git.Ref -> Annex ()
forceUpdateIndex branchref = do
withIndex $ mergeIndex [fullname]
forceUpdateIndex :: JournalLocked -> Git.Ref -> Annex ()
forceUpdateIndex jl branchref = do
withIndex $ mergeIndex jl [fullname]
setIndexSha branchref
{- Checks if the index needs to be updated. -}
needUpdateIndex :: Git.Ref -> Annex Bool
needUpdateIndex branchref = do
lock <- fromRepo gitAnnexIndexLock
lockref <- Git.Ref . firstLine <$>
liftIO (catchDefaultIO "" $ readFileStrict lock)
return (lockref /= branchref)
f <- fromRepo gitAnnexIndexStatus
committedref <- Git.Ref . firstLine <$>
liftIO (catchDefaultIO "" $ readFileStrict f)
return (committedref /= branchref)
{- Record that the branch's index has been updated to correspond to a
- given ref of the branch. -}
setIndexSha :: Git.Ref -> Annex ()
setIndexSha ref = do
lock <- fromRepo gitAnnexIndexLock
liftIO $ writeFile lock $ show ref ++ "\n"
setAnnexPerm lock
f <- fromRepo gitAnnexIndexStatus
liftIO $ writeFile f $ show ref ++ "\n"
setAnnexPerm f
{- Stages the journal into the index and returns an action that will
- clean up the staged journal files, which should only be run once
- the index has been committed to the branch. Should be run within
- lockJournal, to prevent others from modifying the journal. -}
stageJournal :: Annex (IO ())
stageJournal = withIndex $ do
- the index has been committed to the branch.
-
- Before staging, this removes any existing git index file lock.
- This is safe to do because stageJournal is the only thing that
- modifies this index file, and only one can run at a time, because
- the journal is locked. So any existing git index file lock must be
- stale, and the journal must contain any data that was in the process
- of being written to the index file when it crashed.
-}
stageJournal :: JournalLocked -> Annex (IO ())
stageJournal jl = withIndex $ do
prepareModifyIndex jl
g <- gitRepo
let dir = gitAnnexJournalDir g
fs <- getJournalFiles
fs <- getJournalFiles jl
liftIO $ do
h <- hashObjectStart g
Git.UpdateIndex.streamUpdateIndex g
@ -361,3 +417,117 @@ stageJournal = withIndex $ do
sha <- hashFile h path
streamer $ Git.UpdateIndex.updateIndexLine
sha FileBlob (asTopFilePath $ fileJournal file)
{- This is run after the refs have been merged into the index,
- but before the result is committed to the branch.
- (Which is why it's passed the contents of the local branch's
- transition log before that merge took place.)
-
- When the refs contain transitions that have not yet been done locally,
- the transitions are performed on the index, and a new branch
- is created from the result.
-
- When there are transitions recorded locally that have not been done
- to the remote refs, the transitions are performed in the index,
- and committed to the existing branch. In this case, the untransitioned
- remote refs cannot be merged into the branch (since transitions
- throw away history), so they are added to the list of refs to ignore,
- to avoid re-merging content from them again.
-}
handleTransitions :: JournalLocked -> Transitions -> [Git.Ref] -> Annex Bool
handleTransitions jl localts refs = do
m <- M.fromList <$> mapM getreftransition refs
let remotets = M.elems m
if all (localts ==) remotets
then return False
else do
let allts = combineTransitions (localts:remotets)
let (transitionedrefs, untransitionedrefs) =
partition (\r -> M.lookup r m == Just allts) refs
performTransitionsLocked jl allts (localts /= allts) transitionedrefs
ignoreRefs untransitionedrefs
return True
where
getreftransition ref = do
ts <- parseTransitionsStrictly "remote" . L.unpack
<$> catFile ref transitionsLog
return (ref, ts)
ignoreRefs :: [Git.Ref] -> Annex ()
ignoreRefs rs = do
old <- getIgnoredRefs
let s = S.unions [old, S.fromList rs]
f <- fromRepo gitAnnexIgnoredRefs
replaceFile f $ \tmp -> liftIO $ writeFile tmp $
unlines $ map show $ S.elems s
getIgnoredRefs :: Annex (S.Set Git.Ref)
getIgnoredRefs = S.fromList . mapMaybe Git.Sha.extractSha . lines <$> content
where
content = do
f <- fromRepo gitAnnexIgnoredRefs
liftIO $ catchDefaultIO "" $ readFile f
{- Performs the specified transitions on the contents of the index file,
- commits it to the branch, or creates a new branch.
-}
performTransitions :: Transitions -> Bool -> [Ref] -> Annex ()
performTransitions ts neednewlocalbranch transitionedrefs = lockJournal $ \jl ->
performTransitionsLocked jl ts neednewlocalbranch transitionedrefs
performTransitionsLocked :: JournalLocked -> Transitions -> Bool -> [Ref] -> Annex ()
performTransitionsLocked jl ts neednewlocalbranch transitionedrefs = do
-- For simplicity & speed, we're going to use the Annex.Queue to
-- update the git-annex branch, while it usually holds changes
-- for the head branch. Flush any such changes.
Annex.Queue.flush
withIndex $ do
prepareModifyIndex jl
run $ mapMaybe getTransitionCalculator $ transitionList ts
Annex.Queue.flush
if neednewlocalbranch
then do
committedref <- inRepo $ Git.Branch.commit message fullname transitionedrefs
setIndexSha committedref
else do
ref <- getBranch
commitIndex jl ref message (nub $ fullname:transitionedrefs)
where
message
| neednewlocalbranch && null transitionedrefs = "new branch for transition " ++ tdesc
| otherwise = "continuing transition " ++ tdesc
tdesc = show $ map describeTransition $ transitionList ts
{- The changes to make to the branch are calculated and applied to
- the branch directly, rather than going through the journal,
- which would be inefficient. (And the journal is not designed
- to hold changes to every file in the branch at once.)
-
- When a file in the branch is changed by transition code,
- that value is remembered and fed into the code for subsequent
- transitions.
-}
run [] = noop
run changers = do
trustmap <- calcTrustMap <$> getRaw trustLog
fs <- branchFiles
hasher <- inRepo hashObjectStart
forM_ fs $ \f -> do
content <- getRaw f
apply changers hasher f content trustmap
liftIO $ hashObjectStop hasher
apply [] _ _ _ _ = return ()
apply (changer:rest) hasher file content trustmap =
case changer file content trustmap of
RemoveFile -> do
Annex.Queue.addUpdateIndex
=<< inRepo (Git.UpdateIndex.unstageFile file)
-- File is deleted; can't run any other
-- transitions on it.
return ()
ChangeFile content' -> do
sha <- inRepo $ hashObject BlobObject content'
Annex.Queue.addUpdateIndex $ Git.UpdateIndex.pureStreamer $
Git.UpdateIndex.updateIndexLine sha FileBlob (asTopFilePath file)
apply rest hasher file content' trustmap
PreserveFile ->
apply rest hasher file content trustmap
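The heart of handleTransitions is a pure decision: a remote ref may be merged only if its transition log already covers every known transition; anything else is ignored rather than re-merged. A simplified sketch of that partitioning, shrinking Transitions to a set of strings (the real type lives in Logs.Transitions):

import Data.List (partition)
import qualified Data.Set as S

type Transitions = S.Set String

-- Returns (refs safe to merge, refs to ignore), given the local
-- transition log and the log read from each remote ref.
partitionRefs :: Transitions -> [(String, Transitions)] -> ([String], [String])
partitionRefs localts reflogs = (map fst ok, map fst stale)
  where
    allts = S.unions (localts : map snd reflogs)
    (ok, stale) = partition ((== allts) . snd) reflogs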

Annex/Branch/Transitions.hs (new file)

@ -0,0 +1,53 @@
{- git-annex branch transitions
-
- Copyright 2013 Joey Hess <joey@kitenet.net>
-
- Licensed under the GNU GPL version 3 or higher.
-}
module Annex.Branch.Transitions (
FileTransition(..),
getTransitionCalculator
) where
import Logs
import Logs.Transitions
import Logs.UUIDBased as UUIDBased
import Logs.Presence.Pure as Presence
import Types.TrustLevel
import Types.UUID
import qualified Data.Map as M
data FileTransition
= ChangeFile String
| RemoveFile
| PreserveFile
type TransitionCalculator = FilePath -> String -> TrustMap -> FileTransition
getTransitionCalculator :: Transition -> Maybe TransitionCalculator
getTransitionCalculator ForgetGitHistory = Nothing
getTransitionCalculator ForgetDeadRemotes = Just dropDead
dropDead :: FilePath -> String -> TrustMap -> FileTransition
dropDead f content trustmap = case getLogVariety f of
Just UUIDBasedLog -> ChangeFile $
UUIDBased.showLog id $ dropDeadFromUUIDBasedLog trustmap $ UUIDBased.parseLog Just content
Just (PresenceLog _) ->
let newlog = Presence.compactLog $ dropDeadFromPresenceLog trustmap $ Presence.parseLog content
in if null newlog
then RemoveFile
else ChangeFile $ Presence.showLog newlog
Nothing -> PreserveFile
dropDeadFromUUIDBasedLog :: TrustMap -> UUIDBased.Log String -> UUIDBased.Log String
dropDeadFromUUIDBasedLog trustmap = M.filterWithKey $ notDead trustmap . const
{- Presence logs can contain UUIDs or other values. Any line that matches
- a dead uuid is dropped; any other values are passed through. -}
dropDeadFromPresenceLog :: TrustMap -> [Presence.LogLine] -> [Presence.LogLine]
dropDeadFromPresenceLog trustmap = filter $ notDead trustmap (toUUID . Presence.info)
notDead :: TrustMap -> (v -> UUID) -> v -> Bool
notDead trustmap a v = M.findWithDefault SemiTrusted (a v) trustmap /= DeadTrusted
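Because notDead defaults unknown UUIDs to SemiTrusted, only entries explicitly marked DeadTrusted are dropped. A self-contained check of that behavior, with UUIDs as plain strings:

import qualified Data.Map as M

data TrustLevel = SemiTrusted | DeadTrusted deriving Eq
type TrustMap = M.Map String TrustLevel

notDead :: TrustMap -> (v -> String) -> v -> Bool
notDead trustmap a v = M.findWithDefault SemiTrusted (a v) trustmap /= DeadTrusted

main :: IO ()
main = print $ filter (notDead trustmap id) ["uuid-alive", "uuid-dead", "uuid-unknown"]
  where
    trustmap = M.fromList [("uuid-dead", DeadTrusted)]
-- prints ["uuid-alive","uuid-unknown"]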

Annex/CatFile.hs

@ -8,14 +8,17 @@
module Annex.CatFile (
catFile,
catObject,
catTree,
catObjectDetails,
catFileHandle,
catKey,
catKeyFile,
catKeyFileHEAD,
) where
import qualified Data.ByteString.Lazy as L
import qualified Data.Map as M
import System.PosixCompat.Types
import Common.Annex
import qualified Git
@ -23,6 +26,8 @@ import qualified Git.CatFile
import qualified Annex
import Git.Types
import Git.FilePath
import Git.FileMode
import qualified Git.Ref
catFile :: Git.Branch -> FilePath -> Annex L.ByteString
catFile branch file = do
@ -34,7 +39,12 @@ catObject ref = do
h <- catFileHandle
liftIO $ Git.CatFile.catObject h ref
catObjectDetails :: Git.Ref -> Annex (Maybe (L.ByteString, Sha))
catTree :: Git.Ref -> Annex [(FilePath, FileMode)]
catTree ref = do
h <- catFileHandle
liftIO $ Git.CatFile.catTree h ref
catObjectDetails :: Git.Ref -> Annex (Maybe (L.ByteString, Sha, ObjectType))
catObjectDetails ref = do
h <- catFileHandle
liftIO $ Git.CatFile.catObjectDetails h ref
@ -54,18 +64,51 @@ catFileHandle = do
Annex.changeState $ \s -> s { Annex.catfilehandles = m' }
return h
{- From the Sha or Ref of a symlink back to the key. -}
catKey :: Ref -> Annex (Maybe Key)
catKey ref = do
l <- fromInternalGitPath . encodeW8 . L.unpack <$> catObject ref
return $ if isLinkToAnnex l
then fileKey $ takeFileName l
else Nothing
{- From the Sha or Ref of a symlink back to the key.
-
- Requires a mode witness, to guarantee that the file is a symlink.
-}
catKey :: Ref -> FileMode -> Annex (Maybe Key)
catKey = catKey' True
catKey' :: Bool -> Ref -> FileMode -> Annex (Maybe Key)
catKey' modeguaranteed ref mode
| isSymLink mode = do
l <- fromInternalGitPath . encodeW8 . L.unpack <$> get
return $ if isLinkToAnnex l
then fileKey $ takeFileName l
else Nothing
| otherwise = return Nothing
where
-- If the mode is not guaranteed to be correct, avoid
-- buffering the whole file content, which might be large.
-- 8192 is enough if it really is a symlink.
get
| modeguaranteed = catObject ref
| otherwise = L.take 8192 <$> catObject ref
{- Looks up the file mode corresponding to the Ref using the running
- cat-file.
-
- Currently this always has to look in HEAD, because cat-file --batch
- does not offer a way to specify that we want to look up a tree object
- in the index. So if the index has a file staged not as a symlink,
- and it is a symlink in head, the wrong mode is gotten.
- Also, we have to assume the file is a symlink if it's not yet committed
- to HEAD. For these reasons, modeguaranteed is not set.
-}
catKeyChecked :: Bool -> Ref -> Annex (Maybe Key)
catKeyChecked needhead ref@(Ref r) =
catKey' False ref =<< findmode <$> catTree treeref
where
pathparts = split "/" r
dir = intercalate "/" $ take (length pathparts - 1) pathparts
file = fromMaybe "" $ lastMaybe pathparts
treeref = Ref $ if needhead then "HEAD" ++ dir ++ "/" else dir ++ "/"
findmode = fromMaybe symLinkMode . headMaybe .
map snd . filter (\p -> fst p == file)
{- From a file in the repository back to the key.
-
- Prefixing the file with ./ makes this work even if in a subdirectory
- of a repo.
-
- Ideally, this should reflect the key that's staged in the index,
- not the key that's committed to HEAD. Unfortunately, git cat-file
@ -75,7 +118,8 @@ catKey ref = do
-
- For command-line git-annex use, that doesn't matter. It's perfectly
- reasonable for things staged in the index after the currently running
- git-annex process to not be noticed by it.
- git-annex process to not be noticed by it. However, we do want to see
- what's in the index, since it may have uncommitted changes not in HEAD.
-
- For the assistant, this is much more of a problem, since it commits
- files and then needs to be able to immediately look up their keys.
@ -87,6 +131,9 @@ catKey ref = do
-}
catKeyFile :: FilePath -> Annex (Maybe Key)
catKeyFile f = ifM (Annex.getState Annex.daemon)
( catKey $ Ref $ "HEAD:./" ++ f
, catKey $ Ref $ ":./" ++ f
( catKeyFileHEAD f
, catKeyChecked True $ Git.Ref.fileRef f
)
catKeyFileHEAD :: FilePath -> Annex (Maybe Key)
catKeyFileHEAD f = catKeyChecked False $ Git.Ref.fileFromRef Git.Ref.headRef f
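The new mode witness exists because catKey' only treats the object as a symlink target when the tree entry's mode says so. In git tree objects a symlink has mode 120000 (octal); a sketch of the test with raw POSIX-style mode bits (git-annex's Git.FileMode helpers wrap the equivalent check):

import Data.Bits ((.&.))

isSymLink :: Int -> Bool
isSymLink mode = mode .&. 0o170000 == 0o120000  -- S_IFMT mask, S_IFLNK type

-- isSymLink 0o120000 == True   (symlink tree entry)
-- isSymLink 0o100644 == False  (regular file)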

Annex/CheckIgnore.hs

@ -25,7 +25,7 @@ checkIgnoreHandle :: Annex (Maybe Git.CheckIgnoreHandle)
checkIgnoreHandle = maybe startup return =<< Annex.getState Annex.checkignorehandle
where
startup = do
v <- inRepo $ Git.checkIgnoreStart
v <- inRepo Git.checkIgnoreStart
when (isNothing v) $
warning "The installed version of git is too old for .gitignores to be honored by git-annex."
Annex.changeState $ \s -> s { Annex.checkignorehandle = Just v }

Annex/Content.hs

@ -30,6 +30,7 @@ module Annex.Content (
freezeContent,
thawContent,
cleanObjectLoc,
dirKeys,
) where
import System.IO.Unsafe (unsafeInterleaveIO)
@ -43,7 +44,7 @@ import qualified Annex.Queue
import qualified Annex.Branch
import Utility.DiskFree
import Utility.FileMode
import qualified Utility.Url as Url
import qualified Annex.Url as Url
import Types.Key
import Utility.DataUnits
import Utility.CopyFile
@ -275,10 +276,11 @@ moveAnnex key src = withObjectLoc key storeobject storedirect
thawContentDir =<< calcRepo (gitAnnexLocation key)
thawContent src
v <- isAnnexLink f
if (Just key == v)
if Just key == v
then do
updateInodeCache key src
replaceFile f $ liftIO . moveFile src
chmodContent f
forM_ fs $
addContentWhenNotPresent key f
else ifM (goodContent key f)
@ -457,7 +459,7 @@ downloadUrl urls file = go =<< annexWebDownloadCommand <$> Annex.getGitConfig
go Nothing = do
opts <- map Param . annexWebOptions <$> Annex.getGitConfig
headers <- getHttpHeaders
liftIO $ anyM (\u -> Url.download u headers opts file) urls
anyM (\u -> Url.withUserAgent $ Url.download u headers opts file) urls
go (Just basecmd) = liftIO $ anyM (downloadcmd basecmd) urls
downloadcmd basecmd url =
boolSystem "sh" [Param "-c", Param $ gencmd url basecmd]
@ -500,6 +502,18 @@ freezeContent file = unlessM crippledFileSystem $
removeModes writeModes .
addModes [ownerReadMode]
{- Adjusts read mode of annexed file per core.sharedRepository setting. -}
chmodContent :: FilePath -> Annex ()
chmodContent file = unlessM crippledFileSystem $
liftIO . go =<< fromRepo getSharedRepository
where
go GroupShared = modifyFileMode file $
addModes [ownerReadMode, groupReadMode]
go AllShared = modifyFileMode file $
addModes readModes
go _ = modifyFileMode file $
addModes [ownerReadMode]
{- Allows writing to an annexed file that freezeContent was called on
- before. -}
thawContent :: FilePath -> Annex ()
@ -509,3 +523,18 @@ thawContent file = unlessM crippledFileSystem $
go GroupShared = groupWriteRead file
go AllShared = groupWriteRead file
go _ = allowWrite file
{- Finds files directly inside a directory like gitAnnexBadDir
- (not in subdirectories) and returns the corresponding keys. -}
dirKeys :: (Git.Repo -> FilePath) -> Annex [Key]
dirKeys dirspec = do
dir <- fromRepo dirspec
ifM (liftIO $ doesDirectoryExist dir)
( do
contents <- liftIO $ getDirectoryContents dir
files <- liftIO $ filterM doesFileExist $
map (dir </>) contents
return $ mapMaybe (fileKey . takeFileName) files
, return []
)
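A usage sketch for the new helper (gitAnnexBadDir is the existing accessor for .git/annex/bad; the binding name is hypothetical):

badkeys <- dirKeys gitAnnexBadDir  -- keys whose content fsck moved aside as corrupt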

Annex/Content/Direct.hs

@ -101,7 +101,7 @@ addAssociatedFile key file = do
else file':files
{- Associated files are always stored relative to the top of the repository.
- The input FilePath is relative to the CWD. -}
- The input FilePath is relative to the CWD, or is absolute. -}
normaliseAssociatedFile :: FilePath -> Annex FilePath
normaliseAssociatedFile file = do
top <- fromRepo Git.repoPath
@ -199,7 +199,7 @@ compareInodeCachesWith = ifM inodesChanged ( return Weakly, return Strongly )
addContentWhenNotPresent :: Key -> FilePath -> FilePath -> Annex ()
addContentWhenNotPresent key contentfile associatedfile = do
v <- isAnnexLink associatedfile
when (Just key == v) $ do
when (Just key == v) $
replaceFile associatedfile $
liftIO . void . copyFileExternal contentfile
updateInodeCache key associatedfile

Annex/Direct.hs

@ -8,14 +8,19 @@
module Annex.Direct where
import Common.Annex
import qualified Annex
import qualified Git
import qualified Git.LsFiles
import qualified Git.Merge
import qualified Git.DiffTree as DiffTree
import qualified Git.Config
import qualified Git.Ref
import qualified Git.Branch
import Git.Sha
import Git.FilePath
import Git.Types
import Config
import Annex.CatFile
import Utility.FileMode
import qualified Annex.Queue
import Logs.Location
import Backend
@ -45,8 +50,8 @@ stageDirect = do
{- Determine what kind of modified or deleted file this is, as
- efficiently as we can, by getting any key that's associated
- with it in git, as well as its stat info. -}
go (file, Just sha) = do
shakey <- catKey sha
go (file, Just sha, Just mode) = do
shakey <- catKey sha mode
mstat <- liftIO $ catchMaybeIO $ getSymbolicLinkStatus file
filekey <- isAnnexLink file
case (shakey, filekey, mstat, toInodeCache =<< mstat) of
@ -123,6 +128,8 @@ addDirect file cache = do
-}
mergeDirect :: FilePath -> Git.Ref -> Git.Repo -> IO Bool
mergeDirect d branch g = do
whenM (doesDirectoryExist d) $
removeDirectoryRecursive d
createDirectoryIfMissing True d
let g' = g { location = Local { gitdir = Git.localGitDir g, worktree = Just d } }
Git.Merge.mergeNonInteractive branch g'
@ -135,23 +142,22 @@ mergeDirect d branch g = do
mergeDirectCleanup :: FilePath -> Git.Ref -> Git.Ref -> Annex ()
mergeDirectCleanup d oldsha newsha = do
(items, cleanup) <- inRepo $ DiffTree.diffTreeRecursive oldsha newsha
forM_ items updated
makeabs <- flip fromTopFilePath <$> gitRepo
forM_ items (updated makeabs)
void $ liftIO cleanup
liftIO $ removeDirectoryRecursive d
where
updated item = do
updated makeabs item = do
let f = makeabs (DiffTree.file item)
void $ tryAnnex $
go DiffTree.srcsha DiffTree.srcmode moveout moveout_raw
go f DiffTree.srcsha DiffTree.srcmode moveout moveout_raw
void $ tryAnnex $
go DiffTree.dstsha DiffTree.dstmode movein movein_raw
go f DiffTree.dstsha DiffTree.dstmode movein movein_raw
where
go getsha getmode a araw
go f getsha getmode a araw
| getsha item == nullSha = noop
| isSymLink (getmode item) =
maybe (araw f) (\k -> void $ a k f)
=<< catKey (getsha item)
| otherwise = araw f
f = DiffTree.file item
| otherwise = maybe (araw f) (\k -> void $ a k f)
=<< catKey (getsha item) (getmode item)
moveout = removeDirect
@ -230,3 +236,66 @@ changedDirect oldk f = do
locs <- removeAssociatedFile oldk f
whenM (pure (null locs) <&&> not <$> inAnnex oldk) $
logStatus oldk InfoMissing
{- Enable/disable direct mode. -}
setDirect :: Bool -> Annex ()
setDirect wantdirect = do
if wantdirect
then do
switchHEAD
setbare
else do
setbare
switchHEADBack
setConfig (annexConfig "direct") val
Annex.changeGitConfig $ \c -> c { annexDirect = wantdirect }
where
val = Git.Config.boolConfig wantdirect
setbare = setConfig (ConfigKey Git.Config.coreBare) val
{- Since direct mode sets core.bare=true, incoming pushes could change
- the currently checked out branch. To avoid this problem, HEAD
- is changed to an internal ref that nothing is going to push to.
-
- For refs/heads/master, use refs/heads/annex/direct/master;
- this way things that show HEAD (eg shell prompts) will
- hopefully show just "master". -}
directBranch :: Ref -> Ref
directBranch orighead = case split "/" $ show orighead of
("refs":"heads":"annex":"direct":_) -> orighead
("refs":"heads":rest) ->
Ref $ "refs/heads/annex/direct/" ++ intercalate "/" rest
_ -> Ref $ "refs/heads/" ++ show (Git.Ref.base orighead)
{- Converts a directBranch back to the original branch.
-
- Any other ref is left unchanged.
-}
fromDirectBranch :: Ref -> Ref
fromDirectBranch directhead = case split "/" $ show directhead of
("refs":"heads":"annex":"direct":rest) ->
Ref $ "refs/heads/" ++ intercalate "/" rest
_ -> directhead
switchHEAD :: Annex ()
switchHEAD = maybe noop switch =<< inRepo Git.Branch.currentUnsafe
where
switch orighead = do
let newhead = directBranch orighead
maybe noop (inRepo . Git.Branch.update newhead)
=<< inRepo (Git.Ref.sha orighead)
inRepo $ Git.Branch.checkout newhead
switchHEADBack :: Annex ()
switchHEADBack = maybe noop switch =<< inRepo Git.Branch.currentUnsafe
where
switch currhead = do
let orighead = fromDirectBranch currhead
v <- inRepo $ Git.Ref.sha currhead
case v of
Just headsha
| orighead /= currhead -> do
inRepo $ Git.Branch.update orighead headsha
inRepo $ Git.Branch.checkout orighead
inRepo $ Git.Branch.delete currhead
_ -> inRepo $ Git.Branch.checkout orighead
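directBranch and fromDirectBranch form a near-inverse pair over branch names. A standalone sketch of the forward mapping (splitOn from the split package stands in for git-annex's split; the fallback case is simplified):

import Data.List (intercalate)
import Data.List.Split (splitOn)

newtype Ref = Ref String deriving (Eq, Show)

directBranch' :: Ref -> Ref
directBranch' (Ref r) = case splitOn "/" r of
  ("refs":"heads":"annex":"direct":_) -> Ref r
  ("refs":"heads":rest) -> Ref $ "refs/heads/annex/direct/" ++ intercalate "/" rest
  _ -> Ref r  -- the real code also normalizes a bare branch name here

-- directBranch' (Ref "refs/heads/master")
--   == Ref "refs/heads/annex/direct/master"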

Annex/Environment.hs

@ -32,7 +32,7 @@ import Utility.Env
checkEnvironment :: Annex ()
checkEnvironment = do
gitusername <- fromRepo $ Git.Config.getMaybe "user.name"
when (gitusername == Nothing || gitusername == Just "") $
when (isNothing gitusername || gitusername == Just "") $
liftIO checkEnvironmentIO
checkEnvironmentIO :: IO ()

Annex/Exception.hs

@ -13,6 +13,7 @@
module Annex.Exception (
bracketIO,
tryAnnex,
tryAnnexIO,
throwAnnex,
catchAnnex,
) where
@ -24,12 +25,16 @@ import Common.Annex
{- Runs an Annex action, with setup and cleanup both in the IO monad. -}
bracketIO :: IO v -> (v -> IO b) -> (v -> Annex a) -> Annex a
bracketIO setup cleanup go = M.bracket (liftIO setup) (liftIO . cleanup) go
bracketIO setup cleanup = M.bracket (liftIO setup) (liftIO . cleanup)
{- try in the Annex monad -}
tryAnnex :: Annex a -> Annex (Either SomeException a)
tryAnnex = M.try
{- try in the Annex monad, but only catching IO exceptions -}
tryAnnexIO :: Annex a -> Annex (Either IOException a)
tryAnnexIO = M.try
{- throw in the Annex monad -}
throwAnnex :: Exception e => e -> Annex a
throwAnnex = M.throw

Annex/FileMatcher.hs

@ -13,6 +13,7 @@ import Common.Annex
import Limit
import Utility.Matcher
import Types.Group
import Types.Limit
import Logs.Group
import Logs.Remote
import Annex.UUID

Annex/Hook.hs (new file)

@ -0,0 +1,42 @@
{- git-annex git hooks
-
- Note that it's important that the scripts not change, otherwise
- removing old hooks using an old version of the script would fail.
-
- Copyright 2013 Joey Hess <joey@kitenet.net>
-
- Licensed under the GNU GPL version 3 or higher.
-}
module Annex.Hook where
import Common.Annex
import qualified Git.Hook as Git
import Utility.Shell
import Config
preCommitHook :: Git.Hook
preCommitHook = Git.Hook "pre-commit" (mkHookScript "git annex pre-commit .")
mkHookScript :: String -> String
mkHookScript s = unlines
[ shebang_local
, "# automatically configured by git-annex"
, s
]
hookWrite :: Git.Hook -> Annex ()
hookWrite h =
-- cannot have git hooks in a crippled filesystem (no execute bit)
unlessM crippledFileSystem $
unlessM (inRepo $ Git.hookWrite h) $
hookWarning h "already exists, not configuring"
hookUnWrite :: Git.Hook -> Annex ()
hookUnWrite h = unlessM (inRepo $ Git.hookUnWrite h) $
hookWarning h "contents modified; not deleting. Edit it to remove call to git annex."
hookWarning :: Git.Hook -> String -> Annex ()
hookWarning h msg = do
r <- gitRepo
warning $ Git.hookName h ++ " hook (" ++ Git.hookFile h r ++ ") " ++ msg
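For reference, a ghci-style view of what mkHookScript generates (assuming shebang_local resolves to "#!/bin/sh"; Utility.Shell picks it per platform):

-- ghci> putStr $ mkHookScript "git annex pre-commit ."
-- #!/bin/sh
-- # automatically configured by git-annex
-- git annex pre-commit .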

Annex/Journal.hs

@ -1,10 +1,10 @@
{- management of the git-annex journal
-
- The journal is used to queue up changes before they are committed to the
- git-annex branch. Amoung other things, it ensures that if git-annex is
- git-annex branch. Among other things, it ensures that if git-annex is
- interrupted, its recorded data is not lost.
-
- Copyright 2011 Joey Hess <joey@kitenet.net>
- Copyright 2011-2013 Joey Hess <joey@kitenet.net>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -23,9 +23,14 @@ import Annex.Perms
{- Records content for a file in the branch to the journal.
-
- Using the journal, rather than immediately staging content to the index
- avoids git needing to rewrite the index after every change. -}
setJournalFile :: FilePath -> String -> Annex ()
setJournalFile file content = do
- avoids git needing to rewrite the index after every change.
-
- The file in the journal is updated atomically, which allows
- getJournalFileStale to always return a consistent journal file
- content, although possibly not the most current one.
-}
setJournalFile :: JournalLocked -> FilePath -> String -> Annex ()
setJournalFile _jl file content = do
createAnnexDirectory =<< fromRepo gitAnnexJournalDir
createAnnexDirectory =<< fromRepo gitAnnexTmpDir
-- journal file is written atomically
@ -37,17 +42,32 @@ setJournalFile file content = do
moveFile tmpfile jfile
{- Gets any journalled content for a file in the branch. -}
getJournalFile :: FilePath -> Annex (Maybe String)
getJournalFile file = inRepo $ \g -> catchMaybeIO $
getJournalFile :: JournalLocked -> FilePath -> Annex (Maybe String)
getJournalFile _jl = getJournalFileStale
{- Without locking, this is not guaranteed to be the most recent
- version of the file in the journal, so should not be used as a basis for
- changes. -}
getJournalFileStale :: FilePath -> Annex (Maybe String)
getJournalFileStale file = inRepo $ \g -> catchMaybeIO $
readFileStrict $ journalFile file g
{- List of files that have updated content in the journal. -}
getJournalledFiles :: Annex [FilePath]
getJournalledFiles = map fileJournal <$> getJournalFiles
getJournalledFiles :: JournalLocked -> Annex [FilePath]
getJournalledFiles jl = map fileJournal <$> getJournalFiles jl
getJournalledFilesStale :: Annex [FilePath]
getJournalledFilesStale = map fileJournal <$> getJournalFilesStale
{- List of existing journal files. -}
getJournalFiles :: Annex [FilePath]
getJournalFiles = do
getJournalFiles :: JournalLocked -> Annex [FilePath]
getJournalFiles _jl = getJournalFilesStale
{- List of existing journal files, but without locking, may miss new ones
- just being added, or may have false positives if the journal is staged
- as it is run. -}
getJournalFilesStale :: Annex [FilePath]
getJournalFilesStale = do
g <- gitRepo
fs <- liftIO $ catchDefaultIO [] $
getDirectoryContents $ gitAnnexJournalDir g
@ -55,7 +75,7 @@ getJournalFiles = do
{- Checks if there are changes in the journal. -}
journalDirty :: Annex Bool
journalDirty = not . null <$> getJournalFiles
journalDirty = not . null <$> getJournalFilesStale
{- Produces a filename to use in the journal for a file on the branch.
-
@ -77,14 +97,19 @@ fileJournal :: FilePath -> FilePath
fileJournal = replace [pathSeparator, pathSeparator] "_" .
replace "_" [pathSeparator]
{- Sentinel value, only produced by lockJournal; required
- as a parameter by things that need to ensure the journal is
- locked. -}
data JournalLocked = ProduceJournalLocked
{- Runs an action that modifies the journal, using locking to avoid
- contention with other git-annex processes. -}
lockJournal :: Annex a -> Annex a
lockJournal :: (JournalLocked -> Annex a) -> Annex a
lockJournal a = do
lockfile <- fromRepo gitAnnexJournalLock
createAnnexDirectory $ takeDirectory lockfile
mode <- annexFileMode
bracketIO (lock lockfile mode) unlock (const a)
bracketIO (lock lockfile mode) unlock (const $ a ProduceJournalLocked)
where
#ifndef mingw32_HOST_OS
lock lockfile mode = do
@ -101,4 +126,3 @@ lockJournal a = do
#else
unlock = removeFile
#endif
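The JournalLocked value threaded through this commit is a lock witness: only lockJournal can produce one, so any function that demands it cannot be called without the journal held. The pattern in isolation (IO stands in for Annex; the lock itself is stubbed out):

import Control.Exception (bracket)

data JournalLocked = ProduceJournalLocked

lockJournal :: (JournalLocked -> IO a) -> IO a
lockJournal a = bracket acquire release (const $ a ProduceJournalLocked)
  where
    acquire = return ()          -- stand-in for taking the real lock file
    release = const (return ())

-- Cannot be called at a call site unless the journal lock is provably held.
setJournalFile :: JournalLocked -> FilePath -> String -> IO ()
setJournalFile _jl = writeFile

main :: IO ()
main = lockJournal $ \jl -> setJournalFile jl "journal.tmp" "content"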

Annex/Link.hs

@ -68,9 +68,9 @@ getAnnexLinkTarget file = ifM (coreSymlinks <$> Annex.getGitConfig)
-- characters, or whitespace, we
-- certainly don't have a link to a
-- git-annex key.
if any (`elem` s) "\0\n\r \t"
then return ""
else return s
return $ if any (`elem` s) "\0\n\r \t"
then ""
else s
{- Creates a link on disk.
-

Annex/Quvi.hs (new file)

@ -0,0 +1,20 @@
{- quvi options for git-annex
-
- Copyright 2013 Joey Hess <joey@kitenet.net>
-
- Licensed under the GNU GPL version 3 or higher.
-}
{-# LANGUAGE Rank2Types #-}
module Annex.Quvi where
import Common.Annex
import qualified Annex
import Utility.Quvi
import Utility.Url
withQuviOptions :: forall a. Query a -> [CommandParam] -> URLString -> Annex a
withQuviOptions a ps url = do
opts <- map Param . annexQuviOptions <$> Annex.getGitConfig
liftIO $ a (ps++opts) url

Annex/Ssh.hs

@ -16,6 +16,7 @@ module Annex.Ssh (
import qualified Data.Map as M
import Data.Hash.MD5
import System.Process (cwd)
import Common.Annex
import Annex.LockPool
@ -42,7 +43,7 @@ sshCachingOptions (host, port) opts = go =<< sshInfo (host, port)
-- If the lock pool is empty, this is the first ssh of this
-- run. There could be stale ssh connections hanging around
-- from a previous git-annex run that was interrupted.
cleanstale = whenM (not . any isLock . M.keys <$> getPool) $
cleanstale = whenM (not . any isLock . M.keys <$> getPool)
sshCleanup
{- Returns a filename to use for a ssh connection caching socket, and
@ -52,14 +53,30 @@ sshInfo (host, port) = go =<< sshCacheDir
where
go Nothing = return (Nothing, [])
go (Just dir) = do
let socketfile = dir </> hostport2socket host port
if valid_unix_socket_path socketfile
then return (Just socketfile, sshConnectionCachingParams socketfile)
else do
socketfile' <- liftIO $ relPathCwdToFile socketfile
if valid_unix_socket_path socketfile'
then return (Just socketfile', sshConnectionCachingParams socketfile')
else return (Nothing, [])
r <- liftIO $ bestSocketPath $ dir </> hostport2socket host port
return $ case r of
Nothing -> (Nothing, [])
Just socketfile -> (Just socketfile, sshConnectionCachingParams socketfile)
{- Given an absolute path to use for a socket file,
- returns whichever is shorter of that or the relative path to the same
- file.
-
- If no path can be constructed that is a valid socket, returns Nothing. -}
bestSocketPath :: FilePath -> IO (Maybe FilePath)
bestSocketPath abssocketfile = do
relsocketfile <- liftIO $ relPathCwdToFile abssocketfile
let socketfile = if length abssocketfile <= length relsocketfile
then abssocketfile
else relsocketfile
return $ if valid_unix_socket_path (socketfile ++ sshgarbage)
then Just socketfile
else Nothing
where
-- ssh appends a 16 char extension to the socket when setting it
-- up, which needs to be taken into account when checking
-- that a valid socket was constructed.
sshgarbage = take (1+16) $ repeat 'X'
sshConnectionCachingParams :: FilePath -> [CommandParam]
sshConnectionCachingParams socketfile =
@ -96,8 +113,8 @@ sshCleanup = go =<< sshCacheDir
where
go Nothing = noop
go (Just dir) = do
sockets <- filter (not . isLock) <$>
liftIO (catchDefaultIO [] $ dirContents dir)
sockets <- liftIO $ filter (not . isLock)
<$> catchDefaultIO [] (dirContents dir)
forM_ sockets cleanup
cleanup socketfile = do
#ifndef mingw32_HOST_OS
@ -120,13 +137,15 @@ sshCleanup = go =<< sshCacheDir
stopssh socketfile
#endif
stopssh socketfile = do
let params = sshConnectionCachingParams socketfile
let (dir, base) = splitFileName socketfile
let params = sshConnectionCachingParams base
-- "ssh -O stop" is noisy on stderr even with -q
void $ liftIO $ catchMaybeIO $
withQuietOutput createProcessSuccess $
proc "ssh" $ toCommand $
(proc "ssh" $ toCommand $
[ Params "-O stop"
] ++ params ++ [Param "any"]
] ++ params ++ [Param "any"])
{ cwd = Just dir }
-- Cannot remove the lock file; other processes may
-- be waiting on our exclusive lock to use it.
@ -139,8 +158,10 @@ hostport2socket host Nothing = hostport2socket' host
hostport2socket host (Just port) = hostport2socket' $ host ++ "!" ++ show port
hostport2socket' :: String -> FilePath
hostport2socket' s
| length s > 32 = md5s (Str s)
| length s > lengthofmd5s = md5s (Str s)
| otherwise = s
where
lengthofmd5s = 32
socket2lock :: FilePath -> FilePath
socket2lock socket = socket ++ lockExt
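The socket-path juggling exists because unix domain sockets cap the path length (sockaddr_un allows roughly 108 bytes on Linux; the exact figure varies by platform). A sketch of the check, accounting for the 16-character suffix ssh appends while setting up the master socket (validUnixSocketPath and the 108 limit are illustrative assumptions; git-annex's valid_unix_socket_path encapsulates the real one):

validUnixSocketPath :: FilePath -> Bool
validUnixSocketPath p = length p < 108

bestSocketPath' :: FilePath -> FilePath -> Maybe FilePath
bestSocketPath' abspath relpath
  | ok shorter = Just shorter
  | otherwise = Nothing
  where
    shorter = if length abspath <= length relpath then abspath else relpath
    ok p = validUnixSocketPath (p ++ replicate 17 'X')  -- 1+16 chars of ssh suffix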

Annex/TaggedPush.hs

@ -13,13 +13,14 @@ import qualified Annex.Branch
import qualified Git
import qualified Git.Ref
import qualified Git.Command
import qualified Git.Branch
import Utility.Base64
{- Converts a git branch into a branch that is tagged with a UUID, typically
- the UUID of the repo that will be pushing it, and possibly with other
- information.
-
- Pushing to branches on the remote that have out uuid in them is ugly,
- Pushing to branches on the remote that have our uuid in them is ugly,
- but it reserves those branches for pushing by us, and so our pushes will
- never conflict with other pushes.
-
@ -50,7 +51,10 @@ taggedPush :: UUID -> Maybe String -> Git.Ref -> Remote -> Git.Repo -> IO Bool
taggedPush u info branch remote = Git.Command.runBool
[ Param "push"
, Param $ Remote.name remote
, Param $ refspec Annex.Branch.name
{- Using forcePush here is safe because we "own" the tagged branch
- we're pushing; it has no other writers. Ensures it is pushed
- even if it has been rewritten by a transition. -}
, Param $ Git.Branch.forcePush $ refspec Annex.Branch.name
, Param $ refspec branch
]
where

Annex/UUID.hs

@ -17,8 +17,11 @@ module Annex.UUID (
getUncachedUUID,
prepUUID,
genUUID,
genUUIDInNameSpace,
gCryptNameSpace,
removeRepoUUID,
storeUUID,
setUUID,
) where
import Common.Annex
@ -27,7 +30,9 @@ import qualified Git.Config
import Config
import qualified Data.UUID as U
import qualified Data.UUID.V5 as U5
import System.Random
import Data.Bits.Utils
configkey :: ConfigKey
configkey = annexConfig "uuid"
@ -36,6 +41,17 @@ configkey = annexConfig "uuid"
genUUID :: IO UUID
genUUID = UUID . show <$> (randomIO :: IO U.UUID)
{- Generates a UUID from a given string, using a namespace.
- Given the same namespace, the same string will always result
- in the same UUID. -}
genUUIDInNameSpace :: U.UUID -> String -> UUID
genUUIDInNameSpace namespace = UUID . show . U5.generateNamed namespace . s2w8
{- Namespace used for UUIDs derived from git-remote-gcrypt ids. -}
gCryptNameSpace :: U.UUID
gCryptNameSpace = U5.generateNamed U5.namespaceURL $
s2w8 "http://git-annex.branchable.com/design/gcrypt/"
{- Get current repository's UUID. -}
getUUID :: Annex UUID
getUUID = getRepoUUID =<< gitRepo
@ -72,3 +88,9 @@ prepUUID = whenM ((==) NoUUID <$> getUUID) $
storeUUID :: ConfigKey -> UUID -> Annex ()
storeUUID configfield = setConfig configfield . fromUUID
{- Only sets the configkey in the Repo; does not change .git/config -}
setUUID :: Git.Repo -> UUID -> IO Git.Repo
setUUID r u = do
let s = show configkey ++ "=" ++ fromUUID u
Git.Config.store s r
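genUUIDInNameSpace builds a version-5, name-based UUID, so the mapping is deterministic: the same namespace and string always yield the same UUID, letting a git-remote-gcrypt id convert to a stable repository UUID. A usage sketch against the uuid package (the input id is hypothetical):

import qualified Data.UUID as U
import qualified Data.UUID.V5 as U5
import Data.Char (ord)
import Data.Word (Word8)

-- as provided by MissingH's Data.Bits.Utils
s2w8 :: String -> [Word8]
s2w8 = map (fromIntegral . ord)

gCryptNameSpace :: U.UUID
gCryptNameSpace = U5.generateNamed U5.namespaceURL $
    s2w8 "http://git-annex.branchable.com/design/gcrypt/"

main :: IO ()
main = print $ U5.generateNamed gCryptNameSpace (s2w8 "example-gcrypt-id")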

Annex/Url.hs (new file)

@ -0,0 +1,27 @@
{- Url downloading, with git-annex user agent.
-
- Copyright 2013 Joey Hess <joey@kitenet.net>
-
- Licensed under the GNU GPL version 3 or higher.
-}
module Annex.Url (
module U,
withUserAgent,
getUserAgent,
) where
import Common.Annex
import qualified Annex
import Utility.Url as U
import qualified Build.SysConfig as SysConfig
defaultUserAgent :: U.UserAgent
defaultUserAgent = "git-annex/" ++ SysConfig.packageversion
getUserAgent :: Annex (Maybe U.UserAgent)
getUserAgent = Annex.getState $
Just . fromMaybe defaultUserAgent . Annex.useragent
withUserAgent :: (Maybe U.UserAgent -> IO a) -> Annex a
withUserAgent a = liftIO . a =<< getUserAgent

Annex/Version.hs

@ -19,18 +19,21 @@ defaultVersion :: Version
defaultVersion = "3"
directModeVersion :: Version
directModeVersion = "4"
directModeVersion = "5"
supportedVersions :: [Version]
supportedVersions = [defaultVersion, directModeVersion]
upgradableVersions :: [Version]
#ifndef mingw32_HOST_OS
upgradableVersions = ["0", "1", "2"]
upgradableVersions = ["0", "1", "2", "4"]
#else
upgradableVersions = ["2"]
upgradableVersions = ["2", "4"]
#endif
autoUpgradeableVersions :: [Version]
autoUpgradeableVersions = ["4"]
versionField :: ConfigKey
versionField = annexConfig "version"
@ -42,12 +45,3 @@ setVersion = setConfig versionField
removeVersion :: Annex ()
removeVersion = unsetConfig versionField
checkVersion :: Version -> Annex ()
checkVersion v
| v `elem` supportedVersions = noop
| v `elem` upgradableVersions = err "Upgrade this repository: git-annex upgrade"
| otherwise = err "Upgrade git-annex."
where
err msg = error $ "Repository version " ++ v ++
" is not supported. " ++ msg

Annex/Wanted.hs

@ -1,4 +1,4 @@
{- git-annex control over whether content is wanted
{- git-annex checking whether content is wanted
-
- Copyright 2012 Joey Hess <joey@kitenet.net>
-

Assistant.hs

@ -22,6 +22,8 @@ import Assistant.Threads.Merger
import Assistant.Threads.TransferWatcher
import Assistant.Threads.Transferrer
import Assistant.Threads.SanityChecker
import Assistant.Threads.Cronner
import Assistant.Threads.ProblemFixer
#ifdef WITH_CLIBS
import Assistant.Threads.MountWatcher
#endif
@ -47,6 +49,8 @@ import Assistant.Types.UrlRenderer
import qualified Utility.Daemon
import Utility.LogFile
import Utility.ThreadScheduler
import Utility.HumanTime
import Annex.Perms
import qualified Build.SysConfig as SysConfig
import System.Log.Logger
@ -60,11 +64,13 @@ stopDaemon = liftIO . Utility.Daemon.stopDaemon =<< fromRepo gitAnnexPidFile
-
- startbrowser is passed the url and html shim file, as well as the original
- stdout and stderr descriptors. -}
startDaemon :: Bool -> Bool -> Maybe HostName -> Maybe (Maybe Handle -> Maybe Handle -> String -> FilePath -> IO ()) -> Annex ()
startDaemon assistant foreground listenhost startbrowser = do
startDaemon :: Bool -> Bool -> Maybe Duration -> Maybe HostName -> Maybe (Maybe Handle -> Maybe Handle -> String -> FilePath -> IO ()) -> Annex ()
startDaemon assistant foreground startdelay listenhost startbrowser = do
Annex.changeState $ \s -> s { Annex.daemon = True }
pidfile <- fromRepo gitAnnexPidFile
logfile <- fromRepo gitAnnexLogFile
#ifndef mingw32_HOST_OS
createAnnexDirectory (parentDir logfile)
logfd <- liftIO $ openLog logfile
if foreground
then do
@ -83,6 +89,13 @@ startDaemon assistant foreground listenhost startbrowser = do
Just a -> Just $ a origout origerr
else
start (Utility.Daemon.daemonize logfd (Just pidfile) False) Nothing
#else
-- Windows is always foreground, and has no log file.
start id $
case startbrowser of
Nothing -> Nothing
Just a -> Just $ a Nothing Nothing
#endif
where
desc
| assistant = "assistant"
@ -96,7 +109,6 @@ startDaemon assistant foreground listenhost startbrowser = do
flip runAssistant (go webappwaiter)
=<< newAssistantData st dstatus
#ifdef WITH_WEBAPP
go webappwaiter = do
d <- getAssistant id
@ -127,15 +139,20 @@ startDaemon assistant foreground listenhost startbrowser = do
, assist $ daemonStatusThread
, assist $ sanityCheckerDailyThread
, assist $ sanityCheckerHourlyThread
, assist $ problemFixerThread urlrenderer
#ifdef WITH_CLIBS
, assist $ mountWatcherThread
, assist $ mountWatcherThread urlrenderer
#endif
, assist $ netWatcherThread
, assist $ netWatcherFallbackThread
, assist $ transferScannerThread urlrenderer
, assist $ cronnerThread urlrenderer
, assist $ configMonitorThread
, assist $ glacierThread
, watch $ watchThread
-- must come last so that all threads that wait
-- on it have already started waiting
, watch $ sanityCheckerStartupThread startdelay
]
liftIO waitForTermination

Assistant/Alert.hs

@ -18,26 +18,30 @@ import Logs.Transfer
import Data.String
import qualified Data.Text as T
import qualified Control.Exception as E
#ifdef WITH_WEBAPP
import Assistant.Monad
import Assistant.DaemonStatus
import Assistant.WebApp.Types
import Assistant.WebApp
import Assistant.WebApp (renderUrl)
import Yesod
#endif
import Assistant.Monad
import Assistant.Types.UrlRenderer
{- Makes a button for an alert that opens a Route. The button will
- close the alert it's attached to when clicked. -}
{- Makes a button for an alert that opens a Route.
-
- If autoclose is set, the button will close the alert it's
- attached to when clicked. -}
#ifdef WITH_WEBAPP
mkAlertButton :: T.Text -> UrlRenderer -> Route WebApp -> Assistant AlertButton
mkAlertButton label urlrenderer route = do
mkAlertButton :: Bool -> T.Text -> UrlRenderer -> Route WebApp -> Assistant AlertButton
mkAlertButton autoclose label urlrenderer route = do
close <- asIO1 removeAlert
url <- liftIO $ renderUrl urlrenderer route []
return $ AlertButton
{ buttonLabel = label
, buttonUrl = url
, buttonAction = Just close
, buttonAction = if autoclose then Just close else Nothing
}
#endif
@ -76,6 +80,22 @@ warningAlert name msg = Alert
, alertButton = Nothing
}
errorAlert :: String -> AlertButton -> Alert
errorAlert msg button = Alert
{ alertClass = Error
, alertHeader = Nothing
, alertMessageRender = renderData
, alertData = [UnTensed $ T.pack msg]
, alertCounter = 0
, alertBlockDisplay = True
, alertClosable = True
, alertPriority = Pinned
, alertIcon = Just ErrorIcon
, alertCombiner = Nothing
, alertName = Nothing
, alertButton = Just button
}
activityAlert :: Maybe TenseText -> [TenseChunk] -> Alert
activityAlert header dat = baseActivityAlert
{ alertHeader = header
@ -147,6 +167,63 @@ sanityCheckFixAlert msg = Alert
alerthead = "The daily sanity check found and fixed a problem:"
alertfoot = "If these problems persist, consider filing a bug report."
fsckingAlert :: AlertButton -> Maybe Remote -> Alert
fsckingAlert button mr = baseActivityAlert
{ alertData = case mr of
Nothing -> [ UnTensed $ T.pack $ "Consistency check in progress" ]
Just r -> [ UnTensed $ T.pack $ "Consistency check of " ++ Remote.name r ++ " in progress"]
, alertButton = Just button
}
showFscking :: UrlRenderer -> Maybe Remote -> IO (Either E.SomeException a) -> Assistant a
showFscking urlrenderer mr a = do
#ifdef WITH_WEBAPP
button <- mkAlertButton False (T.pack "Configure") urlrenderer ConfigFsckR
r <- alertDuring (fsckingAlert button mr) $
liftIO a
#else
r <- liftIO a
#endif
either (liftIO . E.throwIO) return r
notFsckedNudge :: UrlRenderer -> Maybe Remote -> Assistant ()
#ifdef WITH_WEBAPP
notFsckedNudge urlrenderer mr = do
button <- mkAlertButton True (T.pack "Configure") urlrenderer ConfigFsckR
void $ addAlert (notFsckedAlert mr button)
#else
notFsckedNudge _ _ = noop
#endif
notFsckedAlert :: Maybe Remote -> AlertButton -> Alert
notFsckedAlert mr button = Alert
{ alertHeader = Just $ fromString $ concat
[ "You should enable consistency checking to protect your data"
, maybe "" (\r -> " in " ++ Remote.name r) mr
, "."
]
, alertIcon = Just InfoIcon
, alertPriority = High
, alertButton = Just button
, alertClosable = True
, alertClass = Message
, alertMessageRender = renderData
, alertCounter = 0
, alertBlockDisplay = True
, alertName = Just NotFsckedAlert
, alertCombiner = Just $ dataCombiner $ \_old new -> new
, alertData = []
}
brokenRepositoryAlert :: AlertButton -> Alert
brokenRepositoryAlert = errorAlert "Serious problems have been detected with your repository. This needs your immediate attention!"
repairingAlert :: String -> Alert
repairingAlert repodesc = activityAlert Nothing
[ Tensed "Attempting to repair" "Repaired"
, UnTensed $ T.pack repodesc
]
pairingAlert :: AlertButton -> Alert
pairingAlert button = baseActivityAlert
{ alertData = [ UnTensed "Pairing in progress" ]

Assistant/DaemonStatus.hs

@ -57,8 +57,7 @@ calcSyncRemotes = do
return $ \dstatus -> dstatus
{ syncRemotes = syncable
, syncGitRemotes =
filter (not . Remote.specialRemote) syncable
, syncGitRemotes = filter Remote.syncableRemote syncable
, syncDataRemotes = syncdata
, syncingToCloudRemote = any iscloud syncdata
}
@ -77,6 +76,10 @@ updateSyncRemotes = do
M.filter $ \alert ->
alertName alert /= Just CloudRepoNeededAlert
updateScheduleLog :: Assistant ()
updateScheduleLog =
liftIO . sendNotification =<< scheduleLogNotifier <$> getDaemonStatus
{- Load any previous daemon status file, and store it in a MVar for this
- process to use as its DaemonStatus. Also gets current transfer status. -}
startDaemonStatus :: Annex DaemonStatusHandle

Assistant/DeleteRemote.hs

@ -17,8 +17,7 @@ import Logs.Location
import Assistant.DaemonStatus
import qualified Remote
import Remote.List
import qualified Git.Command
import qualified Git.BuildVersion
import qualified Git.Remote
import Logs.Trust
import qualified Annex
@ -35,15 +34,7 @@ disableRemote uuid = do
remote <- fromMaybe (error "unknown remote")
<$> liftAnnex (Remote.remoteFromUUID uuid)
liftAnnex $ do
inRepo $ Git.Command.run
[ Param "remote"
-- name of this subcommand changed
, Param $
if Git.BuildVersion.older "1.8.0"
then "rm"
else "remove"
, Param (Remote.name remote)
]
inRepo $ Git.Remote.remove (Remote.name remote)
void $ remoteListRefresh
updateSyncRemotes
return remote
@ -90,7 +81,7 @@ finishRemovingRemote :: UrlRenderer -> UUID -> Assistant ()
#ifdef WITH_WEBAPP
finishRemovingRemote urlrenderer uuid = do
desc <- liftAnnex $ Remote.prettyUUID uuid
button <- mkAlertButton (T.pack "Finish deletion process") urlrenderer $
button <- mkAlertButton True (T.pack "Finish deletion process") urlrenderer $
FinishDeleteRepositoryR uuid
void $ addAlert $ remoteRemovalAlert desc button
#else

50
Assistant/Fsck.hs Normal file
View file

@ -0,0 +1,50 @@
{- git-annex assistant fscking
-
- Copyright 2013 Joey Hess <joey@kitenet.net>
-
- Licensed under the GNU AGPL version 3 or higher.
-}
module Assistant.Fsck where
import Assistant.Common
import Types.ScheduledActivity
import qualified Types.Remote as Remote
import Annex.UUID
import Assistant.Alert
import Assistant.Types.UrlRenderer
import Logs.Schedule
import qualified Annex
import qualified Data.Set as S
{- Displays a nudge in the webapp if a fsck is not configured for
- the specified remote, or for the local repository. -}
fsckNudge :: UrlRenderer -> Maybe Remote -> Assistant ()
fsckNudge urlrenderer mr
| maybe True fsckableRemote mr =
whenM (liftAnnex $ annexFsckNudge <$> Annex.getGitConfig) $
unlessM (liftAnnex $ checkFscked mr) $
notFsckedNudge urlrenderer mr
| otherwise = noop
fsckableRemote :: Remote -> Bool
fsckableRemote = isJust . Remote.remoteFsck
{- Checks if the remote, or the local repository, has a fsck scheduled.
- Only looks at fscks configured to run via the local repository, not
- other repositories. -}
checkFscked :: Maybe Remote -> Annex Bool
checkFscked mr = any wanted . S.toList <$> (scheduleGet =<< getUUID)
where
wanted = case mr of
Nothing -> isSelfFsck
Just r -> flip isFsckOf (Remote.uuid r)
isSelfFsck :: ScheduledActivity -> Bool
isSelfFsck (ScheduledSelfFsck _ _) = True
isSelfFsck _ = False
isFsckOf :: ScheduledActivity -> UUID -> Bool
isFsckOf (ScheduledRemoteFsck u _ _) u' = u == u'
isFsckOf _ _ = False
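{- A usage sketch (nudgeAll is a hypothetical helper), mirroring the
- call sites added elsewhere in this commit: pass Nothing to nudge
- about the local repository, and Just a remote when one becomes
- reachable. -}
nudgeAll :: UrlRenderer -> [Remote] -> Assistant ()
nudgeAll urlrenderer rs = do
	fsckNudge urlrenderer Nothing
	mapM_ (fsckNudge urlrenderer . Just) rs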

36
Assistant/Gpg.hs Normal file
View file

@ -0,0 +1,36 @@
{- git-annex assistant gpg stuff
-
- Copyright 2013 Joey Hess <joey@kitenet.net>
-
- Licensed under the GNU AGPL version 3 or higher.
-}
{-# LANGUAGE QuasiQuotes, TemplateHaskell, OverloadedStrings #-}
module Assistant.Gpg where
import Utility.Gpg
import Utility.UserInfo
import Types.Remote (RemoteConfigKey)
import qualified Data.Map as M
{- Generates a gpg user id that is not used by any existing secret key -}
newUserId :: IO UserId
newUserId = do
oldkeys <- secretKeys
username <- myUserName
let basekeyname = username ++ "'s git-annex encryption key"
return $ Prelude.head $ filter (\n -> M.null $ M.filter (== n) oldkeys)
( basekeyname
: map (\n -> basekeyname ++ show n) ([2..] :: [Int])
)
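{- For illustration (user name illustrative), the candidate ids are
- tried in order:
-   "Alice's git-annex encryption key"
-   "Alice's git-annex encryption key2"
-   "Alice's git-annex encryption key3"
-   ...
- and the first one not already used by an existing secret key is
- returned. -}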
data EnableEncryption = HybridEncryption | SharedEncryption | NoEncryption
deriving (Eq)
{- Generates Remote configuration for encryption. -}
configureEncryption :: EnableEncryption -> (RemoteConfigKey, String)
configureEncryption SharedEncryption = ("encryption", "shared")
configureEncryption NoEncryption = ("encryption", "none")
configureEncryption HybridEncryption = ("encryption", "hybrid")

View file

@ -9,51 +9,32 @@ module Assistant.MakeRemote where
import Assistant.Common
import Assistant.Ssh
import Assistant.Sync
import qualified Types.Remote as R
import qualified Remote
import Remote.List
import qualified Remote.Rsync as Rsync
import qualified Remote.GCrypt as GCrypt
import qualified Git
import qualified Git.Command
import qualified Command.InitRemote
import Logs.UUID
import Logs.Remote
import Git.Remote
import Config
import Config.Cost
import Git.Types (RemoteName)
import Creds
import Assistant.Gpg
import Utility.Gpg (KeyId)
import qualified Data.Text as T
import qualified Data.Map as M
type RemoteName = String
{- Sets up and begins syncing with a new ssh or rsync remote. -}
makeSshRemote :: Bool -> SshData -> Maybe Cost -> Assistant Remote
makeSshRemote forcersync sshdata mcost = do
r <- liftAnnex $
addRemote $ maker (sshRepoName sshdata) sshurl
liftAnnex $ maybe noop (setRemoteCost r) mcost
syncRemote r
return r
{- Sets up a new git or rsync remote, accessed over ssh. -}
makeSshRemote :: SshData -> Annex RemoteName
makeSshRemote sshdata = maker (sshRepoName sshdata) (genSshUrl sshdata)
where
rsync = forcersync || rsyncOnly sshdata
maker
| rsync = makeRsyncRemote
| onlyCapability sshdata RsyncCapable = makeRsyncRemote
| otherwise = makeGitRemote
sshurl = T.unpack $ T.concat $
if rsync
then [u, h, T.pack ":", sshDirectory sshdata, T.pack "/"]
else [T.pack "ssh://", u, h, d, T.pack "/"]
where
u = maybe (T.pack "") (\v -> T.concat [v, T.pack "@"]) $ sshUserName sshdata
h = sshHostName sshdata
d
| T.pack "/" `T.isPrefixOf` sshDirectory sshdata = sshDirectory sshdata
| T.pack "~/" `T.isPrefixOf` sshDirectory sshdata = T.concat [T.pack "/", sshDirectory sshdata]
| otherwise = T.concat [T.pack "/~/", sshDirectory sshdata]
{- Runs an action that returns a name of the remote, and finishes adding it. -}
addRemote :: Annex RemoteName -> Annex Remote
addRemote a = do
@ -68,14 +49,24 @@ makeRsyncRemote name location = makeRemote name location $ const $ void $
go =<< Command.InitRemote.findExisting name
where
go Nothing = setupSpecialRemote name Rsync.remote config
=<< Command.InitRemote.generateNew name
go (Just v) = setupSpecialRemote name Rsync.remote config v
(Nothing, Command.InitRemote.newConfig name)
go (Just (u, c)) = setupSpecialRemote name Rsync.remote config (Just u, c)
config = M.fromList
[ ("encryption", "shared")
, ("rsyncurl", location)
, ("type", "rsync")
]
{- Inits a gcrypt special remote, and returns its name. -}
makeGCryptRemote :: RemoteName -> String -> KeyId -> Annex RemoteName
makeGCryptRemote remotename location keyid =
initSpecialRemote remotename GCrypt.remote $ M.fromList
[ ("type", "gcrypt")
, ("gitrepo", location)
, configureEncryption HybridEncryption
, ("keyid", keyid)
]
type SpecialRemoteMaker = RemoteName -> RemoteType -> R.RemoteConfig -> Annex RemoteName
{- Inits a new special remote. The name is used as a suggestion, but
@ -89,7 +80,7 @@ initSpecialRemote name remotetype config = go 0
r <- Command.InitRemote.findExisting fullname
case r of
Nothing -> setupSpecialRemote fullname remotetype config
=<< Command.InitRemote.generateNew fullname
(Nothing, Command.InitRemote.newConfig fullname)
Just _ -> go (n + 1)
{- Enables an existing special remote. -}
@ -98,15 +89,15 @@ enableSpecialRemote name remotetype config = do
r <- Command.InitRemote.findExisting name
case r of
Nothing -> error $ "Cannot find a special remote named " ++ name
Just v -> setupSpecialRemote name remotetype config v
Just (u, c) -> setupSpecialRemote name remotetype config (Just u, c)
setupSpecialRemote :: RemoteName -> RemoteType -> R.RemoteConfig -> (UUID, R.RemoteConfig) -> Annex RemoteName
setupSpecialRemote name remotetype config (u, c) = do
setupSpecialRemote :: RemoteName -> RemoteType -> R.RemoteConfig -> (Maybe UUID, R.RemoteConfig) -> Annex RemoteName
setupSpecialRemote name remotetype config (mu, c) = do
{- Currently, only 'weak' ciphers can be generated from the
- assistant, because otherwise GnuPG may block once the entropy
- pool is drained, and as of now there's no way to tell the user
- to perform IO actions to refill the pool. -}
c' <- R.setup remotetype u $
(c', u) <- R.setup remotetype mu $
M.insert "highRandomQuality" "false" $ M.union config c
describeUUID u name
configSet u c'
@ -128,7 +119,6 @@ makeRemote basename location a = do
g <- gitRepo
if not (any samelocation $ Git.remotes g)
then do
let name = uniqueRemoteName basename 0 g
a name
return name

View file

@ -39,6 +39,7 @@ import Assistant.Types.Pushes
import Assistant.Types.BranchChange
import Assistant.Types.Commits
import Assistant.Types.Changes
import Assistant.Types.RepoProblem
import Assistant.Types.Buddies
import Assistant.Types.NetMessager
import Assistant.Types.ThreadName
@ -63,6 +64,7 @@ data AssistantData = AssistantData
, failedPushMap :: FailedPushMap
, commitChan :: CommitChan
, changePool :: ChangePool
, repoProblemChan :: RepoProblemChan
, branchChangeHandle :: BranchChangeHandle
, buddyList :: BuddyList
, netMessager :: NetMessager
@ -80,6 +82,7 @@ newAssistantData st dstatus = AssistantData
<*> newFailedPushMap
<*> newCommitChan
<*> newChangePool
<*> newRepoProblemChan
<*> newBranchChangeHandle
<*> newBuddyList
<*> newNetMessager

View file

@ -16,6 +16,7 @@ import Assistant.Types.DaemonStatus
import Assistant.Types.UrlRenderer
import Assistant.DaemonStatus
import Assistant.Monad
import Utility.NotificationBroadcaster
import Control.Concurrent
import Control.Concurrent.Async
@ -34,7 +35,7 @@ import qualified Data.Text as T
- Named threads are run by a management thread, so if they crash
- an alert is displayed, allowing the thread to be restarted. -}
startNamedThread :: UrlRenderer -> NamedThread -> Assistant ()
startNamedThread urlrenderer namedthread@(NamedThread name a) = do
startNamedThread urlrenderer (NamedThread afterstartupsanitycheck name a) = do
m <- startedThreads <$> getDaemonStatus
case M.lookup name m of
Nothing -> start
@ -44,14 +45,24 @@ startNamedThread urlrenderer namedthread@(NamedThread name a) = do
Right Nothing -> noop
_ -> start
where
start = do
start
| afterstartupsanitycheck = do
status <- getDaemonStatus
h <- liftIO $ newNotificationHandle False $
startupSanityCheckNotifier status
startwith $ runmanaged $
liftIO $ waitNotification h
| otherwise = startwith $ runmanaged noop
startwith runner = do
d <- getAssistant id
aid <- liftIO $ runmanaged $ d { threadName = name }
restart <- asIO $ startNamedThread urlrenderer namedthread
aid <- liftIO $ runner $ d { threadName = name }
restart <- asIO $ startNamedThread urlrenderer (NamedThread False name a)
modifyDaemonStatus_ $ \s -> s
{ startedThreads = M.insertWith' const name (aid, restart) (startedThreads s) }
runmanaged d = do
aid <- async $ runAssistant d a
runmanaged first d = do
aid <- async $ runAssistant d $ do
void first
a
void $ forkIO $ manager d aid
return aid
manager d aid = do
@ -65,7 +76,7 @@ startNamedThread urlrenderer namedthread@(NamedThread name a) = do
]
hPutStrLn stderr msg
#ifdef WITH_WEBAPP
button <- runAssistant d $ mkAlertButton
button <- runAssistant d $ mkAlertButton True
(T.pack "Restart Thread")
urlrenderer
(RestartThreadR name)
@ -75,7 +86,7 @@ startNamedThread urlrenderer namedthread@(NamedThread name a) = do
#endif
namedThreadId :: NamedThread -> Assistant (Maybe ThreadId)
namedThreadId (NamedThread name _) = do
namedThreadId (NamedThread _ name _) = do
m <- startedThreads <$> getDaemonStatus
return $ asyncThreadId . fst <$> M.lookup name m

View file

@ -29,6 +29,10 @@ notifyNetMessagerRestart :: Assistant ()
notifyNetMessagerRestart =
flip writeSV () <<~ (netMessagerRestart . netMessager)
{- This can be used to get an early indication if the network has
- changed, to immediately restart a connection. However, that is not
- available on all systems, so clients also need to deal with
- restarting dropped connections in the usual way. -}
waitNetMessagerRestart :: Assistant ()
waitNetMessagerRestart = readSV <<~ (netMessagerRestart . netMessager)

View file

@ -28,7 +28,7 @@ data PairStage
| PairAck
{- "I saw your PairAck; you can stop sending them." -}
| PairDone
deriving (Eq, Read, Show, Ord)
deriving (Eq, Read, Show, Ord, Enum)
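{- Enum is derived so the pair listener can compute the stage it
- expects next, eg show (succ . inProgressPairStage <$> pip); the
- constructors are declared in protocol order, so succ PairReq ==
- PairAck and succ PairAck == PairDone. -}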
newtype PairMsg = PairMsg (Verifiable (PairStage, PairData, SomeAddr))
deriving (Eq, Read, Show)

View file

@ -12,7 +12,9 @@ import Assistant.Ssh
import Assistant.Pairing
import Assistant.Pairing.Network
import Assistant.MakeRemote
import Assistant.Sync
import Config.Cost
import Config
import Network.Socket
import qualified Data.Text as T
@ -22,7 +24,7 @@ import qualified Data.Text as T
setupAuthorizedKeys :: PairMsg -> FilePath -> IO ()
setupAuthorizedKeys msg repodir = do
validateSshPubKey pubkey
unlessM (liftIO $ addAuthorizedKeys False repodir pubkey) $
unlessM (liftIO $ addAuthorizedKeys True repodir pubkey) $
error "failed setting up ssh authorized keys"
where
pubkey = remoteSshPubKey $ pairMsgData msg
@ -43,7 +45,9 @@ finishedLocalPairing msg keypair = do
, "git-annex-shell -c configlist " ++ T.unpack (sshDirectory sshdata)
]
Nothing
void $ makeSshRemote False sshdata (Just semiExpensiveRemoteCost)
r <- liftAnnex $ addRemote $ makeSshRemote sshdata
liftAnnex $ setRemoteCost r semiExpensiveRemoteCost
syncRemote r
{- Mostly a straightforward conversion. Except:
- * Determine the best hostname to use to contact the host.
@ -63,7 +67,7 @@ pairMsgToSshData msg = do
, sshRepoName = genSshRepoName hostname dir
, sshPort = 22
, needsPubKey = True
, rsyncOnly = False
, sshCapabilities = [GitAnnexShellCapable, GitCapable, RsyncCapable]
}
{- Finds the best hostname to use for the host that sent the PairMsg.

153
Assistant/Repair.hs Normal file
View file

@ -0,0 +1,153 @@
{- git-annex assistant repository repair
-
- Copyright 2013 Joey Hess <joey@kitenet.net>
-
- Licensed under the GNU AGPL version 3 or higher.
-}
{-# LANGUAGE CPP #-}
module Assistant.Repair where
import Assistant.Common
import Command.Repair (repairAnnexBranch)
import Git.Fsck (FsckResults, foundBroken)
import Git.Repair (runRepairOf)
import qualified Git
import qualified Remote
import qualified Types.Remote as Remote
import Logs.FsckResults
import Annex.UUID
import Utility.Batch
import Config.Files
import Assistant.Sync
import Assistant.Alert
import Assistant.DaemonStatus
import Assistant.Types.UrlRenderer
#ifdef WITH_WEBAPP
import Assistant.WebApp.Types
import qualified Data.Text as T
#endif
import qualified Utility.Lsof as Lsof
import Utility.ThreadScheduler
import Control.Concurrent.Async
{- When the FsckResults require a repair, tries to do a non-destructive
- repair. If that fails, pops up an alert. -}
repairWhenNecessary :: UrlRenderer -> UUID -> Maybe Remote -> FsckResults -> Assistant Bool
repairWhenNecessary urlrenderer u mrmt fsckresults
| foundBroken fsckresults = do
liftAnnex $ writeFsckResults u fsckresults
repodesc <- liftAnnex $ Remote.prettyUUID u
ok <- alertWhile (repairingAlert repodesc)
(runRepair u mrmt False)
#ifdef WITH_WEBAPP
unless ok $ do
button <- mkAlertButton True (T.pack "Click Here") urlrenderer $
RepairRepositoryR u
void $ addAlert $ brokenRepositoryAlert button
#endif
return ok
| otherwise = return False
runRepair :: UUID -> Maybe Remote -> Bool -> Assistant Bool
runRepair u mrmt destructiverepair = do
fsckresults <- liftAnnex $ readFsckResults u
myu <- liftAnnex getUUID
ok <- if u == myu
then localrepair fsckresults
else remoterepair fsckresults
liftAnnex $ writeFsckResults u Nothing
debug [ "Repaired", show u, show ok ]
return ok
where
localrepair fsckresults = do
-- Stop the watcher from running while running repairs.
changeSyncable Nothing False
-- This intentionally runs the repair inside the Annex
-- monad, which is not strictly necessary, but keeps
-- other threads that might be trying to use the Annex
-- from running until it completes.
ok <- liftAnnex $ repair fsckresults Nothing
-- Run a background fast fsck if a destructive repair had
-- to be done, to ensure that the git-annex branch
-- reflects the current state of the repo.
when destructiverepair $
backgroundfsck [ Param "--fast" ]
-- Start the watcher running again. This also triggers it to
-- do a startup scan, which is especially important if the
-- git repo repair removed files from the index file. Those
-- files will be seen as new, and re-added to the repository.
when (ok || destructiverepair) $
changeSyncable Nothing True
return ok
remoterepair fsckresults = case Remote.repairRepo =<< mrmt of
Nothing -> return False
Just mkrepair -> do
thisrepopath <- liftIO . absPath
=<< liftAnnex (fromRepo Git.repoPath)
a <- liftAnnex $ mkrepair $
repair fsckresults (Just thisrepopath)
liftIO $ catchBoolIO a
repair fsckresults referencerepo = do
(ok, stillmissing, modifiedbranches) <- inRepo $
runRepairOf fsckresults destructiverepair referencerepo
when destructiverepair $
repairAnnexBranch stillmissing modifiedbranches
return ok
backgroundfsck params = liftIO $ void $ async $ do
program <- readProgramFile
batchCommand program (Param "fsck" : params)
{- Detect when a git lock file exists and has no git process currently
- writing to it. This strongly suggests it is a stale lock file.
-
- However, this could be on a network filesystem, which is not very safe
- anyway (the assistant relies on being able to check when files have
- no writers to know when to commit them). Just in case, when the lock
- file appears stale, we delay for one minute, and check its size. If
- the size changed, delay for another minute, and so on. This will at
- least work to detect if another machine is writing out a new index
- file, since git does so by writing the new content to index.lock.
-
- Returns true if locks were cleaned up.
-}
repairStaleGitLocks :: Git.Repo -> Assistant Bool
repairStaleGitLocks r = do
lockfiles <- filter (not . isInfixOf "gc.pid")
. filter (".lock" `isSuffixOf`)
<$> liftIO (findgitfiles r)
repairStaleLocks lockfiles
return $ not $ null lockfiles
where
findgitfiles = dirContentsRecursiveSkipping (== dropTrailingPathSeparator annexDir) . Git.localGitDir
repairStaleLocks :: [FilePath] -> Assistant ()
repairStaleLocks lockfiles = go =<< getsizes
where
getsize lf = catchMaybeIO $
(\s -> (lf, fileSize s)) <$> getFileStatus lf
getsizes = liftIO $ catMaybes <$> mapM getsize lockfiles
go [] = return ()
go l = ifM (liftIO $ null <$> Lsof.query ("--" : map fst l))
( do
waitforit "to check stale git lock file"
l' <- getsizes
if l' == l
then liftIO $ mapM_ nukeFile (map fst l)
else go l'
, do
waitforit "for git lock file writer"
go =<< getsizes
)
waitforit why = do
notice ["Waiting for 60 seconds", why]
liftIO $ threadDelaySeconds $ Seconds 60

34
Assistant/RepoProblem.hs Normal file
View file

@ -0,0 +1,34 @@
{- git-annex assistant remote problem handling
-
- Copyright 2013 Joey Hess <joey@kitenet.net>
-
- Licensed under the GNU GPL version 3 or higher.
-}
module Assistant.RepoProblem where
import Assistant.Common
import Assistant.Types.RepoProblem
import Utility.TList
import Control.Concurrent.STM
{- Gets all repositories that have problems. Blocks until there is at
- least one. -}
getRepoProblems :: Assistant [RepoProblem]
getRepoProblems = nubBy sameRepoProblem
<$> (atomically . getTList) <<~ repoProblemChan
{- Indicates that there was a problem with a repository, and the problem
- appears to not be a transient (eg network connection) problem.
-
- If the problem is able to be repaired, the passed action will be run.
- (However, if multiple problems are reported with a single repository,
- only a single action will be run.)
-}
repoHasProblem :: UUID -> Assistant () -> Assistant ()
repoHasProblem u afterrepair = do
rp <- RepoProblem
<$> pure u
<*> asIO afterrepair
(atomically . flip consTList rp) <<~ repoProblemChan
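{- A usage sketch, matching the caller added to Assistant.Sync in this
- commit:
-
-   repoHasProblem (Remote.uuid r) (syncRemote r)
-
- which reports a problem with a remote, and re-syncs it once the
- repair succeeds. -}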

View file

@ -1,6 +1,6 @@
{- git-annex assistant ssh utilities
-
- Copyright 2012 Joey Hess <joey@kitenet.net>
- Copyright 2012-2013 Joey Hess <joey@kitenet.net>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@ -11,6 +11,8 @@ import Common.Annex
import Utility.Tmp
import Utility.UserInfo
import Utility.Shell
import Utility.Rsync
import Utility.FileMode
import Git.Remote
import Data.Text (Text)
@ -25,10 +27,19 @@ data SshData = SshData
, sshRepoName :: String
, sshPort :: Int
, needsPubKey :: Bool
, rsyncOnly :: Bool
, sshCapabilities :: [SshServerCapability]
}
deriving (Read, Show, Eq)
data SshServerCapability = GitAnnexShellCapable | GitCapable | RsyncCapable
deriving (Read, Show, Eq)
hasCapability :: SshData -> SshServerCapability -> Bool
hasCapability d c = c `elem` sshCapabilities d
onlyCapability :: SshData -> SshServerCapability -> Bool
onlyCapability d c = all (== c) (sshCapabilities d)
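{- For example (values illustrative): with sshCapabilities =
- [RsyncCapable], onlyCapability d RsyncCapable is True, so only a
- rsync remote can be set up; with the full
- [GitAnnexShellCapable, GitCapable, RsyncCapable] it is False. Note
- that an empty capability list makes onlyCapability vacuously True. -}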
data SshKeyPair = SshKeyPair
{ sshPubKey :: String
, sshPrivKey :: String
@ -52,6 +63,48 @@ sshDir = do
genSshHost :: Text -> Maybe Text -> String
genSshHost host user = maybe "" (\v -> T.unpack v ++ "@") user ++ T.unpack host
{- Generates a ssh or rsync url from a SshData. -}
genSshUrl :: SshData -> String
genSshUrl sshdata = addtrailingslash $ T.unpack $ T.concat $
if (onlyCapability sshdata RsyncCapable)
then [u, h, T.pack ":", sshDirectory sshdata]
else [T.pack "ssh://", u, h, d]
where
u = maybe (T.pack "") (\v -> T.concat [v, T.pack "@"]) $ sshUserName sshdata
h = sshHostName sshdata
d
| T.pack "/" `T.isPrefixOf` sshDirectory sshdata = sshDirectory sshdata
| T.pack "~/" `T.isPrefixOf` sshDirectory sshdata = T.concat [T.pack "/", sshDirectory sshdata]
| otherwise = T.concat [T.pack "/~/", sshDirectory sshdata]
addtrailingslash s
| "/" `isSuffixOf` s = s
| otherwise = s ++ "/"
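{- A worked example (values illustrative): with no user name, host
- "example.com", and directory "~/annex", a git-capable SshData yields
- "ssh://example.com/~/annex/", while a rsync-only one yields
- "example.com:~/annex/". -}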
{- Reverses genSshUrl -}
parseSshUrl :: String -> Maybe SshData
parseSshUrl u
| "ssh://" `isPrefixOf` u = fromssh (drop (length "ssh://") u)
| otherwise = fromrsync u
where
mkdata (userhost, dir) = Just $ SshData
{ sshHostName = T.pack host
, sshUserName = if null user then Nothing else Just $ T.pack user
, sshDirectory = T.pack dir
, sshRepoName = genSshRepoName host dir
-- dummy values, cannot determine from url
, sshPort = 22
, needsPubKey = True
, sshCapabilities = []
}
where
(user, host) = if '@' `elem` userhost
then separate (== '@') userhost
else ("", userhost)
fromrsync s
| not (rsyncUrlIsShell u) = Nothing
| otherwise = mkdata $ separate (== ':') s
fromssh = mkdata . break (== '/')
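{- A worked example (values illustrative): parseSshUrl
- "ssh://joey@example.com/~/annex" yields sshUserName = Just "joey",
- sshHostName = "example.com", and sshDirectory = "/~/annex"; the port
- and capabilities cannot be determined from a url, so dummies are
- filled in. -}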
{- Generates a git remote name, like host_dir or host -}
genSshRepoName :: String -> FilePath -> String
genSshRepoName host dir
@ -92,12 +145,12 @@ validateSshPubKey pubkey
safeincomment c = isAlphaNum c || c == '@' || c == '-' || c == '_' || c == '.'
addAuthorizedKeys :: Bool -> FilePath -> SshPubKey -> IO Bool
addAuthorizedKeys rsynconly dir pubkey = boolSystem "sh"
[ Param "-c" , Param $ addAuthorizedKeysCommand rsynconly dir pubkey ]
addAuthorizedKeys gitannexshellonly dir pubkey = boolSystem "sh"
[ Param "-c" , Param $ addAuthorizedKeysCommand gitannexshellonly dir pubkey ]
removeAuthorizedKeys :: Bool -> FilePath -> SshPubKey -> IO ()
removeAuthorizedKeys rsynconly dir pubkey = do
let keyline = authorizedKeysLine rsynconly dir pubkey
removeAuthorizedKeys gitannexshellonly dir pubkey = do
let keyline = authorizedKeysLine gitannexshellonly dir pubkey
sshdir <- sshDir
let keyfile = sshdir </> "authorized_keys"
ls <- lines <$> readFileStrict keyfile
@ -110,7 +163,7 @@ removeAuthorizedKeys rsynconly dir pubkey = do
- present.
-}
addAuthorizedKeysCommand :: Bool -> FilePath -> SshPubKey -> String
addAuthorizedKeysCommand rsynconly dir pubkey = intercalate "&&"
addAuthorizedKeysCommand gitannexshellonly dir pubkey = intercalate "&&"
[ "mkdir -p ~/.ssh"
, intercalate "; "
[ "if [ ! -e " ++ wrapper ++ " ]"
@ -122,7 +175,7 @@ addAuthorizedKeysCommand rsynconly dir pubkey = intercalate "&&"
, "chmod 600 ~/.ssh/authorized_keys"
, unwords
[ "echo"
, shellEscape $ authorizedKeysLine rsynconly dir pubkey
, shellEscape $ authorizedKeysLine gitannexshellonly dir pubkey
, ">>~/.ssh/authorized_keys"
]
]
@ -141,11 +194,11 @@ addAuthorizedKeysCommand rsynconly dir pubkey = intercalate "&&"
runshell var = "exec git-annex-shell -c \"" ++ var ++ "\""
authorizedKeysLine :: Bool -> FilePath -> SshPubKey -> String
authorizedKeysLine rsynconly dir pubkey
authorizedKeysLine gitannexshellonly dir pubkey
| gitannexshellonly = limitcommand ++ pubkey
{- TODO: Locking down rsync is difficult, requiring a rather
- long perl script. -}
| rsynconly = pubkey
| otherwise = limitcommand ++ pubkey
| otherwise = pubkey
where
limitcommand = "command=\"GIT_ANNEX_SHELL_DIRECTORY="++shellEscape dir++" ~/.ssh/git-annex-shell\",no-agent-forwarding,no-port-forwarding,no-X11-forwarding "
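{- For example (key and directory illustrative), with git-annex-shell
- lockdown enabled the line appended to ~/.ssh/authorized_keys is
- roughly:
-
-   command="GIT_ANNEX_SHELL_DIRECTORY='annex' ~/.ssh/git-annex-shell",no-agent-forwarding,no-port-forwarding,no-X11-forwarding ssh-rsa AAAA...
-
- which forces every login using that key through git-annex-shell. -}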
@ -181,12 +234,8 @@ setupSshKeyPair sshkeypair sshdata = do
sshdir <- sshDir
createDirectoryIfMissing True $ parentDir $ sshdir </> sshprivkeyfile
unlessM (doesFileExist $ sshdir </> sshprivkeyfile) $ do
h <- fdToHandle =<<
createFile (sshdir </> sshprivkeyfile)
(unionFileModes ownerWriteMode ownerReadMode)
hPutStr h (sshPrivKey sshkeypair)
hClose h
unlessM (doesFileExist $ sshdir </> sshprivkeyfile) $
writeFileProtected (sshdir </> sshprivkeyfile) (sshPrivKey sshkeypair)
unlessM (doesFileExist $ sshdir </> sshpubkeyfile) $
writeFile (sshdir </> sshpubkeyfile) (sshPubKey sshkeypair)

View file

@ -23,9 +23,18 @@ import qualified Git.Command
import qualified Git.Ref
import qualified Remote
import qualified Types.Remote as Remote
import qualified Remote.List as Remote
import qualified Annex.Branch
import Annex.UUID
import Annex.TaggedPush
import qualified Config
import Git.Config
import Assistant.NamedThread
import Assistant.Threads.Watcher (watchThread, WatcherControl(..))
import Assistant.TransferSlots
import Assistant.TransferQueue
import Assistant.RepoProblem
import Logs.Transfer
import Data.Time.Clock
import qualified Data.Map as M
@ -44,13 +53,22 @@ import Control.Concurrent
- they push to us. Since XMPP pushes run asynchronously, any scan of the
- XMPP remotes has to be deferred until they're done pushing to us, so
- all XMPP remotes are marked as possibly desynced.
-
- Also handles signaling any connectRemoteNotifiers, after the syncing is
- done.
-}
reconnectRemotes :: Bool -> [Remote] -> Assistant ()
reconnectRemotes _ [] = noop
reconnectRemotes notifypushes rs = void $ do
modifyDaemonStatus_ $ \s -> s
{ desynced = S.union (S.fromList $ map Remote.uuid xmppremotes) (desynced s) }
syncAction rs (const go)
rs' <- liftIO $ filterM (Remote.checkAvailable True) rs
unless (null rs') $ do
modifyDaemonStatus_ $ \s -> s
{ desynced = S.union (S.fromList $ map Remote.uuid xmppremotes) (desynced s) }
failedrs <- syncAction rs' (const go)
forM_ failedrs $ \r ->
whenM (liftIO $ Remote.checkAvailable False r) $
repoHasProblem (Remote.uuid r) (syncRemote r)
mapM_ signal $ filter (`notElem` failedrs) rs'
where
gitremotes = filter (notspecialremote . Remote.repo) rs
(xmppremotes, nonxmppremotes) = partition isXMPPRemote rs
@ -73,6 +91,9 @@ reconnectRemotes notifypushes rs = void $ do
filter (not . remoteAnnexIgnore . Remote.gitconfig)
nonxmppremotes
return failed
signal r = liftIO . mapM_ (flip tryPutMVar ())
=<< fromMaybe [] . M.lookup (Remote.uuid r) . connectRemoteNotifiers
<$> getDaemonStatus
{- Updates the local sync branch, then pushes it to all remotes, in
- parallel, along with the git-annex branch. This is the same
@ -220,3 +241,36 @@ syncRemote remote = do
reconnectRemotes False [remote]
addScanRemotes True [remote]
void $ liftIO $ forkIO $ thread
{- Use Nothing to change autocommit setting; or a remote to change
- its sync setting. -}
changeSyncable :: Maybe Remote -> Bool -> Assistant ()
changeSyncable Nothing enable = do
liftAnnex $ Config.setConfig key (boolConfig enable)
liftIO . maybe noop (`throwTo` signal)
=<< namedThreadId watchThread
where
key = Config.annexConfig "autocommit"
signal
| enable = ResumeWatcher
| otherwise = PauseWatcher
changeSyncable (Just r) True = do
liftAnnex $ changeSyncFlag r True
syncRemote r
changeSyncable (Just r) False = do
liftAnnex $ changeSyncFlag r False
updateSyncRemotes
{- Stop all transfers to or from this remote.
- XXX Can't stop any ongoing scan, or git syncs. -}
void $ dequeueTransfers tofrom
mapM_ (cancelTransfer False) =<<
filter tofrom . M.keys . currentTransfers <$> getDaemonStatus
where
tofrom t = transferUUID t == Remote.uuid r
changeSyncFlag :: Remote -> Bool -> Annex ()
changeSyncFlag r enabled = do
Config.setConfig key (boolConfig enabled)
void Remote.remoteListRefresh
where
key = Config.remoteConfig (Remote.repo r) "sync"

View file

@ -112,7 +112,7 @@ waitChangeTime a = waitchanges 0
- that make up a file rename? Or some of the pairs that make up
- a directory rename?
-}
possiblyrename cs = all renamepart cs
possiblyrename = all renamepart
renamepart (PendingAddChange _ _) = True
renamepart c = isRmChange c
@ -309,7 +309,7 @@ handleAdds delayadd cs = returnWhen (null incomplete) $ do
inRepo (Git.LsFiles.notInRepo False $ map changeFile pending)
-- note: timestamp info is lost here
let ts = changeTime exemplar
return (map (PendingAddChange ts) newfiles, void $ liftIO $ cleanup)
return (map (PendingAddChange ts) newfiles, void $ liftIO cleanup)
returnWhen c a
| c = return otherchanges
@ -317,12 +317,13 @@ handleAdds delayadd cs = returnWhen (null incomplete) $ do
add :: Change -> Assistant (Maybe Change)
add change@(InProcessAddChange { keySource = ks }) =
catchDefaultIO Nothing <~> do
sanitycheck ks $ do
key <- liftAnnex $ do
showStart "add" $ keyFilename ks
Command.Add.ingest $ Just ks
maybe (failedingest change) (done change $ keyFilename ks) key
catchDefaultIO Nothing <~> doadd
where
doadd = sanitycheck ks $ do
(mkey, mcache) <- liftAnnex $ do
showStart "add" $ keyFilename ks
Command.Add.ingest $ Just ks
maybe (failedingest change) (done change mcache $ keyFilename ks) mkey
add _ = return Nothing
{- In direct mode, avoid overhead of re-ingesting a renamed
@ -349,7 +350,7 @@ handleAdds delayadd cs = returnWhen (null incomplete) $ do
fastadd change key = do
let source = keySource change
liftAnnex $ Command.Add.finishIngestDirect key source
done change (keyFilename source) key
done change Nothing (keyFilename source) key
removedKeysMap :: InodeComparisonType -> [Change] -> Annex (M.Map InodeCacheKey Key)
removedKeysMap ct l = do
@ -365,13 +366,13 @@ handleAdds delayadd cs = returnWhen (null incomplete) $ do
liftAnnex showEndFail
return Nothing
done change file key = liftAnnex $ do
done change mcache file key = liftAnnex $ do
logStatus key InfoPresent
link <- ifM isDirect
( inRepo $ gitAnnexLink file key
, Command.Add.link file key True
, Command.Add.link file key mcache
)
whenM (pure DirWatcher.eventsCoalesce <||> isDirect) $ do
whenM (pure DirWatcher.eventsCoalesce <||> isDirect) $
stageSymlink file =<< hashSymlink link
showEndOk
return $ Just $ finishedChange change key
@ -415,8 +416,8 @@ safeToAdd _ [] [] = return []
safeToAdd delayadd pending inprocess = do
maybe noop (liftIO . threadDelaySeconds) delayadd
liftAnnex $ do
keysources <- mapM Command.Add.lockDown (map changeFile pending)
let inprocess' = inprocess ++ catMaybes (map mkinprocess $ zip pending keysources)
keysources <- forM pending $ Command.Add.lockDown . changeFile
let inprocess' = inprocess ++ mapMaybe mkinprocess (zip pending keysources)
openfiles <- S.fromList . map fst3 . filter openwrite <$>
findopenfiles (map keySource inprocess')
let checked = map (check openfiles) inprocess'
@ -434,7 +435,7 @@ safeToAdd delayadd pending inprocess = do
| S.member (contentLocation ks) openfiles = Left change
check _ change = Right change
mkinprocess (c, Just ks) = Just $ InProcessAddChange
mkinprocess (c, Just ks) = Just InProcessAddChange
{ changeTime = changeTime c
, keySource = ks
}

View file

@ -12,13 +12,14 @@ import Assistant.BranchChange
import Assistant.DaemonStatus
import Assistant.Commits
import Utility.ThreadScheduler
import Logs
import Logs.UUID
import Logs.Trust
import Logs.Remote
import Logs.PreferredContent
import Logs.Group
import Remote.List (remoteListRefresh)
import qualified Git.LsTree as LsTree
import Git.FilePath
import qualified Annex.Branch
import qualified Data.Set as S
@ -52,12 +53,13 @@ configMonitorThread = namedThread "ConfigMonitor" $ loop =<< getConfigs
type Configs = S.Set (FilePath, String)
{- All git-annex's config files, and actions to run when they change. -}
configFilesActions :: [(FilePath, Annex ())]
configFilesActions :: [(FilePath, Assistant ())]
configFilesActions =
[ (uuidLog, void $ uuidMapLoad)
, (remoteLog, void remoteListRefresh)
, (trustLog, void trustMapLoad)
, (groupLog, void groupMapLoad)
[ (uuidLog, void $ liftAnnex uuidMapLoad)
, (remoteLog, void $ liftAnnex remoteListRefresh)
, (trustLog, void $ liftAnnex trustMapLoad)
, (groupLog, void $ liftAnnex groupMapLoad)
, (scheduleLog, void updateScheduleLog)
-- Preferred content settings depend on most of the other configs,
-- so will be reloaded whenever any configs change.
, (preferredContentLog, noop)
@ -65,13 +67,12 @@ configFilesActions =
reloadConfigs :: Configs -> Assistant ()
reloadConfigs changedconfigs = do
liftAnnex $ do
sequence_ as
void preferredContentMapLoad
sequence_ as
void $ liftAnnex preferredContentMapLoad
{- Changes to the remote log, or the trust log, can affect the
- syncRemotes list. Changes to the uuid log may affect its
- display so are also included. -}
when (any (`elem` fs) [remoteLog, trustLog, uuidLog]) $
when (any (`elem` fs) [remoteLog, trustLog, uuidLog])
updateSyncRemotes
where
(fs, as) = unzip $ filter (flip S.member changedfiles . fst)
@ -83,4 +84,4 @@ getConfigs = S.fromList . map extract
<$> liftAnnex (inRepo $ LsTree.lsTreeFiles Annex.Branch.fullname files)
where
files = map fst configFilesActions
extract treeitem = (LsTree.file treeitem, LsTree.sha treeitem)
extract treeitem = (getTopFilePath $ LsTree.file treeitem, LsTree.sha treeitem)

View file

@ -0,0 +1,225 @@
{- git-annex assistant scheduled jobs runner
-
- Copyright 2013 Joey Hess <joey@kitenet.net>
-
- Licensed under the GNU GPL version 3 or higher.
-}
{-# LANGUAGE DeriveDataTypeable #-}
module Assistant.Threads.Cronner (
cronnerThread
) where
import Assistant.Common
import Assistant.DaemonStatus
import Utility.NotificationBroadcaster
import Annex.UUID
import Config.Files
import Logs.Schedule
import Utility.Scheduled
import Types.ScheduledActivity
import Utility.ThreadScheduler
import Utility.HumanTime
import Utility.Batch
import Assistant.TransferQueue
import Annex.Content
import Logs.Transfer
import Assistant.Types.UrlRenderer
import Assistant.Alert
import Remote
import qualified Types.Remote as Remote
import qualified Git
import qualified Git.Fsck
import Assistant.Fsck
import Assistant.Repair
import Control.Concurrent.Async
import Control.Concurrent.MVar
import Data.Time.LocalTime
import Data.Time.Clock
import qualified Data.Map as M
import qualified Data.Set as S
{- Loads schedules for this repository, and fires off one thread for each
- scheduled event that runs on this repository. Each thread sleeps until
- its event is scheduled to run.
-
- To handle events that run on remotes, which need to only run when
- their remote gets connected, threads are also started, and are passed
- a MVar to wait on, which is stored in the DaemonStatus's
- connectRemoteNotifiers.
-
- In the meantime the main thread waits for any changes to the
- schedules. When there's a change, compare the old and new list of
- schedules to find deleted and added ones. Start new threads for added
- ones, and kill the threads for deleted ones. -}
cronnerThread :: UrlRenderer -> NamedThread
cronnerThread urlrenderer = namedThreadUnchecked "Cronner" $ do
fsckNudge urlrenderer Nothing
dstatus <- getDaemonStatus
h <- liftIO $ newNotificationHandle False (scheduleLogNotifier dstatus)
go h M.empty M.empty
where
go h amap nmap = do
activities <- liftAnnex $ scheduleGet =<< getUUID
let addedactivities = activities `S.difference` M.keysSet amap
let removedactivities = M.keysSet amap `S.difference` activities
forM_ (S.toList removedactivities) $ \activity ->
case M.lookup activity amap of
Just a -> do
debug ["stopping removed job for", fromScheduledActivity activity, show (asyncThreadId a)]
liftIO $ cancel a
Nothing -> noop
lastruntimes <- liftAnnex getLastRunTimes
started <- startactivities (S.toList addedactivities) lastruntimes
let addedamap = M.fromList $ map fst started
let addednmap = M.fromList $ catMaybes $ map snd started
let removefiltered = M.filterWithKey (\k _ -> S.member k removedactivities)
let amap' = M.difference (M.union addedamap amap) (removefiltered amap)
let nmap' = M.difference (M.union addednmap nmap) (removefiltered nmap)
modifyDaemonStatus_ $ \s -> s { connectRemoteNotifiers = M.fromListWith (++) (M.elems nmap') }
liftIO $ waitNotification h
debug ["reloading changed activities"]
go h amap' nmap'
startactivities as lastruntimes = forM as $ \activity ->
case connectActivityUUID activity of
Nothing -> do
runner <- asIO2 (sleepingActivityThread urlrenderer)
a <- liftIO $ async $
runner activity (M.lookup activity lastruntimes)
return ((activity, a), Nothing)
Just u -> do
mvar <- liftIO newEmptyMVar
runner <- asIO2 (remoteActivityThread urlrenderer mvar)
a <- liftIO $ async $
runner activity (M.lookup activity lastruntimes)
return ((activity, a), Just (activity, (u, [mvar])))
{- Calculate the next time the activity is scheduled to run, then
- sleep until that time, and run it. Then call setLastRunTime, and
- loop.
-}
sleepingActivityThread :: UrlRenderer -> ScheduledActivity -> Maybe LocalTime -> Assistant ()
sleepingActivityThread urlrenderer activity lasttime = go lasttime =<< getnexttime lasttime
where
getnexttime = liftIO . nextTime schedule
go _ Nothing = debug ["no scheduled events left for", desc]
go l (Just (NextTimeExactly t)) = waitrun l t Nothing
go l (Just (NextTimeWindow windowstart windowend)) =
waitrun l windowstart (Just windowend)
desc = fromScheduledActivity activity
schedule = getSchedule activity
waitrun l t mmaxt = do
seconds <- liftIO $ secondsUntilLocalTime t
when (seconds > Seconds 0) $ do
debug ["waiting", show seconds, "for next scheduled", desc]
liftIO $ threadDelaySeconds seconds
now <- liftIO getCurrentTime
tz <- liftIO $ getTimeZone now
let nowt = utcToLocalTime tz now
if tolate nowt tz
then do
debug ["too late to run scheduled", desc]
go l =<< getnexttime l
else run nowt
where
tolate nowt tz = case mmaxt of
Just maxt -> nowt > maxt
-- allow the job to start 10 minutes late
Nothing -> diffUTCTime
(localTimeToUTC tz nowt)
(localTimeToUTC tz t) > 600
run nowt = do
runActivity urlrenderer activity nowt
go (Just nowt) =<< getnexttime (Just nowt)
{- Wait for the remote to become available by waiting on the MVar.
- Then check if the time is within a time window when activity
- is scheduled to run, and if so run it.
- Otherwise, just wait again on the MVar.
-}
remoteActivityThread :: UrlRenderer -> MVar () -> ScheduledActivity -> Maybe LocalTime -> Assistant ()
remoteActivityThread urlrenderer mvar activity lasttime = do
liftIO $ takeMVar mvar
go =<< liftIO (nextTime (getSchedule activity) lasttime)
where
go (Just (NextTimeWindow windowstart windowend)) = do
now <- liftIO getCurrentTime
tz <- liftIO $ getTimeZone now
if now >= localTimeToUTC tz windowstart && now <= localTimeToUTC tz windowend
then do
let nowt = utcToLocalTime tz now
runActivity urlrenderer activity nowt
loop (Just nowt)
else loop lasttime
go _ = noop -- running at exact time not handled here
loop = remoteActivityThread urlrenderer mvar activity
secondsUntilLocalTime :: LocalTime -> IO Seconds
secondsUntilLocalTime t = do
now <- getCurrentTime
tz <- getTimeZone now
let secs = truncate $ diffUTCTime (localTimeToUTC tz t) now
return $ if secs > 0
then Seconds secs
else Seconds 0
runActivity :: UrlRenderer -> ScheduledActivity -> LocalTime -> Assistant ()
runActivity urlrenderer activity nowt = do
debug ["starting", desc]
runActivity' urlrenderer activity
debug ["finished", desc]
liftAnnex $ setLastRunTime activity nowt
where
desc = fromScheduledActivity activity
runActivity' :: UrlRenderer -> ScheduledActivity -> Assistant ()
runActivity' urlrenderer (ScheduledSelfFsck _ d) = do
program <- liftIO $ readProgramFile
g <- liftAnnex gitRepo
fsckresults <- showFscking urlrenderer Nothing $ tryNonAsync $ do
void $ batchCommand program (Param "fsck" : annexFsckParams d)
Git.Fsck.findBroken True g
u <- liftAnnex getUUID
void $ repairWhenNecessary urlrenderer u Nothing fsckresults
mapM_ reget =<< liftAnnex (dirKeys gitAnnexBadDir)
where
reget k = queueTransfers "fsck found bad file; redownloading" Next k Nothing Download
runActivity' urlrenderer (ScheduledRemoteFsck u s d) = handle =<< liftAnnex (remoteFromUUID u)
where
handle Nothing = debug ["skipping remote fsck of uuid without a configured remote", fromUUID u, fromSchedule s]
handle (Just rmt) = void $ case Remote.remoteFsck rmt of
Nothing -> go rmt $ do
program <- readProgramFile
void $ batchCommand program $
[ Param "fsck"
-- avoid downloading files
, Param "--fast"
, Param "--from"
, Param $ Remote.name rmt
] ++ annexFsckParams d
Just mkfscker -> do
{- Note that having mkfscker return an IO action
- avoids running a long duration fsck in the
- Annex monad. -}
go rmt =<< liftAnnex (mkfscker (annexFsckParams d))
go rmt annexfscker = do
fsckresults <- showFscking urlrenderer (Just rmt) $ tryNonAsync $ do
void annexfscker
let r = Remote.repo rmt
if Git.repoIsLocal r && not (Git.repoIsLocalUnknown r)
then Just <$> Git.Fsck.findBroken True r
else pure Nothing
maybe noop (void . repairWhenNecessary urlrenderer u (Just rmt)) fsckresults
annexFsckParams :: Duration -> [CommandParam]
annexFsckParams d =
[ Param "--incremental-schedule=1d"
, Param $ "--time-limit=" ++ fromDuration d
]
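{- So a scheduled self fsck with, eg, a one hour duration ends up
- running via batchCommand roughly (illustrative):
-
-   git-annex fsck --incremental-schedule=1d --time-limit=1h
-}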

View file

@ -30,7 +30,7 @@ glacierThread = namedThread "Glacier" $ runEvery (Seconds 3600) <~> go
go = do
rs <- filter isglacier . syncDataRemotes <$> getDaemonStatus
forM_ rs $ \r ->
check r =<< (liftAnnex $ getFailedTransfers $ Remote.uuid r)
check r =<< liftAnnex (getFailedTransfers $ Remote.uuid r)
check _ [] = noop
check r l = do
let keys = map getkey l

View file

@ -54,7 +54,7 @@ runHandler handler file _filestatus =
{- Called when there's an error with inotify. -}
onErr :: Handler
onErr msg = error msg
onErr = error
{- Called when a new branch ref is written, or a branch ref is modified.
-
@ -110,7 +110,7 @@ equivBranches x y = base x == base y
isAnnexBranch :: FilePath -> Bool
isAnnexBranch f = n `isSuffixOf` f
where
n = "/" ++ show Annex.Branch.name
n = '/' : show Annex.Branch.name
fileToBranch :: FilePath -> Git.Ref
fileToBranch f = Git.Ref $ "refs" </> base

View file

@ -19,6 +19,8 @@ import Utility.ThreadScheduler
import Utility.Mounts
import Remote.List
import qualified Types.Remote as Remote
import Assistant.Types.UrlRenderer
import Assistant.Fsck
import qualified Data.Set as S
@ -33,18 +35,18 @@ import qualified Control.Exception as E
#warning Building without dbus support; will use mtab polling
#endif
mountWatcherThread :: NamedThread
mountWatcherThread = namedThread "MountWatcher" $
mountWatcherThread :: UrlRenderer -> NamedThread
mountWatcherThread urlrenderer = namedThread "MountWatcher" $
#if WITH_DBUS
dbusThread
dbusThread urlrenderer
#else
pollingThread
pollingThread urlrenderer
#endif
#if WITH_DBUS
dbusThread :: Assistant ()
dbusThread = do
dbusThread :: UrlRenderer -> Assistant ()
dbusThread urlrenderer = do
runclient <- asIO1 go
r <- liftIO $ E.try $ runClient getSessionAddress runclient
either onerr (const noop) r
@ -59,13 +61,13 @@ dbusThread = do
handleevent <- asIO1 $ \_event -> do
nowmounted <- liftIO $ currentMountPoints
wasmounted <- liftIO $ swapMVar mvar nowmounted
handleMounts wasmounted nowmounted
handleMounts urlrenderer wasmounted nowmounted
liftIO $ forM_ mountChanged $ \matcher ->
listen client matcher handleevent
, do
liftAnnex $
warning "No known volume monitor available through dbus; falling back to mtab polling"
pollingThread
pollingThread urlrenderer
)
onerr :: E.SomeException -> Assistant ()
onerr e = do
@ -76,7 +78,7 @@ dbusThread = do
- done in this situation. -}
liftAnnex $
warning $ "dbus failed; falling back to mtab polling (" ++ show e ++ ")"
pollingThread
pollingThread urlrenderer
{- Examine the list of services connected to dbus, to see if there
- are any we can use to monitor mounts. If not, will attempt to start one. -}
@ -139,24 +141,25 @@ mountChanged = [gvfs True, gvfs False, kde, kdefallback]
#endif
pollingThread :: Assistant ()
pollingThread = go =<< liftIO currentMountPoints
pollingThread :: UrlRenderer -> Assistant ()
pollingThread urlrenderer = go =<< liftIO currentMountPoints
where
go wasmounted = do
liftIO $ threadDelaySeconds (Seconds 10)
nowmounted <- liftIO currentMountPoints
handleMounts wasmounted nowmounted
handleMounts urlrenderer wasmounted nowmounted
go nowmounted
handleMounts :: MountPoints -> MountPoints -> Assistant ()
handleMounts wasmounted nowmounted =
mapM_ (handleMount . mnt_dir) $
handleMounts :: UrlRenderer -> MountPoints -> MountPoints -> Assistant ()
handleMounts urlrenderer wasmounted nowmounted =
mapM_ (handleMount urlrenderer . mnt_dir) $
S.toList $ newMountPoints wasmounted nowmounted
handleMount :: FilePath -> Assistant ()
handleMount dir = do
handleMount :: UrlRenderer -> FilePath -> Assistant ()
handleMount urlrenderer dir = do
debug ["detected mount of", dir]
rs <- filter (Git.repoIsLocal . Remote.repo) <$> remotesUnder dir
mapM_ (fsckNudge urlrenderer . Just) rs
reconnectRemotes True rs
{- Finds remotes located underneath the mount point.
@ -173,15 +176,15 @@ remotesUnder dir = do
rs <- liftAnnex remoteList
pairs <- liftAnnex $ mapM (checkremote repotop) rs
let (waschanged, rs') = unzip pairs
when (any id waschanged) $ do
liftAnnex $ Annex.changeState $ \s -> s { Annex.remotes = rs' }
when (or waschanged) $ do
liftAnnex $ Annex.changeState $ \s -> s { Annex.remotes = catMaybes rs' }
updateSyncRemotes
return $ map snd $ filter fst pairs
return $ mapMaybe snd $ filter fst pairs
where
checkremote repotop r = case Remote.localpath r of
Just p | dirContains dir (absPathFrom repotop p) ->
(,) <$> pure True <*> updateRemote r
_ -> return (False, r)
_ -> return (False, Just r)
type MountPoints = S.Set Mntent

View file

@ -39,7 +39,11 @@ netWatcherThread = thread noop
- network connection changes, but it also ensures that
- any networked remotes that may have not been routable for a
- while (despite the local network staying up), are synced with
- periodically. -}
- periodically.
-
- Note that it does not call notifyNetMessagerRestart, because
- it doesn't know that the network has changed.
-}
netWatcherFallbackThread :: NamedThread
netWatcherFallbackThread = namedThread "NetWatcherFallback" $
runEvery (Seconds 3600) <~> handleConnection

View file

@ -16,6 +16,7 @@ import Assistant.WebApp.Types
import Assistant.Alert
import Assistant.DaemonStatus
import Utility.ThreadScheduler
import Utility.Format
import Git
import Network.Multicast
@ -27,7 +28,7 @@ pairListenerThread :: UrlRenderer -> NamedThread
pairListenerThread urlrenderer = namedThread "PairListener" $ do
listener <- asIO1 $ go [] []
liftIO $ withSocketsDo $
runEvery (Seconds 1) $ void $ tryIO $
runEvery (Seconds 60) $ void $ tryIO $
listener =<< getsock
where
{- Note this can crash if there's no network interface,
@ -42,20 +43,32 @@ pairListenerThread urlrenderer = namedThread "PairListener" $ do
(pip, verified) <- verificationCheck m
=<< (pairingInProgress <$> getDaemonStatus)
let wrongstage = maybe False (\p -> pairMsgStage m <= inProgressPairStage p) pip
case (wrongstage, sane, pairMsgStage m) of
-- ignore our own messages, and
-- out of order messages
(True, _, _) -> go reqs cache sock
(_, False, _) -> go reqs cache sock
(_, _, PairReq) -> if m `elem` reqs
let fromus = maybe False (\p -> remoteSshPubKey (pairMsgData m) == remoteSshPubKey (inProgressPairData p)) pip
case (wrongstage, fromus, sane, pairMsgStage m) of
(_, True, _, _) -> do
debug ["ignoring message that looped back"]
go reqs cache sock
(_, _, False, _) -> go reqs cache sock
-- PairReq starts a pairing process, so a
-- new one is always heeded, even if
-- some other pairing is in process.
(_, _, _, PairReq) -> if m `elem` reqs
then go reqs (invalidateCache m cache) sock
else do
pairReqReceived verified urlrenderer m
go (m:take 10 reqs) (invalidateCache m cache) sock
(_, _, PairAck) -> do
(True, _, _, _) -> do
debug
["ignoring out of order message"
, show (pairMsgStage m)
, "expected"
, show (succ . inProgressPairStage <$> pip)
]
go reqs cache sock
(_, _, _, PairAck) -> do
cache' <- pairAckReceived verified pip m cache
go reqs cache' sock
(_, _, PairDone) -> do
(_, _, _, PairDone) -> do
pairDoneReceived verified pip m
go reqs cache sock
@ -75,11 +88,10 @@ pairListenerThread urlrenderer = namedThread "PairListener" $ do
verified = verifiedPairMsg m pip
sameuuid = pairUUID (inProgressPairData pip) == pairUUID (pairMsgData m)
{- Various sanity checks on the content of the message. -}
checkSane msg
{- Control characters could be used in a
- console poisoning attack. -}
| any isControl msg || any (`elem` "\r\n") msg = do
| any isControl (filter (/= '\n') (decode_c msg)) = do
liftAnnex $ warning
"illegal control characters in pairing message; ignoring"
return False
@ -102,7 +114,7 @@ pairListenerThread urlrenderer = namedThread "PairListener" $ do
pairReqReceived :: Bool -> UrlRenderer -> PairMsg -> Assistant ()
pairReqReceived True _ _ = noop -- ignore our own PairReq
pairReqReceived False urlrenderer msg = do
button <- mkAlertButton (T.pack "Respond") urlrenderer (FinishLocalPairR msg)
button <- mkAlertButton True (T.pack "Respond") urlrenderer (FinishLocalPairR msg)
void $ addAlert $ pairRequestReceivedAlert repo button
where
repo = pairRepo msg

70
Assistant/Threads/ProblemFixer.hs Normal file
View file

@ -0,0 +1,70 @@
{- git-annex assistant thread to handle fixing problems with repositories
-
- Copyright 2013 Joey Hess <joey@kitenet.net>
-
- Licensed under the GNU GPL version 3 or higher.
-}
module Assistant.Threads.ProblemFixer (
problemFixerThread
) where
import Assistant.Common
import Assistant.Types.RepoProblem
import Assistant.RepoProblem
import Assistant.Types.UrlRenderer
import Assistant.Alert
import Remote
import qualified Types.Remote as Remote
import qualified Git.Fsck
import Assistant.Repair
import qualified Git
import Annex.UUID
import Utility.ThreadScheduler
{- Waits for problems with a repo, and tries to fsck the repo and repair
- the problem. -}
problemFixerThread :: UrlRenderer -> NamedThread
problemFixerThread urlrenderer = namedThread "ProblemFixer" $
go =<< getRepoProblems
where
go problems = do
mapM_ (handleProblem urlrenderer) problems
liftIO $ threadDelaySeconds (Seconds 60)
-- Problems may have been re-reported while they were being
-- fixed, so ignore those. If a new unique problem happened
-- 60 seconds after the last was fixed, we're unlikely
-- to do much good anyway.
go =<< filter (\p -> not (any (sameRepoProblem p) problems))
<$> getRepoProblems
handleProblem :: UrlRenderer -> RepoProblem -> Assistant ()
handleProblem urlrenderer repoproblem = do
fixed <- ifM ((==) (problemUUID repoproblem) <$> liftAnnex getUUID)
( handleLocalRepoProblem urlrenderer
, maybe (return False) (handleRemoteProblem urlrenderer)
=<< liftAnnex (remoteFromUUID $ problemUUID repoproblem)
)
when fixed $
liftIO $ afterFix repoproblem
handleRemoteProblem :: UrlRenderer -> Remote -> Assistant Bool
handleRemoteProblem urlrenderer rmt
| Git.repoIsLocal r && not (Git.repoIsLocalUnknown r) =
ifM (liftIO $ checkAvailable True rmt)
( do
fixedlocks <- repairStaleGitLocks r
fsckresults <- showFscking urlrenderer (Just rmt) $ tryNonAsync $
Git.Fsck.findBroken True r
repaired <- repairWhenNecessary urlrenderer (Remote.uuid rmt) (Just rmt) fsckresults
return $ fixedlocks || repaired
, return False
)
| otherwise = return False
where
r = Remote.repo rmt
{- This is not yet used, and should probably do a fsck. -}
handleLocalRepoProblem :: UrlRenderer -> Assistant Bool
handleLocalRepoProblem _urlrenderer = do
repairStaleGitLocks =<< liftAnnex gitRepo

View file

@ -13,6 +13,7 @@ import Assistant.Pushes
import Assistant.DaemonStatus
import Assistant.Sync
import Utility.ThreadScheduler
import qualified Remote
import qualified Types.Remote as Remote
{- This thread retries pushes that failed before. -}
@ -42,7 +43,7 @@ pushThread = namedThread "Pusher" $ runEvery (Seconds 2) <~> do
- to avoid ugly messages when a removable drive is not attached.
-}
pushTargets :: Assistant [Remote]
pushTargets = liftIO . filterM available =<< candidates <$> getDaemonStatus
pushTargets = liftIO . filterM (Remote.checkAvailable True)
=<< candidates <$> getDaemonStatus
where
candidates = filter (not . Remote.readonly) . syncGitRemotes
available = maybe (return True) doesDirectoryExist . Remote.localpath

View file

@ -1,11 +1,12 @@
{- git-annex assistant sanity checker
-
- Copyright 2012 Joey Hess <joey@kitenet.net>
- Copyright 2012, 2013 Joey Hess <joey@kitenet.net>
-
- Licensed under the GNU GPL version 3 or higher.
-}
module Assistant.Threads.SanityChecker (
sanityCheckerStartupThread,
sanityCheckerDailyThread,
sanityCheckerHourlyThread
) where
@ -13,6 +14,7 @@ module Assistant.Threads.SanityChecker (
import Assistant.Common
import Assistant.DaemonStatus
import Assistant.Alert
import Assistant.Repair
import qualified Git.LsFiles
import qualified Git.Command
import qualified Git.Config
@ -20,9 +22,43 @@ import Utility.ThreadScheduler
import qualified Assistant.Threads.Watcher as Watcher
import Utility.LogFile
import Utility.Batch
import Utility.NotificationBroadcaster
import Config
import Utility.HumanTime
import Git.Repair
import Data.Time.Clock.POSIX
import qualified Data.Set as S
{- This thread runs once at startup, and most other threads wait for it
- to finish. (However, the webapp thread does not, to keep the UI
- responsive.) -}
sanityCheckerStartupThread :: Maybe Duration -> NamedThread
sanityCheckerStartupThread startupdelay = namedThreadUnchecked "SanityCheckerStartup" $ do
{- Stale git locks can prevent commits from happening, etc. -}
void $ repairStaleGitLocks =<< liftAnnex gitRepo
{- A corrupt index file can prevent the assistant from working at
- all, so detect and repair. -}
ifM (not <$> liftAnnex (inRepo (checkIndex S.empty)))
( do
notice ["corrupt index file found at startup; removing and restaging"]
liftAnnex $ inRepo nukeIndex
{- Normally the startup scan avoids re-staging files,
- but with the index deleted, everything needs to be
- restaged. -}
modifyDaemonStatus_ $ \s -> s { forceRestage = True }
, whenM (liftAnnex $ inRepo missingIndex) $ do
debug ["no index file; restaging"]
modifyDaemonStatus_ $ \s -> s { forceRestage = True }
)
{- If there's a startup delay, it's done here. -}
liftIO $ maybe noop (threadDelaySeconds . Seconds . fromIntegral . durationSeconds) startupdelay
{- Notify other threads that the startup sanity check is done. -}
status <- getDaemonStatus
liftIO $ sendNotification $ startupSanityCheckNotifier status
{- This thread wakes up hourly for inexpensive frequent sanity checks. -}
sanityCheckerHourlyThread :: NamedThread
@ -42,7 +78,7 @@ sanityCheckerDailyThread = namedThread "SanityCheckerDaily" $ forever $ do
go = do
modifyDaemonStatus_ $ \s -> s { sanityCheckRunning = True }
now <- liftIO $ getPOSIXTime -- before check started
now <- liftIO getPOSIXTime -- before check started
r <- either showerr return =<< (tryIO . batch) <~> dailyCheck
modifyDaemonStatus_ $ \s -> s
@ -78,7 +114,7 @@ dailyCheck = do
-- Find old unstaged symlinks, and add them to git.
(unstaged, cleanup) <- liftIO $ Git.LsFiles.notInRepo False ["."] g
now <- liftIO $ getPOSIXTime
now <- liftIO getPOSIXTime
forM_ unstaged $ \file -> do
ms <- liftIO $ catchMaybeIO $ getSymbolicLinkStatus file
case ms of
@ -136,3 +172,4 @@ oneHour = 60 * 60
oneDay :: Int
oneDay = 24 * oneHour

View file

@ -81,24 +81,23 @@ transferScannerThread urlrenderer = namedThread "TransferScanner" $ do
{- This is a cheap scan for failed transfers involving a remote. -}
failedTransferScan :: Remote -> Assistant ()
failedTransferScan r = do
failed <- liftAnnex $ getFailedTransfers (Remote.uuid r)
liftAnnex $ mapM_ removeFailedTransfer $ map fst failed
failed <- liftAnnex $ clearFailedTransfers (Remote.uuid r)
mapM_ retry failed
where
retry (t, info)
| transferDirection t == Download = do
| transferDirection t == Download =
{- Check if the remote still has the key.
- If not, relies on the expensiveScan to
- get it queued from some other remote. -}
whenM (liftAnnex $ remoteHas r $ transferKey t) $
requeue t info
| otherwise = do
| otherwise =
{- The Transferrer checks when uploading
- that the remote doesn't already have the
- key, so it's not redundantly checked here. -}
requeue t info
requeue t info = queueTransferWhenSmall "retrying failed transfer" (associatedFile info) t r
{- This is an expensive scan through the full git work tree, finding
- files to transfer. The scan is blocked when the transfer queue gets
- too large.
@ -118,8 +117,12 @@ expensiveScan :: UrlRenderer -> [Remote] -> Assistant ()
expensiveScan urlrenderer rs = unless onlyweb $ batch <~> do
debug ["starting scan of", show visiblers]
let us = map Remote.uuid rs
mapM_ (liftAnnex . clearFailedTransfers) us
unwantedrs <- liftAnnex $ S.fromList
<$> filterM inUnwantedGroup (map Remote.uuid rs)
<$> filterM inUnwantedGroup us
g <- liftAnnex gitRepo
(files, cleanup) <- liftIO $ LsFiles.inRepo [] g
@ -158,7 +161,7 @@ expensiveScan urlrenderer rs = unless onlyweb $ batch <~> do
present key (Just f) Nothing
liftAnnex $ do
let slocs = S.fromList locs
let use a = return $ catMaybes $ map (a key slocs) syncrs
let use a = return $ mapMaybe (a key slocs) syncrs
ts <- if present
then filterM (wantSend True (Just f) . Remote.uuid . fst)
=<< use (genTransfer Upload False)
@ -170,7 +173,7 @@ expensiveScan urlrenderer rs = unless onlyweb $ batch <~> do
genTransfer :: Direction -> Bool -> Key -> S.Set UUID -> Remote -> Maybe (Remote, Transfer)
genTransfer direction want key slocs r
| direction == Upload && Remote.readonly r = Nothing
| (S.member (Remote.uuid r) slocs) == want = Just
| S.member (Remote.uuid r) slocs == want = Just
(r, Transfer direction (Remote.uuid r) key)
| otherwise = Nothing

View file

@ -9,9 +9,7 @@ module Assistant.Threads.TransferWatcher where
import Assistant.Common
import Assistant.DaemonStatus
import Assistant.TransferQueue
import Assistant.Drop
import Annex.Content
import Assistant.TransferSlots
import Logs.Transfer
import Utility.DirWatcher
import Utility.DirWatcher.Types
@ -51,7 +49,7 @@ runHandler handler file _filestatus =
{- Called when there's an error with inotify. -}
onErr :: Handler
onErr msg = error msg
onErr = error
{- Called when a new transfer information file is written. -}
onAdd :: Handler
@ -70,10 +68,9 @@ onAdd file = case parseTransferFile file of
- The only thing that should change in the transfer info is the
- bytesComplete, so that's the only thing updated in the DaemonStatus. -}
onModify :: Handler
onModify file = do
case parseTransferFile file of
Nothing -> noop
Just t -> go t =<< liftIO (readTransferInfoFile Nothing file)
onModify file = case parseTransferFile file of
Nothing -> noop
Just t -> go t =<< liftIO (readTransferInfoFile Nothing file)
where
go _ Nothing = noop
go t (Just newinfo) = alterTransferInfo t $
@ -99,28 +96,3 @@ onDel file = case parseTransferFile file of
- runs. -}
threadDelay 10000000 -- 10 seconds
finished t minfo
{- Queue uploads of files downloaded to us, spreading them
- out to other reachable remotes.
-
- Downloading a file may have caused a remote to not want it;
- so check for drops from remotes.
-
- Uploading a file may cause the local repo, or some other remote to not
- want it; handle that too.
-}
finishedTransfer :: Transfer -> Maybe TransferInfo -> Assistant ()
finishedTransfer t (Just info)
| transferDirection t == Download =
whenM (liftAnnex $ inAnnex $ transferKey t) $ do
dodrops False
queueTransfersMatching (/= transferUUID t)
"newly received object"
Later (transferKey t) (associatedFile info) Upload
| otherwise = dodrops True
where
dodrops fromhere = handleDrops
("drop wanted after " ++ describeTransfer t info)
fromhere (transferKey t) (associatedFile info) Nothing
finishedTransfer _ _ = noop

View file

@ -8,133 +8,18 @@
module Assistant.Threads.Transferrer where
import Assistant.Common
import Assistant.DaemonStatus
import Assistant.TransferQueue
import Assistant.TransferSlots
import Assistant.Alert
import Assistant.Alert.Utility
import Assistant.Commits
import Assistant.Drop
import Assistant.TransferrerPool
import Logs.Transfer
import Logs.Location
import Annex.Content
import qualified Remote
import qualified Types.Remote as Remote
import qualified Git
import Config.Files
import Assistant.Threads.TransferWatcher
import Annex.Wanted
{- Dispatches transfers from the queue. -}
transfererThread :: NamedThread
transfererThread = namedThread "Transferrer" $ do
program <- liftIO readProgramFile
forever $ inTransferSlot program $
maybe (return Nothing) (uncurry $ genTransfer)
maybe (return Nothing) (uncurry genTransfer)
=<< getNextTransfer notrunning
where
{- Skip transfers that are already running. -}
notrunning = isNothing . startedTime
{- By the time this is called, the daemonstatus's currentTransfers map should
- already have been updated to include the transfer. -}
genTransfer :: Transfer -> TransferInfo -> TransferGenerator
genTransfer t info = case (transferRemote info, associatedFile info) of
(Just remote, Just file)
| Git.repoIsLocalUnknown (Remote.repo remote) -> do
-- optimisation for removable drives not plugged in
liftAnnex $ recordFailedTransfer t info
void $ removeTransfer t
return Nothing
| otherwise -> ifM (liftAnnex $ shouldTransfer t info)
( do
debug [ "Transferring:" , describeTransfer t info ]
notifyTransfer
return $ Just (t, info, go remote file)
, do
debug [ "Skipping unnecessary transfer:",
describeTransfer t info ]
void $ removeTransfer t
finishedTransfer t (Just info)
return Nothing
)
_ -> return Nothing
where
direction = transferDirection t
isdownload = direction == Download
{- Alerts are only shown for successful transfers.
- Transfers can temporarily fail for many reasons,
- so there's no point in bothering the user about
- those. The assistant should recover.
-
- After a successful upload, handle dropping it from
- here, if desired. In this case, the remote it was
- uploaded to is known to have it.
-
- Also, after a successful transfer, the location
- log has changed. Indicate that a commit has been
- made, in order to queue a push of the git-annex
- branch out to remotes that did not participate
- in the transfer.
-
- If the process failed, it could have crashed,
- so remove the transfer from the list of current
- transfers, just in case it didn't stop
- in a way that lets the TransferWatcher do its
- usual cleanup. However, first check if something else is
- running the transfer, to avoid removing active transfers.
-}
go remote file transferrer = ifM (liftIO $ performTransfer transferrer t $ associatedFile info)
( do
void $ addAlert $ makeAlertFiller True $
transferFileAlert direction True file
unless isdownload $
handleDrops
("object uploaded to " ++ show remote)
True (transferKey t)
(associatedFile info)
(Just remote)
void $ recordCommit
, whenM (liftAnnex $ isNothing <$> checkTransfer t) $
void $ removeTransfer t
)
{- Called right before a transfer begins, this is a last chance to avoid
- unnecessary transfers.
-
- For downloads, we obviously don't need to download if we already
- have the object.
-
- Similarly, for uploads, check if the remote is known to already have
- the object.
-
- Also, uploads get queued to all remotes, in order of cost.
- This may mean, for example, that an object is uploaded over the LAN
- to a locally paired client, and once that upload is done, a more
- expensive transfer remote no longer wants the object. (Since
- all the clients have it already.) So do one last check if this is still
- preferred content.
-
- We'll also do one last preferred content check for downloads. An
- example of a case where this could be needed is if a download is queued
- for a file that gets moved out of an archive directory -- but before
- that download can happen, the file is put back in the archive.
-}
shouldTransfer :: Transfer -> TransferInfo -> Annex Bool
shouldTransfer t info
| transferDirection t == Download =
(not <$> inAnnex key) <&&> wantGet True file
| transferDirection t == Upload = case transferRemote info of
Nothing -> return False
Just r -> notinremote r
<&&> wantSend True file (Remote.uuid r)
| otherwise = return False
where
key = transferKey t
file = associatedFile info
{- Trust the location log to check if the remote already has
- the key. This avoids a roundtrip to the remote. -}
notinremote r = notElem (Remote.uuid r) <$> loggedLocations key

View file

@ -5,11 +5,11 @@
- Licensed under the GNU GPL version 3 or higher.
-}
{-# LANGUAGE DeriveDataTypeable, BangPatterns, CPP #-}
{-# LANGUAGE DeriveDataTypeable, CPP #-}
module Assistant.Threads.Watcher (
watchThread,
WatcherException(..),
WatcherControl(..),
checkCanWatch,
needLsof,
onAddSymlink,
@ -23,7 +23,7 @@ import Assistant.Types.Changes
import Assistant.Alert
import Utility.DirWatcher
import Utility.DirWatcher.Types
import Utility.Lsof
import qualified Utility.Lsof as Lsof
import qualified Annex
import qualified Annex.Queue
import qualified Git
@ -50,7 +50,7 @@ import Data.Time.Clock
checkCanWatch :: Annex ()
checkCanWatch
| canWatch = do
liftIO setupLsof
liftIO Lsof.setup
unlessM (liftIO (inPath "lsof") <||> Annex.getState Annex.force)
needLsof
| otherwise = error "watch mode is not available on this system"
@ -64,10 +64,10 @@ needLsof = error $ unlines
]
{- A special exception that can be thrown to pause or resume the watcher. -}
data WatcherException = PauseWatcher | ResumeWatcher
data WatcherControl = PauseWatcher | ResumeWatcher
deriving (Show, Eq, Typeable)
instance E.Exception WatcherException
instance E.Exception WatcherControl
watchThread :: NamedThread
watchThread = namedThread "Watcher" $
@ -79,7 +79,7 @@ watchThread = namedThread "Watcher" $
runWatcher :: Assistant ()
runWatcher = do
startup <- asIO1 startupScan
matcher <- liftAnnex $ largeFilesMatcher
matcher <- liftAnnex largeFilesMatcher
direct <- liftAnnex isDirect
symlinkssupported <- liftAnnex $ coreSymlinks <$> Annex.getGitConfig
addhook <- hook $ if direct
@ -107,9 +107,9 @@ runWatcher = do
where
hook a = Just <$> asIO2 (runHandler a)
waitFor :: WatcherException -> Assistant () -> Assistant ()
waitFor :: WatcherControl -> Assistant () -> Assistant ()
waitFor sig next = do
r <- liftIO $ (E.try pause :: IO (Either E.SomeException ()))
r <- liftIO (E.try pause :: IO (Either E.SomeException ()))
case r of
Left e -> case E.fromException e of
Just s
@ -124,7 +124,7 @@ startupScan :: IO a -> Assistant a
startupScan scanner = do
liftAnnex $ showAction "scanning"
alertWhile' startupScanAlert $ do
r <- liftIO $ scanner
r <- liftIO scanner
-- Notice any files that were deleted before
-- watching was started.
@ -133,7 +133,7 @@ startupScan scanner = do
forM_ fs $ \f -> do
liftAnnex $ onDel' f
maybe noop recordChange =<< madeChange f RmChange
void $ liftIO $ cleanup
void $ liftIO cleanup
liftAnnex $ showAction "started"
liftIO $ putStrLn ""
@ -176,7 +176,7 @@ runHandler handler file filestatus = void $ do
Right (Just change) -> do
-- Just in case the commit thread is not
-- flushing the queue fast enough.
liftAnnex $ Annex.Queue.flushWhenFull
liftAnnex Annex.Queue.flushWhenFull
recordChange change
where
normalize f
@ -200,6 +200,9 @@ onAdd matcher file filestatus
add matcher file
| otherwise = noChange
shouldRestage :: DaemonStatus -> Bool
shouldRestage ds = scanComplete ds || forceRestage ds
{- In direct mode, add events are received for both new files, and
- modified existing files.
-}
@ -214,7 +217,7 @@ onAddDirect symlinkssupported matcher file fs = do
- really modified, but it might have
- just been deleted and been put back,
- so its symlink is restaged to make sure. -}
( ifM (scanComplete <$> getDaemonStatus)
( ifM (shouldRestage <$> getDaemonStatus)
( do
link <- liftAnnex $ inRepo $ gitAnnexLink file key
addLink file link (Just key)
@ -286,7 +289,7 @@ onAddSymlink' linktarget mk isdirect file filestatus = go mk
- links too.)
-}
ensurestaged (Just link) daemonstatus
| scanComplete daemonstatus = addLink file link mk
| shouldRestage daemonstatus = addLink file link mk
| otherwise = case filestatus of
Just s
| not (afterLastDaemonRun (statusChangeTime s) daemonstatus) -> noChange
@ -300,7 +303,7 @@ addLink file link mk = do
liftAnnex $ do
v <- catObjectDetails $ Ref $ ':':file
case v of
Just (currlink, sha)
Just (currlink, sha, _type)
| s2w8 link == L.unpack currlink ->
stageSymlink file sha
_ -> stageSymlink file =<< hashSymlink link
@ -340,8 +343,8 @@ onDelDir dir _ = do
now <- liftIO getCurrentTime
recordChanges $ map (\f -> Change now f RmChange) fs
void $ liftIO $ clean
liftAnnex $ Annex.Queue.flushWhenFull
void $ liftIO clean
liftAnnex Annex.Queue.flushWhenFull
noChange
{- Called when there's an error with inotify or kqueue. -}

View file

@ -29,9 +29,11 @@ import Assistant.WebApp.Configurators.XMPP
import Assistant.WebApp.Configurators.Preferences
import Assistant.WebApp.Configurators.Edit
import Assistant.WebApp.Configurators.Delete
import Assistant.WebApp.Configurators.Fsck
import Assistant.WebApp.Documentation
import Assistant.WebApp.Control
import Assistant.WebApp.OtherRepos
import Assistant.WebApp.Repair
import Assistant.Types.ThreadedMonad
import Utility.WebApp
import Utility.Tmp
@ -83,7 +85,10 @@ webAppThread assistantdata urlrenderer noannex listenhost postfirstrun onstartup
urlfile <- runThreadState st $ fromRepo gitAnnexUrlFile
go addr webapp htmlshim (Just urlfile)
where
thread = namedThread "WebApp"
-- The webapp thread does not wait for the startupSanityCheckThread
-- to finish, so that the user interface remains responsive while
-- that's going on.
thread = namedThreadUnchecked "WebApp"
getreldir
| noannex = return Nothing
| otherwise = Just <$>

View file

@ -103,9 +103,8 @@ xmppClient urlrenderer d creds =
- will also be killed. -}
liftIO $ pinger `concurrently` sender `concurrently` receiver
sendnotifications selfjid = forever $ do
a <- inAssistant $ relayNetMessage selfjid
a
sendnotifications selfjid = forever $
join $ inAssistant $ relayNetMessage selfjid
receivenotifications selfjid lasttraffic = forever $ do
l <- decodeStanza selfjid <$> getStanza
void $ liftIO $ atomically . swapTMVar lasttraffic =<< getCurrentTime
@ -115,7 +114,7 @@ xmppClient urlrenderer d creds =
sendpings selfjid lasttraffic = forever $ do
putStanza pingstanza
startping <- liftIO $ getCurrentTime
startping <- liftIO getCurrentTime
liftIO $ threadDelaySeconds (Seconds 120)
t <- liftIO $ atomically $ readTMVar lasttraffic
when (t < startping) $ do
@ -154,8 +153,7 @@ xmppClient urlrenderer d creds =
, logJid jid
, show $ logNetMessage msg'
]
a <- inAssistant $ convertNetMsg msg' selfjid
a
join $ inAssistant $ convertNetMsg msg' selfjid
inAssistant $ sentImportantNetMessage msg c
resendImportantMessages _ _ = noop
@ -196,7 +194,7 @@ logClient (Client jid) = logJid jid
decodeStanza :: JID -> ReceivedStanza -> [XMPPEvent]
decodeStanza selfjid s@(ReceivedPresence p)
| presenceType p == PresenceError = [ProtocolError s]
| presenceFrom p == Nothing = [Ignorable s]
| isNothing (presenceFrom p) = [Ignorable s]
| presenceFrom p == Just selfjid = [Ignorable s]
| otherwise = maybe [PresenceMessage p] decode (gitAnnexTagInfo p)
where
@ -209,7 +207,7 @@ decodeStanza selfjid s@(ReceivedPresence p)
- along with their real meaning. -}
impliedp v = [PresenceMessage p, v]
decodeStanza selfjid s@(ReceivedMessage m)
| messageFrom m == Nothing = [Ignorable s]
| isNothing (messageFrom m) = [Ignorable s]
| messageFrom m == Just selfjid = [Ignorable s]
| messageType m == MessageError = [ProtocolError s]
| otherwise = [fromMaybe (Unknown s) (GotNetMessage <$> decodeMessage m)]
@ -241,13 +239,13 @@ relayNetMessage selfjid = do
\c -> (baseJID <$> parseJID c) == Just tojid
return $ putStanza presenceQuery
_ -> return noop
convert (Pushing c pushstage) = withOtherClient selfjid c $ \tojid -> do
convert (Pushing c pushstage) = withOtherClient selfjid c $ \tojid ->
if tojid == baseJID tojid
then do
clients <- maybe [] (S.toList . buddyAssistants)
<$> getBuddy (genBuddyKey tojid) <<~ buddyList
debug ["exploded undirected message to clients", unwords $ map logClient clients]
return $ forM_ (clients) $ \(Client jid) ->
return $ forM_ clients $ \(Client jid) ->
putStanza $ pushMessage pushstage jid selfjid
else do
debug ["to client:", logJid tojid]
@ -266,7 +264,7 @@ convertNetMsg msg selfjid = convert msg
convert (Pushing c pushstage) = withOtherClient selfjid c $ \tojid ->
return $ putStanza $ pushMessage pushstage tojid selfjid
withOtherClient :: JID -> ClientID -> (JID -> Assistant (XMPP ())) -> (Assistant (XMPP ()))
withOtherClient :: JID -> ClientID -> (JID -> Assistant (XMPP ())) -> Assistant (XMPP ())
withOtherClient selfjid c a = case parseJID c of
Nothing -> return noop
Just tojid
@ -323,10 +321,10 @@ pairMsgReceived :: UrlRenderer -> PairStage -> UUID -> JID -> JID -> Assistant (
pairMsgReceived urlrenderer PairReq theiruuid selfjid theirjid
| baseJID selfjid == baseJID theirjid = autoaccept
| otherwise = do
knownjids <- catMaybes . map (parseJID . getXMPPClientID)
knownjids <- mapMaybe (parseJID . getXMPPClientID)
. filter isXMPPRemote . syncRemotes <$> getDaemonStatus
um <- liftAnnex uuidMap
if any (== baseJID theirjid) knownjids && M.member theiruuid um
if elem (baseJID theirjid) knownjids && M.member theiruuid um
then autoaccept
else showalert
@ -338,7 +336,7 @@ pairMsgReceived urlrenderer PairReq theiruuid selfjid theirjid
finishXMPPPairing theirjid theiruuid
-- Show an alert to let the user decide if they want to pair.
showalert = do
button <- mkAlertButton (T.pack "Respond") urlrenderer $
button <- mkAlertButton True (T.pack "Respond") urlrenderer $
ConfirmXMPPPairFriendR $
PairKey theiruuid $ formatJID theirjid
void $ addAlert $ pairRequestReceivedAlert

View file

@ -5,6 +5,8 @@
- Licensed under the GNU GPL version 3 or higher.
-}
{-# LANGUAGE CPP #-}
module Assistant.TransferSlots where
import Assistant.Common
@ -13,11 +15,29 @@ import Assistant.Types.TransferSlots
import Assistant.DaemonStatus
import Assistant.TransferrerPool
import Assistant.Types.TransferrerPool
import Assistant.Types.TransferQueue
import Assistant.TransferQueue
import Assistant.Alert
import Assistant.Alert.Utility
import Assistant.Commits
import Assistant.Drop
import Logs.Transfer
import Logs.Location
import qualified Git
import qualified Remote
import qualified Types.Remote as Remote
import Annex.Content
import Annex.Wanted
import Config.Files
import qualified Data.Map as M
import qualified Control.Exception as E
import Control.Concurrent
import qualified Control.Concurrent.MSemN as MSemN
#ifndef mingw32_HOST_OS
import System.Posix.Process (getProcessGroupIDOf)
import System.Posix.Signals (signalProcessGroup, sigTERM, sigKILL)
#endif
type TransferGenerator = Assistant (Maybe (Transfer, TransferInfo, Transferrer -> Assistant ()))
@ -76,3 +96,191 @@ runTransferThread' program d run = go
_ -> done
done = runAssistant d $
flip MSemN.signal 1 <<~ transferSlots
{- By the time this is called, the daemonstatus's currentTransfers map should
- already have been updated to include the transfer. -}
genTransfer :: Transfer -> TransferInfo -> TransferGenerator
genTransfer t info = case (transferRemote info, associatedFile info) of
(Just remote, Just file)
| Git.repoIsLocalUnknown (Remote.repo remote) -> do
-- optimisation for removable drives not plugged in
liftAnnex $ recordFailedTransfer t info
void $ removeTransfer t
return Nothing
| otherwise -> ifM (liftAnnex $ shouldTransfer t info)
( do
debug [ "Transferring:" , describeTransfer t info ]
notifyTransfer
return $ Just (t, info, go remote file)
, do
debug [ "Skipping unnecessary transfer:",
describeTransfer t info ]
void $ removeTransfer t
finishedTransfer t (Just info)
return Nothing
)
_ -> return Nothing
where
direction = transferDirection t
isdownload = direction == Download
{- Alerts are only shown for successful transfers.
- Transfers can temporarily fail for many reasons,
- so there's no point in bothering the user about
- those. The assistant should recover.
-
- After a successful upload, handle dropping it from
- here, if desired. In this case, the remote it was
- uploaded to is known to have it.
-
- Also, after a successful transfer, the location
- log has changed. Indicate that a commit has been
- made, in order to queue a push of the git-annex
- branch out to remotes that did not participate
- in the transfer.
-
- If the process failed, it could have crashed,
- so remove the transfer from the list of current
- transfers, just in case it didn't stop
- in a way that lets the TransferWatcher do its
- usual cleanup. However, first check if something else is
- running the transfer, to avoid removing active transfers.
-}
go remote file transferrer = ifM (liftIO $ performTransfer transferrer t $ associatedFile info)
( do
void $ addAlert $ makeAlertFiller True $
transferFileAlert direction True file
unless isdownload $
handleDrops
("object uploaded to " ++ show remote)
True (transferKey t)
(associatedFile info)
(Just remote)
void recordCommit
, whenM (liftAnnex $ isNothing <$> checkTransfer t) $
void $ removeTransfer t
)
{- Called right before a transfer begins, this is a last chance to avoid
- unnecessary transfers.
-
- For downloads, we obviously don't need to download if we already
- have the object.
-
- Similarly, for uploads, check if the remote is known to already have
- the object.
-
- Also, uploads get queued to all remotes, in order of cost.
- This may mean, for example, that an object is uploaded over the LAN
- to a locally paired client, and once that upload is done, a more
- expensive transfer remote no longer wants the object. (Since
- all the clients have it already.) So do one last check if this is still
- preferred content.
-
- We'll also do one last preferred content check for downloads. An
- example of a case where this could be needed is if a download is queued
- for a file that gets moved out of an archive directory -- but before
- that download can happen, the file is put back in the archive.
-}
shouldTransfer :: Transfer -> TransferInfo -> Annex Bool
shouldTransfer t info
| transferDirection t == Download =
(not <$> inAnnex key) <&&> wantGet True file
| transferDirection t == Upload = case transferRemote info of
Nothing -> return False
Just r -> notinremote r
<&&> wantSend True file (Remote.uuid r)
| otherwise = return False
where
key = transferKey t
file = associatedFile info
{- Trust the location log to check if the remote already has
- the key. This avoids a roundtrip to the remote. -}
notinremote r = notElem (Remote.uuid r) <$> loggedLocations key
{- Queue uploads of files downloaded to us, spreading them
- out to other reachable remotes.
-
- Downloading a file may have caused a remote to not want it;
- so check for drops from remotes.
-
- Uploading a file may cause the local repo, or some other remote to not
- want it; handle that too.
-}
finishedTransfer :: Transfer -> Maybe TransferInfo -> Assistant ()
finishedTransfer t (Just info)
| transferDirection t == Download =
whenM (liftAnnex $ inAnnex $ transferKey t) $ do
dodrops False
queueTransfersMatching (/= transferUUID t)
"newly received object"
Later (transferKey t) (associatedFile info) Upload
| otherwise = dodrops True
where
dodrops fromhere = handleDrops
("drop wanted after " ++ describeTransfer t info)
fromhere (transferKey t) (associatedFile info) Nothing
finishedTransfer _ _ = noop
{- Pause a running transfer. -}
pauseTransfer :: Transfer -> Assistant ()
pauseTransfer = cancelTransfer True
{- Cancel a running transfer. -}
cancelTransfer :: Bool -> Transfer -> Assistant ()
cancelTransfer pause t = do
m <- getCurrentTransfers
unless pause $
{- remove queued transfer -}
void $ dequeueTransfers $ equivilantTransfer t
{- stop running transfer -}
maybe noop stop (M.lookup t m)
where
stop info = do
{- When there's a thread associated with the
- transfer, it's signaled first, to avoid it
- displaying any alert about the transfer having
- failed when the transfer process is killed. -}
liftIO $ maybe noop signalthread $ transferTid info
liftIO $ maybe noop killproc $ transferPid info
if pause
then void $ alterTransferInfo t $
\i -> i { transferPaused = True }
else void $ removeTransfer t
signalthread tid
| pause = throwTo tid PauseTransfer
| otherwise = killThread tid
killproc pid = void $ tryIO $ do
#ifndef mingw32_HOST_OS
{- In order to stop helper processes like rsync,
- kill the whole process group of the process
- running the transfer. -}
g <- getProcessGroupIDOf pid
void $ tryIO $ signalProcessGroup sigTERM g
threadDelay 50000 -- 0.05 second grace period
void $ tryIO $ signalProcessGroup sigKILL g
#else
error "TODO: cancelTransfer not implemented on Windows"
#endif
{- Start or resume a transfer. -}
startTransfer :: Transfer -> Assistant ()
startTransfer t = do
m <- getCurrentTransfers
maybe startqueued go (M.lookup t m)
where
go info = maybe (start info) resume $ transferTid info
startqueued = do
is <- map snd <$> getMatchingTransfers (== t)
maybe noop start $ headMaybe is
resume tid = do
alterTransferInfo t $ \i -> i { transferPaused = False }
liftIO $ throwTo tid ResumeTransfer
start info = do
program <- liftIO readProgramFile
inImmediateTransferSlot program $
genTransfer t info
getCurrentTransfers :: Assistant TransferMap
getCurrentTransfers = currentTransfers <$> getDaemonStatus
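A hedged sketch (not part of this commit) of how the operations above pair up: pauseTransfer throws PauseTransfer to the transfer thread and kills the process group, leaving the transfer in the currentTransfers map, so a later startTransfer finds it and resumes via ResumeTransfer. The helper name is hypothetical.

pauseThenResumeLater :: Transfer -> Assistant ()
pauseThenResumeLater t = do
    pauseTransfer t
    -- ... later, e.g. when the user clicks resume in the webapp ...
    startTransfer t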

View file

@ -5,12 +5,17 @@
- Licensed under the GNU GPL version 3 or higher.
-}
{-# LANGUAGE CPP #-}
module Assistant.TransferrerPool where
import Assistant.Common
import Assistant.Types.TransferrerPool
import Logs.Transfer
#ifndef mingw32_HOST_OS
import qualified Command.TransferKeys as T
#endif
import Control.Concurrent.STM
import System.Process (create_group)
@ -38,13 +43,18 @@ withTransferrer program pool a = do
- finish. -}
performTransfer :: Transferrer -> Transfer -> AssociatedFile -> IO Bool
performTransfer transferrer t f = catchBoolIO $ do
#ifndef mingw32_HOST_OS
T.sendRequest t f (transferrerWrite transferrer)
T.readResponse (transferrerRead transferrer)
#else
error "TODO performTransfer not implemented on Windows"
#endif
{- Starts a new git-annex transferkeys process, setting up a pipe
- that will be used to communicate with it. -}
mkTransferrer :: FilePath -> IO Transferrer
mkTransferrer program = do
#ifndef mingw32_HOST_OS
(myread, twrite) <- createPipe
(tread, mywrite) <- createPipe
mapM_ (\fd -> setFdOption fd CloseOnExec True) [myread, mywrite]
@ -68,6 +78,9 @@ mkTransferrer program = do
, transferrerWrite = mywriteh
, transferrerHandle = pid
}
#else
error "TODO mkTransferrer not implemented on Windows"
#endif
{- Checks if a Transferrer is still running. If not, makes a new one. -}
checkTransferrer :: FilePath -> Transferrer -> IO Transferrer

View file

@ -30,6 +30,7 @@ data AlertName
| RemoteRemovalAlert String
| CloudRepoNeededAlert
| SyncAlert
| NotFsckedAlert
deriving (Eq)
{- The first alert is the new alert, the second is an old alert.

View file

@ -5,8 +5,6 @@
- Licensed under the GNU GPL version 3 or higher.
-}
{-# LANGUAGE RankNTypes, ImpredicativeTypes #-}
module Assistant.Types.DaemonStatus where
import Common.Annex
@ -18,6 +16,7 @@ import Assistant.Types.NetMessager
import Assistant.Types.Alert
import Control.Concurrent.STM
import Control.Concurrent.MVar
import Control.Concurrent.Async
import Data.Time.Clock.POSIX
import qualified Data.Map as M
@ -29,11 +28,13 @@ data DaemonStatus = DaemonStatus
{ startedThreads :: M.Map ThreadName (Async (), IO ())
-- False when the daemon is performing its startup scan
, scanComplete :: Bool
-- True when all files should be restaged.
, forceRestage :: Bool
-- Time when a previous process of the daemon was running ok
, lastRunning :: Maybe POSIXTime
-- True when the sanity checker is running
-- True when the daily sanity checker is running
, sanityCheckRunning :: Bool
-- Last time the sanity checker ran
-- Last time the daily sanity checker ran
, lastSanityCheck :: Maybe POSIXTime
-- True when a scan for file transfers is running
, transferScanRunning :: Bool
@ -62,9 +63,15 @@ data DaemonStatus = DaemonStatus
, alertNotifier :: NotificationBroadcaster
-- Broadcasts notifications when the syncRemotes change
, syncRemotesNotifier :: NotificationBroadcaster
-- Broadcasts notifications when the scheduleLog changes
, scheduleLogNotifier :: NotificationBroadcaster
-- Broadcasts a notification once the startup sanity check has run.
, startupSanityCheckNotifier :: NotificationBroadcaster
-- When the XMPP client is connected, this will contain the XMPP
-- address.
, xmppClientID :: Maybe ClientID
-- MVars to signal when a remote gets connected.
, connectRemoteNotifiers :: M.Map UUID [MVar ()]
}
type TransferMap = M.Map Transfer TransferInfo
@ -76,6 +83,7 @@ newDaemonStatus :: IO DaemonStatus
newDaemonStatus = DaemonStatus
<$> pure M.empty
<*> pure False
<*> pure False
<*> pure Nothing
<*> pure False
<*> pure Nothing
@ -93,4 +101,7 @@ newDaemonStatus = DaemonStatus
<*> newNotificationBroadcaster
<*> newNotificationBroadcaster
<*> newNotificationBroadcaster
<*> newNotificationBroadcaster
<*> newNotificationBroadcaster
<*> pure Nothing
<*> pure M.empty

View file

@ -11,7 +11,11 @@ import Assistant.Monad
import Assistant.Types.ThreadName
{- Information about a named thread that can be run. -}
data NamedThread = NamedThread ThreadName (Assistant ())
data NamedThread = NamedThread Bool ThreadName (Assistant ())
namedThread :: String -> Assistant () -> NamedThread
namedThread = NamedThread . ThreadName
namedThread = NamedThread True . ThreadName
{- A named thread that can start running before the startup sanity check. -}
namedThreadUnchecked :: String -> Assistant () -> NamedThread
namedThreadUnchecked = NamedThread False . ThreadName
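A hedged usage sketch (not part of this commit): the new Bool marks whether a thread must wait for the startup sanity check. Thread names and bodies below are hypothetical.

exampleChecked :: NamedThread
exampleChecked = namedThread "ExampleChecked" $
    -- starts only after the startup sanity check has finished
    debug ["checked thread running"]

exampleUnchecked :: NamedThread
exampleUnchecked = namedThreadUnchecked "ExampleUnchecked" $
    -- may start while the startup sanity check is still running
    debug ["unchecked thread running"]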

View file

@ -0,0 +1,28 @@
{- git-annex assistant repository problem tracking
-
- Copyright 2013 Joey Hess <joey@kitenet.net>
-
- Licensed under the GNU GPL version 3 or higher.
-}
module Assistant.Types.RepoProblem where
import Types
import Utility.TList
import Control.Concurrent.STM
import Data.Function
data RepoProblem = RepoProblem
{ problemUUID :: UUID
, afterFix :: IO ()
}
{- The afterFix actions are assumed to all be equivalent. -}
sameRepoProblem :: RepoProblem -> RepoProblem -> Bool
sameRepoProblem = (==) `on` problemUUID
type RepoProblemChan = TList RepoProblem
newRepoProblemChan :: IO RepoProblemChan
newRepoProblemChan = atomically newTList
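A hedged sketch (not in this commit) of how sameRepoProblem supports deduplication: since the afterFix actions are interchangeable, nubBy can drop later reports for the same repository while keeping the first one's afterFix.

import Data.List (nubBy)

dedupRepoProblems :: [RepoProblem] -> [RepoProblem]
dedupRepoProblems = nubBy sameRepoProblem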

View file

@ -12,6 +12,7 @@ import Assistant.WebApp as X
import Assistant.WebApp.Page as X
import Assistant.WebApp.Form as X
import Assistant.WebApp.Types as X
import Assistant.WebApp.RepoId as X
import Utility.Yesod as X hiding (textField, passwordField, insertBy, replace, joinPath, deleteBy, delete, insert, Key, Option)
import Data.Text as X (Text)

View file

@ -17,7 +17,7 @@ import Assistant.XMPP.Client
{- The main configuration screen. -}
getConfigurationR :: Handler Html
getConfigurationR = ifM (inFirstRun)
getConfigurationR = ifM inFirstRun
( redirect FirstRepositoryR
, page "Configuration" (Just Configuration) $ do
#ifdef WITH_XMPP

View file

@ -10,8 +10,7 @@
module Assistant.WebApp.Configurators.AWS where
import Assistant.WebApp.Common
import Assistant.MakeRemote
import Assistant.Sync
import Assistant.WebApp.MakeRemote
#ifdef WITH_S3
import qualified Remote.S3 as S3
#endif
@ -22,8 +21,9 @@ import qualified Remote
import qualified Types.Remote as Remote
import Types.Remote (RemoteConfig)
import Types.StandardGroups
import Logs.PreferredContent
import Creds
import Assistant.Gpg
import Git.Types (RemoteName)
import qualified Data.Text as T
import qualified Data.Map as M
@ -93,10 +93,10 @@ awsCredsAForm defcreds = AWSCreds
<*> secretAccessKeyField (T.pack . snd <$> defcreds)
accessKeyIDField :: Widget -> Maybe Text -> MkAForm Text
accessKeyIDField help def = areq (textField `withNote` help) "Access Key ID" def
accessKeyIDField help = areq (textField `withNote` help) "Access Key ID"
accessKeyIDFieldWithHelp :: Maybe Text -> MkAForm Text
accessKeyIDFieldWithHelp def = accessKeyIDField help def
accessKeyIDFieldWithHelp = accessKeyIDField help
where
help = [whamlet|
<a href="https://portal.aws.amazon.com/gp/aws/securityCredentials#id_block">
@ -104,7 +104,7 @@ accessKeyIDFieldWithHelp def = accessKeyIDField help def
|]
secretAccessKeyField :: Maybe Text -> MkAForm Text
secretAccessKeyField def = areq passwordField "Secret Access Key" def
secretAccessKeyField = areq passwordField "Secret Access Key"
datacenterField :: AWS.Service -> MkAForm Text
datacenterField service = areq (selectFieldList list) "Datacenter" defregion
@ -120,20 +120,17 @@ postAddS3R :: Handler Html
postAddS3R = awsConfigurator $ do
defcreds <- liftAnnex previouslyUsedAWSCreds
((result, form), enctype) <- liftH $
runFormPost $ renderBootstrap $ s3InputAForm defcreds
runFormPostNoToken $ renderBootstrap $ s3InputAForm defcreds
case result of
FormSuccess input -> liftH $ do
let name = T.unpack $ repoName input
makeAWSRemote initSpecialRemote S3.remote (extractCreds input) name setgroup $ M.fromList
makeAWSRemote initSpecialRemote S3.remote TransferGroup (extractCreds input) name $ M.fromList
[ configureEncryption $ enableEncryption input
, ("type", "S3")
, ("datacenter", T.unpack $ datacenter input)
, ("storageclass", show $ storageClass input)
]
_ -> $(widgetFile "configurators/adds3")
where
setgroup r = liftAnnex $
setStandardGroup (Remote.uuid r) TransferGroup
#else
postAddS3R = error "S3 not supported by this build"
#endif
@ -146,19 +143,16 @@ postAddGlacierR :: Handler Html
postAddGlacierR = glacierConfigurator $ do
defcreds <- liftAnnex previouslyUsedAWSCreds
((result, form), enctype) <- liftH $
runFormPost $ renderBootstrap $ glacierInputAForm defcreds
runFormPostNoToken $ renderBootstrap $ glacierInputAForm defcreds
case result of
FormSuccess input -> liftH $ do
let name = T.unpack $ repoName input
makeAWSRemote initSpecialRemote Glacier.remote (extractCreds input) name setgroup $ M.fromList
makeAWSRemote initSpecialRemote Glacier.remote SmallArchiveGroup (extractCreds input) name $ M.fromList
[ configureEncryption $ enableEncryption input
, ("type", "glacier")
, ("datacenter", T.unpack $ datacenter input)
]
_ -> $(widgetFile "configurators/addglacier")
where
setgroup r = liftAnnex $
setStandardGroup (Remote.uuid r) SmallArchiveGroup
#else
postAddGlacierR = error "S3 not supported by this build"
#endif
@ -192,13 +186,13 @@ enableAWSRemote :: RemoteType -> UUID -> Widget
enableAWSRemote remotetype uuid = do
defcreds <- liftAnnex previouslyUsedAWSCreds
((result, form), enctype) <- liftH $
runFormPost $ renderBootstrap $ awsCredsAForm defcreds
runFormPostNoToken $ renderBootstrap $ awsCredsAForm defcreds
case result of
FormSuccess creds -> liftH $ do
m <- liftAnnex readRemoteLog
let name = fromJust $ M.lookup "name" $
fromJust $ M.lookup uuid m
makeAWSRemote enableSpecialRemote remotetype creds name (const noop) M.empty
makeAWSRemote enableSpecialRemote remotetype SmallArchiveGroup creds name M.empty
_ -> do
description <- liftAnnex $
T.pack <$> Remote.prettyUUID uuid
@ -207,14 +201,11 @@ enableAWSRemote remotetype uuid = do
enableAWSRemote _ _ = error "S3 not supported by this build"
#endif
makeAWSRemote :: SpecialRemoteMaker -> RemoteType -> AWSCreds -> String -> (Remote -> Handler ()) -> RemoteConfig -> Handler ()
makeAWSRemote maker remotetype (AWSCreds ak sk) name setup config = do
makeAWSRemote :: SpecialRemoteMaker -> RemoteType -> StandardGroup -> AWSCreds -> RemoteName -> RemoteConfig -> Handler ()
makeAWSRemote maker remotetype defaultgroup (AWSCreds ak sk) name config = do
liftIO $ AWS.setCredsEnv (T.unpack ak, T.unpack sk)
r <- liftAnnex $ addRemote $ do
setupCloudRemote defaultgroup Nothing $
maker hostname remotetype config
setup r
liftAssistant $ syncRemote r
redirect $ EditNewCloudRepositoryR $ Remote.uuid r
where
{- AWS services use the remote name as the basis for a host
- name, so filter it to contain valid characters. -}

View file

@ -11,9 +11,9 @@ module Assistant.WebApp.Configurators.Delete where
import Assistant.WebApp.Common
import Assistant.DeleteRemote
import Assistant.WebApp.Utility
import Assistant.DaemonStatus
import Assistant.ScanRemotes
import Assistant.Sync
import qualified Remote
import qualified Git
import Config.Files
@ -22,6 +22,7 @@ import Logs.Trust
import Logs.Remote
import Logs.PreferredContent
import Types.StandardGroups
import Annex.UUID
import System.IO.HVFS (SystemFS(..))
import qualified Data.Text as T
@ -29,9 +30,13 @@ import qualified Data.Map as M
import System.Path
notCurrentRepo :: UUID -> Handler Html -> Handler Html
notCurrentRepo uuid a = go =<< liftAnnex (Remote.remoteFromUUID uuid)
notCurrentRepo uuid a = do
u <- liftAnnex getUUID
if u == uuid
then redirect DeleteCurrentRepositoryR
else go =<< liftAnnex (Remote.remoteFromUUID uuid)
where
go Nothing = redirect DeleteCurrentRepositoryR
go Nothing = error "Unknown UUID"
go (Just _) = a
getDisableRepositoryR :: UUID -> Handler Html
@ -76,7 +81,7 @@ deleteCurrentRepository = dangerPage $ do
havegitremotes <- haveremotes syncGitRemotes
havedataremotes <- haveremotes syncDataRemotes
((result, form), enctype) <- liftH $
runFormPost $ renderBootstrap $ sanityVerifierAForm $
runFormPostNoToken $ renderBootstrap $ sanityVerifierAForm $
SanityVerifier magicphrase
case result of
FormSuccess _ -> liftH $ do
@ -86,9 +91,10 @@ deleteCurrentRepository = dangerPage $ do
{- Disable syncing to this repository, and all
- remotes. This stops all transfers, and all
- file watching. -}
changeSyncable Nothing False
rs <- liftAssistant $ syncRemotes <$> getDaemonStatus
mapM_ (\r -> changeSyncable (Just r) False) rs
liftAssistant $ do
changeSyncable Nothing False
rs <- syncRemotes <$> getDaemonStatus
mapM_ (\r -> changeSyncable (Just r) False) rs
{- Make all directories writable, so all annexed
- content can be deleted. -}

View file

@ -10,11 +10,12 @@
module Assistant.WebApp.Configurators.Edit where
import Assistant.WebApp.Common
import Assistant.WebApp.Utility
import Assistant.WebApp.Gpg
import Assistant.DaemonStatus
import Assistant.MakeRemote (uniqueRemoteName)
import Assistant.WebApp.MakeRemote (uniqueRemoteName)
import Assistant.WebApp.Configurators.XMPP (xmppNeeded)
import Assistant.ScanRemotes
import Assistant.Sync
import qualified Assistant.WebApp.Configurators.AWS as AWS
import qualified Assistant.WebApp.Configurators.IA as IA
#ifdef WITH_S3
@ -33,6 +34,12 @@ import qualified Git.Command
import qualified Git.Config
import qualified Annex
import Git.Remote
import Remote.Helper.Encryptable (extractCipher)
import Types.Crypto
import Utility.Gpg
import Annex.UUID
import Assistant.Ssh
import Config
import qualified Data.Text as T
import qualified Data.Map as M
@ -58,7 +65,7 @@ getRepoConfig uuid mremote = do
Nothing -> (RepoGroupCustom $ unwords $ S.toList groups, Nothing)
Just g -> (RepoGroupStandard g, associatedDirectory remoteconfig g)
description <- maybe Nothing (Just . T.pack) . M.lookup uuid <$> uuidMap
description <- fmap T.pack . M.lookup uuid <$> uuidMap
syncable <- case mremote of
Just r -> return $ remoteAnnexSync $ Remote.gitconfig r
@ -95,7 +102,7 @@ setRepoConfig uuid mremote oldc newc = do
, Param $ T.unpack $ repoName oldc
, Param name
]
void $ Remote.remoteListRefresh
void Remote.remoteListRefresh
liftAssistant updateSyncRemotes
when associatedDirectoryChanged $ case repoAssociatedDirectory newc of
Nothing -> noop
@ -116,13 +123,11 @@ setRepoConfig uuid mremote oldc newc = do
- so avoid queueing a duplicate scan. -}
when (repoSyncable newc && not syncableChanged) $ liftAssistant $
case mremote of
Just remote -> do
addScanRemotes True [remote]
Nothing -> do
addScanRemotes True
=<< syncDataRemotes <$> getDaemonStatus
Just remote -> addScanRemotes True [remote]
Nothing -> addScanRemotes True
=<< syncDataRemotes <$> getDaemonStatus
when syncableChanged $
changeSyncable mremote (repoSyncable newc)
liftAssistant $ changeSyncable mremote (repoSyncable newc)
where
syncableChanged = repoSyncable oldc /= repoSyncable newc
associatedDirectoryChanged = repoAssociatedDirectory oldc /= repoAssociatedDirectory newc
@ -155,31 +160,34 @@ editRepositoryAForm ishere def = RepoConfig
Nothing -> aopt hiddenField "" Nothing
Just d -> aopt textField "Associated directory" (Just $ Just d)
getEditRepositoryR :: UUID -> Handler Html
getEditRepositoryR :: RepoId -> Handler Html
getEditRepositoryR = postEditRepositoryR
postEditRepositoryR :: UUID -> Handler Html
postEditRepositoryR :: RepoId -> Handler Html
postEditRepositoryR = editForm False
getEditNewRepositoryR :: UUID -> Handler Html
getEditNewRepositoryR = postEditNewRepositoryR
postEditNewRepositoryR :: UUID -> Handler Html
postEditNewRepositoryR = editForm True
postEditNewRepositoryR = editForm True . RepoUUID
getEditNewCloudRepositoryR :: UUID -> Handler Html
getEditNewCloudRepositoryR = postEditNewCloudRepositoryR
postEditNewCloudRepositoryR :: UUID -> Handler Html
postEditNewCloudRepositoryR uuid = xmppNeeded >> editForm True uuid
postEditNewCloudRepositoryR uuid = xmppNeeded >> editForm True (RepoUUID uuid)
editForm :: Bool -> UUID -> Handler Html
editForm new uuid = page "Edit repository" (Just Configuration) $ do
editForm :: Bool -> RepoId -> Handler Html
editForm new (RepoUUID uuid) = page "Edit repository" (Just Configuration) $ do
mremote <- liftAnnex $ Remote.remoteFromUUID uuid
when (mremote == Nothing) $
whenM ((/=) uuid <$> liftAnnex getUUID) $
error "unknown remote"
curr <- liftAnnex $ getRepoConfig uuid mremote
liftAnnex $ checkAssociatedDirectory curr mremote
((result, form), enctype) <- liftH $
runFormPost $ renderBootstrap $ editRepositoryAForm (isNothing mremote) curr
runFormPostNoToken $ renderBootstrap $ editRepositoryAForm (isNothing mremote) curr
case result of
FormSuccess input -> liftH $ do
setRepoConfig uuid mremote curr input
@ -187,9 +195,16 @@ editForm new uuid = page "Edit repository" (Just Configuration) $ do
redirect DashboardR
_ -> do
let istransfer = repoGroup curr == RepoGroupStandard TransferGroup
repoInfo <- getRepoInfo mremote . M.lookup uuid
<$> liftAnnex readRemoteLog
$(widgetFile "configurators/editrepository")
config <- liftAnnex $ M.lookup uuid <$> readRemoteLog
let repoInfo = getRepoInfo mremote config
let repoEncryption = getRepoEncryption mremote config
$(widgetFile "configurators/edit/repository")
editForm new r@(RepoName _) = page "Edit repository" (Just Configuration) $ do
mr <- liftAnnex (repoIdRemote r)
let repoInfo = getRepoInfo mr Nothing
g <- liftAnnex gitRepo
let sshrepo = maybe False (remoteLocationIsSshUrl . flip parseRemoteLocation g . Git.repoLocation . Remote.repo) mr
$(widgetFile "configurators/edit/nonannexremote")
{- Makes any directory associated with the repository. -}
checkAssociatedDirectory :: RepoConfig -> Maybe Remote -> Annex ()
@ -221,3 +236,34 @@ getGitRepoInfo :: Git.Repo -> Widget
getGitRepoInfo r = do
let loc = Git.repoLocation r
[whamlet|git repository located at <tt>#{loc}</tt>|]
getRepoEncryption :: Maybe Remote.Remote -> Maybe Remote.RemoteConfig -> Widget
getRepoEncryption (Just _) (Just c) = case extractCipher c of
Nothing ->
[whamlet|not encrypted|]
(Just (SharedCipher _)) ->
[whamlet|encrypted: encryption key stored in git repository|]
(Just (EncryptedCipher _ _ (KeyIds { keyIds = ks }))) -> do
knownkeys <- liftIO secretKeys
[whamlet|
encrypted using gpg key:
<ul style="list-style: none">
$forall k <- ks
<li>
^{gpgKeyDisplay k (M.lookup k knownkeys)}
|]
getRepoEncryption _ _ = return () -- local repo
getUpgradeRepositoryR :: RepoId -> Handler ()
getUpgradeRepositoryR (RepoUUID _) = redirect DashboardR
getUpgradeRepositoryR r = go =<< liftAnnex (repoIdRemote r)
where
go Nothing = redirect DashboardR
go (Just rmt) = do
liftIO fixSshKeyPair
liftAnnex $ setConfig
(remoteConfig (Remote.repo rmt) "ignore")
(Git.Config.boolConfig False)
liftAssistant $ syncRemote rmt
liftAnnex $ void Remote.remoteListRefresh
redirect DashboardR

View file

@ -0,0 +1,195 @@
{- git-annex assistant fsck configuration
-
- Copyright 2013 Joey Hess <joey@kitenet.net>
-
- Licensed under the GNU AGPL version 3 or higher.
-}
{-# LANGUAGE QuasiQuotes, TemplateHaskell, OverloadedStrings #-}
module Assistant.WebApp.Configurators.Fsck where
import qualified Data.Map as M
import qualified Data.Set as S
import qualified Data.Text as T
import Assistant.WebApp.Common
import Types.ScheduledActivity
import Utility.HumanTime
import Utility.Scheduled
import Logs.Schedule
import Annex.UUID
import qualified Remote
import Assistant.DaemonStatus
import qualified Annex.Branch
import Assistant.Fsck
import Config
import Git.Config
import qualified Annex
{- This adds a form to the page. It does not handle posting of the form,
- because unlike a typical yesod form that posts using the same url
- that generated it, this form posts using one of two other routes. -}
showFsckForm :: Bool -> ScheduledActivity -> Widget
showFsckForm new activity = do
u <- liftAnnex getUUID
let action = if new
then AddActivityR u
else ChangeActivityR u activity
((res, form), enctype) <- liftH $ runFsckForm new activity
case res of
FormSuccess _ -> noop
_ -> $(widgetFile "configurators/fsck/form")
{- This does not display a form, but it does get it from a post, and runs
- some Annex action on it. -}
withFsckForm :: (ScheduledActivity -> Annex ()) -> Handler ()
withFsckForm a = do
((res, _form), _enctype) <- runFsckForm False $ defaultFsck Nothing
case res of
FormSuccess activity -> liftAnnex $ a activity
_ -> noop
mkFsck :: UUID -> UUID -> Schedule -> Duration -> ScheduledActivity
mkFsck hereu u s d
| u == hereu = ScheduledSelfFsck s d
| otherwise = ScheduledRemoteFsck u s d
runFsckForm :: Bool -> ScheduledActivity -> Handler ((FormResult ScheduledActivity, Widget), Enctype)
runFsckForm new activity = case activity of
ScheduledSelfFsck s d -> go s d =<< liftAnnex getUUID
ScheduledRemoteFsck ru s d -> go s d ru
where
go (Schedule r t) d ru = do
u <- liftAnnex getUUID
repolist <- liftAssistant (getrepolist ru)
runFormPostNoToken $ \msg -> do
(reposRes, reposView) <- mreq (selectFieldList repolist) "" (Just ru)
(durationRes, durationView) <- mreq intField "" (Just $ durationSeconds d `quot` 60 )
(timeRes, timeView) <- mreq (selectFieldList times) "" (Just t)
(recurranceRes, recurranceView) <- mreq (selectFieldList recurrances) "" (Just r)
let form = do
webAppFormAuthToken
$(widgetFile "configurators/fsck/formcontent")
let formresult = mkFsck
<$> pure u
<*> reposRes
<*> (Schedule <$> recurranceRes <*> timeRes)
<*> (Duration <$> ((60 *) <$> durationRes))
return (formresult, form)
where
times :: [(Text, ScheduledTime)]
times = ensurevalue t (T.pack $ fromScheduledTime t) $
map (\x -> (T.pack $ fromScheduledTime x, x)) $
AnyTime : map (\h -> SpecificTime h 0) [0..23]
recurrances :: [(Text, Recurrance)]
recurrances = ensurevalue r (T.pack $ fromRecurrance r) $
[ ("every day", Daily)
, ("every Sunday", Weekly $ Just 1)
, ("every Monday", Weekly $ Just 2)
, ("every Tuesday", Weekly $ Just 3)
, ("every Wednesday", Weekly $ Just 4)
, ("every Thursday", Weekly $ Just 5)
, ("every Friday", Weekly $ Just 6)
, ("every Saturday", Weekly $ Just 7)
, ("monthly", Monthly Nothing)
, ("twice a month", Divisible 2 (Weekly Nothing))
, ("yearly", Yearly Nothing)
, ("twice a year", Divisible 6 (Monthly Nothing))
, ("quarterly", Divisible 4 (Monthly Nothing))
]
ensurevalue v desc l = case M.lookup v (M.fromList $ map (\(x,y) -> (y,x)) l) of
Just _ -> l
Nothing -> (desc, v) : l
getrepolist :: UUID -> Assistant [(Text, UUID)]
getrepolist ensureu = do
-- It is possible to have fsck jobs for remotes that
-- do not implement remoteFsck, but it's not too useful,
-- so omit them from the UI normally.
remotes <- filter (\r -> Remote.uuid r == ensureu || isJust (Remote.remoteFsck r)) . syncRemotes
<$> getDaemonStatus
u <- liftAnnex getUUID
let us = u : (map Remote.uuid remotes)
liftAnnex $
zip <$> (map T.pack <$> Remote.prettyListUUIDs us) <*> pure us
defaultFsck :: Maybe Remote -> ScheduledActivity
defaultFsck Nothing = ScheduledSelfFsck (Schedule Daily AnyTime) (Duration $ 60*60)
defaultFsck (Just r) = ScheduledRemoteFsck (Remote.uuid r) (Schedule Daily AnyTime) (Duration $ 60*60)
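For illustration only (not part of this commit), a value built from the same constructors the form above offers: a self-fsck every Sunday at 3 AM, limited to two hours.

weeklySelfFsck :: ScheduledActivity
weeklySelfFsck = ScheduledSelfFsck
    (Schedule (Weekly (Just 1)) (SpecificTime 3 0))
    (Duration (2 * 60 * 60))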
showFsckStatus :: ScheduledActivity -> Widget
showFsckStatus activity = do
m <- liftAnnex getLastRunTimes
let lastrun = M.lookup activity m
$(widgetFile "configurators/fsck/status")
getConfigFsckR :: Handler Html
getConfigFsckR = postConfigFsckR
postConfigFsckR :: Handler Html
postConfigFsckR = page "Consistency checks" (Just Configuration) $ do
scheduledchecks <- liftAnnex $
S.toList <$> (scheduleGet =<< getUUID)
rs <- liftAssistant $
filter fsckableRemote . syncRemotes <$> getDaemonStatus
recommendedchecks <- liftAnnex $ map defaultFsck
<$> filterM (not <$$> checkFscked) (Nothing : map Just rs)
$(widgetFile "configurators/fsck")
changeSchedule :: Handler () -> Handler Html
changeSchedule a = do
a
liftAnnex $ Annex.Branch.commit "update"
redirect ConfigFsckR
getRemoveActivityR :: UUID -> ScheduledActivity -> Handler Html
getRemoveActivityR u activity = changeSchedule $
liftAnnex $ scheduleRemove u activity
getAddActivityR :: UUID -> Handler Html
getAddActivityR = postAddActivityR
postAddActivityR :: UUID -> Handler Html
postAddActivityR u = changeSchedule $
withFsckForm $ scheduleAdd u
getChangeActivityR :: UUID -> ScheduledActivity -> Handler Html
getChangeActivityR = postChangeActivityR
postChangeActivityR :: UUID -> ScheduledActivity -> Handler Html
postChangeActivityR u oldactivity = changeSchedule $
withFsckForm $ \newactivity -> scheduleChange u $
S.insert newactivity . S.delete oldactivity
data FsckPreferences = FsckPreferences
{ enableFsckNudge :: Bool
}
getFsckPreferences :: Annex FsckPreferences
getFsckPreferences = FsckPreferences
<$> (annexFsckNudge <$> Annex.getGitConfig)
fsckPreferencesAForm :: FsckPreferences -> MkAForm FsckPreferences
fsckPreferencesAForm def = FsckPreferences
<$> areq (checkBoxField `withNote` nudgenote) "Reminders" (Just $ enableFsckNudge def)
where
nudgenote = [whamlet|Remind me when using repositories that lack consistency checks.|]
runFsckPreferencesForm :: Handler ((FormResult FsckPreferences, Widget), Enctype)
runFsckPreferencesForm = do
prefs <- liftAnnex getFsckPreferences
runFormPostNoToken $ renderBootstrap $ fsckPreferencesAForm prefs
showFsckPreferencesForm :: Widget
showFsckPreferencesForm = do
((res, form), enctype) <- liftH $ runFsckPreferencesForm
case res of
FormSuccess _ -> noop
_ -> $(widgetFile "configurators/fsck/preferencesform")
postConfigFsckPreferencesR :: Handler Html
postConfigFsckPreferencesR = do
((res, _form), _enctype) <- runFsckPreferencesForm
case res of
FormSuccess prefs ->
liftAnnex $ setConfig (annexConfig "fscknudge")
(boolConfig $ enableFsckNudge prefs)
_ -> noop
redirect ConfigFsckR

View file

@ -14,16 +14,16 @@ import qualified Assistant.WebApp.Configurators.AWS as AWS
#ifdef WITH_S3
import qualified Remote.S3 as S3
import qualified Remote.Helper.AWS as AWS
import Assistant.MakeRemote
import Assistant.WebApp.MakeRemote
#endif
import qualified Remote
import qualified Types.Remote as Remote
import Types.StandardGroups
import Types.Remote (RemoteConfig)
import Logs.PreferredContent
import Logs.Remote
import qualified Utility.Url as Url
import qualified Annex.Url as Url
import Creds
import Assistant.Gpg
import qualified Data.Text as T
import qualified Data.Map as M
@ -111,7 +111,7 @@ previouslyUsedIACreds = previouslyUsedCredPair AWS.creds S3.remote $
#endif
accessKeyIDFieldWithHelp :: Maybe Text -> MkAForm Text
accessKeyIDFieldWithHelp def = AWS.accessKeyIDField help def
accessKeyIDFieldWithHelp = AWS.accessKeyIDField help
where
help = [whamlet|
<a href="http://archive.org/account/s3.php">
@ -126,11 +126,11 @@ postAddIAR :: Handler Html
postAddIAR = iaConfigurator $ do
defcreds <- liftAnnex previouslyUsedIACreds
((result, form), enctype) <- liftH $
runFormPost $ renderBootstrap $ iaInputAForm defcreds
runFormPostNoToken $ renderBootstrap $ iaInputAForm defcreds
case result of
FormSuccess input -> liftH $ do
let name = escapeBucket $ T.unpack $ itemName input
AWS.makeAWSRemote initSpecialRemote S3.remote (extractCreds input) name setgroup $
AWS.makeAWSRemote initSpecialRemote S3.remote PublicGroup (extractCreds input) name $
M.fromList $ catMaybes
[ Just $ configureEncryption NoEncryption
, Just ("type", "S3")
@ -146,9 +146,6 @@ postAddIAR = iaConfigurator $ do
, Just ("preferreddir", name)
]
_ -> $(widgetFile "configurators/addia")
where
setgroup r = liftAnnex $
setStandardGroup (Remote.uuid r) PublicGroup
#else
postAddIAR = error "S3 not supported by this build"
#endif
@ -168,13 +165,13 @@ enableIARemote :: UUID -> Widget
enableIARemote uuid = do
defcreds <- liftAnnex previouslyUsedIACreds
((result, form), enctype) <- liftH $
runFormPost $ renderBootstrap $ iaCredsAForm defcreds
runFormPostNoToken $ renderBootstrap $ iaCredsAForm defcreds
case result of
FormSuccess creds -> liftH $ do
m <- liftAnnex readRemoteLog
let name = fromJust $ M.lookup "name" $
fromJust $ M.lookup uuid m
AWS.makeAWSRemote enableSpecialRemote S3.remote creds name (const noop) M.empty
AWS.makeAWSRemote enableSpecialRemote S3.remote PublicGroup creds name M.empty
_ -> do
description <- liftAnnex $
T.pack <$> Remote.prettyUUID uuid
@ -193,7 +190,8 @@ escapeHeader = escapeURIString (\c -> isUnescapedInURI c && c /= ' ')
getRepoInfo :: RemoteConfig -> Widget
getRepoInfo c = do
exists <- liftIO $ catchDefaultIO False $ fst <$> Url.exists url []
ua <- liftAnnex Url.getUserAgent
exists <- liftIO $ catchDefaultIO False $ fst <$> Url.exists url [] ua
[whamlet|
<a href="#{url}">
Internet Archive item

View file

@ -11,7 +11,8 @@ module Assistant.WebApp.Configurators.Local where
import Assistant.WebApp.Common
import Assistant.WebApp.OtherRepos
import Assistant.MakeRemote
import Assistant.WebApp.Gpg
import Assistant.WebApp.MakeRemote
import Assistant.Sync
import Init
import qualified Git
@ -23,21 +24,27 @@ import Config.Files
import Utility.FreeDesktop
#ifdef WITH_CLIBS
import Utility.Mounts
#endif
import Utility.DiskFree
#endif
import Utility.DataUnits
import Utility.Network
import Remote (prettyUUID)
import Annex.UUID
import Annex.Direct
import Types.StandardGroups
import Logs.PreferredContent
import Logs.UUID
import Utility.UserInfo
import Config
import Utility.Gpg
import qualified Annex.Branch
import qualified Remote.GCrypt as GCrypt
import qualified Types.Remote
import qualified Data.Text as T
import qualified Data.Map as M
import Data.Char
import Data.Ord
import qualified Text.Hamlet as Hamlet
data RepositoryPath = RepositoryPath Text
@ -94,7 +101,7 @@ checkRepositoryPath p = do
Nothing -> Right $ Just $ T.pack basepath
Just prob -> Left prob
where
runcheck (chk, msg) = ifM (chk) ( return $ Just msg, return Nothing )
runcheck (chk, msg) = ifM chk ( return $ Just msg, return Nothing )
expandTilde home ('~':'/':path) = home </> path
expandTilde _ path = path
@ -107,7 +114,7 @@ checkRepositoryPath p = do
- browsed to a directory with git-annex and run it from there. -}
defaultRepositoryPath :: Bool -> IO FilePath
defaultRepositoryPath firstrun = do
cwd <- liftIO $ getCurrentDirectory
cwd <- liftIO getCurrentDirectory
home <- myHomeDir
if home == cwd && firstrun
then inhome
@ -130,7 +137,7 @@ newRepositoryForm defpath msg = do
(Just $ T.pack $ addTrailingPathSeparator defpath)
let (err, errmsg) = case pathRes of
FormMissing -> (False, "")
FormFailure l -> (True, concat $ map T.unpack l)
FormFailure l -> (True, concatMap T.unpack l)
FormSuccess _ -> (False, "")
let form = do
webAppFormAuthToken
@ -149,7 +156,7 @@ postFirstRepositoryR = page "Getting started" (Just Configuration) $ do
let androidspecial = False
path <- liftIO . defaultRepositoryPath =<< liftH inFirstRun
#endif
((res, form), enctype) <- liftH $ runFormPost $ newRepositoryForm path
((res, form), enctype) <- liftH $ runFormPostNoToken $ newRepositoryForm path
case res of
FormSuccess (RepositoryPath p) -> liftH $
startFullAssistant (T.unpack p) ClientGroup Nothing
@ -172,7 +179,7 @@ getNewRepositoryR = postNewRepositoryR
postNewRepositoryR :: Handler Html
postNewRepositoryR = page "Add another repository" (Just Configuration) $ do
home <- liftIO myHomeDir
((res, form), enctype) <- liftH $ runFormPost $ newRepositoryForm home
((res, form), enctype) <- liftH $ runFormPostNoToken $ newRepositoryForm home
case res of
FormSuccess (RepositoryPath p) -> do
let path = T.unpack p
@ -189,11 +196,11 @@ postNewRepositoryR = page "Add another repository" (Just Configuration) $ do
mainrepo <- fromJust . relDir <$> liftH getYesod
$(widgetFile "configurators/newrepository/combine")
getCombineRepositoryR :: FilePathAndUUID -> Handler Html
getCombineRepositoryR (FilePathAndUUID newrepopath newrepouuid) = do
getCombineRepositoryR :: FilePath -> UUID -> Handler Html
getCombineRepositoryR newrepopath newrepouuid = do
r <- combineRepos newrepopath remotename
liftAssistant $ syncRemote r
redirect $ EditRepositoryR newrepouuid
redirect $ EditRepositoryR $ RepoUUID newrepouuid
where
remotename = takeFileName newrepopath
@ -224,10 +231,10 @@ getAddDriveR :: Handler Html
getAddDriveR = postAddDriveR
postAddDriveR :: Handler Html
postAddDriveR = page "Add a removable drive" (Just Configuration) $ do
removabledrives <- liftIO $ driveList
removabledrives <- liftIO driveList
writabledrives <- liftIO $
filterM (canWrite . T.unpack . mountPoint) removabledrives
((res, form), enctype) <- liftH $ runFormPost $
((res, form), enctype) <- liftH $ runFormPostNoToken $
selectDriveForm (sort writabledrives)
case res of
FormSuccess drive -> liftH $ redirect $ ConfirmAddDriveR drive
@ -236,46 +243,85 @@ postAddDriveR = page "Add a removable drive" (Just Configuration) $ do
{- The repo may already exist, when adding removable media
- that has already been used elsewhere. If so, check
- the UUID of the repo and see if it's one we know. If not,
- the user must confirm the repository merge. -}
- the user must confirm the repository merge.
-
- If the repo does not already exist on the drive, prompt about
- encryption. -}
getConfirmAddDriveR :: RemovableDrive -> Handler Html
getConfirmAddDriveR drive = do
ifM (needconfirm)
( page "Combine repositories?" (Just Configuration) $
$(widgetFile "configurators/adddrive/confirm")
, do
getFinishAddDriveR drive
)
getConfirmAddDriveR drive = ifM (liftIO $ probeRepoExists dir)
( do
mu <- liftIO $ probeUUID dir
case mu of
Nothing -> maybe askcombine isknownuuid
=<< liftAnnex (probeGCryptRemoteUUID dir)
Just driveuuid -> isknownuuid driveuuid
, newrepo
)
where
dir = removableDriveRepository drive
needconfirm = ifM (liftIO $ doesDirectoryExist dir)
( liftAnnex $ do
mu <- liftIO $ catchMaybeIO $
inDir dir $ getUUID
case mu of
Nothing -> return False
Just driveuuid -> not .
M.member driveuuid <$> uuidMap
, return False
newrepo = do
secretkeys <- sortBy (comparing snd) . M.toList
<$> liftIO secretKeys
page "Encrypt repository?" (Just Configuration) $
$(widgetFile "configurators/adddrive/encrypt")
knownrepo = getFinishAddDriveR drive NoRepoKey
askcombine = page "Combine repositories?" (Just Configuration) $
$(widgetFile "configurators/adddrive/combine")
isknownuuid driveuuid =
ifM (M.member driveuuid <$> liftAnnex uuidMap)
( knownrepo
, askcombine
)
setupDriveModal :: Widget
setupDriveModal = $(widgetFile "configurators/adddrive/setupmodal")
getGenKeyForDriveR :: RemovableDrive -> Handler Html
getGenKeyForDriveR drive = withNewSecretKey $ \keyid ->
{- Generating a key takes a long time, and
- the removable drive may have been disconnected
- in the meantime. Check that it is still mounted
- before finishing. -}
ifM (liftIO $ any (\d -> mountPoint d == mountPoint drive) <$> driveList)
( getFinishAddDriveR drive (RepoKey keyid)
, getAddDriveR
)
cloneModal :: Widget
cloneModal = $(widgetFile "configurators/adddrive/clonemodal")
getFinishAddDriveR :: RemovableDrive -> Handler Html
getFinishAddDriveR drive = make >>= redirect . EditNewRepositoryR
getFinishAddDriveR :: RemovableDrive -> RepoKey -> Handler Html
getFinishAddDriveR drive = go
where
make = do
{- Set up new gcrypt special remote. -}
go (RepoKey keyid) = whenGcryptInstalled $ makewith $ const $ do
r <- liftAnnex $ addRemote $
makeGCryptRemote remotename dir keyid
return (Types.Remote.uuid r, r)
go NoRepoKey = checkGCryptRepoEncryption dir makeunencrypted makeunencrypted $ do
mu <- liftAnnex $ probeGCryptRemoteUUID dir
case mu of
Just u -> enableexistinggcryptremote u
Nothing -> error "The drive contains a gcrypt repository that is not a git-annex special remote. This is not supported."
enableexistinggcryptremote u = do
remotename' <- liftAnnex $ getGCryptRemoteName u dir
makewith $ const $ do
r <- liftAnnex $ addRemote $
enableSpecialRemote remotename' GCrypt.remote $ M.fromList
[("gitrepo", dir)]
return (u, r)
{- Making a new unencrypted repo, or combining with an existing one. -}
makeunencrypted = makewith $ \isnew -> (,)
<$> liftIO (initRepo isnew False dir $ Just remotename)
<*> combineRepos dir remotename
makewith a = do
liftIO $ createDirectoryIfMissing True dir
isnew <- liftIO $ makeRepo dir True
u <- liftIO $ initRepo isnew False dir $ Just remotename
{- Removable drives are not reliable media, so enable fsync. -}
liftIO $ inDir dir $
setConfig (ConfigKey "core.fsyncobjectfiles")
(Git.Config.boolConfig True)
r <- combineRepos dir remotename
(u, r) <- a isnew
liftAnnex $ setStandardGroup u TransferGroup
liftAssistant $ syncRemote r
return u
redirect $ EditNewRepositoryR u
mountpoint = T.unpack (mountPoint drive)
dir = removableDriveRepository drive
remotename = takeFileName mountpoint
@ -284,7 +330,7 @@ getFinishAddDriveR drive = make >>= redirect . EditNewRepositoryR
- Next call syncRemote to get them in sync. -}
combineRepos :: FilePath -> String -> Handler Remote
combineRepos dir name = liftAnnex $ do
hostname <- maybe "host" id <$> liftIO getHostname
hostname <- fromMaybe "host" <$> liftIO getHostname
hostlocation <- fromRepo Git.repoLocation
liftIO $ inDir dir $ void $ makeGitRemote hostname hostlocation
addRemote $ makeGitRemote name dir
@ -335,7 +381,7 @@ startFullAssistant path repogroup setup = do
u <- initRepo isnew True path Nothing
inDir path $ do
setStandardGroup u repogroup
maybe noop id setup
fromMaybe noop setup
addAutoStartFile path
setCurrentDirectory path
fromJust $ postFirstRun webapp
@ -344,7 +390,7 @@ startFullAssistant path repogroup setup = do
{- Makes a new git repository. Or, if a git repository already
- exists, returns False. -}
makeRepo :: FilePath -> Bool -> IO Bool
makeRepo path bare = ifM alreadyexists
makeRepo path bare = ifM (probeRepoExists path)
( return False
, do
(transcript, ok) <-
@ -354,14 +400,12 @@ makeRepo path bare = ifM alreadyexists
return True
)
where
alreadyexists = isJust <$>
catchDefaultIO Nothing (Git.Construct.checkForRepo path)
baseparams = [Param "init", Param "--quiet"]
params
| bare = baseparams ++ [Param "--bare", File path]
| otherwise = baseparams ++ [File path]
{- Runs an action in the git-annex repository in the specified directory. -}
{- Runs an action in the git repository in the specified directory. -}
inDir :: FilePath -> Annex a -> IO a
inDir dir a = do
state <- Annex.new =<< Git.Config.read =<< Git.Construct.fromPath dir
@ -397,9 +441,12 @@ initRepo False _ dir desc = inDir dir $ do
getUUID
initRepo' :: Maybe String -> Annex ()
initRepo' desc = do
unlessM isInitialized $
initialize desc
initRepo' desc = unlessM isInitialized $ do
initialize desc
{- Ensure branch gets committed right away so it is
- available for merging when a removable drive repo is being
- added. -}
Annex.Branch.commit "update"
{- Checks if the user can write to a directory.
-
@ -410,3 +457,15 @@ canWrite dir = do
tocheck <- ifM (doesDirectoryExist dir)
(return dir, return $ parentDir dir)
catchBoolIO $ fileAccess tocheck False True False
{- Checks if a git repo exists at a location. -}
probeRepoExists :: FilePath -> IO Bool
probeRepoExists dir = isJust <$>
catchDefaultIO Nothing (Git.Construct.checkForRepo dir)
{- Gets the UUID of the git repo at a location, which may not exist, or
- not be a git-annex repo. -}
probeUUID :: FilePath -> IO (Maybe UUID)
probeUUID dir = catchDefaultIO Nothing $ inDir dir $ do
u <- getUUID
return $ if u == NoUUID then Nothing else Just u
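{- A minimal usage sketch combining the two probes above (hypothetical
 - helper, not part of the commit; getConfirmAddDriveR uses the same
 - pattern when deciding what to do with a drive): -}
describeRepoAt :: FilePath -> IO String
describeRepoAt dir = ifM (probeRepoExists dir)
	( maybe "git repo without an annex uuid" (("annex repo " ++) . show)
		<$> probeUUID dir
	, return "no git repository"
	)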

View file

@ -152,7 +152,7 @@ postFinishLocalPairR msg = promptSecret (Just msg) $ \_ secret -> do
where
alert = pairRequestAcknowledgedAlert (pairRepo msg) . Just
setup repodir = setupAuthorizedKeys msg repodir
cleanup repodir = removeAuthorizedKeys False repodir $
cleanup repodir = removeAuthorizedKeys True repodir $
remoteSshPubKey $ pairMsgData msg
uuid = Just $ pairUUID $ pairMsgData msg
#else
@ -265,7 +265,7 @@ data InputSecret = InputSecret { secretText :: Maybe Text }
promptSecret :: Maybe PairMsg -> (Text -> Secret -> Widget) -> Handler Html
promptSecret msg cont = pairPage $ do
((result, form), enctype) <- liftH $
runFormPost $ renderBootstrap $
runFormPostNoToken $ renderBootstrap $
InputSecret <$> aopt textField "Secret phrase" Nothing
case result of
FormSuccess v -> do
@ -300,7 +300,7 @@ secretProblem :: Secret -> Maybe Text
secretProblem s
| B.null s = Just "The secret phrase cannot be left empty. (Remember that punctuation and white space is ignored.)"
| B.length s < 6 = Just "Enter a longer secret phrase, at least 6 characters, but really, a phrase is best! This is not a password you'll need to enter every day."
| s == toSecret sampleQuote = Just "Speaking of foolishness, don't paste in the example I gave. Enter a different phrase, please!"
| s == toSecret sampleQuote = Just "Speaking of foolishness, don't paste in the example I gave. Enter a different phrase, please!"
| otherwise = Nothing
toSecret :: Text -> Secret

View file

@ -90,7 +90,7 @@ postPreferencesR :: Handler Html
postPreferencesR = page "Preferences" (Just Configuration) $ do
((result, form), enctype) <- liftH $ do
current <- liftAnnex getPrefs
runFormPost $ renderBootstrap $ prefsAForm current
runFormPostNoToken $ renderBootstrap $ prefsAForm current
case result of
FormSuccess new -> liftH $ do
liftAnnex $ storePrefs new

View file

@ -1,6 +1,6 @@
{- git-annex assistant webapp configurator for ssh-based remotes
-
- Copyright 2012 Joey Hess <joey@kitenet.net>
- Copyright 2012-2013 Joey Hess <joey@kitenet.net>
-
- Licensed under the GNU AGPL version 3 or higher.
-}
@ -11,18 +11,24 @@
module Assistant.WebApp.Configurators.Ssh where
import Assistant.WebApp.Common
import Assistant.WebApp.Gpg
import Assistant.Ssh
import Assistant.MakeRemote
import Utility.Rsync (rsyncUrlIsShell)
import Assistant.WebApp.MakeRemote
import Logs.Remote
import Remote
import Logs.PreferredContent
import Types.StandardGroups
import Utility.UserInfo
import Utility.Gpg
import Types.Remote (RemoteConfig)
import Git.Types (RemoteName)
import qualified Remote.GCrypt as GCrypt
import Annex.UUID
import Logs.UUID
import qualified Data.Text as T
import qualified Data.Map as M
import Network.Socket
import Data.Ord
sshConfigurator :: Widget -> Handler Html
sshConfigurator = page "Add a remote server" (Just Configuration)
@ -47,7 +53,7 @@ mkSshData s = SshData
(maybe "" T.unpack $ inputDirectory s)
, sshPort = inputPort s
, needsPubKey = False
, rsyncOnly = False
, sshCapabilities = [] -- untested
}
mkSshInput :: SshData -> SshInput
@ -81,7 +87,7 @@ sshInputAForm hostnamefield def = SshInput
let h = T.unpack t
let canonname = Just $ defaultHints { addrFlags = [AI_CANONNAME] }
r <- catchMaybeIO $ getAddrInfo canonname (Just h) Nothing
return $ case catMaybes . map addrCanonName <$> r of
return $ case mapMaybe addrCanonName <$> r of
-- canonicalize input hostname if it had no dot
Just (fullname:_)
| '.' `elem` h -> Right t
@ -96,30 +102,27 @@ sshInputAForm hostnamefield def = SshInput
data ServerStatus
= UntestedServer
| UnusableServer Text -- reason why it's not usable
| UsableRsyncServer
| UsableSshInput
| UsableServer [SshServerCapability]
deriving (Eq)
usable :: ServerStatus -> Bool
usable UntestedServer = False
usable (UnusableServer _) = False
usable UsableRsyncServer = True
usable UsableSshInput = True
capabilities :: ServerStatus -> [SshServerCapability]
capabilities (UsableServer cs) = cs
capabilities _ = []
getAddSshR :: Handler Html
getAddSshR = postAddSshR
postAddSshR :: Handler Html
postAddSshR = sshConfigurator $ do
u <- liftIO $ T.pack <$> myUserName
username <- liftIO $ T.pack <$> myUserName
((result, form), enctype) <- liftH $
runFormPost $ renderBootstrap $ sshInputAForm textField $
SshInput Nothing (Just u) Nothing 22
runFormPostNoToken $ renderBootstrap $ sshInputAForm textField $
SshInput Nothing (Just username) Nothing 22
case result of
FormSuccess sshinput -> do
s <- liftIO $ testServer sshinput
case s of
Left status -> showform form enctype status
Right sshdata -> liftH $ redirect $ ConfirmSshR sshdata
Right (sshdata, u) -> liftH $ redirect $ ConfirmSshR sshdata u
_ -> showform form enctype UntestedServer
where
showform form enctype status = $(widgetFile "configurators/ssh/add")
@ -127,64 +130,64 @@ postAddSshR = sshConfigurator $ do
sshTestModal :: Widget
sshTestModal = $(widgetFile "configurators/ssh/testmodal")
{- To enable an existing rsync special remote, parse the SshInput from
- its rsyncurl, and display a form whose only real purpose is to check
- if ssh public keys need to be set up. From there, we can proceed with
- the usual repo setup; all that code is idempotent.
-
- Note that there's no EnableSshR because ssh remotes are not special
- remotes, and so their configuration is not shared between repositories.
-}
sshSetupModal :: SshData -> Widget
sshSetupModal sshdata = $(widgetFile "configurators/ssh/setupmodal")
getEnableRsyncR :: UUID -> Handler Html
getEnableRsyncR = postEnableRsyncR
postEnableRsyncR :: UUID -> Handler Html
postEnableRsyncR u = do
postEnableRsyncR = enableSpecialSshRemote getsshinput enableRsyncNet enablersync
where
enablersync sshdata u = redirect $ ConfirmSshR
(sshdata { sshCapabilities = [RsyncCapable] }) u
getsshinput = parseSshUrl <=< M.lookup "rsyncurl"
{- This only handles gcrypt repositories that are located on ssh servers;
- ones on local drives are handled via another part of the UI. -}
getEnableSshGCryptR :: UUID -> Handler Html
getEnableSshGCryptR = postEnableSshGCryptR
postEnableSshGCryptR :: UUID -> Handler Html
postEnableSshGCryptR u = whenGcryptInstalled $
enableSpecialSshRemote getsshinput enableRsyncNetGCrypt enablegcrypt u
where
enablegcrypt sshdata _ = prepSsh True sshdata $ \sshdata' ->
sshConfigurator $
checkExistingGCrypt sshdata' $
error "Expected to find an encrypted git repository, but did not."
getsshinput = parseSshUrl <=< M.lookup "gitrepo"
{- To enable a special remote that uses ssh as its transport,
- parse a config key to get its url, and display a form whose
- only real purpose is to check if ssh public keys need to be
- set up.
-}
enableSpecialSshRemote :: (RemoteConfig -> Maybe SshData) -> (SshInput -> RemoteName -> Handler Html) -> (SshData -> UUID -> Handler Html) -> UUID -> Handler Html
enableSpecialSshRemote getsshinput rsyncnetsetup genericsetup u = do
m <- fromMaybe M.empty . M.lookup u <$> liftAnnex readRemoteLog
case (parseSshRsyncUrl =<< M.lookup "rsyncurl" m, M.lookup "name" m) of
case (mkSshInput . unmangle <$> getsshinput m, M.lookup "name" m) of
(Just sshinput, Just reponame) -> sshConfigurator $ do
((result, form), enctype) <- liftH $
runFormPost $ renderBootstrap $ sshInputAForm textField sshinput
runFormPostNoToken $ renderBootstrap $ sshInputAForm textField sshinput
case result of
FormSuccess sshinput'
| isRsyncNet (inputHostname sshinput') ->
void $ liftH $ makeRsyncNet sshinput' reponame (const noop)
void $ liftH $ rsyncnetsetup sshinput' reponame
| otherwise -> do
s <- liftIO $ testServer sshinput'
case s of
Left status -> showform form enctype status
Right sshdata -> enable sshdata
{ sshRepoName = reponame }
Right (sshdata, _u) -> void $ liftH $ genericsetup
( sshdata { sshRepoName = reponame } ) u
_ -> showform form enctype UntestedServer
_ -> redirect AddSshR
where
unmangle sshdata = sshdata
{ sshHostName = T.pack $ unMangleSshHostName $
T.unpack $ sshHostName sshdata
}
showform form enctype status = do
description <- liftAnnex $ T.pack <$> prettyUUID u
$(widgetFile "configurators/ssh/enable")
enable sshdata = liftH $ redirect $ ConfirmSshR $
sshdata { rsyncOnly = True }
{- Converts a rsyncurl value to a SshInput. But only if it's a ssh rsync
- url; rsync:// urls or bare path names are not supported.
-
- The hostname is stored mangled in the remote log for rsync special
- remotes configured by this webapp. So that mangling has to reversed
- here to get back the original hostname.
-}
parseSshRsyncUrl :: String -> Maybe SshInput
parseSshRsyncUrl u
| not (rsyncUrlIsShell u) = Nothing
| otherwise = Just $ SshInput
{ inputHostname = val $ unMangleSshHostName host
, inputUsername = if null user then Nothing else val user
, inputDirectory = val dir
, inputPort = 22
}
where
val = Just . T.pack
(userhost, dir) = separate (== ':') u
(user, host) = if '@' `elem` userhost
then separate (== '@') userhost
else (userhost, "")
{- Test if we can ssh into the server.
-
@ -193,33 +196,41 @@ parseSshRsyncUrl u
- passwordless login is already enabled, use it. Otherwise,
- a special ssh key will need to be generated just for this server.
-
- Once logged into the server, probe to see if git-annex-shell is
- available, or rsync. Note that, ~/.ssh/git-annex-shell may be
- Once logged into the server, probe to see if git-annex-shell,
- git, and rsync are available.
- Note that ~/.ssh/git-annex-shell may be
- present while git-annex-shell is not in PATH.
-
- Also probe to see if there is already a git repository at the location
- with either an annex-uuid or a gcrypt-id set. (If not, returns NoUUID.)
-}
testServer :: SshInput -> IO (Either ServerStatus SshData)
testServer :: SshInput -> IO (Either ServerStatus (SshData, UUID))
testServer (SshInput { inputHostname = Nothing }) = return $
Left $ UnusableServer "Please enter a host name."
testServer sshinput@(SshInput { inputHostname = Just hn }) = do
status <- probe [sshOpt "NumberOfPasswordPrompts" "0"]
if usable status
then ret status False
else do
status' <- probe []
if usable status'
then ret status' True
else return $ Left status'
(status, u) <- probe [sshOpt "NumberOfPasswordPrompts" "0"]
case capabilities status of
[] -> do
(status', u') <- probe []
case capabilities status' of
[] -> return $ Left status'
cs -> ret cs True u'
cs -> ret cs False u
where
ret status needspubkey = return $ Right $ (mkSshData sshinput)
{ needsPubKey = needspubkey
, rsyncOnly = status == UsableRsyncServer
}
ret cs needspubkey u = do
let sshdata = (mkSshData sshinput)
{ needsPubKey = needspubkey
, sshCapabilities = cs
}
return $ Right (sshdata, u)
probe extraopts = do
let remotecommand = shellWrap $ intercalate ";"
[ report "loggedin"
, checkcommand "git-annex-shell"
, checkcommand "git"
, checkcommand "rsync"
, checkcommand shim
, getgitconfig (T.unpack <$> inputDirectory sshinput)
]
knownhost <- knownHost hn
let sshopts = filter (not . null) $ extraopts ++
@ -235,21 +246,35 @@ testServer sshinput@(SshInput { inputHostname = Just hn }) = do
, remotecommand
]
parsetranscript . fst <$> sshTranscript sshopts Nothing
parsetranscript s
| reported "git-annex-shell" = UsableSshInput
| reported shim = UsableSshInput
| reported "rsync" = UsableRsyncServer
| reported "loggedin" = UnusableServer
"Neither rsync nor git-annex are installed on the server. Perhaps you should go install them?"
| otherwise = UnusableServer $ T.pack $
"Failed to ssh to the server. Transcript: " ++ s
parsetranscript s =
let cs = map snd $ filter (reported . fst)
[ ("git-annex-shell", GitAnnexShellCapable)
, (shim, GitAnnexShellCapable)
, ("git", GitCapable)
, ("rsync", RsyncCapable)
]
u = fromMaybe NoUUID $ headMaybe $ mapMaybe finduuid $
map (separate (== '=')) $ lines s
in if null cs
then (UnusableServer unusablereason, u)
else (UsableServer cs, u)
where
reported r = token r `isInfixOf` s
unusablereason = if reported "loggedin"
then "Neither rsync nor git-annex are installed on the server. Perhaps you should go install them?"
else T.pack $ "Failed to ssh to the server. Transcript: " ++ s
finduuid (k, v)
| k == "annex.uuid" = Just $ toUUID v
| k == GCrypt.coreGCryptId = Just $ genUUIDInNameSpace gCryptNameSpace v
| otherwise = Nothing
checkcommand c = "if which " ++ c ++ "; then " ++ report c ++ "; fi"
token r = "git-annex-probe " ++ r
report r = "echo " ++ token r
shim = "~/.ssh/git-annex-shell"
getgitconfig (Just d)
| not (null d) = "cd " ++ shellEscape d ++ " && git config --list"
getgitconfig _ = "echo"
{- Runs a ssh command; if it fails, shows the user the transcript,
- and if it succeeds, runs an action. -}
@ -264,75 +289,141 @@ showSshErr :: String -> Handler Html
showSshErr msg = sshConfigurator $
$(widgetFile "configurators/ssh/error")
getConfirmSshR :: SshData -> Handler Html
getConfirmSshR sshdata = sshConfigurator $
$(widgetFile "configurators/ssh/confirm")
{- The UUID will be NoUUID when the repository does not already exist. -}
getConfirmSshR :: SshData -> UUID -> Handler Html
getConfirmSshR sshdata u
| u == NoUUID = handlenew
| otherwise = handleexisting =<< (M.lookup u <$> liftAnnex uuidMap)
where
handlenew = sshConfigurator $ do
secretkeys <- sortBy (comparing snd) . M.toList
<$> liftIO secretKeys
$(widgetFile "configurators/ssh/confirm")
handleexisting Nothing = sshConfigurator $
-- Not a UUID we know, so prompt about combining.
$(widgetFile "configurators/ssh/combine")
handleexisting (Just _) = prepSsh False sshdata $ \sshdata' -> do
m <- liftAnnex readRemoteLog
case M.lookup "type" =<< M.lookup u m of
Just "gcrypt" -> combineExistingGCrypt sshdata' u
-- This handles enabling git repositories
-- that already exist.
_ -> makeSshRepo sshdata'
{- The user has confirmed they want to combine with a ssh repository,
- which is not known to us. So it might be using gcrypt. -}
getCombineSshR :: SshData -> Handler Html
getCombineSshR sshdata = prepSsh False sshdata $ \sshdata' ->
sshConfigurator $
checkExistingGCrypt sshdata' $
void $ liftH $ makeSshRepo sshdata'
getRetrySshR :: SshData -> Handler ()
getRetrySshR sshdata = do
s <- liftIO $ testServer $ mkSshInput sshdata
redirect $ either (const $ ConfirmSshR sshdata) ConfirmSshR s
redirect $ either (const $ ConfirmSshR sshdata NoUUID) (uncurry ConfirmSshR) s
getMakeSshGitR :: SshData -> Handler Html
getMakeSshGitR = makeSsh False setupGroup
getMakeSshGitR sshdata = prepSsh False sshdata makeSshRepo
getMakeSshRsyncR :: SshData -> Handler Html
getMakeSshRsyncR = makeSsh True setupGroup
getMakeSshRsyncR sshdata = prepSsh False (rsyncOnly sshdata) makeSshRepo
makeSsh :: Bool -> (Remote -> Handler ()) -> SshData -> Handler Html
makeSsh rsync setup sshdata
rsyncOnly :: SshData -> SshData
rsyncOnly sshdata = sshdata { sshCapabilities = [RsyncCapable] }
getMakeSshGCryptR :: SshData -> RepoKey -> Handler Html
getMakeSshGCryptR sshdata NoRepoKey = whenGcryptInstalled $
withNewSecretKey $ getMakeSshGCryptR sshdata . RepoKey
getMakeSshGCryptR sshdata (RepoKey keyid) = whenGcryptInstalled $
prepSsh True sshdata $ makeGCryptRepo keyid
{- Detects if the user entered a location with an existing, known
- gcrypt repository, and enables it. Otherwise, runs the action. -}
checkExistingGCrypt :: SshData -> Widget -> Widget
checkExistingGCrypt sshdata nope = checkGCryptRepoEncryption repourl nope nope $ do
mu <- liftAnnex $ probeGCryptRemoteUUID repourl
case mu of
Just u -> void $ liftH $
combineExistingGCrypt sshdata u
Nothing -> error "The location contains a gcrypt repository that is not a git-annex special remote. This is not supported."
where
repourl = genSshUrl sshdata
{- Enables an existing gcrypt special remote. -}
enableGCrypt :: SshData -> RemoteName -> Handler Html
enableGCrypt sshdata reponame =
setupCloudRemote TransferGroup Nothing $
enableSpecialRemote reponame GCrypt.remote $ M.fromList
[("gitrepo", genSshUrl sshdata)]
{- Combining with a gcrypt repository that may not be
- known in remote.log, so probe the gcrypt repo. -}
combineExistingGCrypt :: SshData -> UUID -> Handler Html
combineExistingGCrypt sshdata u = do
reponame <- liftAnnex $ getGCryptRemoteName u repourl
enableGCrypt sshdata reponame
where
repourl = genSshUrl sshdata
{- Sets up remote repository for ssh, or directory for rsync. -}
prepSsh :: Bool -> SshData -> (SshData -> Handler Html) -> Handler Html
prepSsh newgcrypt sshdata a
| needsPubKey sshdata = do
keypair <- liftIO genSshKeyPair
sshdata' <- liftIO $ setupSshKeyPair keypair sshdata
makeSsh' rsync setup sshdata sshdata' (Just keypair)
prepSsh' newgcrypt sshdata sshdata' (Just keypair) a
| sshPort sshdata /= 22 = do
sshdata' <- liftIO $ setSshConfig sshdata []
makeSsh' rsync setup sshdata sshdata' Nothing
| otherwise = makeSsh' rsync setup sshdata sshdata Nothing
prepSsh' newgcrypt sshdata sshdata' Nothing a
| otherwise = prepSsh' newgcrypt sshdata sshdata Nothing a
makeSsh' :: Bool -> (Remote -> Handler ()) -> SshData -> SshData -> Maybe SshKeyPair -> Handler Html
makeSsh' rsync setup origsshdata sshdata keypair = do
sshSetup ["-p", show (sshPort origsshdata), sshhost, remoteCommand] "" $
makeSshRepo rsync setup sshdata
prepSsh' :: Bool -> SshData -> SshData -> Maybe SshKeyPair -> (SshData -> Handler Html) -> Handler Html
prepSsh' newgcrypt origsshdata sshdata keypair a = sshSetup
[ "-p", show (sshPort origsshdata)
, genSshHost (sshHostName origsshdata) (sshUserName origsshdata)
, remoteCommand
] "" (a sshdata)
where
sshhost = genSshHost (sshHostName origsshdata) (sshUserName origsshdata)
remotedir = T.unpack $ sshDirectory sshdata
remoteCommand = shellWrap $ intercalate "&&" $ catMaybes
[ Just $ "mkdir -p " ++ shellEscape remotedir
, Just $ "cd " ++ shellEscape remotedir
, if rsync then Nothing else Just "if [ ! -d .git ]; then git init --bare --shared; fi"
, if rsync then Nothing else Just "git annex init"
, if needsPubKey sshdata
then addAuthorizedKeysCommand (rsync || rsyncOnly sshdata) remotedir . sshPubKey <$> keypair
, if rsynconly then Nothing else Just "if [ ! -d .git ]; then git init --bare --shared && git config receive.denyNonFastforwards false; fi"
, if rsynconly || newgcrypt then Nothing else Just "git annex init"
, if needsPubKey origsshdata
then addAuthorizedKeysCommand (hasCapability origsshdata GitAnnexShellCapable) remotedir . sshPubKey <$> keypair
else Nothing
]
rsynconly = onlyCapability origsshdata RsyncCapable
makeSshRepo :: Bool -> (Remote -> Handler ()) -> SshData -> Handler Html
makeSshRepo forcersync setup sshdata = do
r <- liftAssistant $ makeSshRemote forcersync sshdata Nothing
setup r
redirect $ EditNewCloudRepositoryR $ Remote.uuid r
makeSshRepo :: SshData -> Handler Html
makeSshRepo sshdata = setupCloudRemote TransferGroup Nothing $
makeSshRemote sshdata
makeGCryptRepo :: KeyId -> SshData -> Handler Html
makeGCryptRepo keyid sshdata = setupCloudRemote TransferGroup Nothing $
makeGCryptRemote (sshRepoName sshdata) (genSshUrl sshdata) keyid
getAddRsyncNetR :: Handler Html
getAddRsyncNetR = postAddRsyncNetR
postAddRsyncNetR :: Handler Html
postAddRsyncNetR = do
((result, form), enctype) <- runFormPost $
((result, form), enctype) <- runFormPostNoToken $
renderBootstrap $ sshInputAForm hostnamefield $
SshInput Nothing Nothing Nothing 22
let showform status = page "Add a Rsync.net repository" (Just Configuration) $
$(widgetFile "configurators/addrsync.net")
let showform status = inpage $
$(widgetFile "configurators/rsync.net/add")
case result of
FormSuccess sshinput
| isRsyncNet (inputHostname sshinput) -> do
let reponame = genSshRepoName "rsync.net"
(maybe "" T.unpack $ inputDirectory sshinput)
makeRsyncNet sshinput reponame setupGroup
| isRsyncNet (inputHostname sshinput) ->
go sshinput
| otherwise ->
showform $ UnusableServer
"That is not a rsync.net host name."
_ -> showform UntestedServer
where
inpage = page "Add a Rsync.net repository" (Just Configuration)
hostnamefield = textField `withExpandableNote` ("Help", help)
help = [whamlet|
<div>
@ -342,16 +433,52 @@ postAddRsyncNetR = do
The host name will be something like "usw-s001.rsync.net", and the #
user name something like "7491"
|]
go sshinput = do
let reponame = genSshRepoName "rsync.net"
(maybe "" T.unpack $ inputDirectory sshinput)
prepRsyncNet sshinput reponame $ \sshdata -> inpage $
checkExistingGCrypt sshdata $ do
secretkeys <- sortBy (comparing snd) . M.toList
<$> liftIO secretKeys
$(widgetFile "configurators/rsync.net/encrypt")
makeRsyncNet :: SshInput -> String -> (Remote -> Handler ()) -> Handler Html
makeRsyncNet sshinput reponame setup = do
getMakeRsyncNetSharedR :: SshData -> Handler Html
getMakeRsyncNetSharedR = makeSshRepo . rsyncOnly
{- Make a gcrypt special remote on rsync.net. -}
getMakeRsyncNetGCryptR :: SshData -> RepoKey -> Handler Html
getMakeRsyncNetGCryptR sshdata NoRepoKey = whenGcryptInstalled $
withNewSecretKey $ getMakeRsyncNetGCryptR sshdata . RepoKey
getMakeRsyncNetGCryptR sshdata (RepoKey keyid) = whenGcryptInstalled $
sshSetup [sshhost, gitinit] [] $ makeGCryptRepo keyid sshdata
where
sshhost = genSshHost (sshHostName sshdata) (sshUserName sshdata)
gitinit = "git init --bare " ++ T.unpack (sshDirectory sshdata)
enableRsyncNet :: SshInput -> String -> Handler Html
enableRsyncNet sshinput reponame =
prepRsyncNet sshinput reponame $ makeSshRepo . rsyncOnly
enableRsyncNetGCrypt :: SshInput -> RemoteName -> Handler Html
enableRsyncNetGCrypt sshinput reponame =
prepRsyncNet sshinput reponame $ \sshdata -> whenGcryptInstalled $
checkGCryptRepoEncryption (genSshUrl sshdata) notencrypted notinstalled $
enableGCrypt sshdata reponame
where
notencrypted = error "Unexpectedly found a non-encrypted git repository, instead of the expected encrypted git repository."
notinstalled = error "internal"
{- Prepares rsync.net ssh key, and if successful, runs an action with
- its SshData. -}
prepRsyncNet :: SshInput -> String -> (SshData -> Handler Html) -> Handler Html
prepRsyncNet sshinput reponame a = do
knownhost <- liftIO $ maybe (return False) knownHost (inputHostname sshinput)
keypair <- liftIO $ genSshKeyPair
keypair <- liftIO genSshKeyPair
sshdata <- liftIO $ setupSshKeyPair keypair $
(mkSshData sshinput)
{ sshRepoName = reponame
, needsPubKey = True
, rsyncOnly = True
, sshCapabilities = [RsyncCapable]
}
{- I'd prefer to separate commands with &&, but
- rsync.net's shell does not support that.
@ -371,12 +498,8 @@ makeRsyncNet sshinput reponame setup = do
, genSshHost (sshHostName sshdata) (sshUserName sshdata)
, remotecommand
]
sshSetup sshopts (sshPubKey keypair) $
makeSshRepo True setup sshdata
sshSetup sshopts (sshPubKey keypair) $ a sshdata
isRsyncNet :: Maybe Text -> Bool
isRsyncNet Nothing = False
isRsyncNet (Just host) = ".rsync.net" `T.isSuffixOf` T.toLower host
setupGroup :: Remote -> Handler ()
setupGroup r = liftAnnex $ setStandardGroup (Remote.uuid r) TransferGroup

View file

@ -5,7 +5,7 @@
- Licensed under the GNU AGPL version 3 or higher.
-}
{-# LANGUAGE CPP, QuasiQuotes, TemplateHaskell, OverloadedStrings #-}
{-# LANGUAGE CPP, TemplateHaskell, OverloadedStrings #-}
module Assistant.WebApp.Configurators.WebDAV where
@ -13,18 +13,18 @@ import Assistant.WebApp.Common
import Creds
#ifdef WITH_WEBDAV
import qualified Remote.WebDAV as WebDAV
import Assistant.MakeRemote
import Assistant.Sync
import Assistant.WebApp.MakeRemote
import qualified Remote
import Types.Remote (RemoteConfig)
import Types.StandardGroups
import Logs.PreferredContent
import Logs.Remote
import Git.Types (RemoteName)
import qualified Data.Map as M
#endif
import qualified Data.Text as T
import Network.URI
import Assistant.Gpg
webDAVConfigurator :: Widget -> Handler Html
webDAVConfigurator = page "Add a WebDAV repository" (Just Configuration)
@ -66,10 +66,10 @@ postAddBoxComR :: Handler Html
postAddBoxComR = boxConfigurator $ do
defcreds <- liftAnnex $ previouslyUsedWebDAVCreds "box.com"
((result, form), enctype) <- liftH $
runFormPost $ renderBootstrap $ boxComAForm defcreds
runFormPostNoToken $ renderBootstrap $ boxComAForm defcreds
case result of
FormSuccess input -> liftH $
makeWebDavRemote initSpecialRemote "box.com" (toCredPair input) setgroup $ M.fromList
makeWebDavRemote initSpecialRemote "box.com" (toCredPair input) $ M.fromList
[ configureEncryption $ enableEncryption input
, ("embedcreds", if embedCreds input then "yes" else "no")
, ("type", "webdav")
@ -80,9 +80,6 @@ postAddBoxComR = boxConfigurator $ do
, ("chunksize", "10mb")
]
_ -> $(widgetFile "configurators/addbox.com")
where
setgroup r = liftAnnex $
setStandardGroup (Remote.uuid r) TransferGroup
#else
postAddBoxComR = error "WebDAV not supported by this build"
#endif
@ -100,7 +97,7 @@ postEnableWebDAVR uuid = do
getRemoteCredPairFor "webdav" c (WebDAV.davCreds uuid)
case mcreds of
Just creds -> webDAVConfigurator $ liftH $
makeWebDavRemote enableSpecialRemote name creds (const noop) M.empty
makeWebDavRemote enableSpecialRemote name creds M.empty
Nothing
| "box.com/" `isInfixOf` url ->
boxConfigurator $ showform name url
@ -112,10 +109,10 @@ postEnableWebDAVR uuid = do
maybe (pure Nothing) previouslyUsedWebDAVCreds $
urlHost url
((result, form), enctype) <- liftH $
runFormPost $ renderBootstrap $ webDAVCredsAForm defcreds
runFormPostNoToken $ renderBootstrap $ webDAVCredsAForm defcreds
case result of
FormSuccess input -> liftH $
makeWebDavRemote enableSpecialRemote name (toCredPair input) (const noop) M.empty
makeWebDavRemote enableSpecialRemote name (toCredPair input) M.empty
_ -> do
description <- liftAnnex $
T.pack <$> Remote.prettyUUID uuid
@ -125,13 +122,11 @@ postEnableWebDAVR _ = error "WebDAV not supported by this build"
#endif
#ifdef WITH_WEBDAV
makeWebDavRemote :: SpecialRemoteMaker -> String -> CredPair -> (Remote -> Handler ()) -> RemoteConfig -> Handler ()
makeWebDavRemote maker name creds setup config = do
makeWebDavRemote :: SpecialRemoteMaker -> RemoteName -> CredPair -> RemoteConfig -> Handler ()
makeWebDavRemote maker name creds config = do
liftIO $ WebDAV.setCredsEnv creds
r <- liftAnnex $ addRemote $ maker name WebDAV.remote config
setup r
liftAssistant $ syncRemote r
redirect $ EditNewCloudRepositoryR $ Remote.uuid r
setupCloudRemote TransferGroup Nothing $
maker name WebDAV.remote config
{- Only returns creds previously used for the same hostname. -}
previouslyUsedWebDAVCreds :: String -> Annex (Maybe CredPair)

View file

@ -55,7 +55,7 @@ checkCloudRepos :: UrlRenderer -> Remote -> Assistant ()
checkCloudRepos urlrenderer r =
unlessM (syncingToCloudRemote <$> getDaemonStatus) $ do
buddyname <- getBuddyName $ Remote.uuid r
button <- mkAlertButton "Add a cloud repository" urlrenderer $
button <- mkAlertButton True "Add a cloud repository" urlrenderer $
NeedCloudRepoR $ Remote.uuid r
void $ addAlert $ cloudRepoNeededAlert buddyname button
#else
@ -112,7 +112,7 @@ xmppform :: Route WebApp -> Handler Html
xmppform next = xmppPage $ do
((result, form), enctype) <- liftH $ do
oldcreds <- liftAnnex getXMPPCreds
runFormPost $ renderBootstrap $ xmppAForm $
runFormPostNoToken $ renderBootstrap $ xmppAForm $
creds2Form <$> oldcreds
let showform problem = $(widgetFile "configurators/xmpp")
case result of
@ -151,6 +151,8 @@ buddyListDisplay = do
catMaybes . map (buddySummary pairedwith)
<$> (getBuddyList <<~ buddyList)
$(widgetFile "configurators/xmpp/buddylist")
#else
noop
#endif
where
ident = "buddylist"

View file

@ -13,8 +13,8 @@ import Assistant.WebApp.Common
import Config.Files
import Utility.LogFile
import Assistant.DaemonStatus
import Assistant.WebApp.Utility
import Assistant.Alert
import Assistant.TransferSlots
import Control.Concurrent
import System.Posix (getProcessID, signalProcess, sigTERM)
@ -26,16 +26,16 @@ getShutdownR = page "Shutdown" Nothing $
getShutdownConfirmedR :: Handler Html
getShutdownConfirmedR = do
{- Remove all alerts for currently running activities. -}
liftAssistant $ do
{- Remove all alerts for currently running activities. -}
updateAlertMap $ M.filter $ \a -> alertClass a /= Activity
void $ addAlert shutdownAlert
{- Stop transfers the assistant is running,
- otherwise they would continue past shutdown.
- Pausing transfers prevents more being started up (and stops
- the transfer processes). -}
ts <- liftAssistant $ M.keys . currentTransfers <$> getDaemonStatus
mapM_ pauseTransfer ts
{- Stop transfers the assistant is running,
- otherwise they would continue past shutdown.
- Pausing transfers prevents more being started up (and stops
- the transfer processes). -}
ts <- M.keys . currentTransfers <$> getDaemonStatus
mapM_ pauseTransfer ts
page "Shutdown" Nothing $ do
{- Wait 2 seconds before shutting down, to give the web
- page time to load in the browser. -}
@ -67,5 +67,9 @@ getLogR :: Handler Html
getLogR = page "Logs" Nothing $ do
logfile <- liftAnnex $ fromRepo gitAnnexLogFile
logs <- liftIO $ listLogs logfile
logcontent <- liftIO $ concat <$> mapM readFile logs
logcontent <- liftIO $ concat <$> mapM readlog logs
$(widgetFile "control/log")
where
readlog f = withFile f ReadMode $ \h -> do
fileEncoding h -- log may contain invalid utf-8
hClose h `after` hGetContentsStrict h

View file

@ -10,10 +10,10 @@
module Assistant.WebApp.DashBoard where
import Assistant.WebApp.Common
import Assistant.WebApp.Utility
import Assistant.WebApp.RepoList
import Assistant.WebApp.Notifications
import Assistant.TransferQueue
import Assistant.TransferSlots
import Assistant.DaemonStatus
import Utility.NotificationBroadcaster
import Logs.Transfer
@ -31,7 +31,7 @@ import Control.Concurrent
transfersDisplay :: Bool -> Widget
transfersDisplay warnNoScript = do
webapp <- liftH getYesod
current <- liftH $ M.toList <$> getCurrentTransfers
current <- liftAssistant $ M.toList <$> getCurrentTransfers
queued <- take 10 <$> liftAssistant getTransferQueue
autoUpdate ident NotifierTransfersR (10 :: Int) (10 :: Int)
let transfers = simplifyTransfers $ current ++ queued
@ -52,7 +52,7 @@ simplifyTransfers [] = []
simplifyTransfers (x:[]) = [x]
simplifyTransfers (v@(t1, _):r@((t2, _):l))
| equivilantTransfer t1 t2 = simplifyTransfers (v:l)
| otherwise = v : (simplifyTransfers r)
| otherwise = v : simplifyTransfers r
{- Called by client to get a display of currently in process transfers.
-
@ -78,7 +78,7 @@ dashboard warnNoScript = do
$(widgetFile "dashboard/main")
getDashboardR :: Handler Html
getDashboardR = ifM (inFirstRun)
getDashboardR = ifM inFirstRun
( redirect ConfigurationR
, page "" (Just DashBoard) $ dashboard True
)
@ -107,7 +107,7 @@ postFileBrowserR = void openFileBrowser
{- Used by non-javascript browsers, where clicking on the link actually
- opens this page, so we redirect back to the referrer. -}
getFileBrowserR :: Handler ()
getFileBrowserR = whenM openFileBrowser $ redirectBack
getFileBrowserR = whenM openFileBrowser redirectBack
{- Opens the system file browser on the repo, or, as a fallback,
- goes to a file:// url. Returns True if it's ok to redirect away
@ -137,14 +137,17 @@ openFileBrowser = do
{- Transfer controls. The GET is done in noscript mode and redirects back
- to the referring page. The POST is called by javascript. -}
getPauseTransferR :: Transfer -> Handler ()
getPauseTransferR t = pauseTransfer t >> redirectBack
getPauseTransferR = noscript postPauseTransferR
postPauseTransferR :: Transfer -> Handler ()
postPauseTransferR t = pauseTransfer t
postPauseTransferR = liftAssistant . pauseTransfer
getStartTransferR :: Transfer -> Handler ()
getStartTransferR t = startTransfer t >> redirectBack
getStartTransferR = noscript postStartTransferR
postStartTransferR :: Transfer -> Handler ()
postStartTransferR t = startTransfer t
postStartTransferR = liftAssistant . startTransfer
getCancelTransferR :: Transfer -> Handler ()
getCancelTransferR t = cancelTransfer False t >> redirectBack
getCancelTransferR = noscript postCancelTransferR
postCancelTransferR :: Transfer -> Handler ()
postCancelTransferR t = cancelTransfer False t
postCancelTransferR = liftAssistant . cancelTransfer False
noscript :: (Transfer -> Handler ()) -> Transfer -> Handler ()
noscript a t = a t >> redirectBack

View file

@ -38,5 +38,5 @@ getLicenseR = do
$(widgetFile "documentation/license")
getRepoGroupR :: Handler Html
getRepoGroupR = page "About repository groups" (Just About) $ do
getRepoGroupR = page "About repository groups" (Just About) $
$(widgetFile "documentation/repogroup")

View file

@ -12,8 +12,8 @@
module Assistant.WebApp.Form where
import Types.Remote (RemoteConfigKey)
import Assistant.WebApp.Types
import Assistant.Gpg
import Yesod hiding (textField, passwordField)
import Yesod.Form.Fields as F
@ -67,17 +67,13 @@ withExpandableNote :: (Monad m, ToWidget (HandlerSite m) w) => Field m v -> (Str
withExpandableNote :: Field sub master v -> (String, GWidget sub master ()) -> Field sub master v
#endif
withExpandableNote field (toggle, note) = withNote field $ [whamlet|
<a .btn data-toggle="collapse" data-target="##{ident}">
#{toggle}
<a .btn data-toggle="collapse" data-target="##{ident}">#{toggle}</a>
<div ##{ident} .collapse>
^{note}
|]
where
ident = "toggle_" ++ toggle
data EnableEncryption = SharedEncryption | NoEncryption
deriving (Eq)
{- Adds a check box to an AForm to control encryption. -}
#if MIN_VERSION_yesod(1,2,0)
enableEncryptionField :: (RenderMessage site FormMessage) => AForm (HandlerT site IO) EnableEncryption
@ -91,8 +87,3 @@ enableEncryptionField = areq (selectFieldList choices) "Encryption" (Just Shared
[ ("Encrypt all data", SharedEncryption)
, ("Disable encryption", NoEncryption)
]
{- Generates Remote configuration for encryption. -}
configureEncryption :: EnableEncryption -> (RemoteConfigKey, String)
configureEncryption SharedEncryption = ("encryption", "shared")
configureEncryption NoEncryption = ("encryption", "none")

106
Assistant/WebApp/Gpg.hs Normal file
View file

@ -0,0 +1,106 @@
{- git-annex webapp gpg stuff
-
- Copyright 2013 Joey Hess <joey@kitenet.net>
-
- Licensed under the GNU AGPL version 3 or higher.
-}
{-# LANGUAGE QuasiQuotes, TemplateHaskell, OverloadedStrings #-}
module Assistant.WebApp.Gpg where
import Assistant.WebApp.Common
import Assistant.Gpg
import Utility.Gpg
import qualified Git.Command
import qualified Git.Remote
import qualified Git.Construct
import qualified Annex.Branch
import qualified Git.GCrypt
import qualified Remote.GCrypt as GCrypt
import Git.Types (RemoteName)
import Assistant.WebApp.MakeRemote
import Logs.Remote
import qualified Data.Map as M
gpgKeyDisplay :: KeyId -> Maybe UserId -> Widget
gpgKeyDisplay keyid userid = [whamlet|
<span title="key id #{keyid}">
<i .icon-user></i> #
$maybe name <- userid
#{name}
$nothing
key id #{keyid}
|]
genKeyModal :: Widget
genKeyModal = $(widgetFile "configurators/genkeymodal")
isGcryptInstalled :: IO Bool
isGcryptInstalled = inPath "git-remote-gcrypt"
whenGcryptInstalled :: Handler Html -> Handler Html
whenGcryptInstalled a = ifM (liftIO isGcryptInstalled)
( a
, page "Need git-remote-gcrypt" (Just Configuration) $
$(widgetFile "configurators/needgcrypt")
)
withNewSecretKey :: (KeyId -> Handler Html) -> Handler Html
withNewSecretKey use = do
userid <- liftIO newUserId
liftIO $ genSecretKey RSA "" userid maxRecommendedKeySize
results <- M.keys . M.filter (== userid) <$> liftIO secretKeys
case results of
[] -> error "Failed to generate gpg key!"
(key:_) -> use key
{- Tries to find the name used in remote.log for a gcrypt repository
- with a given uuid.
-
- The gcrypt remote may not be one that is listed in the local remote.log
- (or the info may be out of date), so this actually fetches the git-annex
- branch from the gcrypt remote and merges it in, and then looks up
- the name.
-}
getGCryptRemoteName :: UUID -> String -> Annex RemoteName
getGCryptRemoteName u repoloc = do
tmpremote <- uniqueRemoteName "tmpgcryptremote" 0 <$> gitRepo
void $ inRepo $ Git.Command.runBool
[Params "remote add", Param tmpremote, Param $ Git.GCrypt.urlPrefix ++ repoloc]
mname <- ifM (inRepo $ Git.Command.runBool [Param "fetch", Param tmpremote])
( do
void Annex.Branch.forceUpdate
(M.lookup "name" <=< M.lookup u) <$> readRemoteLog
, return Nothing
)
void $ inRepo $ Git.Remote.remove tmpremote
maybe missing return mname
where
missing = error $ "Cannot find configuration for the gcrypt remote at " ++ repoloc
{- Checks to see if a repo is encrypted with gcrypt, and runs one action if
- it's not and another if it is.
-
- Since the probing requires gcrypt to be installed, a third action must
- be provided to run if it's not installed.
-}
checkGCryptRepoEncryption :: (Monad m, LiftAnnex m) => String -> m a -> m a -> m a -> m a
checkGCryptRepoEncryption location notencrypted notinstalled encrypted =
ifM (liftAnnex $ liftIO isGcryptInstalled)
( dispatch =<< liftAnnex (inRepo $ Git.GCrypt.probeRepo location)
, notinstalled
)
where
dispatch Git.GCrypt.Decryptable = encrypted
dispatch Git.GCrypt.NotEncrypted = notencrypted
dispatch Git.GCrypt.NotDecryptable =
error "This git repository is encrypted with a GnuPG key that you do not have."
{- Gets the UUID of the gcrypt repo at a location, which may not exist.
- Only works if the gcrypt repo was created as a git-annex remote. -}
probeGCryptRemoteUUID :: String -> Annex (Maybe UUID)
probeGCryptRemoteUUID repolocation = do
r <- inRepo $ Git.Construct.fromRemoteLocation repolocation
GCrypt.getGCryptUUID False r

View file

@ -0,0 +1,36 @@
{- git-annex assistant webapp making remotes
-
- Copyright 2012 Joey Hess <joey@kitenet.net>
-
- Licensed under the GNU AGPL version 3 or higher.
-}
module Assistant.WebApp.MakeRemote (
module Assistant.MakeRemote,
module Assistant.WebApp.MakeRemote
) where
import Assistant.Common
import Assistant.WebApp.Types
import Assistant.Sync
import qualified Remote
import qualified Config
import Config.Cost
import Types.StandardGroups
import Git.Types (RemoteName)
import Logs.PreferredContent
import Assistant.MakeRemote
import Utility.Yesod
{- Runs an action that creates or enables a cloud remote,
- and finishes setting it up, then starts syncing with it,
- and finishes by displaying the page to edit it. -}
setupCloudRemote :: StandardGroup -> Maybe Cost -> Annex RemoteName -> Handler a
setupCloudRemote defaultgroup mcost maker = do
r <- liftAnnex $ addRemote maker
liftAnnex $ do
setStandardGroup (Remote.uuid r) defaultgroup
maybe noop (Config.setRemoteCost r) mcost
liftAssistant $ syncRemote r
redirect $ EditNewCloudRepositoryR $ Remote.uuid r
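{- Usage sketch (hypothetical; the WebDAV and gcrypt configurators in
 - this commit are real callers). The Maybe Cost lets a caller mark,
 - say, a slow cloud remote as expensive: -}
addCloudRemoteExample :: Annex RemoteName -> Handler Html
addCloudRemoteExample = setupCloudRemote TransferGroup
	(Just expensiveRemoteCost)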

View file

@ -80,7 +80,7 @@ getNotifierBuddyListR = notifierUrl BuddyListR getBuddyListBroadcaster
getNotifierRepoListR :: RepoSelector -> Handler RepPlain
getNotifierRepoListR reposelector = notifierUrl route getRepoListBroadcaster
where
route nid = RepoListR $ RepoListNotificationId nid reposelector
route nid = RepoListR nid reposelector
getTransferBroadcaster :: Assistant NotificationBroadcaster
getTransferBroadcaster = transferNotifier <$> getDaemonStatus

View file

@ -56,13 +56,17 @@ getSwitchToRepositoryR repo = do
( return url
, delayed $ waiturl urlfile
)
listening url = catchBoolIO $ fst <$> Url.exists url []
listening url = catchBoolIO $ fst <$> Url.exists url [] Nothing
delayed a = do
threadDelay 100000 -- 1/10th of a second
a
{- Returns once the assistant has daemonized, but possibly before it's
- listening for web connections. -}
startAssistant :: FilePath -> IO ()
startAssistant repo = do
program <- readProgramFile
void $ forkIO $ void $ createProcess $
(proc program ["assistant"]) { cwd = Just repo }
(_, _, _, pid) <-
createProcess $
(proc program ["assistant"]) { cwd = Just repo }
void $ checkSuccessProcess pid

View file

@ -38,15 +38,15 @@ firstRunNavBar :: [NavBarItem]
firstRunNavBar = [Configuration, About]
selectNavBar :: Handler [NavBarItem]
selectNavBar = ifM (inFirstRun) (return firstRunNavBar, return defaultNavBar)
selectNavBar = ifM inFirstRun (return firstRunNavBar, return defaultNavBar)
{- A standard page of the webapp, with a title, a sidebar, and that may
- be highlighted on the navbar. -}
page :: Hamlet.Html -> Maybe NavBarItem -> Widget -> Handler Html
page title navbaritem content = customPage navbaritem $ do
setTitle title
sideBarDisplay
content
sideBarDisplay
{- A custom page, with no title or sidebar set. -}
customPage :: Maybe NavBarItem -> Widget -> Handler Html

View file

@ -0,0 +1,35 @@
{- git-annex assistant repository repair
-
- Copyright 2013 Joey Hess <joey@kitenet.net>
-
- Licensed under the GNU AGPL version 3 or higher.
-}
{-# LANGUAGE QuasiQuotes, TemplateHaskell, OverloadedStrings #-}
module Assistant.WebApp.Repair where
import Assistant.WebApp.Common
import Assistant.WebApp.RepoList
import Remote (prettyUUID, remoteFromUUID)
import Annex.UUID (getUUID)
import Assistant.Repair
getRepairRepositoryR :: UUID -> Handler Html
getRepairRepositoryR = postRepairRepositoryR
postRepairRepositoryR :: UUID -> Handler Html
postRepairRepositoryR u = page "Repair repository" Nothing $ do
repodesc <- liftAnnex $ prettyUUID u
repairingmainrepo <- (==) u <$> liftAnnex getUUID
$(widgetFile "control/repairrepository")
getRepairRepositoryRunR :: UUID -> Handler Html
getRepairRepositoryRunR = postRepairRepositoryRunR
postRepairRepositoryRunR :: UUID -> Handler Html
postRepairRepositoryRunR u = do
r <- liftAnnex $ remoteFromUUID u
void $ liftAssistant $ runRepair u r True
page "Repair repository" Nothing $ do
let repolist = repoListDisplay $
mainRepoSelector { nudgeAddMore = True }
$(widgetFile "control/repairrepository/done")

View file

@ -0,0 +1,40 @@
{- git-annex assistant webapp RepoId type
-
- Copyright 2012,2013 Joey Hess <joey@kitenet.net>
-
- Licensed under the GNU AGPL version 3 or higher.
-}
module Assistant.WebApp.RepoId where
import Common.Annex
import Git.Types (RemoteName)
import qualified Remote
{- Parts of the webapp need to be able to act on repositories that may or
- may not have a UUID. -}
data RepoId
= RepoUUID UUID
| RepoName RemoteName
deriving (Eq, Ord, Show, Read)
mkRepoId :: Remote -> RepoId
mkRepoId r = case Remote.uuid r of
NoUUID -> RepoName (Remote.name r)
u -> RepoUUID u
describeRepoId :: RepoId -> Annex String
describeRepoId (RepoUUID u) = Remote.prettyUUID u
describeRepoId (RepoName n) = return n
repoIdRemote :: RepoId -> Annex (Maybe Remote)
repoIdRemote (RepoUUID u) = Remote.remoteFromUUID u
repoIdRemote (RepoName n) = Remote.byNameOnly n
lacksUUID :: RepoId -> Bool
lacksUUID r = asUUID r == NoUUID
asUUID :: RepoId -> UUID
asUUID (RepoUUID u) = u
asUUID _ = NoUUID
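{- A tiny sanity sketch (illustrative only, not part of the commit):
 - a RepoId made only from a name is exactly the case that lacks a
 - uuid: -}
repoIdSketch :: Bool
repoIdSketch = lacksUUID (RepoName "server")
	&& asUUID (RepoName "server") == NoUUID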

View file

@ -12,8 +12,6 @@ module Assistant.WebApp.RepoList where
import Assistant.WebApp.Common
import Assistant.DaemonStatus
import Assistant.WebApp.Notifications
import Assistant.WebApp.Utility
import Assistant.Ssh
import qualified Annex
import qualified Remote
import qualified Types.Remote as Remote
@ -23,17 +21,22 @@ import Logs.Remote
import Logs.Trust
import Logs.Group
import Config
import Git.Config
import Git.Remote
import Assistant.Sync
import Config.Cost
import Utility.NotificationBroadcaster
import qualified Git
#ifdef WITH_XMPP
#endif
import qualified Data.Map as M
import qualified Data.Set as S
import qualified Data.Text as T
import Data.Function
type RepoList = [(RepoDesc, RepoId, Actions)]
type RepoDesc = String
{- Actions that can be performed on a repo in the list. -}
data Actions
= DisabledRepoActions
{ setupRepoLink :: Route WebApp }
@ -48,21 +51,21 @@ data Actions
| UnwantedRepoActions
{ setupRepoLink :: Route WebApp }
mkSyncingRepoActions :: UUID -> Actions
mkSyncingRepoActions u = SyncingRepoActions
{ setupRepoLink = EditRepositoryR u
, syncToggleLink = DisableSyncR u
mkSyncingRepoActions :: RepoId -> Actions
mkSyncingRepoActions repoid = SyncingRepoActions
{ setupRepoLink = EditRepositoryR repoid
, syncToggleLink = DisableSyncR repoid
}
mkNotSyncingRepoActions :: UUID -> Actions
mkNotSyncingRepoActions u = NotSyncingRepoActions
{ setupRepoLink = EditRepositoryR u
, syncToggleLink = EnableSyncR u
mkNotSyncingRepoActions :: RepoId -> Actions
mkNotSyncingRepoActions repoid = NotSyncingRepoActions
{ setupRepoLink = EditRepositoryR repoid
, syncToggleLink = EnableSyncR repoid
}
mkUnwantedRepoActions :: UUID -> Actions
mkUnwantedRepoActions u = UnwantedRepoActions
{ setupRepoLink = EditRepositoryR u
mkUnwantedRepoActions :: RepoId -> Actions
mkUnwantedRepoActions repoid = UnwantedRepoActions
{ setupRepoLink = EditRepositoryR repoid
}
needsEnabled :: Actions -> Bool
@ -82,8 +85,8 @@ notWanted _ = False
-
- Returns a div, which will be inserted into the calling page.
-}
getRepoListR :: RepoListNotificationId -> Handler Html
getRepoListR (RepoListNotificationId nid reposelector) = do
getRepoListR :: NotificationId -> RepoSelector -> Handler Html
getRepoListR nid reposelector = do
waitNotifier getRepoListBroadcaster nid
p <- widgetToPageContent $ repoListDisplay reposelector
giveUrlRenderer $ [hamlet|^{pageBody p}|]
@ -98,7 +101,7 @@ mainRepoSelector = RepoSelector
{- List of cloud repositories, configured and not. -}
cloudRepoList :: Widget
cloudRepoList = repoListDisplay $ RepoSelector
cloudRepoList = repoListDisplay RepoSelector
{ onlyCloud = True
, onlyConfigured = False
, includeHere = False
@ -120,9 +123,6 @@ repoListDisplay reposelector = do
$(widgetFile "repolist")
where
ident = "repolist"
unfinished uuid = uuid == NoUUID
type RepoList = [(String, UUID, Actions)]
{- A list of known repositories, with actions that can be taken on them. -}
repoList :: RepoSelector -> Handler RepoList
@ -131,43 +131,46 @@ repoList reposelector
| otherwise = list =<< (++) <$> configured <*> unconfigured
where
configured = do
syncing <- S.fromList . map Remote.uuid . syncRemotes
<$> liftAssistant getDaemonStatus
syncremotes <- syncRemotes <$> liftAssistant getDaemonStatus
let syncing = S.fromList $ map mkRepoId syncremotes
liftAnnex $ do
unwanted <- S.fromList
<$> filterM inUnwantedGroup (S.toList syncing)
<$> filterM inUnwantedGroup (map Remote.uuid syncremotes)
rs <- filter selectedrepo . concat . Remote.byCost
<$> Remote.remoteList
let us = map Remote.uuid rs
let maker u
| u `S.member` unwanted = mkUnwantedRepoActions u
| u `S.member` syncing = mkSyncingRepoActions u
| otherwise = mkNotSyncingRepoActions u
let l = zip us $ map (maker . Remote.uuid) rs
let l = flip map (map mkRepoId rs) $ \r -> case r of
(RepoUUID u)
| u `S.member` unwanted -> (r, mkUnwantedRepoActions r)
_
| r `S.member` syncing -> (r, mkSyncingRepoActions r)
| otherwise -> (r, mkNotSyncingRepoActions r)
if includeHere reposelector
then do
u <- getUUID
r <- RepoUUID <$> getUUID
autocommit <- annexAutoCommit <$> Annex.getGitConfig
let hereactions = if autocommit
then mkSyncingRepoActions u
else mkNotSyncingRepoActions u
let here = (u, hereactions)
then mkSyncingRepoActions r
else mkNotSyncingRepoActions r
let here = (r, hereactions)
return $ here : l
else return l
unconfigured = liftAnnex $ do
m <- readRemoteLog
g <- gitRepo
map snd . catMaybes . filter selectedremote
. map (findinfo m)
<$> (trustExclude DeadTrusted $ M.keys m)
. map (findinfo m g)
<$> trustExclude DeadTrusted (M.keys m)
selectedrepo r
| Remote.readonly r = False
| onlyCloud reposelector = Git.repoIsUrl (Remote.repo r) && not (isXMPPRemote r)
| onlyCloud reposelector = Git.repoIsUrl (Remote.repo r)
&& Remote.uuid r /= NoUUID
&& not (isXMPPRemote r)
| otherwise = True
selectedremote Nothing = False
selectedremote (Just (iscloud, _))
| onlyCloud reposelector = iscloud
| otherwise = True
findinfo m u = case M.lookup "type" =<< M.lookup u m of
findinfo m g u = case getconfig "type" of
Just "rsync" -> val True EnableRsyncR
Just "directory" -> val False EnableDirectoryR
#ifdef WITH_S3
@ -177,26 +180,34 @@ repoList reposelector
#ifdef WITH_WEBDAV
Just "webdav" -> val True EnableWebDAVR
#endif
Just "gcrypt" ->
-- Skip gcrypt repos on removable drives;
-- handled separately.
case getconfig "gitrepo" of
Just rr | remoteLocationIsUrl (parseRemoteLocation rr g) ->
val True EnableSshGCryptR
_ -> Nothing
_ -> Nothing
where
val iscloud r = Just (iscloud, (u, DisabledRepoActions $ r u))
list l = liftAnnex $ do
let l' = nubBy (\x y -> fst x == fst y) l
l'' <- zip
<$> Remote.prettyListUUIDs (map fst l')
<*> pure l'
return $ map (\(name, (uuid, actions)) -> (name, uuid, actions)) l''
getconfig k = M.lookup k =<< M.lookup u m
val iscloud r = Just (iscloud, (RepoUUID u, DisabledRepoActions $ r u))
list l = liftAnnex $
forM (nubBy ((==) `on` fst) l) $ \(repoid, actions) ->
(,,)
<$> describeRepoId repoid
<*> pure repoid
<*> pure actions
getEnableSyncR :: UUID -> Handler ()
getEnableSyncR :: RepoId -> Handler ()
getEnableSyncR = flipSync True
getDisableSyncR :: UUID -> Handler ()
getDisableSyncR :: RepoId -> Handler ()
getDisableSyncR = flipSync False
flipSync :: Bool -> UUID -> Handler ()
flipSync enable uuid = do
mremote <- liftAnnex $ Remote.remoteFromUUID uuid
changeSyncable mremote enable
flipSync :: Bool -> RepoId -> Handler ()
flipSync enable repoid = do
mremote <- liftAnnex $ repoIdRemote repoid
liftAssistant $ changeSyncable mremote enable
redirectBack
getRepositoriesReorderR :: Handler ()
@ -227,29 +238,3 @@ reorderCosts remote rs = zip rs'' (insertCostAfter costs i)
costs = map Remote.cost rs'
rs'' = (\(x, y) -> x ++ [remote] ++ y) $ splitAt (i + 1) rs'
{- Checks to see if any repositories with NoUUID have annex-ignore set.
- That could happen if there's a problem contacting a ssh remote
- soon after it was added. -}
getCheckUnfinishedRepositoriesR :: Handler Html
getCheckUnfinishedRepositoriesR = page "Unfinished repositories" (Just Configuration) $ do
stalled <- liftAnnex findStalled
$(widgetFile "configurators/checkunfinished")
findStalled :: Annex [Remote]
findStalled = filter isstalled <$> remoteListRefresh
where
isstalled r = Remote.uuid r == NoUUID
&& remoteAnnexIgnore (Remote.gitconfig r)
getRetryUnfinishedRepositoriesR :: Handler ()
getRetryUnfinishedRepositoriesR = do
liftAssistant $ mapM_ unstall =<< liftAnnex findStalled
redirect DashboardR
where
unstall r = do
liftIO $ fixSshKeyPair
liftAnnex $ setConfig
(remoteConfig (Remote.repo r) "ignore")
(boolConfig False)
syncRemote r
liftAnnex $ void remoteListRefresh

View file

@ -21,7 +21,10 @@ import Utility.NotificationBroadcaster
import Utility.WebApp
import Utility.Yesod
import Logs.Transfer
import Utility.Gpg (KeyId)
import Build.SysConfig (packageversion)
import Types.ScheduledActivity
import Assistant.WebApp.RepoId
import Yesod.Static
import Text.Hamlet
@ -149,9 +152,6 @@ data RepoSelector = RepoSelector
}
deriving (Read, Show, Eq)
data RepoListNotificationId = RepoListNotificationId NotificationId RepoSelector
deriving (Read, Show, Eq)
data RemovableDrive = RemovableDrive
{ diskFree :: Maybe Integer
, mountPoint :: Text
@ -159,16 +159,14 @@ data RemovableDrive = RemovableDrive
}
deriving (Read, Show, Eq, Ord)
{- Only needed to work around an old-yesod bug that emits a warning message
- when a route has two parameters. -}
data FilePathAndUUID = FilePathAndUUID FilePath UUID
deriving (Read, Show, Eq)
data RepoKey = RepoKey KeyId | NoRepoKey
deriving (Read, Show, Eq, Ord)
instance PathPiece FilePathAndUUID where
instance PathPiece RemovableDrive where
toPathPiece = pack . show
fromPathPiece = readish . unpack
instance PathPiece RemovableDrive where
instance PathPiece RepoKey where
toPathPiece = pack . show
fromPathPiece = readish . unpack
@ -208,10 +206,6 @@ instance PathPiece PairKey where
toPathPiece = pack . show
fromPathPiece = readish . unpack
instance PathPiece RepoListNotificationId where
toPathPiece = pack . show
fromPathPiece = readish . unpack
instance PathPiece RepoSelector where
toPathPiece = pack . show
fromPathPiece = readish . unpack
@ -219,3 +213,11 @@ instance PathPiece RepoSelector where
instance PathPiece ThreadName where
toPathPiece = pack . show
fromPathPiece = readish . unpack
instance PathPiece ScheduledActivity where
toPathPiece = pack . show
fromPathPiece = readish . unpack
instance PathPiece RepoId where
toPathPiece = pack . show
fromPathPiece = readish . unpack
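All of these PathPiece instances follow one pattern: serialize with show, parse back with a total read. A minimal sketch of the round-trip, assuming a readish in the spirit of git-annex's Utility.PartialPrelude (Demo is a hypothetical type):

import Data.Text (Text, pack, unpack)

-- total read: Nothing instead of an exception on parse failure
readish :: Read a => String -> Maybe a
readish s = case reads s of
    ((x, _):_) -> Just x
    _ -> Nothing

newtype Demo = Demo Int deriving (Read, Show, Eq)

toPathPieceDemo :: Demo -> Text
toPathPieceDemo = pack . show

fromPathPieceDemo :: Text -> Maybe Demo
fromPathPieceDemo = readish . unpack

-- fromPathPieceDemo (toPathPieceDemo (Demo 3)) == Just (Demo 3)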
View file
@ -1,120 +0,0 @@
{- git-annex assistant webapp utilities
-
- Copyright 2012 Joey Hess <joey@kitenet.net>
-
- Licensed under the GNU AGPL version 3 or higher.
-}
module Assistant.WebApp.Utility where
import Assistant.Common
import Assistant.WebApp.Types
import Assistant.DaemonStatus
import Assistant.TransferQueue
import Assistant.Types.TransferSlots
import Assistant.TransferSlots
import Assistant.Sync
import qualified Remote
import qualified Types.Remote as Remote
import qualified Remote.List as Remote
import qualified Assistant.Threads.Transferrer as Transferrer
import Logs.Transfer
import qualified Config
import Config.Files
import Git.Config
import Assistant.Threads.Watcher
import Assistant.NamedThread
import qualified Data.Map as M
import Control.Concurrent
import System.Posix.Signals (signalProcessGroup, sigTERM, sigKILL)
import System.Posix.Process (getProcessGroupIDOf)
{- Use Nothing to change autocommit setting; or a remote to change
- its sync setting. -}
changeSyncable :: (Maybe Remote) -> Bool -> Handler ()
changeSyncable Nothing enable = do
liftAnnex $ Config.setConfig key (boolConfig enable)
liftIO . maybe noop (`throwTo` signal)
=<< liftAssistant (namedThreadId watchThread)
where
key = Config.annexConfig "autocommit"
signal
| enable = ResumeWatcher
| otherwise = PauseWatcher
changeSyncable (Just r) True = do
changeSyncFlag r True
liftAssistant $ syncRemote r
changeSyncable (Just r) False = do
changeSyncFlag r False
liftAssistant $ updateSyncRemotes
{- Stop all transfers to or from this remote.
- XXX Can't stop any ongoing scan, or git syncs. -}
void $ liftAssistant $ dequeueTransfers tofrom
mapM_ (cancelTransfer False) =<<
filter tofrom . M.keys <$>
liftAssistant (currentTransfers <$> getDaemonStatus)
where
tofrom t = transferUUID t == Remote.uuid r
changeSyncFlag :: Remote -> Bool -> Handler ()
changeSyncFlag r enabled = liftAnnex $ do
Config.setConfig key (boolConfig enabled)
void $ Remote.remoteListRefresh
where
key = Config.remoteConfig (Remote.repo r) "sync"
pauseTransfer :: Transfer -> Handler ()
pauseTransfer = cancelTransfer True
cancelTransfer :: Bool -> Transfer -> Handler ()
cancelTransfer pause t = do
m <- getCurrentTransfers
unless pause $
{- remove queued transfer -}
void $ liftAssistant $ dequeueTransfers $ equivilantTransfer t
{- stop running transfer -}
maybe noop stop (M.lookup t m)
where
stop info = liftAssistant $ do
{- When there's a thread associated with the
- transfer, it's signaled first, to avoid it
- displaying any alert about the transfer having
- failed when the transfer process is killed. -}
liftIO $ maybe noop signalthread $ transferTid info
liftIO $ maybe noop killproc $ transferPid info
if pause
then void $ alterTransferInfo t $
\i -> i { transferPaused = True }
else void $ removeTransfer t
signalthread tid
| pause = throwTo tid PauseTransfer
| otherwise = killThread tid
{- In order to stop helper processes like rsync,
- kill the whole process group of the process running the transfer. -}
killproc pid = void $ tryIO $ do
g <- getProcessGroupIDOf pid
void $ tryIO $ signalProcessGroup sigTERM g
threadDelay 50000 -- 0.05 second grace period
void $ tryIO $ signalProcessGroup sigKILL g
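The TERM-then-KILL dance in killproc is worth isolating. A minimal sketch of the same process-group shutdown, assuming only the unix package (killGroup is a hypothetical name; the real code wraps each signal in tryIO):

import Control.Concurrent (threadDelay)
import Control.Exception (SomeException, try)
import System.Posix.Process (getProcessGroupIDOf)
import System.Posix.Signals (signalProcessGroup, sigKILL, sigTERM)
import System.Posix.Types (ProcessID)

killGroup :: ProcessID -> IO ()
killGroup pid = do
    g <- getProcessGroupIDOf pid
    -- ask politely first, so helpers like rsync can clean up
    _ <- try (signalProcessGroup sigTERM g) :: IO (Either SomeException ())
    threadDelay 50000 -- 0.05 second grace period
    -- then force the issue
    _ <- try (signalProcessGroup sigKILL g) :: IO (Either SomeException ())
    return ()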
startTransfer :: Transfer -> Handler ()
startTransfer t = do
m <- getCurrentTransfers
maybe startqueued go (M.lookup t m)
where
go info = maybe (start info) resume $ transferTid info
startqueued = do
is <- liftAssistant $ map snd <$> getMatchingTransfers (== t)
maybe noop start $ headMaybe is
resume tid = do
liftAssistant $ alterTransferInfo t $
\i -> i { transferPaused = False }
liftIO $ throwTo tid ResumeTransfer
start info = liftAssistant $ do
program <- liftIO readProgramFile
inImmediateTransferSlot program $
Transferrer.genTransfer t info
getCurrentTransfers :: Handler TransferMap
getCurrentTransfers = currentTransfers <$> liftAssistant getDaemonStatus
View file
@ -19,6 +19,8 @@
/config/xmpp/for/self XMPPConfigForPairSelfR GET POST
/config/xmpp/for/frield XMPPConfigForPairFriendR GET POST
/config/xmpp/needcloudrepo/#UUID NeedCloudRepoR GET
/config/fsck ConfigFsckR GET POST
/config/fsck/preferences ConfigFsckPreferencesR POST
/config/addrepository AddRepositoryR GET
/config/repository/new NewRepositoryR GET POST
@ -26,24 +28,28 @@
/config/repository/new/androidcamera AndroidCameraRepositoryR GET
/config/repository/switcher RepositorySwitcherR GET
/config/repository/switchto/#FilePath SwitchToRepositoryR GET
/config/repository/combine/#FilePathAndUUID CombineRepositoryR GET
/config/repository/edit/#UUID EditRepositoryR GET POST
/config/repository/combine/#FilePath/#UUID CombineRepositoryR GET
/config/repository/edit/#RepoId EditRepositoryR GET POST
/config/repository/edit/new/#UUID EditNewRepositoryR GET POST
/config/repository/edit/new/cloud/#UUID EditNewCloudRepositoryR GET POST
/config/repository/sync/disable/#UUID DisableSyncR GET
/config/repository/sync/enable/#UUID EnableSyncR GET
/config/repository/unfinished/check CheckUnfinishedRepositoriesR GET
/config/repository/unfinished/retry RetryUnfinishedRepositoriesR GET
/config/repository/sync/disable/#RepoId DisableSyncR GET
/config/repository/sync/enable/#RepoId EnableSyncR GET
/config/repository/upgrade/#RepoId UpgradeRepositoryR GET
/config/repository/add/drive AddDriveR GET POST
/config/repository/add/drive/confirm/#RemovableDrive ConfirmAddDriveR GET
/config/repository/add/drive/finish/#RemovableDrive FinishAddDriveR GET
/config/repository/add/drive/genkey/#RemovableDrive GenKeyForDriveR GET
/config/repository/add/drive/finish/#RemovableDrive/#RepoKey FinishAddDriveR GET
/config/repository/add/ssh AddSshR GET POST
/config/repository/add/ssh/confirm/#SshData ConfirmSshR GET
/config/repository/add/ssh/confirm/#SshData/#UUID ConfirmSshR GET
/config/repository/add/ssh/retry/#SshData RetrySshR GET
/config/repository/add/ssh/make/git/#SshData MakeSshGitR GET
/config/repository/add/ssh/make/rsync/#SshData MakeSshRsyncR GET
/config/repository/add/ssh/make/gcrypt/#SshData/#RepoKey MakeSshGCryptR GET
/config/repository/add/ssh/combine/#SshData CombineSshR GET
/config/repository/add/cloud/rsync.net AddRsyncNetR GET POST
/config/repository/add/cloud/rsync.net/shared/#SshData MakeRsyncNetSharedR GET
/config/repository/add/cloud/rsync.net/gcrypt/#SshData/#RepoKey MakeRsyncNetGCryptR GET
/config/repository/add/cloud/S3 AddS3R GET POST
/config/repository/add/cloud/IA AddIAR GET POST
/config/repository/add/cloud/glacier AddGlacierR GET POST
@ -62,6 +68,7 @@
/config/repository/pair/xmpp/friend/finish/#PairKey FinishXMPPPairFriendR GET
/config/repository/enable/rsync/#UUID EnableRsyncR GET POST
/config/repository/enable/gcrypt/#UUID EnableSshGCryptR GET POST
/config/repository/enable/directory/#UUID EnableDirectoryR GET
/config/repository/enable/S3/#UUID EnableS3R GET POST
/config/repository/enable/IA/#UUID EnableIAR GET POST
@ -77,6 +84,10 @@
/config/repository/delete/finish/#UUID FinishDeleteRepositoryR GET
/config/repository/delete/here DeleteCurrentRepositoryR GET POST
/config/activity/add/#UUID AddActivityR GET POST
/config/activity/change/#UUID/#ScheduledActivity ChangeActivityR GET POST
/config/activity/remove/#UUID/#ScheduledActivity RemoveActivityR GET
/transfers/#NotificationId TransfersR GET
/notifier/transfers NotifierTransfersR GET
@ -86,7 +97,7 @@
/buddylist/#NotificationId BuddyListR GET
/notifier/buddylist NotifierBuddyListR GET
/repolist/#RepoListNotificationId RepoListR GET
/repolist/#NotificationId/#RepoSelector RepoListR GET
/notifier/repolist/#RepoSelector NotifierRepoListR GET
/alert/close/#AlertId CloseAlert GET
@ -97,4 +108,7 @@
/transfer/start/#Transfer StartTransferR GET POST
/transfer/cancel/#Transfer CancelTransferR GET POST
/repair/#UUID RepairRepositoryR GET POST
/repair/run/#UUID RepairRepositoryRunR GET POST
/static StaticR Static getStatic
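For reference, with the FilePathAndUUID workaround gone, two-parameter routes dispatch to plain two-argument handlers; Yesod derives the handler name and argument order from the route:

-- /config/repository/combine/#FilePath/#UUID CombineRepositoryR GET
-- dispatches to a handler of shape:
--   getCombineRepositoryR :: FilePath -> UUID -> Handler Html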
View file
@ -21,7 +21,7 @@ import qualified Data.Map as M
import Data.ByteString (ByteString)
import qualified Data.ByteString as B
import Data.XML.Types
import qualified Codec.Binary.Base64 as B64
import qualified "dataenc" Codec.Binary.Base64 as B64
{- Name of the git-annex tag, in our own XML namespace.
- (Not using a namespace URL to avoid unnecessary bloat.) -}
View file
@ -27,12 +27,12 @@ import qualified Types.Backend as B
import Config
-- When adding a new backend, import it here and add it to the list.
import qualified Backend.SHA
import qualified Backend.Hash
import qualified Backend.WORM
import qualified Backend.URL
list :: [Backend]
list = Backend.SHA.backends ++ Backend.WORM.backends ++ Backend.URL.backends
list = Backend.Hash.backends ++ Backend.WORM.backends ++ Backend.URL.backends
{- List of backends in the order to try them when storing a new key. -}
orderedList :: Annex [Backend]
168
Backend/Hash.hs Normal file
View file
@ -0,0 +1,168 @@
{- git-annex hashing backends
-
- Copyright 2011-2013 Joey Hess <joey@kitenet.net>
-
- Licensed under the GNU GPL version 3 or higher.
-}
{-# LANGUAGE CPP #-}
module Backend.Hash (backends) where
import Common.Annex
import qualified Annex
import Types.Backend
import Types.Key
import Types.KeySource
import Utility.Hash
import Utility.ExternalSHA
import qualified Build.SysConfig as SysConfig
import qualified Data.ByteString.Lazy as L
import Data.Char
data Hash = SHAHash HashSize | SkeinHash HashSize
type HashSize = Int
{- Order is slightly significant; want SHA256 first, and more general
- sizes earlier. -}
hashes :: [Hash]
hashes = concat
[ map SHAHash [256, 1, 512, 224, 384]
#ifdef WITH_CRYPTOHASH
, map SkeinHash [256, 512]
#endif
]
{- The SHA256E backend is the default, so genBackendE comes first. -}
backends :: [Backend]
backends = catMaybes $ map genBackendE hashes ++ map genBackend hashes
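Since the E-generating map comes first and hashes leads with 256, the resulting backend order (with WITH_CRYPTOHASH) works out to:

-- SHA256E, SHA1E, SHA512E, SHA224E, SHA384E, SKEIN256E, SKEIN512E,
-- SHA256, SHA1, SHA512, SHA224, SHA384, SKEIN256, SKEIN512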
genBackend :: Hash -> Maybe Backend
genBackend hash = Just Backend
{ name = hashName hash
, getKey = keyValue hash
, fsckKey = Just $ checkKeyChecksum hash
, canUpgradeKey = Just needsUpgrade
}
genBackendE :: Hash -> Maybe Backend
genBackendE hash = do
b <- genBackend hash
return $ b
{ name = hashNameE hash
, getKey = keyValueE hash
}
hashName :: Hash -> String
hashName (SHAHash size) = "SHA" ++ show size
hashName (SkeinHash size) = "SKEIN" ++ show size
hashNameE :: Hash -> String
hashNameE hash = hashName hash ++ "E"
{- A key is a hash of its contents. -}
keyValue :: Hash -> KeySource -> Annex (Maybe Key)
keyValue hash source = do
let file = contentLocation source
stat <- liftIO $ getFileStatus file
let filesize = fromIntegral $ fileSize stat
s <- hashFile hash file filesize
return $ Just $ stubKey
{ keyName = s
, keyBackendName = hashName hash
, keySize = Just filesize
}
{- Extension preserving keys. -}
keyValueE :: Hash -> KeySource -> Annex (Maybe Key)
keyValueE hash source = keyValue hash source >>= maybe (return Nothing) addE
where
addE k = return $ Just $ k
{ keyName = keyName k ++ selectExtension (keyFilename source)
, keyBackendName = hashNameE hash
}
selectExtension :: FilePath -> String
selectExtension f
| null es = ""
| otherwise = intercalate "." ("":es)
where
es = filter (not . null) $ reverse $
take 2 $ takeWhile shortenough $
reverse $ split "." $ filter validExtension $ takeExtensions f
shortenough e = length e <= 4 -- long enough for "jpeg"
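Concretely, selectExtension keeps at most the last two dot-separated extensions, and only while each is four characters or shorter. A few illustrative evaluations, assuming the definitions above are in scope:

-- ghci> selectExtension "archive.tar.gz"
-- ".tar.gz"
-- ghci> selectExtension "movie.verylongextension"
-- ""    -- extension longer than 4 characters, dropped
-- ghci> selectExtension "README"
-- ""    -- no extension at all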
{- A key's checksum is checked during fsck. -}
checkKeyChecksum :: Hash -> Key -> FilePath -> Annex Bool
checkKeyChecksum hash key file = do
fast <- Annex.getState Annex.fast
mstat <- liftIO $ catchMaybeIO $ getFileStatus file
case (mstat, fast) of
(Just stat, False) -> do
let filesize = fromIntegral $ fileSize stat
check <$> hashFile hash file filesize
_ -> return True
where
expected = keyHash key
check s
| s == expected = True
{- A bug caused checksums to be prefixed with \ in some
- cases; still accept these as legal now that the bug has been
- fixed. -}
| '\\' : s == expected = True
| otherwise = False
keyHash :: Key -> String
keyHash key = dropExtensions (keyName key)
validExtension :: Char -> Bool
validExtension c
| isAlphaNum c = True
| c == '.' = True
| otherwise = False
{- Upgrade keys that have the \ prefix on their sha due to a bug, or
- that contain non-alphanumeric characters in their extension. -}
needsUpgrade :: Key -> Bool
needsUpgrade key = "\\" `isPrefixOf` keyHash key ||
any (not . validExtension) (takeExtensions $ keyName key)
hashFile :: Hash -> FilePath -> Integer -> Annex String
hashFile hash file filesize = do
showAction "checksum"
liftIO $ go hash
where
go (SHAHash hashsize) = case shaHasher hashsize filesize of
Left sha -> sha <$> L.readFile file
Right command ->
either error return
=<< externalSHA command hashsize file
go (SkeinHash hashsize) = skeinHasher hashsize <$> L.readFile file
shaHasher :: HashSize -> Integer -> Either (L.ByteString -> String) String
shaHasher hashsize filesize
| hashsize == 1 = use SysConfig.sha1 sha1
| hashsize == 256 = use SysConfig.sha256 sha256
| hashsize == 224 = use SysConfig.sha224 sha224
| hashsize == 384 = use SysConfig.sha384 sha384
| hashsize == 512 = use SysConfig.sha512 sha512
| otherwise = error $ "unsupported sha size " ++ show hashsize
where
use Nothing hasher = Left $ show . hasher
use (Just c) hasher
{- Use builtin, but slightly slower hashing for
- smallish files. Cryptohash benchmarks at 90 to 101% of
- the speed of external hashers, depending on the hash
- and system. So there is no point forking an external
- process unless the file is large. -}
| filesize < 1048576 = use Nothing hasher
| otherwise = Right c
skeinHasher :: HashSize -> (L.ByteString -> String)
skeinHasher hashsize
#ifdef WITH_CRYPTOHASH
| hashsize == 256 = show . skein256
| hashsize == 512 = show . skein512
#endif
| otherwise = error $ "unsupported skein size " ++ show hashsize
View file
@ -1,146 +0,0 @@
{- git-annex SHA backends
-
- Copyright 2011,2012 Joey Hess <joey@kitenet.net>
-
- Licensed under the GNU GPL version 3 or higher.
-}
module Backend.SHA (backends) where
import Common.Annex
import qualified Annex
import Types.Backend
import Types.Key
import Types.KeySource
import Utility.ExternalSHA
import qualified Build.SysConfig as SysConfig
import Data.Digest.Pure.SHA
import qualified Data.ByteString.Lazy as L
import Data.Char
type SHASize = Int
{- Order is slightly significant; want SHA256 first, and more general
- sizes earlier. -}
sizes :: [Int]
sizes = [256, 1, 512, 224, 384]
{- The SHA256E backend is the default. -}
backends :: [Backend]
backends = catMaybes $ map genBackendE sizes ++ map genBackend sizes
genBackend :: SHASize -> Maybe Backend
genBackend size = Just $ Backend
{ name = shaName size
, getKey = keyValue size
, fsckKey = Just $ checkKeyChecksum size
, canUpgradeKey = Just $ needsUpgrade
}
genBackendE :: SHASize -> Maybe Backend
genBackendE size = do
b <- genBackend size
return $ b
{ name = shaNameE size
, getKey = keyValueE size
}
shaName :: SHASize -> String
shaName size = "SHA" ++ show size
shaNameE :: SHASize -> String
shaNameE size = shaName size ++ "E"
shaN :: SHASize -> FilePath -> Integer -> Annex String
shaN shasize file filesize = do
showAction "checksum"
liftIO $ case shaCommand shasize filesize of
Left sha -> sha <$> L.readFile file
Right command ->
either error return
=<< externalSHA command shasize file
shaCommand :: SHASize -> Integer -> Either (L.ByteString -> String) String
shaCommand shasize filesize
| shasize == 1 = use SysConfig.sha1 sha1
| shasize == 256 = use SysConfig.sha256 sha256
| shasize == 224 = use SysConfig.sha224 sha224
| shasize == 384 = use SysConfig.sha384 sha384
| shasize == 512 = use SysConfig.sha512 sha512
| otherwise = error $ "bad sha size " ++ show shasize
where
use Nothing sha = Left $ showDigest . sha
use (Just c) sha
{- use builtin, but slower sha for small files
- benchmarking indicates it's faster up to
- and slightly beyond 50 kb files -}
| filesize < 51200 = use Nothing sha
| otherwise = Right c
{- A key is a checksum of its contents. -}
keyValue :: SHASize -> KeySource -> Annex (Maybe Key)
keyValue shasize source = do
let file = contentLocation source
stat <- liftIO $ getFileStatus file
let filesize = fromIntegral $ fileSize stat
s <- shaN shasize file filesize
return $ Just $ stubKey
{ keyName = s
, keyBackendName = shaName shasize
, keySize = Just filesize
}
{- Extension preserving keys. -}
keyValueE :: SHASize -> KeySource -> Annex (Maybe Key)
keyValueE size source = keyValue size source >>= maybe (return Nothing) addE
where
addE k = return $ Just $ k
{ keyName = keyName k ++ selectExtension (keyFilename source)
, keyBackendName = shaNameE size
}
selectExtension :: FilePath -> String
selectExtension f
| null es = ""
| otherwise = intercalate "." ("":es)
where
es = filter (not . null) $ reverse $
take 2 $ takeWhile shortenough $
reverse $ split "." $ filter validExtension $ takeExtensions f
shortenough e = length e <= 4 -- long enough for "jpeg"
{- A key's checksum is checked during fsck. -}
checkKeyChecksum :: SHASize -> Key -> FilePath -> Annex Bool
checkKeyChecksum size key file = do
fast <- Annex.getState Annex.fast
mstat <- liftIO $ catchMaybeIO $ getFileStatus file
case (mstat, fast) of
(Just stat, False) -> do
let filesize = fromIntegral $ fileSize stat
check <$> shaN size file filesize
_ -> return True
where
sha = keySha key
check s
| s == sha = True
{- A bug caused checksums to be prefixed with \ in some
- cases; still accept these as legal now that the bug has been
- fixed. -}
| '\\' : s == sha = True
| otherwise = False
keySha :: Key -> String
keySha key = dropExtensions (keyName key)
validExtension :: Char -> Bool
validExtension c
| isAlphaNum c = True
| c == '.' = True
| otherwise = False
{- Upgrade keys that have the \ prefix on their sha due to a bug, or
- that contain non-alphanumeric characters in their extension. -}
needsUpgrade :: Key -> Bool
needsUpgrade key = "\\" `isPrefixOf` keySha key ||
any (not . validExtension) (takeExtensions $ keyName key)
View file
@ -10,11 +10,10 @@ module Backend.URL (
fromUrl
) where
import Data.Hash.MD5
import Common.Annex
import Types.Backend
import Types.Key
import Backend.Utilities
backends :: [Backend]
backends = [backend]
@ -27,18 +26,12 @@ backend = Backend
, canUpgradeKey = Nothing
}
{- When it's not too long, use the full url as the key name.
- If the url is too long, it's truncated at half the filename length
- limit, and the md5 of the url is appended to ensure a unique key. -}
{- Every unique url has a corresponding key. -}
fromUrl :: String -> Maybe Integer -> Annex Key
fromUrl url size = do
limit <- liftIO . fileNameLengthLimit =<< fromRepo gitAnnexDir
let truncurl = truncateFilePath (limit `div` 2) url
let key = if url == truncurl
then url
else truncurl ++ "-" ++ md5s (Str url)
n <- genKeyName url
return $ stubKey
{ keyName = key
{ keyName = n
, keyBackendName = "URL"
, keySize = size
}
}
25
Backend/Utilities.hs Normal file
View file
@ -0,0 +1,25 @@
{- git-annex backend utilities
-
- Copyright 2012 Joey Hess <joey@kitenet.net>
-
- Licensed under the GNU GPL version 3 or higher.
-}
module Backend.Utilities where
import Data.Hash.MD5
import Common.Annex
{- Generates a keyName from an input string. Takes care of sanitizing it.
- If it's not too long, the full string is used as the keyName.
- Otherwise, it's truncated at half the filename length limit, and its
- md5 is appended to ensure a unique key. -}
genKeyName :: String -> Annex String
genKeyName s = do
limit <- liftIO . fileNameLengthLimit =<< fromRepo gitAnnexDir
let s' = preSanitizeKeyName s
let truncs = truncateFilePath (limit `div` 2) s'
return $ if s' == truncs
then s'
else truncs ++ "-" ++ md5s (Str s)
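A minimal sketch of the same truncate-plus-md5 scheme with a fixed limit instead of the filesystem query (genKeyName' is hypothetical; sanitization is omitted, truncation here is by characters rather than truncateFilePath's byte-aware cut, and md5s/Str are the same Data.Hash.MD5 imports used above):

import Data.Hash.MD5 (Str(..), md5s)

genKeyName' :: Int -> String -> String
genKeyName' limit s
    | s == truncs = s
    | otherwise = truncs ++ "-" ++ md5s (Str s)
  where
    truncs = take (limit `div` 2) s

-- Short names pass through; long names are cut to half the limit and
-- suffixed with their md5, so names differing only past the cut stay unique.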
View file
@ -11,6 +11,7 @@ import Common.Annex
import Types.Backend
import Types.Key
import Types.KeySource
import Backend.Utilities
backends :: [Backend]
backends = [backend]
@ -33,9 +34,10 @@ backend = Backend
keyValue :: KeySource -> Annex (Maybe Key)
keyValue source = do
stat <- liftIO $ getFileStatus $ contentLocation source
return $ Just Key {
keyName = takeFileName $ keyFilename source,
keyBackendName = name backend,
keySize = Just $ fromIntegral $ fileSize stat,
keyMtime = Just $ modificationTime stat
}
n <- genKeyName $ keyFilename source
return $ Just Key
{ keyName = n
, keyBackendName = name backend
, keySize = Just $ fromIntegral $ fileSize stat
, keyMtime = Just $ modificationTime stat
}
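So a WORM key is still built from the filename, size, and mtime, but the name now goes through genKeyName, picking up the truncate-plus-md5 treatment when over-long. A short name passes through unchanged (field values illustrative only):

-- e.g. for "photo.jpg", 1234 bytes, mtime 1384464000:
-- Key { keyName = "photo.jpg", keyBackendName = "WORM"
--     , keySize = Just 1234, keyMtime = Just 1384464000 }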
View file
@ -24,9 +24,13 @@ bundledPrograms = catMaybes
, Just "git"
#endif
, Just "cp"
#ifndef mingw32_HOST_OS
-- using xargs on windows led to problems, so it's not used there
, Just "xargs"
#endif
, Just "rsync"
, Just "ssh"
, Just "ssh-keygen"
#ifndef mingw32_HOST_OS
, Just "sh"
#endif
@ -35,13 +39,14 @@ bundledPrograms = catMaybes
, ifset SysConfig.wget "wget"
, ifset SysConfig.bup "bup"
, SysConfig.lsof
, SysConfig.gcrypt
, SysConfig.sha1
, SysConfig.sha256
, SysConfig.sha512
, SysConfig.sha224
, SysConfig.sha384
-- ionice is not included in the bundle; we rely on the system's
-- own version, which may better match its kernel
-- nice and ionice are not included in the bundle; we rely on the
-- system's own version, which may better match its kernel
]
where
ifset True s = Just s
View file
@ -13,9 +13,9 @@ import Control.Monad.IfElse
import Data.Char
import Build.TestConfig
import Build.Version
import Utility.SafeCommand
import Utility.Monad
import Utility.Exception
import Utility.ExternalSHA
import qualified Git.Version
@ -32,11 +32,14 @@ tests =
, TestCase "curl" $ testCmd "curl" "curl --version >/dev/null"
, TestCase "wget" $ testCmd "wget" "wget --version >/dev/null"
, TestCase "bup" $ testCmd "bup" "bup --version >/dev/null"
, TestCase "quvi" $ testCmd "quvi" "quvi --version >/dev/null"
, TestCase "nice" $ testCmd "nice" "nice true >/dev/null"
, TestCase "ionice" $ testCmd "ionice" "ionice -c3 true >/dev/null"
, TestCase "gpg" $ maybeSelectCmd "gpg"
[ ("gpg", "--version >/dev/null")
, ("gpg2", "--version >/dev/null") ]
, TestCase "lsof" $ findCmdPath "lsof" "lsof"
, TestCase "git-remote-gcrypt" $ findCmdPath "gcrypt" "git-remote-gcrypt"
, TestCase "ssh connection caching" getSshConnectionCaching
] ++ shaTestCases
[ (1, "da39a3ee5e6b4b0d3255bfef95601890afd80709")
@ -87,40 +90,6 @@ testCp k option = TestCase cmd $ testCmd k cmdline
cmd = "cp " ++ option
cmdline = cmd ++ " " ++ testFile ++ " " ++ testFile ++ ".new"
isReleaseBuild :: IO Bool
isReleaseBuild = isJust <$> catchMaybeIO (getEnv "RELEASE_BUILD")
{- Version is usually based on the major version from the changelog,
- plus the date of the last commit, plus the git rev of that commit.
- This works for autobuilds, ad-hoc builds, etc.
-
- If git or a git repo is not available, or something goes wrong,
- or this is a release build, just use the version from the changelog. -}
getVersion :: Test
getVersion = do
changelogversion <- getChangelogVersion
version <- ifM (isReleaseBuild)
( return changelogversion
, catchDefaultIO changelogversion $ do
let major = takeWhile (/= '.') changelogversion
autoversion <- readProcess "sh"
[ "-c"
, "git log -n 1 --format=format:'%ci %h'| sed -e 's/-//g' -e 's/ .* /-g/'"
] ""
if null autoversion
then return changelogversion
else return $ concat [ major, ".", autoversion ]
)
return $ Config "packageversion" (StringConfig version)
getChangelogVersion :: IO String
getChangelogVersion = do
changelog <- readFile "debian/changelog"
let verline = takeWhile (/= '\n') changelog
return $ middle (words verline !! 1)
where
middle = drop 1 . init
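A Debian changelog's first line has the form "pkgname (version) distribution; urgency=level", so words verline !! 1 is the parenthesized version and middle strips the parens:

-- verline            == "git-annex (1.2.3) unstable; urgency=low"
-- words verline !! 1 == "(1.2.3)"
-- (drop 1 . init)    ==> "1.2.3"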
getGitVersion :: Test
getGitVersion = Config "gitversion" . StringConfig . show
<$> Git.Version.installed
@ -129,25 +98,6 @@ getSshConnectionCaching :: Test
getSshConnectionCaching = Config "sshconnectioncaching" . BoolConfig <$>
boolSystem "sh" [Param "-c", Param "ssh -o ControlPersist=yes -V >/dev/null 2>/dev/null"]
{- Set up cabal file with version. -}
cabalSetup :: IO ()
cabalSetup = do
version <- takeWhile (\c -> isDigit c || c == '.')
<$> getChangelogVersion
cabal <- readFile cabalfile
writeFile tmpcabalfile $ unlines $
map (setfield "Version" version) $
lines cabal
renameFile tmpcabalfile cabalfile
where
cabalfile = "git-annex.cabal"
tmpcabalfile = cabalfile++".tmp"
setfield field value s
| fullfield `isPrefixOf` s = fullfield ++ value
| otherwise = s
where
fullfield = field ++ ": "
setup :: IO ()
setup = do
createDirectoryIfMissing True tmpDir
@ -165,8 +115,8 @@ run ts = do
then writeSysConfig $ androidConfig config
else writeSysConfig config
cleanup
whenM (isReleaseBuild) $
cabalSetup
whenM isReleaseBuild $
cabalSetup "git-annex.cabal"
{- Hard codes some settings to cross-compile for Android. -}
androidConfig :: [Config] -> [Config]
View file
@ -294,6 +294,8 @@ expandExpressionSplice s lls = concat [before, spliced:padding, end]
{- Tweaks code output by GHC in splices to actually build. Yipes. -}
mangleCode :: String -> String
mangleCode = flip_colon
. remove_unnecessary_type_signatures
. lambdaparenhack
. lambdaparens
. declaration_parens
. case_layout
@ -331,6 +333,12 @@ mangleCode = flip_colon
preindent <- many1 $ oneOf " \n"
string "\\ "
lambdaparams <- restofline
continuedlambdaparams <- many $ try $ do
indent <- many1 $ char ' '
p <- satisfy isLetter
aram <- many $ satisfy isAlphaNum <|> oneOf "_"
newline
return $ indent ++ p:aram ++ "\n"
indent <- many1 $ char ' '
string "-> "
firstline <- restofline
@ -342,10 +350,46 @@ mangleCode = flip_colon
return $ concat
[ prefix:preindent
, "(\\ " ++ lambdaparams ++ "\n"
, concat continuedlambdaparams
, indent ++ "-> "
, lambdaparens $ intercalate "\n" (firstline:lambdalines)
, ")\n"
]
{- Hack to add missing parens in a specific case in yesod
- static route code.
-
- StaticR
- yesod_dispatch_env_a4iDV
- (\ p_a4iE2 r_a4iE3
- -> r_a4iE3 {Network.Wai.pathInfo = p_a4iE2}
- xrest_a4iDT req_a4iDW)) }
-
- Need to add another paren around the lambda, and close it
- before its parameters. lambdaparens misses this one because
- there is already one paren present.
-
- FIXME: This is a hack. lambdaparens could just always add a
- layer of parens even when a lambda seems to be in parens already.
-}
lambdaparenhack = parsecAndReplace $ do
indent <- many1 $ char ' '
staticr <- string "StaticR"
newline
string indent
yesod_dispatch_env <- restofline
string indent
lambdaprefix <- string "(\\ "
l1 <- restofline
string indent
lambdaarrow <- string " ->"
l2 <- restofline
return $ unlines
[ indent ++ staticr
, indent ++ yesod_dispatch_env
, indent ++ "(" ++ lambdaprefix ++ l1
, indent ++ lambdaarrow ++ l2 ++ ")"
]
restofline = manyTill (noneOf "\n") newline
@ -439,6 +483,19 @@ mangleCode = flip_colon
- declarations. -}
declaration_parens = replace "StaticR Route Static" "StaticR (Route Static)"
{- A type signature is sometimes given for an entire lambda,
- which is not properly parenthesized or laid out. This is a
- hack to remove one specific case where this happens and the
- signature is easily inferred, so is just removed.
-}
remove_unnecessary_type_signatures = parsecAndReplace $ do
string " ::"
newline
many1 $ char ' '
string "Text.Css.Block Text.Css.Resolved"
newline
return ""
{- GHC may add full package and version qualifications for
- symbols from unimported modules. We don't want these.
-
@ -527,7 +584,7 @@ text_builder_hack = replace "Data.Text.Lazy.Builder.Internal.fromText" "Data.Tex
parsecAndReplace :: Parser String -> String -> String
parsecAndReplace p s = case parse find "" s of
Left e -> s
Right l -> concatMap (either (\c -> [c]) id) l
Right l -> concatMap (either return id) l
where
find :: Parser [Either Char String]
find = many $ try (Right <$> p) <|> (Left <$> anyChar)
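A usage sketch: parsecAndReplace walks the input one character at a time, substituting the parser's result wherever it succeeds and passing everything else through. For example, with Text.Parsec's string combinator in scope:

-- ghci> parsecAndReplace (string "foo" >> return "bar") "afoob foo"
-- "abarb bar"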
View file
@ -144,7 +144,7 @@ getLibName lib libmap = case M.lookup lib libmap of
Just n -> (n, libmap)
Nothing -> (nextfreename, M.insert lib nextfreename libmap)
where
names = map (\c -> [c]) ['A' .. 'Z'] ++
names = map pure ['A' .. 'Z'] ++
[[n, l] | n <- ['0' .. '9'], l <- ['A' .. 'Z']]
used = S.fromList $ M.elems libmap
nextfreename = fromMaybe (error "ran out of short library names!") $
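The pool above is 26 single-letter names followed by 260 digit-letter pairs, 286 short names in all; nextfreename presumably takes the first one not in used (its continuation is cut off by the diff context). Checking the enumeration in ghci:

-- ghci> let names = map pure ['A'..'Z'] ++ [[n, l] | n <- ['0'..'9'], l <- ['A'..'Z']] :: [String]
-- ghci> (length names, names !! 26)
-- (286,"0A")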
Some files were not shown because too many files have changed in this diff.