Compare commits
No commits in common. "ci" and "uuid-type-rework" have entirely different histories.
ci ... uuid-type-rework
3107 changed files with 115418 additions and 153 deletions
@@ -1,18 +0,0 @@
Support ghc-9.8 by widening a lot of constraints.

This patch can be removed once upstream supports GHC 9.8 officially.

diff -uprN git-annex-10.20240227.orig/cabal.project git-annex-10.20240227/cabal.project
--- git-annex-10.20240227.orig/cabal.project	1970-01-01 01:00:00.000000000 +0100
+++ git-annex-10.20240227/cabal.project	2024-04-28 13:30:14.061706299 +0200
@@ -0,0 +1,10 @@
+packages: *.cabal
+
+allow-newer: dav
+allow-newer: haskeline:filepath
+allow-newer: haskeline:directory
+allow-newer: xml-hamlet
+allow-newer: aws:filepath
+allow-newer: dbus:network
+allow-newer: dbus:filepath
+allow-newer: microstache:filepath
@@ -1,85 +0,0 @@
on:
  workflow_dispatch:
    inputs:
      ref_name:
        description: 'Tag or commit'
        required: true
        type: string

  push:
    tags:
      - '*'

jobs:
  cabal-config-edge:
    name: Generate cabal config for edge
    runs-on: x86_64
    container:
      image: alpine:edge
    env:
      CI_ALPINE_TARGET_RELEASE: edge
    steps:
      - name: Environment setup
        run: apk add nodejs git cabal patch
      - name: Repo pull
        uses: actions/checkout@v4
        with:
          fetch-depth: 1
          ref: ${{ inputs.ref_name }}
      - name: Config generation
        run: |
          patch -p1 -i .forgejo/patches/ghc-9.8.patch
          HOME="${{ github.workspace }}"/cabal_cache cabal update
          HOME="${{ github.workspace }}"/cabal_cache cabal v2-freeze --shadow-installed-packages --strong-flags --flags="+assistant +webapp +pairing +production +torrentparser +magicmime +benchmark -debuglocks +dbus +networkbsd +gitlfs +httpclientrestricted"
          mv cabal.project.freeze git-annex.config
      - name: Package upload
        uses: forgejo/upload-artifact@v3
        with:
          name: cabalconfigedge
          path: git-annex*.config
  cabal-config-v320:
    name: Generate cabal config for v3.20
    runs-on: x86_64
    container:
      image: alpine:3.20
    env:
      CI_ALPINE_TARGET_RELEASE: v3.20
    steps:
      - name: Environment setup
        run: apk add nodejs git cabal patch
      - name: Repo pull
        uses: actions/checkout@v4
        with:
          fetch-depth: 1
          ref: ${{ inputs.ref_name }}
      - name: Config generation
        run: |
          patch -p1 -i .forgejo/patches/ghc-9.8.patch
          HOME="${{ github.workspace }}"/cabal_cache cabal update
          HOME="${{ github.workspace }}"/cabal_cache cabal v2-freeze --shadow-installed-packages --strong-flags --flags="+assistant +webapp +pairing +production +torrentparser +magicmime +benchmark -debuglocks +dbus +networkbsd +gitlfs +httpclientrestricted"
          mv cabal.project.freeze git-annex.config
      - name: Package upload
        uses: forgejo/upload-artifact@v3
        with:
          name: cabalconfig320
          path: git-annex*.config
  upload-tarball:
    name: Upload to generic repo
    runs-on: x86_64
    needs: [cabal-config-edge,cabal-config-v320]
    container:
      image: alpine:latest
    steps:
      - name: Environment setup
        run: apk add nodejs curl findutils
      - name: Package download
        uses: forgejo/download-artifact@v3
      - name: Package deployment
        run: |
          if test $GITHUB_REF_NAME == "ci" ; then
            CI_REF_NAME=${{ inputs.ref_name }}
          else
            CI_REF_NAME=$GITHUB_REF_NAME
          fi
          curl --user ${{ vars.CODE_FORGEJO_USER }}:${{ secrets.CODE_FORGEJO_TOKEN }} --upload-file ./cabalconfigedge/git-annex.config ${{ github.server_url }}/api/packages/mirrors/generic/git-annex/$CI_REF_NAME/git-annex-$CI_REF_NAME-edge.cabal
          curl --user ${{ vars.CODE_FORGEJO_USER }}:${{ secrets.CODE_FORGEJO_TOKEN }} --upload-file ./cabalconfig320/git-annex.config ${{ github.server_url }}/api/packages/mirrors/generic/git-annex/$CI_REF_NAME/git-annex-$CI_REF_NAME-v320.cabal
@@ -1,50 +0,0 @@
on:
  workflow_dispatch:

  schedule:
    - cron: '@hourly'

jobs:
  mirror:
    name: Pull from upstream
    runs-on: x86_64
    container:
      image: alpine:latest
    env:
      upstream: https://git.joeyh.name/git/git-annex.git
      tags: '10.2024*'
    steps:
      - name: Environment setup
        run: apk add grep git sed coreutils bash nodejs
      - name: Fetch destination
        uses: actions/checkout@v4
        with:
          fetch-depth: 1
          ref: ci
          token: ${{ secrets.CODE_FORGEJO_TOKEN }}
      - name: Missing tag detecting
        run: |
          git ls-remote $upstream "refs/tags/$tags" | grep -v '{' | sed 's|.*/||' | sort > upstream_tags
          git ls-remote ${{ github.server_url }}/${{ github.repository }} "refs/tags/$tags" | grep -v '{' | sed 's|.*/||' | sort > destination_tags
          comm -23 upstream_tags destination_tags > missing_tags
          echo "Missing tags:"
          cat missing_tags
      - name: Missing tag fetch
        run: |
          git remote add upstream $upstream
          while read tag; do
            git fetch upstream tag $tag --no-tags
          done < missing_tags
      - name: Packaging workflow injection
        run: |
          while read tag; do
            git checkout $tag
            git tag -d $tag
            git checkout ci -- ./.forgejo
            git config user.name "forgejo-actions[bot]"
            git config user.email "dev@ayakael.net"
            git commit -m 'Inject custom workflow'
            git tag -a $tag -m $tag
          done < missing_tags
      - name: Push to destination
        run: git push --force origin refs/tags/*:refs/tags/* --tags
1 .ghci Normal file
@@ -0,0 +1 @@
:load Common
1 .gitattributes vendored Normal file
@@ -0,0 +1 @@
debian/changelog merge=dpkg-mergechangelogs
21 .gitignore vendored Normal file
@@ -0,0 +1,21 @@
tmp
test
build-stamp
Build/SysConfig.hs
git-annex
git-annex.1
git-annex-shell.1
git-union-merge.1
doc/.ikiwiki
html
*.tix
.hpc
dist
# Sandboxed builds
cabal-dev
# Project-local emacs configuration
.dir-locals.el
# OSX related
.DS_Store
.virthualenv
tags
218 Annex.hs Normal file
@@ -0,0 +1,218 @@
{- git-annex monad
|
||||
-
|
||||
- Copyright 2010-2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
{-# LANGUAGE PackageImports, GeneralizedNewtypeDeriving, TypeFamilies, MultiParamTypeClasses #-}
|
||||
|
||||
module Annex (
|
||||
Annex,
|
||||
AnnexState(..),
|
||||
FileInfo(..),
|
||||
PreferredContentMap,
|
||||
new,
|
||||
newState,
|
||||
run,
|
||||
eval,
|
||||
exec,
|
||||
getState,
|
||||
changeState,
|
||||
setFlag,
|
||||
setField,
|
||||
setOutput,
|
||||
getFlag,
|
||||
getField,
|
||||
addCleanup,
|
||||
gitRepo,
|
||||
inRepo,
|
||||
fromRepo,
|
||||
getGitConfig,
|
||||
changeGitConfig,
|
||||
changeGitRepo,
|
||||
) where
|
||||
|
||||
import "mtl" Control.Monad.State.Strict
|
||||
import Control.Monad.Trans.Control (StM, MonadBaseControl, liftBaseWith, restoreM)
|
||||
import Control.Monad.Base (liftBase, MonadBase)
|
||||
import System.Posix.Types (Fd)
|
||||
|
||||
import Common
|
||||
import qualified Git
|
||||
import qualified Git.Config
|
||||
import Git.CatFile
|
||||
import Git.CheckAttr
|
||||
import Git.SharedRepository
|
||||
import qualified Git.Queue
|
||||
import Types.Backend
|
||||
import Types.GitConfig
|
||||
import qualified Types.Remote
|
||||
import Types.Crypto
|
||||
import Types.BranchState
|
||||
import Types.TrustLevel
|
||||
import Types.Group
|
||||
import Types.Messages
|
||||
import Types.UUID
|
||||
import Utility.State
|
||||
import qualified Utility.Matcher
|
||||
import qualified Data.Map as M
|
||||
import qualified Data.Set as S
|
||||
|
||||
-- git-annex's monad
|
||||
newtype Annex a = Annex { runAnnex :: StateT AnnexState IO a }
|
||||
deriving (
|
||||
Monad,
|
||||
MonadIO,
|
||||
MonadState AnnexState,
|
||||
Functor,
|
||||
Applicative
|
||||
)
|
||||
|
||||
instance MonadBase IO Annex where
|
||||
liftBase = Annex . liftBase
|
||||
|
||||
instance MonadBaseControl IO Annex where
|
||||
newtype StM Annex a = StAnnex (StM (StateT AnnexState IO) a)
|
||||
liftBaseWith f = Annex $ liftBaseWith $ \runInIO ->
|
||||
f $ liftM StAnnex . runInIO . runAnnex
|
||||
restoreM = Annex . restoreM . unStAnnex
|
||||
where
|
||||
unStAnnex (StAnnex st) = st
|
||||
|
||||
type Matcher a = Either [Utility.Matcher.Token a] (Utility.Matcher.Matcher a)
|
||||
|
||||
data FileInfo = FileInfo
|
||||
{ relFile :: FilePath -- may be relative to cwd
|
||||
, matchFile :: FilePath -- filepath to match on; may be relative to top
|
||||
}
|
||||
|
||||
type PreferredContentMap = M.Map UUID (Utility.Matcher.Matcher (S.Set UUID -> FileInfo -> Annex Bool))
|
||||
|
||||
-- internal state storage
|
||||
data AnnexState = AnnexState
|
||||
{ repo :: Git.Repo
|
||||
, gitconfig :: GitConfig
|
||||
, backends :: [BackendA Annex]
|
||||
, remotes :: [Types.Remote.RemoteA Annex]
|
||||
, output :: MessageState
|
||||
, force :: Bool
|
||||
, fast :: Bool
|
||||
, auto :: Bool
|
||||
, branchstate :: BranchState
|
||||
, repoqueue :: Maybe Git.Queue.Queue
|
||||
, catfilehandle :: Maybe CatFileHandle
|
||||
, checkattrhandle :: Maybe CheckAttrHandle
|
||||
, forcebackend :: Maybe String
|
||||
, limit :: Matcher (FileInfo -> Annex Bool)
|
||||
, uuidmap :: Maybe UUIDMap
|
||||
, preferredcontentmap :: Maybe PreferredContentMap
|
||||
, shared :: Maybe SharedRepository
|
||||
, forcetrust :: TrustMap
|
||||
, trustmap :: Maybe TrustMap
|
||||
, groupmap :: Maybe GroupMap
|
||||
, ciphers :: M.Map StorableCipher Cipher
|
||||
, lockpool :: M.Map FilePath Fd
|
||||
, flags :: M.Map String Bool
|
||||
, fields :: M.Map String String
|
||||
, cleanup :: M.Map String (Annex ())
|
||||
, inodeschanged :: Maybe Bool
|
||||
}
|
||||
|
||||
newState :: Git.Repo -> AnnexState
|
||||
newState gitrepo = AnnexState
|
||||
{ repo = gitrepo
|
||||
, gitconfig = extractGitConfig gitrepo
|
||||
, backends = []
|
||||
, remotes = []
|
||||
, output = defaultMessageState
|
||||
, force = False
|
||||
, fast = False
|
||||
, auto = False
|
||||
, branchstate = startBranchState
|
||||
, repoqueue = Nothing
|
||||
, catfilehandle = Nothing
|
||||
, checkattrhandle = Nothing
|
||||
, forcebackend = Nothing
|
||||
, limit = Left []
|
||||
, uuidmap = Nothing
|
||||
, preferredcontentmap = Nothing
|
||||
, shared = Nothing
|
||||
, forcetrust = M.empty
|
||||
, trustmap = Nothing
|
||||
, groupmap = Nothing
|
||||
, ciphers = M.empty
|
||||
, lockpool = M.empty
|
||||
, flags = M.empty
|
||||
, fields = M.empty
|
||||
, cleanup = M.empty
|
||||
, inodeschanged = Nothing
|
||||
}
|
||||
|
||||
{- Makes an Annex state object for the specified git repo.
|
||||
- Ensures the config is read, if it was not already. -}
|
||||
new :: Git.Repo -> IO AnnexState
|
||||
new = newState <$$> Git.Config.read
|
||||
|
||||
{- performs an action in the Annex monad -}
|
||||
run :: AnnexState -> Annex a -> IO (a, AnnexState)
|
||||
run s a = runStateT (runAnnex a) s
|
||||
eval :: AnnexState -> Annex a -> IO a
|
||||
eval s a = evalStateT (runAnnex a) s
|
||||
exec :: AnnexState -> Annex a -> IO AnnexState
|
||||
exec s a = execStateT (runAnnex a) s
|
||||
|
||||
{- Sets a flag to True -}
|
||||
setFlag :: String -> Annex ()
|
||||
setFlag flag = changeState $ \s ->
|
||||
s { flags = M.insertWith' const flag True $ flags s }
|
||||
|
||||
{- Sets a field to a value -}
|
||||
setField :: String -> String -> Annex ()
|
||||
setField field value = changeState $ \s ->
|
||||
s { fields = M.insertWith' const field value $ fields s }
|
||||
|
||||
{- Adds a cleanup action to perform. -}
|
||||
addCleanup :: String -> Annex () -> Annex ()
|
||||
addCleanup uid a = changeState $ \s ->
|
||||
s { cleanup = M.insertWith' const uid a $ cleanup s }
|
||||
|
||||
{- Sets the type of output to emit. -}
|
||||
setOutput :: OutputType -> Annex ()
|
||||
setOutput o = changeState $ \s ->
|
||||
s { output = (output s) { outputType = o } }
|
||||
|
||||
{- Checks if a flag was set. -}
|
||||
getFlag :: String -> Annex Bool
|
||||
getFlag flag = fromMaybe False . M.lookup flag <$> getState flags
|
||||
|
||||
{- Gets the value of a field. -}
|
||||
getField :: String -> Annex (Maybe String)
|
||||
getField field = M.lookup field <$> getState fields
|
||||
|
||||
{- Returns the annex's git repository. -}
|
||||
gitRepo :: Annex Git.Repo
|
||||
gitRepo = getState repo
|
||||
|
||||
{- Runs an IO action in the annex's git repository. -}
|
||||
inRepo :: (Git.Repo -> IO a) -> Annex a
|
||||
inRepo a = liftIO . a =<< gitRepo
|
||||
|
||||
{- Extracts a value from the annex's git repository. -}
|
||||
fromRepo :: (Git.Repo -> a) -> Annex a
|
||||
fromRepo a = a <$> gitRepo
|
||||
|
||||
{- Gets the GitConfig settings. -}
|
||||
getGitConfig :: Annex GitConfig
|
||||
getGitConfig = getState gitconfig
|
||||
|
||||
{- Modifies a GitConfig setting. -}
|
||||
changeGitConfig :: (GitConfig -> GitConfig) -> Annex ()
|
||||
changeGitConfig a = changeState $ \s -> s { gitconfig = a (gitconfig s) }
|
||||
|
||||
{- Changing the git Repo data also involves re-extracting its GitConfig. -}
|
||||
changeGitRepo :: Git.Repo -> Annex ()
|
||||
changeGitRepo r = changeState $ \s -> s
|
||||
{ repo = r
|
||||
, gitconfig = extractGitConfig r
|
||||
}
|
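As a reading aid, here is a minimal, hypothetical sketch (not part of this commit) of how the entry points above fit together: build an AnnexState with new, then run actions with eval. The Git.Construct.fromCwd repository constructor is an assumption about the surrounding codebase, not something shown in this diff.

-- Hypothetical usage sketch; Git.Construct.fromCwd is assumed to exist in
-- the surrounding codebase and is not part of this file.
module AnnexDemo where

import qualified Annex
import qualified Git.Construct

demo :: IO ()
demo = do
    state <- Annex.new =<< Git.Construct.fromCwd  -- read the repo and its config
    flagset <- Annex.eval state $ do
        Annex.setFlag "myflag"                    -- stored in the flags map
        Annex.getFlag "myflag"                    -- defaults to False when unset
    print flagset                                 -- prints True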
364 Annex/Branch.hs Normal file
@@ -0,0 +1,364 @@
{- management of the git-annex branch
|
||||
-
|
||||
- Copyright 2011-2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
{-# LANGUAGE CPP #-}
|
||||
|
||||
module Annex.Branch (
|
||||
fullname,
|
||||
name,
|
||||
hasOrigin,
|
||||
hasSibling,
|
||||
siblingBranches,
|
||||
create,
|
||||
update,
|
||||
forceUpdate,
|
||||
updateTo,
|
||||
get,
|
||||
change,
|
||||
commit,
|
||||
files,
|
||||
) where
|
||||
|
||||
import qualified Data.ByteString.Lazy.Char8 as L
|
||||
import System.Posix.Env
|
||||
|
||||
import Common.Annex
|
||||
import Annex.BranchState
|
||||
import Annex.Journal
|
||||
import qualified Git
|
||||
import qualified Git.Command
|
||||
import qualified Git.Ref
|
||||
import qualified Git.Branch
|
||||
import qualified Git.UnionMerge
|
||||
import qualified Git.UpdateIndex
|
||||
import Git.HashObject
|
||||
import Git.Types
|
||||
import Git.FilePath
|
||||
import Annex.CatFile
|
||||
import Annex.Perms
|
||||
import qualified Annex
|
||||
|
||||
{- Name of the branch that is used to store git-annex's information. -}
|
||||
name :: Git.Ref
|
||||
name = Git.Ref "git-annex"
|
||||
|
||||
{- Fully qualified name of the branch. -}
|
||||
fullname :: Git.Ref
|
||||
fullname = Git.Ref $ "refs/heads/" ++ show name
|
||||
|
||||
{- Branch's name in origin. -}
|
||||
originname :: Git.Ref
|
||||
originname = Git.Ref $ "origin/" ++ show name
|
||||
|
||||
{- Does origin/git-annex exist? -}
|
||||
hasOrigin :: Annex Bool
|
||||
hasOrigin = inRepo $ Git.Ref.exists originname
|
||||
|
||||
{- Does the git-annex branch or a sibling foo/git-annex branch exist? -}
|
||||
hasSibling :: Annex Bool
|
||||
hasSibling = not . null <$> siblingBranches
|
||||
|
||||
{- List of git-annex (refs, branches), including the main one and any
|
||||
- from remotes. Duplicate refs are filtered out. -}
|
||||
siblingBranches :: Annex [(Git.Ref, Git.Branch)]
|
||||
siblingBranches = inRepo $ Git.Ref.matchingUniq name
|
||||
|
||||
{- Creates the branch, if it does not already exist. -}
|
||||
create :: Annex ()
|
||||
create = void getBranch
|
||||
|
||||
{- Returns the ref of the branch, creating it first if necessary. -}
|
||||
getBranch :: Annex Git.Ref
|
||||
getBranch = maybe (hasOrigin >>= go >>= use) return =<< branchsha
|
||||
where
|
||||
go True = do
|
||||
inRepo $ Git.Command.run
|
||||
[Param "branch", Param $ show name, Param $ show originname]
|
||||
fromMaybe (error $ "failed to create " ++ show name)
|
||||
<$> branchsha
|
||||
go False = withIndex' True $
|
||||
inRepo $ Git.Branch.commit "branch created" fullname []
|
||||
use sha = do
|
||||
setIndexSha sha
|
||||
return sha
|
||||
branchsha = inRepo $ Git.Ref.sha fullname
|
||||
|
||||
{- Ensures that the branch and index are up-to-date; should be
|
||||
- called before data is read from it. Runs only once per git-annex run. -}
|
||||
update :: Annex ()
|
||||
update = runUpdateOnce $ void $ updateTo =<< siblingBranches
|
||||
|
||||
{- Forces an update even if one has already been run. -}
|
||||
forceUpdate :: Annex Bool
|
||||
forceUpdate = updateTo =<< siblingBranches
|
||||
|
||||
{- Merges the specified Refs into the index, if they have any changes not
|
||||
- already in it. The Branch names are only used in the commit message;
|
||||
- it's even possible that the provided Branches have not been updated to
|
||||
- point to the Refs yet.
|
||||
-
|
||||
- The branch is fast-forwarded if possible, otherwise a merge commit is
|
||||
- made.
|
||||
-
|
||||
- Before Refs are merged into the index, it's important to first stage the
|
||||
- journal into the index. Otherwise, any changes in the journal would
|
||||
- later get staged, and might overwrite changes made during the merge.
|
||||
- This is only done if some of the Refs do need to be merged.
|
||||
-
|
||||
- Returns True if any refs were merged in, False otherwise.
|
||||
-}
|
||||
updateTo :: [(Git.Ref, Git.Branch)] -> Annex Bool
|
||||
updateTo pairs = do
|
||||
-- ensure branch exists, and get its current ref
|
||||
branchref <- getBranch
|
||||
dirty <- journalDirty
|
||||
(refs, branches) <- unzip <$> filterM isnewer pairs
|
||||
if null refs
|
||||
{- Even when no refs need to be merged, the index
|
||||
- may still be updated if the branch has gotten ahead
|
||||
- of the index. -}
|
||||
then whenM (needUpdateIndex branchref) $ lockJournal $ do
|
||||
forceUpdateIndex branchref
|
||||
{- When there are journalled changes
|
||||
- as well as the branch being updated,
|
||||
- a commit needs to be done. -}
|
||||
when dirty $
|
||||
go branchref True [] []
|
||||
else lockJournal $ go branchref dirty refs branches
|
||||
return $ not $ null refs
|
||||
where
|
||||
isnewer (r, _) = inRepo $ Git.Branch.changed fullname r
|
||||
go branchref dirty refs branches = withIndex $ do
|
||||
cleanjournal <- if dirty then stageJournal else return noop
|
||||
let merge_desc = if null branches
|
||||
then "update"
|
||||
else "merging " ++
|
||||
unwords (map Git.Ref.describe branches) ++
|
||||
" into " ++ show name
|
||||
unless (null branches) $ do
|
||||
showSideAction merge_desc
|
||||
mergeIndex refs
|
||||
ff <- if dirty
|
||||
then return False
|
||||
else inRepo $ Git.Branch.fastForward fullname refs
|
||||
if ff
|
||||
then updateIndex branchref
|
||||
else commitBranch branchref merge_desc
|
||||
(nub $ fullname:refs)
|
||||
liftIO cleanjournal
|
||||
|
||||
{- Gets the content of a file, which may be in the journal, or committed
|
||||
- to the branch. Due to limitations of git cat-file, does *not* get content
|
||||
- that has only been staged to the index.
|
||||
-
|
||||
- Updates the branch if necessary, to ensure the most up-to-date available
|
||||
- content is available.
|
||||
-
|
||||
- Returns an empty string if the file doesn't exist yet. -}
|
||||
get :: FilePath -> Annex String
|
||||
get = get' False
|
||||
|
||||
{- Like get, but does not merge the branch, so the info returned may not
|
||||
- reflect changes in remotes. (Changing the value this returns, and then
|
||||
- merging is always the same as using get, and then changing its value.) -}
|
||||
getStale :: FilePath -> Annex String
|
||||
getStale = get' True
|
||||
|
||||
get' :: Bool -> FilePath -> Annex String
|
||||
get' staleok file = fromjournal =<< getJournalFile file
|
||||
where
|
||||
fromjournal (Just content) = return content
|
||||
fromjournal Nothing
|
||||
| staleok = withIndex frombranch
|
||||
| otherwise = do
|
||||
update
|
||||
frombranch
|
||||
frombranch = withIndex $ L.unpack <$> catFile fullname file
|
||||
|
||||
{- Applies a function to modify the content of a file.
|
||||
-
|
||||
- Note that this does not cause the branch to be merged, it only
|
||||
- modifies the current content of the file on the branch.
|
||||
-}
|
||||
change :: FilePath -> (String -> String) -> Annex ()
|
||||
change file a = lockJournal $ a <$> getStale file >>= set file
|
||||
|
||||
{- Records new content of a file into the journal -}
|
||||
set :: FilePath -> String -> Annex ()
|
||||
set file content = setJournalFile file content
|
||||
|
||||
{- Stages the journal, and commits staged changes to the branch. -}
|
||||
commit :: String -> Annex ()
|
||||
commit message = whenM journalDirty $ lockJournal $ do
|
||||
cleanjournal <- stageJournal
|
||||
ref <- getBranch
|
||||
withIndex $ commitBranch ref message [fullname]
|
||||
liftIO $ cleanjournal
|
||||
|
||||
{- Commits the staged changes in the index to the branch.
|
||||
-
|
||||
- Ensures that the branch's index file is first updated to the state
|
||||
- of the branch at branchref, before running the commit action. This
|
||||
- is needed because the branch may have had changes pushed to it, that
|
||||
- are not yet reflected in the index.
|
||||
-
|
||||
- Also safely handles a race that can occur if a change is being pushed
|
||||
- into the branch at the same time. When the race happens, the commit will
|
||||
- be made on top of the newly pushed change, but without the index file
|
||||
- being updated to include it. The result is that the newly pushed
|
||||
- change is reverted. This race is detected and another commit made
|
||||
- to fix it.
|
||||
-
|
||||
- The branchref value can have been obtained using getBranch at any
|
||||
- previous point, though getting it a long time ago makes the race
|
||||
- more likely to occur.
|
||||
-}
|
||||
commitBranch :: Git.Ref -> String -> [Git.Ref] -> Annex ()
|
||||
commitBranch branchref message parents = do
|
||||
showStoringStateAction
|
||||
commitBranch' branchref message parents
|
||||
commitBranch' :: Git.Ref -> String -> [Git.Ref] -> Annex ()
|
||||
commitBranch' branchref message parents = do
|
||||
updateIndex branchref
|
||||
committedref <- inRepo $ Git.Branch.commit message fullname parents
|
||||
setIndexSha committedref
|
||||
parentrefs <- commitparents <$> catObject committedref
|
||||
when (racedetected branchref parentrefs) $
|
||||
fixrace committedref parentrefs
|
||||
where
|
||||
-- look for "parent ref" lines and return the refs
|
||||
commitparents = map (Git.Ref . snd) . filter isparent .
|
||||
map (toassoc . L.unpack) . L.lines
|
||||
toassoc = separate (== ' ')
|
||||
isparent (k,_) = k == "parent"
|
||||
|
||||
{- The race can be detected by checking the commit's
|
||||
- parent, which will be the newly pushed branch,
|
||||
- instead of the expected ref that the index was updated to. -}
|
||||
racedetected expectedref parentrefs
|
||||
| expectedref `elem` parentrefs = False -- good parent
|
||||
| otherwise = True -- race!
|
||||
|
||||
{- To recover from the race, union merge the lost refs
|
||||
- into the index, and recommit on top of the bad commit. -}
|
||||
fixrace committedref lostrefs = do
|
||||
mergeIndex lostrefs
|
||||
commitBranch committedref racemessage [committedref]
|
||||
|
||||
racemessage = message ++ " (recovery from race)"
|
||||
|
||||
{- Lists all files on the branch. There may be duplicates in the list. -}
|
||||
files :: Annex [FilePath]
|
||||
files = do
|
||||
update
|
||||
withIndex $ do
|
||||
bfiles <- inRepo $ Git.Command.pipeNullSplitZombie
|
||||
[ Params "ls-tree --name-only -r -z"
|
||||
, Param $ show fullname
|
||||
]
|
||||
jfiles <- getJournalledFiles
|
||||
return $ jfiles ++ bfiles
|
||||
|
||||
{- Populates the branch's index file with the current branch contents.
|
||||
-
|
||||
- This is only done when the index doesn't yet exist, and the index
|
||||
- is used to build up changes to be committed to the branch, and merge
|
||||
- in changes from other branches.
|
||||
-}
|
||||
genIndex :: Git.Repo -> IO ()
|
||||
genIndex g = Git.UpdateIndex.streamUpdateIndex g
|
||||
[Git.UpdateIndex.lsTree fullname g]
|
||||
|
||||
{- Merges the specified refs into the index.
|
||||
- Any changes staged in the index will be preserved. -}
|
||||
mergeIndex :: [Git.Ref] -> Annex ()
|
||||
mergeIndex branches = do
|
||||
h <- catFileHandle
|
||||
inRepo $ \g -> Git.UnionMerge.mergeIndex h g branches
|
||||
|
||||
{- Runs an action using the branch's index file. -}
|
||||
withIndex :: Annex a -> Annex a
|
||||
withIndex = withIndex' False
|
||||
withIndex' :: Bool -> Annex a -> Annex a
|
||||
withIndex' bootstrapping a = do
|
||||
f <- fromRepo gitAnnexIndex
|
||||
g <- gitRepo
|
||||
#ifdef __ANDROID__
|
||||
{- Work around for weird getEnvironment breakage on Android. See
|
||||
- https://github.com/neurocyte/ghc-android/issues/7
|
||||
- Instead, use getEnv to get some key environment variables that
|
||||
- git expects to have. -}
|
||||
let keyenv = words "USER PATH GIT_EXEC_PATH HOSTNAME HOME"
|
||||
let getEnvPair k = maybe Nothing (\v -> Just (k, v)) <$> getEnv k
|
||||
e <- liftIO $ catMaybes <$> forM keyenv getEnvPair
|
||||
#else
|
||||
e <- liftIO getEnvironment
|
||||
#endif
|
||||
let g' = g { gitEnv = Just $ ("GIT_INDEX_FILE", f):e }
|
||||
|
||||
Annex.changeState $ \s -> s { Annex.repo = g' }
|
||||
checkIndexOnce $ unlessM (liftIO $ doesFileExist f) $ do
|
||||
unless bootstrapping create
|
||||
liftIO $ createDirectoryIfMissing True $ takeDirectory f
|
||||
unless bootstrapping $ inRepo genIndex
|
||||
r <- a
|
||||
Annex.changeState $ \s -> s { Annex.repo = (Annex.repo s) { gitEnv = gitEnv g} }
|
||||
|
||||
return r
|
||||
|
||||
{- Updates the branch's index to reflect the current contents of the branch.
|
||||
- Any changes staged in the index will be preserved.
|
||||
-
|
||||
- Compares the ref stored in the lock file with the current
|
||||
- ref of the branch to see if an update is needed.
|
||||
-}
|
||||
updateIndex :: Git.Ref -> Annex ()
|
||||
updateIndex branchref = whenM (needUpdateIndex branchref) $
|
||||
forceUpdateIndex branchref
|
||||
|
||||
forceUpdateIndex :: Git.Ref -> Annex ()
|
||||
forceUpdateIndex branchref = do
|
||||
withIndex $ mergeIndex [fullname]
|
||||
setIndexSha branchref
|
||||
|
||||
{- Checks if the index needs to be updated. -}
|
||||
needUpdateIndex :: Git.Ref -> Annex Bool
|
||||
needUpdateIndex branchref = do
|
||||
lock <- fromRepo gitAnnexIndexLock
|
||||
lockref <- Git.Ref . firstLine <$>
|
||||
liftIO (catchDefaultIO "" $ readFileStrict lock)
|
||||
return (lockref /= branchref)
|
||||
|
||||
{- Record that the branch's index has been updated to correspond to a
|
||||
- given ref of the branch. -}
|
||||
setIndexSha :: Git.Ref -> Annex ()
|
||||
setIndexSha ref = do
|
||||
lock <- fromRepo gitAnnexIndexLock
|
||||
liftIO $ writeFile lock $ show ref ++ "\n"
|
||||
setAnnexPerm lock
|
||||
|
||||
{- Stages the journal into the index and returns an action that will
|
||||
- clean up the staged journal files, which should only be run once
|
||||
- the index has been committed to the branch. Should be run within
|
||||
- lockJournal, to prevent others from modifying the journal. -}
|
||||
stageJournal :: Annex (IO ())
|
||||
stageJournal = withIndex $ do
|
||||
g <- gitRepo
|
||||
let dir = gitAnnexJournalDir g
|
||||
fs <- getJournalFiles
|
||||
liftIO $ do
|
||||
h <- hashObjectStart g
|
||||
Git.UpdateIndex.streamUpdateIndex g
|
||||
[genstream dir h fs]
|
||||
hashObjectStop h
|
||||
return $ liftIO $ mapM_ removeFile $ map (dir </>) fs
|
||||
where
|
||||
genstream dir h fs streamer = forM_ fs $ \file -> do
|
||||
let path = dir </> file
|
||||
sha <- hashFile h path
|
||||
streamer $ Git.UpdateIndex.updateIndexLine
|
||||
sha FileBlob (asTopFilePath $ fileJournal file)
|
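A hedged illustration of the get/change/commit API defined above; the file name "example.log" is purely illustrative and not something git-annex actually stores on its branch.

-- Hypothetical sketch: append a line to a file kept on the git-annex branch,
-- then commit the journalled change. "example.log" is an illustrative name.
import Annex (Annex)
import qualified Annex.Branch

recordNote :: String -> Annex ()
recordNote note = do
    Annex.Branch.change "example.log" (++ note ++ "\n")
    Annex.Branch.commit "add example note"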
43 Annex/BranchState.hs Normal file
@@ -0,0 +1,43 @@
{- git-annex branch state management
|
||||
-
|
||||
- Runtime state about the git-annex branch.
|
||||
-
|
||||
- Copyright 2011-2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
module Annex.BranchState where
|
||||
|
||||
import Common.Annex
|
||||
import Types.BranchState
|
||||
import qualified Annex
|
||||
|
||||
getState :: Annex BranchState
|
||||
getState = Annex.getState Annex.branchstate
|
||||
|
||||
setState :: BranchState -> Annex ()
|
||||
setState state = Annex.changeState $ \s -> s { Annex.branchstate = state }
|
||||
|
||||
changeState :: (BranchState -> BranchState) -> Annex ()
|
||||
changeState changer = setState =<< changer <$> getState
|
||||
|
||||
{- Runs an action to check that the index file exists, if it's not been
|
||||
- checked before in this run of git-annex. -}
|
||||
checkIndexOnce :: Annex () -> Annex ()
|
||||
checkIndexOnce a = unlessM (indexChecked <$> getState) $ do
|
||||
a
|
||||
changeState $ \s -> s { indexChecked = True }
|
||||
|
||||
{- Runs an action to update the branch, if it's not been updated before
|
||||
- in this run of git-annex. -}
|
||||
runUpdateOnce :: Annex () -> Annex ()
|
||||
runUpdateOnce a = unlessM (branchUpdated <$> getState) $ do
|
||||
a
|
||||
disableUpdate
|
||||
|
||||
{- Avoids updating the branch. A useful optimisation when the branch
|
||||
- is known to have not changed, or git-annex won't be relying on info
|
||||
- from it. -}
|
||||
disableUpdate :: Annex ()
|
||||
disableUpdate = changeState $ \s -> s { branchUpdated = True }
|
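For clarity, a small hypothetical use of the run-once guard above; Annex.Branch.update in this commit wraps its work in runUpdateOnce in exactly this way.

-- Hypothetical sketch: an action wrapped in runUpdateOnce only runs the
-- first time it is invoked within a given git-annex process.
import Annex (Annex)
import Annex.BranchState (runUpdateOnce)
import Control.Monad.IO.Class (liftIO)

announceUpdate :: Annex ()
announceUpdate = runUpdateOnce $ liftIO $ putStrLn "updating branch (first call only)"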
62 Annex/CatFile.hs Normal file
@@ -0,0 +1,62 @@
{- git cat-file interface, with handle automatically stored in the Annex monad
|
||||
-
|
||||
- Copyright 2011 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
module Annex.CatFile (
|
||||
catFile,
|
||||
catObject,
|
||||
catObjectDetails,
|
||||
catFileHandle,
|
||||
catKey,
|
||||
catKeyFile,
|
||||
) where
|
||||
|
||||
import qualified Data.ByteString.Lazy as L
|
||||
|
||||
import Common.Annex
|
||||
import qualified Git
|
||||
import qualified Git.CatFile
|
||||
import qualified Annex
|
||||
import Git.Types
|
||||
|
||||
catFile :: Git.Branch -> FilePath -> Annex L.ByteString
|
||||
catFile branch file = do
|
||||
h <- catFileHandle
|
||||
liftIO $ Git.CatFile.catFile h branch file
|
||||
|
||||
catObject :: Git.Ref -> Annex L.ByteString
|
||||
catObject ref = do
|
||||
h <- catFileHandle
|
||||
liftIO $ Git.CatFile.catObject h ref
|
||||
|
||||
catObjectDetails :: Git.Ref -> Annex (Maybe (L.ByteString, Sha))
|
||||
catObjectDetails ref = do
|
||||
h <- catFileHandle
|
||||
liftIO $ Git.CatFile.catObjectDetails h ref
|
||||
|
||||
catFileHandle :: Annex Git.CatFile.CatFileHandle
|
||||
catFileHandle = maybe startup return =<< Annex.getState Annex.catfilehandle
|
||||
where
|
||||
startup = do
|
||||
h <- inRepo Git.CatFile.catFileStart
|
||||
Annex.changeState $ \s -> s { Annex.catfilehandle = Just h }
|
||||
return h
|
||||
|
||||
{- From the Sha or Ref of a symlink back to the key. -}
|
||||
catKey :: Ref -> Annex (Maybe Key)
|
||||
catKey ref = do
|
||||
l <- encodeW8 . L.unpack <$> catObject ref
|
||||
return $ if isLinkToAnnex l
|
||||
then fileKey $ takeFileName l
|
||||
else Nothing
|
||||
|
||||
{- From a file in git back to the key.
|
||||
-
|
||||
- Prefixing the file with ./ makes this work even if in a subdirectory
|
||||
- of a repo. For some reason, HEAD is sometimes needed.
|
||||
-}
|
||||
catKeyFile :: FilePath -> Annex (Maybe Key)
|
||||
catKeyFile f = catKey $ Ref $ "HEAD:./" ++ f
|
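A brief hypothetical example of the lookup path above, mapping a working-tree file to the key (if any) that its symlink points at via the shared cat-file handle.

-- Hypothetical sketch built on catKeyFile from above.
import Annex (Annex)
import Annex.CatFile (catKeyFile)
import Control.Monad.IO.Class (liftIO)
import Data.Maybe (isJust)

reportAnnexed :: FilePath -> Annex ()
reportAnnexed f = do
    mkey <- catKeyFile f
    liftIO $ putStrLn $ f ++ (if isJust mkey then ": annexed" else ": not annexed")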
35 Annex/CheckAttr.hs Normal file
@@ -0,0 +1,35 @@
{- git check-attr interface, with handle automatically stored in the Annex monad
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
module Annex.CheckAttr (
|
||||
checkAttr,
|
||||
checkAttrHandle
|
||||
) where
|
||||
|
||||
import Common.Annex
|
||||
import qualified Git.CheckAttr as Git
|
||||
import qualified Annex
|
||||
|
||||
{- All gitattributes used by git-annex. -}
|
||||
annexAttrs :: [Git.Attr]
|
||||
annexAttrs =
|
||||
[ "annex.backend"
|
||||
, "annex.numcopies"
|
||||
]
|
||||
|
||||
checkAttr :: Git.Attr -> FilePath -> Annex String
|
||||
checkAttr attr file = do
|
||||
h <- checkAttrHandle
|
||||
liftIO $ Git.checkAttr h attr file
|
||||
|
||||
checkAttrHandle :: Annex Git.CheckAttrHandle
|
||||
checkAttrHandle = maybe startup return =<< Annex.getState Annex.checkattrhandle
|
||||
where
|
||||
startup = do
|
||||
h <- inRepo $ Git.checkAttrStart annexAttrs
|
||||
Annex.changeState $ \s -> s { Annex.checkattrhandle = Just h }
|
||||
return h
|
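A one-line hypothetical use of the interface above, querying the annex.backend attribute through the long-running git check-attr process. The value returned for an unset attribute depends on Git.CheckAttr, which is not shown in this diff.

-- Hypothetical sketch: look up which backend a .gitattributes rule requests
-- for a file, via the checkAttr interface above.
import Annex (Annex)
import Annex.CheckAttr (checkAttr)

requestedBackend :: FilePath -> Annex String
requestedBackend = checkAttr "annex.backend"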
482 Annex/Content.hs Normal file
@@ -0,0 +1,482 @@
{- git-annex file content managing
|
||||
-
|
||||
- Copyright 2010,2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
module Annex.Content (
|
||||
inAnnex,
|
||||
inAnnexSafe,
|
||||
lockContent,
|
||||
calcGitLink,
|
||||
getViaTmp,
|
||||
getViaTmpChecked,
|
||||
getViaTmpUnchecked,
|
||||
withTmp,
|
||||
checkDiskSpace,
|
||||
moveAnnex,
|
||||
sendAnnex,
|
||||
prepSendAnnex,
|
||||
removeAnnex,
|
||||
fromAnnex,
|
||||
moveBad,
|
||||
getKeysPresent,
|
||||
saveState,
|
||||
downloadUrl,
|
||||
preseedTmp,
|
||||
freezeContent,
|
||||
thawContent,
|
||||
replaceFile,
|
||||
cleanObjectLoc,
|
||||
) where
|
||||
|
||||
import System.IO.Unsafe (unsafeInterleaveIO)
|
||||
|
||||
import Common.Annex
|
||||
import Logs.Location
|
||||
import qualified Git
|
||||
import qualified Annex
|
||||
import qualified Annex.Queue
|
||||
import qualified Annex.Branch
|
||||
import Utility.DiskFree
|
||||
import Utility.FileMode
|
||||
import qualified Utility.Url as Url
|
||||
import Types.Key
|
||||
import Utility.DataUnits
|
||||
import Utility.CopyFile
|
||||
import Config
|
||||
import Annex.Exception
|
||||
import Git.SharedRepository
|
||||
import Annex.Perms
|
||||
import Annex.Content.Direct
|
||||
import Backend
|
||||
|
||||
{- Checks if a given key's content is currently present. -}
|
||||
inAnnex :: Key -> Annex Bool
|
||||
inAnnex = inAnnex' id False $ liftIO . doesFileExist
|
||||
|
||||
{- Generic inAnnex, handling both indirect and direct mode.
|
||||
-
|
||||
- In direct mode, at least one of the associated files must pass the
|
||||
- check. Additionally, the file must be unmodified.
|
||||
-}
|
||||
inAnnex' :: (a -> Bool) -> a -> (FilePath -> Annex a) -> Key -> Annex a
|
||||
inAnnex' isgood bad check key = withObjectLoc key checkindirect checkdirect
|
||||
where
|
||||
checkindirect loc = do
|
||||
whenM (fromRepo Git.repoIsUrl) $
|
||||
error "inAnnex cannot check remote repo"
|
||||
check loc
|
||||
checkdirect [] = return bad
|
||||
checkdirect (loc:locs) = do
|
||||
r <- check loc
|
||||
if isgood r
|
||||
then ifM (goodContent key loc)
|
||||
( return r
|
||||
, checkdirect locs
|
||||
)
|
||||
else checkdirect locs
|
||||
|
||||
{- A safer check; the key's content must not only be present, but
|
||||
- must also not be in the process of being removed. -}
|
||||
inAnnexSafe :: Key -> Annex (Maybe Bool)
|
||||
inAnnexSafe = inAnnex' (fromMaybe False) (Just False) go
|
||||
where
|
||||
go f = liftIO $ openforlock f >>= check
|
||||
openforlock f = catchMaybeIO $
|
||||
openFd f ReadOnly Nothing defaultFileFlags
|
||||
check Nothing = return is_missing
|
||||
check (Just h) = do
|
||||
v <- getLock h (ReadLock, AbsoluteSeek, 0, 0)
|
||||
closeFd h
|
||||
return $ case v of
|
||||
Just _ -> is_locked
|
||||
Nothing -> is_unlocked
|
||||
is_locked = Nothing
|
||||
is_unlocked = Just True
|
||||
is_missing = Just False
|
||||
|
||||
{- Content is exclusively locked while running an action that might remove
|
||||
- it. (If the content is not present, no locking is done.) -}
|
||||
lockContent :: Key -> Annex a -> Annex a
|
||||
lockContent key a = do
|
||||
file <- inRepo $ gitAnnexLocation key
|
||||
bracketIO (openforlock file >>= lock) unlock a
|
||||
where
|
||||
{- Since files are stored with the write bit disabled, have
|
||||
- to fiddle with permissions to open for an exclusive lock. -}
|
||||
openforlock f = catchMaybeIO $ ifM (doesFileExist f)
|
||||
( withModifiedFileMode f
|
||||
(`unionFileModes` ownerWriteMode)
|
||||
open
|
||||
, open
|
||||
)
|
||||
where
|
||||
open = openFd f ReadWrite Nothing defaultFileFlags
|
||||
lock Nothing = return Nothing
|
||||
lock (Just fd) = do
|
||||
v <- tryIO $ setLock fd (WriteLock, AbsoluteSeek, 0, 0)
|
||||
case v of
|
||||
Left _ -> error "content is locked"
|
||||
Right _ -> return $ Just fd
|
||||
unlock Nothing = noop
|
||||
unlock (Just l) = closeFd l
|
||||
|
||||
{- Calculates the relative path to use to link a file to a key. -}
|
||||
calcGitLink :: FilePath -> Key -> Annex FilePath
|
||||
calcGitLink file key = do
|
||||
cwd <- liftIO getCurrentDirectory
|
||||
let absfile = fromMaybe whoops $ absNormPath cwd file
|
||||
loc <- inRepo $ gitAnnexLocation key
|
||||
return $ relPathDirToFile (parentDir absfile) loc
|
||||
where
|
||||
whoops = error $ "unable to normalize " ++ file
|
||||
|
||||
{- Runs an action, passing it a temporary filename to get,
|
||||
- and if the action succeeds, moves the temp file into
|
||||
- the annex as a key's content. -}
|
||||
getViaTmp :: Key -> (FilePath -> Annex Bool) -> Annex Bool
|
||||
getViaTmp = getViaTmpChecked (return True)
|
||||
|
||||
{- Like getViaTmp, but does not check that there is enough disk space
|
||||
- for the incoming key. For use when the key content is already on disk
|
||||
- and not being copied into place. -}
|
||||
getViaTmpUnchecked :: Key -> (FilePath -> Annex Bool) -> Annex Bool
|
||||
getViaTmpUnchecked = finishGetViaTmp (return True)
|
||||
|
||||
getViaTmpChecked :: Annex Bool -> Key -> (FilePath -> Annex Bool) -> Annex Bool
|
||||
getViaTmpChecked check key action = do
|
||||
tmp <- fromRepo $ gitAnnexTmpLocation key
|
||||
|
||||
-- Check that there is enough free disk space.
|
||||
-- When the temp file already exists, count the space
|
||||
-- it is using as free.
|
||||
e <- liftIO $ doesFileExist tmp
|
||||
alreadythere <- if e
|
||||
then fromIntegral . fileSize <$> liftIO (getFileStatus tmp)
|
||||
else return 0
|
||||
ifM (checkDiskSpace Nothing key alreadythere)
|
||||
( do
|
||||
when e $ thawContent tmp
|
||||
finishGetViaTmp check key action
|
||||
, return False
|
||||
)
|
||||
|
||||
finishGetViaTmp :: Annex Bool -> Key -> (FilePath -> Annex Bool) -> Annex Bool
|
||||
finishGetViaTmp check key action = do
|
||||
tmpfile <- prepTmp key
|
||||
ifM (action tmpfile <&&> check)
|
||||
( do
|
||||
moveAnnex key tmpfile
|
||||
logStatus key InfoPresent
|
||||
return True
|
||||
, do
|
||||
-- the tmp file is left behind, in case caller wants
|
||||
-- to resume its transfer
|
||||
return False
|
||||
)
|
||||
|
||||
prepTmp :: Key -> Annex FilePath
|
||||
prepTmp key = do
|
||||
tmp <- fromRepo $ gitAnnexTmpLocation key
|
||||
createAnnexDirectory (parentDir tmp)
|
||||
return tmp
|
||||
|
||||
{- Creates a temp file, runs an action on it, and cleans up the temp file. -}
|
||||
withTmp :: Key -> (FilePath -> Annex a) -> Annex a
|
||||
withTmp key action = do
|
||||
tmp <- prepTmp key
|
||||
res <- action tmp
|
||||
liftIO $ nukeFile tmp
|
||||
return res
|
||||
|
||||
{- Checks that there is disk space available to store a given key,
|
||||
- in a destination (or the annex), printing a warning if not. -}
|
||||
checkDiskSpace :: Maybe FilePath -> Key -> Integer -> Annex Bool
|
||||
checkDiskSpace destination key alreadythere = do
|
||||
reserve <- annexDiskReserve <$> Annex.getGitConfig
|
||||
free <- liftIO . getDiskFree =<< dir
|
||||
force <- Annex.getState Annex.force
|
||||
case (free, keySize key) of
|
||||
(Just have, Just need) -> do
|
||||
let ok = (need + reserve <= have + alreadythere) || force
|
||||
unless ok $ do
|
||||
liftIO $ print (need, reserve, have, alreadythere)
|
||||
needmorespace (need + reserve - have - alreadythere)
|
||||
return ok
|
||||
_ -> return True
|
||||
where
|
||||
dir = maybe (fromRepo gitAnnexDir) return destination
|
||||
needmorespace n =
|
||||
warning $ "not enough free space, need " ++
|
||||
roughSize storageUnits True n ++
|
||||
" more" ++ forcemsg
|
||||
forcemsg = " (use --force to override this check or adjust annex.diskreserve)"
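To make the acceptance rule explicit, here is a standalone, hypothetical restatement of the comparison checkDiskSpace performs above; it is not a new function introduced by this commit.

-- Hypothetical restatement of the rule above: a transfer fits when the
-- needed size plus annex.diskreserve does not exceed the free space plus
-- whatever portion of the key is already present, unless --force is set.
wouldFit :: Integer -> Integer -> Integer -> Integer -> Bool -> Bool
wouldFit need reserve have alreadythere force =
    (need + reserve <= have + alreadythere) || force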
|
||||
|
||||
{- Moves a key's content into .git/annex/objects/
|
||||
-
|
||||
- In direct mode, moves it to the associated file, or files.
|
||||
-
|
||||
- What if the key there already has content? This could happen for
|
||||
- various reasons; perhaps the same content is being annexed again.
|
||||
- Perhaps there has been a hash collision generating the keys.
|
||||
-
|
||||
- The current strategy is to assume that in this case it's safe to delete
|
||||
- one of the two copies of the content; and the one already in the annex
|
||||
- is left there, assuming it's the original, canonical copy.
|
||||
-
|
||||
- I considered being more paranoid, and checking that both files had
|
||||
- the same content. Decided against it because A) users explicitly choose
|
||||
- a backend based on its hashing properties and so if they're dealing
|
||||
- with colliding files it's their own fault and B) adding such a check
|
||||
- would not catch all cases of colliding keys. For example, perhaps
|
||||
- a remote has a key; if it's then added again with different content then
|
||||
- the overall system now has two different pieces of content for that
|
||||
- key, and one of them will probably get deleted later. So, adding the
|
||||
- check here would only raise expectations that git-annex cannot truly
|
||||
- meet.
|
||||
-}
|
||||
moveAnnex :: Key -> FilePath -> Annex ()
|
||||
moveAnnex key src = withObjectLoc key storeobject storedirect
|
||||
where
|
||||
storeobject dest = ifM (liftIO $ doesFileExist dest)
|
||||
( liftIO $ removeFile src
|
||||
, do
|
||||
createContentDir dest
|
||||
liftIO $ moveFile src dest
|
||||
freezeContent dest
|
||||
freezeContentDir dest
|
||||
)
|
||||
storedirect fs = storedirect' =<< filterM validsymlink fs
|
||||
validsymlink f = (==) (Just key) <$> isAnnexLink f
|
||||
|
||||
storedirect' [] = storeobject =<< inRepo (gitAnnexLocation key)
|
||||
storedirect' (dest:fs) = do
|
||||
updateInodeCache key src
|
||||
thawContent src
|
||||
replaceFile dest $ liftIO . moveFile src
|
||||
forM_ fs $ \f -> replaceFile f $
|
||||
void . liftIO . copyFileExternal dest
|
||||
|
||||
{- Replaces any existing file with a new version, by running an action.
|
||||
- First, makes sure the file is deleted. Or, if it didn't already exist,
|
||||
- makes sure the parent directory exists. -}
|
||||
replaceFile :: FilePath -> (FilePath -> Annex ()) -> Annex ()
|
||||
replaceFile file a = do
|
||||
liftIO $ do
|
||||
r <- tryIO $ removeFile file
|
||||
case r of
|
||||
Left _ -> createDirectoryIfMissing True $ parentDir file
|
||||
_ -> noop
|
||||
a file
|
||||
|
||||
{- Runs an action to transfer an object's content.
|
||||
-
|
||||
- In direct mode, it's possible for the file to change as it's being sent.
|
||||
- If this happens, runs the rollback action and returns False. The
|
||||
- rollback action should remove the data that was transferred.
|
||||
-}
|
||||
sendAnnex :: Key -> Annex () -> (FilePath -> Annex Bool) -> Annex Bool
|
||||
sendAnnex key rollback sendobject = go =<< prepSendAnnex key
|
||||
where
|
||||
go Nothing = return False
|
||||
go (Just (f, checksuccess)) = do
|
||||
r <- sendobject f
|
||||
ifM checksuccess
|
||||
( return r
|
||||
, do
|
||||
rollback
|
||||
return False
|
||||
)
|
||||
|
||||
{- Returns a file that contains an object's content,
|
||||
- and a check to run after the transfer is complete.
|
||||
-
|
||||
- In direct mode, it's possible for the file to change as it's being sent,
|
||||
- and the check detects this case and returns False.
|
||||
-}
|
||||
prepSendAnnex :: Key -> Annex (Maybe (FilePath, Annex Bool))
|
||||
prepSendAnnex key = withObjectLoc key indirect direct
|
||||
where
|
||||
indirect f = return $ Just (f, return True)
|
||||
direct [] = return Nothing
|
||||
direct (f:fs) = do
|
||||
cache <- recordedInodeCache key
|
||||
-- check that we have a good file
|
||||
ifM (sameInodeCache f cache)
|
||||
( return $ Just (f, sameInodeCache f cache)
|
||||
, direct fs
|
||||
)
|
||||
|
||||
{- Performs an action, passing it the location to use for a key's content.
|
||||
-
|
||||
- In direct mode, the associated files will be passed. But, if there are
|
||||
- no associated files for a key, the indirect mode action will be
|
||||
- performed instead. -}
|
||||
withObjectLoc :: Key -> (FilePath -> Annex a) -> ([FilePath] -> Annex a) -> Annex a
|
||||
withObjectLoc key indirect direct = ifM isDirect
|
||||
( do
|
||||
fs <- associatedFiles key
|
||||
if null fs
|
||||
then goindirect
|
||||
else direct fs
|
||||
, goindirect
|
||||
)
|
||||
where
|
||||
goindirect = indirect =<< inRepo (gitAnnexLocation key)
|
||||
|
||||
cleanObjectLoc :: Key -> Annex ()
|
||||
cleanObjectLoc key = do
|
||||
file <- inRepo $ gitAnnexLocation key
|
||||
unlessM crippledFileSystem $
|
||||
void $ liftIO $ catchMaybeIO $ allowWrite $ parentDir file
|
||||
liftIO $ removeparents file (3 :: Int)
|
||||
where
|
||||
removeparents _ 0 = noop
|
||||
removeparents file n = do
|
||||
let dir = parentDir file
|
||||
maybe noop (const $ removeparents dir (n-1))
|
||||
<=< catchMaybeIO $ removeDirectory dir
|
||||
|
||||
{- Removes a key's file from .git/annex/objects/
|
||||
-
|
||||
- In direct mode, deletes the associated file, or files, and replaces
|
||||
- them with symlinks. -}
|
||||
removeAnnex :: Key -> Annex ()
|
||||
removeAnnex key = withObjectLoc key remove removedirect
|
||||
where
|
||||
remove file = do
|
||||
unlessM crippledFileSystem $
|
||||
liftIO $ allowWrite $ parentDir file
|
||||
liftIO $ nukeFile file
|
||||
removeInodeCache key
|
||||
cleanObjectLoc key
|
||||
removedirect fs = do
|
||||
cache <- recordedInodeCache key
|
||||
removeInodeCache key
|
||||
mapM_ (resetfile cache) fs
|
||||
resetfile cache f = whenM (sameInodeCache f cache) $ do
|
||||
l <- calcGitLink f key
|
||||
top <- fromRepo Git.repoPath
|
||||
cwd <- liftIO getCurrentDirectory
|
||||
let top' = fromMaybe top $ absNormPath cwd top
|
||||
let l' = relPathDirToFile top' (fromMaybe l $ absNormPath top' l)
|
||||
replaceFile f $ const $
|
||||
makeAnnexLink l' f
|
||||
|
||||
{- Moves a key's file out of .git/annex/objects/ -}
|
||||
fromAnnex :: Key -> FilePath -> Annex ()
|
||||
fromAnnex key dest = do
|
||||
file <- inRepo $ gitAnnexLocation key
|
||||
unlessM crippledFileSystem $
|
||||
liftIO $ allowWrite $ parentDir file
|
||||
thawContent file
|
||||
liftIO $ moveFile file dest
|
||||
cleanObjectLoc key
|
||||
|
||||
{- Moves a key out of .git/annex/objects/ into .git/annex/bad, and
|
||||
- returns the file it was moved to. -}
|
||||
moveBad :: Key -> Annex FilePath
|
||||
moveBad key = do
|
||||
src <- inRepo $ gitAnnexLocation key
|
||||
bad <- fromRepo gitAnnexBadDir
|
||||
let dest = bad </> takeFileName src
|
||||
createAnnexDirectory (parentDir dest)
|
||||
unlessM crippledFileSystem $
|
||||
liftIO $ allowWrite (parentDir src)
|
||||
liftIO $ moveFile src dest
|
||||
cleanObjectLoc key
|
||||
logStatus key InfoMissing
|
||||
return dest
|
||||
|
||||
{- List of keys whose content exists in the annex. -}
|
||||
getKeysPresent :: Annex [Key]
|
||||
getKeysPresent = do
|
||||
direct <- isDirect
|
||||
dir <- fromRepo gitAnnexObjectDir
|
||||
liftIO $ traverse direct (2 :: Int) dir
|
||||
where
|
||||
traverse direct depth dir = do
|
||||
contents <- catchDefaultIO [] (dirContents dir)
|
||||
if depth == 0
|
||||
then do
|
||||
contents' <- filterM (present direct) contents
|
||||
let keys = mapMaybe (fileKey . takeFileName) contents'
|
||||
continue keys []
|
||||
else do
|
||||
let deeper = traverse direct (depth - 1)
|
||||
continue [] (map deeper contents)
|
||||
continue keys [] = return keys
|
||||
continue keys (a:as) = do
|
||||
{- Force lazy traversal with unsafeInterleaveIO. -}
|
||||
morekeys <- unsafeInterleaveIO a
|
||||
continue (morekeys++keys) as
|
||||
|
||||
{- In indirect mode, look for the key. In direct mode,
|
||||
- the inode cache file is only present when a key's content
|
||||
- is present. -}
|
||||
present False d = doesFileExist $ contentfile d
|
||||
present True d = doesFileExist $ contentfile d ++ ".cache"
|
||||
contentfile d = d </> takeFileName d
|
||||
|
||||
{- Things to do to record changes to content when shutting down.
|
||||
-
|
||||
- It's acceptable to avoid committing changes to the branch,
|
||||
- especially if performing a short-lived action.
|
||||
-}
|
||||
saveState :: Bool -> Annex ()
|
||||
saveState nocommit = doSideAction $ do
|
||||
Annex.Queue.flush
|
||||
unless nocommit $
|
||||
whenM (annexAlwaysCommit <$> Annex.getGitConfig) $
|
||||
Annex.Branch.commit "update"
|
||||
|
||||
{- Downloads content from any of a list of urls. -}
|
||||
downloadUrl :: [Url.URLString] -> FilePath -> Annex Bool
|
||||
downloadUrl urls file = do
|
||||
o <- map Param . annexWebOptions <$> Annex.getGitConfig
|
||||
headers <- getHttpHeaders
|
||||
liftIO $ anyM (\u -> Url.download u headers o file) urls
|
||||
|
||||
{- Copies a key's content, when present, to a temp file.
|
||||
- This is used to speed up some rsyncs. -}
|
||||
preseedTmp :: Key -> FilePath -> Annex Bool
|
||||
preseedTmp key file = go =<< inAnnex key
|
||||
where
|
||||
go False = return False
|
||||
go True = do
|
||||
ok <- copy
|
||||
when ok $ thawContent file
|
||||
return ok
|
||||
copy = ifM (liftIO $ doesFileExist file)
|
||||
( return True
|
||||
, do
|
||||
s <- inRepo $ gitAnnexLocation key
|
||||
liftIO $ copyFileExternal s file
|
||||
)
|
||||
|
||||
{- Blocks writing to an annexed file. The file is made unwritable
|
||||
- to avoid accidental edits. core.sharedRepository may change
|
||||
- who can read it. -}
|
||||
freezeContent :: FilePath -> Annex ()
|
||||
freezeContent file = unlessM crippledFileSystem $
|
||||
liftIO . go =<< fromRepo getSharedRepository
|
||||
where
|
||||
go GroupShared = modifyFileMode file $
|
||||
removeModes writeModes .
|
||||
addModes [ownerReadMode, groupReadMode]
|
||||
go AllShared = modifyFileMode file $
|
||||
removeModes writeModes .
|
||||
addModes readModes
|
||||
go _ = preventWrite file
|
||||
|
||||
{- Allows writing to an annexed file that freezeContent was called on
|
||||
- before. -}
|
||||
thawContent :: FilePath -> Annex ()
|
||||
thawContent file = unlessM crippledFileSystem $
|
||||
liftIO . go =<< fromRepo getSharedRepository
|
||||
where
|
||||
go GroupShared = groupWriteRead file
|
||||
go AllShared = groupWriteRead file
|
||||
go _ = allowWrite file
|
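As an illustration of how freezeContent and thawContent pair up, a hypothetical wrapper (not part of this commit) that temporarily allows writes around an action:

-- Hypothetical sketch: thaw an object file, run an action on it, then
-- restore the read-only state with freezeContent. For brevity this does
-- not re-freeze the file if the action throws an exception.
import Annex (Annex)
import Annex.Content (freezeContent, thawContent)

withThawed :: FilePath -> Annex a -> Annex a
withThawed file action = do
    thawContent file
    r <- action
    freezeContent file
    return r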
198 Annex/Content/Direct.hs Normal file
@@ -0,0 +1,198 @@
{- git-annex file content managing for direct mode
|
||||
-
|
||||
- Copyright 2012-2013 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
module Annex.Content.Direct (
|
||||
associatedFiles,
|
||||
removeAssociatedFile,
|
||||
addAssociatedFile,
|
||||
goodContent,
|
||||
recordedInodeCache,
|
||||
updateInodeCache,
|
||||
writeInodeCache,
|
||||
sameInodeCache,
|
||||
sameFileStatus,
|
||||
removeInodeCache,
|
||||
toInodeCache,
|
||||
inodesChanged,
|
||||
createInodeSentinalFile,
|
||||
) where
|
||||
|
||||
import Common.Annex
|
||||
import qualified Annex
|
||||
import Annex.Perms
|
||||
import qualified Git
|
||||
import Utility.TempFile
|
||||
import Logs.Location
|
||||
import Utility.InodeCache
|
||||
|
||||
{- Absolute FilePaths of Files in the tree that are associated with a key. -}
|
||||
associatedFiles :: Key -> Annex [FilePath]
|
||||
associatedFiles key = do
|
||||
files <- associatedFilesRelative key
|
||||
top <- fromRepo Git.repoPath
|
||||
return $ map (top </>) files
|
||||
|
||||
{- List of files in the tree that are associated with a key, relative to
|
||||
- the top of the repo. -}
|
||||
associatedFilesRelative :: Key -> Annex [FilePath]
|
||||
associatedFilesRelative key = do
|
||||
mapping <- inRepo $ gitAnnexMapping key
|
||||
liftIO $ catchDefaultIO [] $ do
|
||||
h <- openFile mapping ReadMode
|
||||
fileEncoding h
|
||||
lines <$> hGetContents h
|
||||
|
||||
{- Changes the associated files information for a key, applying a
|
||||
- transformation to the list. Returns new associatedFiles value. -}
|
||||
changeAssociatedFiles :: Key -> ([FilePath] -> [FilePath]) -> Annex [FilePath]
changeAssociatedFiles key transform = do
    mapping <- inRepo $ gitAnnexMapping key
    files <- associatedFilesRelative key
    let files' = transform files
    when (files /= files') $ do
        createContentDir mapping
        liftIO $ viaTmp write mapping $ unlines files'
    top <- fromRepo Git.repoPath
    return $ map (top </>) files'
  where
    write file content = do
        h <- openFile file WriteMode
        fileEncoding h
        hPutStr h content
        hClose h

{- Removes an associated file. Returns new associatedFiles value. -}
removeAssociatedFile :: Key -> FilePath -> Annex [FilePath]
removeAssociatedFile key file = do
    file' <- normaliseAssociatedFile file
    fs <- changeAssociatedFiles key $ filter (/= file')
    when (null fs) $
        logStatus key InfoMissing
    return fs

{- Adds an associated file. Returns new associatedFiles value. -}
addAssociatedFile :: Key -> FilePath -> Annex [FilePath]
addAssociatedFile key file = do
    file' <- normaliseAssociatedFile file
    changeAssociatedFiles key $ \files ->
        if file' `elem` files
            then files
            else file':files

{- Associated files are always stored relative to the top of the repository.
 - The input FilePath is relative to the CWD. -}
normaliseAssociatedFile :: FilePath -> Annex FilePath
normaliseAssociatedFile file = do
    top <- fromRepo Git.repoPath
    liftIO $ relPathDirToFile top <$> absPath file

{- Checks if a file in the tree, associated with a key, has not been modified.
 -
 - To avoid needing to fsck the file's content, which can involve an
 - expensive checksum, this relies on a cache that contains the file's
 - expected mtime and inode.
 -}
goodContent :: Key -> FilePath -> Annex Bool
goodContent key file = sameInodeCache file =<< recordedInodeCache key

{- Gets the recorded inode cache for a key. -}
recordedInodeCache :: Key -> Annex (Maybe InodeCache)
recordedInodeCache key = withInodeCacheFile key $ \f ->
    liftIO $ catchDefaultIO Nothing $ readInodeCache <$> readFile f

{- Stores a cache of attributes for a file that is associated with a key. -}
updateInodeCache :: Key -> FilePath -> Annex ()
updateInodeCache key file = maybe noop (writeInodeCache key)
    =<< liftIO (genInodeCache file)

{- Writes a cache for a key. -}
writeInodeCache :: Key -> InodeCache -> Annex ()
writeInodeCache key cache = withInodeCacheFile key $ \f -> do
    createContentDir f
    liftIO $ writeFile f $ showInodeCache cache

{- Removes an inode cache. -}
removeInodeCache :: Key -> Annex ()
removeInodeCache key = withInodeCacheFile key $ \f -> do
    createContentDir f -- also thaws directory
    liftIO $ nukeFile f

withInodeCacheFile :: Key -> (FilePath -> Annex a) -> Annex a
withInodeCacheFile key a = a =<< inRepo (gitAnnexInodeCache key)

{- Checks if an InodeCache matches the current version of a file. -}
sameInodeCache :: FilePath -> Maybe InodeCache -> Annex Bool
sameInodeCache _ Nothing = return False
sameInodeCache file (Just old) = go =<< liftIO (genInodeCache file)
  where
    go Nothing = return False
    go (Just curr) = compareInodeCaches curr old

{- Checks if a FileStatus matches the recorded InodeCache of a file. -}
sameFileStatus :: Key -> FileStatus -> Annex Bool
sameFileStatus key status = do
    old <- recordedInodeCache key
    let curr = toInodeCache status
    r <- case (old, curr) of
        (Just o, Just c) -> compareInodeCaches o c
        (Nothing, Nothing) -> return True
        _ -> return False
    return r

{- If the inodes have changed, only the size and mtime are compared. -}
compareInodeCaches :: InodeCache -> InodeCache -> Annex Bool
compareInodeCaches x y
    | x == y = return True
    | otherwise = ifM inodesChanged
        ( return $ compareWeak x y
        , return False
        )

{- Some filesystems get new inodes each time they are mounted.
 - In order to work on such a filesystem, a sentinal file is used to detect
 - when the inodes have changed.
 -
 - If the sentinal file does not exist, we have to assume that the
 - inodes have changed.
 -}
inodesChanged :: Annex Bool
inodesChanged = maybe calc return =<< Annex.getState Annex.inodeschanged
  where
    calc = do
        scache <- liftIO . genInodeCache
            =<< fromRepo gitAnnexInodeSentinal
        scached <- readInodeSentinalFile
        let changed = case (scache, scached) of
                (Just c1, Just c2) -> c1 /= c2
                _ -> True
        Annex.changeState $ \s -> s { Annex.inodeschanged = Just changed }
        return changed

readInodeSentinalFile :: Annex (Maybe InodeCache)
readInodeSentinalFile = do
    sentinalcachefile <- fromRepo gitAnnexInodeSentinalCache
    liftIO $ catchDefaultIO Nothing $
        readInodeCache <$> readFile sentinalcachefile

writeInodeSentinalFile :: Annex ()
writeInodeSentinalFile = do
    sentinalfile <- fromRepo gitAnnexInodeSentinal
    sentinalcachefile <- fromRepo gitAnnexInodeSentinalCache
    liftIO $ writeFile sentinalfile ""
    liftIO $ maybe noop (writeFile sentinalcachefile . showInodeCache)
        =<< genInodeCache sentinalfile

{- The sentinal file is only created when first initializing a repository.
 - If there are any annexed objects in the repository already, creating
 - the file would invalidate their inode caches. -}
createInodeSentinalFile :: Annex ()
createInodeSentinalFile =
    unlessM (alreadyexists <||> hasobjects)
        writeInodeSentinalFile
  where
    alreadyexists = isJust <$> readInodeSentinalFile
    hasobjects = liftIO . doesDirectoryExist =<< fromRepo gitAnnexObjectDir
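The inode cache above keys off stat information rather than file content. A minimal standalone sketch of the same idea, using only System.Posix.Files from the unix package; the Cache type, cacheFile and weakEq names are illustrative stand-ins, not git-annex's own API:

import System.Posix.Files (getFileStatus, fileID, fileSize, modificationTime)
import System.Posix.Types (FileID, FileOffset, EpochTime)

-- A stripped-down stand-in for git-annex's InodeCache.
data Cache = Cache FileID FileOffset EpochTime
    deriving (Eq, Show)

cacheFile :: FilePath -> IO Cache
cacheFile f = do
    s <- getFileStatus f
    return $ Cache (fileID s) (fileSize s) (modificationTime s)

-- Weak comparison, for filesystems whose inode numbers change on every
-- mount (what compareWeak handles above): ignore the inode and compare
-- only size and mtime.
weakEq :: Cache -> Cache -> Bool
weakEq (Cache _ s1 m1) (Cache _ s2 m2) = s1 == s2 && m1 == m2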
209 Annex/Direct.hs Normal file
@@ -0,0 +1,209 @@
{- git-annex direct mode
 -
 - Copyright 2012 Joey Hess <joey@kitenet.net>
 -
 - Licensed under the GNU GPL version 3 or higher.
 -}

module Annex.Direct where

import Common.Annex
import qualified Git
import qualified Git.LsFiles
import qualified Git.Merge
import qualified Git.DiffTree as DiffTree
import Git.Sha
import Git.Types
import Annex.CatFile
import Utility.FileMode
import qualified Annex.Queue
import Logs.Location
import Backend
import Types.KeySource
import Annex.Content
import Annex.Content.Direct
import Annex.Link
import Utility.InodeCache
import Utility.CopyFile

{- Uses git ls-files to find files that need to be committed, and stages
 - them into the index. Returns True if some changes were staged. -}
stageDirect :: Annex Bool
stageDirect = do
    Annex.Queue.flush
    top <- fromRepo Git.repoPath
    (l, cleanup) <- inRepo $ Git.LsFiles.stagedDetails [top]
    forM_ l go
    void $ liftIO cleanup
    staged <- Annex.Queue.size
    Annex.Queue.flush
    return $ staged /= 0
  where
    {- Determine what kind of modified or deleted file this is, as
     - efficiently as we can, by getting any key that's associated
     - with it in git, as well as its stat info. -}
    go (file, Just sha) = do
        mkey <- catKey sha
        mstat <- liftIO $ catchMaybeIO $ getSymbolicLinkStatus file
        case (mkey, mstat, toInodeCache =<< mstat) of
            (Just key, _, Just cache) -> do
                {- All direct mode files will show as
                 - modified, so compare the cache to see if
                 - it really was. -}
                oldcache <- recordedInodeCache key
                when (oldcache /= Just cache) $
                    modifiedannexed file key cache
            (Just key, Nothing, _) -> deletedannexed file key
            (Nothing, Nothing, _) -> deletegit file
            (_, Just _, _) -> addgit file
    go _ = noop

    modifiedannexed file oldkey cache = do
        void $ removeAssociatedFile oldkey file
        void $ addDirect file cache

    deletedannexed file key = do
        void $ removeAssociatedFile key file
        deletegit file

    addgit file = Annex.Queue.addCommand "add" [Param "-f"] [file]

    deletegit file = Annex.Queue.addCommand "rm" [Param "-f"] [file]

{- Adds a file to the annex in direct mode. Can fail, if the file is
 - modified or deleted while it's being added. -}
addDirect :: FilePath -> InodeCache -> Annex Bool
addDirect file cache = do
    showStart "add" file
    let source = KeySource
            { keyFilename = file
            , contentLocation = file
            , inodeCache = Just cache
            }
    got =<< genKey source =<< chooseBackend file
  where
    got Nothing = do
        showEndFail
        return False
    got (Just (key, _)) = ifM (sameInodeCache file $ Just cache)
        ( do
            stageSymlink file =<< hashSymlink =<< calcGitLink file key
            writeInodeCache key cache
            void $ addAssociatedFile key file
            logStatus key InfoPresent
            showEndOk
            return True
        , do
            showEndFail
            return False
        )

{- In direct mode, git merge would usually refuse to do anything, since it
 - sees present direct mode files as type changed files. To avoid this,
 - merge is run with the work tree set to a temp directory.
 -
 - This should only be used once any changes to the real working tree have
 - already been committed, because it overwrites files in the working tree.
 -}
mergeDirect :: FilePath -> Git.Ref -> Git.Repo -> IO Bool
mergeDirect d branch g = do
    createDirectoryIfMissing True d
    let g' = g { location = Local { gitdir = Git.localGitDir g, worktree = Just d } }
    Git.Merge.mergeNonInteractive branch g'

{- Cleans up after a direct mode merge. The merge must have been committed,
 - and the commit sha passed in, along with the old sha of the tree
 - before the merge. Uses git diff-tree to find files that changed between
 - the two shas, and applies those changes to the work tree.
 -}
mergeDirectCleanup :: FilePath -> Git.Ref -> Git.Ref -> Annex ()
mergeDirectCleanup d oldsha newsha = do
    (items, cleanup) <- inRepo $ DiffTree.diffTreeRecursive oldsha newsha
    forM_ items updated
    void $ liftIO $ cleanup
    liftIO $ removeDirectoryRecursive d
  where
    updated item = do
        go DiffTree.srcsha DiffTree.srcmode moveout moveout_raw
        go DiffTree.dstsha DiffTree.dstmode movein movein_raw
      where
        go getsha getmode a araw
            | getsha item == nullSha = noop
            | isSymLink (getmode item) =
                maybe (araw f) (\k -> void $ a k f)
                    =<< catKey (getsha item)
            | otherwise = araw f
        f = DiffTree.file item

    moveout = removeDirect

    {- Files deleted by the merge are removed from the work tree.
     - Empty work tree directories are removed, per git behavior. -}
    moveout_raw f = liftIO $ do
        nukeFile f
        void $ tryIO $ removeDirectory $ parentDir f

    {- The symlink is created from the key, rather than moving in the
     - symlink created in the temp directory by the merge. This is because
     - a conflicted merge will write to some other file in the temp
     - directory.
     -
     - Symlinks are replaced with their content, if it's available. -}
    movein k f = do
        l <- calcGitLink f k
        replaceFile f $
            makeAnnexLink l
        toDirect k f

    {- Any new, modified, or renamed files were written to the temp
     - directory by the merge, and are moved to the real work tree. -}
    movein_raw f = liftIO $ do
        createDirectoryIfMissing True $ parentDir f
        void $ tryIO $ rename (d </> f) f

{- If possible, converts a symlink in the working tree into a direct
 - mode file. -}
toDirect :: Key -> FilePath -> Annex ()
toDirect k f = fromMaybe noop =<< toDirectGen k f

toDirectGen :: Key -> FilePath -> Annex (Maybe (Annex ()))
toDirectGen k f = do
    loc <- inRepo $ gitAnnexLocation k
    absf <- liftIO $ absPath f
    locs <- filter (/= absf) <$> addAssociatedFile k f
    case locs of
        [] -> ifM (liftIO $ doesFileExist loc)
            ( return $ Just $ do
                {- Move content from annex to direct file. -}
                updateInodeCache k loc
                thawContent loc
                replaceFile f $
                    liftIO . moveFile loc
            , return Nothing
            )
        (loc':_) -> ifM (isNothing <$> getAnnexLinkTarget loc')
            {- Another direct file has the content; copy it. -}
            ( return $ Just $
                replaceFile f $
                    void . liftIO . copyFileExternal loc'
            , return Nothing
            )

{- Removes a direct mode file, while retaining its content. -}
removeDirect :: Key -> FilePath -> Annex ()
removeDirect k f = do
    locs <- removeAssociatedFile k f
    when (null locs) $
        whenM (isNothing <$> getAnnexLinkTarget f) $
            moveAnnex k f
    liftIO $ do
        nukeFile f
        void $ tryIO $ removeDirectory $ parentDir f

{- Called when a direct mode file has been changed. Its old content may be
 - lost. -}
changedDirect :: Key -> FilePath -> Annex ()
changedDirect oldk f = do
    locs <- removeAssociatedFile oldk f
    whenM (pure (null locs) <&&> not <$> inAnnex oldk) $
        logStatus oldk InfoMissing
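The trick mergeDirect relies on, pointing git at a scratch work tree so the real direct-mode files are untouched, can be sketched outside the Annex monad as well. A rough standalone illustration using the process package; the paths and branch name are example values only, and this is not the code path git-annex itself takes:

import System.Directory (createDirectoryIfMissing)
import System.Process (callProcess)

-- Merge into a scratch work tree; the real work tree is only updated
-- afterwards, by applying the diff (what mergeDirectCleanup does above).
-- "/tmp/annex-merge" and "refs/heads/synced/master" are illustrative.
mergeIntoScratch :: IO ()
mergeIntoScratch = do
    createDirectoryIfMissing True "/tmp/annex-merge"
    callProcess "git"
        [ "--work-tree=/tmp/annex-merge"
        , "merge", "--no-edit", "refs/heads/synced/master"
        ]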
32 Annex/Exception.hs Normal file
@@ -0,0 +1,32 @@
{- exception handling in the git-annex monad
 -
 - Copyright 2011 Joey Hess <joey@kitenet.net>
 -
 - Licensed under the GNU GPL version 3 or higher.
 -}

module Annex.Exception (
    bracketIO,
    handle,
    tryAnnex,
    throw,
) where

import Control.Exception.Lifted (handle, try)
import Control.Monad.Trans.Control (liftBaseOp)
import Control.Exception hiding (handle, try, throw)

import Common.Annex

{- Runs an Annex action, with setup and cleanup both in the IO monad. -}
bracketIO :: IO c -> (c -> IO b) -> Annex a -> Annex a
bracketIO setup cleanup go =
    liftBaseOp (Control.Exception.bracket setup cleanup) (const go)

{- try in the Annex monad -}
tryAnnex :: Annex a -> Annex (Either SomeException a)
tryAnnex = try

{- Throws an exception in the Annex monad. -}
throw :: Control.Exception.Exception e => e -> Annex a
throw = liftIO . throwIO
89 Annex/Journal.hs Normal file
@@ -0,0 +1,89 @@
{- management of the git-annex journal
 -
 - The journal is used to queue up changes before they are committed to the
 - git-annex branch. Among other things, it ensures that if git-annex is
 - interrupted, its recorded data is not lost.
 -
 - Copyright 2011 Joey Hess <joey@kitenet.net>
 -
 - Licensed under the GNU GPL version 3 or higher.
 -}

module Annex.Journal where

import System.IO.Binary

import Common.Annex
import Annex.Exception
import qualified Git
import Annex.Perms

{- Records content for a file in the branch to the journal.
 -
 - Using the journal, rather than immediately staging content to the index,
 - avoids git needing to rewrite the index after every change. -}
setJournalFile :: FilePath -> String -> Annex ()
setJournalFile file content = do
    createAnnexDirectory =<< fromRepo gitAnnexJournalDir
    createAnnexDirectory =<< fromRepo gitAnnexTmpDir
    -- journal file is written atomically
    jfile <- fromRepo $ journalFile file
    tmp <- fromRepo gitAnnexTmpDir
    let tmpfile = tmp </> takeFileName jfile
    liftIO $ do
        writeBinaryFile tmpfile content
        moveFile tmpfile jfile

{- Gets any journalled content for a file in the branch. -}
getJournalFile :: FilePath -> Annex (Maybe String)
getJournalFile file = inRepo $ \g -> catchMaybeIO $
    readFileStrict $ journalFile file g

{- List of files that have updated content in the journal. -}
getJournalledFiles :: Annex [FilePath]
getJournalledFiles = map fileJournal <$> getJournalFiles

{- List of existing journal files. -}
getJournalFiles :: Annex [FilePath]
getJournalFiles = do
    g <- gitRepo
    fs <- liftIO $ catchDefaultIO [] $
        getDirectoryContents $ gitAnnexJournalDir g
    return $ filter (`notElem` [".", ".."]) fs

{- Checks if there are changes in the journal. -}
journalDirty :: Annex Bool
journalDirty = not . null <$> getJournalFiles

{- Produces a filename to use in the journal for a file on the branch.
 -
 - The journal typically won't have a lot of files in it, so the hashing
 - used in the branch is not necessary, and all the files are put directly
 - in the journal directory.
 -}
journalFile :: FilePath -> Git.Repo -> FilePath
journalFile file repo = gitAnnexJournalDir repo </> concatMap mangle file
  where
    mangle '/' = "_"
    mangle '_' = "__"
    mangle c = [c]

{- Converts a journal file (relative to the journal dir) back to the
 - filename on the branch. -}
fileJournal :: FilePath -> FilePath
fileJournal = replace "//" "_" . replace "_" "/"

{- Runs an action that modifies the journal, using locking to avoid
 - contention with other git-annex processes. -}
lockJournal :: Annex a -> Annex a
lockJournal a = do
    file <- fromRepo gitAnnexJournalLock
    createAnnexDirectory $ takeDirectory file
    mode <- annexFileMode
    bracketIO (lock file mode) unlock a
  where
    lock file mode = do
        l <- noUmask mode $ createFile file mode
        waitToSetLock l (WriteLock, AbsoluteSeek, 0, 0)
        return l
    unlock = closeFd
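The escaping used by journalFile and fileJournal is easy to check by hand. Below is a self-contained copy of the scheme with a few worked examples; mangleName and unmangleName are illustrative names, not part of git-annex:

-- Mirror of journalFile's name mangling: '/' becomes '_', and a literal
-- '_' becomes '__' so the two cannot collide.
mangleName :: FilePath -> FilePath
mangleName = concatMap go
  where
    go '/' = "_"
    go '_' = "__"
    go c = [c]

-- Mirror of fileJournal: an escaped "__" becomes '_' again, and any
-- remaining single '_' turns back into '/'.
unmangleName :: FilePath -> FilePath
unmangleName [] = []
unmangleName ('_':'_':rest) = '_' : unmangleName rest
unmangleName ('_':rest) = '/' : unmangleName rest
unmangleName (c:rest) = c : unmangleName rest

-- mangleName "uuid.log"        == "uuid.log"
-- mangleName "remote/web.log"  == "remote_web.log"
-- mangleName "has_underscore"  == "has__underscore"
-- unmangleName reverses each of the above.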
81 Annex/Link.hs Normal file
@@ -0,0 +1,81 @@
{- git-annex links to content
 -
 - On file systems that support them, symlinks are used.
 -
 - On other filesystems, git instead stores the symlink target in a regular
 - file.
 -
 - Copyright 2013 Joey Hess <joey@kitenet.net>
 -
 - Licensed under the GNU GPL version 3 or higher.
 -}

module Annex.Link where

import Common.Annex
import qualified Annex
import qualified Git.HashObject
import qualified Git.UpdateIndex
import qualified Annex.Queue
import Git.Types

type LinkTarget = String

{- Checks if a file is a link to a key. -}
isAnnexLink :: FilePath -> Annex (Maybe Key)
isAnnexLink file = maybe Nothing (fileKey . takeFileName) <$> getAnnexLinkTarget file

{- Gets the link target of a symlink.
 -
 - On a filesystem that does not support symlinks, get the link
 - target by looking inside the file. (Only reads the first 8k of the file,
 - more than enough for any symlink target.)
 -
 - Returns Nothing if the file is not a symlink, or not a link to annex
 - content.
 -}
getAnnexLinkTarget :: FilePath -> Annex (Maybe LinkTarget)
getAnnexLinkTarget file = do
    v <- ifM (coreSymlinks <$> Annex.getGitConfig)
        ( liftIO $ catchMaybeIO $ readSymbolicLink file
        , liftIO $ catchMaybeIO $ readfilestart file
        )
    case v of
        Nothing -> return Nothing
        Just l
            | isLinkToAnnex l -> return v
            | otherwise -> return Nothing
  where
    readfilestart f = do
        h <- openFile f ReadMode
        fileEncoding h
        take 8192 <$> hGetContents h

{- Creates a link on disk.
 -
 - On a filesystem that does not support symlinks, writes the link target
 - to a file. Note that git will only treat the file as a symlink if
 - it's staged as such, so use addAnnexLink when adding a new file or
 - modified link to git.
 -}
makeAnnexLink :: LinkTarget -> FilePath -> Annex ()
makeAnnexLink linktarget file = ifM (coreSymlinks <$> Annex.getGitConfig)
    ( liftIO $ createSymbolicLink linktarget file
    , liftIO $ writeFile file linktarget
    )

{- Creates a link on disk, and additionally stages it in git. -}
addAnnexLink :: LinkTarget -> FilePath -> Annex ()
addAnnexLink linktarget file = do
    makeAnnexLink linktarget file
    stageSymlink file =<< hashSymlink linktarget

{- Injects a symlink target into git, returning its Sha. -}
hashSymlink :: LinkTarget -> Annex Sha
hashSymlink linktarget = inRepo $ Git.HashObject.hashObject BlobObject linktarget

{- Stages a symlink to the annex, using a Sha of its target. -}
stageSymlink :: FilePath -> Sha -> Annex ()
stageSymlink file sha =
    Annex.Queue.addUpdateIndex =<<
        inRepo (Git.UpdateIndex.stageSymlink file sha)
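The fallback path above (read the link target out of a regular file when symlinks are unavailable) can be sketched with nothing but base and the unix package. This is a simplified illustration, not getAnnexLinkTarget itself: it skips the git config check and the isLinkToAnnex filter, and linkTarget is an invented name:

import Control.Exception (try, SomeException)
import System.Posix.Files (readSymbolicLink)

-- Read a link target, falling back to the contents of a regular file
-- when the path is not actually a symlink. The 8192-byte cap mirrors
-- the limit used above.
linkTarget :: FilePath -> IO (Maybe String)
linkTarget f = do
    r <- try (readSymbolicLink f) :: IO (Either SomeException String)
    case r of
        Right t -> return (Just t)
        Left _ -> do
            r' <- try (readFile f) :: IO (Either SomeException String)
            return $ either (const Nothing) (Just . take 8192) r'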
45 Annex/LockPool.hs Normal file
@@ -0,0 +1,45 @@
{- git-annex lock pool
 -
 - Copyright 2012 Joey Hess <joey@kitenet.net>
 -
 - Licensed under the GNU GPL version 3 or higher.
 -}

module Annex.LockPool where

import qualified Data.Map as M
import System.Posix.Types (Fd)

import Common.Annex
import Annex
import Annex.Perms

{- Creates a specified lock file, and takes a shared lock. -}
lockFile :: FilePath -> Annex ()
lockFile file = go =<< fromPool file
  where
    go (Just _) = noop -- already locked
    go Nothing = do
        mode <- annexFileMode
        fd <- liftIO $ noUmask mode $
            openFd file ReadOnly (Just mode) defaultFileFlags
        liftIO $ waitToSetLock fd (ReadLock, AbsoluteSeek, 0, 0)
        changePool $ M.insert file fd

unlockFile :: FilePath -> Annex ()
unlockFile file = maybe noop go =<< fromPool file
  where
    go fd = do
        liftIO $ closeFd fd
        changePool $ M.delete file

getPool :: Annex (M.Map FilePath Fd)
getPool = getState lockpool

fromPool :: FilePath -> Annex (Maybe Fd)
fromPool file = M.lookup file <$> getPool

changePool :: (M.Map FilePath Fd -> M.Map FilePath Fd) -> Annex ()
changePool a = do
    m <- getPool
    changeState $ \s -> s { lockpool = a m }
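The pool is a thin layer over POSIX fcntl locks. A minimal standalone sketch of taking a shared lock, using the same unix-package calls the module uses (and the same pre-2.8 openFd signature that takes a Maybe FileMode); sharedLock is an illustrative name, and the sketch assumes the lock file already exists:

import System.Posix.IO
    ( openFd, waitToSetLock, defaultFileFlags
    , OpenMode(ReadOnly), LockRequest(ReadLock)
    )
import System.Posix.Types (Fd)
import System.IO (SeekMode(AbsoluteSeek))

-- Take a shared (read) lock over the whole file and hand the Fd back to
-- the caller; releasing the lock is just closeFd, as unlockFile does above.
sharedLock :: FilePath -> IO Fd
sharedLock file = do
    fd <- openFd file ReadOnly Nothing defaultFileFlags
    waitToSetLock fd (ReadLock, AbsoluteSeek, 0, 0)
    return fd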
100 Annex/Perms.hs Normal file
@@ -0,0 +1,100 @@
{- git-annex file permissions
 -
 - Copyright 2012 Joey Hess <joey@kitenet.net>
 -
 - Licensed under the GNU GPL version 3 or higher.
 -}

module Annex.Perms (
    setAnnexPerm,
    annexFileMode,
    createAnnexDirectory,
    noUmask,
    createContentDir,
    freezeContentDir,
) where

import Common.Annex
import Utility.FileMode
import Git.SharedRepository
import qualified Annex
import Config

import System.Posix.Types

withShared :: (SharedRepository -> Annex a) -> Annex a
withShared a = maybe startup a =<< Annex.getState Annex.shared
  where
    startup = do
        shared <- fromRepo getSharedRepository
        Annex.changeState $ \s -> s { Annex.shared = Just shared }
        a shared

{- Sets appropriate file mode for a file or directory in the annex,
 - other than the content files and content directory. Normally,
 - use the default mode, but with core.sharedRepository set,
 - allow the group to write, etc. -}
setAnnexPerm :: FilePath -> Annex ()
setAnnexPerm file = unlessM crippledFileSystem $
    withShared $ liftIO . go
  where
    go GroupShared = groupWriteRead file
    go AllShared = modifyFileMode file $ addModes $
        [ ownerWriteMode, groupWriteMode ] ++ readModes
    go _ = noop

{- Gets the appropriate mode to use for creating a file in the annex
 - (other than content files, which are locked down more). -}
annexFileMode :: Annex FileMode
annexFileMode = withShared $ return . go
  where
    go GroupShared = sharedmode
    go AllShared = combineModes (sharedmode:readModes)
    go _ = stdFileMode
    sharedmode = combineModes
        [ ownerWriteMode, groupWriteMode
        , ownerReadMode, groupReadMode
        ]

{- Creates a directory inside the gitAnnexDir, including any parent
 - directories. Makes directories with appropriate permissions. -}
createAnnexDirectory :: FilePath -> Annex ()
createAnnexDirectory dir = traverse dir [] =<< top
  where
    top = parentDir <$> fromRepo gitAnnexDir
    traverse d below stop
        | d `equalFilePath` stop = done
        | otherwise = ifM (liftIO $ doesDirectoryExist d)
            ( done
            , traverse (parentDir d) (d:below) stop
            )
      where
        done = forM_ below $ \p -> do
            liftIO $ createDirectoryIfMissing True p
            setAnnexPerm p

{- Blocks writing to the directory an annexed file is in, to prevent the
 - file accidentally being deleted. However, if core.sharedRepository
 - is set, this is not done, since the group must be allowed to delete the
 - file.
 -}
freezeContentDir :: FilePath -> Annex ()
freezeContentDir file = unlessM crippledFileSystem $
    liftIO . go =<< fromRepo getSharedRepository
  where
    dir = parentDir file
    go GroupShared = groupWriteRead dir
    go AllShared = groupWriteRead dir
    go _ = preventWrite dir

{- Makes the directory tree to store an annexed file's content,
 - with appropriate permissions on each level. -}
createContentDir :: FilePath -> Annex ()
createContentDir dest = do
    unlessM (liftIO $ doesDirectoryExist dir) $
        createAnnexDirectory dir
    -- might have already existed with restricted perms
    unlessM crippledFileSystem $
        liftIO $ allowWrite dir
  where
    dir = parentDir dest
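The helpers this module leans on (groupWriteRead, modifyFileMode, combineModes) live in Utility.FileMode and are not shown in this diff. As a rough approximation of what the GroupShared case does, here is a standalone sketch using only System.Posix.Files; groupSharePerms is an invented name:

import System.Posix.Files
    ( getFileStatus, setFileMode, fileMode, unionFileModes
    , ownerReadMode, ownerWriteMode, groupReadMode, groupWriteMode
    )

-- Add owner and group read/write bits to whatever mode the file already
-- has, roughly what core.sharedRepository=group needs.
groupSharePerms :: FilePath -> IO ()
groupSharePerms file = do
    s <- getFileStatus file
    setFileMode file $ foldr unionFileModes (fileMode s)
        [ ownerReadMode, ownerWriteMode, groupReadMode, groupWriteMode ]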
62 Annex/Queue.hs Normal file
@@ -0,0 +1,62 @@
{- git-annex command queue
 -
 - Copyright 2011, 2012 Joey Hess <joey@kitenet.net>
 -
 - Licensed under the GNU GPL version 3 or higher.
 -}

module Annex.Queue (
    addCommand,
    addUpdateIndex,
    flush,
    flushWhenFull,
    size
) where

import Common.Annex
import Annex hiding (new)
import qualified Git.Queue
import qualified Git.UpdateIndex

{- Adds a git command to the queue. -}
addCommand :: String -> [CommandParam] -> [FilePath] -> Annex ()
addCommand command params files = do
    q <- get
    store <=< inRepo $ Git.Queue.addCommand command params files q

{- Adds an update-index stream to the queue. -}
addUpdateIndex :: Git.UpdateIndex.Streamer -> Annex ()
addUpdateIndex streamer = do
    q <- get
    store <=< inRepo $ Git.Queue.addUpdateIndex streamer q

{- Runs the queue if it is full. Should be called periodically. -}
flushWhenFull :: Annex ()
flushWhenFull = do
    q <- get
    when (Git.Queue.full q) flush

{- Runs (and empties) the queue. -}
flush :: Annex ()
flush = do
    q <- get
    unless (0 == Git.Queue.size q) $ do
        showStoringStateAction
        q' <- inRepo $ Git.Queue.flush q
        store q'

{- Gets the size of the queue. -}
size :: Annex Int
size = Git.Queue.size <$> get

get :: Annex Git.Queue.Queue
get = maybe new return =<< getState repoqueue

new :: Annex Git.Queue.Queue
new = do
    q <- Git.Queue.new . annexQueueSize <$> getGitConfig
    store q
    return q

store :: Git.Queue.Queue -> Annex ()
store q = changeState $ \s -> s { repoqueue = Just q }
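A short usage sketch, mirroring how Annex.Direct above queues "git add"/"git rm" and lets the queue coalesce them into a few git invocations. It assumes the imports used elsewhere in this code (Common.Annex, qualified Annex.Queue); queueAdds is an illustrative name, not a real function:

-- Queue "git add -f" for a batch of files; the queue flushes itself once
-- it grows large enough, instead of running git once per file.
queueAdds :: [FilePath] -> Annex ()
queueAdds files = do
    forM_ files $ \f ->
        Annex.Queue.addCommand "add" [Param "-f"] [f]
    Annex.Queue.flushWhenFull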
156 Annex/Ssh.hs Normal file
@@ -0,0 +1,156 @@
{- git-annex ssh interface, with connection caching
 -
 - Copyright 2012,2013 Joey Hess <joey@kitenet.net>
 -
 - Licensed under the GNU GPL version 3 or higher.
 -}

{-# LANGUAGE CPP #-}

module Annex.Ssh (
    sshParams,
    sshCleanup,
) where

import qualified Data.Map as M
import System.Posix.Env

import Common.Annex
import Annex.LockPool
import Annex.Perms
import qualified Build.SysConfig as SysConfig
import qualified Annex
import Config

{- Generates parameters to ssh to a given host (or user@host) on a given
 - port, with connection caching. -}
sshParams :: (String, Maybe Integer) -> [CommandParam] -> Annex [CommandParam]
sshParams (host, port) opts = go =<< sshInfo (host, port)
  where
    go (Nothing, params) = ret params
    go (Just socketfile, params) = do
        cleanstale
        liftIO $ createDirectoryIfMissing True $ parentDir socketfile
        lockFile $ socket2lock socketfile
        ret params
    ret ps = return $ ps ++ opts ++ portParams port ++
        [Param "-T", Param host]
    -- If the lock pool is empty, this is the first ssh of this
    -- run. There could be stale ssh connections hanging around
    -- from a previous git-annex run that was interrupted.
    cleanstale = whenM (not . any isLock . M.keys <$> getPool) $
        sshCleanup

{- Returns a filename to use for an ssh connection caching socket, and
 - parameters to enable ssh connection caching. -}
sshInfo :: (String, Maybe Integer) -> Annex (Maybe FilePath, [CommandParam])
sshInfo (host, port) = go =<< sshCacheDir
  where
    go Nothing = return (Nothing, [])
    go (Just dir) = do
        let socketfile = dir </> hostport2socket host port
        if valid_unix_socket_path socketfile
            then return (Just socketfile, cacheparams socketfile)
            else do
                socketfile' <- liftIO $ relPathCwdToFile socketfile
                if valid_unix_socket_path socketfile'
                    then return (Just socketfile', cacheparams socketfile')
                    else return (Nothing, [])
    cacheparams :: FilePath -> [CommandParam]
    cacheparams socketfile =
        [ Param "-S", Param socketfile
        , Params "-o ControlMaster=auto -o ControlPersist=yes"
        ]

{- ssh connection caching creates sockets, so will not work on a
 - crippled filesystem. A GIT_ANNEX_TMP_DIR can be provided to use
 - a different filesystem. -}
sshCacheDir :: Annex (Maybe FilePath)
sshCacheDir
    | SysConfig.sshconnectioncaching = ifM crippledFileSystem
        ( maybe (return Nothing) usetmpdir =<< gettmpdir
        , ifM (fromMaybe True . annexSshCaching <$> Annex.getGitConfig)
            ( Just <$> fromRepo gitAnnexSshDir
            , return Nothing
            )
        )
    | otherwise = return Nothing
  where
    gettmpdir = liftIO $ getEnv "GIT_ANNEX_TMP_DIR"
    usetmpdir tmpdir = liftIO $ catchMaybeIO $ do
        createDirectoryIfMissing True tmpdir
        return $ tmpdir

portParams :: Maybe Integer -> [CommandParam]
portParams Nothing = []
portParams (Just port) = [Param "-p", Param $ show port]

{- Stops any unused ssh processes. -}
sshCleanup :: Annex ()
sshCleanup = go =<< sshCacheDir
  where
    go Nothing = noop
    go (Just dir) = do
        sockets <- filter (not . isLock) <$>
            liftIO (catchDefaultIO [] $ dirContents dir)
        forM_ sockets cleanup
    cleanup socketfile = do
        -- Drop any shared lock we have, and take an
        -- exclusive lock, without blocking. If the lock
        -- succeeds, nothing is using this ssh, and it can
        -- be stopped.
        let lockfile = socket2lock socketfile
        unlockFile lockfile
        mode <- annexFileMode
        fd <- liftIO $ noUmask mode $
            openFd lockfile ReadWrite (Just mode) defaultFileFlags
        v <- liftIO $ tryIO $
            setLock fd (WriteLock, AbsoluteSeek, 0, 0)
        case v of
            Left _ -> noop
            Right _ -> stopssh socketfile
        liftIO $ closeFd fd
    stopssh socketfile = do
        let (host, port) = socket2hostport socketfile
        (_, params) <- sshInfo (host, port)
        -- "ssh -O stop" is noisy on stderr even with -q
        void $ liftIO $ catchMaybeIO $
            withQuietOutput createProcessSuccess $
                proc "ssh" $ toCommand $
                    [ Params "-O stop"
                    ] ++ params ++ [Param host]
        -- Cannot remove the lock file; other processes may
        -- be waiting on our exclusive lock to use it.

hostport2socket :: String -> Maybe Integer -> FilePath
hostport2socket host Nothing = host
hostport2socket host (Just port) = host ++ "!" ++ show port

socket2hostport :: FilePath -> (String, Maybe Integer)
socket2hostport socket
    | null p = (h, Nothing)
    | otherwise = (h, readish p)
  where
    (h, p) = separate (== '!') $ takeFileName socket

socket2lock :: FilePath -> FilePath
socket2lock socket = socket ++ lockExt

isLock :: FilePath -> Bool
isLock f = lockExt `isSuffixOf` f

lockExt :: String
lockExt = ".lock"

{- This is the size of the sun_path component of sockaddr_un, which
 - is the limit to the total length of the filename of a unix socket.
 -
 - On Linux, this is 108. On OSX, 104. TODO: Probe
 -}
sizeof_sockaddr_un_sun_path :: Int
sizeof_sockaddr_un_sun_path = 100

{- Note that this looks at the true length of the path in bytes, as it will
 - appear on disk. -}
valid_unix_socket_path :: FilePath -> Bool
valid_unix_socket_path f = length (decodeW8 f) < sizeof_sockaddr_un_sun_path
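A worked example of what the functions above produce. The cache directory path is illustrative, and sshParams returns only the argument list (the caller supplies the "ssh" binary itself):

-- Assuming a cache dir of "/repo/.git/annex/ssh",
-- sshParams ("user@example.com", Just 2222) [] produces arguments for
-- an invocation equivalent to:
--
--   ssh -S /repo/.git/annex/ssh/user@example.com!2222 \
--       -o ControlMaster=auto -o ControlPersist=yes \
--       -p 2222 -T user@example.com
--
-- and the socket name round-trips through the helpers above:
--
--   hostport2socket "user@example.com" (Just 2222) == "user@example.com!2222"
--   socket2hostport "user@example.com!2222" == ("user@example.com", Just 2222)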
73 Annex/UUID.hs Normal file
@@ -0,0 +1,73 @@
{- git-annex uuids
 -
 - Each git repository used by git-annex has an annex.uuid setting that
 - uniquely identifies that repository.
 -
 - UUIDs of remotes are cached in git config, using keys named
 - remote.<name>.annex-uuid
 -
 - Copyright 2010-2013 Joey Hess <joey@kitenet.net>
 -
 - Licensed under the GNU GPL version 3 or higher.
 -}

module Annex.UUID (
    getUUID,
    getRepoUUID,
    getUncachedUUID,
    ensureUUID,
    genUUID,
    removeRepoUUID,
    storeUUID,
) where

import Common.Annex
import qualified Git
import qualified Git.Config
import Config
import Types.UUID

configkey :: ConfigKey
configkey = annexConfig "uuid"

{- Get current repository's UUID. -}
getUUID :: Annex (Maybe UUID)
getUUID = getRepoUUID =<< gitRepo

{- Looks up a repo's UUID, caching it in .git/config if it's not already. -}
getRepoUUID :: Git.Repo -> Annex (Maybe UUID)
getRepoUUID r = do
    c <- toUUID <$> getConfig cachekey ""
    case getUncachedUUID r of
        v@(Just u)
            | c /= v -> do
                updatecache u
                return v
        _ -> return c
  where
    updatecache u = do
        g <- gitRepo
        when (g /= r) $ storeUUID cachekey u
    cachekey = remoteConfig r "uuid"

removeRepoUUID :: Annex ()
removeRepoUUID = unsetConfig configkey

getUncachedUUID :: Git.Repo -> Maybe UUID
getUncachedUUID = toUUID . Git.Config.get key ""
  where
    (ConfigKey key) = configkey

{- Make sure that the repo has an annex.uuid setting. -}
ensureUUID :: Annex UUID
ensureUUID = do
    mu <- getUUID
    case mu of
        Just u -> return u
        Nothing -> do
            u <- liftIO genUUID
            storeUUID configkey u
            return u

storeUUID :: ConfigKey -> UUID -> Annex ()
storeUUID configfield = setConfig configfield . fromUUID
47 Annex/Version.hs Normal file
@@ -0,0 +1,47 @@
{- git-annex repository versioning
 -
 - Copyright 2010,2013 Joey Hess <joey@kitenet.net>
 -
 - Licensed under the GNU GPL version 3 or higher.
 -}

module Annex.Version where

import Common.Annex
import Config
import qualified Annex

type Version = String

defaultVersion :: Version
defaultVersion = "3"

directModeVersion :: Version
directModeVersion = "4"

supportedVersions :: [Version]
supportedVersions = [defaultVersion, directModeVersion]

upgradableVersions :: [Version]
upgradableVersions = ["0", "1", "2"]

versionField :: ConfigKey
versionField = annexConfig "version"

getVersion :: Annex (Maybe Version)
getVersion = annexVersion <$> Annex.getGitConfig

setVersion :: Version -> Annex ()
setVersion = setConfig versionField

removeVersion :: Annex ()
removeVersion = unsetConfig versionField

checkVersion :: Version -> Annex ()
checkVersion v
    | v `elem` supportedVersions = noop
    | v `elem` upgradableVersions = err "Upgrade this repository: git-annex upgrade"
    | otherwise = err "Upgrade git-annex."
  where
    err msg = error $ "Repository version " ++ v ++
        " is not supported. " ++ msg
33 Annex/Wanted.hs Normal file
@@ -0,0 +1,33 @@
{- git-annex control over whether content is wanted
 -
 - Copyright 2012 Joey Hess <joey@kitenet.net>
 -
 - Licensed under the GNU GPL version 3 or higher.
 -}

module Annex.Wanted where

import Common.Annex
import Logs.PreferredContent
import Annex.UUID
import Types.Remote

import qualified Data.Set as S

{- Check if a file is preferred content for the local repository. -}
wantGet :: Bool -> AssociatedFile -> Annex Bool
wantGet def Nothing = return def
wantGet def (Just file) = isPreferredContent Nothing S.empty file def

{- Check if a file is preferred content for a remote. -}
wantSend :: Bool -> AssociatedFile -> UUID -> Annex Bool
wantSend def Nothing _ = return def
wantSend def (Just file) to = isPreferredContent (Just to) S.empty file def

{- Check if a file can be dropped, maybe from a remote.
 - Don't drop files that are preferred content. -}
wantDrop :: Bool -> Maybe UUID -> AssociatedFile -> Annex Bool
wantDrop def _ Nothing = return $ not def
wantDrop def from (Just file) = do
    u <- maybe getUUID (return . id) from
    not <$> isPreferredContent (Just u) (S.singleton u) file def
237 Assistant.hs Normal file
@@ -0,0 +1,237 @@
{- git-annex assistant daemon
 -
 - Copyright 2012 Joey Hess <joey@kitenet.net>
 -
 - Licensed under the GNU GPL version 3 or higher.
 -
 - Overview of threads and MVars, etc:
 -
 - Thread 1: parent
 -     The initial thread run, double forks to background, starts other
 -     threads, and then stops, waiting for them to terminate,
 -     or for a ctrl-c.
 - Thread 2: Watcher
 -     Notices new files, and calls handlers for events, queuing changes.
 - Thread 3: inotify internal
 -     Used by haskell inotify library to ensure inotify event buffer is
 -     kept drained.
 - Thread 4: inotify startup scanner
 -     Scans the tree and registers inotify watches for each directory.
 -     A MVar lock is used to prevent other inotify handlers from running
 -     until this is complete.
 - Thread 5: Committer
 -     Waits for changes to occur, and runs the git queue to update its
 -     index, then commits. Also queues Transfer events to send added
 -     files to other remotes.
 - Thread 6: Pusher
 -     Waits for commits to be made, and pushes updated branches to remotes,
 -     in parallel. (Forks a process for each git push.)
 - Thread 7: PushRetryer
 -     Runs every 30 minutes when there are failed pushes, and retries
 -     them.
 - Thread 8: Merger
 -     Waits for pushes to be received from remotes, and merges the
 -     updated branches into the current branch.
 -     (This uses inotify on .git/refs/heads, so there are additional
 -     inotify threads associated with it, too.)
 - Thread 9: TransferWatcher
 -     Watches for transfer information files being created and removed,
 -     and maintains the DaemonStatus currentTransfers map.
 -     (This uses inotify on .git/annex/transfer/, so there are
 -     additional inotify threads associated with it, too.)
 - Thread 10: TransferPoller
 -     Polls to determine how much of each ongoing transfer is complete.
 - Thread 11: Transferrer
 -     Waits for Transfers to be queued and does them.
 - Thread 12: StatusLogger
 -     Wakes up periodically and records the daemon's status to disk.
 - Thread 13: SanityChecker
 -     Wakes up periodically (rarely) and does sanity checks.
 - Thread 14: MountWatcher
 -     Either uses dbus to watch for drive mount events, or, when
 -     there's no dbus, polls to find newly mounted filesystems.
 -     Once a filesystem that contains a remote is mounted, updates
 -     state about that remote, pulls from it, and queues a push to it,
 -     as well as an update, and queues it onto the
 -     ConnectedRemoteChan
 - Thread 15: NetWatcher
 -     Deals with network connection interruptions, which would cause
 -     transfers to fail, and can be recovered from by waiting for a
 -     network connection, and syncing with all network remotes.
 -     Uses dbus to watch for network connections, or when dbus
 -     cannot be used, assumes there's been one every 30 minutes.
 - Thread 16: TransferScanner
 -     Does potentially expensive checks to find data that needs to be
 -     transferred from or to remotes, and queues Transfers.
 -     Uses the ScanRemotes map.
 - Thread 17: PairListener
 -     Listens for incoming pairing traffic, and takes action.
 - Thread 18: ConfigMonitor
 -     Triggered by changes to the git-annex branch, checks for changed
 -     config files, and reloads configs.
 - Thread 19: XMPPClient
 -     Built-in XMPP client.
 - Thread 20: WebApp
 -     Spawns more threads as necessary to handle clients.
 -     Displays the DaemonStatus.
 - Thread 21: Glacier
 -     Deals with retrieving files from Amazon Glacier.
 -
 - ThreadState: (MVar)
 -     The Annex state is stored here, which allows resuscitating the
 -     Annex monad in IO actions run by the watcher and committer
 -     threads. Thus, a single state is shared among the threads, and
 -     only one at a time can access it.
 - DaemonStatusHandle: (STM TMVar)
 -     The daemon's current status.
 - ChangeChan: (STM TChan)
 -     Changes are indicated by writing to this channel. The committer
 -     reads from it.
 - CommitChan: (STM TChan)
 -     Commits are indicated by writing to this channel. The pusher reads
 -     from it.
 - FailedPushMap (STM TMVar)
 -     Failed pushes are indicated by writing to this TMVar. The push
 -     retrier blocks until they're available.
 - TransferQueue (STM TChan)
 -     Transfers to make are indicated by writing to this channel.
 - TransferSlots (QSemN)
 -     Count of the number of currently available transfer slots.
 -     Updated by the transfer watcher, this allows other threads
 -     to block until a slot is available.
 -     This MVar should only be manipulated from inside the Annex monad,
 -     which ensures it's accessed only after the ThreadState MVar.
 - ScanRemotes (STM TMVar)
 -     Remotes that have been disconnected, and should be scanned
 -     are indicated by writing to this TMVar.
 - BranchChanged (STM SampleVar)
 -     Changes to the git-annex branch are indicated by updating this
 -     SampleVar.
 - NetMessager (STM TChan, TMVar, SampleVar)
 -     Used to feed messages to the built-in XMPP client, handle
 -     pushes, and signal it when it needs to restart due to configuration
 -     or networking changes.
 - UrlRenderer (MVar)
 -     A Yesod route rendering function is stored here. This allows
 -     things that need to render Yesod routes to block until the webapp
 -     has started up and such rendering is possible.
 -}

{-# LANGUAGE CPP #-}

module Assistant where

import Assistant.Common
import Assistant.DaemonStatus
import Assistant.NamedThread
import Assistant.Types.ThreadedMonad
import Assistant.Threads.DaemonStatus
import Assistant.Threads.Watcher
import Assistant.Threads.Committer
import Assistant.Threads.Pusher
import Assistant.Threads.Merger
import Assistant.Threads.TransferWatcher
import Assistant.Threads.Transferrer
import Assistant.Threads.SanityChecker
import Assistant.Threads.MountWatcher
import Assistant.Threads.NetWatcher
import Assistant.Threads.TransferScanner
import Assistant.Threads.TransferPoller
import Assistant.Threads.ConfigMonitor
import Assistant.Threads.Glacier
#ifdef WITH_WEBAPP
import Assistant.WebApp
import Assistant.Threads.WebApp
#ifdef WITH_PAIRING
import Assistant.Threads.PairListener
#endif
#ifdef WITH_XMPP
import Assistant.Threads.XMPPClient
#endif
#else
#warning Building without the webapp. You probably need to install Yesod..
#endif
import Assistant.Environment
import qualified Utility.Daemon
import Utility.LogFile
import Utility.ThreadScheduler

stopDaemon :: Annex ()
stopDaemon = liftIO . Utility.Daemon.stopDaemon =<< fromRepo gitAnnexPidFile

{- Starts the daemon. If the daemon is run in the foreground, once it's
 - running, can start the browser.
 -
 - startbrowser is passed the url and html shim file, as well as the original
 - stdout and stderr descriptors. -}
startDaemon :: Bool -> Bool -> Maybe (Maybe Handle -> Maybe Handle -> String -> FilePath -> IO ()) -> Annex ()
startDaemon assistant foreground startbrowser = do
    pidfile <- fromRepo gitAnnexPidFile
    logfd <- liftIO . openLog =<< fromRepo gitAnnexLogFile
    if foreground
        then do
            liftIO $ Utility.Daemon.lockPidFile pidfile
            origout <- liftIO $ catchMaybeIO $
                fdToHandle =<< dup stdOutput
            origerr <- liftIO $ catchMaybeIO $
                fdToHandle =<< dup stdError
            liftIO $ Utility.LogFile.redirLog logfd
            showStart (if assistant then "assistant" else "watch") "."
            start id $
                case startbrowser of
                    Nothing -> Nothing
                    Just a -> Just $ a origout origerr
        else
            start (Utility.Daemon.daemonize logfd (Just pidfile) False) Nothing
  where
    start daemonize webappwaiter = withThreadState $ \st -> do
        checkCanWatch
        when assistant $ checkEnvironment
        dstatus <- startDaemonStatus
        liftIO $ daemonize $
            flip runAssistant (go webappwaiter)
                =<< newAssistantData st dstatus

    go webappwaiter = do
#ifdef WITH_WEBAPP
        d <- getAssistant id
        urlrenderer <- liftIO newUrlRenderer
        mapM_ (startthread $ Just urlrenderer)
#else
        mapM_ (startthread Nothing)
#endif
            [ watch $ commitThread
#ifdef WITH_WEBAPP
            , assist $ webAppThread d urlrenderer False Nothing webappwaiter
#ifdef WITH_PAIRING
            , assist $ pairListenerThread urlrenderer
#endif
#ifdef WITH_XMPP
            , assist $ xmppClientThread urlrenderer
#endif
#endif
            , assist $ pushThread
            , assist $ pushRetryThread
            , assist $ mergeThread
            , assist $ transferWatcherThread
            , assist $ transferPollerThread
            , assist $ transfererThread
            , assist $ daemonStatusThread
            , assist $ sanityCheckerDailyThread
            , assist $ sanityCheckerHourlyThread
            , assist $ mountWatcherThread
            , assist $ netWatcherThread
            , assist $ netWatcherFallbackThread
            , assist $ transferScannerThread
            , assist $ configMonitorThread
            , assist $ glacierThread
            , watch $ watchThread
            ]

        liftIO waitForTermination

    watch a = (True, a)
    assist a = (False, a)
    startthread urlrenderer (watcher, t)
        | watcher || assistant = startNamedThread urlrenderer t
        | otherwise = noop
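The watch/assist pairs above boil down to "run the threads enabled for this mode and wait for all of them". A generic standalone sketch of that pattern, using the async library rather than the assistant's own NamedThread machinery; runEnabled is an invented name and this is only an illustration of the selection logic:

import Control.Concurrent.Async (mapConcurrently_)

-- Each entry is (watch-mode thread?, action). In watch-only mode just the
-- watch-marked threads run; with the assistant enabled, everything runs.
runEnabled :: Bool -> [(Bool, IO ())] -> IO ()
runEnabled assistant threads =
    mapConcurrently_ snd
        [ t | t@(watcherOnly, _) <- threads, watcherOnly || assistant ]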
373 Assistant/Alert.hs Normal file
@@ -0,0 +1,373 @@
|
|||
{- git-annex assistant alerts
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
{-# LANGUAGE OverloadedStrings #-}
|
||||
|
||||
module Assistant.Alert where
|
||||
|
||||
import Common.Annex
|
||||
import qualified Remote
|
||||
import Utility.Tense
|
||||
import Logs.Transfer
|
||||
|
||||
import qualified Data.Text as T
|
||||
import Data.Text (Text)
|
||||
import qualified Data.Map as M
|
||||
import Data.String
|
||||
|
||||
{- Different classes of alerts are displayed differently. -}
|
||||
data AlertClass = Success | Message | Activity | Warning | Error
|
||||
deriving (Eq, Ord)
|
||||
|
||||
data AlertPriority = Filler | Low | Medium | High | Pinned
|
||||
deriving (Eq, Ord)
|
||||
|
||||
{- An alert can have an name, which is used to combine it with other similar
|
||||
- alerts. -}
|
||||
data AlertName
|
||||
= FileAlert TenseChunk
|
||||
| SanityCheckFixAlert
|
||||
| WarningAlert String
|
||||
| PairAlert String
|
||||
| XMPPNeededAlert
|
||||
deriving (Eq)
|
||||
|
||||
{- The first alert is the new alert, the second is an old alert.
|
||||
- Should return a modified version of the old alert. -}
|
||||
type AlertCombiner = Alert -> Alert -> Maybe Alert
|
||||
|
||||
data Alert = Alert
|
||||
{ alertClass :: AlertClass
|
||||
, alertHeader :: Maybe TenseText
|
||||
, alertMessageRender :: [TenseChunk] -> TenseText
|
||||
, alertData :: [TenseChunk]
|
||||
, alertBlockDisplay :: Bool
|
||||
, alertClosable :: Bool
|
||||
, alertPriority :: AlertPriority
|
||||
, alertIcon :: Maybe AlertIcon
|
||||
, alertCombiner :: Maybe AlertCombiner
|
||||
, alertName :: Maybe AlertName
|
||||
, alertButton :: Maybe AlertButton
|
||||
}
|
||||
|
||||
data AlertIcon = ActivityIcon | SuccessIcon | ErrorIcon | InfoIcon | TheCloud
|
||||
|
||||
{- When clicked, a button always redirects to a URL
|
||||
- It may also run an IO action in the background, which is useful
|
||||
- to make the button close or otherwise change the alert. -}
|
||||
data AlertButton = AlertButton
|
||||
{ buttonLabel :: Text
|
||||
, buttonUrl :: Text
|
||||
, buttonAction :: Maybe (AlertId -> IO ())
|
||||
}
|
||||
|
||||
type AlertPair = (AlertId, Alert)
|
||||
|
||||
type AlertMap = M.Map AlertId Alert
|
||||
|
||||
{- Higher AlertId indicates a more recent alert. -}
|
||||
newtype AlertId = AlertId Integer
|
||||
deriving (Read, Show, Eq, Ord)
|
||||
|
||||
firstAlertId :: AlertId
|
||||
firstAlertId = AlertId 0
|
||||
|
||||
nextAlertId :: AlertId -> AlertId
|
||||
nextAlertId (AlertId i) = AlertId $ succ i
|
||||
|
||||
{- This is as many alerts as it makes sense to display at a time.
|
||||
- A display might be smaller, or larger, the point is to not overwhelm the
|
||||
- user with a ton of alerts. -}
|
||||
displayAlerts :: Int
|
||||
displayAlerts = 6
|
||||
|
||||
{- This is not a hard maximum, but there's no point in keeping a great
|
||||
- many filler alerts in an AlertMap, so when there's more than this many,
|
||||
- they start being pruned, down toward displayAlerts. -}
|
||||
maxAlerts :: Int
|
||||
maxAlerts = displayAlerts * 2
|
||||
|
||||
{- The desired order is the reverse of:
|
||||
-
|
||||
- - Pinned alerts
|
||||
- - High priority alerts, newest first
|
||||
- - Medium priority Activity, newest first (mostly used for Activity)
|
||||
- - Low priority alerts, newest first
|
||||
- - Filler priorty alerts, newest first
|
||||
- - Ties are broken by the AlertClass, with Errors etc coming first.
|
||||
-}
|
||||
compareAlertPairs :: AlertPair -> AlertPair -> Ordering
|
||||
compareAlertPairs
|
||||
(aid, Alert { alertClass = aclass, alertPriority = aprio })
|
||||
(bid, Alert { alertClass = bclass, alertPriority = bprio })
|
||||
= compare aprio bprio
|
||||
`thenOrd` compare aid bid
|
||||
`thenOrd` compare aclass bclass
|
||||
|
||||
sortAlertPairs :: [AlertPair] -> [AlertPair]
|
||||
sortAlertPairs = sortBy compareAlertPairs
|
||||
|
||||
{- Renders an alert's header for display, if it has one. -}
|
||||
renderAlertHeader :: Alert -> Maybe Text
|
||||
renderAlertHeader alert = renderTense (alertTense alert) <$> alertHeader alert
|
||||
|
||||
{- Renders an alert's message for display. -}
|
||||
renderAlertMessage :: Alert -> Text
|
||||
renderAlertMessage alert = renderTense (alertTense alert) $
|
||||
(alertMessageRender alert) (alertData alert)
|
||||
|
||||
showAlert :: Alert -> String
|
||||
showAlert alert = T.unpack $ T.unwords $ catMaybes
|
||||
[ renderAlertHeader alert
|
||||
, Just $ renderAlertMessage alert
|
||||
]
|
||||
|
||||
alertTense :: Alert -> Tense
|
||||
alertTense alert
|
||||
| alertClass alert == Activity = Present
|
||||
| otherwise = Past
|
||||
|
||||
{- Checks if two alerts display the same. -}
|
||||
effectivelySameAlert :: Alert -> Alert -> Bool
|
||||
effectivelySameAlert x y = all id
|
||||
[ alertClass x == alertClass y
|
||||
, alertHeader x == alertHeader y
|
||||
, alertData x == alertData y
|
||||
, alertBlockDisplay x == alertBlockDisplay y
|
||||
, alertClosable x == alertClosable y
|
||||
, alertPriority x == alertPriority y
|
||||
]
|
||||
|
||||
makeAlertFiller :: Bool -> Alert -> Alert
|
||||
makeAlertFiller success alert
|
||||
| isFiller alert = alert
|
||||
| otherwise = alert
|
||||
{ alertClass = if c == Activity then c' else c
|
||||
, alertPriority = Filler
|
||||
, alertClosable = True
|
||||
, alertButton = Nothing
|
||||
, alertIcon = Just $ if success then SuccessIcon else ErrorIcon
|
||||
}
|
||||
where
|
||||
c = alertClass alert
|
||||
c'
|
||||
| success = Success
|
||||
| otherwise = Error
|
||||
|
||||
isFiller :: Alert -> Bool
|
||||
isFiller alert = alertPriority alert == Filler
|
||||
|
||||
{- Updates the Alertmap, adding or updating an alert.
|
||||
-
|
||||
- Any old filler that looks the same as the alert is removed.
|
||||
-
|
||||
- Or, if the alert has an alertCombiner that combines it with
|
||||
- an old alert, the old alert is replaced with the result, and the
|
||||
- alert is removed.
|
||||
-
|
||||
- Old filler alerts are pruned once maxAlerts is reached.
|
||||
-}
|
||||
mergeAlert :: AlertId -> Alert -> AlertMap -> AlertMap
|
||||
mergeAlert i al m = maybe updatePrune updateCombine (alertCombiner al)
|
||||
where
|
||||
pruneSame k al' = k == i || not (effectivelySameAlert al al')
|
||||
pruneBloat m'
|
||||
| bloat > 0 = M.fromList $ pruneold $ M.toList m'
|
||||
| otherwise = m'
|
||||
where
|
||||
bloat = M.size m' - maxAlerts
|
||||
pruneold l =
|
||||
let (f, rest) = partition (\(_, a) -> isFiller a) l
|
||||
in drop bloat f ++ rest
|
||||
updatePrune = pruneBloat $ M.filterWithKey pruneSame $
|
||||
M.insertWith' const i al m
|
||||
updateCombine combiner =
|
||||
let combined = M.mapMaybe (combiner al) m
|
||||
in if M.null combined
|
||||
then updatePrune
|
||||
else M.delete i $ M.union combined m
|
||||
|
||||
baseActivityAlert :: Alert
|
||||
baseActivityAlert = Alert
|
||||
{ alertClass = Activity
|
||||
, alertHeader = Nothing
|
||||
, alertMessageRender = tenseWords
|
||||
, alertData = []
|
||||
, alertBlockDisplay = False
|
||||
, alertClosable = False
|
||||
, alertPriority = Medium
|
||||
, alertIcon = Just ActivityIcon
|
||||
, alertCombiner = Nothing
|
||||
, alertName = Nothing
|
||||
, alertButton = Nothing
|
||||
}
|
||||
|
||||
warningAlert :: String -> String -> Alert
|
||||
warningAlert name msg = Alert
|
||||
{ alertClass = Warning
|
||||
, alertHeader = Just $ tenseWords ["warning"]
|
||||
, alertMessageRender = tenseWords
|
||||
, alertData = [UnTensed $ T.pack msg]
|
||||
, alertBlockDisplay = True
|
||||
, alertClosable = True
|
||||
, alertPriority = High
|
||||
, alertIcon = Just ErrorIcon
|
||||
, alertCombiner = Just $ dataCombiner (++)
|
||||
, alertName = Just $ WarningAlert name
|
||||
, alertButton = Nothing
|
||||
}
|
||||
|
||||
activityAlert :: Maybe TenseText -> [TenseChunk] -> Alert
|
||||
activityAlert header dat = baseActivityAlert
|
||||
{ alertHeader = header
|
||||
, alertData = dat
|
||||
}
|
||||
|
||||
startupScanAlert :: Alert
|
||||
startupScanAlert = activityAlert Nothing
|
||||
[Tensed "Performing" "Performed", "startup scan"]
|
||||
|
||||
commitAlert :: Alert
|
||||
commitAlert = activityAlert Nothing
|
||||
[Tensed "Committing" "Committed", "changes to git"]
|
||||
|
||||
showRemotes :: [Remote] -> TenseChunk
|
||||
showRemotes = UnTensed . T.unwords . map (T.pack . Remote.name)
|
||||
|
||||
pushAlert :: [Remote] -> Alert
|
||||
pushAlert rs = activityAlert Nothing
|
||||
[Tensed "Syncing" "Synced", "with", showRemotes rs]
|
||||
|
||||
pushRetryAlert :: [Remote] -> Alert
|
||||
pushRetryAlert rs = activityAlert
|
||||
(Just $ tenseWords [Tensed "Retrying" "Retried", "sync"])
|
||||
["with", showRemotes rs]
|
||||
|
||||
syncAlert :: [Remote] -> Alert
|
||||
syncAlert rs = baseActivityAlert
|
||||
{ alertHeader = Just $ tenseWords
|
||||
[Tensed "Syncing" "Synced", "with", showRemotes rs]
|
||||
, alertData = []
|
||||
, alertPriority = Low
|
||||
}
|
||||
|
||||
scanAlert :: [Remote] -> Alert
|
||||
scanAlert rs = baseActivityAlert
|
||||
{ alertHeader = Just $ tenseWords
|
||||
[Tensed "Scanning" "Scanned", showRemotes rs]
|
||||
, alertBlockDisplay = True
|
||||
, alertPriority = Low
|
||||
}
|
||||
|
||||
sanityCheckAlert :: Alert
|
||||
sanityCheckAlert = activityAlert
|
||||
(Just $ tenseWords [Tensed "Running" "Ran", "daily sanity check"])
|
||||
["to make sure everything is ok."]
|
||||
|
||||
sanityCheckFixAlert :: String -> Alert
|
||||
sanityCheckFixAlert msg = Alert
|
||||
{ alertClass = Warning
|
||||
, alertHeader = Just $ tenseWords ["Fixed a problem"]
|
||||
, alertMessageRender = render
|
||||
, alertData = [UnTensed $ T.pack msg]
|
||||
, alertBlockDisplay = True
|
||||
, alertPriority = High
|
||||
, alertClosable = True
|
||||
, alertIcon = Just ErrorIcon
|
||||
, alertName = Just SanityCheckFixAlert
|
||||
, alertCombiner = Just $ dataCombiner (++)
|
||||
, alertButton = Nothing
|
||||
}
|
||||
where
|
||||
render dta = tenseWords $ alerthead : dta ++ [alertfoot]
|
||||
alerthead = "The daily sanity check found and fixed a problem:"
|
||||
alertfoot = "If these problems persist, consider filing a bug report."
|
||||
|
||||
pairingAlert :: AlertButton -> Alert
|
||||
pairingAlert button = baseActivityAlert
|
||||
{ alertData = [ UnTensed "Pairing in progress" ]
|
||||
, alertPriority = High
|
||||
, alertButton = Just button
|
||||
}
|
||||
|
||||
pairRequestReceivedAlert :: String -> AlertButton -> Alert
|
||||
pairRequestReceivedAlert who button = Alert
|
||||
{ alertClass = Message
|
||||
, alertHeader = Nothing
|
||||
, alertMessageRender = tenseWords
|
||||
, alertData = [UnTensed $ T.pack $ who ++ " is sending a pair request."]
|
||||
, alertBlockDisplay = False
|
||||
, alertPriority = High
|
||||
, alertClosable = True
|
||||
, alertIcon = Just InfoIcon
|
||||
, alertName = Just $ PairAlert who
|
||||
, alertCombiner = Just $ dataCombiner $ \_old new -> new
|
||||
, alertButton = Just button
|
||||
}
|
||||
|
||||
pairRequestAcknowledgedAlert :: String -> Maybe AlertButton -> Alert
|
||||
pairRequestAcknowledgedAlert who button = baseActivityAlert
|
||||
{ alertData = ["Pairing with", UnTensed (T.pack who), Tensed "in progress" "complete"]
|
||||
, alertPriority = High
|
||||
, alertCombiner = Just $ dataCombiner $ \_old new -> new
|
||||
, alertButton = button
|
||||
}
|
||||
|
||||
xmppNeededAlert :: AlertButton -> Alert
|
||||
xmppNeededAlert button = Alert
|
||||
{ alertHeader = Just "Share with friends, and keep your devices in sync across the cloud."
|
||||
, alertIcon = Just TheCloud
|
||||
, alertPriority = High
|
||||
, alertButton = Just button
|
||||
, alertClosable = True
|
||||
, alertClass = Message
|
||||
, alertMessageRender = tenseWords
|
||||
, alertBlockDisplay = True
|
||||
, alertName = Just $ XMPPNeededAlert
|
||||
, alertCombiner = Just $ dataCombiner $ \_old new -> new
|
||||
, alertData = []
|
||||
}
|
||||
|
||||
fileAlert :: TenseChunk -> FilePath -> Alert
|
||||
fileAlert msg file = (activityAlert Nothing [f])
|
||||
{ alertName = Just $ FileAlert msg
|
||||
, alertMessageRender = render
|
||||
, alertCombiner = Just $ dataCombiner combiner
|
||||
}
|
||||
where
|
||||
f = fromString $ shortFile $ takeFileName file
|
||||
render fs = tenseWords $ msg : fs
|
||||
combiner new old = take 10 $ new ++ old
|
||||
|
||||
addFileAlert :: FilePath -> Alert
|
||||
addFileAlert = fileAlert (Tensed "Adding" "Added")
|
||||
|
||||
{- This is only used as a success alert after a transfer, not during it. -}
|
||||
transferFileAlert :: Direction -> Bool -> FilePath -> Alert
|
||||
transferFileAlert direction True
|
||||
| direction == Upload = fileAlert "Uploaded"
|
||||
| otherwise = fileAlert "Downloaded"
|
||||
transferFileAlert direction False
|
||||
| direction == Upload = fileAlert "Upload failed"
|
||||
| otherwise = fileAlert "Download failed"
|
||||
|
||||
dataCombiner :: ([TenseChunk] -> [TenseChunk] -> [TenseChunk]) -> AlertCombiner
|
||||
dataCombiner combiner new old
|
||||
| alertClass new /= alertClass old = Nothing
|
||||
| alertName new == alertName old =
|
||||
Just $! old { alertData = alertData new `combiner` alertData old }
|
||||
| otherwise = Nothing
|
||||
|
||||
shortFile :: FilePath -> String
|
||||
shortFile f
|
||||
| len < maxlen = f
|
||||
| otherwise = take half f ++ ".." ++ drop (len - half) f
|
||||
where
|
||||
len = length f
|
||||
maxlen = 20
|
||||
half = (maxlen - 2) `div` 2
|
||||
|
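A quick standalone check of the shortFile truncation above; the 20-character limit and the ".." separator mirror the hunk, while the Main wrapper and sample paths are illustrative only.

-- Minimal sketch: same truncation rule as shortFile in this hunk.
module Main where

shortFile :: FilePath -> String
shortFile f
    | len < maxlen = f
    | otherwise = take half f ++ ".." ++ drop (len - half) f
  where
    len = length f
    maxlen = 20
    half = (maxlen - 2) `div` 2

main :: IO ()
main = mapM_ (putStrLn . shortFile)
    [ "short.txt"                          -- under the limit, kept as-is
    , "a-rather-long-filename-2012.patch"  -- squeezed to 20 chars around ".."
    ]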
19 Assistant/BranchChange.hs Normal file
@@ -0,0 +1,19 @@
|
|||
{- git-annex assistant git-annex branch change tracking
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
module Assistant.BranchChange where
|
||||
|
||||
import Assistant.Common
|
||||
import Assistant.Types.BranchChange
|
||||
|
||||
import Control.Concurrent.MSampleVar
|
||||
|
||||
branchChanged :: Assistant ()
|
||||
branchChanged = flip writeSV () <<~ (fromBranchChangeHandle . branchChangeHandle)
|
||||
|
||||
waitBranchChange :: Assistant ()
|
||||
waitBranchChange = readSV <<~ (fromBranchChangeHandle . branchChangeHandle)
|
39 Assistant/Changes.hs Normal file
@@ -0,0 +1,39 @@
|
|||
{- git-annex assistant change tracking
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
module Assistant.Changes where
|
||||
|
||||
import Assistant.Common
|
||||
import Assistant.Types.Changes
|
||||
import Utility.TSet
|
||||
|
||||
import Data.Time.Clock
|
||||
|
||||
{- Handlers call this when they made a change that needs to get committed. -}
|
||||
madeChange :: FilePath -> ChangeType -> Assistant (Maybe Change)
|
||||
madeChange f t = Just <$> (Change <$> liftIO getCurrentTime <*> pure f <*> pure t)
|
||||
|
||||
noChange :: Assistant (Maybe Change)
|
||||
noChange = return Nothing
|
||||
|
||||
{- Indicates an add needs to be done, but has not started yet. -}
|
||||
pendingAddChange :: FilePath -> Assistant (Maybe Change)
|
||||
pendingAddChange f = Just <$> (PendingAddChange <$> liftIO getCurrentTime <*> pure f)
|
||||
|
||||
{- Gets all unhandled changes.
|
||||
- Blocks until at least one change is made. -}
|
||||
getChanges :: Assistant [Change]
|
||||
getChanges = getTSet <<~ changeChan
|
||||
|
||||
{- Puts unhandled changes back into the channel.
|
||||
- Note: Original order is not preserved. -}
|
||||
refillChanges :: [Change] -> Assistant ()
|
||||
refillChanges cs = flip putTSet cs <<~ changeChan
|
||||
|
||||
{- Records a change in the channel. -}
|
||||
recordChange :: Change -> Assistant ()
|
||||
recordChange c = flip putTSet1 c <<~ changeChan
|
27 Assistant/Commits.hs Normal file
@@ -0,0 +1,27 @@
|
|||
{- git-annex assistant commit tracking
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
module Assistant.Commits where
|
||||
|
||||
import Assistant.Common
|
||||
import Assistant.Types.Commits
|
||||
|
||||
import Utility.TSet
|
||||
|
||||
{- Gets all unhandled commits.
|
||||
- Blocks until at least one commit is made. -}
|
||||
getCommits :: Assistant [Commit]
|
||||
getCommits = getTSet <<~ commitChan
|
||||
|
||||
{- Puts unhandled commits back into the channel.
|
||||
- Note: Original order is not preserved. -}
|
||||
refillCommits :: [Commit] -> Assistant ()
|
||||
refillCommits cs = flip putTSet cs <<~ commitChan
|
||||
|
||||
{- Records a commit in the channel. -}
|
||||
recordCommit :: Assistant ()
|
||||
recordCommit = flip putTSet1 Commit <<~ commitChan
|
13 Assistant/Common.hs Normal file
@@ -0,0 +1,13 @@
|
|||
{- Common infrastructure for the git-annex assistant.
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
module Assistant.Common (module X) where
|
||||
|
||||
import Common.Annex as X
|
||||
import Assistant.Monad as X
|
||||
import Assistant.Types.DaemonStatus as X
|
||||
import Assistant.Types.NamedThread as X
|
241 Assistant/DaemonStatus.hs Normal file
@@ -0,0 +1,241 @@
|
|||
{- git-annex assistant daemon status
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
module Assistant.DaemonStatus where
|
||||
|
||||
import Assistant.Common
|
||||
import Assistant.Alert
|
||||
import Utility.TempFile
|
||||
import Assistant.Types.NetMessager
|
||||
import Utility.NotificationBroadcaster
|
||||
import Logs.Transfer
|
||||
import Logs.Trust
|
||||
import qualified Remote
|
||||
import qualified Types.Remote as Remote
|
||||
import qualified Git
|
||||
|
||||
import Control.Concurrent.STM
|
||||
import System.Posix.Types
|
||||
import Data.Time.Clock.POSIX
|
||||
import Data.Time
|
||||
import System.Locale
|
||||
import qualified Data.Map as M
|
||||
import qualified Data.Text as T
|
||||
|
||||
getDaemonStatus :: Assistant DaemonStatus
|
||||
getDaemonStatus = (atomically . readTMVar) <<~ daemonStatusHandle
|
||||
|
||||
modifyDaemonStatus_ :: (DaemonStatus -> DaemonStatus) -> Assistant ()
|
||||
modifyDaemonStatus_ a = modifyDaemonStatus $ \s -> (a s, ())
|
||||
|
||||
modifyDaemonStatus :: (DaemonStatus -> (DaemonStatus, b)) -> Assistant b
|
||||
modifyDaemonStatus a = do
|
||||
dstatus <- getAssistant daemonStatusHandle
|
||||
liftIO $ do
|
||||
(s, b) <- atomically $ do
|
||||
r@(s, _) <- a <$> takeTMVar dstatus
|
||||
putTMVar dstatus s
|
||||
return r
|
||||
sendNotification $ changeNotifier s
|
||||
return b
|
||||
|
||||
|
||||
{- Returns a function that updates the lists of syncable remotes. -}
|
||||
calcSyncRemotes :: Annex (DaemonStatus -> DaemonStatus)
|
||||
calcSyncRemotes = do
|
||||
rs <- filter (remoteAnnexSync . Remote.gitconfig) .
|
||||
concat . Remote.byCost <$> Remote.enabledRemoteList
|
||||
alive <- trustExclude DeadTrusted (map Remote.uuid rs)
|
||||
let good r = Remote.uuid r `elem` alive
|
||||
let syncable = filter good rs
|
||||
return $ \dstatus -> dstatus
|
||||
{ syncRemotes = syncable
|
||||
, syncGitRemotes = filter (not . Remote.specialRemote) syncable
|
||||
, syncDataRemotes = filter (not . isXMPPRemote) syncable
|
||||
}
|
||||
|
||||
{- Updates the syncRemotes list from the list of all remotes in Annex state. -}
|
||||
updateSyncRemotes :: Assistant ()
|
||||
updateSyncRemotes = do
|
||||
modifyDaemonStatus_ =<< liftAnnex calcSyncRemotes
|
||||
liftIO . sendNotification =<< syncRemotesNotifier <$> getDaemonStatus
|
||||
|
||||
{- Load any previous daemon status file, and store it in a MVar for this
|
||||
- process to use as its DaemonStatus. Also gets current transfer status. -}
|
||||
startDaemonStatus :: Annex DaemonStatusHandle
|
||||
startDaemonStatus = do
|
||||
file <- fromRepo gitAnnexDaemonStatusFile
|
||||
status <- liftIO $
|
||||
flip catchDefaultIO (readDaemonStatusFile file) =<< newDaemonStatus
|
||||
transfers <- M.fromList <$> getTransfers
|
||||
addsync <- calcSyncRemotes
|
||||
liftIO $ atomically $ newTMVar $ addsync $ status
|
||||
{ scanComplete = False
|
||||
, sanityCheckRunning = False
|
||||
, currentTransfers = transfers
|
||||
}
|
||||
|
||||
{- Don't just dump out the structure, because it will change over time,
|
||||
- and parts of it are not relevant. -}
|
||||
writeDaemonStatusFile :: FilePath -> DaemonStatus -> IO ()
|
||||
writeDaemonStatusFile file status =
|
||||
viaTmp writeFile file =<< serialized <$> getPOSIXTime
|
||||
where
|
||||
serialized now = unlines
|
||||
[ "lastRunning:" ++ show now
|
||||
, "scanComplete:" ++ show (scanComplete status)
|
||||
, "sanityCheckRunning:" ++ show (sanityCheckRunning status)
|
||||
, "lastSanityCheck:" ++ maybe "" show (lastSanityCheck status)
|
||||
]
|
||||
|
||||
readDaemonStatusFile :: FilePath -> IO DaemonStatus
|
||||
readDaemonStatusFile file = parse <$> newDaemonStatus <*> readFile file
|
||||
where
|
||||
parse status = foldr parseline status . lines
|
||||
parseline line status
|
||||
| key == "lastRunning" = parseval readtime $ \v ->
|
||||
status { lastRunning = Just v }
|
||||
| key == "scanComplete" = parseval readish $ \v ->
|
||||
status { scanComplete = v }
|
||||
| key == "sanityCheckRunning" = parseval readish $ \v ->
|
||||
status { sanityCheckRunning = v }
|
||||
| key == "lastSanityCheck" = parseval readtime $ \v ->
|
||||
status { lastSanityCheck = Just v }
|
||||
| otherwise = status -- unparsable line
|
||||
where
|
||||
(key, value) = separate (== ':') line
|
||||
parseval parser a = maybe status a (parser value)
|
||||
readtime s = do
|
||||
d <- parseTime defaultTimeLocale "%s%Qs" s
|
||||
Just $ utcTimeToPOSIXSeconds d
|
||||
|
||||
{- Checks if a time stamp was made after the daemon was lastRunning.
|
||||
-
|
||||
- Some slop is built in; this really checks if the time stamp was made
|
||||
- at least ten minutes after the daemon was lastRunning. This is to
|
||||
- ensure the daemon shut down cleanly, and deal with minor clock skew.
|
||||
-
|
||||
- If the daemon has never run before, this always returns False.
|
||||
-}
|
||||
afterLastDaemonRun :: EpochTime -> DaemonStatus -> Bool
|
||||
afterLastDaemonRun timestamp status = maybe False (< t) (lastRunning status)
|
||||
where
|
||||
t = realToFrac (timestamp + slop) :: POSIXTime
|
||||
slop = fromIntegral tenMinutes
|
||||
|
||||
tenMinutes :: Int
|
||||
tenMinutes = 10 * 60
|
||||
|
||||
{- Mutates the transfer map. Runs in STM so that the transfer map can
|
||||
- be modified in the same transaction that modifies the transfer queue.
|
||||
- Note that this does not send a notification of the change; that's left
|
||||
- to the caller. -}
|
||||
adjustTransfersSTM :: DaemonStatusHandle -> (TransferMap -> TransferMap) -> STM ()
|
||||
adjustTransfersSTM dstatus a = do
|
||||
s <- takeTMVar dstatus
|
||||
putTMVar dstatus $ s { currentTransfers = a (currentTransfers s) }
|
||||
|
||||
{- Alters a transfer's info, if the transfer is in the map. -}
|
||||
alterTransferInfo :: Transfer -> (TransferInfo -> TransferInfo) -> Assistant ()
|
||||
alterTransferInfo t a = updateTransferInfo' $ M.adjust a t
|
||||
|
||||
{- Updates a transfer's info. Adds the transfer to the map if necessary,
|
||||
- or if already present, updates it while preserving the old transferTid,
|
||||
- transferPaused, and bytesComplete values, which are not written to disk. -}
|
||||
updateTransferInfo :: Transfer -> TransferInfo -> Assistant ()
|
||||
updateTransferInfo t info = updateTransferInfo' $ M.insertWith' merge t info
|
||||
where
|
||||
merge new old = new
|
||||
{ transferTid = maybe (transferTid new) Just (transferTid old)
|
||||
, transferPaused = transferPaused new || transferPaused old
|
||||
, bytesComplete = maybe (bytesComplete new) Just (bytesComplete old)
|
||||
}
|
||||
|
||||
updateTransferInfo' :: (TransferMap -> TransferMap) -> Assistant ()
|
||||
updateTransferInfo' a = notifyTransfer `after` modifyDaemonStatus_ update
|
||||
where
|
||||
update s = s { currentTransfers = a (currentTransfers s) }
|
||||
|
||||
{- Removes a transfer from the map, and returns its info. -}
|
||||
removeTransfer :: Transfer -> Assistant (Maybe TransferInfo)
|
||||
removeTransfer t = notifyTransfer `after` modifyDaemonStatus remove
|
||||
where
|
||||
remove s =
|
||||
let (info, ts) = M.updateLookupWithKey
|
||||
(\_k _v -> Nothing)
|
||||
t (currentTransfers s)
|
||||
in (s { currentTransfers = ts }, info)
|
||||
|
||||
{- Send a notification when a transfer is changed. -}
|
||||
notifyTransfer :: Assistant ()
|
||||
notifyTransfer = do
|
||||
dstatus <- getAssistant daemonStatusHandle
|
||||
liftIO $ sendNotification
|
||||
=<< transferNotifier <$> atomically (readTMVar dstatus)
|
||||
|
||||
{- Send a notification when alerts are changed. -}
|
||||
notifyAlert :: Assistant ()
|
||||
notifyAlert = do
|
||||
dstatus <- getAssistant daemonStatusHandle
|
||||
liftIO $ sendNotification
|
||||
=<< alertNotifier <$> atomically (readTMVar dstatus)
|
||||
|
||||
{- Returns the alert's identifier, which can be used to remove it. -}
|
||||
addAlert :: Alert -> Assistant AlertId
|
||||
addAlert alert = do
|
||||
notice [showAlert alert]
|
||||
notifyAlert `after` modifyDaemonStatus add
|
||||
where
|
||||
add s = (s { lastAlertId = i, alertMap = m }, i)
|
||||
where
|
||||
i = nextAlertId $ lastAlertId s
|
||||
m = mergeAlert i alert (alertMap s)
|
||||
|
||||
removeAlert :: AlertId -> Assistant ()
|
||||
removeAlert i = updateAlert i (const Nothing)
|
||||
|
||||
updateAlert :: AlertId -> (Alert -> Maybe Alert) -> Assistant ()
|
||||
updateAlert i a = updateAlertMap $ \m -> M.update a i m
|
||||
|
||||
updateAlertMap :: (AlertMap -> AlertMap) -> Assistant ()
|
||||
updateAlertMap a = notifyAlert `after` modifyDaemonStatus_ update
|
||||
where
|
||||
update s = s { alertMap = a (alertMap s) }
|
||||
|
||||
{- Displays an alert while performing an activity that returns True on
|
||||
- success.
|
||||
-
|
||||
- The alert is left visible afterwards, as filler.
|
||||
- Old filler is pruned, to prevent the map growing too large. -}
|
||||
alertWhile :: Alert -> Assistant Bool -> Assistant Bool
|
||||
alertWhile alert a = alertWhile' alert $ do
|
||||
r <- a
|
||||
return (r, r)
|
||||
|
||||
{- Like alertWhile, but allows the activity to return a value too. -}
|
||||
alertWhile' :: Alert -> Assistant (Bool, a) -> Assistant a
|
||||
alertWhile' alert a = do
|
||||
let alert' = alert { alertClass = Activity }
|
||||
i <- addAlert alert'
|
||||
(ok, r) <- a
|
||||
updateAlertMap $ mergeAlert i $ makeAlertFiller ok alert'
|
||||
return r
|
||||
|
||||
{- Displays an alert while performing an activity, then removes it. -}
|
||||
alertDuring :: Alert -> Assistant a -> Assistant a
|
||||
alertDuring alert a = do
|
||||
i <- addAlert $ alert { alertClass = Activity }
|
||||
removeAlert i `after` a
|
||||
|
||||
{- Remotes using the XMPP transport have urls like xmpp::user@host -}
|
||||
isXMPPRemote :: Remote -> Bool
|
||||
isXMPPRemote remote = Git.repoIsUrl r && "xmpp::" `isPrefixOf` Git.repoLocation r
|
||||
where
|
||||
r = Remote.repo remote
|
||||
|
||||
getXMPPClientID :: Remote -> ClientID
|
||||
getXMPPClientID r = T.pack $ drop (length "xmpp::") (Git.repoLocation (Remote.repo r))
|
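For reference, a small self-contained sketch of the "key:value" status-file format that writeDaemonStatusFile and readDaemonStatusFile above agree on. The record, the read-based parsing, and the sample input are simplified stand-ins (no POSIX timestamps), not the assistant's actual types.

import Data.List (foldl')

-- Simplified stand-in for the persisted daemon status fields.
data Status = Status
    { stScanComplete :: Bool
    , stSanityCheckRunning :: Bool
    } deriving Show

-- Splits "key:value" at the first colon, like separate (== ':') above.
separateColon :: String -> (String, String)
separateColon s = case break (== ':') s of
    (a, _:b) -> (a, b)
    (a, [])  -> (a, "")

parseStatus :: String -> Status
parseStatus = foldl' step (Status False False) . lines
  where
    step st line = case separateColon line of
        ("scanComplete", v)       -> st { stScanComplete = read v }
        ("sanityCheckRunning", v) -> st { stSanityCheckRunning = read v }
        _                         -> st  -- unparsable lines are skipped

main :: IO ()
main = print $ parseStatus $ unlines
    [ "lastRunning:1346000000.0s"   -- ignored by this sketch
    , "scanComplete:True"
    , "sanityCheckRunning:False"
    ]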
87 Assistant/Drop.hs Normal file
@@ -0,0 +1,87 @@
|
|||
{- git-annex assistant dropping of unwanted content
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
module Assistant.Drop where
|
||||
|
||||
import Assistant.Common
|
||||
import Assistant.DaemonStatus
|
||||
import Logs.Location
|
||||
import Logs.Trust
|
||||
import Types.Remote (AssociatedFile, uuid)
|
||||
import qualified Remote
|
||||
import qualified Command.Drop
|
||||
import Command
|
||||
import Annex.Wanted
|
||||
import Annex.Exception
|
||||
import Config
|
||||
|
||||
import qualified Data.Set as S
|
||||
|
||||
type Reason = String
|
||||
|
||||
{- Drop from local and/or remote when allowed by the preferred content and
|
||||
- numcopies settings. -}
|
||||
handleDrops :: Reason -> Bool -> Key -> AssociatedFile -> Maybe Remote -> Assistant ()
|
||||
handleDrops _ _ _ Nothing _ = noop
|
||||
handleDrops reason fromhere key f knownpresentremote = do
|
||||
syncrs <- syncDataRemotes <$> getDaemonStatus
|
||||
locs <- liftAnnex $ loggedLocations key
|
||||
handleDropsFrom locs syncrs reason fromhere key f knownpresentremote
|
||||
|
||||
{- The UUIDs are ones where the content is believed to be present.
|
||||
- The Remote list can include other remotes that do not have the content;
|
||||
- only ones that match the UUIDs will be dropped from.
|
||||
- If allowed to drop fromhere, that drop will be tried first. -}
|
||||
handleDropsFrom :: [UUID] -> [Remote] -> Reason -> Bool -> Key -> AssociatedFile -> Maybe Remote -> Assistant ()
|
||||
handleDropsFrom _ _ _ _ _ Nothing _ = noop
|
||||
handleDropsFrom locs rs reason fromhere key (Just f) knownpresentremote
|
||||
| fromhere = do
|
||||
n <- getcopies
|
||||
if checkcopies n
|
||||
then go rs =<< dropl n
|
||||
else go rs n
|
||||
| otherwise = go rs =<< getcopies
|
||||
where
|
||||
getcopies = liftAnnex $ do
|
||||
have <- length <$> trustExclude UnTrusted locs
|
||||
numcopies <- getNumCopies =<< numCopies f
|
||||
return (have, numcopies)
|
||||
checkcopies (have, numcopies) = have > numcopies
|
||||
decrcopies (have, numcopies) = (have - 1, numcopies)
|
||||
|
||||
go [] _ = noop
|
||||
go (r:rest) n
|
||||
| uuid r `S.notMember` slocs = go rest n
|
||||
| checkcopies n = dropr r n >>= go rest
|
||||
| otherwise = noop
|
||||
|
||||
checkdrop n@(have, numcopies) u a =
|
||||
ifM (liftAnnex $ wantDrop True u (Just f))
|
||||
( ifM (liftAnnex $ safely $ doCommand $ a (Just numcopies))
|
||||
( do
|
||||
debug
|
||||
[ "dropped"
|
||||
, f
|
||||
, "(from " ++ maybe "here" show u ++ ")"
|
||||
, "(copies now " ++ show (have - 1) ++ ")"
|
||||
, ": " ++ reason
|
||||
]
|
||||
return $ decrcopies n
|
||||
, return n
|
||||
)
|
||||
, return n
|
||||
)
|
||||
|
||||
dropl n = checkdrop n Nothing $ \numcopies ->
|
||||
Command.Drop.startLocal f numcopies key knownpresentremote
|
||||
|
||||
dropr r n = checkdrop n (Just $ Remote.uuid r) $ \numcopies ->
|
||||
Command.Drop.startRemote f numcopies key r
|
||||
|
||||
safely a = either (const False) id <$> tryAnnex a
|
||||
|
||||
slocs = S.fromList locs
|
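A tiny pure sketch of the copy-counting walk in handleDropsFrom above: content is only dropped from a candidate while more copies than numcopies remain. The function and remote names here are illustrative, not the assistant's API.

-- Drop from candidates while the copy count stays above numcopies.
dropWhileSafe :: Int -> Int -> [String] -> (Int, [String])
dropWhileSafe numcopies have candidates = go have candidates []
  where
    go h [] dropped = (h, reverse dropped)
    go h (c:cs) dropped
        | h > numcopies = go (h - 1) cs (c : dropped)
        | otherwise = (h, reverse dropped)

main :: IO ()
main = print $ dropWhileSafe 2 4 ["remote-a", "remote-b", "remote-c"]
-- (2,["remote-a","remote-b"]): two drops bring us down to the numcopies floor.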
26 Assistant/Environment.hs Normal file
@@ -0,0 +1,26 @@
|
|||
{- git-annex assistant environment
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
module Assistant.Environment where
|
||||
|
||||
import Assistant.Common
|
||||
import Utility.UserInfo
|
||||
import qualified Git.Config
|
||||
|
||||
import System.Posix.Env
|
||||
|
||||
{- Checks that the system's environment allows git to function.
|
||||
- Git requires a GECOS username, or suitable git configuration, or
|
||||
- environment variables. -}
|
||||
checkEnvironment :: Annex ()
|
||||
checkEnvironment = do
|
||||
username <- liftIO myUserName
|
||||
gecos <- liftIO myUserGecos
|
||||
gitusername <- fromRepo $ Git.Config.getMaybe "user.name"
|
||||
when (null gecos && (gitusername == Nothing || gitusername == Just "")) $
|
||||
-- existing environment is not overwritten
|
||||
liftIO $ setEnv "GIT_AUTHOR_NAME" username False
|
93 Assistant/Install.hs Normal file
@@ -0,0 +1,93 @@
|
|||
{- Assistant installation
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
{-# LANGUAGE CPP #-}
|
||||
|
||||
module Assistant.Install where
|
||||
|
||||
import Assistant.Common
|
||||
import Assistant.Install.AutoStart
|
||||
import Assistant.Ssh
|
||||
import Locations.UserConfig
|
||||
import Utility.FileMode
|
||||
import Utility.Shell
|
||||
|
||||
#ifdef darwin_HOST_OS
|
||||
import Utility.OSX
|
||||
#else
|
||||
import Utility.FreeDesktop
|
||||
#endif
|
||||
|
||||
import System.Posix.Env
|
||||
|
||||
standaloneAppBase :: IO (Maybe FilePath)
|
||||
standaloneAppBase = getEnv "GIT_ANNEX_APP_BASE"
|
||||
|
||||
{- The standalone app does not have an installation process.
|
||||
- So when it's run, it needs to set up autostarting of the assistant
|
||||
- daemon, as well as writing the programFile, and putting a
|
||||
- git-annex-shell wrapper into ~/.ssh
|
||||
-
|
||||
- Note that this is done every time it's started, so if the user moves
|
||||
- it around, the paths this sets up won't break.
|
||||
-}
|
||||
ensureInstalled :: IO ()
|
||||
ensureInstalled = go =<< standaloneAppBase
|
||||
where
|
||||
go Nothing = noop
|
||||
go (Just base) = do
|
||||
let program = base </> "git-annex"
|
||||
programfile <- programFile
|
||||
createDirectoryIfMissing True (parentDir programfile)
|
||||
writeFile programfile program
|
||||
|
||||
#ifdef darwin_HOST_OS
|
||||
autostartfile <- userAutoStart osxAutoStartLabel
|
||||
#else
|
||||
autostartfile <- autoStartPath "git-annex" <$> userConfigDir
|
||||
#endif
|
||||
installAutoStart program autostartfile
|
||||
|
||||
{- This shim is only updated if it doesn't
|
||||
- already exist with the right content. This
|
||||
- ensures that there's no race where it would have
|
||||
- worked, but is unavailable due to being updated. -}
|
||||
sshdir <- sshDir
|
||||
let shim = sshdir </> "git-annex-shell"
|
||||
let content = unlines
|
||||
[ shebang
|
||||
, "set -e"
|
||||
, "exec", base </> "runshell" ++
|
||||
" git-annex-shell -c \"$SSH_ORIGINAL_COMMAND\""
|
||||
]
|
||||
curr <- catchDefaultIO "" $ readFileStrict shim
|
||||
when (curr /= content) $ do
|
||||
createDirectoryIfMissing True (parentDir shim)
|
||||
writeFile shim content
|
||||
modifyFileMode shim $ addModes [ownerExecuteMode]
|
||||
|
||||
{- Returns a cleaned up environment that lacks settings used to make the
|
||||
- standalone builds use their bundled libraries and programs.
|
||||
- Useful when calling programs not included in the standalone builds.
|
||||
-
|
||||
- For a non-standalone build, returns Nothing.
|
||||
-}
|
||||
cleanEnvironment :: IO (Maybe [(String, String)])
|
||||
cleanEnvironment = clean <$> getEnvironment
|
||||
where
|
||||
clean env
|
||||
| null vars = Nothing
|
||||
| otherwise = Just $ catMaybes $ map (restoreorig env) env
|
||||
|
||||
where
|
||||
vars = words $ fromMaybe "" $
|
||||
lookup "GIT_ANNEX_STANDLONE_ENV" env
|
||||
restoreorig oldenv p@(k, _v)
|
||||
| k `elem` vars = case lookup ("ORIG_" ++ k) oldenv of
|
||||
Nothing -> Nothing
|
||||
(Just v') -> Just (k, v')
|
||||
| otherwise = Just p
|
38 Assistant/Install/AutoStart.hs Normal file
@@ -0,0 +1,38 @@
|
|||
{- Assistant autostart file installation
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
{-# LANGUAGE CPP #-}
|
||||
|
||||
module Assistant.Install.AutoStart where
|
||||
|
||||
import Utility.FreeDesktop
|
||||
#ifdef darwin_HOST_OS
|
||||
import Utility.OSX
|
||||
import Utility.Path
|
||||
import System.Directory
|
||||
#endif
|
||||
|
||||
installAutoStart :: FilePath -> FilePath -> IO ()
|
||||
installAutoStart command file = do
|
||||
#ifdef darwin_HOST_OS
|
||||
createDirectoryIfMissing True (parentDir file)
|
||||
writeFile file $ genOSXAutoStartFile osxAutoStartLabel command
|
||||
["assistant", "--autostart"]
|
||||
#else
|
||||
writeDesktopMenuFile (fdoAutostart command) file
|
||||
#endif
|
||||
|
||||
osxAutoStartLabel :: String
|
||||
osxAutoStartLabel = "com.branchable.git-annex.assistant"
|
||||
|
||||
fdoAutostart :: FilePath -> DesktopEntry
|
||||
fdoAutostart command = genDesktopEntry
|
||||
"Git Annex Assistant"
|
||||
"Autostart"
|
||||
False
|
||||
(command ++ " assistant --autostart")
|
||||
[]
|
114 Assistant/MakeRemote.hs Normal file
@@ -0,0 +1,114 @@
|
|||
{- git-annex assistant remote creation utilities
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
module Assistant.MakeRemote where
|
||||
|
||||
import Assistant.Common
|
||||
import Assistant.Ssh
|
||||
import Assistant.Sync
|
||||
import qualified Types.Remote as R
|
||||
import qualified Remote
|
||||
import Remote.List
|
||||
import qualified Remote.Rsync as Rsync
|
||||
import qualified Git
|
||||
import qualified Git.Command
|
||||
import qualified Command.InitRemote
|
||||
import Logs.UUID
|
||||
import Logs.Remote
|
||||
import Git.Remote
|
||||
|
||||
import qualified Data.Text as T
|
||||
import qualified Data.Map as M
|
||||
|
||||
{- Sets up and begins syncing with a new ssh or rsync remote. -}
|
||||
makeSshRemote :: Bool -> SshData -> Assistant Remote
|
||||
makeSshRemote forcersync sshdata = do
|
||||
r <- liftAnnex $
|
||||
addRemote $ maker (sshRepoName sshdata) sshurl
|
||||
syncNewRemote r
|
||||
return r
|
||||
where
|
||||
rsync = forcersync || rsyncOnly sshdata
|
||||
maker
|
||||
| rsync = makeRsyncRemote
|
||||
| otherwise = makeGitRemote
|
||||
sshurl = T.unpack $ T.concat $
|
||||
if rsync
|
||||
then [u, h, T.pack ":", sshDirectory sshdata, T.pack "/"]
|
||||
else [T.pack "ssh://", u, h, d, T.pack "/"]
|
||||
where
|
||||
u = maybe (T.pack "") (\v -> T.concat [v, T.pack "@"]) $ sshUserName sshdata
|
||||
h = sshHostName sshdata
|
||||
d
|
||||
| T.pack "/" `T.isPrefixOf` sshDirectory sshdata = sshDirectory sshdata
|
||||
| otherwise = T.concat [T.pack "/~/", sshDirectory sshdata]
|
||||
|
||||
{- Runs an action that returns a name of the remote, and finishes adding it. -}
|
||||
addRemote :: Annex String -> Annex Remote
|
||||
addRemote a = do
|
||||
name <- a
|
||||
void remoteListRefresh
|
||||
maybe (error "failed to add remote") return =<< Remote.byName (Just name)
|
||||
|
||||
{- Inits a rsync special remote, and returns its name. -}
|
||||
makeRsyncRemote :: String -> String -> Annex String
|
||||
makeRsyncRemote name location = makeRemote name location $
|
||||
const $ makeSpecialRemote name Rsync.remote config
|
||||
where
|
||||
config = M.fromList
|
||||
[ ("encryption", "shared")
|
||||
, ("rsyncurl", location)
|
||||
, ("type", "rsync")
|
||||
]
|
||||
|
||||
{- Inits a special remote. -}
|
||||
makeSpecialRemote :: String -> RemoteType -> R.RemoteConfig -> Annex ()
|
||||
makeSpecialRemote name remotetype config = do
|
||||
(u, c) <- Command.InitRemote.findByName name
|
||||
c' <- R.setup remotetype u $ M.union config c
|
||||
describeUUID u name
|
||||
configSet u c'
|
||||
|
||||
{- Returns the name of the git remote it created. If there's already a
|
||||
- remote at the location, returns its name. -}
|
||||
makeGitRemote :: String -> String -> Annex String
|
||||
makeGitRemote basename location = makeRemote basename location $ \name ->
|
||||
void $ inRepo $ Git.Command.runBool
|
||||
[Param "remote", Param "add", Param name, Param location]
|
||||
|
||||
{- If there's not already a remote at the location, adds it using the
|
||||
- action, which is passed the name of the remote to make.
|
||||
-
|
||||
- Returns the name of the remote. -}
|
||||
makeRemote :: String -> String -> (String -> Annex ()) -> Annex String
|
||||
makeRemote basename location a = do
|
||||
g <- gitRepo
|
||||
if not (any samelocation $ Git.remotes g)
|
||||
then do
|
||||
|
||||
let name = uniqueRemoteName basename 0 g
|
||||
a name
|
||||
return name
|
||||
else return basename
|
||||
where
|
||||
samelocation x = Git.repoLocation x == location
|
||||
|
||||
{- Generate an unused name for a remote, adding a number if
|
||||
- necessary.
|
||||
-
|
||||
- Ensures that the returned name is a legal git remote name. -}
|
||||
uniqueRemoteName :: String -> Int -> Git.Repo -> String
|
||||
uniqueRemoteName basename n r
|
||||
| null namecollision = name
|
||||
| otherwise = uniqueRemoteName legalbasename (succ n) r
|
||||
where
|
||||
namecollision = filter samename (Git.remotes r)
|
||||
samename x = Git.remoteName x == Just name
|
||||
name
|
||||
| n == 0 = legalbasename
|
||||
| otherwise = legalbasename ++ show n
|
||||
legalbasename = makeLegalName basename
|
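A pure sketch of the uniqueRemoteName scheme above: sanitize the base name, then append an increasing number until no existing remote uses it. The character filter stands in for makeLegalName and is an assumption, not git-annex's actual rule.

import Data.Char (isAlphaNum)

-- Simplified stand-in for Git.Remote.makeLegalName.
legalName :: String -> String
legalName = filter (\c -> isAlphaNum c || c `elem` "_-")

uniqueName :: String -> [String] -> String
uniqueName basename existing = go (0 :: Int)
  where
    base = legalName basename
    go n
        | candidate `notElem` existing = candidate
        | otherwise = go (succ n)
      where
        candidate
            | n == 0 = base
            | otherwise = base ++ show n

main :: IO ()
main = putStrLn $ uniqueName "my server" ["myserver", "myserver1"]  -- myserver2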
136 Assistant/Monad.hs Normal file
@@ -0,0 +1,136 @@
|
|||
{- git-annex assistant monad
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
{-# LANGUAGE PackageImports, GeneralizedNewtypeDeriving, MultiParamTypeClasses #-}
|
||||
|
||||
module Assistant.Monad (
|
||||
Assistant,
|
||||
AssistantData(..),
|
||||
newAssistantData,
|
||||
runAssistant,
|
||||
getAssistant,
|
||||
liftAnnex,
|
||||
(<~>),
|
||||
(<<~),
|
||||
asIO,
|
||||
asIO1,
|
||||
asIO2,
|
||||
ThreadName,
|
||||
debug,
|
||||
notice
|
||||
) where
|
||||
|
||||
import "mtl" Control.Monad.Reader
|
||||
import Control.Monad.Base (liftBase, MonadBase)
|
||||
import System.Log.Logger
|
||||
|
||||
import Common.Annex
|
||||
import Assistant.Types.ThreadedMonad
|
||||
import Assistant.Types.DaemonStatus
|
||||
import Assistant.Types.ScanRemotes
|
||||
import Assistant.Types.TransferQueue
|
||||
import Assistant.Types.TransferSlots
|
||||
import Assistant.Types.Pushes
|
||||
import Assistant.Types.BranchChange
|
||||
import Assistant.Types.Commits
|
||||
import Assistant.Types.Changes
|
||||
import Assistant.Types.Buddies
|
||||
import Assistant.Types.NetMessager
|
||||
import Assistant.Types.ThreadName
|
||||
|
||||
newtype Assistant a = Assistant { mkAssistant :: ReaderT AssistantData IO a }
|
||||
deriving (
|
||||
Monad,
|
||||
MonadIO,
|
||||
MonadReader AssistantData,
|
||||
Functor,
|
||||
Applicative
|
||||
)
|
||||
|
||||
instance MonadBase IO Assistant where
|
||||
liftBase = Assistant . liftBase
|
||||
|
||||
data AssistantData = AssistantData
|
||||
{ threadName :: ThreadName
|
||||
, threadState :: ThreadState
|
||||
, daemonStatusHandle :: DaemonStatusHandle
|
||||
, scanRemoteMap :: ScanRemoteMap
|
||||
, transferQueue :: TransferQueue
|
||||
, transferSlots :: TransferSlots
|
||||
, failedPushMap :: FailedPushMap
|
||||
, commitChan :: CommitChan
|
||||
, changeChan :: ChangeChan
|
||||
, branchChangeHandle :: BranchChangeHandle
|
||||
, buddyList :: BuddyList
|
||||
, netMessager :: NetMessager
|
||||
}
|
||||
|
||||
newAssistantData :: ThreadState -> DaemonStatusHandle -> IO AssistantData
|
||||
newAssistantData st dstatus = AssistantData
|
||||
<$> pure (ThreadName "main")
|
||||
<*> pure st
|
||||
<*> pure dstatus
|
||||
<*> newScanRemoteMap
|
||||
<*> newTransferQueue
|
||||
<*> newTransferSlots
|
||||
<*> newFailedPushMap
|
||||
<*> newCommitChan
|
||||
<*> newChangeChan
|
||||
<*> newBranchChangeHandle
|
||||
<*> newBuddyList
|
||||
<*> newNetMessager
|
||||
|
||||
runAssistant :: AssistantData -> Assistant a -> IO a
|
||||
runAssistant d a = runReaderT (mkAssistant a) d
|
||||
|
||||
getAssistant :: (AssistantData -> a) -> Assistant a
|
||||
getAssistant = reader
|
||||
|
||||
{- Runs an action in the git-annex monad. Note that the same monad state
|
||||
- is shared among all assistant threads, so only one of these can run at
|
||||
- a time. Therefore, long-duration actions should be avoided. -}
|
||||
liftAnnex :: Annex a -> Assistant a
|
||||
liftAnnex a = do
|
||||
st <- reader threadState
|
||||
liftIO $ runThreadState st a
|
||||
|
||||
{- Runs an IO action, passing it an IO action that runs an Assistant action. -}
|
||||
(<~>) :: (IO a -> IO b) -> Assistant a -> Assistant b
|
||||
io <~> a = do
|
||||
d <- reader id
|
||||
liftIO $ io $ runAssistant d a
|
||||
|
||||
{- Creates an IO action that will run an Assistant action when run. -}
|
||||
asIO :: Assistant a -> Assistant (IO a)
|
||||
asIO a = do
|
||||
d <- reader id
|
||||
return $ runAssistant d a
|
||||
|
||||
asIO1 :: (a -> Assistant b) -> Assistant (a -> IO b)
|
||||
asIO1 a = do
|
||||
d <- reader id
|
||||
return $ \v -> runAssistant d $ a v
|
||||
|
||||
asIO2 :: (a -> b -> Assistant c) -> Assistant (a -> b -> IO c)
|
||||
asIO2 a = do
|
||||
d <- reader id
|
||||
return $ \v1 v2 -> runAssistant d (a v1 v2)
|
||||
|
||||
{- Runs an IO action on a selected field of the AssistantData. -}
|
||||
(<<~) :: (a -> IO b) -> (AssistantData -> a) -> Assistant b
|
||||
io <<~ v = reader v >>= liftIO . io
|
||||
|
||||
debug :: [String] -> Assistant ()
|
||||
debug = logaction debugM
|
||||
|
||||
notice :: [String] -> Assistant ()
|
||||
notice = logaction noticeM
|
||||
|
||||
logaction :: (String -> String -> IO ()) -> [String] -> Assistant ()
|
||||
logaction a ws = do
|
||||
ThreadName name <- getAssistant threadName
|
||||
liftIO $ a name $ unwords $ (name ++ ":") : ws
|
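Outside the assistant, the (<<~) pattern above boils down to "run an IO action on one field pulled from a ReaderT environment". A minimal sketch, assuming only the mtl boot library; the Env type and field name are made up for illustration.

import Control.Monad.IO.Class (liftIO)
import Control.Monad.Reader

-- Illustrative environment; the real AssistantData carries channels, maps, etc.
data Env = Env { greeting :: String }

-- Same shape as the assistant's (<<~): select a field, hand it to an IO action.
(<<~) :: (a -> IO b) -> (Env -> a) -> ReaderT Env IO b
io <<~ v = asks v >>= liftIO . io

main :: IO ()
main = runReaderT (putStrLn <<~ greeting) (Env "hello from the environment")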
102 Assistant/NamedThread.hs Normal file
@@ -0,0 +1,102 @@
|
|||
{- git-annex assistant named threads.
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
{-# LANGUAGE CPP #-}
|
||||
|
||||
module Assistant.NamedThread where
|
||||
|
||||
import Common.Annex
|
||||
import Assistant.Types.NamedThread
|
||||
import Assistant.Types.ThreadName
|
||||
import Assistant.Types.DaemonStatus
|
||||
import Assistant.DaemonStatus
|
||||
import Assistant.Monad
|
||||
|
||||
import Control.Concurrent
|
||||
import Control.Concurrent.Async
|
||||
import qualified Data.Map as M
|
||||
import qualified Control.Exception as E
|
||||
|
||||
#ifdef WITH_WEBAPP
|
||||
import Assistant.WebApp
|
||||
import Assistant.WebApp.Types
|
||||
import Assistant.Alert
|
||||
import qualified Data.Text as T
|
||||
#endif
|
||||
|
||||
{- Starts a named thread, if it's not already running.
|
||||
-
|
||||
- Named threads are run by a management thread, so if they crash
|
||||
- an alert is displayed, allowing the thread to be restarted. -}
|
||||
#ifdef WITH_WEBAPP
|
||||
startNamedThread :: Maybe UrlRenderer -> NamedThread -> Assistant ()
|
||||
startNamedThread urlrenderer namedthread@(NamedThread name a) = do
|
||||
#else
|
||||
startNamedThread :: Maybe Bool -> NamedThread -> Assistant ()
|
||||
startNamedThread urlrenderer namedthread@(NamedThread name a) = do
|
||||
#endif
|
||||
m <- startedThreads <$> getDaemonStatus
|
||||
case M.lookup name m of
|
||||
Nothing -> start
|
||||
Just (aid, _) -> do
|
||||
r <- liftIO (E.try (poll aid) :: IO (Either E.SomeException (Maybe (Either E.SomeException ()))))
|
||||
case r of
|
||||
Right Nothing -> noop
|
||||
_ -> start
|
||||
where
|
||||
start = do
|
||||
d <- getAssistant id
|
||||
aid <- liftIO $ runmanaged $ d { threadName = name }
|
||||
restart <- asIO $ startNamedThread urlrenderer namedthread
|
||||
modifyDaemonStatus_ $ \s -> s
|
||||
{ startedThreads = M.insertWith' const name (aid, restart) (startedThreads s) }
|
||||
runmanaged d = do
|
||||
aid <- async $ runAssistant d a
|
||||
void $ forkIO $ manager d aid
|
||||
return aid
|
||||
manager d aid = do
|
||||
r <- E.try (wait aid) :: IO (Either E.SomeException ())
|
||||
case r of
|
||||
Right _ -> noop
|
||||
Left e -> do
|
||||
let msg = unwords
|
||||
[ fromThreadName $ threadName d
|
||||
, "crashed:", show e
|
||||
]
|
||||
hPutStrLn stderr msg
|
||||
#ifdef WITH_WEBAPP
|
||||
button <- runAssistant d $
|
||||
case urlrenderer of
|
||||
Nothing -> return Nothing
|
||||
Just renderer -> do
|
||||
close <- asIO1 removeAlert
|
||||
url <- liftIO $ renderUrl renderer (RestartThreadR name) []
|
||||
return $ Just $ AlertButton
|
||||
{ buttonLabel = T.pack "Restart Thread"
|
||||
, buttonUrl = url
|
||||
, buttonAction = Just close
|
||||
}
|
||||
runAssistant d $ void $
|
||||
addAlert $ (warningAlert (fromThreadName name) msg)
|
||||
{ alertButton = button }
|
||||
#endif
|
||||
|
||||
namedThreadId :: NamedThread -> Assistant (Maybe ThreadId)
|
||||
namedThreadId (NamedThread name _) = do
|
||||
m <- startedThreads <$> getDaemonStatus
|
||||
return $ asyncThreadId . fst <$> M.lookup name m
|
||||
|
||||
{- Waits for all named threads that have been started to finish.
|
||||
-
|
||||
- Note that if a named thread crashes, it will probably
|
||||
- cause this to crash as well. Also, named threads that are started
|
||||
- after this is called will not be waited on. -}
|
||||
waitNamedThreads :: Assistant ()
|
||||
waitNamedThreads = do
|
||||
m <- startedThreads <$> getDaemonStatus
|
||||
liftIO $ mapM_ (wait . fst) $ M.elems m
|
||||
|
95 Assistant/NetMessager.hs Normal file
@@ -0,0 +1,95 @@
|
|||
{- git-annex assistant out of band network messager interface
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
module Assistant.NetMessager where
|
||||
|
||||
import Assistant.Common
|
||||
import Assistant.Types.NetMessager
|
||||
|
||||
import Control.Concurrent
|
||||
import Control.Concurrent.STM
|
||||
import Control.Concurrent.MSampleVar
|
||||
import Control.Exception as E
|
||||
import qualified Data.Set as S
|
||||
|
||||
sendNetMessage :: NetMessage -> Assistant ()
|
||||
sendNetMessage m =
|
||||
(atomically . flip writeTChan m) <<~ (netMessages . netMessager)
|
||||
|
||||
waitNetMessage :: Assistant (NetMessage)
|
||||
waitNetMessage = (atomically . readTChan) <<~ (netMessages . netMessager)
|
||||
|
||||
notifyNetMessagerRestart :: Assistant ()
|
||||
notifyNetMessagerRestart =
|
||||
flip writeSV () <<~ (netMessagerRestart . netMessager)
|
||||
|
||||
waitNetMessagerRestart :: Assistant ()
|
||||
waitNetMessagerRestart = readSV <<~ (netMessagerRestart . netMessager)
|
||||
|
||||
{- Runs an action that runs either the send or receive side of a push.
|
||||
-
|
||||
- While the push is running, netMessagesPush will get messages put into it
|
||||
- relating to this push, while any messages relating to other pushes
|
||||
- on the same side go to netMessagesDeferred. Once the push finishes,
|
||||
- those deferred messages will be fed to handledeferred for processing.
|
||||
-}
|
||||
runPush :: PushSide -> ClientID -> (NetMessage -> Assistant ()) -> Assistant a -> Assistant a
|
||||
runPush side clientid handledeferred a = do
|
||||
nm <- getAssistant netMessager
|
||||
let runningv = getSide side $ netMessagerPushRunning nm
|
||||
let setup = void $ atomically $ swapTMVar runningv $ Just clientid
|
||||
let cleanup = atomically $ do
|
||||
void $ swapTMVar runningv Nothing
|
||||
emptytchan (getSide side $ netMessagesPush nm)
|
||||
r <- E.bracket_ setup cleanup <~> a
|
||||
(void . forkIO) <~> processdeferred nm
|
||||
return r
|
||||
where
|
||||
emptytchan c = maybe noop (const $ emptytchan c) =<< tryReadTChan c
|
||||
processdeferred nm = do
|
||||
s <- liftIO $ atomically $ swapTMVar (getSide side $ netMessagesPushDeferred nm) S.empty
|
||||
mapM_ rundeferred (S.toList s)
|
||||
rundeferred m = (void . (E.try :: (IO () -> IO (Either SomeException ()))))
|
||||
<~> handledeferred m
|
||||
|
||||
{- While a push is running, matching push messages are put into
|
||||
- netMessagesPush, while others that involve the same side go to
|
||||
- netMessagesPushDeferred.
|
||||
-
|
||||
- When no push is running involving the same side, returns False.
|
||||
-
|
||||
- To avoid bloating memory, only messages that initiate pushes are
|
||||
- deferred.
|
||||
-}
|
||||
queueNetPushMessage :: NetMessage -> Assistant Bool
|
||||
queueNetPushMessage m@(Pushing clientid stage) = do
|
||||
nm <- getAssistant netMessager
|
||||
liftIO $ atomically $ do
|
||||
v <- readTMVar (getSide side $ netMessagerPushRunning nm)
|
||||
case v of
|
||||
Nothing -> return False
|
||||
(Just runningclientid)
|
||||
| runningclientid == clientid -> queue nm
|
||||
| isPushInitiation stage -> defer nm
|
||||
| otherwise -> discard
|
||||
where
|
||||
side = pushDestinationSide stage
|
||||
queue nm = do
|
||||
writeTChan (getSide side $ netMessagesPush nm) m
|
||||
return True
|
||||
defer nm = do
|
||||
let mv = getSide side $ netMessagesPushDeferred nm
|
||||
s <- takeTMVar mv
|
||||
putTMVar mv $ S.insert m s
|
||||
return True
|
||||
discard = return True
|
||||
queueNetPushMessage _ = return False
|
||||
|
||||
waitNetPushMessage :: PushSide -> Assistant (NetMessage)
|
||||
waitNetPushMessage side = (atomically . readTChan)
|
||||
<<~ (getSide side . netMessagesPush . netMessager)
|
||||
|
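The cleanup in runPush above drains the push channel with repeated tryReadTChan calls. A small runnable sketch of that drain pattern, assuming only the stm boot package; here the drained values are collected rather than discarded.

import Control.Concurrent.STM

-- Read everything currently buffered in a TChan, stopping when it is empty.
drainTChan :: TChan a -> STM [a]
drainTChan c = do
    v <- tryReadTChan c
    case v of
        Nothing -> return []
        Just x  -> (x :) <$> drainTChan c

main :: IO ()
main = do
    c <- newTChanIO
    atomically $ mapM_ (writeTChan c) ["push-request", "push-stage", "push-done"]
    print =<< atomically (drainTChan c)    -- ["push-request","push-stage","push-done"]
    print =<< atomically (isEmptyTChan c)  -- True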
92 Assistant/Pairing.hs Normal file
@@ -0,0 +1,92 @@
|
|||
{- git-annex assistant repo pairing, core data types
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
{-# LANGUAGE CPP #-}
|
||||
|
||||
module Assistant.Pairing where
|
||||
|
||||
import Common.Annex
|
||||
import Utility.Verifiable
|
||||
import Assistant.Ssh
|
||||
|
||||
import Control.Concurrent
|
||||
import Network.Socket
|
||||
import Data.Char
|
||||
import qualified Data.Text as T
|
||||
|
||||
data PairStage
|
||||
{- "I'll pair with anybody who shares the secret that can be used
|
||||
- to verify this request." -}
|
||||
= PairReq
|
||||
{- "I've verified your request, and you can verify this to see
|
||||
- that I know the secret. I set up your ssh key already.
|
||||
- Here's mine for you to set up." -}
|
||||
| PairAck
|
||||
{- "I saw your PairAck; you can stop sending them." -}
|
||||
| PairDone
|
||||
deriving (Eq, Read, Show, Ord)
|
||||
|
||||
newtype PairMsg = PairMsg (Verifiable (PairStage, PairData, SomeAddr))
|
||||
deriving (Eq, Read, Show)
|
||||
|
||||
verifiedPairMsg :: PairMsg -> PairingInProgress -> Bool
|
||||
verifiedPairMsg (PairMsg m) pip = verify m $ inProgressSecret pip
|
||||
|
||||
fromPairMsg :: PairMsg -> Verifiable (PairStage, PairData, SomeAddr)
|
||||
fromPairMsg (PairMsg m) = m
|
||||
|
||||
pairMsgStage :: PairMsg -> PairStage
|
||||
pairMsgStage (PairMsg (Verifiable (s, _, _) _)) = s
|
||||
|
||||
pairMsgData :: PairMsg -> PairData
|
||||
pairMsgData (PairMsg (Verifiable (_, d, _) _)) = d
|
||||
|
||||
pairMsgAddr :: PairMsg -> SomeAddr
|
||||
pairMsgAddr (PairMsg (Verifiable (_, _, a) _)) = a
|
||||
|
||||
data PairData = PairData
|
||||
-- uname -n output, not a full domain name
|
||||
{ remoteHostName :: Maybe HostName
|
||||
, remoteUserName :: UserName
|
||||
, remoteDirectory :: FilePath
|
||||
, remoteSshPubKey :: SshPubKey
|
||||
, pairUUID :: UUID
|
||||
}
|
||||
deriving (Eq, Read, Show)
|
||||
|
||||
type UserName = String
|
||||
|
||||
{- A pairing that is in progress has a secret, a thread that is
|
||||
- broadcasting pairing messages, and a SshKeyPair that has not yet been
|
||||
- set up on disk. -}
|
||||
data PairingInProgress = PairingInProgress
|
||||
{ inProgressSecret :: Secret
|
||||
, inProgressThreadId :: Maybe ThreadId
|
||||
, inProgressSshKeyPair :: SshKeyPair
|
||||
, inProgressPairData :: PairData
|
||||
, inProgressPairStage :: PairStage
|
||||
}
|
||||
deriving (Show)
|
||||
|
||||
data SomeAddr = IPv4Addr HostAddress
|
||||
{- My Android build of the Network library does not currently have IPV6
|
||||
- support. -}
|
||||
#ifndef __ANDROID__
|
||||
| IPv6Addr HostAddress6
|
||||
#endif
|
||||
deriving (Ord, Eq, Read, Show)
|
||||
|
||||
{- This contains the whole secret, just lightly obfuscated to make it not
|
||||
- too obvious. It's only displayed in the user's web browser. -}
|
||||
newtype SecretReminder = SecretReminder [Int]
|
||||
deriving (Show, Eq, Ord, Read)
|
||||
|
||||
toSecretReminder :: T.Text -> SecretReminder
|
||||
toSecretReminder = SecretReminder . map ord . T.unpack
|
||||
|
||||
fromSecretReminder :: SecretReminder -> T.Text
|
||||
fromSecretReminder (SecretReminder s) = T.pack $ map chr s
|
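The SecretReminder round trip above is just character codes, so toSecretReminder and fromSecretReminder are inverses. A tiny pure demonstration, using String instead of Data.Text to stay dependency-free.

import Data.Char (chr, ord)

toReminder :: String -> [Int]
toReminder = map ord

fromReminder :: [Int] -> String
fromReminder = map chr

main :: IO ()
main = do
    let s = toReminder "s3cret"
    print s                    -- [115,51,99,114,101,116]
    putStrLn (fromReminder s)  -- s3cret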
90 Assistant/Pairing/MakeRemote.hs Normal file
@@ -0,0 +1,90 @@
|
|||
{- git-annex assistant pairing remote creation
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
module Assistant.Pairing.MakeRemote where
|
||||
|
||||
import Assistant.Common
|
||||
import Assistant.Ssh
|
||||
import Assistant.Pairing
|
||||
import Assistant.Pairing.Network
|
||||
import Assistant.MakeRemote
|
||||
|
||||
import Network.Socket
|
||||
import qualified Data.Text as T
|
||||
|
||||
{- Authorized keys are set up before pairing is complete, so that the other
|
||||
- side can immediately begin syncing. -}
|
||||
setupAuthorizedKeys :: PairMsg -> FilePath -> IO ()
|
||||
setupAuthorizedKeys msg repodir = do
|
||||
validateSshPubKey pubkey
|
||||
unlessM (liftIO $ addAuthorizedKeys False repodir pubkey) $
|
||||
error "failed setting up ssh authorized keys"
|
||||
where
|
||||
pubkey = remoteSshPubKey $ pairMsgData msg
|
||||
|
||||
{- When local pairing is complete, this is used to set up the remote for
|
||||
- the host we paired with. -}
|
||||
finishedLocalPairing :: PairMsg -> SshKeyPair -> Assistant ()
|
||||
finishedLocalPairing msg keypair = do
|
||||
sshdata <- liftIO $ setupSshKeyPair keypair =<< pairMsgToSshData msg
|
||||
{- Ensure that we know the ssh host key for the host we paired with.
|
||||
- If we don't, ssh over to get it. -}
|
||||
liftIO $ unlessM (knownHost $ sshHostName sshdata) $
|
||||
void $ sshTranscript
|
||||
[ sshOpt "StrictHostKeyChecking" "no"
|
||||
, sshOpt "NumberOfPasswordPrompts" "0"
|
||||
, "-n"
|
||||
, genSshHost (sshHostName sshdata) (sshUserName sshdata)
|
||||
, "git-annex-shell -c configlist " ++ T.unpack (sshDirectory sshdata)
|
||||
]
|
||||
Nothing
|
||||
void $ makeSshRemote False sshdata
|
||||
|
||||
{- Mostly a straightforward conversion. Except:
|
||||
- * Determine the best hostname to use to contact the host.
|
||||
- * Strip leading ~/ from the directory name.
|
||||
-}
|
||||
pairMsgToSshData :: PairMsg -> IO SshData
|
||||
pairMsgToSshData msg = do
|
||||
let d = pairMsgData msg
|
||||
hostname <- liftIO $ bestHostName msg
|
||||
let dir = case remoteDirectory d of
|
||||
('~':'/':v) -> v
|
||||
v -> v
|
||||
return SshData
|
||||
{ sshHostName = T.pack hostname
|
||||
, sshUserName = Just (T.pack $ remoteUserName d)
|
||||
, sshDirectory = T.pack dir
|
||||
, sshRepoName = genSshRepoName hostname dir
|
||||
, sshPort = 22
|
||||
, needsPubKey = True
|
||||
, rsyncOnly = False
|
||||
}
|
||||
|
||||
{- Finds the best hostname to use for the host that sent the PairMsg.
|
||||
-
|
||||
- If remoteHostName is set, tries to use a .local address based on it.
|
||||
- That's the most robust, if this system supports .local.
|
||||
- Otherwise, looks up the hostname in the DNS for the remoteAddress,
|
||||
- if any. May fall back to remoteAddress if there's no DNS. Ugh. -}
|
||||
bestHostName :: PairMsg -> IO HostName
|
||||
bestHostName msg = case remoteHostName $ pairMsgData msg of
|
||||
Just h -> do
|
||||
let localname = h ++ ".local"
|
||||
addrs <- catchDefaultIO [] $
|
||||
getAddrInfo Nothing (Just localname) Nothing
|
||||
maybe fallback (const $ return localname) (headMaybe addrs)
|
||||
Nothing -> fallback
|
||||
where
|
||||
fallback = do
|
||||
let a = pairMsgAddr msg
|
||||
let sockaddr = case a of
|
||||
IPv4Addr addr -> SockAddrInet (PortNum 0) addr
|
||||
IPv6Addr addr -> SockAddrInet6 (PortNum 0) 0 addr 0
|
||||
fromMaybe (showAddr a)
|
||||
<$> catchDefaultIO Nothing
|
||||
(fst <$> getNameInfo [] True False sockaddr)
|
130 Assistant/Pairing/Network.hs Normal file
@@ -0,0 +1,130 @@
|
|||
{- git-annex assistant pairing network code
|
||||
-
|
||||
- All network traffic is sent over multicast UDP. For reliability,
|
||||
- each message is repeated until acknowledged. This is done using a
|
||||
- thread, that gets stopped before the next message is sent.
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
module Assistant.Pairing.Network where
|
||||
|
||||
import Assistant.Common
|
||||
import Assistant.Pairing
|
||||
import Assistant.DaemonStatus
|
||||
import Utility.ThreadScheduler
|
||||
import Utility.Verifiable
|
||||
|
||||
import Network.Multicast
|
||||
import Network.Info
|
||||
import Network.Socket
|
||||
import Control.Exception (bracket)
|
||||
import qualified Data.Map as M
|
||||
import Control.Concurrent
|
||||
|
||||
{- This is an arbitrary port in the dynamic port range, that could
|
||||
- conceivably be used for some other broadcast messages.
|
||||
- If so, hope they ignore the garbage from us; we'll certainly
|
||||
- ignore garbage from them. Wild wild west. -}
|
||||
pairingPort :: PortNumber
|
||||
pairingPort = 55556
|
||||
|
||||
{- Goal: Reach all hosts on the same network segment.
|
||||
- Method: Use same address that avahi uses. Other broadcast addresses seem
|
||||
- to not be let through some routers. -}
|
||||
multicastAddress :: SomeAddr -> HostName
|
||||
multicastAddress (IPv4Addr _) = "224.0.0.251"
|
||||
multicastAddress (IPv6Addr _) = "ff02::fb"
|
||||
|
||||
{- Multicasts a message repeatedly on all interfaces, with a 2 second
|
||||
- delay between each transmission. The message is repeated forever
|
||||
- unless a number of repeats is specified.
|
||||
-
|
||||
- The remoteHostAddress is set to the interface's IP address.
|
||||
-
|
||||
- Note that new sockets are opened each time. This is hardly efficient,
|
||||
- but it allows new network interfaces to be used as they come up.
|
||||
- On the other hand, the expensive DNS lookups are cached.
|
||||
-}
|
||||
multicastPairMsg :: Maybe Int -> Secret -> PairData -> PairStage -> IO ()
|
||||
multicastPairMsg repeats secret pairdata stage = go M.empty repeats
|
||||
where
|
||||
go _ (Just 0) = noop
|
||||
go cache n = do
|
||||
addrs <- activeNetworkAddresses
|
||||
let cache' = updatecache cache addrs
|
||||
mapM_ (sendinterface cache') addrs
|
||||
threadDelaySeconds (Seconds 2)
|
||||
go cache' $ pred <$> n
|
||||
{- The multicast library currently chokes on ipv6 addresses. -}
|
||||
sendinterface _ (IPv6Addr _) = noop
|
||||
sendinterface cache i = void $ tryIO $
|
||||
withSocketsDo $ bracket setup cleanup use
|
||||
where
|
||||
setup = multicastSender (multicastAddress i) pairingPort
|
||||
cleanup (sock, _) = sClose sock -- FIXME does not work
|
||||
use (sock, addr) = do
|
||||
setInterface sock (showAddr i)
|
||||
maybe noop (\s -> void $ sendTo sock s addr)
|
||||
(M.lookup i cache)
|
||||
updatecache cache [] = cache
|
||||
updatecache cache (i:is)
|
||||
| M.member i cache = updatecache cache is
|
||||
| otherwise = updatecache (M.insert i (show $ mkmsg i) cache) is
|
||||
mkmsg addr = PairMsg $
|
||||
mkVerifiable (stage, pairdata, addr) secret
|
||||
|
||||
startSending :: PairingInProgress -> PairStage -> (PairStage -> IO ()) -> Assistant ()
|
||||
startSending pip stage sender = do
|
||||
a <- asIO start
|
||||
void $ liftIO $ forkIO a
|
||||
where
|
||||
start = do
|
||||
tid <- liftIO myThreadId
|
||||
let pip' = pip { inProgressPairStage = stage, inProgressThreadId = Just tid }
|
||||
oldpip <- modifyDaemonStatus $
|
||||
\s -> (s { pairingInProgress = Just pip' }, pairingInProgress s)
|
||||
maybe noop stopold oldpip
|
||||
liftIO $ sender stage
|
||||
stopold = maybe noop (liftIO . killThread) . inProgressThreadId
|
||||
|
||||
stopSending :: PairingInProgress -> Assistant ()
|
||||
stopSending pip = do
|
||||
maybe noop (liftIO . killThread) $ inProgressThreadId pip
|
||||
modifyDaemonStatus_ $ \s -> s { pairingInProgress = Nothing }
|
||||
|
||||
class ToSomeAddr a where
|
||||
toSomeAddr :: a -> SomeAddr
|
||||
|
||||
instance ToSomeAddr IPv4 where
|
||||
toSomeAddr (IPv4 a) = IPv4Addr a
|
||||
|
||||
instance ToSomeAddr IPv6 where
|
||||
toSomeAddr (IPv6 o1 o2 o3 o4) = IPv6Addr (o1, o2, o3, o4)
|
||||
|
||||
showAddr :: SomeAddr -> HostName
|
||||
showAddr (IPv4Addr a) = show $ IPv4 a
|
||||
showAddr (IPv6Addr (o1, o2, o3, o4)) = show $ IPv6 o1 o2 o3 o4
|
||||
|
||||
activeNetworkAddresses :: IO [SomeAddr]
|
||||
activeNetworkAddresses = filter (not . all (`elem` "0.:") . showAddr)
|
||||
. concatMap (\ni -> [toSomeAddr $ ipv4 ni, toSomeAddr $ ipv6 ni])
|
||||
<$> getNetworkInterfaces
|
||||
|
||||
{- A human-visible description of the repository being paired with.
|
||||
- Note that the repository's description is not shown to the user, because
|
||||
- it could be something like "my repo", which is confusing when pairing
|
||||
- with someone else's repo. However, this has the same format as the
|
||||
- default description of a repo. -}
|
||||
pairRepo :: PairMsg -> String
|
||||
pairRepo msg = concat
|
||||
[ remoteUserName d
|
||||
, "@"
|
||||
, fromMaybe (showAddr $ pairMsgAddr msg) (remoteHostName d)
|
||||
, ":"
|
||||
, remoteDirectory d
|
||||
]
|
||||
where
|
||||
d = pairMsgData msg
|
40 Assistant/Pushes.hs Normal file
@@ -0,0 +1,40 @@
|
|||
{- git-annex assistant push tracking
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
module Assistant.Pushes where
|
||||
|
||||
import Assistant.Common
|
||||
import Assistant.Types.Pushes
|
||||
|
||||
import Control.Concurrent.STM
|
||||
import Data.Time.Clock
|
||||
import qualified Data.Map as M
|
||||
|
||||
{- Blocks until there are failed pushes.
|
||||
- Returns Remotes whose pushes failed a given time duration or more ago.
|
||||
- (This may be an empty list.) -}
|
||||
getFailedPushesBefore :: NominalDiffTime -> Assistant [Remote]
|
||||
getFailedPushesBefore duration = do
|
||||
v <- getAssistant failedPushMap
|
||||
liftIO $ do
|
||||
m <- atomically $ readTMVar v
|
||||
now <- getCurrentTime
|
||||
return $ M.keys $ M.filter (not . toorecent now) m
|
||||
where
|
||||
toorecent now time = now `diffUTCTime` time < duration
|
||||
|
||||
{- Modifies the map. -}
|
||||
changeFailedPushMap :: (PushMap -> PushMap) -> Assistant ()
|
||||
changeFailedPushMap a = do
|
||||
v <- getAssistant failedPushMap
|
||||
liftIO $ atomically $ store v . a . fromMaybe M.empty =<< tryTakeTMVar v
|
||||
where
|
||||
{- tryTakeTMVar empties the TMVar; refill it only if
|
||||
- the modified map is not itself empty -}
|
||||
store v m
|
||||
| m == M.empty = noop
|
||||
| otherwise = putTMVar v $! m
|
41
Assistant/ScanRemotes.hs
Normal file
|
@ -0,0 +1,41 @@
|
|||
{- git-annex assistant remotes needing scanning
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
module Assistant.ScanRemotes where
|
||||
|
||||
import Assistant.Common
|
||||
import Assistant.Types.ScanRemotes
|
||||
import qualified Types.Remote as Remote
|
||||
|
||||
import Data.Function
|
||||
import Control.Concurrent.STM
|
||||
import qualified Data.Map as M
|
||||
|
||||
{- Blocks until there is a remote or remotes that need to be scanned.
|
||||
-
|
||||
- The list has higher priority remotes listed first. -}
|
||||
getScanRemote :: Assistant [(Remote, ScanInfo)]
|
||||
getScanRemote = do
|
||||
v <- getAssistant scanRemoteMap
|
||||
liftIO $ atomically $
|
||||
reverse . sortBy (compare `on` scanPriority . snd) . M.toList
|
||||
<$> takeTMVar v
|
||||
|
||||
{- Adds new remotes that need scanning. -}
|
||||
addScanRemotes :: Bool -> [Remote] -> Assistant ()
|
||||
addScanRemotes _ [] = noop
|
||||
addScanRemotes full rs = do
|
||||
v <- getAssistant scanRemoteMap
|
||||
liftIO $ atomically $ do
|
||||
m <- fromMaybe M.empty <$> tryTakeTMVar v
|
||||
putTMVar v $ M.unionWith merge (M.fromList $ zip rs (map info rs)) m
|
||||
where
|
||||
info r = ScanInfo (-1 * Remote.cost r) full
|
||||
merge x y = ScanInfo
|
||||
{ scanPriority = max (scanPriority x) (scanPriority y)
|
||||
, fullScan = fullScan x || fullScan y
|
||||
}
|
225
Assistant/Ssh.hs
Normal file
|
@ -0,0 +1,225 @@
|
|||
{- git-annex assistant ssh utilities
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
module Assistant.Ssh where
|
||||
|
||||
import Common.Annex
|
||||
import Utility.TempFile
|
||||
import Utility.UserInfo
|
||||
import Utility.Shell
|
||||
import Git.Remote
|
||||
|
||||
import Data.Text (Text)
|
||||
import qualified Data.Text as T
|
||||
import Data.Char
|
||||
|
||||
data SshData = SshData
|
||||
{ sshHostName :: Text
|
||||
, sshUserName :: Maybe Text
|
||||
, sshDirectory :: Text
|
||||
, sshRepoName :: String
|
||||
, sshPort :: Int
|
||||
, needsPubKey :: Bool
|
||||
, rsyncOnly :: Bool
|
||||
}
|
||||
deriving (Read, Show, Eq)
|
||||
|
||||
data SshKeyPair = SshKeyPair
|
||||
{ sshPubKey :: String
|
||||
, sshPrivKey :: String
|
||||
}
|
||||
|
||||
instance Show SshKeyPair where
|
||||
show = sshPubKey
|
||||
|
||||
type SshPubKey = String
|
||||
|
||||
{- ssh -ofoo=bar command-line option -}
|
||||
sshOpt :: String -> String -> String
|
||||
sshOpt k v = concat ["-o", k, "=", v]
|
||||
|
||||
sshDir :: IO FilePath
|
||||
sshDir = do
|
||||
home <- myHomeDir
|
||||
return $ home </> ".ssh"
|
||||
|
||||
{- user@host or host -}
|
||||
genSshHost :: Text -> Maybe Text -> String
|
||||
genSshHost host user = maybe "" (\v -> T.unpack v ++ "@") user ++ T.unpack host
|
||||
|
||||
{- Generates a git remote name, like host_dir or host -}
|
||||
genSshRepoName :: String -> FilePath -> String
|
||||
genSshRepoName host dir
|
||||
| null dir = makeLegalName host
|
||||
| otherwise = makeLegalName $ host ++ "_" ++ dir
|
||||
|
||||
{- The output of ssh, including both stdout and stderr. -}
|
||||
sshTranscript :: [String] -> (Maybe String) -> IO (String, Bool)
|
||||
sshTranscript opts input = processTranscript "ssh" opts input
|
||||
|
||||
{- Ensure that the ssh public key doesn't include any ssh options, like
|
||||
- command=foo, or other weirdness -}
|
||||
validateSshPubKey :: SshPubKey -> IO ()
|
||||
validateSshPubKey pubkey = either error return $ check $ words pubkey
|
||||
where
|
||||
check [prefix, _key, comment] = do
|
||||
checkprefix prefix
|
||||
checkcomment comment
|
||||
check [prefix, _key] =
|
||||
checkprefix prefix
|
||||
check _ = err "wrong number of words in ssh public key"
|
||||
|
||||
ok = Right ()
|
||||
err msg = Left $ unwords [msg, pubkey]
|
||||
|
||||
checkprefix prefix
|
||||
| ssh == "ssh" && all isAlphaNum keytype = ok
|
||||
| otherwise = err "bad ssh public key prefix"
|
||||
where
|
||||
(ssh, keytype) = separate (== '-') prefix
|
||||
|
||||
checkcomment comment
|
||||
| all (\c -> isAlphaNum c || c == '@' || c == '-' || c == '_' || c == '.') comment = ok
|
||||
| otherwise = err "bad comment in ssh public key"
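{- Sketch of the accepted format (hypothetical key material): a line
 - such as "ssh-rsa AAAAB3Nza... joey@example.com" validates, since the
 - prefix splits into "ssh" plus an alphanumeric key type and the
 - comment uses only permitted characters. A line carrying options,
 - e.g. "command=\"foo\" ssh-rsa AAAA... comment", has four words and
 - is rejected with "wrong number of words in ssh public key". -}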
|
||||
|
||||
addAuthorizedKeys :: Bool -> FilePath -> SshPubKey -> IO Bool
|
||||
addAuthorizedKeys rsynconly dir pubkey = boolSystem "sh"
|
||||
[ Param "-c" , Param $ addAuthorizedKeysCommand rsynconly dir pubkey ]
|
||||
|
||||
removeAuthorizedKeys :: Bool -> FilePath -> SshPubKey -> IO ()
|
||||
removeAuthorizedKeys rsynconly dir pubkey = do
|
||||
let keyline = authorizedKeysLine rsynconly dir pubkey
|
||||
sshdir <- sshDir
|
||||
let keyfile = sshdir </> "authorized_keys"
|
||||
ls <- lines <$> readFileStrict keyfile
|
||||
writeFile keyfile $ unlines $ filter (/= keyline) ls
|
||||
|
||||
{- Implemented as a shell command, so it can be run on remote servers over
|
||||
- ssh.
|
||||
-
|
||||
- The ~/.ssh/git-annex-shell wrapper script is created if not already
|
||||
- present.
|
||||
-}
|
||||
addAuthorizedKeysCommand :: Bool -> FilePath -> SshPubKey -> String
|
||||
addAuthorizedKeysCommand rsynconly dir pubkey = join "&&"
|
||||
[ "mkdir -p ~/.ssh"
|
||||
, join "; "
|
||||
[ "if [ ! -e " ++ wrapper ++ " ]"
|
||||
, "then (" ++ join ";" (map echoval script) ++ ") > " ++ wrapper
|
||||
, "fi"
|
||||
]
|
||||
, "chmod 700 " ++ wrapper
|
||||
, "touch ~/.ssh/authorized_keys"
|
||||
, "chmod 600 ~/.ssh/authorized_keys"
|
||||
, unwords
|
||||
[ "echo"
|
||||
, shellEscape $ authorizedKeysLine rsynconly dir pubkey
|
||||
, ">>~/.ssh/authorized_keys"
|
||||
]
|
||||
]
|
||||
where
|
||||
echoval v = "echo " ++ shellEscape v
|
||||
wrapper = "~/.ssh/git-annex-shell"
|
||||
script =
|
||||
[ shebang
|
||||
, "set -e"
|
||||
, "exec git-annex-shell -c \"$SSH_ORIGINAL_COMMAND\""
|
||||
]
|
||||
|
||||
authorizedKeysLine :: Bool -> FilePath -> SshPubKey -> String
|
||||
authorizedKeysLine rsynconly dir pubkey
|
||||
{- TODO: Locking down rsync is difficult, requiring a rather
|
||||
- long perl script. -}
|
||||
| rsynconly = pubkey
|
||||
| otherwise = limitcommand ++ pubkey
|
||||
where
|
||||
limitcommand = "command=\"GIT_ANNEX_SHELL_DIRECTORY="++shellEscape dir++" ~/.ssh/git-annex-shell\",no-agent-forwarding,no-port-forwarding,no-X11-forwarding "
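{- For illustration (hypothetical values): with dir "annex" and a key
 - "ssh-rsa AAAA... joey@example.com", the non-rsync line is roughly
 -
 -   command="GIT_ANNEX_SHELL_DIRECTORY='annex' ~/.ssh/git-annex-shell",no-agent-forwarding,no-port-forwarding,no-X11-forwarding ssh-rsa AAAA... joey@example.com
 -
 - so sshd forces the wrapper (and thus git-annex-shell) regardless of
 - the command the client requested, while rsync-only setups install
 - the key unrestricted. -}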
|
||||
|
||||
{- Generates a ssh key pair. -}
|
||||
genSshKeyPair :: IO SshKeyPair
|
||||
genSshKeyPair = withTempDir "git-annex-keygen" $ \dir -> do
|
||||
ok <- boolSystem "ssh-keygen"
|
||||
[ Param "-P", Param "" -- no password
|
||||
, Param "-f", File $ dir </> "key"
|
||||
]
|
||||
unless ok $
|
||||
error "ssh-keygen failed"
|
||||
SshKeyPair
|
||||
<$> readFile (dir </> "key.pub")
|
||||
<*> readFile (dir </> "key")
|
||||
|
||||
{- Installs a ssh key pair, and sets up ssh config with a mangled hostname
|
||||
- that will enable use of the key. This way we avoid changing the user's
|
||||
- regular ssh experience at all. Returns a modified SshData containing the
|
||||
- mangled hostname. -}
|
||||
setupSshKeyPair :: SshKeyPair -> SshData -> IO SshData
|
||||
setupSshKeyPair sshkeypair sshdata = do
|
||||
sshdir <- sshDir
|
||||
createDirectoryIfMissing True sshdir
|
||||
|
||||
unlessM (doesFileExist $ sshdir </> sshprivkeyfile) $ do
|
||||
h <- fdToHandle =<<
|
||||
createFile (sshdir </> sshprivkeyfile)
|
||||
(unionFileModes ownerWriteMode ownerReadMode)
|
||||
hPutStr h (sshPrivKey sshkeypair)
|
||||
hClose h
|
||||
unlessM (doesFileExist $ sshdir </> sshpubkeyfile) $
|
||||
writeFile (sshdir </> sshpubkeyfile) (sshPubKey sshkeypair)
|
||||
|
||||
setSshConfig sshdata
|
||||
[ ("IdentityFile", "~/.ssh/" ++ sshprivkeyfile) ]
|
||||
where
|
||||
sshprivkeyfile = "key." ++ mangleSshHostName sshdata
|
||||
sshpubkeyfile = sshprivkeyfile ++ ".pub"
|
||||
|
||||
{- Sets up a ssh config with a mangled hostname.
|
||||
- Returns a modified SshData containing the mangled hostname. -}
|
||||
setSshConfig :: SshData -> [(String, String)] -> IO SshData
|
||||
setSshConfig sshdata config = do
|
||||
sshdir <- sshDir
|
||||
createDirectoryIfMissing True sshdir
|
||||
let configfile = sshdir </> "config"
|
||||
unlessM (catchBoolIO $ isInfixOf mangledhost <$> readFile configfile) $
|
||||
appendFile configfile $ unlines $
|
||||
[ ""
|
||||
, "# Added automatically by git-annex"
|
||||
, "Host " ++ mangledhost
|
||||
] ++ map (\(k, v) -> "\t" ++ k ++ " " ++ v)
|
||||
(settings ++ config)
|
||||
return $ sshdata { sshHostName = T.pack mangledhost }
|
||||
where
|
||||
mangledhost = mangleSshHostName sshdata
|
||||
settings =
|
||||
[ ("Hostname", T.unpack $ sshHostName sshdata)
|
||||
, ("Port", show $ sshPort sshdata)
|
||||
]
|
||||
|
||||
mangleSshHostName :: SshData -> String
|
||||
mangleSshHostName sshdata = "git-annex-" ++ host ++ (maybe "-" ('-':) user)
|
||||
where
|
||||
host = T.unpack $ sshHostName sshdata
|
||||
user = T.unpack <$> sshUserName sshdata
|
||||
|
||||
unMangleSshHostName :: String -> String
|
||||
unMangleSshHostName h
|
||||
| "git-annex-" `isPrefixOf` h = join "-" (beginning $ drop 2 dashbits)
|
||||
| otherwise = h
|
||||
where
|
||||
dashbits = split "-" h
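{- Worked example (hypothetical host): mangleSshHostName for host
 - "example.com" and user Just "joey" yields
 - "git-annex-example.com-joey"; unMangleSshHostName splits that on
 - "-", drops the leading "git" and "annex" pieces plus the trailing
 - user piece, and recovers "example.com". -}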
|
||||
|
||||
{- Does ssh have known_hosts data for a hostname? -}
|
||||
knownHost :: Text -> IO Bool
|
||||
knownHost hostname = do
|
||||
sshdir <- sshDir
|
||||
ifM (doesFileExist $ sshdir </> "known_hosts")
|
||||
( not . null <$> checkhost
|
||||
, return False
|
||||
)
|
||||
where
|
||||
{- ssh-keygen -F can crash on some old known_hosts file -}
|
||||
checkhost = catchDefaultIO "" $
|
||||
readProcess "ssh-keygen" ["-F", T.unpack hostname]
|
178
Assistant/Sync.hs
Normal file
|
@ -0,0 +1,178 @@
|
|||
{- git-annex assistant repo syncing
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
module Assistant.Sync where
|
||||
|
||||
import Assistant.Common
|
||||
import Assistant.Pushes
|
||||
import Assistant.NetMessager
|
||||
import Assistant.Types.NetMessager
|
||||
import Assistant.Alert
|
||||
import Assistant.DaemonStatus
|
||||
import Assistant.ScanRemotes
|
||||
import qualified Command.Sync
|
||||
import Utility.Parallel
|
||||
import qualified Git
|
||||
import qualified Git.Branch
|
||||
import qualified Git.Ref
|
||||
import qualified Git.Command
|
||||
import qualified Remote
|
||||
import qualified Types.Remote as Remote
|
||||
import qualified Annex.Branch
|
||||
import Annex.UUID
|
||||
|
||||
import Data.Time.Clock
|
||||
import qualified Data.Map as M
|
||||
import Control.Concurrent
|
||||
|
||||
{- Syncs with remotes that may have been disconnected for a while.
|
||||
-
|
||||
- First gets git in sync, and then prepares any necessary file transfers.
|
||||
-
|
||||
- An expensive full scan is queued when the git-annex branches of some of
|
||||
- the remotes have diverged from the local git-annex branch. Otherwise,
|
||||
- it's sufficient to requeue failed transfers.
|
||||
-}
|
||||
reconnectRemotes :: Bool -> [Remote] -> Assistant ()
|
||||
reconnectRemotes _ [] = noop
|
||||
reconnectRemotes notifypushes rs = void $ do
|
||||
alertWhile (syncAlert rs) $ do
|
||||
(ok, diverged) <- sync
|
||||
=<< liftAnnex (inRepo Git.Branch.current)
|
||||
addScanRemotes diverged rs
|
||||
return ok
|
||||
where
|
||||
gitremotes = filter (notspecialremote . Remote.repo) rs
|
||||
notspecialremote r
|
||||
| Git.repoIsUrl r = True
|
||||
| Git.repoIsLocal r = True
|
||||
| otherwise = False
|
||||
sync (Just branch) = do
|
||||
diverged <- snd <$> manualPull (Just branch) gitremotes
|
||||
now <- liftIO getCurrentTime
|
||||
ok <- pushToRemotes now notifypushes gitremotes
|
||||
return (ok, diverged)
|
||||
{- No local branch exists yet, but we can try pulling. -}
|
||||
sync Nothing = do
|
||||
diverged <- snd <$> manualPull Nothing gitremotes
|
||||
return (True, diverged)
|
||||
|
||||
{- Updates the local sync branch, then pushes it to all remotes, in
|
||||
- parallel, along with the git-annex branch. This is the same
|
||||
- as "git annex sync", except in parallel, and will co-exist with use of
|
||||
- "git annex sync".
|
||||
-
|
||||
- After the pushes to normal git remotes, also signals XMPP clients that
|
||||
- they can request an XMPP push.
|
||||
-
|
||||
- Avoids running possibly long-duration commands in the Annex monad, so
|
||||
- as not to block other threads.
|
||||
-
|
||||
- This can fail, when the remote's sync branch (or git-annex branch) has
|
||||
- been updated by some other remote pushing into it, or by the remote
|
||||
- itself. To handle failure, a manual pull and merge is done, and the push
|
||||
- is retried.
|
||||
-
|
||||
- When there's a lot of activity, we may fail more than once.
|
||||
- On the other hand, we may fail because the remote is not available.
|
||||
- Rather than retrying indefinitely, after the first retry we enter a
|
||||
- fallback mode, where our push is guaranteed to succeed if the remote is
|
||||
- reachable. If the fallback fails, the push is queued to be retried
|
||||
- later.
|
||||
-}
|
||||
pushToRemotes :: UTCTime -> Bool -> [Remote] -> Assistant Bool
|
||||
pushToRemotes now notifypushes remotes = do
|
||||
(g, branch, u) <- liftAnnex $ do
|
||||
Annex.Branch.commit "update"
|
||||
(,,)
|
||||
<$> gitRepo
|
||||
<*> inRepo Git.Branch.current
|
||||
<*> getUUID
|
||||
let (xmppremotes, normalremotes) = partition isXMPPRemote remotes
|
||||
ret <- go True branch g u normalremotes
|
||||
forM_ xmppremotes $ \r ->
|
||||
sendNetMessage $ Pushing (getXMPPClientID r) CanPush
|
||||
return ret
|
||||
where
|
||||
go _ Nothing _ _ _ = return True -- no branch, so nothing to do
|
||||
go _ _ _ _ [] = return True -- no remotes, so nothing to do
|
||||
go shouldretry (Just branch) g u rs = do
|
||||
debug ["pushing to", show rs]
|
||||
liftIO $ Command.Sync.updateBranch (Command.Sync.syncBranch branch) g
|
||||
(succeeded, failed) <- liftIO $ inParallel (push g branch) rs
|
||||
updatemap succeeded []
|
||||
if null failed
|
||||
then do
|
||||
when notifypushes $
|
||||
sendNetMessage $ NotifyPush $
|
||||
map Remote.uuid succeeded
|
||||
return True
|
||||
else if shouldretry
|
||||
then retry branch g u failed
|
||||
else fallback branch g u failed
|
||||
|
||||
updatemap succeeded failed = changeFailedPushMap $ \m ->
|
||||
M.union (makemap failed) $
|
||||
M.difference m (makemap succeeded)
|
||||
makemap l = M.fromList $ zip l (repeat now)
|
||||
|
||||
retry branch g u rs = do
|
||||
debug ["trying manual pull to resolve failed pushes"]
|
||||
void $ manualPull (Just branch) rs
|
||||
go False (Just branch) g u rs
|
||||
|
||||
fallback branch g u rs = do
|
||||
debug ["fallback pushing to", show rs]
|
||||
(succeeded, failed) <- liftIO $
|
||||
inParallel (\r -> pushFallback u branch r g) rs
|
||||
updatemap succeeded failed
|
||||
when (notifypushes && (not $ null succeeded)) $
|
||||
sendNetMessage $ NotifyPush $
|
||||
map Remote.uuid succeeded
|
||||
return $ null failed
|
||||
|
||||
push g branch remote = Command.Sync.pushBranch remote branch g
|
||||
|
||||
{- This fallback push mode pushes to branches on the remote that have our
|
||||
- uuid in them. While ugly, those branches are reserved for pushing by us,
|
||||
- and so our pushes will never conflict with other pushes. -}
|
||||
pushFallback :: UUID -> Git.Ref -> Remote -> Git.Repo -> IO Bool
|
||||
pushFallback u branch remote = Git.Command.runBool
|
||||
[ Param "push"
|
||||
, Param $ Remote.name remote
|
||||
, Param $ refspec Annex.Branch.name
|
||||
, Param $ refspec branch
|
||||
]
|
||||
where
|
||||
{- Push to refs/synced/uuid/branch; this
|
||||
- avoids cluttering up the branch display. -}
|
||||
refspec b = concat
|
||||
[ s
|
||||
, ":"
|
||||
, "refs/synced/" ++ fromUUID u ++ "/" ++ s
|
||||
]
|
||||
where s = show $ Git.Ref.base b
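{- Example refspecs (hypothetical uuid): pushing branch
 - refs/heads/master for a repository with UUID "0000-1111" would use
 -
 -   master:refs/synced/0000-1111/master
 -   git-annex:refs/synced/0000-1111/git-annex
 -
 - assuming Git.Ref.base strips the refs/heads/ prefix, so each
 - repository only ever pushes into its own refs/synced/ namespace. -}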
|
||||
|
||||
{- Manually pull from remotes and merge their branches. -}
|
||||
manualPull :: Maybe Git.Ref -> [Remote] -> Assistant ([Bool], Bool)
|
||||
manualPull currentbranch remotes = do
|
||||
g <- liftAnnex gitRepo
|
||||
results <- liftIO $ forM remotes $ \r ->
|
||||
Git.Command.runBool [Param "fetch", Param $ Remote.name r] g
|
||||
haddiverged <- liftAnnex Annex.Branch.forceUpdate
|
||||
forM_ remotes $ \r ->
|
||||
liftAnnex $ Command.Sync.mergeRemote r currentbranch
|
||||
return (results, haddiverged)
|
||||
|
||||
{- Start syncing a newly added remote, using a background thread. -}
|
||||
syncNewRemote :: Remote -> Assistant ()
|
||||
syncNewRemote remote = do
|
||||
updateSyncRemotes
|
||||
thread <- asIO $ do
|
||||
reconnectRemotes False [remote]
|
||||
addScanRemotes True [remote]
|
||||
void $ liftIO $ forkIO $ thread
|
301
Assistant/Threads/Committer.hs
Normal file
|
@ -0,0 +1,301 @@
|
|||
{- git-annex assistant commit thread
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
{-# LANGUAGE CPP, BangPatterns #-}
|
||||
|
||||
module Assistant.Threads.Committer where
|
||||
|
||||
import Assistant.Common
|
||||
import Assistant.Changes
|
||||
import Assistant.Types.Changes
|
||||
import Assistant.Commits
|
||||
import Assistant.Alert
|
||||
import Assistant.DaemonStatus
|
||||
import Assistant.TransferQueue
|
||||
import Logs.Transfer
|
||||
import Logs.Location
|
||||
import qualified Annex.Queue
|
||||
import qualified Git.Command
|
||||
import qualified Git.LsFiles
|
||||
import qualified Git.Version
|
||||
import qualified Command.Add
|
||||
import Utility.ThreadScheduler
|
||||
import qualified Utility.Lsof as Lsof
|
||||
import qualified Utility.DirWatcher as DirWatcher
|
||||
import Types.KeySource
|
||||
import Config
|
||||
import Annex.Exception
|
||||
import Annex.Content
|
||||
import Annex.Link
|
||||
import qualified Annex
|
||||
|
||||
import Data.Time.Clock
|
||||
import Data.Tuple.Utils
|
||||
import qualified Data.Set as S
|
||||
import Data.Either
|
||||
|
||||
{- This thread makes git commits at appropriate times. -}
|
||||
commitThread :: NamedThread
|
||||
commitThread = namedThread "Committer" $ do
|
||||
delayadd <- liftAnnex $
|
||||
maybe delayaddDefault (return . Just . Seconds)
|
||||
=<< annexDelayAdd <$> Annex.getGitConfig
|
||||
runEvery (Seconds 1) <~> do
|
||||
-- We already waited one second as a simple rate limiter.
|
||||
-- Next, wait until at least one change is available for
|
||||
-- processing.
|
||||
changes <- getChanges
|
||||
-- Now see if now's a good time to commit.
|
||||
time <- liftIO getCurrentTime
|
||||
if shouldCommit time changes
|
||||
then do
|
||||
readychanges <- handleAdds delayadd changes
|
||||
if shouldCommit time readychanges
|
||||
then do
|
||||
debug
|
||||
[ "committing"
|
||||
, show (length readychanges)
|
||||
, "changes"
|
||||
]
|
||||
void $ alertWhile commitAlert $
|
||||
liftAnnex commitStaged
|
||||
recordCommit
|
||||
else refill readychanges
|
||||
else refill changes
|
||||
where
|
||||
refill [] = noop
|
||||
refill cs = do
|
||||
debug ["delaying commit of", show (length cs), "changes"]
|
||||
refillChanges cs
|
||||
|
||||
commitStaged :: Annex Bool
|
||||
commitStaged = do
|
||||
{- This could fail if there's another commit being made by
|
||||
- something else. -}
|
||||
v <- tryAnnex Annex.Queue.flush
|
||||
case v of
|
||||
Left _ -> return False
|
||||
Right _ -> do
|
||||
direct <- isDirect
|
||||
let params = nomessage $ catMaybes
|
||||
[ Just $ Param "--quiet"
|
||||
{- In indirect mode, avoid running the
|
||||
- usual git-annex pre-commit hook;
|
||||
- watch does the same symlink fixing,
|
||||
- and we don't want to deal with unlocked
|
||||
- files in these commits. -}
|
||||
, if direct then Nothing else Just $ Param "--no-verify"
|
||||
]
|
||||
{- Empty commits may be made if tree changes cancel
|
||||
- each other out, etc. Git returns nonzero on those,
|
||||
- so don't propagate out commit failures. -}
|
||||
void $ inRepo $ catchMaybeIO .
|
||||
Git.Command.runQuiet (Param "commit" : params)
|
||||
return True
|
||||
where
|
||||
nomessage ps
|
||||
| Git.Version.older "1.7.2" = Param "-m"
|
||||
: Param "autocommit" : ps
|
||||
| otherwise = Param "--allow-empty-message"
|
||||
: Param "-m" : Param "" : ps
|
||||
|
||||
{- Decide if now is a good time to make a commit.
|
||||
- Note that the list of change times has an undefined order.
|
||||
-
|
||||
- Current strategy: If there have been 10 changes within the past second,
|
||||
- a batch activity is taking place, so wait for later.
|
||||
-}
|
||||
shouldCommit :: UTCTime -> [Change] -> Bool
|
||||
shouldCommit now changes
|
||||
| len == 0 = False
|
||||
| len > 10000 = True -- avoid bloating queue too much
|
||||
| length (filter thisSecond changes) < 10 = True
|
||||
| otherwise = False -- batch activity
|
||||
where
|
||||
len = length changes
|
||||
thisSecond c = now `diffUTCTime` changeTime c <= 1
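{- Worked example: 5 changes that all arrived within the past second
 - are committed right away (fewer than 10 recent changes); 50 changes
 - in the same second look like batch activity, so the commit is
 - delayed; 15000 queued changes are committed regardless, to keep the
 - queue from growing without bound. -}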
|
||||
|
||||
{- OSX needs a short delay after a file is added before locking it down,
|
||||
- when using a non-direct mode repository, as pasting a file seems to
|
||||
- try to set file permissions or otherwise access the file after closing
|
||||
- it. -}
|
||||
delayaddDefault :: Annex (Maybe Seconds)
|
||||
#ifdef darwin_HOST_OS
|
||||
delayaddDefault = ifM isDirect
|
||||
( return Nothing
|
||||
, return $ Just $ Seconds 1
|
||||
)
|
||||
#else
|
||||
delayaddDefault = return Nothing
|
||||
#endif
|
||||
|
||||
{- If there are PendingAddChanges, or InProcessAddChanges, the files
|
||||
- have not yet actually been added to the annex, and that has to be done
|
||||
- now, before committing.
|
||||
-
|
||||
- Deferring the adds to this point causes batches to be bundled together,
|
||||
- which allows faster checking with lsof that the files are not still open
|
||||
- for write by some other process, and faster checking with git-ls-files
|
||||
- that the files are not already checked into git.
|
||||
-
|
||||
- When a file is added, Inotify will notice the new symlink. So this waits
|
||||
- for additional Changes to arrive, so that the symlink has hopefully been
|
||||
- staged before returning, and will be committed immediately.
|
||||
-
|
||||
- OTOH, for kqueue, eventsCoalesce, so instead the symlink is directly
|
||||
- created and staged.
|
||||
-
|
||||
- Returns a list of all changes that are ready to be committed.
|
||||
- Any pending adds that are not ready yet are put back into the ChangeChan,
|
||||
- where they will be retried later.
|
||||
-}
|
||||
handleAdds :: Maybe Seconds -> [Change] -> Assistant [Change]
|
||||
handleAdds delayadd cs = returnWhen (null incomplete) $ do
|
||||
let (pending, inprocess) = partition isPendingAddChange incomplete
|
||||
direct <- liftAnnex isDirect
|
||||
pending' <- if direct
|
||||
then return pending
|
||||
else findnew pending
|
||||
(postponed, toadd) <- partitionEithers <$> safeToAdd delayadd pending' inprocess
|
||||
|
||||
unless (null postponed) $
|
||||
refillChanges postponed
|
||||
|
||||
returnWhen (null toadd) $ do
|
||||
added <- catMaybes <$> forM toadd add
|
||||
if DirWatcher.eventsCoalesce || null added || direct
|
||||
then return $ added ++ otherchanges
|
||||
else do
|
||||
r <- handleAdds delayadd =<< getChanges
|
||||
return $ r ++ added ++ otherchanges
|
||||
where
|
||||
(incomplete, otherchanges) = partition (\c -> isPendingAddChange c || isInProcessAddChange c) cs
|
||||
|
||||
findnew [] = return []
|
||||
findnew pending@(exemplar:_) = do
|
||||
(!newfiles, cleanup) <- liftAnnex $
|
||||
inRepo (Git.LsFiles.notInRepo False $ map changeFile pending)
|
||||
void $ liftIO cleanup
|
||||
-- note: timestamp info is lost here
|
||||
let ts = changeTime exemplar
|
||||
return $ map (PendingAddChange ts) newfiles
|
||||
|
||||
returnWhen c a
|
||||
| c = return otherchanges
|
||||
| otherwise = a
|
||||
|
||||
add :: Change -> Assistant (Maybe Change)
|
||||
add change@(InProcessAddChange { keySource = ks }) = do
|
||||
alertWhile' (addFileAlert $ keyFilename ks) $
|
||||
liftM ret $ catchMaybeIO <~> do
|
||||
sanitycheck ks $ do
|
||||
key <- liftAnnex $ do
|
||||
showStart "add" $ keyFilename ks
|
||||
Command.Add.ingest $ Just ks
|
||||
done (finishedChange change) (keyFilename ks) key
|
||||
where
|
||||
{- Add errors tend to be transient and will be automatically
|
||||
- dealt with, so don't pass to the alert code. -}
|
||||
ret (Just j@(Just _)) = (True, j)
|
||||
ret _ = (True, Nothing)
|
||||
add _ = return Nothing
|
||||
|
||||
done _ _ Nothing = do
|
||||
liftAnnex showEndFail
|
||||
return Nothing
|
||||
done change file (Just key) = do
|
||||
liftAnnex $ do
|
||||
logStatus key InfoPresent
|
||||
link <- ifM isDirect
|
||||
( calcGitLink file key
|
||||
, Command.Add.link file key True
|
||||
)
|
||||
whenM (pure DirWatcher.eventsCoalesce <||> isDirect) $ do
|
||||
stageSymlink file =<< hashSymlink link
|
||||
showEndOk
|
||||
queueTransfers "newly added file" Next key (Just file) Upload
|
||||
return $ Just change
|
||||
|
||||
{- Check that the keysource's keyFilename still exists,
|
||||
- and is still a hard link to its contentLocation,
|
||||
- before ingesting it. -}
|
||||
sanitycheck keysource a = do
|
||||
fs <- liftIO $ getSymbolicLinkStatus $ keyFilename keysource
|
||||
ks <- liftIO $ getSymbolicLinkStatus $ contentLocation keysource
|
||||
if deviceID ks == deviceID fs && fileID ks == fileID fs
|
||||
then a
|
||||
else do
|
||||
-- remove the hard link
|
||||
when (contentLocation keysource /= keyFilename keysource) $
|
||||
void $ liftIO $ tryIO $ removeFile $ contentLocation keysource
|
||||
return Nothing
|
||||
|
||||
{- Files can Either be Right to be added now,
|
||||
- or are unsafe, and must be Left for later.
|
||||
-
|
||||
- Check by running lsof on the repository.
|
||||
-}
|
||||
safeToAdd :: Maybe Seconds -> [Change] -> [Change] -> Assistant [Either Change Change]
|
||||
safeToAdd _ [] [] = return []
|
||||
safeToAdd delayadd pending inprocess = do
|
||||
maybe noop (liftIO . threadDelaySeconds) delayadd
|
||||
liftAnnex $ do
|
||||
keysources <- mapM Command.Add.lockDown (map changeFile pending)
|
||||
let inprocess' = inprocess ++ catMaybes (map mkinprocess $ zip pending keysources)
|
||||
openfiles <- S.fromList . map fst3 . filter openwrite <$>
|
||||
findopenfiles (map keySource inprocess')
|
||||
let checked = map (check openfiles) inprocess'
|
||||
|
||||
{- If new events are received when files are closed,
|
||||
- there's no need to retry any changes that cannot
|
||||
- be done now. -}
|
||||
if DirWatcher.closingTracked
|
||||
then do
|
||||
mapM_ canceladd $ lefts checked
|
||||
allRight $ rights checked
|
||||
else return checked
|
||||
where
|
||||
check openfiles change@(InProcessAddChange { keySource = ks })
|
||||
| S.member (contentLocation ks) openfiles = Left change
|
||||
check _ change = Right change
|
||||
|
||||
mkinprocess (c, Just ks) = Just $ InProcessAddChange
|
||||
{ changeTime = changeTime c
|
||||
, keySource = ks
|
||||
}
|
||||
mkinprocess (_, Nothing) = Nothing
|
||||
|
||||
canceladd (InProcessAddChange { keySource = ks }) = do
|
||||
warning $ keyFilename ks
|
||||
++ " still has writers, not adding"
|
||||
-- remove the hard link
|
||||
when (contentLocation ks /= keyFilename ks) $
|
||||
void $ liftIO $ tryIO $ removeFile $ contentLocation ks
|
||||
canceladd _ = noop
|
||||
|
||||
openwrite (_file, mode, _pid)
|
||||
| mode == Lsof.OpenWriteOnly = True
|
||||
| mode == Lsof.OpenReadWrite = True
|
||||
| mode == Lsof.OpenUnknown = True
|
||||
| otherwise = False
|
||||
|
||||
allRight = return . map Right
|
||||
|
||||
{- Normally the KeySources are locked down inside the temp directory,
|
||||
- so can just lsof that, which is quite efficient.
|
||||
-
|
||||
- In crippled filesystem mode, there is no lock down, so must run lsof
|
||||
- on each individual file.
|
||||
-}
|
||||
findopenfiles keysources = ifM crippledFileSystem
|
||||
( liftIO $ do
|
||||
let segments = segmentXargs $ map keyFilename keysources
|
||||
concat <$> forM segments (\fs -> Lsof.query $ "--" : fs)
|
||||
, do
|
||||
tmpdir <- fromRepo gitAnnexTmpDir
|
||||
liftIO $ Lsof.queryDir tmpdir
|
||||
)
|
86
Assistant/Threads/ConfigMonitor.hs
Normal file
|
@ -0,0 +1,86 @@
|
|||
{- git-annex assistant config monitor thread
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
module Assistant.Threads.ConfigMonitor where
|
||||
|
||||
import Assistant.Common
|
||||
import Assistant.BranchChange
|
||||
import Assistant.DaemonStatus
|
||||
import Assistant.Commits
|
||||
import Utility.ThreadScheduler
|
||||
import Logs.UUID
|
||||
import Logs.Trust
|
||||
import Logs.Remote
|
||||
import Logs.PreferredContent
|
||||
import Logs.Group
|
||||
import Remote.List (remoteListRefresh)
|
||||
import qualified Git.LsTree as LsTree
|
||||
import qualified Annex.Branch
|
||||
|
||||
import qualified Data.Set as S
|
||||
|
||||
{- This thread detects when configuration changes have been made to the
|
||||
- git-annex branch and reloads cached configuration.
|
||||
-
|
||||
- If the branch is frequently changing, it's checked for configuration
|
||||
- changes no more often than once every 60 seconds. On the other hand,
|
||||
- if the branch has not changed in a while, configuration changes will
|
||||
- be detected immediately.
|
||||
-}
|
||||
configMonitorThread :: NamedThread
|
||||
configMonitorThread = namedThread "ConfigMonitor" $ loop =<< getConfigs
|
||||
where
|
||||
loop old = do
|
||||
waitBranchChange
|
||||
new <- getConfigs
|
||||
when (old /= new) $ do
|
||||
let changedconfigs = new `S.difference` old
|
||||
debug $ "reloading config" :
|
||||
map fst (S.toList changedconfigs)
|
||||
reloadConfigs new
|
||||
{- Record a commit to get this config
|
||||
- change pushed out to remotes. -}
|
||||
recordCommit
|
||||
liftIO $ threadDelaySeconds (Seconds 60)
|
||||
loop new
|
||||
|
||||
{- Config files, and their checksums. -}
|
||||
type Configs = S.Set (FilePath, String)
|
||||
|
||||
{- All git-annex's config files, and actions to run when they change. -}
|
||||
configFilesActions :: [(FilePath, Annex ())]
|
||||
configFilesActions =
|
||||
[ (uuidLog, void $ uuidMapLoad)
|
||||
, (remoteLog, void remoteListRefresh)
|
||||
, (trustLog, void trustMapLoad)
|
||||
, (groupLog, void groupMapLoad)
|
||||
-- Preferred content settings depend on most of the other configs,
|
||||
-- so will be reloaded whenever any configs change.
|
||||
, (preferredContentLog, noop)
|
||||
]
|
||||
|
||||
reloadConfigs :: Configs -> Assistant ()
|
||||
reloadConfigs changedconfigs = do
|
||||
liftAnnex $ do
|
||||
sequence_ as
|
||||
void preferredContentMapLoad
|
||||
{- Changes to the remote log, or the trust log, can affect the
|
||||
- syncRemotes list. Changes to the uuid log may affect its
|
||||
- display so are also included. -}
|
||||
when (any (`elem` fs) [remoteLog, trustLog, uuidLog]) $
|
||||
updateSyncRemotes
|
||||
where
|
||||
(fs, as) = unzip $ filter (flip S.member changedfiles . fst)
|
||||
configFilesActions
|
||||
changedfiles = S.map fst changedconfigs
|
||||
|
||||
getConfigs :: Assistant Configs
|
||||
getConfigs = S.fromList . map extract
|
||||
<$> liftAnnex (inRepo $ LsTree.lsTreeFiles Annex.Branch.fullname files)
|
||||
where
|
||||
files = map fst configFilesActions
|
||||
extract treeitem = (LsTree.file treeitem, LsTree.sha treeitem)
|
29
Assistant/Threads/DaemonStatus.hs
Normal file
|
@ -0,0 +1,29 @@
|
|||
{- git-annex assistant daemon status thread
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
module Assistant.Threads.DaemonStatus where
|
||||
|
||||
import Assistant.Common
|
||||
import Assistant.DaemonStatus
|
||||
import Utility.ThreadScheduler
|
||||
import Utility.NotificationBroadcaster
|
||||
|
||||
{- This writes the daemon status to disk, when it changes, but no more
|
||||
- frequently than once every ten minutes.
|
||||
-}
|
||||
daemonStatusThread :: NamedThread
|
||||
daemonStatusThread = namedThread "DaemonStatus" $ do
|
||||
notifier <- liftIO . newNotificationHandle
|
||||
=<< changeNotifier <$> getDaemonStatus
|
||||
checkpoint
|
||||
runEvery (Seconds tenMinutes) <~> do
|
||||
liftIO $ waitNotification notifier
|
||||
checkpoint
|
||||
where
|
||||
checkpoint = do
|
||||
file <- liftAnnex $ fromRepo gitAnnexDaemonStatusFile
|
||||
liftIO . writeDaemonStatusFile file =<< getDaemonStatus
|
43
Assistant/Threads/Glacier.hs
Normal file
|
@ -0,0 +1,43 @@
|
|||
{- git-annex assistant Amazon Glacier retrieval
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
{-# LANGUAGE CPP #-}
|
||||
{-# LANGUAGE OverloadedStrings #-}
|
||||
|
||||
module Assistant.Threads.Glacier where
|
||||
|
||||
import Assistant.Common
|
||||
import Utility.ThreadScheduler
|
||||
import qualified Types.Remote as Remote
|
||||
import qualified Remote.Glacier as Glacier
|
||||
import Logs.Transfer
|
||||
import Assistant.DaemonStatus
|
||||
import Assistant.TransferQueue
|
||||
|
||||
import qualified Data.Set as S
|
||||
|
||||
{- Wakes up every half hour and checks if any glacier remotes have failed
|
||||
- downloads. If so, runs glacier-cli to check if the files are now
|
||||
- available, and queues the downloads. -}
|
||||
glacierThread :: NamedThread
|
||||
glacierThread = namedThread "Glacier" $ runEvery (Seconds 3600) <~> go
|
||||
where
|
||||
isglacier r = Remote.remotetype r == Glacier.remote
|
||||
go = do
|
||||
rs <- filter isglacier . syncDataRemotes <$> getDaemonStatus
|
||||
forM_ rs $ \r ->
|
||||
check r =<< (liftAnnex $ getFailedTransfers $ Remote.uuid r)
|
||||
check _ [] = noop
|
||||
check r l = do
|
||||
let keys = map getkey l
|
||||
(availkeys, failedkeys) <- liftAnnex $ Glacier.jobList r keys
|
||||
let s = S.fromList (failedkeys ++ availkeys)
|
||||
let l' = filter (\p -> S.member (getkey p) s) l
|
||||
forM_ l' $ \(t, info) -> do
|
||||
liftAnnex $ removeFailedTransfer t
|
||||
queueTransferWhenSmall "object available from glacier" (associatedFile info) t r
|
||||
getkey = transferKey . fst
|
96
Assistant/Threads/Merger.hs
Normal file
|
@ -0,0 +1,96 @@
|
|||
{- git-annex assistant git merge thread
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
module Assistant.Threads.Merger where
|
||||
|
||||
import Assistant.Common
|
||||
import Assistant.TransferQueue
|
||||
import Assistant.BranchChange
|
||||
import Utility.DirWatcher
|
||||
import Utility.Types.DirWatcher
|
||||
import qualified Annex.Branch
|
||||
import qualified Git
|
||||
import qualified Git.Branch
|
||||
import qualified Command.Sync
|
||||
|
||||
{- This thread watches for changes to .git/refs/, and handles incoming
|
||||
- pushes. -}
|
||||
mergeThread :: NamedThread
|
||||
mergeThread = namedThread "Merger" $ do
|
||||
g <- liftAnnex gitRepo
|
||||
let dir = Git.localGitDir g </> "refs"
|
||||
liftIO $ createDirectoryIfMissing True dir
|
||||
let hook a = Just <$> asIO2 (runHandler a)
|
||||
addhook <- hook onAdd
|
||||
errhook <- hook onErr
|
||||
let hooks = mkWatchHooks
|
||||
{ addHook = addhook
|
||||
, errHook = errhook
|
||||
}
|
||||
void $ liftIO $ watchDir dir (const False) hooks id
|
||||
debug ["watching", dir]
|
||||
|
||||
type Handler = FilePath -> Assistant ()
|
||||
|
||||
{- Runs an action handler.
|
||||
-
|
||||
- Exceptions are ignored, otherwise a whole thread could be crashed.
|
||||
-}
|
||||
runHandler :: Handler -> FilePath -> Maybe FileStatus -> Assistant ()
|
||||
runHandler handler file _filestatus =
|
||||
either (liftIO . print) (const noop) =<< tryIO <~> handler file
|
||||
|
||||
{- Called when there's an error with inotify. -}
|
||||
onErr :: Handler
|
||||
onErr msg = error msg
|
||||
|
||||
{- Called when a new branch ref is written.
|
||||
-
|
||||
- This relies on git's atomic method of updating branch ref files,
|
||||
- which is to first write the new file to .lock, and then rename it
|
||||
- over the old file. So, ignore .lock files, and the rename ensures
|
||||
- the watcher sees a new file being added on each update.
|
||||
-
|
||||
- At startup, synthetic add events fire, causing this to run, but that's
|
||||
- ok; it ensures that any changes pushed since the last time the assistant
|
||||
- ran are merged in.
|
||||
-}
|
||||
onAdd :: Handler
|
||||
onAdd file
|
||||
| ".lock" `isSuffixOf` file = noop
|
||||
| isAnnexBranch file = do
|
||||
branchChanged
|
||||
whenM (liftAnnex Annex.Branch.forceUpdate) $
|
||||
queueDeferredDownloads "retrying deferred download" Later
|
||||
| "/synced/" `isInfixOf` file = do
|
||||
mergecurrent =<< liftAnnex (inRepo Git.Branch.current)
|
||||
| otherwise = noop
|
||||
where
|
||||
changedbranch = fileToBranch file
|
||||
mergecurrent (Just current)
|
||||
| equivBranches changedbranch current = do
|
||||
debug
|
||||
[ "merging", show changedbranch
|
||||
, "into", show current
|
||||
]
|
||||
void $ liftAnnex $ Command.Sync.mergeFrom changedbranch
|
||||
mergecurrent _ = noop
|
||||
|
||||
equivBranches :: Git.Ref -> Git.Ref -> Bool
|
||||
equivBranches x y = base x == base y
|
||||
where
|
||||
base = takeFileName . show
|
||||
|
||||
isAnnexBranch :: FilePath -> Bool
|
||||
isAnnexBranch f = n `isSuffixOf` f
|
||||
where
|
||||
n = "/" ++ show Annex.Branch.name
|
||||
|
||||
fileToBranch :: FilePath -> Git.Ref
|
||||
fileToBranch f = Git.Ref $ "refs" </> base
|
||||
where
|
||||
base = Prelude.last $ split "/refs/" f
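{- Example (hypothetical path): a ref file such as
 - ".git/refs/remotes/foo/synced/master" maps to the branch ref
 - "refs/remotes/foo/synced/master". -}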
|
192
Assistant/Threads/MountWatcher.hs
Normal file
|
@ -0,0 +1,192 @@
|
|||
{- git-annex assistant mount watcher, using either dbus or mtab polling
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
{-# LANGUAGE CPP #-}
|
||||
{-# LANGUAGE OverloadedStrings #-}
|
||||
|
||||
module Assistant.Threads.MountWatcher where
|
||||
|
||||
import Assistant.Common
|
||||
import Assistant.DaemonStatus
|
||||
import Assistant.Sync
|
||||
import qualified Annex
|
||||
import qualified Git
|
||||
import Utility.ThreadScheduler
|
||||
import Utility.Mounts
|
||||
import Remote.List
|
||||
import qualified Types.Remote as Remote
|
||||
|
||||
import qualified Data.Set as S
|
||||
|
||||
#if WITH_DBUS
|
||||
import Utility.DBus
|
||||
import DBus.Client
|
||||
import DBus
|
||||
import Data.Word (Word32)
|
||||
import Control.Concurrent
|
||||
import qualified Control.Exception as E
|
||||
#else
|
||||
#warning Building without dbus support; will use mtab polling
|
||||
#endif
|
||||
|
||||
mountWatcherThread :: NamedThread
|
||||
mountWatcherThread = namedThread "MountWatcher" $
|
||||
#if WITH_DBUS
|
||||
dbusThread
|
||||
#else
|
||||
pollingThread
|
||||
#endif
|
||||
|
||||
#if WITH_DBUS
|
||||
|
||||
dbusThread :: Assistant ()
|
||||
dbusThread = do
|
||||
runclient <- asIO1 go
|
||||
r <- liftIO $ E.try $ runClient getSessionAddress runclient
|
||||
either onerr (const noop) r
|
||||
where
|
||||
go client = ifM (checkMountMonitor client)
|
||||
( do
|
||||
{- Store the current mount points in an MVar, to be
|
||||
- compared later. We could in theory work out the
|
||||
- mount point from the dbus message, but this is
|
||||
- easier. -}
|
||||
mvar <- liftIO $ newMVar =<< currentMountPoints
|
||||
handleevent <- asIO1 $ \_event -> do
|
||||
nowmounted <- liftIO $ currentMountPoints
|
||||
wasmounted <- liftIO $ swapMVar mvar nowmounted
|
||||
handleMounts wasmounted nowmounted
|
||||
liftIO $ forM_ mountChanged $ \matcher ->
|
||||
listen client matcher handleevent
|
||||
, do
|
||||
liftAnnex $
|
||||
warning "No known volume monitor available through dbus; falling back to mtab polling"
|
||||
pollingThread
|
||||
)
|
||||
onerr :: E.SomeException -> Assistant ()
|
||||
onerr e = do
|
||||
{- If the session dbus fails, the user probably
|
||||
- logged out of their desktop. Even if they log
|
||||
- back in, we won't have access to the dbus
|
||||
- session key, so polling is the best that can be
|
||||
- done in this situation. -}
|
||||
liftAnnex $
|
||||
warning $ "dbus failed; falling back to mtab polling (" ++ show e ++ ")"
|
||||
pollingThread
|
||||
|
||||
{- Examine the list of services connected to dbus, to see if there
|
||||
- are any we can use to monitor mounts. If not, will attempt to start one. -}
|
||||
checkMountMonitor :: Client -> Assistant Bool
|
||||
checkMountMonitor client = do
|
||||
running <- filter (`elem` usableservices)
|
||||
<$> liftIO (listServiceNames client)
|
||||
case running of
|
||||
[] -> startOneService client startableservices
|
||||
(service:_) -> do
|
||||
debug [ "Using running DBUS service"
|
||||
, service
|
||||
, "to monitor mount events."
|
||||
]
|
||||
return True
|
||||
where
|
||||
startableservices = [gvfs, gvfsgdu]
|
||||
usableservices = startableservices ++ [kde]
|
||||
gvfs = "org.gtk.Private.UDisks2VolumeMonitor"
|
||||
gvfsgdu = "org.gtk.Private.GduVolumeMonitor"
|
||||
kde = "org.kde.DeviceNotifications"
|
||||
|
||||
startOneService :: Client -> [ServiceName] -> Assistant Bool
|
||||
startOneService _ [] = return False
|
||||
startOneService client (x:xs) = do
|
||||
_ <- liftIO $ tryNonAsync $ callDBus client "StartServiceByName"
|
||||
[toVariant x, toVariant (0 :: Word32)]
|
||||
ifM (liftIO $ elem x <$> listServiceNames client)
|
||||
( do
|
||||
debug
|
||||
[ "Started DBUS service", x
|
||||
, "to monitor mount events."
|
||||
]
|
||||
return True
|
||||
, startOneService client xs
|
||||
)
|
||||
|
||||
{- Filter matching events received when drives are mounted and unmounted. -}
|
||||
mountChanged :: [MatchRule]
|
||||
mountChanged = [gvfs True, gvfs False, kde, kdefallback]
|
||||
where
|
||||
{- gvfs reliably generates this event whenever a
|
||||
- drive is mounted/unmounted, whether automatically, or manually -}
|
||||
gvfs mount = matchAny
|
||||
{ matchInterface = Just "org.gtk.Private.RemoteVolumeMonitor"
|
||||
, matchMember = Just $ if mount then "MountAdded" else "MountRemoved"
|
||||
}
|
||||
{- This event fires when KDE prompts the user what to do with a drive,
|
||||
- but maybe not at other times. And it's not received -}
|
||||
kde = matchAny
|
||||
{ matchInterface = Just "org.kde.Solid.Device"
|
||||
, matchMember = Just "setupDone"
|
||||
}
|
||||
{- This event may not be closely related to mounting a drive, but it's
|
||||
- observed reliably when a drive gets mounted or unmounted. -}
|
||||
kdefallback = matchAny
|
||||
{ matchInterface = Just "org.kde.KDirNotify"
|
||||
, matchMember = Just "enteredDirectory"
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
pollingThread :: Assistant ()
|
||||
pollingThread = go =<< liftIO currentMountPoints
|
||||
where
|
||||
go wasmounted = do
|
||||
liftIO $ threadDelaySeconds (Seconds 10)
|
||||
nowmounted <- liftIO currentMountPoints
|
||||
handleMounts wasmounted nowmounted
|
||||
go nowmounted
|
||||
|
||||
handleMounts :: MountPoints -> MountPoints -> Assistant ()
|
||||
handleMounts wasmounted nowmounted =
|
||||
mapM_ (handleMount . mnt_dir) $
|
||||
S.toList $ newMountPoints wasmounted nowmounted
|
||||
|
||||
handleMount :: FilePath -> Assistant ()
|
||||
handleMount dir = do
|
||||
debug ["detected mount of", dir]
|
||||
rs <- filter (Git.repoIsLocal . Remote.repo) <$> remotesUnder dir
|
||||
reconnectRemotes True rs
|
||||
|
||||
{- Finds remotes located underneath the mount point.
|
||||
-
|
||||
- Updates state to include the remotes.
|
||||
-
|
||||
- The config of git remotes is re-read, as it may not have been available
|
||||
- at startup time, or may have changed (it could even be a different
|
||||
- repository at the same remote location...)
|
||||
-}
|
||||
remotesUnder :: FilePath -> Assistant [Remote]
|
||||
remotesUnder dir = do
|
||||
repotop <- liftAnnex $ fromRepo Git.repoPath
|
||||
rs <- liftAnnex remoteList
|
||||
pairs <- liftAnnex $ mapM (checkremote repotop) rs
|
||||
let (waschanged, rs') = unzip pairs
|
||||
when (any id waschanged) $ do
|
||||
liftAnnex $ Annex.changeState $ \s -> s { Annex.remotes = rs' }
|
||||
updateSyncRemotes
|
||||
return $ map snd $ filter fst pairs
|
||||
where
|
||||
checkremote repotop r = case Remote.localpath r of
|
||||
Just p | dirContains dir (absPathFrom repotop p) ->
|
||||
(,) <$> pure True <*> updateRemote r
|
||||
_ -> return (False, r)
|
||||
|
||||
type MountPoints = S.Set Mntent
|
||||
|
||||
currentMountPoints :: IO MountPoints
|
||||
currentMountPoints = S.fromList <$> getMounts
|
||||
|
||||
newMountPoints :: MountPoints -> MountPoints -> MountPoints
|
||||
newMountPoints old new = S.difference new old
|
131
Assistant/Threads/NetWatcher.hs
Normal file
|
@ -0,0 +1,131 @@
|
|||
{- git-annex assistant network connection watcher, using dbus
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
{-# LANGUAGE CPP #-}
|
||||
{-# LANGUAGE OverloadedStrings #-}
|
||||
|
||||
module Assistant.Threads.NetWatcher where
|
||||
|
||||
import Assistant.Common
|
||||
import Assistant.Sync
|
||||
import Utility.ThreadScheduler
|
||||
import Remote.List
|
||||
import qualified Types.Remote as Remote
|
||||
|
||||
#if WITH_DBUS
|
||||
import Utility.DBus
|
||||
import DBus.Client
|
||||
import DBus
|
||||
import Data.Word (Word32)
|
||||
import Assistant.NetMessager
|
||||
#else
|
||||
#warning Building without dbus support; will poll for network connection changes
|
||||
#endif
|
||||
|
||||
netWatcherThread :: NamedThread
|
||||
#if WITH_DBUS
|
||||
netWatcherThread = thread dbusThread
|
||||
#else
|
||||
netWatcherThread = thread noop
|
||||
#endif
|
||||
where
|
||||
thread = namedThread "NetWatcher"
|
||||
|
||||
{- This is a fallback for when dbus cannot be used to detect
|
||||
- network connection changes, but it also ensures that
|
||||
- any networked remotes that may not have been routable for a
|
||||
- while (despite the local network staying up) are synced with
|
||||
- periodically. -}
|
||||
netWatcherFallbackThread :: NamedThread
|
||||
netWatcherFallbackThread = namedThread "NetWatcherFallback" $
|
||||
runEvery (Seconds 3600) <~> handleConnection
|
||||
|
||||
#if WITH_DBUS
|
||||
|
||||
dbusThread :: Assistant ()
|
||||
dbusThread = do
|
||||
handleerr <- asIO2 onerr
|
||||
runclient <- asIO1 go
|
||||
liftIO $ persistentClient getSystemAddress () handleerr runclient
|
||||
where
|
||||
go client = ifM (checkNetMonitor client)
|
||||
( do
|
||||
listenNMConnections client <~> handleconn
|
||||
listenWicdConnections client <~> handleconn
|
||||
, do
|
||||
liftAnnex $
|
||||
warning "No known network monitor available through dbus; falling back to polling"
|
||||
)
|
||||
handleconn = do
|
||||
debug ["detected network connection"]
|
||||
notifyNetMessagerRestart
|
||||
handleConnection
|
||||
onerr e _ = do
|
||||
liftAnnex $
|
||||
warning $ "lost dbus connection; falling back to polling (" ++ show e ++ ")"
|
||||
{- Wait, in hope that dbus will come back -}
|
||||
liftIO $ threadDelaySeconds (Seconds 60)
|
||||
|
||||
{- Examine the list of services connected to dbus, to see if there
|
||||
- are any we can use to monitor network connections. -}
|
||||
checkNetMonitor :: Client -> Assistant Bool
|
||||
checkNetMonitor client = do
|
||||
running <- liftIO $ filter (`elem` [networkmanager, wicd])
|
||||
<$> listServiceNames client
|
||||
case running of
|
||||
[] -> return False
|
||||
(service:_) -> do
|
||||
debug [ "Using running DBUS service"
|
||||
, service
|
||||
, "to monitor network connection events."
|
||||
]
|
||||
return True
|
||||
where
|
||||
networkmanager = "org.freedesktop.NetworkManager"
|
||||
wicd = "org.wicd.daemon"
|
||||
|
||||
{- Listens for new NetworkManager connections. -}
|
||||
listenNMConnections :: Client -> IO () -> IO ()
|
||||
listenNMConnections client callback =
|
||||
listen client matcher $ \event ->
|
||||
when (Just True == anyM activeconnection (signalBody event)) $
|
||||
callback
|
||||
where
|
||||
matcher = matchAny
|
||||
{ matchInterface = Just "org.freedesktop.NetworkManager.Connection.Active"
|
||||
, matchMember = Just "PropertiesChanged"
|
||||
}
|
||||
nm_connection_activated = toVariant (2 :: Word32)
|
||||
nm_state_key = toVariant ("State" :: String)
|
||||
activeconnection v = do
|
||||
m <- fromVariant v
|
||||
vstate <- lookup nm_state_key $ dictionaryItems m
|
||||
state <- fromVariant vstate
|
||||
return $ state == nm_connection_activated
|
||||
|
||||
{- Listens for new Wicd connections. -}
|
||||
listenWicdConnections :: Client -> IO () -> IO ()
|
||||
listenWicdConnections client callback =
|
||||
listen client matcher $ \event ->
|
||||
when (any (== wicd_success) (signalBody event)) $
|
||||
callback
|
||||
where
|
||||
matcher = matchAny
|
||||
{ matchInterface = Just "org.wicd.daemon"
|
||||
, matchMember = Just "ConnectResultsSent"
|
||||
}
|
||||
wicd_success = toVariant ("success" :: String)
|
||||
|
||||
#endif
|
||||
|
||||
handleConnection :: Assistant ()
|
||||
handleConnection = reconnectRemotes True =<< networkRemotes
|
||||
|
||||
{- Finds network remotes. -}
|
||||
networkRemotes :: Assistant [Remote]
|
||||
networkRemotes = liftAnnex $
|
||||
filter (isNothing . Remote.localpath) <$> remoteList
|
153
Assistant/Threads/PairListener.hs
Normal file
|
@ -0,0 +1,153 @@
|
|||
{- git-annex assistant thread to listen for incoming pairing traffic
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
module Assistant.Threads.PairListener where
|
||||
|
||||
import Assistant.Common
|
||||
import Assistant.Pairing
|
||||
import Assistant.Pairing.Network
|
||||
import Assistant.Pairing.MakeRemote
|
||||
import Assistant.WebApp (UrlRenderer, renderUrl)
|
||||
import Assistant.WebApp.Types
|
||||
import Assistant.Alert
|
||||
import Assistant.DaemonStatus
|
||||
import Utility.ThreadScheduler
|
||||
import Git
|
||||
|
||||
import Network.Multicast
|
||||
import Network.Socket
|
||||
import qualified Data.Text as T
|
||||
import Data.Char
|
||||
|
||||
pairListenerThread :: UrlRenderer -> NamedThread
|
||||
pairListenerThread urlrenderer = namedThread "PairListener" $ do
|
||||
listener <- asIO1 $ go [] []
|
||||
liftIO $ withSocketsDo $
|
||||
runEvery (Seconds 1) $ void $ tryIO $
|
||||
listener =<< getsock
|
||||
where
|
||||
{- Note this can crash if there's no network interface,
|
||||
- or only one like lo that doesn't support multicast. -}
|
||||
getsock = multicastReceiver (multicastAddress $ IPv4Addr undefined) pairingPort
|
||||
|
||||
go reqs cache sock = liftIO (getmsg sock []) >>= \msg -> case readish msg of
|
||||
Nothing -> go reqs cache sock
|
||||
Just m -> do
|
||||
sane <- checkSane msg
|
||||
(pip, verified) <- verificationCheck m
|
||||
=<< (pairingInProgress <$> getDaemonStatus)
|
||||
let wrongstage = maybe False (\p -> pairMsgStage m <= inProgressPairStage p) pip
|
||||
case (wrongstage, sane, pairMsgStage m) of
|
||||
-- ignore our own messages, and
|
||||
-- out of order messages
|
||||
(True, _, _) -> go reqs cache sock
|
||||
(_, False, _) -> go reqs cache sock
|
||||
(_, _, PairReq) -> if m `elem` reqs
|
||||
then go reqs (invalidateCache m cache) sock
|
||||
else do
|
||||
pairReqReceived verified urlrenderer m
|
||||
go (m:take 10 reqs) (invalidateCache m cache) sock
|
||||
(_, _, PairAck) -> do
|
||||
cache' <- pairAckReceived verified pip m cache
|
||||
go reqs cache' sock
|
||||
(_, _, PairDone) -> do
|
||||
pairDoneReceived verified pip m
|
||||
go reqs cache sock
|
||||
|
||||
{- As well as verifying the message using the shared secret,
|
||||
- check its UUID against the UUID we have stored. If
|
||||
- they're the same, someone is sending bogus messages,
|
||||
- which could be an attempt to brute force the shared secret. -}
|
||||
verificationCheck _ Nothing = return (Nothing, False)
|
||||
verificationCheck m (Just pip)
|
||||
| not verified && sameuuid = do
|
||||
liftAnnex $ warning
|
||||
"detected possible pairing brute force attempt; disabled pairing"
|
||||
stopSending pip
|
||||
return (Nothing, False)
|
||||
| otherwise = return (Just pip, verified && sameuuid)
|
||||
where
|
||||
verified = verifiedPairMsg m pip
|
||||
sameuuid = pairUUID (inProgressPairData pip) == pairUUID (pairMsgData m)
|
||||
|
||||
{- Various sanity checks on the content of the message. -}
|
||||
checkSane msg
|
||||
{- Control characters could be used in a
|
||||
- console poisoning attack. -}
|
||||
| any isControl msg || any (`elem` "\r\n") msg = do
|
||||
liftAnnex $ warning
|
||||
"illegal control characters in pairing message; ignoring"
|
||||
return False
|
||||
| otherwise = return True
|
||||
|
||||
{- PairReqs invalidate the cache of recently finished pairings.
|
||||
- This is so that, if a new pairing is started with the
|
||||
- same secret used before, a bogus PairDone is not sent. -}
|
||||
invalidateCache msg = filter (not . verifiedPairMsg msg)
|
||||
|
||||
getmsg sock c = do
|
||||
(msg, n, _) <- recvFrom sock chunksz
|
||||
if n < chunksz
|
||||
then return $ c ++ msg
|
||||
else getmsg sock $ c ++ msg
|
||||
where
|
||||
chunksz = 1024
|
||||
|
||||
{- Show an alert when a PairReq is seen. -}
|
||||
pairReqReceived :: Bool -> UrlRenderer -> PairMsg -> Assistant ()
|
||||
pairReqReceived True _ _ = noop -- ignore our own PairReq
|
||||
pairReqReceived False urlrenderer msg = do
|
||||
url <- liftIO $ renderUrl urlrenderer (FinishLocalPairR msg) []
|
||||
closealert <- asIO1 removeAlert
|
||||
void $ addAlert $ pairRequestReceivedAlert repo
|
||||
AlertButton
|
||||
{ buttonUrl = url
|
||||
, buttonLabel = T.pack "Respond"
|
||||
, buttonAction = Just closealert
|
||||
}
|
||||
where
|
||||
repo = pairRepo msg
|
||||
|
||||
{- When a verified PairAck is seen, a host is ready to pair with us, and has
|
||||
- already configured our ssh key. Stop sending PairReqs, finish the pairing,
|
||||
- and send a single PairDone. -}
|
||||
pairAckReceived :: Bool -> Maybe PairingInProgress -> PairMsg -> [PairingInProgress] -> Assistant [PairingInProgress]
|
||||
pairAckReceived True (Just pip) msg cache = do
|
||||
stopSending pip
|
||||
repodir <- repoPath <$> liftAnnex gitRepo
|
||||
liftIO $ setupAuthorizedKeys msg repodir
|
||||
finishedLocalPairing msg (inProgressSshKeyPair pip)
|
||||
startSending pip PairDone $ multicastPairMsg
|
||||
(Just 1) (inProgressSecret pip) (inProgressPairData pip)
|
||||
return $ pip : take 10 cache
|
||||
{- A stale PairAck might also be seen, after we've finished pairing.
|
||||
- Perhaps our PairDone was not received. To handle this, we keep
|
||||
- a cache of recently finished pairings, and re-send PairDone in
|
||||
- response to stale PairAcks for them. -}
|
||||
pairAckReceived _ _ msg cache = do
|
||||
let pips = filter (verifiedPairMsg msg) cache
|
||||
unless (null pips) $
|
||||
forM_ pips $ \pip ->
|
||||
startSending pip PairDone $ multicastPairMsg
|
||||
(Just 1) (inProgressSecret pip) (inProgressPairData pip)
|
||||
return cache
|
||||
|
||||
{- If we get a verified PairDone, the host has accepted our PairAck, and
|
||||
- has paired with us. Stop sending PairAcks, and finish pairing with them.
|
||||
-
|
||||
- TODO: Should third-party hosts remove their pair request alert when they
|
||||
- see a PairDone?
|
||||
- Complication: The user could have already clicked on the alert and be
|
||||
- entering the secret. Would be better to start a fresh pair request in this
|
||||
- situation.
|
||||
-}
|
||||
pairDoneReceived :: Bool -> Maybe PairingInProgress -> PairMsg -> Assistant ()
|
||||
pairDoneReceived False _ _ = noop -- not verified
|
||||
pairDoneReceived True Nothing _ = noop -- not in progress
|
||||
pairDoneReceived True (Just pip) msg = do
|
||||
stopSending pip
|
||||
finishedLocalPairing msg (inProgressSshKeyPair pip)
|
64
Assistant/Threads/Pusher.hs
Normal file
|
@ -0,0 +1,64 @@
|
|||
{- git-annex assistant git pushing thread
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
module Assistant.Threads.Pusher where
|
||||
|
||||
import Assistant.Common
|
||||
import Assistant.Commits
|
||||
import Assistant.Types.Commits
|
||||
import Assistant.Pushes
|
||||
import Assistant.Alert
|
||||
import Assistant.DaemonStatus
|
||||
import Assistant.Sync
|
||||
import Utility.ThreadScheduler
|
||||
import qualified Types.Remote as Remote
|
||||
|
||||
import Data.Time.Clock
|
||||
|
||||
{- This thread retries pushes that failed before. -}
|
||||
pushRetryThread :: NamedThread
|
||||
pushRetryThread = namedThread "PushRetrier" $ runEvery (Seconds halfhour) <~> do
|
||||
-- We already waited half an hour; now wait until there are failed
|
||||
-- pushes to retry.
|
||||
topush <- getFailedPushesBefore (fromIntegral halfhour)
|
||||
unless (null topush) $ do
|
||||
debug ["retrying", show (length topush), "failed pushes"]
|
||||
void $ alertWhile (pushRetryAlert topush) $ do
|
||||
now <- liftIO $ getCurrentTime
|
||||
pushToRemotes now True topush
|
||||
where
|
||||
halfhour = 1800
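{- For context, a sketch of the runEvery combinator used above (an
- assumption based on how it is used here; the real definition lives in
- Utility.ThreadScheduler): it sleeps for the interval first and then runs
- the action, forever - hence the "We already waited ..." comments at the
- top of both push threads. -}
runEverySketch :: Seconds -> IO () -> IO ()
runEverySketch n a = forever $ do
	threadDelaySeconds n
	a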
|
||||
|
||||
{- This thread pushes git commits out to remotes soon after they are made. -}
|
||||
pushThread :: NamedThread
|
||||
pushThread = namedThread "Pusher" $ runEvery (Seconds 2) <~> do
|
||||
-- We already waited two seconds as a simple rate limiter.
|
||||
-- Next, wait until at least one commit has been made
|
||||
commits <- getCommits
|
||||
-- Now see if now's a good time to push.
|
||||
if shouldPush commits
|
||||
then do
|
||||
remotes <- filter (not . Remote.readonly)
|
||||
. syncGitRemotes <$> getDaemonStatus
|
||||
unless (null remotes) $
|
||||
void $ alertWhile (pushAlert remotes) $ do
|
||||
now <- liftIO $ getCurrentTime
|
||||
pushToRemotes now True remotes
|
||||
else do
|
||||
debug ["delaying push of", show (length commits), "commits"]
|
||||
refillCommits commits
|
||||
|
||||
{- Decide if now is a good time to push to remotes.
|
||||
-
|
||||
- Current strategy: Immediately push all commits. The commit machinery
|
||||
- already determines batches of changes, so we can't easily determine
|
||||
- batches better.
|
||||
-}
|
||||
shouldPush :: [Commit] -> Bool
|
||||
shouldPush commits
|
||||
| not (null commits) = True
|
||||
| otherwise = False
|
137 Assistant/Threads/SanityChecker.hs Normal file
@ -0,0 +1,137 @@
|
|||
{- git-annex assistant sanity checker
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
module Assistant.Threads.SanityChecker (
|
||||
sanityCheckerDailyThread,
|
||||
sanityCheckerHourlyThread
|
||||
) where
|
||||
|
||||
import Assistant.Common
|
||||
import Assistant.DaemonStatus
|
||||
import Assistant.Alert
|
||||
import qualified Git.LsFiles
|
||||
import qualified Git.Command
|
||||
import qualified Git.Config
|
||||
import Utility.ThreadScheduler
|
||||
import qualified Assistant.Threads.Watcher as Watcher
|
||||
import Utility.LogFile
|
||||
import Config
|
||||
|
||||
import Data.Time.Clock.POSIX
|
||||
|
||||
{- This thread wakes up hourly for inexpensive, frequent sanity checks. -}
|
||||
sanityCheckerHourlyThread :: NamedThread
|
||||
sanityCheckerHourlyThread = namedThread "SanityCheckerHourly" $ forever $ do
|
||||
liftIO $ threadDelaySeconds $ Seconds oneHour
|
||||
hourlyCheck
|
||||
|
||||
{- This thread wakes up daily to make sure the tree is in good shape. -}
|
||||
sanityCheckerDailyThread :: NamedThread
|
||||
sanityCheckerDailyThread = namedThread "SanityCheckerDaily" $ forever $ do
|
||||
waitForNextCheck
|
||||
|
||||
debug ["starting sanity check"]
|
||||
void $ alertWhile sanityCheckAlert go
|
||||
debug ["sanity check complete"]
|
||||
where
|
||||
go = do
|
||||
modifyDaemonStatus_ $ \s -> s { sanityCheckRunning = True }
|
||||
|
||||
now <- liftIO $ getPOSIXTime -- before check started
|
||||
r <- either showerr return =<< tryIO <~> dailyCheck
|
||||
|
||||
modifyDaemonStatus_ $ \s -> s
|
||||
{ sanityCheckRunning = False
|
||||
, lastSanityCheck = Just now
|
||||
}
|
||||
|
||||
return r
|
||||
|
||||
showerr e = do
|
||||
liftAnnex $ warning $ show e
|
||||
return False
|
||||
|
||||
{- Only run one check per day, from the time of the last check. -}
|
||||
waitForNextCheck :: Assistant ()
|
||||
waitForNextCheck = do
|
||||
v <- lastSanityCheck <$> getDaemonStatus
|
||||
now <- liftIO getPOSIXTime
|
||||
liftIO $ threadDelaySeconds $ Seconds $ calcdelay now v
|
||||
where
|
||||
calcdelay _ Nothing = oneDay
|
||||
calcdelay now (Just lastcheck)
|
||||
| lastcheck < now = max oneDay $
|
||||
oneDay - truncate (now - lastcheck)
|
||||
| otherwise = oneDay
|
||||
|
||||
{- It's important to stay out of the Annex monad as much as possible while
|
||||
- running potentially expensive parts of this check, since remaining in it
|
||||
- will block the watcher. -}
|
||||
dailyCheck :: Assistant Bool
|
||||
dailyCheck = do
|
||||
g <- liftAnnex gitRepo
|
||||
|
||||
-- Find old unstaged symlinks, and add them to git.
|
||||
(unstaged, cleanup) <- liftIO $ Git.LsFiles.notInRepo False ["."] g
|
||||
now <- liftIO $ getPOSIXTime
|
||||
forM_ unstaged $ \file -> do
|
||||
ms <- liftIO $ catchMaybeIO $ getSymbolicLinkStatus file
|
||||
case ms of
|
||||
Just s | toonew (statusChangeTime s) now -> noop
|
||||
| isSymbolicLink s -> addsymlink file ms
|
||||
_ -> noop
|
||||
liftIO $ void cleanup
|
||||
|
||||
{- Allow git-gc to run once per day. More frequent gc is suppressed
- by default to keep things responsive. Only run repacks when 100x the
- usual number of loose objects is present (gc.auto=670000 is 100x
- git's default threshold of 6700); we tend to have a lot of small
- objects and they should not amount to a significant size. -}
|
||||
when (Git.Config.getMaybe "gc.auto" g == Just "0") $
|
||||
liftIO $ void $ Git.Command.runBool
|
||||
[ Param "-c", Param "gc.auto=670000"
|
||||
, Param "gc"
|
||||
, Param "--auto"
|
||||
] g
|
||||
|
||||
return True
|
||||
where
|
||||
toonew timestamp now = now < (realToFrac (timestamp + slop) :: POSIXTime)
|
||||
slop = fromIntegral tenMinutes
|
||||
insanity msg = do
|
||||
liftAnnex $ warning msg
|
||||
void $ addAlert $ sanityCheckFixAlert msg
|
||||
addsymlink file s = do
|
||||
isdirect <- liftAnnex isDirect
|
||||
Watcher.runHandler (Watcher.onAddSymlink isdirect) file s
|
||||
insanity $ "found unstaged symlink: " ++ file
|
||||
|
||||
hourlyCheck :: Assistant ()
|
||||
hourlyCheck = checkLogSize 0
|
||||
|
||||
{- Rotate logs until log file size is < 1 mb. -}
|
||||
checkLogSize :: Int -> Assistant ()
|
||||
checkLogSize n = do
|
||||
f <- liftAnnex $ fromRepo gitAnnexLogFile
|
||||
logs <- liftIO $ listLogs f
|
||||
totalsize <- liftIO $ sum <$> mapM filesize logs
|
||||
when (totalsize > oneMegabyte) $ do
|
||||
notice ["Rotated logs due to size:", show totalsize]
|
||||
liftIO $ openLog f >>= redirLog
|
||||
when (n < maxLogs + 1) $
|
||||
checkLogSize $ n + 1
|
||||
where
|
||||
filesize f = fromIntegral . fileSize <$> liftIO (getFileStatus f)
|
||||
|
||||
oneMegabyte :: Int
|
||||
oneMegabyte = 1000000
|
||||
|
||||
oneHour :: Int
|
||||
oneHour = 60 * 60
|
||||
|
||||
oneDay :: Int
|
||||
oneDay = 24 * oneHour
|
56 Assistant/Threads/TransferPoller.hs Normal file
@ -0,0 +1,56 @@
|
|||
{- git-annex assistant transfer polling thread
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
module Assistant.Threads.TransferPoller where
|
||||
|
||||
import Assistant.Common
|
||||
import Assistant.DaemonStatus
|
||||
import Logs.Transfer
|
||||
import Utility.NotificationBroadcaster
|
||||
import qualified Assistant.Threads.TransferWatcher as TransferWatcher
|
||||
|
||||
import Control.Concurrent
|
||||
import qualified Data.Map as M
|
||||
|
||||
{- This thread polls the status of ongoing transfers, determining how much
|
||||
- of each transfer is complete. -}
|
||||
transferPollerThread :: NamedThread
|
||||
transferPollerThread = namedThread "TransferPoller" $ do
|
||||
g <- liftAnnex gitRepo
|
||||
tn <- liftIO . newNotificationHandle =<<
|
||||
transferNotifier <$> getDaemonStatus
|
||||
forever $ do
|
||||
liftIO $ threadDelay 500000 -- 0.5 seconds
|
||||
ts <- currentTransfers <$> getDaemonStatus
|
||||
if M.null ts
|
||||
-- block until transfers running
|
||||
then liftIO $ waitNotification tn
|
||||
else mapM_ (poll g) $ M.toList ts
|
||||
where
|
||||
poll g (t, info)
|
||||
{- Downloads are polled by checking the size of the
|
||||
- temp file being used for the transfer. -}
|
||||
| transferDirection t == Download = do
|
||||
let f = gitAnnexTmpLocation (transferKey t) g
|
||||
sz <- liftIO $ catchMaybeIO $
|
||||
fromIntegral . fileSize <$> getFileStatus f
|
||||
newsize t info sz
|
||||
{- Uploads don't need to be polled for when the TransferWatcher
|
||||
- thread can track file modifications. -}
|
||||
| TransferWatcher.watchesTransferSize = noop
|
||||
{- Otherwise, this code polls the upload progress
|
||||
- by reading the transfer info file. -}
|
||||
| otherwise = do
|
||||
let f = transferFile t g
|
||||
mi <- liftIO $ catchDefaultIO Nothing $
|
||||
readTransferInfoFile Nothing f
|
||||
maybe noop (newsize t info . bytesComplete) mi
|
||||
|
||||
newsize t info sz
|
||||
| bytesComplete info /= sz && isJust sz =
|
||||
alterTransferInfo t $ \i -> i { bytesComplete = sz }
|
||||
| otherwise = noop
|
142 Assistant/Threads/TransferScanner.hs Normal file
@ -0,0 +1,142 @@
|
|||
{- git-annex assistant thread to scan remotes to find needed transfers
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
module Assistant.Threads.TransferScanner where
|
||||
|
||||
import Assistant.Common
|
||||
import Assistant.Types.ScanRemotes
|
||||
import Assistant.ScanRemotes
|
||||
import Assistant.TransferQueue
|
||||
import Assistant.DaemonStatus
|
||||
import Assistant.Alert
|
||||
import Assistant.Drop
|
||||
import Logs.Transfer
|
||||
import Logs.Location
|
||||
import Logs.Web (webUUID)
|
||||
import qualified Remote
|
||||
import qualified Types.Remote as Remote
|
||||
import Utility.ThreadScheduler
|
||||
import qualified Git.LsFiles as LsFiles
|
||||
import qualified Backend
|
||||
import Annex.Content
|
||||
import Annex.Wanted
|
||||
|
||||
import qualified Data.Set as S
|
||||
|
||||
{- This thread waits until a remote needs to be scanned, to find transfers
|
||||
- that need to be made, to keep data in sync.
|
||||
-}
|
||||
transferScannerThread :: NamedThread
|
||||
transferScannerThread = namedThread "TransferScanner" $ do
|
||||
startupScan
|
||||
go S.empty
|
||||
where
|
||||
go scanned = do
|
||||
liftIO $ threadDelaySeconds (Seconds 2)
|
||||
(rs, infos) <- unzip <$> getScanRemote
|
||||
if any fullScan infos || any (`S.notMember` scanned) rs
|
||||
then do
|
||||
expensiveScan rs
|
||||
go $ scanned `S.union` S.fromList rs
|
||||
else do
|
||||
mapM_ failedTransferScan rs
|
||||
go scanned
|
||||
{- All available remotes are scanned in full on startup,
|
||||
- for multiple reasons, including:
|
||||
-
|
||||
- * This may be the first run, and there may be remotes
|
||||
- already in place, that need to be synced.
|
||||
- * We may have run before, and scanned a remote, but
|
||||
- only been in a subdirectory of the git remote, and so
|
||||
- not synced it all.
|
||||
- * We may have run before, and had transfers queued,
|
||||
- and then the system (or us) crashed, and that info was
|
||||
- lost.
|
||||
-}
|
||||
startupScan = addScanRemotes True =<< syncDataRemotes <$> getDaemonStatus
|
||||
|
||||
{- This is a cheap scan for failed transfers involving a remote. -}
|
||||
failedTransferScan :: Remote -> Assistant ()
|
||||
failedTransferScan r = do
|
||||
failed <- liftAnnex $ getFailedTransfers (Remote.uuid r)
|
||||
liftAnnex $ mapM_ removeFailedTransfer $ map fst failed
|
||||
mapM_ retry failed
|
||||
where
|
||||
retry (t, info)
|
||||
| transferDirection t == Download = do
|
||||
{- Check if the remote still has the key.
|
||||
- If not, relies on the expensiveScan to
|
||||
- get it queued from some other remote. -}
|
||||
whenM (liftAnnex $ remoteHas r $ transferKey t) $
|
||||
requeue t info
|
||||
| otherwise = do
|
||||
{- The Transferrer checks when uploading
|
||||
- that the remote doesn't already have the
|
||||
- key, so it's not redundantly checked here. -}
|
||||
requeue t info
|
||||
requeue t info = queueTransferWhenSmall "retrying failed transfer" (associatedFile info) t r
|
||||
|
||||
{- This is an expensive scan through the full git work tree, finding
|
||||
- files to transfer. The scan is blocked when the transfer queue gets
|
||||
- too large.
|
||||
-
|
||||
- This also finds files that are present either here or on a remote
|
||||
- but that are not preferred content, and drops them. Searching for files
|
||||
- to drop is done concurrently with the scan for transfers.
|
||||
-
|
||||
- TODO: It would be better to first drop as much as we can, before
|
||||
- transferring much, to minimise disk use.
|
||||
-}
|
||||
expensiveScan :: [Remote] -> Assistant ()
|
||||
expensiveScan rs = unless onlyweb $ do
|
||||
debug ["starting scan of", show visiblers]
|
||||
void $ alertWhile (scanAlert visiblers) $ do
|
||||
g <- liftAnnex gitRepo
|
||||
(files, cleanup) <- liftIO $ LsFiles.inRepo [] g
|
||||
forM_ files $ \f -> do
|
||||
ts <- maybe (return []) (findtransfers f)
|
||||
=<< liftAnnex (Backend.lookupFile f)
|
||||
mapM_ (enqueue f) ts
|
||||
void $ liftIO cleanup
|
||||
return True
|
||||
debug ["finished scan of", show visiblers]
|
||||
where
|
||||
onlyweb = all (== webUUID) $ map Remote.uuid rs
|
||||
visiblers = let rs' = filter (not . Remote.readonly) rs
|
||||
in if null rs' then rs else rs'
|
||||
enqueue f (r, t) =
|
||||
queueTransferWhenSmall "expensive scan found missing object"
|
||||
(Just f) t r
|
||||
findtransfers f (key, _) = do
|
||||
{- The syncable remotes may have changed since this
|
||||
- scan began. -}
|
||||
syncrs <- syncDataRemotes <$> getDaemonStatus
|
||||
locs <- liftAnnex $ loggedLocations key
|
||||
present <- liftAnnex $ inAnnex key
|
||||
handleDropsFrom locs syncrs
|
||||
"expensive scan found too many copies of object"
|
||||
present key (Just f) Nothing
|
||||
liftAnnex $ do
|
||||
let slocs = S.fromList locs
|
||||
let use a = return $ catMaybes $ map (a key slocs) syncrs
|
||||
if present
|
||||
then filterM (wantSend True (Just f) . Remote.uuid . fst)
|
||||
=<< use (genTransfer Upload False)
|
||||
else ifM (wantGet True $ Just f)
|
||||
( use (genTransfer Download True) , return [] )
|
||||
|
||||
genTransfer :: Direction -> Bool -> Key -> S.Set UUID -> Remote -> Maybe (Remote, Transfer)
|
||||
genTransfer direction want key slocs r
|
||||
| direction == Upload && Remote.readonly r = Nothing
|
||||
| (S.member (Remote.uuid r) slocs) == want = Just
|
||||
(r, Transfer direction (Remote.uuid r) key)
|
||||
| otherwise = Nothing
|
||||
|
||||
remoteHas :: Remote -> Key -> Annex Bool
|
||||
remoteHas r key = elem
|
||||
<$> pure (Remote.uuid r)
|
||||
<*> loggedLocations key
|
128 Assistant/Threads/TransferWatcher.hs Normal file
@ -0,0 +1,128 @@
|
|||
{- git-annex assistant transfer watching thread
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
module Assistant.Threads.TransferWatcher where
|
||||
|
||||
import Assistant.Common
|
||||
import Assistant.DaemonStatus
|
||||
import Assistant.TransferQueue
|
||||
import Assistant.Drop
|
||||
import Annex.Content
|
||||
import Logs.Transfer
|
||||
import Utility.DirWatcher
|
||||
import Utility.Types.DirWatcher
|
||||
import qualified Remote
|
||||
|
||||
import Control.Concurrent
|
||||
|
||||
{- This thread watches for changes to the gitAnnexTransferDir,
|
||||
- and updates the DaemonStatus's map of ongoing transfers. -}
|
||||
transferWatcherThread :: NamedThread
|
||||
transferWatcherThread = namedThread "TransferWatcher" $ do
|
||||
dir <- liftAnnex $ gitAnnexTransferDir <$> gitRepo
|
||||
liftIO $ createDirectoryIfMissing True dir
|
||||
let hook a = Just <$> asIO2 (runHandler a)
|
||||
addhook <- hook onAdd
|
||||
delhook <- hook onDel
|
||||
modifyhook <- hook onModify
|
||||
errhook <- hook onErr
|
||||
let hooks = mkWatchHooks
|
||||
{ addHook = addhook
|
||||
, delHook = delhook
|
||||
, modifyHook = modifyhook
|
||||
, errHook = errhook
|
||||
}
|
||||
void $ liftIO $ watchDir dir (const False) hooks id
|
||||
debug ["watching for transfers"]
|
||||
|
||||
type Handler = FilePath -> Assistant ()
|
||||
|
||||
{- Runs an action handler.
|
||||
-
|
||||
- Exceptions are ignored; otherwise an exception would crash the whole thread.
|
||||
-}
|
||||
runHandler :: Handler -> FilePath -> Maybe FileStatus -> Assistant ()
|
||||
runHandler handler file _filestatus =
|
||||
either (liftIO . print) (const noop) =<< tryIO <~> handler file
|
||||
|
||||
{- Called when there's an error with inotify. -}
|
||||
onErr :: Handler
|
||||
onErr msg = error msg
|
||||
|
||||
{- Called when a new transfer information file is written. -}
|
||||
onAdd :: Handler
|
||||
onAdd file = case parseTransferFile file of
|
||||
Nothing -> noop
|
||||
Just t -> go t =<< liftAnnex (checkTransfer t)
|
||||
where
|
||||
go _ Nothing = noop -- transfer already finished
|
||||
go t (Just info) = do
|
||||
debug [ "transfer starting:", describeTransfer t info ]
|
||||
r <- headMaybe . filter (sameuuid t)
|
||||
<$> liftAnnex Remote.remoteList
|
||||
updateTransferInfo t info { transferRemote = r }
|
||||
sameuuid t r = Remote.uuid r == transferUUID t
|
||||
|
||||
{- Called when a transfer information file is updated.
|
||||
-
|
||||
- The only thing that should change in the transfer info is the
|
||||
- bytesComplete, so that's the only thing updated in the DaemonStatus. -}
|
||||
onModify :: Handler
|
||||
onModify file = do
|
||||
case parseTransferFile file of
|
||||
Nothing -> noop
|
||||
Just t -> go t =<< liftIO (readTransferInfoFile Nothing file)
|
||||
where
|
||||
go _ Nothing = noop
|
||||
go t (Just newinfo) = alterTransferInfo t $
|
||||
\i -> i { bytesComplete = bytesComplete newinfo }
|
||||
|
||||
{- This thread can only watch transfer sizes when the DirWatcher supports
|
||||
- tracking modifications to files. -}
|
||||
watchesTransferSize :: Bool
|
||||
watchesTransferSize = modifyTracked
|
||||
|
||||
{- Called when a transfer information file is removed. -}
|
||||
onDel :: Handler
|
||||
onDel file = case parseTransferFile file of
|
||||
Nothing -> noop
|
||||
Just t -> do
|
||||
debug [ "transfer finishing:", show t]
|
||||
minfo <- removeTransfer t
|
||||
|
||||
finished <- asIO2 finishedTransfer
|
||||
void $ liftIO $ forkIO $ do
|
||||
{- XXX race workaround delay. The location
|
||||
- log needs to be updated before finishedTransfer
|
||||
- runs. -}
|
||||
threadDelay 10000000 -- 10 seconds
|
||||
finished t minfo
|
||||
|
||||
{- Queue uploads of files downloaded to us, spreading them
|
||||
- out to other reachable remotes.
|
||||
-
|
||||
- Downloading a file may have caused a remote to not want it;
|
||||
- so check for drops from remotes.
|
||||
-
|
||||
- Uploading a file may cause the local repo, or some other remote to not
|
||||
- want it; handle that too.
|
||||
-}
|
||||
finishedTransfer :: Transfer -> Maybe TransferInfo -> Assistant ()
|
||||
finishedTransfer t (Just info)
|
||||
| transferDirection t == Download =
|
||||
whenM (liftAnnex $ inAnnex $ transferKey t) $ do
|
||||
dodrops False
|
||||
queueTransfersMatching (/= transferUUID t)
|
||||
"newly received object"
|
||||
Later (transferKey t) (associatedFile info) Upload
|
||||
| otherwise = dodrops True
|
||||
where
|
||||
dodrops fromhere = handleDrops
|
||||
("drop wanted after " ++ describeTransfer t info)
|
||||
fromhere (transferKey t) (associatedFile info) Nothing
|
||||
finishedTransfer _ _ = noop
|
||||
|
117 Assistant/Threads/Transferrer.hs Normal file
@ -0,0 +1,117 @@
|
|||
{- git-annex assistant data transferrer thread
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
module Assistant.Threads.Transferrer where
|
||||
|
||||
import Assistant.Common
|
||||
import Assistant.DaemonStatus
|
||||
import Assistant.TransferQueue
|
||||
import Assistant.TransferSlots
|
||||
import Assistant.Alert
|
||||
import Assistant.Commits
|
||||
import Assistant.Drop
|
||||
import Logs.Transfer
|
||||
import Logs.Location
|
||||
import Annex.Content
|
||||
import qualified Remote
|
||||
import Types.Key
|
||||
import Locations.UserConfig
|
||||
import Assistant.Threads.TransferWatcher
|
||||
|
||||
import System.Process (create_group)
|
||||
|
||||
{- Dispatches transfers from the queue. -}
|
||||
transfererThread :: NamedThread
|
||||
transfererThread = namedThread "Transferrer" $ do
|
||||
program <- liftIO readProgramFile
|
||||
forever $ inTransferSlot $
|
||||
maybe (return Nothing) (uncurry $ startTransfer program)
|
||||
=<< getNextTransfer notrunning
|
||||
where
|
||||
{- Skip transfers that are already running. -}
|
||||
notrunning = isNothing . startedTime
|
||||
|
||||
{- By the time this is called, the daemonstatus's transfer map should
|
||||
- already have been updated to include the transfer. -}
|
||||
startTransfer :: FilePath -> Transfer -> TransferInfo -> Assistant (Maybe (Transfer, TransferInfo, Assistant ()))
|
||||
startTransfer program t info = case (transferRemote info, associatedFile info) of
|
||||
(Just remote, Just file) -> ifM (liftAnnex $ shouldTransfer t info)
|
||||
( do
|
||||
debug [ "Transferring:" , describeTransfer t info ]
|
||||
notifyTransfer
|
||||
return $ Just (t, info, transferprocess remote file)
|
||||
, do
|
||||
debug [ "Skipping unnecessary transfer:" , describeTransfer t info ]
|
||||
void $ removeTransfer t
|
||||
finishedTransfer t (Just info)
|
||||
return Nothing
|
||||
)
|
||||
_ -> return Nothing
|
||||
where
|
||||
direction = transferDirection t
|
||||
isdownload = direction == Download
|
||||
|
||||
transferprocess remote file = void $ do
|
||||
(_, _, _, pid)
|
||||
<- liftIO $ createProcess (proc program $ toCommand params)
|
||||
{ create_group = True }
|
||||
{- Alerts are only shown for successful transfers.
|
||||
- Transfers can temporarily fail for many reasons,
|
||||
- so there's no point in bothering the user about
|
||||
- those. The assistant should recover.
|
||||
-
|
||||
- After a successful upload, handle dropping it from
|
||||
- here, if desired. In this case, the remote it was
|
||||
- uploaded to is known to have it.
|
||||
-
|
||||
- Also, after a successful transfer, the location
|
||||
- log has changed. Indicate that a commit has been
|
||||
- made, in order to queue a push of the git-annex
|
||||
- branch out to remotes that did not participate
|
||||
- in the transfer.
|
||||
-}
|
||||
whenM (liftIO $ (==) ExitSuccess <$> waitForProcess pid) $ do
|
||||
void $ addAlert $ makeAlertFiller True $
|
||||
transferFileAlert direction True file
|
||||
unless isdownload $
|
||||
handleDrops
|
||||
("object uploaded to " ++ show remote)
|
||||
True (transferKey t)
|
||||
(associatedFile info)
|
||||
(Just remote)
|
||||
recordCommit
|
||||
where
|
||||
params =
|
||||
[ Param "transferkey"
|
||||
, Param "--quiet"
|
||||
, Param $ key2file $ transferKey t
|
||||
, Param $ if isdownload
|
||||
then "--from"
|
||||
else "--to"
|
||||
, Param $ Remote.name remote
|
||||
, Param "--file"
|
||||
, File file
|
||||
]
|
||||
|
||||
{- Checks if the file to download is already present, or the remote
|
||||
- being uploaded to isn't known to have the file. -}
|
||||
shouldTransfer :: Transfer -> TransferInfo -> Annex Bool
|
||||
shouldTransfer t info
|
||||
| transferDirection t == Download =
|
||||
not <$> inAnnex key
|
||||
| transferDirection t == Upload =
|
||||
{- Trust the location log to check if the
|
||||
- remote already has the key. This avoids
|
||||
- a roundtrip to the remote. -}
|
||||
case transferRemote info of
|
||||
Nothing -> return False
|
||||
Just remote ->
|
||||
notElem (Remote.uuid remote)
|
||||
<$> loggedLocations key
|
||||
| otherwise = return False
|
||||
where
|
||||
key = transferKey t
|
290 Assistant/Threads/Watcher.hs Normal file
@ -0,0 +1,290 @@
|
|||
{- git-annex assistant tree watcher
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
{-# LANGUAGE DeriveDataTypeable, CPP #-}
|
||||
|
||||
module Assistant.Threads.Watcher (
|
||||
watchThread,
|
||||
WatcherException(..),
|
||||
checkCanWatch,
|
||||
needLsof,
|
||||
onAddSymlink,
|
||||
runHandler,
|
||||
) where
|
||||
|
||||
import Assistant.Common
|
||||
import Assistant.DaemonStatus
|
||||
import Assistant.Changes
|
||||
import Assistant.Types.Changes
|
||||
import Assistant.TransferQueue
|
||||
import Assistant.Alert
|
||||
import Assistant.Drop
|
||||
import Logs.Transfer
|
||||
import Utility.DirWatcher
|
||||
import Utility.Types.DirWatcher
|
||||
import Utility.Lsof
|
||||
import qualified Annex
|
||||
import qualified Annex.Queue
|
||||
import qualified Git
|
||||
import qualified Git.UpdateIndex
|
||||
import qualified Git.LsFiles as LsFiles
|
||||
import qualified Backend
|
||||
import Annex.Content
|
||||
import Annex.Direct
|
||||
import Annex.Content.Direct
|
||||
import Annex.CatFile
|
||||
import Annex.Link
|
||||
import Git.Types
|
||||
import Config
|
||||
import Utility.ThreadScheduler
|
||||
|
||||
import Data.Bits.Utils
|
||||
import Data.Typeable
|
||||
import qualified Data.ByteString.Lazy as L
|
||||
import qualified Control.Exception as E
|
||||
|
||||
checkCanWatch :: Annex ()
|
||||
checkCanWatch
|
||||
| canWatch = do
|
||||
liftIO setupLsof
|
||||
unlessM (liftIO (inPath "lsof") <||> Annex.getState Annex.force)
|
||||
needLsof
|
||||
| otherwise = error "watch mode is not available on this system"
|
||||
|
||||
needLsof :: Annex ()
|
||||
needLsof = error $ unlines
|
||||
[ "The lsof command is needed for watch mode to be safe, and is not in PATH."
|
||||
, "To override lsof checks to ensure that files are not open for writing"
|
||||
, "when added to the annex, you can use --force"
|
||||
, "Be warned: This can corrupt data in the annex, and make fsck complain."
|
||||
]
|
||||
|
||||
{- A special exception that can be thrown to pause or resume the watcher. -}
|
||||
data WatcherException = PauseWatcher | ResumeWatcher
|
||||
deriving (Show, Eq, Typeable)
|
||||
|
||||
instance E.Exception WatcherException
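{- Illustration only (not part of the original module): helper names are
- hypothetical, and an import of ThreadId from Control.Concurrent is
- assumed. Another thread can pause or resume the watcher by throwing
- these exceptions at the watcher thread's ThreadId. -}
pauseWatcherSketch :: ThreadId -> IO ()
pauseWatcherSketch tid = E.throwTo tid PauseWatcher

resumeWatcherSketch :: ThreadId -> IO ()
resumeWatcherSketch tid = E.throwTo tid ResumeWatcher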
|
||||
|
||||
watchThread :: NamedThread
|
||||
watchThread = namedThread "Watcher" $
|
||||
ifM (liftAnnex $ annexAutoCommit <$> Annex.getGitConfig)
|
||||
( runWatcher
|
||||
, waitFor ResumeWatcher runWatcher
|
||||
)
|
||||
|
||||
runWatcher :: Assistant ()
|
||||
runWatcher = do
|
||||
startup <- asIO1 startupScan
|
||||
direct <- liftAnnex isDirect
|
||||
addhook <- hook $ if direct then onAddDirect else onAdd
|
||||
delhook <- hook onDel
|
||||
addsymlinkhook <- hook $ onAddSymlink direct
|
||||
deldirhook <- hook onDelDir
|
||||
errhook <- hook onErr
|
||||
let hooks = mkWatchHooks
|
||||
{ addHook = addhook
|
||||
, delHook = delhook
|
||||
, addSymlinkHook = addsymlinkhook
|
||||
, delDirHook = deldirhook
|
||||
, errHook = errhook
|
||||
}
|
||||
handle <- liftIO $ watchDir "." ignored hooks startup
|
||||
debug [ "watching", "."]
|
||||
|
||||
{- Let the DirWatcher thread run until signalled to pause it,
|
||||
- then wait for a resume signal, and restart. -}
|
||||
waitFor PauseWatcher $ do
|
||||
liftIO $ stopWatchDir handle
|
||||
waitFor ResumeWatcher runWatcher
|
||||
where
|
||||
hook a = Just <$> asIO2 (runHandler a)
|
||||
|
||||
waitFor :: WatcherException -> Assistant () -> Assistant ()
|
||||
waitFor sig next = do
|
||||
r <- liftIO $ (E.try pause :: IO (Either E.SomeException ()))
|
||||
case r of
|
||||
Left e -> case E.fromException e of
|
||||
Just s
|
||||
| s == sig -> next
|
||||
_ -> noop
|
||||
_ -> noop
|
||||
where
|
||||
pause = runEvery (Seconds 86400) noop
|
||||
|
||||
{- Initial startup scan. The action should return once the scan is complete. -}
|
||||
startupScan :: IO a -> Assistant a
|
||||
startupScan scanner = do
|
||||
liftAnnex $ showAction "scanning"
|
||||
alertWhile' startupScanAlert $ do
|
||||
r <- liftIO $ scanner
|
||||
|
||||
-- Notice any files that were deleted before
|
||||
-- watching was started.
|
||||
top <- liftAnnex $ fromRepo Git.repoPath
|
||||
(fs, cleanup) <- liftAnnex $ inRepo $ LsFiles.deleted [top]
|
||||
forM_ fs $ \f -> do
|
||||
liftAnnex $ Annex.Queue.addUpdateIndex =<<
|
||||
inRepo (Git.UpdateIndex.unstageFile f)
|
||||
maybe noop recordChange =<< madeChange f RmChange
|
||||
void $ liftIO $ cleanup
|
||||
|
||||
liftAnnex $ showAction "started"
|
||||
liftIO $ putStrLn ""
|
||||
|
||||
modifyDaemonStatus_ $ \s -> s { scanComplete = True }
|
||||
|
||||
return (True, r)
|
||||
|
||||
ignored :: FilePath -> Bool
|
||||
ignored = ig . takeFileName
|
||||
where
|
||||
ig ".git" = True
|
||||
ig ".gitignore" = True
|
||||
ig ".gitattributes" = True
|
||||
#ifdef darwin_HOST_OS
|
||||
ig ".DS_Store" = True
|
||||
#endif
|
||||
ig _ = False
|
||||
|
||||
type Handler = FilePath -> Maybe FileStatus -> Assistant (Maybe Change)
|
||||
|
||||
{- Runs an action handler, and if there was a change, adds it to the ChangeChan.
|
||||
-
|
||||
- Exceptions are ignored; otherwise an exception would crash the whole watcher thread.
|
||||
-}
|
||||
runHandler :: Handler -> FilePath -> Maybe FileStatus -> Assistant ()
|
||||
runHandler handler file filestatus = void $ do
|
||||
r <- tryIO <~> handler file filestatus
|
||||
case r of
|
||||
Left e -> liftIO $ print e
|
||||
Right Nothing -> noop
|
||||
Right (Just change) -> do
|
||||
-- Just in case the commit thread is not
|
||||
-- flushing the queue fast enough.
|
||||
liftAnnex $ Annex.Queue.flushWhenFull
|
||||
recordChange change
|
||||
|
||||
onAdd :: Handler
|
||||
onAdd file filestatus
|
||||
| maybe False isRegularFile filestatus = pendingAddChange file
|
||||
| otherwise = noChange
|
||||
|
||||
{- In direct mode, add events are received for both new files, and
|
||||
- modified existing files. Or, in some cases, existing files that have not
|
||||
- really been modified. -}
|
||||
onAddDirect :: Handler
|
||||
onAddDirect file fs = do
|
||||
v <- liftAnnex $ catKeyFile file
|
||||
case (v, fs) of
|
||||
(Just key, Just filestatus) ->
|
||||
ifM (liftAnnex $ sameFileStatus key filestatus)
|
||||
( noChange
|
||||
, do
|
||||
liftAnnex $ changedDirect key file
|
||||
pendingAddChange file
|
||||
)
|
||||
_ -> pendingAddChange file
|
||||
|
||||
{- A symlink might be an arbitrary symlink, which is just added.
|
||||
- Or, if it is a git-annex symlink, ensure it points to the content
|
||||
- before adding it.
|
||||
-}
|
||||
onAddSymlink :: Bool -> Handler
|
||||
onAddSymlink isdirect file filestatus = go =<< liftAnnex (Backend.lookupFile file)
|
||||
where
|
||||
go (Just (key, _)) = do
|
||||
when isdirect $
|
||||
liftAnnex $ void $ addAssociatedFile key file
|
||||
link <- liftAnnex $ calcGitLink file key
|
||||
ifM ((==) (Just link) <$> liftIO (catchMaybeIO $ readSymbolicLink file))
|
||||
( do
|
||||
s <- getDaemonStatus
|
||||
checkcontent key s
|
||||
ensurestaged (Just link) s
|
||||
, do
|
||||
unless isdirect $ do
|
||||
liftIO $ removeFile file
|
||||
liftAnnex $ Backend.makeAnnexLink link file
|
||||
checkcontent key =<< getDaemonStatus
|
||||
addlink link
|
||||
)
|
||||
go Nothing = do -- other symlink
|
||||
mlink <- liftIO (catchMaybeIO $ readSymbolicLink file)
|
||||
ensurestaged mlink =<< getDaemonStatus
|
||||
|
||||
{- This is often called on symlinks that are already
|
||||
- staged correctly. A symlink may have been deleted
|
||||
- and is being re-added, or was added when the watcher was
|
||||
- not running. So they're normally restaged to make sure.
|
||||
-
|
||||
- As an optimisation, during the startup scan, avoid
|
||||
- restaging everything. Only links that were created since
|
||||
- the last time the daemon was running are staged.
|
||||
- (If the daemon has never run before, avoid staging
|
||||
- links too.)
|
||||
-}
|
||||
ensurestaged (Just link) daemonstatus
|
||||
| scanComplete daemonstatus = addlink link
|
||||
| otherwise = case filestatus of
|
||||
Just s
|
||||
| not (afterLastDaemonRun (statusChangeTime s) daemonstatus) -> noChange
|
||||
_ -> addlink link
|
||||
ensurestaged Nothing _ = noChange
|
||||
|
||||
{- For speed, tries to reuse the existing blob for symlink target. -}
|
||||
addlink link = do
|
||||
debug ["add symlink", file]
|
||||
liftAnnex $ do
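{- Note: ":<path>" is git's revision syntax for the blob recorded in the
- index for that path, so this looks up the symlink target that is
- currently staged for the file, if any. -}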
|
||||
v <- catObjectDetails $ Ref $ ':':file
|
||||
case v of
|
||||
Just (currlink, sha)
|
||||
| s2w8 link == L.unpack currlink ->
|
||||
stageSymlink file sha
|
||||
_ -> stageSymlink file =<< hashSymlink link
|
||||
madeChange file LinkChange
|
||||
|
||||
{- When a new link appears, or a link is changed, after the startup
|
||||
- scan, handle getting or dropping the key's content.
|
||||
- Also, moving or copying a link may cause it to be transferred
|
||||
- elsewhere, so check that too. -}
|
||||
checkcontent key daemonstatus
|
||||
| scanComplete daemonstatus = do
|
||||
present <- liftAnnex $ inAnnex key
|
||||
if present
|
||||
then queueTransfers "new file created" Next key (Just file) Upload
|
||||
else queueTransfers "new or renamed file wanted" Next key (Just file) Download
|
||||
handleDrops "file renamed" present key (Just file) Nothing
|
||||
| otherwise = noop
|
||||
|
||||
onDel :: Handler
|
||||
onDel file _ = do
|
||||
debug ["file deleted", file]
|
||||
liftAnnex $
|
||||
Annex.Queue.addUpdateIndex =<<
|
||||
inRepo (Git.UpdateIndex.unstageFile file)
|
||||
madeChange file RmChange
|
||||
|
||||
{- A directory has been deleted, or moved, so tell git to remove anything
|
||||
- that was inside it from its cache. Since it could reappear at any time,
|
||||
- use --cached to only delete it from the index.
|
||||
-
|
||||
- Note: This could use unstageFile, but would need to run another git
|
||||
- command to get the recursive list of files in the directory, so rm is
|
||||
- just as good. -}
|
||||
onDelDir :: Handler
|
||||
onDelDir dir _ = do
|
||||
debug ["directory deleted", dir]
|
||||
liftAnnex $ Annex.Queue.addCommand "rm"
|
||||
[Params "--quiet -r --cached --ignore-unmatch --"] [dir]
|
||||
madeChange dir RmDirChange
|
||||
|
||||
{- Called when there's an error with inotify or kqueue. -}
|
||||
onErr :: Handler
|
||||
onErr msg _ = do
|
||||
liftAnnex $ warning msg
|
||||
void $ addAlert $ warningAlert "watcher" msg
|
||||
noChange
|
92 Assistant/Threads/WebApp.hs Normal file
@ -0,0 +1,92 @@
|
|||
{- git-annex assistant webapp thread
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
{-# LANGUAGE TypeFamilies, QuasiQuotes, MultiParamTypeClasses, TemplateHaskell, OverloadedStrings, RankNTypes #-}
|
||||
{-# OPTIONS_GHC -fno-warn-orphans #-}
|
||||
|
||||
module Assistant.Threads.WebApp where
|
||||
|
||||
import Assistant.Common
|
||||
import Assistant.WebApp
|
||||
import Assistant.WebApp.Types
|
||||
import Assistant.WebApp.DashBoard
|
||||
import Assistant.WebApp.SideBar
|
||||
import Assistant.WebApp.Notifications
|
||||
import Assistant.WebApp.Configurators
|
||||
import Assistant.WebApp.Configurators.Edit
|
||||
import Assistant.WebApp.Configurators.Local
|
||||
import Assistant.WebApp.Configurators.Ssh
|
||||
import Assistant.WebApp.Configurators.Pairing
|
||||
import Assistant.WebApp.Configurators.AWS
|
||||
import Assistant.WebApp.Configurators.WebDAV
|
||||
import Assistant.WebApp.Configurators.XMPP
|
||||
import Assistant.WebApp.Configurators.Preferences
|
||||
import Assistant.WebApp.Documentation
|
||||
import Assistant.WebApp.Control
|
||||
import Assistant.WebApp.OtherRepos
|
||||
import Assistant.Types.ThreadedMonad
|
||||
import Utility.WebApp
|
||||
import Utility.TempFile
|
||||
import Utility.FileMode
|
||||
import Git
|
||||
|
||||
import Yesod
|
||||
import Yesod.Static
|
||||
import Network.Socket (SockAddr)
|
||||
import Data.Text (pack, unpack)
|
||||
|
||||
mkYesodDispatch "WebApp" $(parseRoutesFile "Assistant/WebApp/routes")
|
||||
|
||||
type Url = String
|
||||
|
||||
webAppThread
|
||||
:: AssistantData
|
||||
-> UrlRenderer
|
||||
-> Bool
|
||||
-> Maybe (IO String)
|
||||
-> Maybe (Url -> FilePath -> IO ())
|
||||
-> NamedThread
|
||||
webAppThread assistantdata urlrenderer noannex postfirstrun onstartup = thread $ liftIO $ do
|
||||
webapp <- WebApp
|
||||
<$> pure assistantdata
|
||||
<*> (pack <$> genRandomToken)
|
||||
<*> getreldir
|
||||
<*> pure $(embed "static")
|
||||
<*> newWebAppState
|
||||
<*> pure postfirstrun
|
||||
<*> pure noannex
|
||||
setUrlRenderer urlrenderer $ yesodRender webapp (pack "")
|
||||
app <- toWaiAppPlain webapp
|
||||
app' <- ifM debugEnabled
|
||||
( return $ httpDebugLogger app
|
||||
, return app
|
||||
)
|
||||
runWebApp app' $ \addr -> if noannex
|
||||
then withTempFile "webapp.html" $ \tmpfile _ ->
|
||||
go addr webapp tmpfile Nothing
|
||||
else do
|
||||
let st = threadState assistantdata
|
||||
htmlshim <- runThreadState st $ fromRepo gitAnnexHtmlShim
|
||||
urlfile <- runThreadState st $ fromRepo gitAnnexUrlFile
|
||||
go addr webapp htmlshim (Just urlfile)
|
||||
where
|
||||
thread = namedThread "WebApp"
|
||||
getreldir
|
||||
| noannex = return Nothing
|
||||
| otherwise = Just <$>
|
||||
(relHome =<< absPath
|
||||
=<< runThreadState (threadState assistantdata) (fromRepo repoPath))
|
||||
go addr webapp htmlshim urlfile = do
|
||||
let url = myUrl webapp addr
|
||||
maybe noop (`writeFileProtected` url) urlfile
|
||||
writeHtmlShim "Starting webapp..." url htmlshim
|
||||
maybe noop (\a -> a url htmlshim) onstartup
|
||||
|
||||
myUrl :: WebApp -> SockAddr -> Url
|
||||
myUrl webapp addr = unpack $ yesodRender webapp urlbase HomeR []
|
||||
where
|
||||
urlbase = pack $ "http://" ++ show addr
|
257 Assistant/Threads/XMPPClient.hs Normal file
@ -0,0 +1,257 @@
|
|||
{- git-annex XMPP client
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
module Assistant.Threads.XMPPClient where
|
||||
|
||||
import Assistant.Common
|
||||
import Assistant.XMPP
|
||||
import Assistant.XMPP.Client
|
||||
import Assistant.NetMessager
|
||||
import Assistant.Types.NetMessager
|
||||
import Assistant.Types.Buddies
|
||||
import Assistant.XMPP.Buddies
|
||||
import Assistant.Sync
|
||||
import Assistant.DaemonStatus
|
||||
import qualified Remote
|
||||
import Utility.ThreadScheduler
|
||||
import Assistant.WebApp (UrlRenderer, renderUrl)
|
||||
import Assistant.WebApp.Types
|
||||
import Assistant.Alert
|
||||
import Assistant.Pairing
|
||||
import Assistant.XMPP.Git
|
||||
import Annex.UUID
|
||||
|
||||
import Network.Protocol.XMPP
|
||||
import Control.Concurrent
|
||||
import qualified Data.Text as T
|
||||
import qualified Data.Set as S
|
||||
import qualified Data.Map as M
|
||||
import qualified Git.Branch
|
||||
import Data.Time.Clock
|
||||
|
||||
xmppClientThread :: UrlRenderer -> NamedThread
|
||||
xmppClientThread urlrenderer = namedThread "XMPPClient" $
|
||||
restartableClient . xmppClient urlrenderer =<< getAssistant id
|
||||
|
||||
{- Runs the client, handling restart events. -}
|
||||
restartableClient :: IO () -> Assistant ()
|
||||
restartableClient a = forever $ do
|
||||
tid <- liftIO $ forkIO a
|
||||
waitNetMessagerRestart
|
||||
liftIO $ killThread tid
|
||||
|
||||
xmppClient :: UrlRenderer -> AssistantData -> IO ()
|
||||
xmppClient urlrenderer d = do
|
||||
v <- liftAssistant $ liftAnnex getXMPPCreds
|
||||
case v of
|
||||
Nothing -> noop -- will be restarted once creds get configured
|
||||
Just c -> retry (runclient c) =<< getCurrentTime
|
||||
where
|
||||
liftAssistant = runAssistant d
|
||||
inAssistant = liftIO . liftAssistant
|
||||
|
||||
{- When the client exits, it's restarted;
|
||||
- if it keeps failing, back off to wait 5 minutes before
|
||||
- trying it again. -}
|
||||
retry client starttime = do
|
||||
e <- client
|
||||
now <- getCurrentTime
|
||||
if diffUTCTime now starttime > 300
|
||||
then do
|
||||
liftAssistant $ debug ["connection lost; reconnecting", show e]
|
||||
retry client now
|
||||
else do
|
||||
liftAssistant $ debug ["connection failed; will retry", show e]
|
||||
threadDelaySeconds (Seconds 300)
|
||||
retry client =<< getCurrentTime
|
||||
|
||||
runclient c = liftIO $ connectXMPP c $ \jid -> do
|
||||
selfjid <- bindJID jid
|
||||
putStanza gitAnnexSignature
|
||||
|
||||
inAssistant $ debug ["connected", show selfjid]
|
||||
{- The buddy list starts empty each time
|
||||
- the client connects, so that stale info
|
||||
- is not retained. -}
|
||||
void $ inAssistant $
|
||||
updateBuddyList (const noBuddies) <<~ buddyList
|
||||
|
||||
xmppThread $ receivenotifications selfjid
|
||||
forever $ do
|
||||
a <- inAssistant $ relayNetMessage selfjid
|
||||
a
|
||||
|
||||
receivenotifications selfjid = forever $ do
|
||||
l <- decodeStanza selfjid <$> getStanza
|
||||
-- inAssistant $ debug ["received:", show l]
|
||||
mapM_ (handle selfjid) l
|
||||
|
||||
handle _ (PresenceMessage p) = void $ inAssistant $
|
||||
updateBuddyList (updateBuddies p) <<~ buddyList
|
||||
handle _ (GotNetMessage QueryPresence) = putStanza gitAnnexSignature
|
||||
handle _ (GotNetMessage (NotifyPush us)) = void $ inAssistant $ pull us
|
||||
handle selfjid (GotNetMessage (PairingNotification stage c u)) =
|
||||
maybe noop (inAssistant . pairMsgReceived urlrenderer stage u selfjid) (parseJID c)
|
||||
handle _ (GotNetMessage m@(Pushing _ pushstage))
|
||||
| isPushInitiation pushstage = inAssistant $
|
||||
unlessM (queueNetPushMessage m) $
|
||||
void $ forkIO <~> handlePushInitiation m
|
||||
| otherwise = void $ inAssistant $ queueNetPushMessage m
|
||||
handle _ (Ignorable _) = noop
|
||||
handle _ (Unknown _) = noop
|
||||
handle _ (ProtocolError _) = noop
|
||||
|
||||
|
||||
data XMPPEvent
|
||||
= GotNetMessage NetMessage
|
||||
| PresenceMessage Presence
|
||||
| Ignorable ReceivedStanza
|
||||
| Unknown ReceivedStanza
|
||||
| ProtocolError ReceivedStanza
|
||||
deriving Show
|
||||
|
||||
{- Decodes an XMPP stanza into one or more events. -}
|
||||
decodeStanza :: JID -> ReceivedStanza -> [XMPPEvent]
|
||||
decodeStanza selfjid s@(ReceivedPresence p)
|
||||
| presenceType p == PresenceError = [ProtocolError s]
|
||||
| presenceFrom p == Nothing = [Ignorable s]
|
||||
| presenceFrom p == Just selfjid = [Ignorable s]
|
||||
| otherwise = maybe [PresenceMessage p] decode (gitAnnexTagInfo p)
|
||||
where
|
||||
decode i
|
||||
| tagAttr i == pushAttr = impliedp $ GotNetMessage $ NotifyPush $
|
||||
decodePushNotification (tagValue i)
|
||||
| tagAttr i == queryAttr = impliedp $ GotNetMessage QueryPresence
|
||||
| otherwise = [Unknown s]
|
||||
{- Things sent via presence imply a presence message,
|
||||
- along with their real meaning. -}
|
||||
impliedp v = [PresenceMessage p, v]
|
||||
decodeStanza selfjid s@(ReceivedMessage m)
|
||||
| messageFrom m == Nothing = [Ignorable s]
|
||||
| messageFrom m == Just selfjid = [Ignorable s]
|
||||
| messageType m == MessageError = [ProtocolError s]
|
||||
| otherwise = [fromMaybe (Unknown s) (GotNetMessage <$> decodeMessage m)]
|
||||
decodeStanza _ s = [Unknown s]
|
||||
|
||||
{- Waits for a NetMessager message to be sent, and relays it to XMPP.
|
||||
-
|
||||
- Chat messages must be directed to specific clients, not a base
|
||||
- account JID, due to git-annex clients using a negative presence priority.
|
||||
- PairingNotification messages are always directed at specific
|
||||
- clients, but Pushing messages are sometimes not, and need to be exploded.
|
||||
-}
|
||||
relayNetMessage :: JID -> Assistant (XMPP ())
|
||||
relayNetMessage selfjid = convert =<< waitNetMessage
|
||||
where
|
||||
convert (NotifyPush us) = return $ putStanza $ pushNotification us
|
||||
convert QueryPresence = return $ putStanza presenceQuery
|
||||
convert (PairingNotification stage c u) = withclient c $ \tojid -> do
|
||||
changeBuddyPairing tojid True
|
||||
return $ putStanza $ pairingNotification stage u tojid selfjid
|
||||
convert (Pushing c pushstage) = withclient c $ \tojid -> do
|
||||
if tojid == baseJID tojid
|
||||
then do
|
||||
bud <- getBuddy (genBuddyKey tojid) <<~ buddyList
|
||||
return $ forM_ (maybe [] (S.toList . buddyAssistants) bud) $ \(Client jid) ->
|
||||
putStanza $ pushMessage pushstage jid selfjid
|
||||
else return $ putStanza $ pushMessage pushstage tojid selfjid
|
||||
|
||||
withclient c a = case parseJID c of
|
||||
Nothing -> return noop
|
||||
Just tojid
|
||||
| tojid == selfjid -> return noop
|
||||
| otherwise -> a tojid
|
||||
|
||||
{- Runs an XMPP action in a separate thread, using a session to allow it
|
||||
- to access the same XMPP client. -}
|
||||
xmppThread :: XMPP () -> XMPP ()
|
||||
xmppThread a = do
|
||||
s <- getSession
|
||||
void $ liftIO $ forkIO $
|
||||
void $ runXMPP s a
|
||||
|
||||
{- We only pull from one remote out of the set listed in the push
|
||||
- notification, as an optimisation.
|
||||
-
|
||||
- Note that it might be possible (though very unlikely) for the push
- notification to take a while to be sent, and for multiple pushes to
- happen before it is sent, so it can include multiple remotes that were
- pushed to at different times.
|
||||
-
|
||||
- It could then be the case that the remote we choose had the earlier
|
||||
- push sent to it, but then failed to get the later push, and so is not
|
||||
- fully up-to-date. If that happens, the pushRetryThread will come along
|
||||
- and retry the push, and we'll get another notification once it succeeds,
|
||||
- and pull again. -}
|
||||
pull :: [UUID] -> Assistant ()
|
||||
pull [] = noop
|
||||
pull us = do
|
||||
rs <- filter matching . syncGitRemotes <$> getDaemonStatus
|
||||
debug $ "push notification for" : map (fromUUID . Remote.uuid ) rs
|
||||
pullone rs =<< liftAnnex (inRepo Git.Branch.current)
|
||||
where
|
||||
matching r = Remote.uuid r `S.member` s
|
||||
s = S.fromList us
|
||||
|
||||
pullone [] _ = noop
|
||||
pullone (r:rs) branch =
|
||||
unlessM (all id . fst <$> manualPull branch [r]) $
|
||||
pullone rs branch
|
||||
|
||||
pairMsgReceived :: UrlRenderer -> PairStage -> UUID -> JID -> JID -> Assistant ()
|
||||
pairMsgReceived urlrenderer PairReq theiruuid selfjid theirjid
|
||||
| baseJID selfjid == baseJID theirjid = autoaccept
|
||||
| otherwise = do
|
||||
knownjids <- catMaybes . map (parseJID . getXMPPClientID)
|
||||
. filter isXMPPRemote . syncRemotes <$> getDaemonStatus
|
||||
if any (== baseJID theirjid) knownjids
|
||||
then autoaccept
|
||||
else showalert
|
||||
|
||||
where
|
||||
-- PairReq from another client using our JID, or the JID of
|
||||
-- any repo we're already paired with is automatically accepted.
|
||||
autoaccept = do
|
||||
selfuuid <- liftAnnex getUUID
|
||||
sendNetMessage $
|
||||
PairingNotification PairAck (formatJID theirjid) selfuuid
|
||||
finishXMPPPairing theirjid theiruuid
|
||||
-- Show an alert to let the user decide if they want to pair.
|
||||
showalert = do
|
||||
let route = ConfirmXMPPPairR (PairKey theiruuid $ formatJID theirjid)
|
||||
url <- liftIO $ renderUrl urlrenderer route []
|
||||
close <- asIO1 removeAlert
|
||||
void $ addAlert $ pairRequestReceivedAlert (T.unpack $ buddyName theirjid)
|
||||
AlertButton
|
||||
{ buttonUrl = url
|
||||
, buttonLabel = T.pack "Respond"
|
||||
, buttonAction = Just close
|
||||
}
|
||||
|
||||
pairMsgReceived _ PairAck theiruuid _selfjid theirjid =
|
||||
{- PairAck must come from one of the buddies we are pairing with;
|
||||
- don't pair with just anyone. -}
|
||||
whenM (isBuddyPairing theirjid) $ do
|
||||
changeBuddyPairing theirjid False
|
||||
selfuuid <- liftAnnex getUUID
|
||||
sendNetMessage $
|
||||
PairingNotification PairDone (formatJID theirjid) selfuuid
|
||||
finishXMPPPairing theirjid theiruuid
|
||||
|
||||
pairMsgReceived _ PairDone _theiruuid _selfjid theirjid =
|
||||
changeBuddyPairing theirjid False
|
||||
|
||||
isBuddyPairing :: JID -> Assistant Bool
|
||||
isBuddyPairing jid = maybe False buddyPairing <$>
|
||||
getBuddy (genBuddyKey jid) <<~ buddyList
|
||||
|
||||
changeBuddyPairing :: JID -> Bool -> Assistant ()
|
||||
changeBuddyPairing jid ispairing =
|
||||
updateBuddyList (M.adjust set key) <<~ buddyList
|
||||
where
|
||||
key = genBuddyKey jid
|
||||
set b = b { buddyPairing = ispairing }
|
207 Assistant/TransferQueue.hs Normal file
@ -0,0 +1,207 @@
|
|||
{- git-annex assistant pending transfer queue
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
||||
module Assistant.TransferQueue (
|
||||
TransferQueue,
|
||||
Schedule(..),
|
||||
newTransferQueue,
|
||||
getTransferQueue,
|
||||
queueTransfers,
|
||||
queueTransfersMatching,
|
||||
queueDeferredDownloads,
|
||||
queueTransfer,
|
||||
queueTransferAt,
|
||||
queueTransferWhenSmall,
|
||||
getNextTransfer,
|
||||
getMatchingTransfers,
|
||||
dequeueTransfers,
|
||||
) where
|
||||
|
||||
import Assistant.Common
|
||||
import Assistant.DaemonStatus
|
||||
import Assistant.Types.TransferQueue
|
||||
import Logs.Transfer
|
||||
import Types.Remote
|
||||
import qualified Remote
|
||||
import qualified Types.Remote as Remote
|
||||
import Annex.Wanted
|
||||
|
||||
import Control.Concurrent.STM
|
||||
import qualified Data.Map as M
|
||||
|
||||
type Reason = String
|
||||
|
||||
{- Reads the queue's content without blocking or changing it. -}
|
||||
getTransferQueue :: Assistant [(Transfer, TransferInfo)]
|
||||
getTransferQueue = (atomically . readTVar . queuelist) <<~ transferQueue
|
||||
|
||||
stubInfo :: AssociatedFile -> Remote -> TransferInfo
|
||||
stubInfo f r = stubTransferInfo
|
||||
{ transferRemote = Just r
|
||||
, associatedFile = f
|
||||
}
|
||||
|
||||
{- Adds transfers to queue for some of the known remotes.
|
||||
- Honors preferred content settings, only transferring wanted files. -}
|
||||
queueTransfers :: Reason -> Schedule -> Key -> AssociatedFile -> Direction -> Assistant ()
|
||||
queueTransfers = queueTransfersMatching (const True)
|
||||
|
||||
{- Adds transfers to queue for some of the known remotes, that match a
|
||||
- condition. Honors preferred content settings. -}
|
||||
queueTransfersMatching :: (UUID -> Bool) -> Reason -> Schedule -> Key -> AssociatedFile -> Direction -> Assistant ()
|
||||
queueTransfersMatching matching reason schedule k f direction
|
||||
| direction == Download = whenM (liftAnnex $ wantGet True f) go
|
||||
| otherwise = go
|
||||
where
|
||||
go = do
|
||||
rs <- liftAnnex . sufficientremotes
|
||||
=<< syncDataRemotes <$> getDaemonStatus
|
||||
let matchingrs = filter (matching . Remote.uuid) rs
|
||||
if null matchingrs
|
||||
then defer
|
||||
else forM_ matchingrs $ \r ->
|
||||
enqueue reason schedule (gentransfer r) (stubInfo f r)
|
||||
sufficientremotes rs
|
||||
{- Queue downloads from all remotes that
|
||||
- have the key, with the cheapest ones first.
|
||||
- More expensive ones will only be tried if
|
||||
- downloading from a cheap one fails. -}
|
||||
| direction == Download = do
|
||||
uuids <- Remote.keyLocations k
|
||||
return $ filter (\r -> uuid r `elem` uuids) rs
|
||||
{- Upload to all remotes that want the content. -}
|
||||
| otherwise = filterM (wantSend True f . Remote.uuid) $
|
||||
filter (not . Remote.readonly) rs
|
||||
gentransfer r = Transfer
|
||||
{ transferDirection = direction
|
||||
, transferKey = k
|
||||
, transferUUID = Remote.uuid r
|
||||
}
|
||||
defer
|
||||
{- Defer this download, as no known remote has the key. -}
|
||||
| direction == Download = do
|
||||
q <- getAssistant transferQueue
|
||||
void $ liftIO $ atomically $
|
||||
modifyTVar' (deferreddownloads q) $
|
||||
\l -> (k, f):l
|
||||
| otherwise = noop
|
||||
|
||||
{- Queues any deferred downloads that can now be accomplished, leaving
|
||||
- any others in the list to try again later. -}
|
||||
queueDeferredDownloads :: Reason -> Schedule -> Assistant ()
|
||||
queueDeferredDownloads reason schedule = do
|
||||
q <- getAssistant transferQueue
|
||||
l <- liftIO $ atomically $ swapTVar (deferreddownloads q) []
|
||||
rs <- syncDataRemotes <$> getDaemonStatus
|
||||
left <- filterM (queue rs) l
|
||||
unless (null left) $
|
||||
liftIO $ atomically $ modifyTVar' (deferreddownloads q) $
|
||||
\new -> new ++ left
|
||||
where
|
||||
queue rs (k, f) = do
|
||||
uuids <- liftAnnex $ Remote.keyLocations k
|
||||
let sources = filter (\r -> uuid r `elem` uuids) rs
|
||||
unless (null sources) $
|
||||
forM_ sources $ \r ->
|
||||
enqueue reason schedule (gentransfer r) (stubInfo f r)
|
||||
return $ null sources
|
||||
where
|
||||
gentransfer r = Transfer
|
||||
{ transferDirection = Download
|
||||
, transferKey = k
|
||||
, transferUUID = Remote.uuid r
|
||||
}
|
||||
|
||||
enqueue :: Reason -> Schedule -> Transfer -> TransferInfo -> Assistant ()
|
||||
enqueue reason schedule t info
|
||||
| schedule == Next = go (new:)
|
||||
| otherwise = go (\l -> l++[new])
|
||||
where
|
||||
new = (t, info)
|
||||
go modlist = do
|
||||
q <- getAssistant transferQueue
|
||||
liftIO $ atomically $ do
|
||||
void $ modifyTVar' (queuesize q) succ
|
||||
void $ modifyTVar' (queuelist q) modlist
|
||||
debug [ "queued", describeTransfer t info, ": " ++ reason ]
|
||||
notifyTransfer
|
||||
|
||||
{- Adds a transfer to the queue. -}
|
||||
queueTransfer :: Reason -> Schedule -> AssociatedFile -> Transfer -> Remote -> Assistant ()
|
||||
queueTransfer reason schedule f t remote =
|
||||
enqueue reason schedule t (stubInfo f remote)
|
||||
|
||||
{- Blocks until the queue is no larger than a given size, and then adds a
|
||||
- transfer to the queue. -}
|
||||
queueTransferAt :: Int -> Reason -> Schedule -> AssociatedFile -> Transfer -> Remote -> Assistant ()
|
||||
queueTransferAt wantsz reason schedule f t remote = do
|
||||
q <- getAssistant transferQueue
|
||||
liftIO $ atomically $ do
|
||||
sz <- readTVar (queuesize q)
|
||||
unless (sz <= wantsz) $
|
||||
retry -- blocks until queuesize changes
|
||||
enqueue reason schedule t (stubInfo f remote)
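{- A standalone illustration (names are made up; not part of this module)
- of the STM idiom used above: retry blocks the transaction until a TVar
- it has read changes, so the caller proceeds, atomically, only once the
- counter is at or below the limit. -}
waitUntilAtMost :: Int -> TVar Int -> STM ()
waitUntilAtMost limit counter = do
	n <- readTVar counter
	unless (n <= limit) retry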
|
||||
|
||||
queueTransferWhenSmall :: Reason -> AssociatedFile -> Transfer -> Remote -> Assistant ()
|
||||
queueTransferWhenSmall reason = queueTransferAt 10 reason Later
|
||||
|
||||
{- Blocks until a pending transfer is available in the queue,
|
||||
- and removes it.
|
||||
-
|
||||
- Checks that it's acceptable, before adding it to the
|
||||
- currentTransfers map. If it's not acceptable, it's discarded.
|
||||
-
|
||||
- This is done in a single STM transaction, so there is no window
|
||||
- where an observer sees an inconsistent status. -}
|
||||
getNextTransfer :: (TransferInfo -> Bool) -> Assistant (Maybe (Transfer, TransferInfo))
|
||||
getNextTransfer acceptable = do
|
||||
q <- getAssistant transferQueue
|
||||
dstatus <- getAssistant daemonStatusHandle
|
||||
liftIO $ atomically $ do
|
||||
sz <- readTVar (queuesize q)
|
||||
if sz < 1
|
||||
then retry -- blocks until queuesize changes
|
||||
else do
|
||||
(r@(t,info):rest) <- readTVar (queuelist q)
|
||||
writeTVar (queuelist q) rest
|
||||
void $ modifyTVar' (queuesize q) pred
|
||||
if acceptable info
|
||||
then do
|
||||
adjustTransfersSTM dstatus $
|
||||
M.insertWith' const t info
|
||||
return $ Just r
|
||||
else return Nothing
|
||||
|
||||
{- Moves transfers matching a condition from the queue, to the
|
||||
- currentTransfers map. -}
|
||||
getMatchingTransfers :: (Transfer -> Bool) -> Assistant [(Transfer, TransferInfo)]
|
||||
getMatchingTransfers c = do
|
||||
q <- getAssistant transferQueue
|
||||
dstatus <- getAssistant daemonStatusHandle
|
||||
liftIO $ atomically $ do
|
||||
ts <- dequeueTransfersSTM q c
|
||||
unless (null ts) $
|
||||
adjustTransfersSTM dstatus $ \m -> M.union m $ M.fromList ts
|
||||
return ts
|
||||
|
||||
{- Removes transfers matching a condition from the queue, and returns the
|
||||
- removed transfers. -}
|
||||
dequeueTransfers :: (Transfer -> Bool) -> Assistant [(Transfer, TransferInfo)]
|
||||
dequeueTransfers c = do
|
||||
q <- getAssistant transferQueue
|
||||
removed <- liftIO $ atomically $ dequeueTransfersSTM q c
|
||||
unless (null removed) $
|
||||
notifyTransfer
|
||||
return removed
|
||||
|
||||
dequeueTransfersSTM :: TransferQueue -> (Transfer -> Bool) -> STM [(Transfer, TransferInfo)]
|
||||
dequeueTransfersSTM q c = do
|
||||
(removed, ts) <- partition (c . fst)
|
||||
<$> readTVar (queuelist q)
|
||||
void $ writeTVar (queuesize q) (length ts)
|
||||
void $ writeTVar (queuelist q) ts
|
||||
return removed
|

73 Assistant/TransferSlots.hs Normal file
@@ -0,0 +1,73 @@
{- git-annex assistant transfer slots
 -
 - Copyright 2012 Joey Hess <joey@kitenet.net>
 -
 - Licensed under the GNU GPL version 3 or higher.
 -}

module Assistant.TransferSlots where

import Assistant.Common
import Utility.ThreadScheduler
import Assistant.Types.TransferSlots
import Assistant.DaemonStatus
import Logs.Transfer

import qualified Control.Exception as E
import Control.Concurrent
import qualified Control.Concurrent.MSemN as MSemN

type TransferGenerator = Assistant (Maybe (Transfer, TransferInfo, Assistant ()))

{- Waits until a transfer slot becomes available, then runs a
 - TransferGenerator, and then runs the transfer action in its own thread.
 -}
inTransferSlot :: TransferGenerator -> Assistant ()
inTransferSlot gen = do
	flip MSemN.wait 1 <<~ transferSlots
	runTransferThread =<< gen

{- Runs a TransferGenerator, and its transfer action,
 - without waiting for a slot to become available. -}
inImmediateTransferSlot :: TransferGenerator -> Assistant ()
inImmediateTransferSlot gen = do
	flip MSemN.signal (-1) <<~ transferSlots
	runTransferThread =<< gen

{- Runs a transfer action, in an already allocated transfer slot.
 - Once it finishes, frees the transfer slot.
 -
 - Note that the action is subject to being killed when the transfer
 - is canceled or paused.
 -
 - A PauseTransfer exception is handled by letting the action be killed,
 - then pausing the thread until a ResumeTransfer exception is raised,
 - then rerunning the action.
 -}
runTransferThread :: Maybe (Transfer, TransferInfo, Assistant ()) -> Assistant ()
runTransferThread Nothing = flip MSemN.signal 1 <<~ transferSlots
runTransferThread (Just (t, info, a)) = do
	d <- getAssistant id
	aio <- asIO a
	tid <- liftIO $ forkIO $ runTransferThread' d aio
	updateTransferInfo t $ info { transferTid = Just tid }

runTransferThread' :: AssistantData -> IO () -> IO ()
runTransferThread' d a = go
  where
	go = catchPauseResume a
	pause = catchPauseResume $ runEvery (Seconds 86400) noop
	{- Note: This must use E.try, rather than E.catch.
	 - When E.catch is used, and has called go in its exception
	 - handler, Control.Concurrent.throwTo will block sometimes
	 - when signaling. Using E.try avoids the problem. -}
	catchPauseResume a' = do
		r <- E.try a' :: IO (Either E.SomeException ())
		case r of
			Left e -> case E.fromException e of
				Just PauseTransfer -> pause
				Just ResumeTransfer -> go
				_ -> done
			_ -> done
	done = runAssistant d $
		flip MSemN.signal 1 <<~ transferSlots

19 Assistant/Types/BranchChange.hs Normal file
@@ -0,0 +1,19 @@
{- git-annex assistant git-annex branch change tracking
 -
 - Copyright 2012 Joey Hess <joey@kitenet.net>
 -
 - Licensed under the GNU GPL version 3 or higher.
 -}

module Assistant.Types.BranchChange where

import Control.Concurrent.MSampleVar
import Common.Annex

newtype BranchChangeHandle = BranchChangeHandle (MSampleVar ())

newBranchChangeHandle :: IO BranchChangeHandle
newBranchChangeHandle = BranchChangeHandle <$> newEmptySV

fromBranchChangeHandle :: BranchChangeHandle -> MSampleVar ()
fromBranchChangeHandle (BranchChangeHandle v) = v

80 Assistant/Types/Buddies.hs Normal file
@@ -0,0 +1,80 @@
{- git-annex assistant buddies
 -
 - Copyright 2012 Joey Hess <joey@kitenet.net>
 -
 - Licensed under the GNU GPL version 3 or higher.
 -}

{-# LANGUAGE CPP #-}

module Assistant.Types.Buddies where

import Common.Annex

import qualified Data.Map as M
import Control.Concurrent.STM
import Utility.NotificationBroadcaster
import Data.Text as T

{- For simplicity, dummy types are defined even when XMPP is disabled. -}
#ifdef WITH_XMPP
import Network.Protocol.XMPP
import Data.Set as S
import Data.Ord

newtype Client = Client JID
	deriving (Eq, Show)

instance Ord Client where
	compare = comparing show

data Buddy = Buddy
	{ buddyPresent :: S.Set Client
	, buddyAway :: S.Set Client
	, buddyAssistants :: S.Set Client
	, buddyPairing :: Bool
	}
#else
data Buddy = Buddy
#endif
	deriving (Eq, Show)

data BuddyKey = BuddyKey T.Text
	deriving (Eq, Ord, Show, Read)

data PairKey = PairKey UUID T.Text
	deriving (Eq, Ord, Show, Read)

type Buddies = M.Map BuddyKey Buddy

{- A list of buddies, and a way to notify when it changes. -}
type BuddyList = (TMVar Buddies, NotificationBroadcaster)

noBuddies :: Buddies
noBuddies = M.empty

newBuddyList :: IO BuddyList
newBuddyList = (,)
	<$> atomically (newTMVar noBuddies)
	<*> newNotificationBroadcaster

getBuddyList :: BuddyList -> IO [Buddy]
getBuddyList (v, _) = M.elems <$> atomically (readTMVar v)

getBuddy :: BuddyKey -> BuddyList -> IO (Maybe Buddy)
getBuddy k (v, _) = M.lookup k <$> atomically (readTMVar v)

getBuddyBroadcaster :: BuddyList -> NotificationBroadcaster
getBuddyBroadcaster (_, h) = h

{- Applies a function to modify the buddy list, and if it's changed,
 - sends notifications to any listeners. -}
updateBuddyList :: (Buddies -> Buddies) -> BuddyList -> IO ()
updateBuddyList a (v, caster) = do
	changed <- atomically $ do
		buds <- takeTMVar v
		let buds' = a buds
		putTMVar v buds'
		return $ buds /= buds'
	when changed $
		sendNotification caster

54 Assistant/Types/Changes.hs Normal file
@@ -0,0 +1,54 @@
{- git-annex assistant change tracking
 -
 - Copyright 2012 Joey Hess <joey@kitenet.net>
 -
 - Licensed under the GNU GPL version 3 or higher.
 -}

module Assistant.Types.Changes where

import Types.KeySource
import Utility.TSet

import Data.Time.Clock

data ChangeType = AddChange | LinkChange | RmChange | RmDirChange
	deriving (Show, Eq)

type ChangeChan = TSet Change

data Change
	= Change
		{ changeTime :: UTCTime
		, changeFile :: FilePath
		, changeType :: ChangeType
		}
	| PendingAddChange
		{ changeTime ::UTCTime
		, changeFile :: FilePath
		}
	| InProcessAddChange
		{ changeTime ::UTCTime
		, keySource :: KeySource
		}
	deriving (Show)

newChangeChan :: IO ChangeChan
newChangeChan = newTSet

isPendingAddChange :: Change -> Bool
isPendingAddChange (PendingAddChange {}) = True
isPendingAddChange _ = False

isInProcessAddChange :: Change -> Bool
isInProcessAddChange (InProcessAddChange {}) = True
isInProcessAddChange _ = False

finishedChange :: Change -> Change
finishedChange c@(InProcessAddChange { keySource = ks }) = Change
	{ changeTime = changeTime c
	, changeFile = keyFilename ks
	, changeType = AddChange
	}
finishedChange c = c

17 Assistant/Types/Commits.hs Normal file
@@ -0,0 +1,17 @@
{- git-annex assistant commit tracking
 -
 - Copyright 2012 Joey Hess <joey@kitenet.net>
 -
 - Licensed under the GNU GPL version 3 or higher.
 -}

module Assistant.Types.Commits where

import Utility.TSet

type CommitChan = TSet Commit

data Commit = Commit

newCommitChan :: IO CommitChan
newCommitChan = newTSet

81 Assistant/Types/DaemonStatus.hs Normal file
@@ -0,0 +1,81 @@
{- git-annex assistant daemon status
 -
 - Copyright 2012 Joey Hess <joey@kitenet.net>
 -
 - Licensed under the GNU GPL version 3 or higher.
 -}

{-# LANGUAGE RankNTypes, ImpredicativeTypes #-}

module Assistant.Types.DaemonStatus where

import Common.Annex
import Assistant.Alert
import Assistant.Pairing
import Utility.NotificationBroadcaster
import Logs.Transfer
import Assistant.Types.ThreadName

import Control.Concurrent.STM
import Control.Concurrent.Async
import Data.Time.Clock.POSIX
import qualified Data.Map as M

data DaemonStatus = DaemonStatus
	-- All the named threads that comprise the daemon,
	-- and actions to run to restart them.
	{ startedThreads :: M.Map ThreadName (Async (), IO ())
	-- False when the daemon is performing its startup scan
	, scanComplete :: Bool
	-- Time when a previous process of the daemon was running ok
	, lastRunning :: Maybe POSIXTime
	-- True when the sanity checker is running
	, sanityCheckRunning :: Bool
	-- Last time the sanity checker ran
	, lastSanityCheck :: Maybe POSIXTime
	-- Currently running file content transfers
	, currentTransfers :: TransferMap
	-- Messages to display to the user.
	, alertMap :: AlertMap
	, lastAlertId :: AlertId
	-- Ordered list of all remotes that can be synced with
	, syncRemotes :: [Remote]
	-- Ordered list of remotes to sync git with
	, syncGitRemotes :: [Remote]
	-- Ordered list of remotes to sync data with
	, syncDataRemotes :: [Remote]
	-- Pairing request that is in progress.
	, pairingInProgress :: Maybe PairingInProgress
	-- Broadcasts notifications about all changes to the DaemonStatus
	, changeNotifier :: NotificationBroadcaster
	-- Broadcasts notifications when queued or current transfers change.
	, transferNotifier :: NotificationBroadcaster
	-- Broadcasts notifications when there's a change to the alerts
	, alertNotifier :: NotificationBroadcaster
	-- Broadcasts notifications when the syncRemotes change
	, syncRemotesNotifier :: NotificationBroadcaster
	}

type TransferMap = M.Map Transfer TransferInfo

{- This TMVar is never left empty, so accessing it will never block. -}
type DaemonStatusHandle = TMVar DaemonStatus

newDaemonStatus :: IO DaemonStatus
newDaemonStatus = DaemonStatus
	<$> pure M.empty
	<*> pure False
	<*> pure Nothing
	<*> pure False
	<*> pure Nothing
	<*> pure M.empty
	<*> pure M.empty
	<*> pure firstAlertId
	<*> pure []
	<*> pure []
	<*> pure []
	<*> pure Nothing
	<*> newNotificationBroadcaster
	<*> newNotificationBroadcaster
	<*> newNotificationBroadcaster
	<*> newNotificationBroadcaster

17 Assistant/Types/NamedThread.hs Normal file
@@ -0,0 +1,17 @@
{- named threads
 -
 - Copyright 2012 Joey Hess <joey@kitenet.net>
 -
 - Licensed under the GNU GPL version 3 or higher.
 -}

module Assistant.Types.NamedThread where

import Assistant.Monad
import Assistant.Types.ThreadName

{- Information about a named thread that can be run. -}
data NamedThread = NamedThread ThreadName (Assistant ())

namedThread :: String -> Assistant () -> NamedThread
namedThread name a = NamedThread (ThreadName name) a

101 Assistant/Types/NetMessager.hs Normal file
@@ -0,0 +1,101 @@
{- git-annex assistant out of band network messager types
 -
 - Copyright 2012 Joey Hess <joey@kitenet.net>
 -
 - Licensed under the GNU GPL version 3 or higher.
 -}

module Assistant.Types.NetMessager where

import Common.Annex
import Assistant.Pairing

import Data.Text (Text)
import Control.Concurrent.STM
import Control.Concurrent.MSampleVar
import Data.ByteString (ByteString)
import qualified Data.Set as S

{- Messages that can be sent out of band by a network messager. -}
data NetMessage
	-- indicate that pushes have been made to the repos with these uuids
	= NotifyPush [UUID]
	-- requests other clients to inform us of their presence
	| QueryPresence
	-- notification about a stage in the pairing process,
	-- involving a client, and a UUID.
	| PairingNotification PairStage ClientID UUID
	-- used for git push over the network messager
	| Pushing ClientID PushStage
	deriving (Show, Eq, Ord)

{- Something used to identify the client, or clients to send the message to. -}
type ClientID = Text

data PushStage
	-- indicates that we have data to push over the out of band network
	= CanPush
	-- request that a git push be sent over the out of band network
	| PushRequest
	-- indicates that a push is starting
	| StartingPush
	-- a chunk of output of git receive-pack
	| ReceivePackOutput ByteString
	-- a chuck of output of git send-pack
	| SendPackOutput ByteString
	-- sent when git receive-pack exits, with its exit code
	| ReceivePackDone ExitCode
	deriving (Show, Eq, Ord)

{- Things that initiate either side of a push, but do not actually send data. -}
isPushInitiation :: PushStage -> Bool
isPushInitiation CanPush = True
isPushInitiation PushRequest = True
isPushInitiation StartingPush = True
isPushInitiation _ = False

data PushSide = SendPack | ReceivePack
	deriving (Eq, Ord)

pushDestinationSide :: PushStage -> PushSide
pushDestinationSide CanPush = ReceivePack
pushDestinationSide PushRequest = SendPack
pushDestinationSide StartingPush = ReceivePack
pushDestinationSide (ReceivePackOutput _) = SendPack
pushDestinationSide (SendPackOutput _) = ReceivePack
pushDestinationSide (ReceivePackDone _) = SendPack

type SideMap a = PushSide -> a

mkSideMap :: STM a -> IO (SideMap a)
mkSideMap gen = do
	(sp, rp) <- atomically $ (,) <$> gen <*> gen
	return $ lookupside sp rp
  where
	lookupside sp _ SendPack = sp
	lookupside _ rp ReceivePack = rp

getSide :: PushSide -> SideMap a -> a
getSide side m = m side

data NetMessager = NetMessager
	-- outgoing messages
	{ netMessages :: TChan (NetMessage)
	-- write to this to restart the net messager
	, netMessagerRestart :: MSampleVar ()
	-- only one side of a push can be running at a time
	, netMessagerPushRunning :: SideMap (TMVar (Maybe ClientID))
	-- incoming messages related to a running push
	, netMessagesPush :: SideMap (TChan NetMessage)
	-- incoming push messages, deferred to be processed later
	, netMessagesPushDeferred :: SideMap (TMVar (S.Set NetMessage))
	}

newNetMessager :: IO NetMessager
newNetMessager = NetMessager
	<$> atomically newTChan
	<*> newEmptySV
	<*> mkSideMap (newTMVar Nothing)
	<*> mkSideMap newTChan
	<*> mkSideMap (newTMVar S.empty)
  where

24 Assistant/Types/Pushes.hs Normal file
@@ -0,0 +1,24 @@
{- git-annex assistant push tracking
 -
 - Copyright 2012 Joey Hess <joey@kitenet.net>
 -
 - Licensed under the GNU GPL version 3 or higher.
 -}

module Assistant.Types.Pushes where

import Common.Annex

import Control.Concurrent.STM
import Data.Time.Clock
import qualified Data.Map as M

{- Track the most recent push failure for each remote. -}
type PushMap = M.Map Remote UTCTime
type FailedPushMap = TMVar PushMap

{- The TMVar starts empty, and is left empty when there are no
 - failed pushes. This way we can block until there are some failed pushes.
 -}
newFailedPushMap :: IO FailedPushMap
newFailedPushMap = atomically newEmptyTMVar

25 Assistant/Types/ScanRemotes.hs Normal file
@@ -0,0 +1,25 @@
{- git-annex assistant remotes needing scanning
 -
 - Copyright 2012 Joey Hess <joey@kitenet.net>
 -
 - Licensed under the GNU GPL version 3 or higher.
 -}

module Assistant.Types.ScanRemotes where

import Common.Annex

import Control.Concurrent.STM
import qualified Data.Map as M

data ScanInfo = ScanInfo
	{ scanPriority :: Int
	, fullScan :: Bool
	}

type ScanRemoteMap = TMVar (M.Map Remote ScanInfo)

{- The TMVar starts empty, and is left empty when there are no remotes
 - to scan. -}
newScanRemoteMap :: IO ScanRemoteMap
newScanRemoteMap = atomically newEmptyTMVar

14 Assistant/Types/ThreadName.hs Normal file
@@ -0,0 +1,14 @@
{- name of a thread
 -
 - Copyright 2012 Joey Hess <joey@kitenet.net>
 -
 - Licensed under the GNU GPL version 3 or higher.
 -}

module Assistant.Types.ThreadName where

newtype ThreadName = ThreadName String
	deriving (Eq, Read, Show, Ord)

fromThreadName :: ThreadName -> String
fromThreadName (ThreadName n) = n

38 Assistant/Types/ThreadedMonad.hs Normal file
@@ -0,0 +1,38 @@
{- making the Annex monad available across threads
 -
 - Copyright 2012 Joey Hess <joey@kitenet.net>
 -
 - Licensed under the GNU GPL version 3 or higher.
 -}

module Assistant.Types.ThreadedMonad where

import Common.Annex
import qualified Annex

import Control.Concurrent
import Data.Tuple

{- The Annex state is stored in a MVar, so that threaded actions can access
 - it. -}
type ThreadState = MVar Annex.AnnexState

{- Stores the Annex state in a MVar.
 -
 - Once the action is finished, retrieves the state from the MVar.
 -}
withThreadState :: (ThreadState -> Annex a) -> Annex a
withThreadState a = do
	state <- Annex.getState id
	mvar <- liftIO $ newMVar state
	r <- a mvar
	newstate <- liftIO $ takeMVar mvar
	Annex.changeState (const newstate)
	return r

{- Runs an Annex action, using the state from the MVar.
 -
 - This serializes calls by threads; only one thread can run in Annex at a
 - time. -}
runThreadState :: ThreadState -> Annex a -> IO a
runThreadState mvar a = modifyMVar mvar $ \state -> swap <$> Annex.run state a

29 Assistant/Types/TransferQueue.hs Normal file
@@ -0,0 +1,29 @@
{- git-annex assistant pending transfer queue
 -
 - Copyright 2012 Joey Hess <joey@kitenet.net>
 -
 - Licensed under the GNU GPL version 3 or higher.
 -}

module Assistant.Types.TransferQueue where

import Common.Annex
import Logs.Transfer
import Types.Remote

import Control.Concurrent.STM

data TransferQueue = TransferQueue
	{ queuesize :: TVar Int
	, queuelist :: TVar [(Transfer, TransferInfo)]
	, deferreddownloads :: TVar [(Key, AssociatedFile)]
	}

data Schedule = Next | Later
	deriving (Eq)

newTransferQueue :: IO TransferQueue
newTransferQueue = atomically $ TransferQueue
	<$> newTVar 0
	<*> newTVar []
	<*> newTVar []

34 Assistant/Types/TransferSlots.hs Normal file
@@ -0,0 +1,34 @@
{- git-annex assistant transfer slots
 -
 - Copyright 2012 Joey Hess <joey@kitenet.net>
 -
 - Licensed under the GNU GPL version 3 or higher.
 -}

{-# LANGUAGE DeriveDataTypeable #-}

module Assistant.Types.TransferSlots where

import qualified Control.Exception as E
import qualified Control.Concurrent.MSemN as MSemN
import Data.Typeable

type TransferSlots = MSemN.MSemN Int

{- A special exception that can be thrown to pause or resume a transfer, while
 - keeping its slot in use. -}
data TransferException = PauseTransfer | ResumeTransfer
	deriving (Show, Eq, Typeable)

instance E.Exception TransferException

{- Number of concurrent transfers allowed to be run from the assistant.
 -
 - Transfers launched by other means, including by remote assistants,
 - do not currently take up slots.
 -}
numSlots :: Int
numSlots = 1

newTransferSlots :: IO TransferSlots
newTransferSlots = MSemN.new numSlots

104 Assistant/WebApp.hs Normal file
@@ -0,0 +1,104 @@
{- git-annex assistant webapp core
 -
 - Copyright 2012 Joey Hess <joey@kitenet.net>
 -
 - Licensed under the GNU AGPL version 3 or higher.
 -}

{-# LANGUAGE TypeFamilies, QuasiQuotes, MultiParamTypeClasses, TemplateHaskell, OverloadedStrings, RankNTypes #-}

module Assistant.WebApp where

import Assistant.WebApp.Types
import Assistant.Common hiding (liftAnnex)
import qualified Assistant.Monad as Assistant
import Utility.NotificationBroadcaster
import Utility.Yesod

import Yesod
import Data.Text (Text)
import Control.Concurrent.STM
import Control.Concurrent

inFirstRun :: Handler Bool
inFirstRun = isNothing . relDir <$> getYesod

newWebAppState :: IO (TMVar WebAppState)
newWebAppState = atomically $ newTMVar $ WebAppState { showIntro = True }

getWebAppState :: forall sub. GHandler sub WebApp WebAppState
getWebAppState = liftIO . atomically . readTMVar =<< webAppState <$> getYesod

modifyWebAppState :: forall sub. (WebAppState -> WebAppState) -> GHandler sub WebApp ()
modifyWebAppState a = go =<< webAppState <$> getYesod
  where
	go s = liftIO $ atomically $ do
		v <- takeTMVar s
		putTMVar s $ a v

{- Runs an Annex action from the webapp.
 -
 - When the webapp is run outside a git-annex repository, the fallback
 - value is returned.
 -}
liftAnnexOr :: forall sub a. a -> Annex a -> GHandler sub WebApp a
liftAnnexOr fallback a = ifM (noAnnex <$> getYesod)
	( return fallback
	, liftAssistant $ Assistant.liftAnnex a
	)

liftAnnex :: forall sub a. Annex a -> GHandler sub WebApp a
liftAnnex = liftAnnexOr $ error "internal runAnnex"

liftAssistant :: forall sub a. (Assistant a) -> GHandler sub WebApp a
liftAssistant a = liftIO . flip runAssistant a =<< assistantData <$> getYesod

waitNotifier :: forall sub. (Assistant NotificationBroadcaster) -> NotificationId -> GHandler sub WebApp ()
waitNotifier getbroadcaster nid = liftAssistant $ do
	b <- getbroadcaster
	liftIO $ waitNotification $ notificationHandleFromId b nid

newNotifier :: forall sub. (Assistant NotificationBroadcaster) -> GHandler sub WebApp NotificationId
newNotifier getbroadcaster = liftAssistant $ do
	b <- getbroadcaster
	liftIO $ notificationHandleToId <$> newNotificationHandle b

{- Adds the auth parameter as a hidden field on a form. Must be put into
 - every form. -}
webAppFormAuthToken :: Widget
webAppFormAuthToken = do
	webapp <- lift getYesod
	[whamlet|<input type="hidden" name="auth" value="#{secretToken webapp}">|]

{- A button with an icon, and maybe label or tooltip, that can be
 - clicked to perform some action.
 - With javascript, clicking it POSTs the Route, and remains on the same
 - page.
 - With noscript, clicking it GETs the Route. -}
actionButton :: Route WebApp -> (Maybe String) -> (Maybe String) -> String -> String -> Widget
actionButton route label tooltip buttonclass iconclass = $(widgetFile "actionbutton")

type UrlRenderFunc = Route WebApp -> [(Text, Text)] -> Text
type UrlRenderer = MVar (UrlRenderFunc)

newUrlRenderer :: IO UrlRenderer
newUrlRenderer = newEmptyMVar

setUrlRenderer :: UrlRenderer -> (UrlRenderFunc) -> IO ()
setUrlRenderer = putMVar

{- Blocks until the webapp is running and has called setUrlRenderer. -}
renderUrl :: UrlRenderer -> Route WebApp -> [(Text, Text)] -> IO Text
renderUrl urlrenderer route params = do
	r <- readMVar urlrenderer
	return $ r route params

{- Redirects back to the referring page, or if there's none, HomeR -}
redirectBack :: Handler ()
redirectBack = do
	clearUltDest
	setUltDestReferer
	redirectUltDest HomeR

controlMenu :: Widget
controlMenu = $(widgetFile "controlmenu")

18 Assistant/WebApp/Common.hs Normal file
@@ -0,0 +1,18 @@
{- git-annex assistant webapp, common imports
 -
 - Copyright 2012 Joey Hess <joey@kitenet.net>
 -
 - Licensed under the GNU AGPL version 3 or higher.
 -}

module Assistant.WebApp.Common (module X) where

import Assistant.Common as X hiding (liftAnnex)
import Assistant.WebApp as X
import Assistant.WebApp.Page as X
import Assistant.WebApp.Form as X
import Assistant.WebApp.Types as X
import Utility.Yesod as X

import Data.Text as X (Text)
import Yesod as X hiding (textField, passwordField, insertBy, replace, joinPath, deleteBy, delete, insert, Key, Option)

202 Assistant/WebApp/Configurators.hs Normal file
@@ -0,0 +1,202 @@
{- git-annex assistant webapp configurators
 -
 - Copyright 2012 Joey Hess <joey@kitenet.net>
 -
 - Licensed under the GNU AGPL version 3 or higher.
 -}

{-# LANGUAGE TypeFamilies, QuasiQuotes, MultiParamTypeClasses, TemplateHaskell, OverloadedStrings, RankNTypes, CPP #-}

module Assistant.WebApp.Configurators where

import Assistant.WebApp.Common
import Assistant.DaemonStatus
import Assistant.WebApp.Notifications
import Assistant.WebApp.Utility
import Assistant.WebApp.Configurators.Local
import qualified Annex
import qualified Remote
import qualified Types.Remote as Remote
import Annex.UUID (getUUID)
import Logs.Remote
import Logs.Trust
import qualified Git
#ifdef WITH_XMPP
import Assistant.XMPP.Client
#endif

import qualified Data.Map as M

{- The main configuration screen. -}
getConfigurationR :: Handler RepHtml
getConfigurationR = ifM (inFirstRun)
	( getFirstRepositoryR
	, page "Configuration" (Just Configuration) $ do
#ifdef WITH_XMPP
		xmppconfigured <- lift $ liftAnnex $ isJust <$> getXMPPCreds
#else
		let xmppconfigured = False
#endif
		$(widgetFile "configurators/main")
	)

{- An intro message, list of repositories, and nudge to make more. -}
introDisplay :: Text -> Widget
introDisplay ident = do
	webapp <- lift getYesod
	repolist <- lift $ repoList $ RepoSelector
		{ onlyCloud = False
		, onlyConfigured = True
		, includeHere = False
		}
	let n = length repolist
	let numrepos = show n
	$(widgetFile "configurators/intro")
	lift $ modifyWebAppState $ \s -> s { showIntro = False }

makeMiscRepositories :: Widget
makeMiscRepositories = $(widgetFile "configurators/repositories/misc")

makeCloudRepositories :: Widget
makeCloudRepositories = $(widgetFile "configurators/repositories/cloud")

{- Lists known repositories, followed by options to add more. -}
getRepositoriesR :: Handler RepHtml
getRepositoriesR = page "Repositories" (Just Configuration) $ do
	let repolist = repoListDisplay $ RepoSelector
		{ onlyCloud = False
		, onlyConfigured = False
		, includeHere = True
		}
	$(widgetFile "configurators/repositories")

data Actions
	= DisabledRepoActions
		{ setupRepoLink :: Route WebApp }
	| SyncingRepoActions
		{ setupRepoLink :: Route WebApp
		, syncToggleLink :: Route WebApp
		}
	| NotSyncingRepoActions
		{ setupRepoLink :: Route WebApp
		, syncToggleLink :: Route WebApp
		}

mkSyncingRepoActions :: UUID -> Actions
mkSyncingRepoActions u = SyncingRepoActions
	{ setupRepoLink = EditRepositoryR u
	, syncToggleLink = DisableSyncR u
	}

mkNotSyncingRepoActions :: UUID -> Actions
mkNotSyncingRepoActions u = NotSyncingRepoActions
	{ setupRepoLink = EditRepositoryR u
	, syncToggleLink = EnableSyncR u
	}

needsEnabled :: Actions -> Bool
needsEnabled (DisabledRepoActions _) = True
needsEnabled _ = False

notSyncing :: Actions -> Bool
notSyncing (SyncingRepoActions _ _) = False
notSyncing _ = True

{- Called by client to get a list of repos, that refreshes
 - when new repos as added.
 -
 - Returns a div, which will be inserted into the calling page.
 -}
getRepoListR :: RepoListNotificationId -> Handler RepHtml
getRepoListR (RepoListNotificationId nid reposelector) = do
	waitNotifier getRepoListBroadcaster nid
	p <- widgetToPageContent $ repoListDisplay reposelector
	hamletToRepHtml $ [hamlet|^{pageBody p}|]

repoListDisplay :: RepoSelector -> Widget
repoListDisplay reposelector = do
	autoUpdate ident (NotifierRepoListR reposelector) (10 :: Int) (10 :: Int)

	repolist <- lift $ repoList reposelector

	$(widgetFile "configurators/repositories/list")

  where
	ident = "repolist"

type RepoList = [(String, String, Actions)]

{- A numbered list of known repositories,
 - with actions that can be taken on them. -}
repoList :: RepoSelector -> Handler RepoList
repoList reposelector
	| onlyConfigured reposelector = list =<< configured
	| otherwise = list =<< (++) <$> configured <*> rest
  where
	configured = do
		rs <- filter wantedrepo . syncRemotes
			<$> liftAssistant getDaemonStatus
		liftAnnex $ do
			let us = map Remote.uuid rs
			let l = zip us $ map mkSyncingRepoActions us
			if includeHere reposelector
				then do
					u <- getUUID
					autocommit <- annexAutoCommit <$> Annex.getGitConfig
					let hereactions = if autocommit
						then mkSyncingRepoActions u
						else mkNotSyncingRepoActions u
					let here = (u, hereactions)
					return $ here : l
				else return l
	rest = liftAnnex $ do
		m <- readRemoteLog
		unconfigured <- map snd . catMaybes . filter wantedremote
			. map (findinfo m)
			<$> (trustExclude DeadTrusted $ M.keys m)
		unsyncable <- map Remote.uuid . filter wantedrepo .
			filter (not . remoteAnnexSync . Remote.gitconfig)
			<$> Remote.enabledRemoteList
		return $ zip unsyncable (map mkNotSyncingRepoActions unsyncable) ++ unconfigured
	wantedrepo r
		| Remote.readonly r = False
		| onlyCloud reposelector = Git.repoIsUrl (Remote.repo r) && not (isXMPPRemote r)
		| otherwise = True
	wantedremote Nothing = False
	wantedremote (Just (iscloud, _))
		| onlyCloud reposelector = iscloud
		| otherwise = True
	findinfo m u = case M.lookup u m of
		Nothing -> Nothing
		Just c -> case M.lookup "type" c of
			Just "rsync" -> val True EnableRsyncR
			Just "directory" -> val False EnableDirectoryR
#ifdef WITH_S3
			Just "S3" -> val True EnableS3R
#endif
			Just "glacier" -> val True EnableGlacierR
#ifdef WITH_WEBDAV
			Just "webdav" -> val True EnableWebDAVR
#endif
			_ -> Nothing
	  where
		val iscloud r = Just (iscloud, (u, DisabledRepoActions $ r u))
	list l = liftAnnex $ do
		let l' = nubBy (\x y -> fst x == fst y) l
		zip3
			<$> pure counter
			<*> Remote.prettyListUUIDs (map fst l')
			<*> pure (map snd l')
	counter = map show ([1..] :: [Int])

getEnableSyncR :: UUID -> Handler ()
getEnableSyncR = flipSync True

getDisableSyncR :: UUID -> Handler ()
getDisableSyncR = flipSync False

flipSync :: Bool -> UUID -> Handler ()
flipSync enable uuid = do
	mremote <- liftAnnex $ Remote.remoteFromUUID uuid
	changeSyncable mremote enable
	redirect RepositoriesR

189 Assistant/WebApp/Configurators/AWS.hs Normal file
@@ -0,0 +1,189 @@
{- git-annex assistant webapp configurators for Amazon AWS services
 -
 - Copyright 2012 Joey Hess <joey@kitenet.net>
 -
 - Licensed under the GNU AGPL version 3 or higher.
 -}

{-# LANGUAGE CPP, FlexibleContexts, TypeFamilies, QuasiQuotes, MultiParamTypeClasses, TemplateHaskell, OverloadedStrings, RankNTypes #-}

module Assistant.WebApp.Configurators.AWS where

import Assistant.WebApp.Common
import Assistant.MakeRemote
import Assistant.Sync
#ifdef WITH_S3
import qualified Remote.S3 as S3
#endif
import qualified Remote.Glacier as Glacier
import qualified Remote.Helper.AWS as AWS
import Logs.Remote
import qualified Remote
import Types.Remote (RemoteConfig)
import Types.StandardGroups
import Logs.PreferredContent

import qualified Data.Text as T
import qualified Data.Map as M
import Data.Char

awsConfigurator :: Widget -> Handler RepHtml
awsConfigurator = page "Add an Amazon repository" (Just Configuration)

glacierConfigurator :: Widget -> Handler RepHtml
glacierConfigurator a = do
	ifM (liftIO $ inPath "glacier")
		( awsConfigurator a
		, awsConfigurator needglaciercli
		)
  where
	needglaciercli = $(widgetFile "configurators/needglaciercli")

data StorageClass = StandardRedundancy | ReducedRedundancy
	deriving (Eq, Enum, Bounded)

instance Show StorageClass where
	show StandardRedundancy = "STANDARD"
	show ReducedRedundancy = "REDUCED_REDUNDANCY"

data AWSInput = AWSInput
	{ accessKeyID :: Text
	, secretAccessKey :: Text
	, datacenter :: Text
	-- Only used for S3, not Glacier.
	, storageClass :: StorageClass
	, repoName :: Text
	, enableEncryption :: EnableEncryption
	}

data AWSCreds = AWSCreds Text Text

extractCreds :: AWSInput -> AWSCreds
extractCreds i = AWSCreds (accessKeyID i) (secretAccessKey i)

s3InputAForm :: AForm WebApp WebApp AWSInput
s3InputAForm = AWSInput
	<$> accessKeyIDField
	<*> secretAccessKeyField
	<*> datacenterField AWS.S3
	<*> areq (selectFieldList storageclasses) "Storage class" (Just StandardRedundancy)
	<*> areq textField "Repository name" (Just "S3")
	<*> enableEncryptionField
  where
	storageclasses :: [(Text, StorageClass)]
	storageclasses =
		[ ("Standard redundancy", StandardRedundancy)
		, ("Reduced redundancy (costs less)", ReducedRedundancy)
		]

glacierInputAForm :: AForm WebApp WebApp AWSInput
glacierInputAForm = AWSInput
	<$> accessKeyIDField
	<*> secretAccessKeyField
	<*> datacenterField AWS.Glacier
	<*> pure StandardRedundancy
	<*> areq textField "Repository name" (Just "glacier")
	<*> enableEncryptionField

awsCredsAForm :: AForm WebApp WebApp AWSCreds
awsCredsAForm = AWSCreds
	<$> accessKeyIDField
	<*> secretAccessKeyField

accessKeyIDField :: AForm WebApp WebApp Text
accessKeyIDField = areq (textField `withNote` help) "Access Key ID" Nothing
  where
	help = [whamlet|
<a href="https://portal.aws.amazon.com/gp/aws/securityCredentials#id_block">
  Get Amazon access keys
|]

secretAccessKeyField :: AForm WebApp WebApp Text
secretAccessKeyField = areq passwordField "Secret Access Key" Nothing

datacenterField :: AWS.Service -> AForm WebApp WebApp Text
datacenterField service = areq (selectFieldList list) "Datacenter" defregion
  where
	list = M.toList $ AWS.regionMap service
	defregion = Just $ AWS.defaultRegion service

getAddS3R :: Handler RepHtml
#ifdef WITH_S3
getAddS3R = awsConfigurator $ do
	((result, form), enctype) <- lift $
		runFormGet $ renderBootstrap s3InputAForm
	case result of
		FormSuccess input -> lift $ do
			let name = T.unpack $ repoName input
			makeAWSRemote S3.remote (extractCreds input) name setgroup $ M.fromList
				[ configureEncryption $ enableEncryption input
				, ("type", "S3")
				, ("datacenter", T.unpack $ datacenter input)
				, ("storageclass", show $ storageClass input)
				]
		_ -> $(widgetFile "configurators/adds3")
  where
	setgroup r = liftAnnex $
		setStandardGroup (Remote.uuid r) TransferGroup
#else
getAddS3R = error "S3 not supported by this build"
#endif

getAddGlacierR :: Handler RepHtml
getAddGlacierR = glacierConfigurator $ do
	((result, form), enctype) <- lift $
		runFormGet $ renderBootstrap glacierInputAForm
	case result of
		FormSuccess input -> lift $ do
			let name = T.unpack $ repoName input
			makeAWSRemote Glacier.remote (extractCreds input) name setgroup $ M.fromList
				[ configureEncryption $ enableEncryption input
				, ("type", "glacier")
				, ("datacenter", T.unpack $ datacenter input)
				]
		_ -> $(widgetFile "configurators/addglacier")
  where
	setgroup r = liftAnnex $
		setStandardGroup (Remote.uuid r) SmallArchiveGroup

getEnableS3R :: UUID -> Handler RepHtml
#ifdef WITH_S3
getEnableS3R = awsConfigurator . enableAWSRemote S3.remote
#else
getEnableS3R _ = error "S3 not supported by this build"
#endif

getEnableGlacierR :: UUID -> Handler RepHtml
getEnableGlacierR = glacierConfigurator . enableAWSRemote Glacier.remote

enableAWSRemote :: RemoteType -> UUID -> Widget
enableAWSRemote remotetype uuid = do
	((result, form), enctype) <- lift $
		runFormGet $ renderBootstrap awsCredsAForm
	case result of
		FormSuccess creds -> lift $ do
			m <- liftAnnex readRemoteLog
			let name = fromJust $ M.lookup "name" $
				fromJust $ M.lookup uuid m
			makeAWSRemote remotetype creds name (const noop) M.empty
		_ -> do
			description <- lift $ liftAnnex $
				T.pack . concat <$> Remote.prettyListUUIDs [uuid]
			$(widgetFile "configurators/enableaws")

makeAWSRemote :: RemoteType -> AWSCreds -> String -> (Remote -> Handler ()) -> RemoteConfig -> Handler ()
makeAWSRemote remotetype (AWSCreds ak sk) name setup config = do
	remotename <- liftAnnex $ fromRepo $ uniqueRemoteName name 0
	liftIO $ AWS.setCredsEnv (T.unpack ak, T.unpack sk)
	r <- liftAnnex $ addRemote $ do
		makeSpecialRemote hostname remotetype config
		return remotename
	setup r
	liftAssistant $ syncNewRemote r
	redirect $ EditNewCloudRepositoryR $ Remote.uuid r
  where
	{- AWS services use the remote name as the basis for a host
	 - name, so filter it to contain valid characters. -}
	hostname = case filter isAlphaNum name of
		[] -> "aws"
		n -> n

150 Assistant/WebApp/Configurators/Edit.hs Normal file
@@ -0,0 +1,150 @@
{- git-annex assistant webapp configurator for editing existing repos
 -
 - Copyright 2012 Joey Hess <joey@kitenet.net>
 -
 - Licensed under the GNU AGPL version 3 or higher.
 -}

{-# LANGUAGE TypeFamilies, QuasiQuotes, MultiParamTypeClasses, TemplateHaskell, OverloadedStrings, RankNTypes #-}

module Assistant.WebApp.Configurators.Edit where

import Assistant.WebApp.Common
import Assistant.WebApp.Utility
import Assistant.DaemonStatus
import Assistant.MakeRemote (uniqueRemoteName)
import Assistant.WebApp.Configurators.XMPP (xmppNeeded)
import qualified Remote
import qualified Types.Remote as Remote
import qualified Remote.List as Remote
import Logs.UUID
import Logs.Group
import Logs.PreferredContent
import Types.StandardGroups
import qualified Git
import qualified Git.Command
import qualified Git.Config
import qualified Annex
import Git.Remote

import qualified Data.Text as T
import qualified Data.Map as M
import qualified Data.Set as S

data RepoGroup = RepoGroupCustom String | RepoGroupStandard StandardGroup
	deriving (Show, Eq)

data RepoConfig = RepoConfig
	{ repoName :: Text
	, repoDescription :: Maybe Text
	, repoGroup :: RepoGroup
	, repoSyncable :: Bool
	}
	deriving (Show)

getRepoConfig :: UUID -> Maybe Remote -> Annex RepoConfig
getRepoConfig uuid mremote = RepoConfig
	<$> pure (T.pack $ maybe "here" Remote.name mremote)
	<*> (maybe Nothing (Just . T.pack) . M.lookup uuid <$> uuidMap)
	<*> getrepogroup
	<*> getsyncing
  where
	getrepogroup = do
		groups <- lookupGroups uuid
		return $
			maybe (RepoGroupCustom $ unwords $ S.toList groups) RepoGroupStandard
				(getStandardGroup groups)
	getsyncing = case mremote of
		Just r -> return $ remoteAnnexSync $ Remote.gitconfig r
		Nothing -> annexAutoCommit <$> Annex.getGitConfig

setRepoConfig :: UUID -> Maybe Remote -> RepoConfig -> RepoConfig -> Handler ()
setRepoConfig uuid mremote oldc newc = do
	when (repoDescription oldc /= repoDescription newc) $ liftAnnex $ do
		maybe noop (describeUUID uuid . T.unpack) (repoDescription newc)
		void uuidMapLoad
	when (repoGroup oldc /= repoGroup newc) $ liftAnnex $
		case repoGroup newc of
			RepoGroupStandard g -> setStandardGroup uuid g
			RepoGroupCustom s -> groupSet uuid $ S.fromList $ words s
	when (repoSyncable oldc /= repoSyncable newc) $
		changeSyncable mremote (repoSyncable newc)
	when (isJust mremote && makeLegalName (T.unpack $ repoName oldc) /= makeLegalName (T.unpack $ repoName newc)) $ do
		liftAnnex $ do
			name <- fromRepo $ uniqueRemoteName (T.unpack $ repoName newc) 0
			{- git remote rename expects there to be a
			 - remote.<name>.fetch, and exits nonzero if
			 - there's not. Special remotes don't normally
			 - have that, and don't use it. Temporarily add
			 - it if it's missing. -}
			let remotefetch = "remote." ++ T.unpack (repoName oldc) ++ ".fetch"
			needfetch <- isNothing <$> fromRepo (Git.Config.getMaybe remotefetch)
			when needfetch $
				inRepo $ Git.Command.run
					[Param "config", Param remotefetch, Param ""]
			inRepo $ Git.Command.run
				[ Param "remote"
				, Param "rename"
				, Param $ T.unpack $ repoName oldc
				, Param name
				]
			void $ Remote.remoteListRefresh
		liftAssistant updateSyncRemotes

editRepositoryAForm :: RepoConfig -> AForm WebApp WebApp RepoConfig
editRepositoryAForm def = RepoConfig
	<$> areq textField "Name" (Just $ repoName def)
	<*> aopt textField "Description" (Just $ repoDescription def)
	<*> areq (selectFieldList groups `withNote` help) "Repository group" (Just $ repoGroup def)
	<*> areq checkBoxField "Syncing enabled" (Just $ repoSyncable def)
  where
	groups = customgroups ++ standardgroups
	standardgroups :: [(Text, RepoGroup)]
	standardgroups = map (\g -> (T.pack $ descStandardGroup g , RepoGroupStandard g))
		[minBound :: StandardGroup .. maxBound :: StandardGroup]
	customgroups :: [(Text, RepoGroup)]
	customgroups = case repoGroup def of
		RepoGroupCustom s -> [(T.pack s, RepoGroupCustom s)]
		_ -> []
	help = [whamlet|<a href="@{RepoGroupR}">What's this?</a>|]

getEditRepositoryR :: UUID -> Handler RepHtml
getEditRepositoryR = editForm False

getEditNewRepositoryR :: UUID -> Handler RepHtml
getEditNewRepositoryR = editForm True

getEditNewCloudRepositoryR :: UUID -> Handler RepHtml
getEditNewCloudRepositoryR uuid = xmppNeeded >> editForm True uuid

editForm :: Bool -> UUID -> Handler RepHtml
editForm new uuid = page "Configure repository" (Just Configuration) $ do
	mremote <- lift $ liftAnnex $ Remote.remoteFromUUID uuid
	curr <- lift $ liftAnnex $ getRepoConfig uuid mremote
	lift $ checkarchivedirectory curr
	((result, form), enctype) <- lift $
		runFormGet $ renderBootstrap $ editRepositoryAForm curr
	case result of
		FormSuccess input -> lift $ do
			checkarchivedirectory input
			setRepoConfig uuid mremote curr input
			redirect RepositoriesR
		_ -> showform form enctype curr
  where
	showform form enctype curr = do
		let istransfer = repoGroup curr == RepoGroupStandard TransferGroup
		$(widgetFile "configurators/editrepository")

	{- Makes a toplevel archive directory, so the user can get on with
	 - using it. This is done both when displaying the form, as well
	 - as after it's posted, because the user may not post the form,
	 - but may see that the repo is set up to use the archive
	 - directory. -}
	checkarchivedirectory cfg
		| repoGroup cfg == RepoGroupStandard SmallArchiveGroup = go
		| repoGroup cfg == RepoGroupStandard FullArchiveGroup = go
		| otherwise = noop
	  where
		go = liftAnnex $ inRepo $ \g ->
			createDirectoryIfMissing True $
				Git.repoPath g </> "archive"

334 Assistant/WebApp/Configurators/Local.hs Normal file
@@ -0,0 +1,334 @@
{- git-annex assistant webapp configurators for making local repositories
 -
 - Copyright 2012 Joey Hess <joey@kitenet.net>
 -
 - Licensed under the GNU AGPL version 3 or higher.
 -}

{-# LANGUAGE CPP, TypeFamilies, QuasiQuotes, MultiParamTypeClasses, TemplateHaskell, OverloadedStrings, RankNTypes #-}

#if defined VERSION_yesod_form
#if ! MIN_VERSION_yesod_form(1,2,0)
#define WITH_OLD_YESOD
#endif
#endif

module Assistant.WebApp.Configurators.Local where

import Assistant.WebApp.Common
import Assistant.WebApp.Utility
import Assistant.WebApp.OtherRepos
import Assistant.MakeRemote
import Init
import qualified Git
import qualified Git.Construct
import qualified Git.Config
import qualified Git.Command
import qualified Annex
import Locations.UserConfig
import Utility.FreeDesktop
import Utility.Mounts
import Utility.DiskFree
import Utility.DataUnits
import Utility.Network
import Remote (prettyListUUIDs)
import Annex.UUID
import Types.StandardGroups
import Logs.PreferredContent
import Utility.UserInfo
import Config

import qualified Data.Text as T
import Data.Char
import System.Posix.Directory
import qualified Control.Exception as E

data RepositoryPath = RepositoryPath Text
	deriving Show

{- Custom field display for a RepositoryPath, with an icon etc.
 -
 - Validates that the path entered is not empty, and is a safe value
 - to use as a repository. -}
repositoryPathField :: forall sub. Bool -> Field sub WebApp Text
repositoryPathField autofocus = Field
#ifdef WITH_OLD_YESOD
	{ fieldParse = parse
#else
	{ fieldParse = \l _ -> parse l
#endif
	, fieldView = view
#ifndef WITH_OLD_YESOD
	, fieldEnctype = UrlEncoded
#endif
	}
  where
	view idAttr nameAttr attrs val isReq =
		[whamlet|<input type="text" *{attrs} id="#{idAttr}" name="#{nameAttr}" :isReq:required :autofocus:autofocus value="#{either id id val}">|]

	parse [path]
		| T.null path = nopath
		| otherwise = liftIO $ checkRepositoryPath path
	parse [] = return $ Right Nothing
	parse _ = nopath

	nopath = return $ Left "Enter a location for the repository"

{- As well as checking the path for a lot of silly things, tilde is
 - expanded in the returned path. -}
checkRepositoryPath :: Text -> IO (Either (SomeMessage WebApp) (Maybe Text))
checkRepositoryPath p = do
	home <- myHomeDir
	let basepath = expandTilde home $ T.unpack p
	path <- absPath basepath
	let parent = parentDir path
	problems <- catMaybes <$> mapM runcheck
		[ (return $ path == "/", "Enter the full path to use for the repository.")
		, (return $ all isSpace basepath, "A blank path? Seems unlikely.")
		, (doesFileExist path, "A file already exists with that name.")
		, (return $ path == home, "Sorry, using git-annex for your whole home directory is not currently supported.")
		, (not <$> doesDirectoryExist parent, "Parent directory does not exist.")
		, (not <$> canWrite path, "Cannot write a repository there.")
		]
	return $
		case headMaybe problems of
			Nothing -> Right $ Just $ T.pack basepath
			Just prob -> Left prob
  where
	runcheck (chk, msg) = ifM (chk) ( return $ Just msg, return Nothing )
	expandTilde home ('~':'/':path) = home </> path
	expandTilde _ path = path

{- On first run, if run in the home directory, default to putting it in
 - ~/Desktop/annex, when a Desktop directory exists, and ~/annex otherwise.
 -
 - If run in another directory, that the user can write to,
 - the user probably wants to put it there. -}
defaultRepositoryPath :: Bool -> IO FilePath
defaultRepositoryPath firstrun = do
	cwd <- liftIO $ getCurrentDirectory
	home <- myHomeDir
	if home == cwd && firstrun
		then inhome
		else ifM (canWrite cwd) ( return cwd, inhome )
  where
	inhome = do
		desktop <- userDesktopDir
		ifM (doesDirectoryExist desktop)
			( relHome $ desktop </> gitAnnexAssistantDefaultDir
			, return $ "~" </> gitAnnexAssistantDefaultDir
			)

newRepositoryForm :: FilePath -> Form RepositoryPath
newRepositoryForm defpath msg = do
	(pathRes, pathView) <- mreq (repositoryPathField True) ""
		(Just $ T.pack $ addTrailingPathSeparator defpath)
	let (err, errmsg) = case pathRes of
		FormMissing -> (False, "")
		FormFailure l -> (True, concat $ map T.unpack l)
		FormSuccess _ -> (False, "")
	let form = do
		webAppFormAuthToken
		$(widgetFile "configurators/newrepository/form")
	return (RepositoryPath <$> pathRes, form)

{- Making the first repository, when starting the webapp for the first time. -}
getFirstRepositoryR :: Handler RepHtml
getFirstRepositoryR = page "Getting started" (Just Configuration) $ do
	path <- liftIO . defaultRepositoryPath =<< lift inFirstRun
	((res, form), enctype) <- lift $ runFormGet $ newRepositoryForm path
	case res of
		FormSuccess (RepositoryPath p) -> lift $
			startFullAssistant $ T.unpack p
		_ -> $(widgetFile "configurators/newrepository/first")

{- Adding a new local repository, which may be entirely separate, or may
 - be connected to the current repository. -}
getNewRepositoryR :: Handler RepHtml
getNewRepositoryR = page "Add another repository" (Just Configuration) $ do
	home <- liftIO myHomeDir
	((res, form), enctype) <- lift $ runFormGet $ newRepositoryForm home
	case res of
		FormSuccess (RepositoryPath p) -> do
			let path = T.unpack p
			liftIO $ makeRepo path False
			u <- liftIO $ initRepo True path Nothing
			lift $ liftAnnexOr () $ setStandardGroup u ClientGroup
			liftIO $ addAutoStartFile path
			liftIO $ startAssistant path
			askcombine u path
		_ -> $(widgetFile "configurators/newrepository")
  where
	askcombine newrepouuid newrepopath = do
		newrepo <- liftIO $ relHome newrepopath
		mainrepo <- fromJust . relDir <$> lift getYesod
		$(widgetFile "configurators/newrepository/combine")

getCombineRepositoryR :: FilePathAndUUID -> Handler RepHtml
getCombineRepositoryR (FilePathAndUUID newrepopath newrepouuid) = do
	r <- combineRepos newrepopath remotename
	syncRemote r
	redirect $ EditRepositoryR newrepouuid
  where
	remotename = takeFileName newrepopath

data RemovableDrive = RemovableDrive
	{ diskFree :: Maybe Integer
	, mountPoint :: Text
	}
	deriving (Show, Eq, Ord)

selectDriveForm :: [RemovableDrive] -> Maybe RemovableDrive -> Form RemovableDrive
selectDriveForm drives def = renderBootstrap $ RemovableDrive
	<$> pure Nothing
	<*> areq (selectFieldList pairs) "Select drive:" (mountPoint <$> def)
  where
	pairs = zip (map describe drives) (map mountPoint drives)
	describe drive = case diskFree drive of
		Nothing -> mountPoint drive
		Just free ->
			let sz = roughSize storageUnits True free
			in T.unwords
				[ mountPoint drive
				, T.concat ["(", T.pack sz]
				, "free)"
				]

{- Adding a removable drive. -}
getAddDriveR :: Handler RepHtml
getAddDriveR = page "Add a removable drive" (Just Configuration) $ do
	removabledrives <- liftIO $ driveList
	writabledrives <- liftIO $
		filterM (canWrite . T.unpack . mountPoint) removabledrives
	((res, form), enctype) <- lift $ runFormGet $
		selectDriveForm (sort writabledrives) Nothing
	case res of
		FormSuccess (RemovableDrive { mountPoint = d }) -> lift $
			make (T.unpack d) >>= redirect . EditNewRepositoryR
		_ -> $(widgetFile "configurators/adddrive")
  where
	make mountpoint = do
		liftIO $ makerepo dir
		u <- liftIO $ initRepo False dir $ Just remotename
		r <- combineRepos dir remotename
		liftAnnex $ setStandardGroup u TransferGroup
		syncRemote r
		return u
	  where
		dir = mountpoint </> gitAnnexAssistantDefaultDir
		remotename = takeFileName mountpoint
	{- The repo may already exist, when adding removable media
	 - that has already been used elsewhere. -}
	makerepo dir = liftIO $ do
		r <- E.try (inDir dir $ getUUID) :: IO (Either E.SomeException UUID)
		case r of
			Right u | u /= NoUUID -> noop
			_ -> do
				createDirectoryIfMissing True dir
				makeRepo dir True

{- Each repository is made a remote of the other.
 - Next call syncRemote to get them in sync. -}
combineRepos :: FilePath -> String -> Handler Remote
combineRepos dir name = liftAnnex $ do
	hostname <- maybe "host" id <$> liftIO getHostname
	hostlocation <- fromRepo Git.repoLocation
	liftIO $ inDir dir $ void $ makeGitRemote hostname hostlocation
	addRemote $ makeGitRemote name dir

getEnableDirectoryR :: UUID -> Handler RepHtml
getEnableDirectoryR uuid = page "Enable a repository" (Just Configuration) $ do
	description <- lift $ liftAnnex $
		T.pack . concat <$> prettyListUUIDs [uuid]
	$(widgetFile "configurators/enabledirectory")

{- List of removable drives. -}
driveList :: IO [RemovableDrive]
driveList = mapM (gen . mnt_dir) =<< filter sane <$> getMounts
  where
	gen dir = RemovableDrive
		<$> getDiskFree dir
		<*> pure (T.pack dir)
	-- filter out some things that are surely not removable drives
	sane Mntent { mnt_dir = dir, mnt_fsname = dev }
		{- We want real disks like /dev/foo, not
		 - dummy mount points like proc or tmpfs or
		 - gvfs-fuse-daemon. -}
		| not ('/' `elem` dev) = False
		{- Just in case: These mount points are surely not
		 - removable disks. -}
		| dir == "/" = False
		| dir == "/tmp" = False
		| dir == "/run/shm" = False
		| dir == "/run/lock" = False
		| otherwise = True

{- Bootstraps from first run mode to a fully running assistant in a
 - repository, by running the postFirstRun callback, which returns the
 - url to the new webapp. -}
startFullAssistant :: FilePath -> Handler ()
startFullAssistant path = do
	webapp <- getYesod
	url <- liftIO $ do
		makeRepo path False
		u <- initRepo True path Nothing
		inDir path $
			setStandardGroup u ClientGroup
		addAutoStartFile path
		changeWorkingDirectory path
		fromJust $ postFirstRun webapp
	redirect $ T.pack url

{- Makes a new git repository. -}
|
||||
makeRepo :: FilePath -> Bool -> IO ()
|
||||
makeRepo path bare = do
|
||||
(transcript, ok) <- processTranscript "git" (toCommand params) Nothing
|
||||
unless ok $
|
||||
error $ "git init failed!\nOutput:\n" ++ transcript
|
||||
where
|
||||
baseparams = [Param "init", Param "--quiet"]
|
||||
params
|
||||
| bare = baseparams ++ [Param "--bare", File path]
|
||||
| otherwise = baseparams ++ [File path]
|
||||
|
||||
{- Runs an action in the git-annex repository in the specified directory. -}
|
||||
inDir :: FilePath -> Annex a -> IO a
|
||||
inDir dir a = do
|
||||
state <- Annex.new =<< Git.Config.read =<< Git.Construct.fromPath dir
|
||||
Annex.eval state a
|
||||
|
||||
initRepo :: Bool -> FilePath -> Maybe String -> IO UUID
|
||||
initRepo primary_assistant_repo dir desc = inDir dir $ do
|
||||
{- Initialize a git-annex repository in a directory with a description. -}
|
||||
unlessM isInitialized $
|
||||
initialize desc
|
||||
{- Initialize the master branch, so things that expect
|
||||
- to have it will work, before any files are added. -}
|
||||
unlessM (Git.Config.isBare <$> gitRepo) $
|
||||
void $ inRepo $ Git.Command.runBool
|
||||
[ Param "commit"
|
||||
, Param "--quiet"
|
||||
, Param "--allow-empty"
|
||||
, Param "-m"
|
||||
, Param "created repository"
|
||||
]
|
||||
{- Repositories directly managed by the assistant use direct mode.
|
||||
-
|
||||
- Automatic gc is disabled, as it can be slow. Insted, gc is done
|
||||
- once a day.
|
||||
-}
|
||||
when primary_assistant_repo $ do
|
||||
setDirect True
|
||||
inRepo $ Git.Command.run
|
||||
[Param "config", Param "gc.auto", Param "0"]
|
||||
getUUID
|
||||
|
||||
{- Checks if the user can write to a directory.
|
||||
-
|
||||
- The directory may be in the process of being created; if so
|
||||
- the parent directory is checked instead. -}
|
||||
canWrite :: FilePath -> IO Bool
|
||||
canWrite dir = do
|
||||
tocheck <- ifM (doesDirectoryExist dir)
|
||||
(return dir, return $ parentDir dir)
|
||||
catchBoolIO $ fileAccess tocheck False True False
|
292
Assistant/WebApp/Configurators/Pairing.hs
Normal file
292
Assistant/WebApp/Configurators/Pairing.hs
Normal file
|
@ -0,0 +1,292 @@
|
|||
{- git-annex assistant webapp configurator for pairing
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU AGPL version 3 or higher.
|
||||
-}
|
||||
|
||||
{-# LANGUAGE TypeFamilies, QuasiQuotes, MultiParamTypeClasses, TemplateHaskell, OverloadedStrings, RankNTypes #-}
|
||||
{-# LANGUAGE CPP #-}
|
||||
|
||||
module Assistant.WebApp.Configurators.Pairing where
|
||||
|
||||
import Assistant.Pairing
|
||||
import Assistant.WebApp.Common
|
||||
import Assistant.Types.Buddies
|
||||
#ifdef WITH_PAIRING
|
||||
import Assistant.Pairing.Network
|
||||
import Assistant.Pairing.MakeRemote
|
||||
import Assistant.Ssh
|
||||
import Assistant.Alert
|
||||
import Assistant.DaemonStatus
|
||||
import Utility.Verifiable
|
||||
import Utility.Network
|
||||
import Annex.UUID
|
||||
#endif
|
||||
#ifdef WITH_XMPP
|
||||
import Assistant.XMPP
|
||||
import Assistant.XMPP.Client
|
||||
import Assistant.XMPP.Buddies
|
||||
import Assistant.XMPP.Git
|
||||
import Network.Protocol.XMPP
|
||||
import Assistant.Types.NetMessager
|
||||
import Assistant.NetMessager
|
||||
import Assistant.WebApp.Configurators
|
||||
import Assistant.WebApp.Configurators.XMPP
|
||||
#endif
|
||||
import Utility.UserInfo
|
||||
import Git
|
||||
|
||||
#ifdef WITH_PAIRING
|
||||
import qualified Data.Text as T
|
||||
import qualified Data.Text.Encoding as T
|
||||
import qualified Data.ByteString.Lazy as B
|
||||
import Data.Char
|
||||
import qualified Control.Exception as E
|
||||
import Control.Concurrent
|
||||
#endif
|
||||
#ifdef WITH_XMPP
|
||||
import qualified Data.Set as S
|
||||
#endif
|
||||
|
||||
getStartXMPPPairR :: Handler RepHtml
|
||||
#ifdef WITH_XMPP
|
||||
getStartXMPPPairR = ifM (isJust <$> liftAnnex getXMPPCreds)
|
||||
( do
|
||||
{- Ask buddies to send presence info, to get
|
||||
- the buddy list populated. -}
|
||||
liftAssistant $ sendNetMessage QueryPresence
|
||||
pairPage $
|
||||
$(widgetFile "configurators/pairing/xmpp/prompt")
|
||||
, redirect XMPPR -- go get XMPP configured, then come back
|
||||
)
|
||||
#else
|
||||
getStartXMPPPairR = noXMPPPairing
|
||||
|
||||
noXMPPPairing :: Handler RepHtml
|
||||
noXMPPPairing = noPairing "XMPP"
|
||||
#endif
|
||||
|
||||
{- Does pairing with an XMPP buddy, or with other clients sharing an
|
||||
- XMPP account. -}
|
||||
getRunningXMPPPairR :: BuddyKey -> Handler RepHtml
|
||||
#ifdef WITH_XMPP
|
||||
getRunningXMPPPairR bid = do
|
||||
buddy <- liftAssistant $ getBuddy bid <<~ buddyList
|
||||
go $ S.toList . buddyAssistants <$> buddy
|
||||
where
|
||||
go (Just (clients@((Client exemplar):_))) = do
|
||||
creds <- liftAnnex getXMPPCreds
|
||||
let ourjid = fromJust $ parseJID =<< xmppJID <$> creds
|
||||
let samejid = baseJID ourjid == baseJID exemplar
|
||||
u <- liftAnnex getUUID
|
||||
liftAssistant $ forM_ clients $ \(Client c) -> sendNetMessage $
|
||||
PairingNotification PairReq (formatJID c) u
|
||||
xmppPairEnd True $ if samejid then Nothing else Just exemplar
|
||||
-- A buddy could have logged out, or the XMPP client restarted,
|
||||
-- and there be no clients to message; handle unforseen by going back.
|
||||
go _ = redirect StartXMPPPairR
|
||||
#else
|
||||
getRunningXMPPPairR _ = noXMPPPairing
|
||||
#endif
|
||||
|
||||
{- Starts local pairing. -}
|
||||
getStartLocalPairR :: Handler RepHtml
|
||||
#ifdef WITH_PAIRING
|
||||
getStartLocalPairR = promptSecret Nothing $
|
||||
startLocalPairing PairReq noop pairingAlert Nothing
|
||||
#else
|
||||
getStartLocalPairR = noLocalPairing
|
||||
|
||||
noLocalPairing :: Handler RepHtml
|
||||
noLocalPairing = noPairing "local"
|
||||
#endif
|
||||
|
||||
{- Runs on the system that responds to a local pair request; sets up the ssh
|
||||
- authorized key first so that the originating host can immediately sync
|
||||
- with us. -}
|
||||
getFinishLocalPairR :: PairMsg -> Handler RepHtml
|
||||
#ifdef WITH_PAIRING
|
||||
getFinishLocalPairR msg = promptSecret (Just msg) $ \_ secret -> do
|
||||
repodir <- lift $ repoPath <$> liftAnnex gitRepo
|
||||
liftIO $ setup repodir
|
||||
startLocalPairing PairAck (cleanup repodir) alert uuid "" secret
|
||||
where
|
||||
alert = pairRequestAcknowledgedAlert (pairRepo msg) . Just
|
||||
setup repodir = setupAuthorizedKeys msg repodir
|
||||
cleanup repodir = removeAuthorizedKeys False repodir $
|
||||
remoteSshPubKey $ pairMsgData msg
|
||||
uuid = Just $ pairUUID $ pairMsgData msg
|
||||
#else
|
||||
getFinishLocalPairR _ = noLocalPairing
|
||||
#endif
|
||||
|
||||
getConfirmXMPPPairR :: PairKey -> Handler RepHtml
|
||||
#ifdef WITH_XMPP
|
||||
getConfirmXMPPPairR pairkey@(PairKey _ t) = case parseJID t of
|
||||
Nothing -> error "bad JID"
|
||||
Just theirjid -> pairPage $ do
|
||||
let name = buddyName theirjid
|
||||
$(widgetFile "configurators/pairing/xmpp/confirm")
|
||||
#else
|
||||
getConfirmXMPPPairR _ = noXMPPPairing
|
||||
#endif
|
||||
|
||||
getFinishXMPPPairR :: PairKey -> Handler RepHtml
|
||||
#ifdef WITH_XMPP
|
||||
getFinishXMPPPairR (PairKey theiruuid t) = case parseJID t of
|
||||
Nothing -> error "bad JID"
|
||||
Just theirjid -> do
|
||||
selfuuid <- liftAnnex getUUID
|
||||
liftAssistant $ do
|
||||
sendNetMessage $
|
||||
PairingNotification PairAck (formatJID theirjid) selfuuid
|
||||
finishXMPPPairing theirjid theiruuid
|
||||
xmppPairEnd False $ Just theirjid
|
||||
#else
|
||||
getFinishXMPPPairR _ = noXMPPPairing
|
||||
#endif
|
||||
|
||||
#ifdef WITH_XMPP
|
||||
xmppPairEnd :: Bool -> Maybe JID -> Handler RepHtml
|
||||
xmppPairEnd inprogress theirjid = pairPage $ do
|
||||
let friend = buddyName <$> theirjid
|
||||
let cloudrepolist = repoListDisplay $ RepoSelector
|
||||
{ onlyCloud = True
|
||||
, onlyConfigured = False
|
||||
, includeHere = False
|
||||
}
|
||||
$(widgetFile "configurators/pairing/xmpp/end")
|
||||
#endif
|
||||
|
||||
getRunningLocalPairR :: SecretReminder -> Handler RepHtml
|
||||
#ifdef WITH_PAIRING
|
||||
getRunningLocalPairR s = pairPage $ do
|
||||
let secret = fromSecretReminder s
|
||||
$(widgetFile "configurators/pairing/local/inprogress")
|
||||
#else
|
||||
getRunningLocalPairR _ = noLocalPairing
|
||||
#endif
|
||||
|
||||
#ifdef WITH_PAIRING
|
||||
|
||||
{- Starts local pairing, at either the PairReq (initiating host) or
|
||||
- PairAck (responding host) stage.
|
||||
-
|
||||
- Displays an alert, and starts a thread sending the pairing message,
|
||||
- which will continue running until the other host responds, or until
|
||||
- canceled by the user. If canceled by the user, runs the oncancel action.
|
||||
-
|
||||
- Redirects to the pairing in progress page.
|
||||
-}
|
||||
startLocalPairing :: PairStage -> IO () -> (AlertButton -> Alert) -> Maybe UUID -> Text -> Secret -> Widget
|
||||
startLocalPairing stage oncancel alert muuid displaysecret secret = do
|
||||
urlrender <- lift getUrlRender
|
||||
reldir <- fromJust . relDir <$> lift getYesod
|
||||
|
||||
sendrequests <- lift $ liftAssistant $ asIO2 $ mksendrequests urlrender
|
||||
{- Generating a ssh key pair can take a while, so do it in the
|
||||
- background. -}
|
||||
thread <- lift $ liftAssistant $ asIO $ do
|
||||
keypair <- liftIO $ genSshKeyPair
|
||||
pairdata <- liftIO $ PairData
|
||||
<$> getHostname
|
||||
<*> myUserName
|
||||
<*> pure reldir
|
||||
<*> pure (sshPubKey keypair)
|
||||
<*> (maybe genUUID return muuid)
|
||||
let sender = multicastPairMsg Nothing secret pairdata
|
||||
let pip = PairingInProgress secret Nothing keypair pairdata stage
|
||||
startSending pip stage $ sendrequests sender
|
||||
void $ liftIO $ forkIO thread
|
||||
|
||||
lift $ redirect $ RunningLocalPairR $ toSecretReminder displaysecret
|
||||
where
|
||||
{- Sends pairing messages until the thread is killed,
|
||||
- and shows an activity alert while doing it.
|
||||
-
|
||||
- The cancel button returns the user to the HomeR. This is
|
||||
- not ideal, but they have to be sent somewhere, and could
|
||||
- have been on a page specific to the in-process pairing
|
||||
- that just stopped, so can't go back there.
|
||||
-}
|
||||
mksendrequests urlrender sender _stage = do
|
||||
tid <- liftIO myThreadId
|
||||
let selfdestruct = AlertButton
|
||||
{ buttonLabel = "Cancel"
|
||||
, buttonUrl = urlrender HomeR
|
||||
, buttonAction = Just $ const $ do
|
||||
oncancel
|
||||
killThread tid
|
||||
}
|
||||
alertDuring (alert selfdestruct) $ liftIO $ do
|
||||
_ <- E.try (sender stage) :: IO (Either E.SomeException ())
|
||||
return ()
|
||||
|
||||
data InputSecret = InputSecret { secretText :: Maybe Text }
|
||||
|
||||
{- If a PairMsg is passed in, ensures that the user enters a secret
|
||||
- that can validate it. -}
|
||||
promptSecret :: Maybe PairMsg -> (Text -> Secret -> Widget) -> Handler RepHtml
|
||||
promptSecret msg cont = pairPage $ do
|
||||
((result, form), enctype) <- lift $
|
||||
runFormGet $ renderBootstrap $
|
||||
InputSecret <$> aopt textField "Secret phrase" Nothing
|
||||
case result of
|
||||
FormSuccess v -> do
|
||||
let rawsecret = fromMaybe "" $ secretText v
|
||||
let secret = toSecret rawsecret
|
||||
case msg of
|
||||
Nothing -> case secretProblem secret of
|
||||
Nothing -> cont rawsecret secret
|
||||
Just problem ->
|
||||
showform form enctype $ Just problem
|
||||
Just m ->
|
||||
if verify (fromPairMsg m) secret
|
||||
then cont rawsecret secret
|
||||
else showform form enctype $ Just
|
||||
"That's not the right secret phrase."
|
||||
_ -> showform form enctype Nothing
|
||||
where
|
||||
showform form enctype mproblem = do
|
||||
let start = isNothing msg
|
||||
let badphrase = isJust mproblem
|
||||
let problem = fromMaybe "" mproblem
|
||||
let (username, hostname) = maybe ("", "")
|
||||
(\(_, v, a) -> (T.pack $ remoteUserName v, T.pack $ fromMaybe (showAddr a) (remoteHostName v)))
|
||||
(verifiableVal . fromPairMsg <$> msg)
|
||||
u <- T.pack <$> liftIO myUserName
|
||||
let sameusername = username == u
|
||||
$(widgetFile "configurators/pairing/local/prompt")
|
||||
|
||||
{- This counts unicode characters as more than one character,
|
||||
- but that's ok; they *do* provide additional entropy. -}
|
||||
secretProblem :: Secret -> Maybe Text
|
||||
secretProblem s
|
||||
| B.null s = Just "The secret phrase cannot be left empty. (Remember that punctuation and white space is ignored.)"
|
||||
| B.length s < 7 = Just "Enter a longer secret phrase, at least 6 characters, but really, a phrase is best! This is not a password you'll need to enter every day."
|
||||
| s == toSecret sampleQuote = Just "Speaking of foolishness, don't paste in the example I gave. Enter a different phrase, please!"
|
||||
| otherwise = Nothing
|
||||
|
||||
toSecret :: Text -> Secret
|
||||
toSecret s = B.fromChunks [T.encodeUtf8 $ T.toLower $ T.filter isAlphaNum s]
|
||||
|
||||
{- From Dickens -}
|
||||
sampleQuote :: Text
|
||||
sampleQuote = T.unwords
|
||||
[ "It was the best of times,"
|
||||
, "it was the worst of times,"
|
||||
, "it was the age of wisdom,"
|
||||
, "it was the age of foolishness."
|
||||
]
|
||||
|
||||
#else
|
||||
|
||||
#endif
|
||||
|
||||
pairPage :: Widget -> Handler RepHtml
|
||||
pairPage = page "Pairing" (Just Configuration)
|
||||
|
||||
noPairing :: Text -> Handler RepHtml
|
||||
noPairing pairingtype = pairPage $
|
||||
$(widgetFile "configurators/pairing/disabled")
|
98
Assistant/WebApp/Configurators/Preferences.hs
Normal file
98
Assistant/WebApp/Configurators/Preferences.hs
Normal file
|
@ -0,0 +1,98 @@
|
|||
{- git-annex assistant general preferences
|
||||
-
|
||||
- Copyright 2013 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU AGPL version 3 or higher.
|
||||
-}
|
||||
|
||||
{-# LANGUAGE QuasiQuotes, TemplateHaskell, OverloadedStrings #-}
|
||||
|
||||
module Assistant.WebApp.Configurators.Preferences (
|
||||
getPreferencesR
|
||||
) where
|
||||
|
||||
import Assistant.WebApp.Common
|
||||
import qualified Annex
|
||||
import qualified Git
|
||||
import Config
|
||||
import Locations.UserConfig
|
||||
import Utility.DataUnits
|
||||
|
||||
import qualified Data.Text as T
|
||||
import System.Log.Logger
|
||||
|
||||
data PrefsForm = PrefsForm
|
||||
{ diskReserve :: Text
|
||||
, numCopies :: Int
|
||||
, autoStart :: Bool
|
||||
, debugEnabled :: Bool
|
||||
}
|
||||
|
||||
prefsAForm :: PrefsForm -> AForm WebApp WebApp PrefsForm
|
||||
prefsAForm def = PrefsForm
|
||||
<$> areq (storageField `withNote` diskreservenote)
|
||||
"Disk reserve" (Just $ diskReserve def)
|
||||
<*> areq (positiveIntField `withNote` numcopiesnote)
|
||||
"Number of copies" (Just $ numCopies def)
|
||||
<*> areq (checkBoxField `withNote` autostartnote)
|
||||
"Auto start" (Just $ autoStart def)
|
||||
<*> areq (checkBoxField `withNote` debugnote)
|
||||
"Enable debug logging" (Just $ debugEnabled def)
|
||||
where
|
||||
diskreservenote = [whamlet|<br>Avoid downloading files from other repositories when there is too little free disk space.|]
|
||||
numcopiesnote = [whamlet|<br>Only drop a file after verifying that other repositories contain this many copies.|]
|
||||
debugnote = [whamlet|<a href="@{LogR}">View Log</a>|]
|
||||
autostartnote = [whamlet|Start the git-annex assistant at boot or on login.|]
|
||||
|
||||
positiveIntField = check isPositive intField
|
||||
where
|
||||
isPositive i
|
||||
| i > 0 = Right i
|
||||
| otherwise = Left notPositive
|
||||
notPositive :: Text
|
||||
notPositive = "This should be 1 or more!"
|
||||
|
||||
storageField = check validStorage textField
|
||||
where
|
||||
validStorage t
|
||||
| T.null t = Right t
|
||||
| otherwise = case readSize dataUnits $ T.unpack t of
|
||||
Nothing -> Left badParse
|
||||
Just _ -> Right t
|
||||
badParse :: Text
|
||||
badParse = "Parse error. Expected something like \"100 megabytes\" or \"2 gb\""
|
||||
|
||||
getPrefs :: Annex PrefsForm
|
||||
getPrefs = PrefsForm
|
||||
<$> (T.pack . roughSize storageUnits False . annexDiskReserve <$> Annex.getGitConfig)
|
||||
<*> (annexNumCopies <$> Annex.getGitConfig)
|
||||
<*> inAutoStartFile
|
||||
<*> ((==) <$> (pure $ Just DEBUG) <*> (liftIO $ getLevel <$> getRootLogger))
|
||||
|
||||
storePrefs :: PrefsForm -> Annex ()
|
||||
storePrefs p = do
|
||||
setConfig (annexConfig "diskreserve") (T.unpack $ diskReserve p)
|
||||
setConfig (annexConfig "numcopies") (show $ numCopies p)
|
||||
unlessM ((==) <$> pure (autoStart p) <*> inAutoStartFile) $ do
|
||||
here <- fromRepo Git.repoPath
|
||||
liftIO $ if autoStart p
|
||||
then addAutoStartFile here
|
||||
else removeAutoStartFile here
|
||||
liftIO $ updateGlobalLogger rootLoggerName $ setLevel $
|
||||
if debugEnabled p then DEBUG else WARNING
|
||||
|
||||
getPreferencesR :: Handler RepHtml
|
||||
getPreferencesR = page "Preferences" (Just Configuration) $ do
|
||||
((result, form), enctype) <- lift $ do
|
||||
current <- liftAnnex getPrefs
|
||||
runFormGet $ renderBootstrap $ prefsAForm current
|
||||
case result of
|
||||
FormSuccess new -> lift $ do
|
||||
liftAnnex $ storePrefs new
|
||||
redirect ConfigurationR
|
||||
_ -> $(widgetFile "configurators/preferences")
|
||||
|
||||
inAutoStartFile :: Annex Bool
|
||||
inAutoStartFile = do
|
||||
here <- fromRepo Git.repoPath
|
||||
any (`equalFilePath` here) <$> liftIO readAutoStartFile
|
353
Assistant/WebApp/Configurators/Ssh.hs
Normal file
353
Assistant/WebApp/Configurators/Ssh.hs
Normal file
|
@ -0,0 +1,353 @@
|
|||
{- git-annex assistant webapp configurator for ssh-based remotes
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU AGPL version 3 or higher.
|
||||
-}
|
||||
|
||||
{-# LANGUAGE TypeFamilies, QuasiQuotes, MultiParamTypeClasses, TemplateHaskell, OverloadedStrings, RankNTypes #-}
|
||||
|
||||
module Assistant.WebApp.Configurators.Ssh where
|
||||
|
||||
import Assistant.WebApp.Common
|
||||
import Assistant.Ssh
|
||||
import Assistant.MakeRemote
|
||||
import Utility.Rsync (rsyncUrlIsShell)
|
||||
import Logs.Remote
|
||||
import Remote
|
||||
import Logs.PreferredContent
|
||||
import Types.StandardGroups
|
||||
import Utility.UserInfo
|
||||
|
||||
import qualified Data.Text as T
|
||||
import qualified Data.Map as M
|
||||
import Network.Socket
|
||||
|
||||
sshConfigurator :: Widget -> Handler RepHtml
|
||||
sshConfigurator = page "Add a remote server" (Just Configuration)
|
||||
|
||||
data SshInput = SshInput
|
||||
{ inputHostname :: Maybe Text
|
||||
, inputUsername :: Maybe Text
|
||||
, inputDirectory :: Maybe Text
|
||||
, inputPort :: Int
|
||||
}
|
||||
deriving (Show)
|
||||
|
||||
{- SshInput is only used for applicative form prompting, this converts
|
||||
- the result of such a form into a SshData. -}
|
||||
mkSshData :: SshInput -> SshData
|
||||
mkSshData s = SshData
|
||||
{ sshHostName = fromMaybe "" $ inputHostname s
|
||||
, sshUserName = inputUsername s
|
||||
, sshDirectory = fromMaybe "" $ inputDirectory s
|
||||
, sshRepoName = genSshRepoName
|
||||
(T.unpack $ fromJust $ inputHostname s)
|
||||
(maybe "" T.unpack $ inputDirectory s)
|
||||
, sshPort = inputPort s
|
||||
, needsPubKey = False
|
||||
, rsyncOnly = False
|
||||
}
|
||||
|
||||
sshInputAForm :: (Field WebApp WebApp Text) -> SshInput -> AForm WebApp WebApp SshInput
|
||||
sshInputAForm hostnamefield def = SshInput
|
||||
<$> aopt check_hostname "Host name" (Just $ inputHostname def)
|
||||
<*> aopt check_username "User name" (Just $ inputUsername def)
|
||||
<*> aopt textField "Directory" (Just $ Just $ fromMaybe (T.pack gitAnnexAssistantDefaultDir) $ inputDirectory def)
|
||||
<*> areq intField "Port" (Just $ inputPort def)
|
||||
where
|
||||
check_hostname = checkM (liftIO . checkdns) hostnamefield
|
||||
checkdns t = do
|
||||
let h = T.unpack t
|
||||
r <- catchMaybeIO $ getAddrInfo canonname (Just h) Nothing
|
||||
return $ case catMaybes . map addrCanonName <$> r of
|
||||
-- canonicalize input hostname if it had no dot
|
||||
Just (fullname:_)
|
||||
| '.' `elem` h -> Right t
|
||||
| otherwise -> Right $ T.pack fullname
|
||||
Just [] -> Right t
|
||||
Nothing -> Left bad_hostname
|
||||
canonname = Just $ defaultHints { addrFlags = [AI_CANONNAME] }
|
||||
|
||||
check_username = checkBool (all (`notElem` "/:@ \t") . T.unpack)
|
||||
bad_username textField
|
||||
|
||||
bad_hostname = "cannot resolve host name" :: Text
|
||||
bad_username = "bad user name" :: Text
|
||||
|
||||
data ServerStatus
|
||||
= UntestedServer
|
||||
| UnusableServer Text -- reason why it's not usable
|
||||
| UsableRsyncServer
|
||||
| UsableSshInput
|
||||
deriving (Eq)
|
||||
|
||||
usable :: ServerStatus -> Bool
|
||||
usable UntestedServer = False
|
||||
usable (UnusableServer _) = False
|
||||
usable UsableRsyncServer = True
|
||||
usable UsableSshInput = True
|
||||
|
||||
getAddSshR :: Handler RepHtml
|
||||
getAddSshR = sshConfigurator $ do
|
||||
u <- liftIO $ T.pack <$> myUserName
|
||||
((result, form), enctype) <- lift $
|
||||
runFormGet $ renderBootstrap $ sshInputAForm textField $
|
||||
SshInput Nothing (Just u) Nothing 22
|
||||
case result of
|
||||
FormSuccess sshinput -> do
|
||||
s <- liftIO $ testServer sshinput
|
||||
case s of
|
||||
Left status -> showform form enctype status
|
||||
Right sshdata -> lift $ redirect $ ConfirmSshR sshdata
|
||||
_ -> showform form enctype UntestedServer
|
||||
where
|
||||
showform form enctype status = $(widgetFile "configurators/ssh/add")
|
||||
|
||||
{- To enable an existing rsync special remote, parse the SshInput from
|
||||
- its rsyncurl, and display a form whose only real purpose is to check
|
||||
- if ssh public keys need to be set up. From there, we can proceed with
|
||||
- the usual repo setup; all that code is idempotent.
|
||||
-
|
||||
- Note that there's no EnableSshR because ssh remotes are not special
|
||||
- remotes, and so their configuration is not shared between repositories.
|
||||
-}
|
||||
getEnableRsyncR :: UUID -> Handler RepHtml
|
||||
getEnableRsyncR u = do
|
||||
m <- fromMaybe M.empty . M.lookup u <$> liftAnnex readRemoteLog
|
||||
case (parseSshRsyncUrl =<< M.lookup "rsyncurl" m, M.lookup "name" m) of
|
||||
(Just sshinput, Just reponame) -> sshConfigurator $ do
|
||||
((result, form), enctype) <- lift $
|
||||
runFormGet $ renderBootstrap $ sshInputAForm textField sshinput
|
||||
case result of
|
||||
FormSuccess sshinput'
|
||||
| isRsyncNet (inputHostname sshinput') ->
|
||||
void $ lift $ makeRsyncNet sshinput' reponame (const noop)
|
||||
| otherwise -> do
|
||||
s <- liftIO $ testServer sshinput'
|
||||
case s of
|
||||
Left status -> showform form enctype status
|
||||
Right sshdata -> enable sshdata
|
||||
{ sshRepoName = reponame }
|
||||
_ -> showform form enctype UntestedServer
|
||||
_ -> redirect AddSshR
|
||||
where
|
||||
showform form enctype status = do
|
||||
description <- lift $ liftAnnex $
|
||||
T.pack . concat <$> prettyListUUIDs [u]
|
||||
$(widgetFile "configurators/ssh/enable")
|
||||
enable sshdata = lift $ redirect $ ConfirmSshR $
|
||||
sshdata { rsyncOnly = True }
|
||||
|
||||
{- Converts a rsyncurl value to a SshInput. But only if it's a ssh rsync
|
||||
- url; rsync:// urls or bare path names are not supported.
|
||||
-
|
||||
- The hostname is stored mangled in the remote log for rsync special
|
||||
- remotes configured by this webapp. So that mangling has to reversed
|
||||
- here to get back the original hostname.
|
||||
-}
|
||||
parseSshRsyncUrl :: String -> Maybe SshInput
|
||||
parseSshRsyncUrl u
|
||||
| not (rsyncUrlIsShell u) = Nothing
|
||||
| otherwise = Just $ SshInput
|
||||
{ inputHostname = val $ unMangleSshHostName host
|
||||
, inputUsername = if null user then Nothing else val user
|
||||
, inputDirectory = val dir
|
||||
, inputPort = 22
|
||||
}
|
||||
where
|
||||
val = Just . T.pack
|
||||
(userhost, dir) = separate (== ':') u
|
||||
(user, host) = if '@' `elem` userhost
|
||||
then separate (== '@') userhost
|
||||
else (userhost, "")
|
||||
|
||||
{- Test if we can ssh into the server.
|
||||
-
|
||||
- Two probe attempts are made. First, try sshing in using the existing
|
||||
- configuration, but don't let ssh prompt for any password. If
|
||||
- passwordless login is already enabled, use it. Otherwise,
|
||||
- a special ssh key will need to be generated just for this server.
|
||||
-
|
||||
- Once logged into the server, probe to see if git-annex-shell is
|
||||
- available, or rsync. Note that, ~/.ssh/git-annex-shell may be
|
||||
- present, while git-annex-shell is not in PATH.
|
||||
-}
|
||||
testServer :: SshInput -> IO (Either ServerStatus SshData)
|
||||
testServer (SshInput { inputHostname = Nothing }) = return $
|
||||
Left $ UnusableServer "Please enter a host name."
|
||||
testServer sshinput@(SshInput { inputHostname = Just hn }) = do
|
||||
status <- probe [sshOpt "NumberOfPasswordPrompts" "0"]
|
||||
if usable status
|
||||
then ret status False
|
||||
else do
|
||||
status' <- probe []
|
||||
if usable status'
|
||||
then ret status' True
|
||||
else return $ Left status'
|
||||
where
|
||||
ret status needspubkey = return $ Right $ (mkSshData sshinput)
|
||||
{ needsPubKey = needspubkey
|
||||
, rsyncOnly = status == UsableRsyncServer
|
||||
}
|
||||
probe extraopts = do
|
||||
let remotecommand = join ";"
|
||||
[ report "loggedin"
|
||||
, checkcommand "git-annex-shell"
|
||||
, checkcommand "rsync"
|
||||
, checkcommand shim
|
||||
]
|
||||
knownhost <- knownHost hn
|
||||
let sshopts = filter (not . null) $ extraopts ++
|
||||
{- If this is an already known host, let
|
||||
- ssh check it as usual.
|
||||
- Otherwise, trust the host key. -}
|
||||
[ if knownhost then "" else sshOpt "StrictHostKeyChecking" "no"
|
||||
, "-n" -- don't read from stdin
|
||||
, "-p", show (inputPort sshinput)
|
||||
, genSshHost
|
||||
(fromJust $ inputHostname sshinput)
|
||||
(inputUsername sshinput)
|
||||
, remotecommand
|
||||
]
|
||||
parsetranscript . fst <$> sshTranscript sshopts Nothing
|
||||
parsetranscript s
|
||||
| reported "git-annex-shell" = UsableSshInput
|
||||
| reported shim = UsableSshInput
|
||||
| reported "rsync" = UsableRsyncServer
|
||||
| reported "loggedin" = UnusableServer
|
||||
"Neither rsync nor git-annex are installed on the server. Perhaps you should go install them?"
|
||||
| otherwise = UnusableServer $ T.pack $
|
||||
"Failed to ssh to the server. Transcript: " ++ s
|
||||
where
|
||||
reported r = token r `isInfixOf` s
|
||||
checkcommand c = "if which " ++ c ++ "; then " ++ report c ++ "; fi"
|
||||
token r = "git-annex-probe " ++ r
|
||||
report r = "echo " ++ token r
|
||||
shim = "~/.ssh/git-annex-shell"
|
||||
|
||||
{- Runs a ssh command; if it fails shows the user the transcript,
|
||||
- and if it succeeds, runs an action. -}
|
||||
sshSetup :: [String] -> String -> Handler RepHtml -> Handler RepHtml
|
||||
sshSetup opts input a = do
|
||||
(transcript, ok) <- liftIO $ sshTranscript opts (Just input)
|
||||
if ok
|
||||
then a
|
||||
else showSshErr transcript
|
||||
|
||||
showSshErr :: String -> Handler RepHtml
|
||||
showSshErr msg = sshConfigurator $
|
||||
$(widgetFile "configurators/ssh/error")
|
||||
|
||||
getConfirmSshR :: SshData -> Handler RepHtml
|
||||
getConfirmSshR sshdata = sshConfigurator $
|
||||
$(widgetFile "configurators/ssh/confirm")
|
||||
|
||||
getMakeSshGitR :: SshData -> Handler RepHtml
|
||||
getMakeSshGitR = makeSsh False setupGroup
|
||||
|
||||
getMakeSshRsyncR :: SshData -> Handler RepHtml
|
||||
getMakeSshRsyncR = makeSsh True setupGroup
|
||||
|
||||
makeSsh :: Bool -> (Remote -> Handler ()) -> SshData -> Handler RepHtml
|
||||
makeSsh rsync setup sshdata
|
||||
| needsPubKey sshdata = do
|
||||
keypair <- liftIO genSshKeyPair
|
||||
sshdata' <- liftIO $ setupSshKeyPair keypair sshdata
|
||||
makeSsh' rsync setup sshdata' (Just keypair)
|
||||
| sshPort sshdata /= 22 = do
|
||||
sshdata' <- liftIO $ setSshConfig sshdata []
|
||||
makeSsh' rsync setup sshdata' Nothing
|
||||
| otherwise = makeSsh' rsync setup sshdata Nothing
|
||||
|
||||
makeSsh' :: Bool -> (Remote -> Handler ()) -> SshData -> Maybe SshKeyPair -> Handler RepHtml
|
||||
makeSsh' rsync setup sshdata keypair =
|
||||
sshSetup [sshhost, remoteCommand] "" $
|
||||
makeSshRepo rsync setup sshdata
|
||||
where
|
||||
sshhost = genSshHost (sshHostName sshdata) (sshUserName sshdata)
|
||||
remotedir = T.unpack $ sshDirectory sshdata
|
||||
remoteCommand = join "&&" $ catMaybes
|
||||
[ Just $ "mkdir -p " ++ shellEscape remotedir
|
||||
, Just $ "cd " ++ shellEscape remotedir
|
||||
, if rsync then Nothing else Just "git init --bare --shared"
|
||||
, if rsync then Nothing else Just "git annex init"
|
||||
, if needsPubKey sshdata
|
||||
then addAuthorizedKeysCommand (rsyncOnly sshdata) remotedir . sshPubKey <$> keypair
|
||||
else Nothing
|
||||
]
|
||||
|
||||
makeSshRepo :: Bool -> (Remote -> Handler ()) -> SshData -> Handler RepHtml
|
||||
makeSshRepo forcersync setup sshdata = do
|
||||
r <- liftAssistant $ makeSshRemote forcersync sshdata
|
||||
setup r
|
||||
redirect $ EditNewCloudRepositoryR $ Remote.uuid r
|
||||
|
||||
getAddRsyncNetR :: Handler RepHtml
|
||||
getAddRsyncNetR = do
|
||||
((result, form), enctype) <- runFormGet $
|
||||
renderBootstrap $ sshInputAForm hostnamefield $
|
||||
SshInput Nothing Nothing Nothing 22
|
||||
let showform status = page "Add a Rsync.net repository" (Just Configuration) $
|
||||
$(widgetFile "configurators/addrsync.net")
|
||||
case result of
|
||||
FormSuccess sshinput
|
||||
| isRsyncNet (inputHostname sshinput) -> do
|
||||
let reponame = genSshRepoName "rsync.net"
|
||||
(maybe "" T.unpack $ inputDirectory sshinput)
|
||||
makeRsyncNet sshinput reponame setupGroup
|
||||
| otherwise ->
|
||||
showform $ UnusableServer
|
||||
"That is not a rsync.net host name."
|
||||
_ -> showform UntestedServer
|
||||
where
|
||||
hostnamefield = textField `withNote` help
|
||||
help = [whamlet|
|
||||
<a .btn data-toggle="collapse" data-target="#help">
|
||||
Help
|
||||
<div #help .collapse>
|
||||
<div>
|
||||
When you sign up for a Rsync.net account, you should receive an #
|
||||
email from them with the host name and user name to put here.
|
||||
<div>
|
||||
The host name will be something like "usw-s001.rsync.net", and the #
|
||||
user name something like "7491"
|
||||
|]
|
||||
|
||||
makeRsyncNet :: SshInput -> String -> (Remote -> Handler ()) -> Handler RepHtml
|
||||
makeRsyncNet sshinput reponame setup = do
|
||||
knownhost <- liftIO $ maybe (return False) knownHost (inputHostname sshinput)
|
||||
keypair <- liftIO $ genSshKeyPair
|
||||
sshdata <- liftIO $ setupSshKeyPair keypair $
|
||||
(mkSshData sshinput)
|
||||
{ sshRepoName = reponame
|
||||
, needsPubKey = True
|
||||
, rsyncOnly = True
|
||||
}
|
||||
{- I'd prefer to separate commands with && , but
|
||||
- rsync.net's shell does not support that.
|
||||
-
|
||||
- The dd method of appending to the authorized_keys file is the
|
||||
- one recommended by rsync.net documentation. I touch the file first
|
||||
- to not need to use a different method to create it.
|
||||
-}
|
||||
let remotecommand = join ";"
|
||||
[ "mkdir -p .ssh"
|
||||
, "touch .ssh/authorized_keys"
|
||||
, "dd of=.ssh/authorized_keys oflag=append conv=notrunc"
|
||||
, "mkdir -p " ++ T.unpack (sshDirectory sshdata)
|
||||
]
|
||||
let sshopts = filter (not . null)
|
||||
[ if knownhost then "" else sshOpt "StrictHostKeyChecking" "no"
|
||||
, genSshHost (sshHostName sshdata) (sshUserName sshdata)
|
||||
, remotecommand
|
||||
]
|
||||
sshSetup sshopts (sshPubKey keypair) $
|
||||
makeSshRepo True setup sshdata
|
||||
|
||||
isRsyncNet :: Maybe Text -> Bool
|
||||
isRsyncNet Nothing = False
|
||||
isRsyncNet (Just host) = ".rsync.net" `T.isSuffixOf` T.toLower host
|
||||
|
||||
setupGroup :: Remote -> Handler ()
|
||||
setupGroup r = liftAnnex $ setStandardGroup (Remote.uuid r) TransferGroup
|
129
Assistant/WebApp/Configurators/WebDAV.hs
Normal file
129
Assistant/WebApp/Configurators/WebDAV.hs
Normal file
|
@ -0,0 +1,129 @@
|
|||
{- git-annex assistant webapp configurators for WebDAV remotes
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU AGPL version 3 or higher.
|
||||
-}
|
||||
|
||||
{-# LANGUAGE CPP, TypeFamilies, QuasiQuotes, MultiParamTypeClasses, TemplateHaskell, OverloadedStrings, RankNTypes #-}
|
||||
|
||||
module Assistant.WebApp.Configurators.WebDAV where
|
||||
|
||||
import Assistant.WebApp.Common
|
||||
import Assistant.MakeRemote
|
||||
import Assistant.Sync
|
||||
#ifdef WITH_WEBDAV
|
||||
import qualified Remote.WebDAV as WebDAV
|
||||
#endif
|
||||
import qualified Remote
|
||||
import Types.Remote (RemoteConfig)
|
||||
import Types.StandardGroups
|
||||
import Logs.PreferredContent
|
||||
import Logs.Remote
|
||||
import Creds
|
||||
|
||||
import qualified Data.Text as T
|
||||
import qualified Data.Map as M
|
||||
|
||||
webDAVConfigurator :: Widget -> Handler RepHtml
|
||||
webDAVConfigurator = page "Add a WebDAV repository" (Just Configuration)
|
||||
|
||||
boxConfigurator :: Widget -> Handler RepHtml
|
||||
boxConfigurator = page "Add a Box.com repository" (Just Configuration)
|
||||
|
||||
data WebDAVInput = WebDAVInput
|
||||
{ user :: Text
|
||||
, password :: Text
|
||||
, embedCreds :: Bool
|
||||
, directory :: Text
|
||||
, enableEncryption :: EnableEncryption
|
||||
}
|
||||
|
||||
toCredPair :: WebDAVInput -> CredPair
|
||||
toCredPair input = (T.unpack $ user input, T.unpack $ password input)
|
||||
|
||||
boxComAForm :: AForm WebApp WebApp WebDAVInput
|
||||
boxComAForm = WebDAVInput
|
||||
<$> areq textField "Username or Email" Nothing
|
||||
<*> areq passwordField "Box.com Password" Nothing
|
||||
<*> areq checkBoxField "Share this account with friends?" (Just True)
|
||||
<*> areq textField "Directory" (Just "annex")
|
||||
<*> enableEncryptionField
|
||||
|
||||
webDAVCredsAForm :: AForm WebApp WebApp WebDAVInput
|
||||
webDAVCredsAForm = WebDAVInput
|
||||
<$> areq textField "Username or Email" Nothing
|
||||
<*> areq passwordField "Password" Nothing
|
||||
<*> pure False
|
||||
<*> pure T.empty
|
||||
<*> pure NoEncryption -- not used!
|
||||
|
||||
getAddBoxComR :: Handler RepHtml
|
||||
#ifdef WITH_WEBDAV
|
||||
getAddBoxComR = boxConfigurator $ do
|
||||
((result, form), enctype) <- lift $
|
||||
runFormGet $ renderBootstrap boxComAForm
|
||||
case result of
|
||||
FormSuccess input -> lift $
|
||||
makeWebDavRemote "box.com" (toCredPair input) setgroup $ M.fromList
|
||||
[ configureEncryption $ enableEncryption input
|
||||
, ("embedcreds", if embedCreds input then "yes" else "no")
|
||||
, ("type", "webdav")
|
||||
, ("url", "https://www.box.com/dav/" ++ T.unpack (directory input))
|
||||
-- Box.com has a max file size of 100 mb, but
|
||||
-- using smaller chunks has better memory
|
||||
-- performance.
|
||||
, ("chunksize", "10mb")
|
||||
]
|
||||
_ -> $(widgetFile "configurators/addbox.com")
|
||||
where
|
||||
setgroup r = liftAnnex $
|
||||
setStandardGroup (Remote.uuid r) TransferGroup
|
||||
#else
|
||||
getAddBoxComR = error "WebDAV not supported by this build"
|
||||
#endif
|
||||
|
||||
getEnableWebDAVR :: UUID -> Handler RepHtml
|
||||
#ifdef WITH_WEBDAV
|
||||
getEnableWebDAVR uuid = do
|
||||
m <- liftAnnex readRemoteLog
|
||||
let c = fromJust $ M.lookup uuid m
|
||||
let name = fromJust $ M.lookup "name" c
|
||||
let url = fromJust $ M.lookup "url" c
|
||||
mcreds <- liftAnnex $
|
||||
getRemoteCredPairFor "webdav" c (WebDAV.davCreds uuid)
|
||||
case mcreds of
|
||||
Just creds -> webDAVConfigurator $ lift $
|
||||
makeWebDavRemote name creds (const noop) M.empty
|
||||
Nothing
|
||||
| "box.com/" `isInfixOf` url ->
|
||||
boxConfigurator $ showform name url
|
||||
| otherwise ->
|
||||
webDAVConfigurator $ showform name url
|
||||
where
|
||||
showform name url = do
|
||||
((result, form), enctype) <- lift $
|
||||
runFormGet $ renderBootstrap webDAVCredsAForm
|
||||
case result of
|
||||
FormSuccess input -> lift $
|
||||
makeWebDavRemote name (toCredPair input) (const noop) M.empty
|
||||
_ -> do
|
||||
description <- lift $ liftAnnex $
|
||||
T.pack . concat <$> Remote.prettyListUUIDs [uuid]
|
||||
$(widgetFile "configurators/enablewebdav")
|
||||
#else
|
||||
getEnableWebDAVR _ = error "WebDAV not supported by this build"
|
||||
#endif
|
||||
|
||||
#ifdef WITH_WEBDAV
|
||||
makeWebDavRemote :: String -> CredPair -> (Remote -> Handler ()) -> RemoteConfig -> Handler ()
|
||||
makeWebDavRemote name creds setup config = do
|
||||
remotename <- liftAnnex $ fromRepo $ uniqueRemoteName name 0
|
||||
liftIO $ WebDAV.setCredsEnv creds
|
||||
r <- liftAnnex $ addRemote $ do
|
||||
makeSpecialRemote name WebDAV.remote config
|
||||
return remotename
|
||||
setup r
|
||||
liftAssistant $ syncNewRemote r
|
||||
redirect $ EditNewCloudRepositoryR $ Remote.uuid r
|
||||
#endif
|
145
Assistant/WebApp/Configurators/XMPP.hs
Normal file
145
Assistant/WebApp/Configurators/XMPP.hs
Normal file
|
@ -0,0 +1,145 @@
|
|||
{- git-annex assistant XMPP configuration
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU AGPL version 3 or higher.
|
||||
-}
|
||||
|
||||
{-# LANGUAGE TypeFamilies, QuasiQuotes, MultiParamTypeClasses, TemplateHaskell, OverloadedStrings #-}
|
||||
{-# LANGUAGE CPP #-}
|
||||
|
||||
module Assistant.WebApp.Configurators.XMPP where
|
||||
|
||||
import Assistant.WebApp.Common
|
||||
import Assistant.WebApp.Notifications
|
||||
import Utility.NotificationBroadcaster
|
||||
#ifdef WITH_XMPP
|
||||
import Assistant.XMPP.Client
|
||||
import Assistant.XMPP.Buddies
|
||||
import Assistant.Types.Buddies
|
||||
import Assistant.NetMessager
|
||||
import Assistant.Alert
|
||||
import Assistant.DaemonStatus
|
||||
import Utility.SRV
|
||||
#endif
|
||||
|
||||
#ifdef WITH_XMPP
|
||||
import Network
|
||||
import Network.Protocol.XMPP
|
||||
import qualified Data.Text as T
|
||||
import Control.Exception (SomeException)
|
||||
#endif
|
||||
|
||||
{- Displays an alert suggesting to configure XMPP, with a button. -}
|
||||
xmppNeeded :: Handler ()
|
||||
#ifdef WITH_XMPP
|
||||
xmppNeeded = whenM (isNothing <$> liftAnnex getXMPPCreds) $ do
|
||||
urlrender <- getUrlRender
|
||||
void $ liftAssistant $ do
|
||||
close <- asIO1 removeAlert
|
||||
addAlert $ xmppNeededAlert $ AlertButton
|
||||
{ buttonLabel = "Configure a Jabber account"
|
||||
, buttonUrl = urlrender XMPPR
|
||||
, buttonAction = Just close
|
||||
}
|
||||
#else
|
||||
xmppNeeded = return ()
|
||||
#endif
|
||||
|
||||
#ifdef WITH_XMPP
|
||||
getXMPPR :: Handler RepHtml
|
||||
getXMPPR = xmppPage $ do
|
||||
((result, form), enctype) <- lift $ do
|
||||
oldcreds <- liftAnnex getXMPPCreds
|
||||
runFormGet $ renderBootstrap $ xmppAForm $
|
||||
creds2Form <$> oldcreds
|
||||
let showform problem = $(widgetFile "configurators/xmpp")
|
||||
case result of
|
||||
FormSuccess f -> either (showform . Just . show) (lift . storecreds)
|
||||
=<< liftIO (validateForm f)
|
||||
_ -> showform Nothing
|
||||
where
|
||||
storecreds creds = do
|
||||
void $ liftAnnex $ setXMPPCreds creds
|
||||
liftAssistant notifyNetMessagerRestart
|
||||
redirect StartXMPPPairR
|
||||
#else
|
||||
getXMPPR = xmppPage $
|
||||
$(widgetFile "configurators/xmpp/disabled")
|
||||
#endif
|
||||
|
||||
{- Called by client to get a list of buddies.
|
||||
-
|
||||
- Returns a div, which will be inserted into the calling page.
|
||||
-}
|
||||
getBuddyListR :: NotificationId -> Handler RepHtml
|
||||
getBuddyListR nid = do
|
||||
waitNotifier getBuddyListBroadcaster nid
|
||||
|
||||
p <- widgetToPageContent buddyListDisplay
|
||||
hamletToRepHtml $ [hamlet|^{pageBody p}|]
|
||||
|
||||
buddyListDisplay :: Widget
|
||||
buddyListDisplay = do
|
||||
autoUpdate ident NotifierBuddyListR (10 :: Int) (10 :: Int)
|
||||
#ifdef WITH_XMPP
|
||||
buddies <- lift $ liftAssistant $ do
|
||||
rs <- filter isXMPPRemote . syncGitRemotes <$> getDaemonStatus
|
||||
let pairedwith = catMaybes $ map (parseJID . getXMPPClientID) rs
|
||||
catMaybes . map (buddySummary pairedwith)
|
||||
<$> (getBuddyList <<~ buddyList)
|
||||
$(widgetFile "configurators/xmpp/buddylist")
|
||||
#endif
|
||||
where
|
||||
ident = "buddylist"
|
||||
|
||||
#ifdef WITH_XMPP
|
||||
|
||||
data XMPPForm = XMPPForm
|
||||
{ formJID :: Text
|
||||
, formPassword :: Text }
|
||||
|
||||
creds2Form :: XMPPCreds -> XMPPForm
|
||||
creds2Form c = XMPPForm (xmppJID c) (xmppPassword c)
|
||||
|
||||
xmppAForm :: (Maybe XMPPForm) -> AForm WebApp WebApp XMPPForm
|
||||
xmppAForm def = XMPPForm
|
||||
<$> areq jidField "Jabber address" (formJID <$> def)
|
||||
<*> areq passwordField "Password" Nothing
|
||||
|
||||
jidField :: Field WebApp WebApp Text
|
||||
jidField = checkBool (isJust . parseJID) bad textField
|
||||
where
|
||||
bad :: Text
|
||||
bad = "This should look like an email address.."
|
||||
|
||||
validateForm :: XMPPForm -> IO (Either SomeException XMPPCreds)
|
||||
validateForm f = do
|
||||
let jid = fromMaybe (error "bad JID") $ parseJID (formJID f)
|
||||
let domain = T.unpack $ strDomain $ jidDomain jid
|
||||
hostports <- lookupSRV $ mkSRVTcp "xmpp-client" domain
|
||||
let username = fromMaybe "" (strNode <$> jidNode jid)
|
||||
case hostports of
|
||||
((h, PortNumber p):_) -> testXMPP $ XMPPCreds
|
||||
{ xmppUsername = username
|
||||
, xmppPassword = formPassword f
|
||||
, xmppHostname = h
|
||||
, xmppPort = fromIntegral p
|
||||
, xmppJID = formJID f
|
||||
}
|
||||
_ -> testXMPP $ XMPPCreds
|
||||
{ xmppUsername = username
|
||||
, xmppPassword = formPassword f
|
||||
, xmppHostname = T.unpack $ strDomain $ jidDomain jid
|
||||
, xmppPort = 5222
|
||||
, xmppJID = formJID f
|
||||
}
|
||||
|
||||
testXMPP :: XMPPCreds -> IO (Either SomeException XMPPCreds)
|
||||
testXMPP creds = either Left (const $ Right creds)
|
||||
<$> connectXMPP creds (const noop)
|
||||
|
||||
#endif
|
||||
|
||||
xmppPage :: Widget -> Handler RepHtml
|
||||
xmppPage = page "Jabber" (Just Configuration)
|
58
Assistant/WebApp/Control.hs
Normal file
58
Assistant/WebApp/Control.hs
Normal file
|
@ -0,0 +1,58 @@
|
|||
{- git-annex assistant webapp control
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU AGPL version 3 or higher.
|
||||
-}
|
||||
|
||||
{-# LANGUAGE CPP, TypeFamilies, QuasiQuotes, MultiParamTypeClasses, TemplateHaskell, OverloadedStrings, RankNTypes #-}
|
||||
|
||||
module Assistant.WebApp.Control where
|
||||
|
||||
import Assistant.WebApp.Common
|
||||
import Locations.UserConfig
|
||||
import Utility.LogFile
|
||||
import Assistant.DaemonStatus
|
||||
|
||||
import Control.Concurrent
|
||||
import System.Posix (getProcessID, signalProcess, sigTERM)
|
||||
import qualified Data.Map as M
|
||||
|
||||
getShutdownR :: Handler RepHtml
|
||||
getShutdownR = page "Shutdown" Nothing $
|
||||
$(widgetFile "control/shutdown")
|
||||
|
||||
getShutdownConfirmedR :: Handler RepHtml
|
||||
getShutdownConfirmedR = page "Shutdown" Nothing $ do
|
||||
{- Wait 2 seconds before shutting down, to give the web page time
|
||||
- to display. -}
|
||||
void $ liftIO $ forkIO $ do
|
||||
threadDelay 2000000
|
||||
signalProcess sigTERM =<< getProcessID
|
||||
$(widgetFile "control/shutdownconfirmed")
|
||||
|
||||
{- Quite a hack, and doesn't redirect the browser window. -}
|
||||
getRestartR :: Handler RepHtml
|
||||
getRestartR = page "Restarting" Nothing $ do
|
||||
void $ liftIO $ forkIO $ do
|
||||
threadDelay 2000000
|
||||
program <- readProgramFile
|
||||
unlessM (boolSystem "sh" [Param "-c", Param $ restartcommand program]) $
|
||||
error "restart failed"
|
||||
$(widgetFile "control/restarting")
|
||||
where
|
||||
restartcommand program = program ++ " assistant --stop; " ++
|
||||
program ++ " webapp"
|
||||
|
||||
getRestartThreadR :: ThreadName -> Handler ()
|
||||
getRestartThreadR name = do
|
||||
m <- liftAssistant $ startedThreads <$> getDaemonStatus
|
||||
liftIO $ maybe noop snd $ M.lookup name m
|
||||
redirectBack
|
||||
|
||||
getLogR :: Handler RepHtml
|
||||
getLogR = page "Logs" Nothing $ do
|
||||
logfile <- lift $ liftAnnex $ fromRepo gitAnnexLogFile
|
||||
logs <- liftIO $ listLogs logfile
|
||||
logcontent <- liftIO $ concat <$> mapM readFile logs
|
||||
$(widgetFile "control/log")
|
150
Assistant/WebApp/DashBoard.hs
Normal file
150
Assistant/WebApp/DashBoard.hs
Normal file
|
@ -0,0 +1,150 @@
|
|||
{- git-annex assistant webapp dashboard
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU AGPL version 3 or higher.
|
||||
-}
|
||||
|
||||
{-# LANGUAGE CPP, TypeFamilies, QuasiQuotes, MultiParamTypeClasses, TemplateHaskell, OverloadedStrings, RankNTypes #-}
|
||||
|
||||
module Assistant.WebApp.DashBoard where
|
||||
|
||||
import Assistant.WebApp.Common
|
||||
import Assistant.WebApp.Utility
|
||||
import Assistant.WebApp.Notifications
|
||||
import Assistant.WebApp.Configurators
|
||||
import Assistant.TransferQueue
|
||||
import Utility.NotificationBroadcaster
|
||||
import Logs.Transfer
|
||||
import Utility.Percentage
|
||||
import Utility.DataUnits
|
||||
import Types.Key
|
||||
import qualified Remote
|
||||
import qualified Git
|
||||
|
||||
import Text.Hamlet
|
||||
import qualified Data.Map as M
|
||||
import Control.Concurrent
|
||||
|
||||
{- A display of currently running and queued transfers.
|
||||
-
|
||||
- Or, if there have never been any this run, an intro display. -}
|
||||
transfersDisplay :: Bool -> Widget
|
||||
transfersDisplay warnNoScript = do
|
||||
webapp <- lift getYesod
|
||||
current <- lift $ M.toList <$> getCurrentTransfers
|
||||
queued <- lift $ take 10 <$> liftAssistant getTransferQueue
|
||||
autoUpdate ident NotifierTransfersR (10 :: Int) (10 :: Int)
|
||||
let transfers = simplifyTransfers $ current ++ queued
|
||||
if null transfers
|
||||
then ifM (lift $ showIntro <$> getWebAppState)
|
||||
( introDisplay ident
|
||||
, $(widgetFile "dashboard/transfers")
|
||||
)
|
||||
else $(widgetFile "dashboard/transfers")
|
||||
where
|
||||
ident = "transfers"
|
||||
isrunning info = not $
|
||||
transferPaused info || isNothing (startedTime info)
|
||||
|
||||
{- Simplifies a list of transfers, avoiding display of redundant
|
||||
- equivilant transfers. -}
|
||||
simplifyTransfers :: [(Transfer, TransferInfo)] -> [(Transfer, TransferInfo)]
|
||||
simplifyTransfers [] = []
|
||||
simplifyTransfers (x:[]) = [x]
|
||||
simplifyTransfers (v@(t1, _):r@((t2, _):l))
|
||||
| equivilantTransfer t1 t2 = simplifyTransfers (v:l)
|
||||
| otherwise = v : (simplifyTransfers r)
|
||||
|
||||
{- Called by client to get a display of currently in process transfers.
|
||||
-
|
||||
- Returns a div, which will be inserted into the calling page.
|
||||
-
|
||||
- Note that the head of the widget is not included, only its
|
||||
- body is. To get the widget head content, the widget is also
|
||||
- inserted onto the getHomeR page.
|
||||
-}
|
||||
getTransfersR :: NotificationId -> Handler RepHtml
|
||||
getTransfersR nid = do
|
||||
waitNotifier getTransferBroadcaster nid
|
||||
|
||||
p <- widgetToPageContent $ transfersDisplay False
|
||||
hamletToRepHtml $ [hamlet|^{pageBody p}|]
|
||||
|
||||
{- The main dashboard. -}
|
||||
dashboard :: Bool -> Widget
|
||||
dashboard warnNoScript = do
|
||||
let content = transfersDisplay warnNoScript
|
||||
$(widgetFile "dashboard/main")
|
||||
|
||||
getHomeR :: Handler RepHtml
|
||||
getHomeR = ifM (inFirstRun)
|
||||
( redirect ConfigurationR
|
||||
, page "" (Just DashBoard) $ dashboard True
|
||||
)
|
||||
|
||||
{- Used to test if the webapp is running. -}
|
||||
headHomeR :: Handler ()
|
||||
headHomeR = noop
|
||||
|
||||
{- Same as HomeR, except no autorefresh at all (and no noscript warning). -}
|
||||
getNoScriptR :: Handler RepHtml
|
||||
getNoScriptR = page "" (Just DashBoard) $ dashboard False
|
||||
|
||||
{- Same as HomeR, except with autorefreshing via meta refresh. -}
|
||||
getNoScriptAutoR :: Handler RepHtml
|
||||
getNoScriptAutoR = page "" (Just DashBoard) $ do
|
||||
let ident = NoScriptR
|
||||
let delayseconds = 3 :: Int
|
||||
let this = NoScriptAutoR
|
||||
toWidgetHead $(hamletFile $ hamletTemplate "dashboard/metarefresh")
|
||||
dashboard False
|
||||
|
||||
{- The javascript code does a post. -}
|
||||
postFileBrowserR :: Handler ()
|
||||
postFileBrowserR = void openFileBrowser
|
||||
|
||||
{- Used by non-javascript browsers, where clicking on the link actually
|
||||
- opens this page, so we redirect back to the referrer. -}
|
||||
getFileBrowserR :: Handler ()
|
||||
getFileBrowserR = whenM openFileBrowser $ redirectBack
|
||||
|
||||
{- Opens the system file browser on the repo, or, as a fallback,
|
||||
- goes to a file:// url. Returns True if it's ok to redirect away
|
||||
- from the page (ie, the system file browser was opened).
|
||||
-
|
||||
- Note that the command is opened using a different thread, to avoid
|
||||
- blocking the response to the browser on it. -}
|
||||
openFileBrowser :: Handler Bool
|
||||
openFileBrowser = do
|
||||
path <- liftAnnex $ fromRepo Git.repoPath
|
||||
ifM (liftIO $ inPath cmd <&&> inPath cmd)
|
||||
( do
|
||||
void $ liftIO $ forkIO $ void $
|
||||
boolSystem cmd [Param path]
|
||||
return True
|
||||
, do
|
||||
void $ redirect $ "file://" ++ path
|
||||
return False
|
||||
)
|
||||
where
|
||||
#ifdef darwin_HOST_OS
|
||||
cmd = "open"
|
||||
#else
|
||||
cmd = "xdg-open"
|
||||
#endif
|
||||
|
||||
{- Transfer controls. The GET is done in noscript mode and redirects back
|
||||
- to the referring page. The POST is called by javascript. -}
|
||||
getPauseTransferR :: Transfer -> Handler ()
|
||||
getPauseTransferR t = pauseTransfer t >> redirectBack
|
||||
postPauseTransferR :: Transfer -> Handler ()
|
||||
postPauseTransferR t = pauseTransfer t
|
||||
getStartTransferR :: Transfer -> Handler ()
|
||||
getStartTransferR t = startTransfer t >> redirectBack
|
||||
postStartTransferR :: Transfer -> Handler ()
|
||||
postStartTransferR t = startTransfer t
|
||||
getCancelTransferR :: Transfer -> Handler ()
|
||||
getCancelTransferR t = cancelTransfer False t >> redirectBack
|
||||
postCancelTransferR :: Transfer -> Handler ()
|
||||
postCancelTransferR t = cancelTransfer False t
|
41
Assistant/WebApp/Documentation.hs
Normal file
41
Assistant/WebApp/Documentation.hs
Normal file
|
@ -0,0 +1,41 @@
|
|||
{- git-annex assistant webapp documentation
|
||||
-
|
||||
- Copyright 2012 Joey Hess <joey@kitenet.net>
|
||||
-
|
||||
- Licensed under the GNU AGPL version 3 or higher.
|
||||
-}
|
||||
|
||||
{-# LANGUAGE TypeFamilies, QuasiQuotes, MultiParamTypeClasses, TemplateHaskell, OverloadedStrings, RankNTypes #-}
|
||||
|
||||
module Assistant.WebApp.Documentation where
|
||||
|
||||
import Assistant.WebApp.Common
|
||||
import Assistant.Install (standaloneAppBase)
|
||||
import Build.SysConfig (packageversion)
|
||||
|
||||
{- The full license info may be included in a file on disk that can
|
||||
- be read in and displayed. -}
|
||||
licenseFile :: IO (Maybe FilePath)
|
||||
licenseFile = do
|
||||
base <- standaloneAppBase
|
||||
return $ (</> "LICENSE") <$> base
|
||||
|
||||
getAboutR :: Handler RepHtml
|
||||
getAboutR = page "About git-annex" (Just About) $ do
|
||||
builtinlicense <- isJust <$> liftIO licenseFile
|
||||
$(widgetFile "documentation/about")
|
||||
|
||||
getLicenseR :: Handler RepHtml
|
||||
getLicenseR = do
|
||||
v <- liftIO licenseFile
|
||||
case v of
|
||||
Nothing -> redirect AboutR
|
||||
Just f -> customPage (Just About) $ do
|
||||
-- no sidebar, just pages of legalese..
|
||||
setTitle "License"
|
||||
license <- liftIO $ readFile f
|
||||
$(widgetFile "documentation/license")
|
||||
|
||||
getRepoGroupR :: Handler RepHtml
|
||||
getRepoGroupR = page "About repository groups" (Just About) $ do
|
||||
$(widgetFile "documentation/repogroup")
|
Assistant/WebApp/Form.hs (new file, 64 lines)
@@ -0,0 +1,64 @@
{- git-annex assistant webapp form utilities
 -
 - Copyright 2012 Joey Hess <joey@kitenet.net>
 -
 - Licensed under the GNU AGPL version 3 or higher.
 -}

{-# LANGUAGE FlexibleContexts, TypeFamilies, QuasiQuotes, MultiParamTypeClasses, TemplateHaskell, OverloadedStrings, RankNTypes #-}

module Assistant.WebApp.Form where

import Types.Remote (RemoteConfigKey)

import Yesod hiding (textField, passwordField)
import Yesod.Form.Fields as F
import Data.Text (Text)

{- Yesod's textField sets the required attribute for required fields.
 - We don't want this, because many of the forms used in this webapp
 - display a modal dialog when submitted, which interacts badly with
 - required field handling by the browser.
 -
 - Required fields are still checked by Yesod.
 -}
textField :: RenderMessage master FormMessage => Field sub master Text
textField = F.textField
    { fieldView = \theId name attrs val _isReq -> [whamlet|
<input id="#{theId}" name="#{name}" *{attrs} type="text" value="#{either id id val}">
|]
    }

{- Also without required attribute. -}
passwordField :: RenderMessage master FormMessage => Field sub master Text
passwordField = F.passwordField
    { fieldView = \theId name attrs val _isReq -> toWidget [hamlet|
<input id="#{theId}" name="#{name}" *{attrs} type="password" value="#{either id id val}">
|]
    }

{- Makes a note widget be displayed after a field. -}
withNote :: Field sub master v -> GWidget sub master () -> Field sub master v
withNote field note = field { fieldView = newview }
  where
    newview theId name attrs val isReq =
        let fieldwidget = (fieldView field) theId name attrs val isReq
        in [whamlet|^{fieldwidget} <span>^{note}</span>|]

data EnableEncryption = SharedEncryption | NoEncryption
    deriving (Eq)

{- Adds a check box to an AForm to control encryption. -}
enableEncryptionField :: RenderMessage master FormMessage => AForm sub master EnableEncryption
enableEncryptionField = areq (selectFieldList choices) "Encryption" (Just SharedEncryption)
  where
    choices :: [(Text, EnableEncryption)]
    choices =
        [ ("Encrypt all data", SharedEncryption)
        , ("Disable encryption", NoEncryption)
        ]

{- Generates Remote configuration for encryption. -}
configureEncryption :: EnableEncryption -> (RemoteConfigKey, String)
configureEncryption SharedEncryption = ("encryption", "shared")
configureEncryption NoEncryption = ("encryption", "none")
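The withNote combinator above only wraps a field's view function so extra markup is appended to whatever the field renders. A self-contained sketch of the same idea, as a simplified model using plain strings rather than the Yesod Field and widget types:

-- Sketch only: a stand-in for Yesod's field view, modelled as String -> String.
newtype FieldView = FieldView { renderField :: String -> String }

textFieldV :: FieldView
textFieldV = FieldView $ \name -> "<input type=\"text\" name=\"" ++ name ++ "\">"

-- Wrap the view so a note is rendered after the field, like withNote above.
withNoteV :: FieldView -> String -> FieldView
withNoteV (FieldView render) note =
    FieldView $ \name -> render name ++ " <span>" ++ note ++ "</span>"

main :: IO ()
main = putStrLn $ renderField (withNoteV textFieldV "required for pairing") "username"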
Assistant/WebApp/Notifications.hs (new file, 96 lines)
@@ -0,0 +1,96 @@
{- git-annex assistant webapp notifications
 -
 - Copyright 2012 Joey Hess <joey@kitenet.net>
 -
 - Licensed under the GNU AGPL version 3 or higher.
 -}

{-# LANGUAGE CPP, TypeFamilies, QuasiQuotes, MultiParamTypeClasses, TemplateHaskell, OverloadedStrings, RankNTypes #-}

#if defined VERSION_yesod_default
#if ! MIN_VERSION_yesod_default(1,1,0)
#define WITH_OLD_YESOD
#endif
#endif

module Assistant.WebApp.Notifications where

import Assistant.Common
import Assistant.WebApp
import Assistant.WebApp.Types
import Assistant.DaemonStatus
import Assistant.Types.Buddies
import Utility.NotificationBroadcaster
import Utility.Yesod

import Yesod
import Data.Text (Text)
import qualified Data.Text as T
#ifndef WITH_OLD_YESOD
import qualified Data.Aeson.Types as Aeson
#endif

{- Add to any widget to make it auto-update using long polling.
 -
 - The widget should have a html element with an id=ident, which will be
 - replaced when it's updated.
 -
 - The geturl route should return the notifier url to use for polling.
 -
 - ms_delay is how long to delay between AJAX updates
 - ms_startdelay is how long to delay before updating with AJAX at the start
 -}
autoUpdate :: Text -> Route WebApp -> Int -> Int -> Widget
autoUpdate tident geturl ms_delay ms_startdelay = do
#ifdef WITH_OLD_YESOD
    let delay = show ms_delay
    let startdelay = show ms_startdelay
    let ident = "'" ++ T.unpack tident ++ "'"
#else
    let delay = Aeson.String (T.pack (show ms_delay))
    let startdelay = Aeson.String (T.pack (show ms_startdelay))
    let ident = Aeson.String tident
#endif
    addScript $ StaticR longpolling_js
    $(widgetFile "notifications/longpolling")

{- Notifier urls are requested by the javascript, to avoid allocation
 - of NotificationIds when noscript pages are loaded. This constructs a
 - notifier url for a given Route and NotificationBroadcaster.
 -}
notifierUrl :: (NotificationId -> Route WebApp) -> Assistant NotificationBroadcaster -> Handler RepPlain
notifierUrl route broadcaster = do
    (urlbits, _params) <- renderRoute . route <$> newNotifier broadcaster
    webapp <- getYesod
    return $ RepPlain $ toContent $ T.concat
        [ "/"
        , T.intercalate "/" urlbits
        , "?auth="
        , secretToken webapp
        ]

getNotifierTransfersR :: Handler RepPlain
getNotifierTransfersR = notifierUrl TransfersR getTransferBroadcaster

getNotifierSideBarR :: Handler RepPlain
getNotifierSideBarR = notifierUrl SideBarR getAlertBroadcaster

getNotifierBuddyListR :: Handler RepPlain
getNotifierBuddyListR = notifierUrl BuddyListR getBuddyListBroadcaster

getNotifierRepoListR :: RepoSelector -> Handler RepPlain
getNotifierRepoListR reposelector = notifierUrl route getRepoListBroadcaster
  where
    route nid = RepoListR $ RepoListNotificationId nid reposelector

getTransferBroadcaster :: Assistant NotificationBroadcaster
getTransferBroadcaster = transferNotifier <$> getDaemonStatus

getAlertBroadcaster :: Assistant NotificationBroadcaster
getAlertBroadcaster = alertNotifier <$> getDaemonStatus

getBuddyListBroadcaster :: Assistant NotificationBroadcaster
getBuddyListBroadcaster = getBuddyBroadcaster <$> getAssistant buddyList

getRepoListBroadcaster :: Assistant NotificationBroadcaster
getRepoListBroadcaster = syncRemotesNotifier <$> getDaemonStatus
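notifierUrl above assembles the polling url from the rendered route segments plus the auth token. A self-contained sketch of that assembly, with made-up segments and token standing in for renderRoute output and secretToken:

{-# LANGUAGE OverloadedStrings #-}

import Data.Text (Text)
import qualified Data.Text as T

-- Sketch only: build "/" ++ segments joined by "/" ++ "?auth=" ++ token,
-- mirroring the T.concat in notifierUrl above.
notifierUrlFor :: [Text] -> Text -> Text
notifierUrlFor urlbits token = T.concat
    [ "/"
    , T.intercalate "/" urlbits
    , "?auth="
    , token
    ]

main :: IO ()
main = putStrLn $ T.unpack $
    notifierUrlFor ["transfers", "42"] "0123456789abcdef"  -- hypothetical values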
Assistant/WebApp/OtherRepos.hs (new file, 68 lines)
@@ -0,0 +1,68 @@
{- git-annex assistant webapp switching to other repos
 -
 - Copyright 2012 Joey Hess <joey@kitenet.net>
 -
 - Licensed under the GNU AGPL version 3 or higher.
 -}

{-# LANGUAGE CPP, TypeFamilies, QuasiQuotes, MultiParamTypeClasses, TemplateHaskell, OverloadedStrings, RankNTypes #-}

module Assistant.WebApp.OtherRepos where

import Assistant.Common
import Assistant.WebApp.Types
import Assistant.WebApp.Page
import qualified Git.Construct
import qualified Git.Config
import Locations.UserConfig
import qualified Utility.Url as Url
import Utility.Yesod

import Yesod
import Control.Concurrent
import System.Process (cwd)

getRepositorySwitcherR :: Handler RepHtml
getRepositorySwitcherR = page "Switch repository" Nothing $ do
    repolist <- liftIO listOtherRepos
    $(widgetFile "control/repositoryswitcher")

listOtherRepos :: IO [(String, String)]
listOtherRepos = do
    dirs <- readAutoStartFile
    pwd <- getCurrentDirectory
    gooddirs <- filterM doesDirectoryExist $
        filter (\d -> not $ d `dirContains` pwd) dirs
    names <- mapM relHome gooddirs
    return $ sort $ zip names gooddirs

{- Starts up the assistant in the repository, and waits for it to create
 - a gitAnnexUrlFile. Waits for the assistant to be up and listening for
 - connections by testing the url. Once it's running, redirect to it.
 -}
getSwitchToRepositoryR :: FilePath -> Handler RepHtml
getSwitchToRepositoryR repo = do
    liftIO $ startAssistant repo
    redirect =<< liftIO geturl
  where
    geturl = do
        r <- Git.Config.read =<< Git.Construct.fromPath repo
        waiturl $ gitAnnexUrlFile r
    waiturl urlfile = do
        v <- tryIO $ readFile urlfile
        case v of
            Left _ -> delayed $ waiturl urlfile
            Right url -> ifM (listening url)
                ( return url
                , delayed $ waiturl urlfile
                )
    listening url = catchBoolIO $ fst <$> Url.exists url []
    delayed a = do
        threadDelay 100000 -- 1/10th of a second
        a

startAssistant :: FilePath -> IO ()
startAssistant repo = do
    program <- readProgramFile
    void $ forkIO $ void $ createProcess $
        (proc program ["assistant"]) { cwd = Just repo }
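getSwitchToRepositoryR polls until the freshly started assistant has written its url file and is answering requests, sleeping 1/10th of a second between attempts. A self-contained sketch of that retry-with-delay shape, with a local tryIO standing in for git-annex's Utility.Exception helper and a hypothetical file path as the thing being waited for:

import Control.Concurrent (threadDelay)
import Control.Exception (IOException, try)

-- Sketch only: retry an IO action until it succeeds, pausing 1/10th of a
-- second between attempts, like the waiturl/delayed loop above.
waitFor :: IO a -> IO a
waitFor action = do
    v <- tryIO action
    case v of
        Right a -> return a
        Left _ -> do
            threadDelay 100000  -- 1/10th of a second
            waitFor action
  where
    tryIO :: IO b -> IO (Either IOException b)
    tryIO = try

main :: IO ()
main = waitFor (readFile "/tmp/annex-url") >>= putStrLn  -- hypothetical url file; blocks until it exists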
Assistant/WebApp/Page.hs (new file, 67 lines)
@@ -0,0 +1,67 @@
{- git-annex assistant webapp page display
 -
 - Copyright 2012 Joey Hess <joey@kitenet.net>
 -
 - Licensed under the GNU AGPL version 3 or higher.
 -}

{-# LANGUAGE TypeFamilies, QuasiQuotes, MultiParamTypeClasses, TemplateHaskell, OverloadedStrings, RankNTypes #-}

module Assistant.WebApp.Page where

import Assistant.Common
import Assistant.WebApp
import Assistant.WebApp.Types
import Assistant.WebApp.SideBar
import Utility.Yesod

import Yesod
import Text.Hamlet
import Data.Text (Text)

data NavBarItem = DashBoard | Configuration | About
    deriving (Eq)

navBarName :: NavBarItem -> Text
navBarName DashBoard = "Dashboard"
navBarName Configuration = "Configuration"
navBarName About = "About"

navBarRoute :: NavBarItem -> Route WebApp
navBarRoute DashBoard = HomeR
navBarRoute Configuration = ConfigurationR
navBarRoute About = AboutR

defaultNavBar :: [NavBarItem]
defaultNavBar = [DashBoard, Configuration, About]

firstRunNavBar :: [NavBarItem]
firstRunNavBar = [Configuration, About]

selectNavBar :: Handler [NavBarItem]
selectNavBar = ifM (inFirstRun) (return firstRunNavBar, return defaultNavBar)

{- A standard page of the webapp, with a title, a sidebar, and that may
 - be highlighted on the navbar. -}
page :: Html -> Maybe NavBarItem -> Widget -> Handler RepHtml
page title navbaritem content = customPage navbaritem $ do
    setTitle title
    sideBarDisplay
    content

{- A custom page, with no title or sidebar set. -}
customPage :: Maybe NavBarItem -> Widget -> Handler RepHtml
customPage navbaritem content = do
    webapp <- getYesod
    navbar <- map navdetails <$> selectNavBar
    pageinfo <- widgetToPageContent $ do
        addStylesheet $ StaticR css_bootstrap_css
        addStylesheet $ StaticR css_bootstrap_responsive_css
        addScript $ StaticR jquery_full_js
        addScript $ StaticR js_bootstrap_dropdown_js
        addScript $ StaticR js_bootstrap_modal_js
        addScript $ StaticR js_bootstrap_collapse_js
        $(widgetFile "page")
    hamletToRepHtml $(hamletFile $ hamletTemplate "bootstrap")
  where
    navdetails i = (navBarName i, navBarRoute i, Just i == navbaritem)
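selectNavBar above uses git-annex's ifM helper, which takes both branches as a tuple. A self-contained sketch of that combinator and the navbar selection, with inFirstRun simulated by a plain IO Bool:

-- Sketch only: ifM as used by selectNavBar; the real helper lives in
-- git-annex's Utility.Monad.
ifM :: Monad m => m Bool -> (m a, m a) -> m a
ifM cond (thenclause, elseclause) = do
    c <- cond
    if c then thenclause else elseclause

data NavBarItem = DashBoard | Configuration | About
    deriving (Show, Eq)

selectNavBarWith :: IO Bool -> IO [NavBarItem]
selectNavBarWith inFirstRun =
    ifM inFirstRun (return [Configuration, About], return [DashBoard, Configuration, About])

main :: IO ()
main = selectNavBarWith (return False) >>= print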
Assistant/WebApp/SideBar.hs (new file, 100 lines)
@@ -0,0 +1,100 @@
{- git-annex assistant webapp sidebar
 -
 - Copyright 2012 Joey Hess <joey@kitenet.net>
 -
 - Licensed under the GNU AGPL version 3 or higher.
 -}

{-# LANGUAGE TypeFamilies, QuasiQuotes, MultiParamTypeClasses, TemplateHaskell, OverloadedStrings, RankNTypes #-}

module Assistant.WebApp.SideBar where

import Assistant.Common
import Assistant.WebApp
import Assistant.WebApp.Types
import Assistant.WebApp.Notifications
import Assistant.Alert
import Assistant.DaemonStatus
import Utility.NotificationBroadcaster
import Utility.Yesod

import Yesod
import Data.Text (Text)
import qualified Data.Map as M
import Control.Concurrent

sideBarDisplay :: Widget
sideBarDisplay = do
    let content = do
            {- Add newest alerts to the sidebar. -}
            alertpairs <- lift $ M.toList . alertMap
                <$> liftAssistant getDaemonStatus
            mapM_ renderalert $
                take displayAlerts $ reverse $ sortAlertPairs alertpairs
    let ident = "sidebar"
    $(widgetFile "sidebar/main")
    autoUpdate ident NotifierSideBarR (10 :: Int) (10 :: Int)
  where
    bootstrapclass :: AlertClass -> Text
    bootstrapclass Activity = "alert-info"
    bootstrapclass Warning = "alert"
    bootstrapclass Error = "alert-error"
    bootstrapclass Success = "alert-success"
    bootstrapclass Message = "alert-info"

    renderalert (aid, alert) = do
        let alertid = show aid
        let closable = alertClosable alert
        let block = alertBlockDisplay alert
        let divclass = bootstrapclass $ alertClass alert
        $(widgetFile "sidebar/alert")

{- Called by client to get a sidebar display.
 -
 - Returns a div, which will be inserted into the calling page.
 -
 - Note that the head of the widget is not included, only its
 - body is. To get the widget head content, the widget is also
 - inserted onto all pages.
 -}
getSideBarR :: NotificationId -> Handler RepHtml
getSideBarR nid = do
    waitNotifier getAlertBroadcaster nid

    {- This 0.1 second delay avoids very transient notifications from
     - being displayed and churning the sidebar unnecessarily.
     -
     - This needs to be below the level perceptible by the user,
     - to avoid slowing down user actions like closing alerts. -}
    liftIO $ threadDelay 100000

    page <- widgetToPageContent sideBarDisplay
    hamletToRepHtml $ [hamlet|^{pageBody page}|]

{- Called by the client to close an alert. -}
getCloseAlert :: AlertId -> Handler ()
getCloseAlert = liftAssistant . removeAlert

{- When an alert with a button is clicked on, the button takes us here. -}
getClickAlert :: AlertId -> Handler ()
getClickAlert i = do
    m <- alertMap <$> liftAssistant getDaemonStatus
    case M.lookup i m of
        Just (Alert { alertButton = Just b }) -> do
            {- Spawn a thread to run the action while redirecting. -}
            case buttonAction b of
                Nothing -> noop
                Just a -> liftIO $ void $ forkIO $ a i
            redirect $ buttonUrl b
        _ -> redirectBack

htmlIcon :: AlertIcon -> GWidget sub master ()
htmlIcon ActivityIcon = bootstrapIcon "refresh"
htmlIcon InfoIcon = bootstrapIcon "info-sign"
htmlIcon SuccessIcon = bootstrapIcon "ok"
htmlIcon ErrorIcon = bootstrapIcon "exclamation-sign"
-- utf-8 umbrella (utf-8 cloud looks too stormy)
htmlIcon TheCloud = [whamlet|☂|]

bootstrapIcon :: Text -> GWidget sub master ()
bootstrapIcon name = [whamlet|<i .icon-#{name}></i>|]
Assistant/WebApp/Types.hs (new file, 146 lines)
@@ -0,0 +1,146 @@
{- git-annex assistant webapp types
 -
 - Copyright 2012 Joey Hess <joey@kitenet.net>
 -
 - Licensed under the GNU AGPL version 3 or higher.
 -}

{-# LANGUAGE TypeFamilies, QuasiQuotes, MultiParamTypeClasses, TemplateHaskell, OverloadedStrings, RankNTypes #-}
{-# OPTIONS_GHC -fno-warn-orphans #-}

module Assistant.WebApp.Types where

import Assistant.Common
import Assistant.Ssh
import Assistant.Alert
import Assistant.Pairing
import Assistant.Types.Buddies
import Utility.NotificationBroadcaster
import Utility.WebApp
import Utility.Yesod
import Logs.Transfer
import Build.SysConfig (packageversion)

import Yesod
import Yesod.Static
import Text.Hamlet
import Data.Text (Text, pack, unpack)
import Control.Concurrent.STM

publicFiles "static"

mkYesodData "WebApp" $(parseRoutesFile "Assistant/WebApp/routes")

data WebApp = WebApp
    { assistantData :: AssistantData
    , secretToken :: Text
    , relDir :: Maybe FilePath
    , getStatic :: Static
    , webAppState :: TMVar WebAppState
    , postFirstRun :: Maybe (IO String)
    , noAnnex :: Bool
    }

instance Yesod WebApp where
    {- Require an auth token be set when accessing any (non-static) route -}
    isAuthorized _ _ = checkAuthToken secretToken

    {- Add the auth token to every url generated, except static subsite
     - urls (which can show up in Permission Denied pages). -}
    joinPath = insertAuthToken secretToken excludeStatic
      where
        excludeStatic [] = True
        excludeStatic (p:_) = p /= "static"

    makeSessionBackend = webAppSessionBackend
    jsLoader _ = BottomOfHeadBlocking

    {- The webapp does not use defaultLayout, so this is only used
     - for error pages or any other built-in yesod page.
     -
     - This can use static routes, but should use no other routes,
     - as that would expose the auth token.
     -}
    defaultLayout content = do
        webapp <- getYesod
        pageinfo <- widgetToPageContent $ do
            addStylesheet $ StaticR css_bootstrap_css
            addStylesheet $ StaticR css_bootstrap_responsive_css
            $(widgetFile "error")
        hamletToRepHtml $(hamletFile $ hamletTemplate "bootstrap")

instance RenderMessage WebApp FormMessage where
    renderMessage _ _ = defaultFormMessage

type Form x = Html -> MForm WebApp WebApp (FormResult x, Widget)

data WebAppState = WebAppState
    { showIntro :: Bool -- should the intro message be displayed?
    }

data RepoSelector = RepoSelector
    { onlyCloud :: Bool
    , onlyConfigured :: Bool
    , includeHere :: Bool
    }
    deriving (Read, Show, Eq)

data RepoListNotificationId = RepoListNotificationId NotificationId RepoSelector
    deriving (Read, Show, Eq)

{- Only needed to work around old-yesod bug that emits a warning message
 - when a route has two parameters. -}
data FilePathAndUUID = FilePathAndUUID FilePath UUID
    deriving (Read, Show, Eq)

instance PathPiece FilePathAndUUID where
    toPathPiece = pack . show
    fromPathPiece = readish . unpack

instance PathPiece SshData where
    toPathPiece = pack . show
    fromPathPiece = readish . unpack

instance PathPiece NotificationId where
    toPathPiece = pack . show
    fromPathPiece = readish . unpack

instance PathPiece AlertId where
    toPathPiece = pack . show
    fromPathPiece = readish . unpack

instance PathPiece Transfer where
    toPathPiece = pack . show
    fromPathPiece = readish . unpack

instance PathPiece PairMsg where
    toPathPiece = pack . show
    fromPathPiece = readish . unpack

instance PathPiece SecretReminder where
    toPathPiece = pack . show
    fromPathPiece = readish . unpack

instance PathPiece UUID where
    toPathPiece = pack . show
    fromPathPiece = readish . unpack

instance PathPiece BuddyKey where
    toPathPiece = pack . show
    fromPathPiece = readish . unpack

instance PathPiece PairKey where
    toPathPiece = pack . show
    fromPathPiece = readish . unpack

instance PathPiece RepoListNotificationId where
    toPathPiece = pack . show
    fromPathPiece = readish . unpack

instance PathPiece RepoSelector where
    toPathPiece = pack . show
    fromPathPiece = readish . unpack

instance PathPiece ThreadName where
    toPathPiece = pack . show
    fromPathPiece = readish . unpack
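Every PathPiece instance above serializes with show and parses back with readish. A self-contained sketch of that round trip, with readish defined locally (standing in for git-annex's Utility.PartialPrelude helper) and RepoSelector re-declared just for the demo:

import Data.Text (Text, pack, unpack)

-- Local stand-in for git-annex's readish: Just the parsed value, or Nothing.
readish :: Read a => String -> Maybe a
readish s = case reads s of
    ((x, _):_) -> Just x
    _ -> Nothing

data RepoSelector = RepoSelector
    { onlyCloud :: Bool
    , onlyConfigured :: Bool
    , includeHere :: Bool
    }
    deriving (Read, Show, Eq)

toPathPiece' :: Show a => a -> Text
toPathPiece' = pack . show

fromPathPiece' :: Read a => Text -> Maybe a
fromPathPiece' = readish . unpack

main :: IO ()
main = do
    let rs = RepoSelector True False True
    let piece = toPathPiece' rs              -- what ends up in the URL
    print piece
    print (fromPathPiece' piece == Just rs)  -- round-trips back to the value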
Assistant/WebApp/Utility.hs (new file, 125 lines)
@@ -0,0 +1,125 @@
{- git-annex assistant webapp utilities
 -
 - Copyright 2012 Joey Hess <joey@kitenet.net>
 -
 - Licensed under the GNU AGPL version 3 or higher.
 -}

module Assistant.WebApp.Utility where

import Assistant.Common hiding (liftAnnex)
import Assistant.WebApp
import Assistant.WebApp.Types
import Assistant.DaemonStatus
import Assistant.TransferQueue
import Assistant.Types.TransferSlots
import Assistant.TransferSlots
import Assistant.Sync
import qualified Remote
import qualified Types.Remote as Remote
import qualified Remote.List as Remote
import qualified Assistant.Threads.Transferrer as Transferrer
import Logs.Transfer
import Locations.UserConfig
import qualified Config
import Git.Config
import Assistant.Threads.Watcher
import Assistant.NamedThread

import qualified Data.Map as M
import Control.Concurrent
import System.Posix.Signals (signalProcessGroup, sigTERM, sigKILL)
import System.Posix.Process (getProcessGroupIDOf)

{- Use Nothing to change autocommit setting; or a remote to change
 - its sync setting. -}
changeSyncable :: (Maybe Remote) -> Bool -> Handler ()
changeSyncable Nothing enable = do
    liftAnnex $ Config.setConfig key (boolConfig enable)
    liftIO . maybe noop (`throwTo` signal)
        =<< liftAssistant (namedThreadId watchThread)
  where
    key = Config.annexConfig "autocommit"
    signal
        | enable = ResumeWatcher
        | otherwise = PauseWatcher
changeSyncable (Just r) True = do
    changeSyncFlag r True
    syncRemote r
changeSyncable (Just r) False = do
    changeSyncFlag r False
    liftAssistant $ updateSyncRemotes
    {- Stop all transfers to or from this remote.
     - XXX Can't stop any ongoing scan, or git syncs. -}
    void $ liftAssistant $ dequeueTransfers tofrom
    mapM_ (cancelTransfer False) =<<
        filter tofrom . M.keys <$>
            liftAssistant (currentTransfers <$> getDaemonStatus)
  where
    tofrom t = transferUUID t == Remote.uuid r

changeSyncFlag :: Remote -> Bool -> Handler ()
changeSyncFlag r enabled = liftAnnex $ do
    Config.setConfig key (boolConfig enabled)
    void $ Remote.remoteListRefresh
  where
    key = Config.remoteConfig (Remote.repo r) "sync"

{- Start syncing remote, using a background thread. -}
syncRemote :: Remote -> Handler ()
syncRemote = liftAssistant . syncNewRemote

pauseTransfer :: Transfer -> Handler ()
pauseTransfer = cancelTransfer True

cancelTransfer :: Bool -> Transfer -> Handler ()
cancelTransfer pause t = do
    m <- getCurrentTransfers
    unless pause $
        {- remove queued transfer -}
        void $ liftAssistant $ dequeueTransfers $ equivilantTransfer t
    {- stop running transfer -}
    maybe noop stop (M.lookup t m)
  where
    stop info = liftAssistant $ do
        {- When there's a thread associated with the
         - transfer, it's signaled first, to avoid it
         - displaying any alert about the transfer having
         - failed when the transfer process is killed. -}
        liftIO $ maybe noop signalthread $ transferTid info
        liftIO $ maybe noop killproc $ transferPid info
        if pause
            then void $ alterTransferInfo t $
                \i -> i { transferPaused = True }
            else void $ removeTransfer t
    signalthread tid
        | pause = throwTo tid PauseTransfer
        | otherwise = killThread tid
    {- In order to stop helper processes like rsync,
     - kill the whole process group of the process running the transfer. -}
    killproc pid = void $ tryIO $ do
        g <- getProcessGroupIDOf pid
        void $ tryIO $ signalProcessGroup sigTERM g
        threadDelay 50000 -- 0.05 second grace period
        void $ tryIO $ signalProcessGroup sigKILL g

startTransfer :: Transfer -> Handler ()
startTransfer t = do
    m <- getCurrentTransfers
    maybe startqueued go (M.lookup t m)
  where
    go info = maybe (start info) resume $ transferTid info
    startqueued = do
        is <- liftAssistant $ map snd <$> getMatchingTransfers (== t)
        maybe noop start $ headMaybe is
    resume tid = do
        liftAssistant $ alterTransferInfo t $
            \i -> i { transferPaused = False }
        liftIO $ throwTo tid ResumeTransfer
    start info = liftAssistant $ do
        program <- liftIO readProgramFile
        inImmediateTransferSlot $
            Transferrer.startTransfer program t info

getCurrentTransfers :: Handler TransferMap
getCurrentTransfers = currentTransfers <$> liftAssistant getDaemonStatus
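The killproc helper above escalates from SIGTERM to SIGKILL against the transfer's whole process group. A self-contained, library-style sketch of that escalation using the same unix-package calls, with the ProcessID left to the caller and a local tryIO standing in for git-annex's helper:

import Control.Concurrent (threadDelay)
import Control.Exception (IOException, try)
import Control.Monad (void)
import System.Posix.Signals (signalProcessGroup, sigTERM, sigKILL)
import System.Posix.Process (getProcessGroupIDOf)
import System.Posix.Types (ProcessID)

-- Sketch only: mirror killproc above, outside the assistant's monad.
terminateProcessGroupOf :: ProcessID -> IO ()
terminateProcessGroupOf pid = void $ tryIO $ do
    g <- getProcessGroupIDOf pid
    void $ tryIO $ signalProcessGroup sigTERM g   -- polite request first
    threadDelay 50000                             -- 0.05 second grace period
    void $ tryIO $ signalProcessGroup sigKILL g   -- then force
  where
    tryIO :: IO a -> IO (Either IOException a)
    tryIO = try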
Assistant/WebApp/routes (new file, 74 lines)
@@ -0,0 +1,74 @@
/ HomeR GET HEAD
/noscript NoScriptR GET
/noscript/auto NoScriptAutoR GET
/about AboutR GET
/about/license LicenseR GET
/about/repogroups RepoGroupR GET

/shutdown ShutdownR GET
/shutdown/confirm ShutdownConfirmedR GET
/restart RestartR GET
/restart/thread/#ThreadName RestartThreadR GET
/log LogR GET

/config ConfigurationR GET
/config/repository RepositoriesR GET
/config/preferences PreferencesR GET
/config/xmpp XMPPR GET

/config/repository/new/first FirstRepositoryR GET
/config/repository/new NewRepositoryR GET
/config/repository/switcher RepositorySwitcherR GET
/config/repository/switchto/#FilePath SwitchToRepositoryR GET
/config/repository/combine/#FilePathAndUUID CombineRepositoryR GET
/config/repository/edit/#UUID EditRepositoryR GET
/config/repository/edit/new/#UUID EditNewRepositoryR GET
/config/repository/edit/new/cloud/#UUID EditNewCloudRepositoryR GET
/config/repository/sync/disable/#UUID DisableSyncR GET
/config/repository/sync/enable/#UUID EnableSyncR GET

/config/repository/add/drive AddDriveR GET
/config/repository/add/ssh AddSshR GET
/config/repository/add/ssh/confirm/#SshData ConfirmSshR GET
/config/repository/add/ssh/make/git/#SshData MakeSshGitR GET
/config/repository/add/ssh/make/rsync/#SshData MakeSshRsyncR GET
/config/repository/add/cloud/rsync.net AddRsyncNetR GET
/config/repository/add/cloud/S3 AddS3R GET
/config/repository/add/cloud/glacier AddGlacierR GET
/config/repository/add/cloud/box.com AddBoxComR GET

/config/repository/pair/local/start StartLocalPairR GET
/config/repository/pair/local/running/#SecretReminder RunningLocalPairR GET
/config/repository/pair/local/finish/#PairMsg FinishLocalPairR GET
/config/repository/pair/xmpp/start StartXMPPPairR GET
/config/repository/pair/xmpp/running/#BuddyKey RunningXMPPPairR GET
/config/repository/pair/xmpp/accept/#PairKey ConfirmXMPPPairR GET
/config/repository/pair/xmpp/finish/#PairKey FinishXMPPPairR GET

/config/repository/enable/rsync/#UUID EnableRsyncR GET
/config/repository/enable/directory/#UUID EnableDirectoryR GET
/config/repository/enable/S3/#UUID EnableS3R GET
/config/repository/enable/glacier/#UUID EnableGlacierR GET
/config/repository/enable/webdav/#UUID EnableWebDAVR GET

/transfers/#NotificationId TransfersR GET
/notifier/transfers NotifierTransfersR GET

/sidebar/#NotificationId SideBarR GET
/notifier/sidebar NotifierSideBarR GET

/buddylist/#NotificationId BuddyListR GET
/notifier/buddylist NotifierBuddyListR GET

/repolist/#RepoListNotificationId RepoListR GET
/notifier/repolist/#RepoSelector NotifierRepoListR GET

/alert/close/#AlertId CloseAlert GET
/alert/click/#AlertId ClickAlert GET
/filebrowser FileBrowserR GET POST

/transfer/pause/#Transfer PauseTransferR GET POST
/transfer/start/#Transfer StartTransferR GET POST
/transfer/cancel/#Transfer CancelTransferR GET POST

/static StaticR Static getStatic
Some files were not shown because too many files have changed in this diff.