Merge branch 'master' into git-remote-annex
commit ff5193c6ad
137 changed files with 2031 additions and 325 deletions
@@ -1,7 +1,8 @@
 [codespell]
 skip = .git,*.pdf,*.svg,*._comment,jquery.*.js,*.mdwn,changelog,CHANGELOG,list.2018,html,dist,dist-newstyle,.stack-work,man,tags,tmp
+ignore-regex=\b(valUs|addIn)\b
 # some common variables etc (case insensitive)
 # keypair - constructs
 ## May be TODO later, touches too much
 # sentinal -> sentinel
-ignore-words-list = dne,inout,fo,ot,bu,te,allright,inh,mor,myu,keypair,pasttime,sentinal,startd,ifset
+ignore-words-list = dne,inout,fo,ot,bu,te,allright,inh,mor,myu,keypair,pasttime,sentinal,startd,ifset,afile,buildt,toword
@@ -173,7 +173,7 @@ adjustToSymlink' gitannexlink ti@(TreeItem f _m s) = catKey s >>= \case
 -- This is a hidden branch ref, that's used as the basis for the AdjBranch,
 -- since pushes can overwrite the OrigBranch at any time. So, changes
--- are propigated from the AdjBranch to the head of the BasisBranch.
+-- are propagated from the AdjBranch to the head of the BasisBranch.
 newtype BasisBranch = BasisBranch Ref

 -- The basis for refs/heads/adjusted/master(unlocked) is
@@ -256,7 +256,7 @@ updateAdjustedBranch adj (AdjBranch currbranch) origbranch
 | not (adjustmentIsStable adj) = do
 (b, origheadfile, newheadfile) <- preventCommits $ \commitlck -> do
 -- Avoid losing any commits that the adjusted branch
--- has that have not yet been propigated back to the
+-- has that have not yet been propagated back to the
 -- origbranch.
 _ <- propigateAdjustedCommits' True origbranch adj commitlck

@@ -468,28 +468,39 @@ commitAdjustedTree' treesha (BasisBranch basis) parents =
 (commitAuthorMetaData basiscommit)
 (commitCommitterMetaData basiscommit)
 (mkcommit cmode)
-mkcommit cmode = Git.Branch.commitTree cmode
+-- Make sure that the exact message is used in the commit,
+-- since that message is looked for later.
+-- After git-annex 10.20240227, it's possible to use
+-- commitTree instead of this, but this is being kept
+-- for some time, for compatibility with older versions.
+mkcommit cmode = Git.Branch.commitTreeExactMessage cmode
 adjustedBranchCommitMessage parents treesha

 {- This message should never be changed. -}
 adjustedBranchCommitMessage :: String
 adjustedBranchCommitMessage = "git-annex adjusted branch"

+{- Allow for a trailing newline after the message. -}
+hasAdjustedBranchCommitMessage :: Commit -> Bool
+hasAdjustedBranchCommitMessage c =
+dropWhileEnd (\x -> x == '\n' || x == '\r') (commitMessage c)
+== adjustedBranchCommitMessage
+
 findAdjustingCommit :: AdjBranch -> Annex (Maybe Commit)
 findAdjustingCommit (AdjBranch b) = go =<< catCommit b
 where
 go Nothing = return Nothing
 go (Just c)
-| commitMessage c == adjustedBranchCommitMessage = return (Just c)
+| hasAdjustedBranchCommitMessage c = return (Just c)
 | otherwise = case commitParent c of
 [p] -> go =<< catCommit p
 _ -> return Nothing

 {- Check for any commits present on the adjusted branch that have not yet
-- been propigated to the basis branch, and propagate them to the basis
+- been propagated to the basis branch, and propagate them to the basis
 - branch and from there on to the orig branch.
 -
-- After propigating the commits back to the basis branch,
+- After propagating the commits back to the basis branch,
 - rebase the adjusted branch on top of the updated basis branch.
 -}
 propigateAdjustedCommits :: OrigBranch -> Adjustment -> Annex ()
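The hasAdjustedBranchCommitMessage helper added above tolerates a trailing newline, which is what makes it safe for commitTree (whose -m option can append one) to eventually replace commitTreeExactMessage for the marker commit. A small self-contained sketch of that comparison, using only base; hasMarkerMessage and the sample messages are illustrative, not git-annex code:

import Data.List (dropWhileEnd)

adjustedBranchCommitMessage :: String
adjustedBranchCommitMessage = "git-annex adjusted branch"

-- Accept the marker message even if git added a trailing newline.
hasMarkerMessage :: String -> Bool
hasMarkerMessage msg =
    dropWhileEnd (\x -> x == '\n' || x == '\r') msg
        == adjustedBranchCommitMessage

main :: IO ()
main = mapM_ (print . hasMarkerMessage)
    [ "git-annex adjusted branch"      -- True
    , "git-annex adjusted branch\n"    -- True
    , "some other commit message\n"    -- False
    ]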
@@ -540,7 +551,7 @@ propigateAdjustedCommits' warnwhendiverged origbranch adj _commitsprevented =
 return (Right parent)
 go origsha parent pastadjcommit (sha:l) = catCommit sha >>= \case
 Just c
-| commitMessage c == adjustedBranchCommitMessage ->
+| hasAdjustedBranchCommitMessage c ->
 go origsha parent True l
 | pastadjcommit ->
 reverseAdjustedCommit parent adj (sha, c) origbranch
@@ -577,7 +588,7 @@ reverseAdjustedCommit commitparent adj (csha, basiscommit) origbranch
 (commitAuthorMetaData basiscommit)
 (commitCommitterMetaData basiscommit) $
 Git.Branch.commitTree cmode
-(commitMessage basiscommit)
+[commitMessage basiscommit]
 [commitparent] treesha
 return (Right revadjcommit)

@@ -631,7 +642,7 @@ data AdjustedClone = InAdjustedClone | NotInAdjustedClone
 - checked out adjusted branch; the origin could have the two branches
 - out of sync (eg, due to another branch having been pushed to the origin's
 - origbranch), or due to a commit on its adjusted branch not having been
-- propigated back to origbranch.
+- propagated back to origbranch.
 -
 - So, find the adjusting commit on the currently checked out adjusted
 - branch, and use the parent of that commit as the basis, and set the
@@ -153,7 +153,8 @@ mergeToAdjustedBranch tomerge (origbranch, adj) mergeconfig canresolvemerge comm
 then do
 cmode <- annexCommitMode <$> Annex.getGitConfig
 c <- inRepo $ Git.Branch.commitTree cmode
-("Merged " ++ fromRef tomerge) [adjmergecommit]
+["Merged " ++ fromRef tomerge]
+[adjmergecommit]
 (commitTree currentcommit)
 inRepo $ Git.Branch.update "updating adjusted branch" currbranch c
 propigateAdjustedCommits origbranch adj
@@ -945,9 +945,9 @@ rememberTreeishLocked treeish graftpoint jl = do
 addedt <- inRepo $ Git.Tree.graftTree treeish graftpoint origtree
 cmode <- annexCommitMode <$> Annex.getGitConfig
 c <- inRepo $ Git.Branch.commitTree cmode
-"graft" [branchref] addedt
+["graft"] [branchref] addedt
 c' <- inRepo $ Git.Branch.commitTree cmode
-"graft cleanup" [c] origtree
+["graft cleanup"] [c] origtree
 inRepo $ Git.Branch.update' fullname c'
 -- The tree in c' is the same as the tree in branchref,
 -- and the index was updated to that above, so it's safe to
@@ -1,6 +1,6 @@
 {- External addon processes for special remotes and backends.
 -
-- Copyright 2013-2020 Joey Hess <id@joeyh.name>
+- Copyright 2013-2024 Joey Hess <id@joeyh.name>
 -
 - Licensed under the GNU AGPL version 3 or higher.
 -}
@@ -33,16 +33,16 @@ data ExternalAddonStartError
 = ProgramNotInstalled String
 | ProgramFailure String

-startExternalAddonProcess :: String -> ExternalAddonPID -> Annex (Either ExternalAddonStartError ExternalAddonProcess)
+startExternalAddonProcess :: String -> [CommandParam] -> ExternalAddonPID -> Annex (Either ExternalAddonStartError ExternalAddonProcess)
-startExternalAddonProcess basecmd pid = do
+startExternalAddonProcess basecmd ps pid = do
 errrelayer <- mkStderrRelayer
 g <- Annex.gitRepo
 cmdpath <- liftIO $ searchPath basecmd
 liftIO $ start errrelayer g cmdpath
 where
 start errrelayer g cmdpath = do
-(cmd, ps) <- maybe (pure (basecmd, [])) findShellCommand cmdpath
+(cmd, cmdps) <- maybe (pure (basecmd, [])) findShellCommand cmdpath
-let basep = (proc cmd (toCommand ps))
+let basep = (proc cmd (toCommand (cmdps ++ ps)))
 { std_in = CreatePipe
 , std_out = CreatePipe
 , std_err = CreatePipe
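startExternalAddonProcess now threads caller-supplied parameters through to the child process, appended after whatever findShellCommand yields, presumably so a caller can pass a subcommand such as the "rclone gitannex" mentioned in the CHANGELOG. A rough standalone sketch of that plumbing with System.Process; startAddon and the plain-string parameters are illustrative stand-ins for git-annex's CommandParam-based code:

import System.Process

-- Start an external addon, passing base parameters from the shell-command
-- lookup (cmdps) followed by the extra caller-supplied parameters (ps).
startAddon :: FilePath -> [String] -> [String] -> IO ProcessHandle
startAddon cmd cmdps ps = do
    (_, _, _, h) <- createProcess (proc cmd (cmdps ++ ps))
        { std_in  = CreatePipe
        , std_out = CreatePipe
        , std_err = CreatePipe
        }
    return h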
@@ -86,7 +86,7 @@ data ImportCommitConfig = ImportCommitConfig
 { importCommitTracking :: Maybe Sha
 -- ^ Current commit on the remote tracking branch.
 , importCommitMode :: Git.Branch.CommitMode
-, importCommitMessage :: String
+, importCommitMessages :: [String]
 }

 {- Buils a commit for an import from a special remote.
@@ -251,7 +251,7 @@ buildImportCommit' remote importcommitconfig mtrackingcommit imported@(History t

 mkcommit parents tree = inRepo $ Git.Branch.commitTree
 (importCommitMode importcommitconfig)
-(importCommitMessage importcommitconfig)
+(importCommitMessages importcommitconfig)
 parents
 tree

@@ -267,7 +267,7 @@ autoInitialize' check remotelist = getInitializedVersion >>= maybe needsinit che
 initialize Nothing Nothing
 autoEnableSpecialRemotes remotelist

-{- Checks if a repository is initialized. Does not check version for ugrade. -}
+{- Checks if a repository is initialized. Does not check version for upgrade. -}
 isInitialized :: Annex Bool
 isInitialized = maybe Annex.Branch.hasSibling (const $ return True) =<< getVersion

@@ -131,7 +131,7 @@ import qualified Utility.RawFilePath as R
 - trailing path separator. Most code does not rely on that, but a few
 - things do.
 -
-- Everything else should not end in a trailing path sepatator.
+- Everything else should not end in a trailing path separator.
 -
 - Only functions (with names starting with "git") that build a path
 - based on a git repository should return full path relative to the git
@@ -46,7 +46,7 @@ setRemoteTrackingBranch tb commit =
 -
 - The second parent of the merge commit is the past history of the
 - RemoteTrackingBranch as imported from a remote. When importing a
-- history of trees from a remote, commits can be sythesized from
+- history of trees from a remote, commits can be synthesized from
 - them, but such commits won't have the same sha due to eg date differing.
 - But since we know that the second parent consists entirely of such
 - import commits, they can be reused when updating the
@@ -77,7 +77,7 @@ makeRemoteTrackingBranchMergeCommit' commitsha importedhistory treesha = do
 cmode <- annexCommitMode <$> Annex.getGitConfig
 inRepo $ Git.Branch.commitTree
 cmode
-"remote tracking branch"
+["remote tracking branch"]
 [commitsha, importedhistory]
 treesha

@@ -407,7 +407,7 @@ fromSshOptionsEnv = map Param . lines
 {- Enables ssh caching for git push/pull to a particular
 - remote git repo. (Can safely be used on non-ssh remotes.)
 -
-- Also propigates any configured ssh-options.
+- Also propagates any configured ssh-options.
 -
 - Like inRepo, the action is run with the local git repo.
 - But here it's a modified version, with gitEnv to set GIT_SSH=git-annex,
@@ -387,7 +387,7 @@ prop_view_roundtrips (AssociatedFile Nothing) _ _ = True
 prop_view_roundtrips (AssociatedFile (Just f)) metadata visible = or
 [ B.null (P.takeFileName f) && B.null (P.takeDirectory f)
 , viewTooLarge view
-, all hasfields (viewedFiles view (viewedFileFromReference' Nothing) (fromRawFilePath f) metadata)
+, all hasfields (viewedFiles view (viewedFileFromReference' Nothing Nothing) (fromRawFilePath f) metadata)
 ]
 where
 view = View (Git.Ref "foo") $
@@ -577,7 +577,7 @@ updateView view madj = do
 cmode <- annexCommitMode <$> Annex.getGitConfig
 let msg = "updated " ++ fromRef (branchView view madj)
 let parent = catMaybes [oldcommit]
-inRepo (Git.Branch.commitTree cmode msg parent newtree)
+inRepo (Git.Branch.commitTree cmode [msg] parent newtree)
 else return Nothing

 {- Diff between currently checked out branch and staged changes, and
@@ -1,6 +1,6 @@
 {- filenames (not paths) used in views
 -
-- Copyright 2014-2023 Joey Hess <id@joeyh.name>
+- Copyright 2014-2024 Joey Hess <id@joeyh.name>
 -
 - Licensed under the GNU AGPL version 3 or higher.
 -}
@@ -19,6 +19,7 @@ module Annex.View.ViewedFile (

 import Annex.Common
 import Utility.QuickCheck
+import Backend.Utilities (maxExtensions)

 import qualified Data.ByteString as S

@@ -37,10 +38,12 @@ type MkViewedFile = FilePath -> ViewedFile
 - So, from dir/subdir/file.foo, generate file_%dir%subdir%.foo
 -}
 viewedFileFromReference :: GitConfig -> MkViewedFile
-viewedFileFromReference g = viewedFileFromReference' (annexMaxExtensionLength g)
+viewedFileFromReference g = viewedFileFromReference'
+(annexMaxExtensionLength g)
+(annexMaxExtensions g)

-viewedFileFromReference' :: Maybe Int -> MkViewedFile
+viewedFileFromReference' :: Maybe Int -> Maybe Int -> MkViewedFile
-viewedFileFromReference' maxextlen f = concat $
+viewedFileFromReference' maxextlen maxextensions f = concat $
 [ escape (fromRawFilePath base')
 , if null dirs then "" else "_%" ++ intercalate "%" (map escape dirs) ++ "%"
 , escape $ fromRawFilePath $ S.concat extensions'
@@ -51,11 +54,12 @@ viewedFileFromReference' maxextlen f = concat $
 (base, extensions) = case maxextlen of
 Nothing -> splitShortExtensions (toRawFilePath basefile')
 Just n -> splitShortExtensions' (n+1) (toRawFilePath basefile')
-{- Limit to two extensions maximum. -}
+{- Limit number of extensions. -}
+maxextensions' = fromMaybe maxExtensions maxextensions
 (base', extensions')
-| length extensions <= 2 = (base, extensions)
+| length extensions <= maxextensions' = (base, extensions)
 | otherwise =
-let (es,more) = splitAt 2 (reverse extensions)
+let (es,more) = splitAt maxextensions' (reverse extensions)
 in (base <> mconcat (reverse more), reverse es)
 {- On Windows, if the filename looked like "dir/c:foo" then
 - basefile would look like it contains a drive letter, which will
@@ -101,7 +105,8 @@ prop_viewedFile_roundtrips tf
 -- Relative filenames wanted, not directories.
 | any (isPathSeparator) (end f ++ beginning f) = True
 | isAbsolute f || isDrive f = True
-| otherwise = dir == dirFromViewedFile (viewedFileFromReference' Nothing f)
+| otherwise = dir == dirFromViewedFile
+(viewedFileFromReference' Nothing Nothing f)
 where
 f = fromTestableFilePath tf
 dir = joinPath $ beginning $ splitDirectories f
@@ -372,7 +372,7 @@ youtubePlaylist' url cmd = withTmpFile "yt-dlp" $ \tmpfile h -> do
 <$> B.readFile tmpfile
 return $ case partitionEithers v of
 ((parserr:_), _) ->
-Left $ "yt-dlp json parse errror: " ++ parserr
+Left $ "yt-dlp json parse error: " ++ parserr
 ([], r) -> Right r
 else return $ Left $ if null outerr
 then "yt-dlp failed"
@@ -1,4 +1,4 @@
-{- git-annex assistant sceduled jobs runner
+{- git-annex assistant scheduled jobs runner
 -
 - Copyright 2013 Joey Hess <id@joeyh.name>
 -
@@ -102,7 +102,7 @@ checkNetMonitor client = do
 networkd = "org.freedesktop.network1"
 wicd = "org.wicd.daemon"

-{- Listens for systemd-networkd connections and diconnections.
+{- Listens for systemd-networkd connections and disconnections.
 -
 - Connection example (once fully connected):
 - [Variant {"OperationalState": Variant "routable"}]
@@ -128,7 +128,7 @@ listenNDConnections client setconnected =
 else setconnected False
 Nothing -> noop

-{- Listens for NetworkManager connections and diconnections.
+{- Listens for NetworkManager connections and disconnections.
 -
 - Connection example (once fully connected):
 - [Variant {"ActivatingConnection": Variant (ObjectPath "/"), "PrimaryConnection": Variant (ObjectPath "/org/freedesktop/NetworkManager/ActiveConnection/34"), "State": Variant 70}]
@@ -77,7 +77,7 @@ upgradedEnv = "GIT_ANNEX_UPGRADED"
 -
 - Creates the destination directory where the upgrade will be installed
 - early, in order to check if another upgrade has happened (or is
-- happending). On failure, the directory is removed.
+- happening). On failure, the directory is removed.
 -}
 startDistributionDownload :: GitAnnexDistribution -> Assistant ()
 startDistributionDownload d = go =<< liftIO . newVersionLocation d =<< liftIO oldVersionLocation
@@ -215,7 +215,7 @@ poolVar = unsafePerformIO $ newMVar M.empty
 -- using it.
 newExternalState :: ExternalBackendName -> HasExt -> ExternalAddonPID -> Annex ExternalState
 newExternalState ebname hasext pid = do
-st <- startExternalAddonProcess basecmd pid
+st <- startExternalAddonProcess basecmd [] pid
 st' <- case st of
 Left (ProgramNotInstalled msg) -> warnonce msg >> return st
 Left (ProgramFailure msg) -> warnonce msg >> return st
@@ -174,11 +174,14 @@ needsUpgrade key = or
 ]

 trivialMigrate :: Key -> Backend -> AssociatedFile -> Bool -> Annex (Maybe Key)
-trivialMigrate oldkey newbackend afile _inannex = trivialMigrate' oldkey newbackend afile
+trivialMigrate oldkey newbackend afile _inannex = do
-<$> (annexMaxExtensionLength <$> Annex.getGitConfig)
+c <- Annex.getGitConfig
+return $ trivialMigrate' oldkey newbackend afile
+(annexMaxExtensionLength c)
+(annexMaxExtensions c)

-trivialMigrate' :: Key -> Backend -> AssociatedFile -> Maybe Int -> Maybe Key
+trivialMigrate' :: Key -> Backend -> AssociatedFile -> Maybe Int -> Maybe Int -> Maybe Key
-trivialMigrate' oldkey newbackend afile maxextlen
+trivialMigrate' oldkey newbackend afile maxextlen maxexts
 {- Fast migration from hashE to hash backend. -}
 | migratable && hasExt oldvariety = Just $ alterKey oldkey $ \d -> d
 { keyName = S.toShort (keyHash oldkey)
@@ -189,7 +192,7 @@ trivialMigrate' oldkey newbackend afile maxextlen
 AssociatedFile Nothing -> Nothing
 AssociatedFile (Just file) -> Just $ alterKey oldkey $ \d -> d
 { keyName = S.toShort $ keyHash oldkey
-<> selectExtension maxextlen file
+<> selectExtension maxextlen maxexts file
 , keyVariety = newvariety
 }
 {- Upgrade to fix bad previous migration that created a
@@ -45,20 +45,24 @@ genKeyName s
 - file that the key was generated from. -}
 addE :: KeySource -> (KeyVariety -> KeyVariety) -> Key -> Annex Key
 addE source sethasext k = do
-maxlen <- annexMaxExtensionLength <$> Annex.getGitConfig
+c <- Annex.getGitConfig
-let ext = selectExtension maxlen (keyFilename source)
+let ext = selectExtension
+(annexMaxExtensionLength c)
+(annexMaxExtensions c)
+(keyFilename source)
 return $ alterKey k $ \d -> d
 { keyName = keyName d <> S.toShort ext
 , keyVariety = sethasext (keyVariety d)
 }

-selectExtension :: Maybe Int -> RawFilePath -> S.ByteString
+selectExtension :: Maybe Int -> Maybe Int -> RawFilePath -> S.ByteString
-selectExtension maxlen f
+selectExtension maxlen maxextensions f
 | null es = ""
 | otherwise = S.intercalate "." ("":es)
 where
 es = filter (not . S.null) $ reverse $
-take 2 $ filter (S.all validInExtension) $
+take (fromMaybe maxExtensions maxextensions) $
+filter (S.all validInExtension) $
 takeWhile shortenough $
 reverse $ S.split (fromIntegral (ord '.')) (P.takeExtensions f')
 shortenough e = S.length e <= fromMaybe maxExtensionLen maxlen
@@ -75,3 +79,6 @@ validInExtension c

 maxExtensionLen :: Int
 maxExtensionLen = 4 -- long enough for "jpeg"
+
+maxExtensions :: Int
+maxExtensions = 2 -- include both extensions of "tar.gz"
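The new maxExtensions default (2), overridable via the annex.maxextensions setting from the CHANGELOG, combines with the existing length limit to bound what selectExtension keeps from a filename. A rough self-contained sketch of that selection rule over plain Strings; splitDots and selectExts are illustrative names, not git-annex's ByteString-based code:

-- Split a name on dots: "big.backup.tar.gz" -> ["big","backup","tar","gz"]
splitDots :: String -> [String]
splitDots s = case break (== '.') s of
    (a, [])     -> [a]
    (a, _:rest) -> a : splitDots rest

-- Keep at most maxexts trailing extensions, each at most maxlen characters,
-- mirroring the takeWhile/take pipeline in the diff above.
selectExts :: Int -> Int -> String -> [String]
selectExts maxlen maxexts file =
    reverse $ take maxexts $ takeWhile short $ reverse pieces
  where
    pieces  = drop 1 (splitDots file)   -- drop the base name
    short e = not (null e) && length e <= maxlen

main :: IO ()
main = do
    print (selectExts 4 2 "big.backup.tar.gz")  -- ["tar","gz"]
    print (selectExts 4 1 "big.backup.tar.gz")  -- ["gz"]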
@@ -41,7 +41,7 @@ backendVURL = Backend
 Nothing -> pure False
 anyM check eks
 , verifyKeyContentIncrementally = Just $ \k -> do
--- Run incremental verifiers for each equivilant key together,
+-- Run incremental verifiers for each equivalent key together,
 -- and see if any of them succeed.
 eks <- equivkeys k
 let get = \ek -> getbackend ek >>= \case
@@ -53,7 +53,7 @@ backendVURL = Backend
 return $ IncrementalVerifier
 { updateIncrementalVerifier = \s ->
 forM_ l $ flip updateIncrementalVerifier s
--- If there are no equivilant keys recorded somehow,
+-- If there are no equivalent keys recorded somehow,
 -- or if none of them support incremental verification,
 -- this will return Nothing, which indicates that
 -- incremental verification was not able to be
@@ -80,9 +80,9 @@ backendVURL = Backend
 -- Not all keys using this backend are necessarily
 -- cryptographically secure.
 , isCryptographicallySecure = False
--- A key is secure when all recorded equivilant keys are.
+-- A key is secure when all recorded equivalent keys are.
 -- If there are none recorded yet, it's secure because when
--- downloaded, an equivilant key that is cryptographically secure
+-- downloaded, an equivalent key that is cryptographically secure
 -- will be constructed then.
 , isCryptographicallySecureKey = \k ->
 equivkeys k >>= \case
@@ -95,7 +95,7 @@ backendVURL = Backend
 }
 where
 equivkeys k = filter allowedequiv <$> getEquivilantKeys k
--- Don't allow using VURL keys as equivilant keys, because that
+-- Don't allow using VURL keys as equivalent keys, because that
 -- could let a crafted git-annex branch cause an infinite loop.
 allowedequiv ek = fromKey keyVariety ek /= VURLKey
 varietymap = makeVarietyMap regularBackendList
@@ -1,5 +1,5 @@
 {- Generating and installing a desktop menu entry file and icon,
-- and a desktop autostart file. (And OSX equivilants.)
+- and a desktop autostart file. (And OSX equivalents.)
 -
 - Copyright 2012 Joey Hess <id@joeyh.name>
 -
@@ -1,5 +1,5 @@
 {- Generating and installing a desktop menu entry file and icon,
-- and a desktop autostart file. (And OSX equivilants.)
+- and a desktop autostart file. (And OSX equivalents.)
 -
 - Copyright 2012 Joey Hess <id@joeyh.name>
 -
CHANGELOG | 33
@@ -1,20 +1,37 @@
-git-annex (10.20240228) UNRELEASED; urgency=medium
+git-annex (10.20240431) UNRELEASED; urgency=medium

+* Typo fixes.
+Thanks, Yaroslav Halchenko
+
+-- Joey Hess <id@joeyh.name> Wed, 01 May 2024 15:47:06 -0400
+
+git-annex (10.20240430) upstream; urgency=medium
+
+* Bug fix: While redundant concurrent transfers were already
+prevented in most cases, it failed to prevent the case where
+two different repositories were sending the same content to
+the same repository.
 * addurl, importfeed: Added --verifiable option, which improves
 the safety of --fast or --relaxed by letting the content of
 annexed files be verified with a checksum that is calculated
 on a later download from the web. This will become the default later.
-* Added dependency on unbounded-delays.
+* Added rclone special remote, which can be used without needing
+to install the git-annex-remote-rclone program. This needs
+a forthcoming version of rclone (1.67.0), which supports
+"rclone gitannex".
+* sync, assist, import: Allow -m option to be specified multiple
+times, to provide additional paragraphs for the commit message.
 * reregisterurl: New command that can change an url from being
 used by a special remote to being used by the web remote.
-* Bugfix: While redundant concurrent transfers were already
-prevented in most cases, it failed to prevent the case where
-two different repositories were sending the same content to
-the same repository.
+* annex.maxextensions configuration controls how many filename
+extensions to preserve.
+* find: Fix --help for --copies.
+Thanks, Gergely Risko
 * Windows: Fix escaping output to terminal when using old
 versions of MinTTY.
+* Added dependency on unbounded-delays.

--- Joey Hess <id@joeyh.name> Tue, 27 Feb 2024 13:07:10 -0400
+-- Joey Hess <id@joeyh.name> Tue, 30 Apr 2024 15:26:32 -0400

 git-annex (10.20240227) upstream; urgency=medium

@@ -4566,7 +4583,7 @@ git-annex (5.20150508) unstable; urgency=medium
 --clean-duplicates mode, verify that enough copies of its content still
 exist.
 * Improve integration with KDE's file manager to work with dolphin
-version 14.12.3 while still being compatable with 4.14.2.
+version 14.12.3 while still being compatible with 4.14.2.
 Thanks, silvio.
 * assistant: Added --autostop to complement --autostart.
 * Work around wget bug #784348 which could cause it to clobber git-annex
@@ -298,7 +298,7 @@ keyMatchingOptions' =
 <> completeRemotes
 )
 , annexOption (setAnnexState . Limit.addCopies) $ strOption
-( long "copies" <> short 'C' <> metavar paramRemote
+( long "copies" <> short 'C' <> metavar paramNumber
 <> help "skip files with fewer copies"
 <> hidden
 )
@@ -111,7 +111,7 @@ checkHiddenService = bracket setup cleanup go
 -- we just want to know if the tor circuit works.
 liftIO (tryNonAsync $ connectPeer g addr) >>= \case
 Left e -> do
-warning $ UnquotedString $ "Unable to connect to hidden service. It may not yet have propigated to the Tor network. (" ++ show e ++ ") Will retry.."
+warning $ UnquotedString $ "Unable to connect to hidden service. It may not yet have propagated to the Tor network. (" ++ show e ++ ") Will retry.."
 liftIO $ threadDelaySeconds (Seconds 2)
 check (n-1) addrs
 Right conn -> do
@@ -189,7 +189,7 @@ seek o = withOtherTmp $ \tmpdir -> do
 liftIO $ removeWhenExistsWith removeLink tmpindex
 cmode <- annexCommitMode <$> Annex.getGitConfig
 cmessage <- Annex.Branch.commitMessage
-c <- inRepo $ Git.commitTree cmode cmessage [] t
+c <- inRepo $ Git.commitTree cmode [cmessage] [] t
 liftIO $ putStrLn (fromRef c)
 where
 ww = WarnUnmatchLsFiles "filter-branch"
@@ -1,6 +1,6 @@
 {- git-annex command
 -
-- Copyright 2012-2021 Joey Hess <id@joeyh.name>
+- Copyright 2012-2024 Joey Hess <id@joeyh.name>
 -
 - Licensed under the GNU AGPL version 3 or higher.
 -}
@@ -70,7 +70,7 @@ data ImportOptions
 , importToSubDir :: Maybe FilePath
 , importContent :: Bool
 , checkGitIgnoreOption :: CheckGitIgnore
-, messageOption :: Maybe String
+, messageOption :: [String]
 }

 optParser :: CmdParamsDesc -> Parser ImportOptions
@@ -82,7 +82,7 @@ optParser desc = do
 )
 dupmode <- fromMaybe Default <$> optional duplicateModeParser
 ic <- Command.Add.checkGitIgnoreSwitch
-message <- optional (strOption
+message <- many (strOption
 ( long "message" <> short 'm' <> metavar "MSG"
 <> help "commit message"
 ))
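Switching the parser combinator from optional to many is what lets -m be given more than once, matching the CHANGELOG entry for sync, assist, and import. A minimal optparse-applicative sketch of the same pattern, outside git-annex; Opts and the program wrapper are illustrative:

import Control.Applicative (many)
import Options.Applicative

newtype Opts = Opts { messages :: [String] }

-- `many` collects every -m/--message given; `optional` would keep only one.
optsParser :: Parser Opts
optsParser = Opts <$> many (strOption
    ( long "message" <> short 'm' <> metavar "MSG"
    <> help "commit message (may be repeated)"
    ))

main :: IO ()
main = do
    opts <- execParser (info (optsParser <**> helper) fullDesc)
    mapM_ putStrLn (messages opts)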
@@ -322,8 +322,8 @@ verifyExisting key destfile (yes, no) = do
 verifyEnoughCopiesToDrop [] key Nothing needcopies mincopies [] preverified tocheck
 (const yes) no

-seekRemote :: Remote -> Branch -> Maybe TopFilePath -> Bool -> CheckGitIgnore -> Maybe String -> CommandSeek
+seekRemote :: Remote -> Branch -> Maybe TopFilePath -> Bool -> CheckGitIgnore -> [String] -> CommandSeek
-seekRemote remote branch msubdir importcontent ci mimportmessage = do
+seekRemote remote branch msubdir importcontent ci importmessages = do
 importtreeconfig <- case msubdir of
 Nothing -> return ImportTree
 Just subdir ->
@@ -336,7 +336,7 @@ seekRemote remote branch msubdir importcontent ci mimportmessage = do

 trackingcommit <- fromtrackingbranch Git.Ref.sha
 cmode <- annexCommitMode <$> Annex.getGitConfig
-let importcommitconfig = ImportCommitConfig trackingcommit cmode importmessage
+let importcommitconfig = ImportCommitConfig trackingcommit cmode importmessages'
 let commitimport = commitRemote remote branch tb trackingcommit importtreeconfig importcommitconfig

 importabletvar <- liftIO $ newTVarIO Nothing
@@ -353,9 +353,9 @@ seekRemote remote branch msubdir importcontent ci mimportmessage = do
 includeCommandAction $
 commitimport imported
 where
-importmessage = fromMaybe
+importmessages'
-("import from " ++ Remote.name remote)
+| null importmessages = ["import from " ++ Remote.name remote]
-mimportmessage
+| otherwise = importmessages

 tb = mkRemoteTrackingBranch remote branch

@@ -573,7 +573,7 @@ playlistFields u i = map (uncurry extractField)
 , ("itemtitle", [youtube_title i])
 , ("feedauthor", [youtube_playlist_uploader i])
 , ("itemauthor", [youtube_playlist_uploader i])
--- itemsummary omitted, no equivilant in yt-dlp data
+-- itemsummary omitted, no equivalent in yt-dlp data
 , ("itemdescription", [youtube_description i])
 , ("itemrights", [youtube_license i])
 , ("itemid", [youtube_url i])
@@ -169,7 +169,7 @@ startAll o outputter = do
 - same key. The method is to compare each value with the value
 - after it in the list, which is the old version of the value.
 -
-- This ncessarily buffers the whole list, so does not stream.
+- This necessarily buffers the whole list, so does not stream.
 - But, the number of location log changes for a single key tends to be
 - fairly small.
 -
@@ -377,7 +377,7 @@ sizeHistoryInfo mu o = do
 -- time across all git-annex repositories.
 --
 -- This combines the new location log with what has been
--- accumulated so far, which is equivilant to merging together
+-- accumulated so far, which is equivalent to merging together
 -- all git-annex branches at that point in time.
 update k sizemap locmap (oldlog, oldlocs) newlog =
 ( updatesize (updatesize sizemap sz (S.toList addedlocs))
@@ -490,7 +490,7 @@ sizeHistoryInfo mu o = do

 posminus a b = max 0 (a - b)

--- A verison of sizemap where uuids that are currently dead
+-- A version of sizemap where uuids that are currently dead
 -- have 0 size.
 sizemap' = M.mapWithKey zerodead sizemap
 zerodead u v = case M.lookup u (simpleMap trustlog) of
@@ -200,7 +200,7 @@ update oldkey newkey =
 firstM (\f -> (== Just newkey) <$> isAnnexLink f) $
 map (\f -> simplifyPath (fromTopFilePath f g)) fs

--- Always verify the content agains the newkey, even if
+-- Always verify the content against the newkey, even if
 -- annex.verify is unset. This is done to prent bad migration
 -- information maliciously injected into the git-annex branch
 -- from populating files with the wrong content.

@@ -40,7 +40,7 @@ start (_, key) = do
 )
 where
 {- No need to do any rollback; when sendAnnex fails, a nonzero
-- exit will be propigated, and the remote will know the transfer
+- exit will be propagated, and the remote will know the transfer
 - failed. -}
 rollback = noop

@@ -1,7 +1,7 @@
 {- git-annex command
 -
 - Copyright 2011 Joachim Breitner <mail@joachim-breitner.de>
-- Copyright 2011-2023 Joey Hess <id@joeyh.name>
+- Copyright 2011-2024 Joey Hess <id@joeyh.name>
 -
 - Licensed under the GNU AGPL version 3 or higher.
 -}
@@ -105,7 +105,7 @@ data SyncOptions = SyncOptions
 , notOnlyAnnexOption :: Bool
 , commitOption :: Bool
 , noCommitOption :: Bool
-, messageOption :: Maybe String
+, messageOption :: [String]
 , pullOption :: Bool
 , pushOption :: Bool
 , contentOption :: Maybe Bool
@@ -125,7 +125,7 @@ instance Default SyncOptions where
 , notOnlyAnnexOption = False
 , commitOption = False
 , noCommitOption = False
-, messageOption = Nothing
+, messageOption = []
 , pullOption = False
 , pushOption = False
 , contentOption = Just False
@@ -169,8 +169,8 @@ optParser mode desc = SyncOptions
 ( long "no-commit"
 <> help "avoid git commit"
 ))
-<*> unlessmode [SyncMode, AssistMode] Nothing
+<*> unlessmode [SyncMode, AssistMode] []
-(optional (strOption
+(many (strOption
 ( long "message" <> short 'm' <> metavar "MSG"
 <> help "commit message"
 )))
@@ -267,7 +267,7 @@ seek' o = startConcurrency transferStages $ do

 remotes <- syncRemotes (syncWith o)
 warnSyncContentTransition o remotes
--- Remotes that are git repositories, not (necesarily) special remotes.
+-- Remotes that are git repositories, not (necessarily) special remotes.
 let gitremotes = filter (Remote.gitSyncableRemoteType . Remote.remotetype) remotes
 -- Remotes that contain annex object content.
 contentremotes <- filter (\r -> Remote.uuid r /= NoUUID)
@@ -402,17 +402,18 @@ syncRemotes' ps available =

 commit :: SyncOptions -> CommandStart
 commit o = stopUnless shouldcommit $ starting "commit" ai si $ do
-commitmessage <- maybe commitMsg return (messageOption o)
 Annex.Branch.commit =<< Annex.Branch.commitMessage
+mopts <- concatMap (\msg -> [Param "-m", Param msg])
+<$> if null (messageOption o)
+then (:[]) <$> commitMsg
+else pure (messageOption o)
 next $ do
 showOutput
 let cmode = Git.Branch.ManualCommit
 cquiet <- Git.Branch.CommitQuiet <$> commandProgressDisabled
-void $ inRepo $ Git.Branch.commitCommand cmode cquiet
+void $ inRepo $ Git.Branch.commitCommand
-[ Param "-a"
+cmode cquiet
-, Param "-m"
+([ Param "-a" ] ++ mopts)
-, Param commitmessage
-]
 return True
 where
 shouldcommit = notOnlyAnnex o <&&>
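With messageOption now a list, sync's commit builds one -m parameter per message and only falls back to the generated commitMsg when none were given. A tiny self-contained sketch of that fallback over plain strings; buildMessageOpts is an illustrative name, not git-annex's:

-- One "-m msg" pair per message; fall back to a default when none are given.
buildMessageOpts :: [String] -> String -> [String]
buildMessageOpts userMsgs defaultMsg =
    concatMap (\m -> ["-m", m])
        (if null userMsgs then [defaultMsg] else userMsgs)

main :: IO ()
main = do
    print (buildMessageOpts [] "git-annex in laptop")
    -- ["-m","git-annex in laptop"]
    print (buildMessageOpts ["first paragraph", "second paragraph"] "unused")
    -- ["-m","first paragraph","-m","second paragraph"]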
@@ -426,7 +427,8 @@ commitMsg :: Annex String
 commitMsg = do
 u <- getUUID
 m <- uuidDescMap
-return $ "git-annex in " ++ maybe "unknown" fromUUIDDesc (M.lookup u m)
+return $ "git-annex in "
+++ maybe "unknown" fromUUIDDesc (M.lookup u m)

 mergeLocal :: [Git.Merge.MergeConfig] -> SyncOptions -> CurrBranch -> CommandStart
 mergeLocal mergeconfig o currbranch = stopUnless (notOnlyAnnex o) $
@@ -578,7 +580,7 @@ importRemote importcontent o remote currbranch
 let (branch, subdir) = splitRemoteAnnexTrackingBranchSubdir b
 if canImportKeys remote importcontent
 then do
-Command.Import.seekRemote remote branch subdir importcontent (CheckGitIgnore True) Nothing
+Command.Import.seekRemote remote branch subdir importcontent (CheckGitIgnore True) []
 -- Importing generates a branch
 -- that is not initially connected
 -- to the current branch, so allow
@@ -976,7 +978,7 @@ seekExportContent :: Maybe SyncOptions -> [Remote] -> CurrBranch -> Annex Bool
 seekExportContent o rs (mcurrbranch, madj)
 | null rs = return False
 | otherwise = do
--- Propigate commits from the adjusted branch, so that
+-- Propagate commits from the adjusted branch, so that
 -- when the remoteAnnexTrackingBranch is set to the parent
 -- branch, it will be up-to-date.
 case (mcurrbranch, madj) of
@@ -70,7 +70,7 @@ closeDb (DbHandle _db worker jobs _) = do
 - changes to the database!
 -
 - Note that the action is not run by the calling thread, but by a
-- worker thread. Exceptions are propigated to the calling thread.
+- worker thread. Exceptions are propagated to the calling thread.
 -
 - Only one action can be run at a time against a given DbHandle.
 - If called concurrently in the same process, this will block until
@@ -491,7 +491,7 @@ reconcileStaged dbisnew qh = ifM isBareRepo

 -- How large is large? Too large and there will be a long
 -- delay before the message is shown; too short and the message
--- will clutter things up unncessarily. It's uncommon for 1000
+-- will clutter things up unnecessarily. It's uncommon for 1000
 -- files to change in the index, and processing that many files
 -- takes less than half a second, so that seems about right.
 largediff :: Int
Git.hs | 2
@@ -167,7 +167,7 @@ relPath = adjustPath torel
 p' <- relPathCwdToFile p
 return $ if B.null p' then "." else p'

-{- Adusts the path to a local Repo using the provided function. -}
+{- Adjusts the path to a local Repo using the provided function. -}
 adjustPath :: (RawFilePath -> IO RawFilePath) -> Repo -> IO Repo
 adjustPath f r@(Repo { location = l@(Local { gitdir = d, worktree = w }) }) = do
 d' <- f d
@@ -1,6 +1,6 @@
 {- git branch stuff
 -
-- Copyright 2011-2021 Joey Hess <id@joeyh.name>
+- Copyright 2011-2024 Joey Hess <id@joeyh.name>
 -
 - Licensed under the GNU AGPL version 3 or higher.
 -}
@@ -178,7 +178,7 @@ commit commitmode allowempty message branch parentrefs repo = do
 tree <- writeTree repo
 ifM (cancommit tree)
 ( do
-sha <- commitTree commitmode message parentrefs tree repo
+sha <- commitTree commitmode [message] parentrefs tree repo
 update' branch sha repo
 return $ Just sha
 , return Nothing
@ -207,8 +207,21 @@ writeTreeQuiet repo = extractSha <$> withNullHandle go
|
||||||
go nullh = pipeReadStrict' (\p -> p { std_err = UseHandle nullh })
|
go nullh = pipeReadStrict' (\p -> p { std_err = UseHandle nullh })
|
||||||
[Param "write-tree"] repo
|
[Param "write-tree"] repo
|
||||||
|
|
||||||
commitTree :: CommitMode -> String -> [Ref] -> Ref -> Repo -> IO Sha
|
commitTree :: CommitMode -> [String] -> [Ref] -> Ref -> Repo -> IO Sha
|
||||||
commitTree commitmode message parentrefs tree repo =
|
commitTree commitmode messages parentrefs tree repo =
|
||||||
|
getSha "commit-tree" $ pipeReadStrict ps repo
|
||||||
|
where
|
||||||
|
ps = [Param "commit-tree", Param (fromRef tree)]
|
||||||
|
++ applyCommitModeForCommitTree commitmode baseparams repo
|
||||||
|
baseparams = map Param $
|
||||||
|
concatMap (\r -> ["-p", fromRef r]) parentrefs
|
||||||
|
++ concatMap (\msg -> ["-m", msg]) messages
|
||||||
|
|
||||||
|
-- commitTree passes the commit message to git with -m, which can cause it
|
||||||
|
-- to get modified slightly (eg adding trailing newline). This variant uses
|
||||||
|
-- the exact commit message that is provided.
|
||||||
|
commitTreeExactMessage :: CommitMode -> String -> [Ref] -> Ref -> Repo -> IO Sha
|
||||||
|
commitTreeExactMessage commitmode message parentrefs tree repo =
|
||||||
getSha "commit-tree" $
|
getSha "commit-tree" $
|
||||||
pipeWriteRead ([Param "commit-tree", Param (fromRef tree)] ++ ps)
|
pipeWriteRead ([Param "commit-tree", Param (fromRef tree)] ++ ps)
|
||||||
sendmsg repo
|
sendmsg repo
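For reference, a rough shell sketch of the behaviour difference this works around ($TREE and $PARENT are placeholder object SHAs, not taken from this diff):

```sh
# Each -m becomes its own paragraph, and git may normalize the text slightly
# (for example by ensuring a trailing newline):
git commit-tree "$TREE" -p "$PARENT" -m "first paragraph" -m "second paragraph"

# With no -m, the message is read from stdin and used as-is, which is what
# commitTreeExactMessage relies on when the message must be matched exactly later:
printf 'exact message' | git commit-tree "$TREE" -p "$PARENT"
```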
|
||||||
|
|
|
@ -125,7 +125,7 @@ knownMissing :: FsckResults -> MissingObjects
|
||||||
knownMissing FsckFailed = S.empty
|
knownMissing FsckFailed = S.empty
|
||||||
knownMissing (FsckFoundMissing s _) = s
|
knownMissing (FsckFoundMissing s _) = s
|
||||||
|
|
||||||
{- Finds objects that are missing from the git repsitory, or are corrupt.
|
{- Finds objects that are missing from the git repository, or are corrupt.
|
||||||
-
|
-
|
||||||
- This does not use git cat-file --batch, because catting a corrupt
|
- This does not use git cat-file --batch, because catting a corrupt
|
||||||
- object can cause it to crash, or to report incorrect size information.
|
- object can cause it to crash, or to report incorrect size information.
|
||||||
|
|
2
Logs.hs
|
@ -211,7 +211,7 @@ chunkLogFile config key =
|
||||||
chunkLogExt :: S.ByteString
|
chunkLogExt :: S.ByteString
|
||||||
chunkLogExt = ".log.cnk"
|
chunkLogExt = ".log.cnk"
|
||||||
|
|
||||||
{- The filename of the equivilant keys log for a given key. -}
|
{- The filename of the equivalent keys log for a given key. -}
|
||||||
equivilantKeysLogFile :: GitConfig -> Key -> RawFilePath
|
equivilantKeysLogFile :: GitConfig -> Key -> RawFilePath
|
||||||
equivilantKeysLogFile config key =
|
equivilantKeysLogFile config key =
|
||||||
(branchHashDir config key P.</> keyFile key)
|
(branchHashDir config key P.</> keyFile key)
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
{- Logs listing keys that are equivilant to a key.
|
{- Logs listing keys that are equivalent to a key.
|
||||||
-
|
-
|
||||||
- Copyright 2024 Joey Hess <id@joeyh.name>
|
- Copyright 2024 Joey Hess <id@joeyh.name>
|
||||||
-
|
-
|
||||||
|
|
|
@ -267,7 +267,7 @@ setupConsole = do
|
||||||
hSetBuffering stderr LineBuffering
|
hSetBuffering stderr LineBuffering
|
||||||
#ifdef mingw32_HOST_OS
|
#ifdef mingw32_HOST_OS
|
||||||
{- Avoid outputting CR at end of line on Windows. git commands do
|
{- Avoid outputting CR at end of line on Windows. git commands do
|
||||||
- not ouput CR there. -}
|
- not output CR there. -}
|
||||||
hSetNewlineMode stdout noNewlineTranslation
|
hSetNewlineMode stdout noNewlineTranslation
|
||||||
hSetNewlineMode stderr noNewlineTranslation
|
hSetNewlineMode stderr noNewlineTranslation
|
||||||
#endif
|
#endif
|
||||||
|
@ -353,7 +353,7 @@ mkPrompter = getConcurrency >>= \case
|
||||||
(const $ run a)
|
(const $ run a)
|
||||||
|
|
||||||
{- Catch all (non-async and not ExitCode) exceptions and display,
|
{- Catch all (non-async and not ExitCode) exceptions and display,
|
||||||
- santizing any control characters in the exceptions.
|
- sanitizing any control characters in the exceptions.
|
||||||
-
|
-
|
||||||
- Exits nonzero on exception, so should only be used at topmost level.
|
- Exits nonzero on exception, so should only be used at topmost level.
|
||||||
-}
|
-}
|
||||||
|
|
|
@ -348,7 +348,7 @@ listImportableContentsM serial adir c = adbfind >>= \case
|
||||||
mk _ = Nothing
|
mk _ = Nothing
|
||||||
|
|
||||||
-- This does not guard against every possible race. As long as the adb
|
-- This does not guard against every possible race. As long as the adb
|
||||||
-- connection is resonably fast, it's probably as good as
|
-- connection is reasonably fast, it's probably as good as
|
||||||
-- git's handling of similar situations with files being modified while
|
-- git's handling of similar situations with files being modified while
|
||||||
-- it's updating the working tree for a merge.
|
-- it's updating the working tree for a merge.
|
||||||
retrieveExportWithContentIdentifierM :: AndroidSerial -> AndroidPath -> ExportLocation -> [ContentIdentifier] -> FilePath -> Either Key (Annex Key) -> MeterUpdate -> Annex (Key, Verification)
|
retrieveExportWithContentIdentifierM :: AndroidSerial -> AndroidPath -> ExportLocation -> [ContentIdentifier] -> FilePath -> Either Key (Annex Key) -> MeterUpdate -> Annex (Key, Verification)
|
||||||
|
|
|
@ -413,7 +413,7 @@ mkContentIdentifier (IgnoreInodes ii) f st =
|
||||||
|
|
||||||
-- Since ignoreinodes can be changed by enableremote, and since previous
|
-- Since ignoreinodes can be changed by enableremote, and since previous
|
||||||
-- versions of git-annex ignored inodes by default, treat two content
|
-- versions of git-annex ignored inodes by default, treat two content
|
||||||
-- idenfiers as the same if they differ only by one having the inode
|
-- identifiers as the same if they differ only by one having the inode
|
||||||
-- ignored.
|
-- ignored.
|
||||||
guardSameContentIdentifiers :: a -> [ContentIdentifier] -> Maybe ContentIdentifier -> a
|
guardSameContentIdentifiers :: a -> [ContentIdentifier] -> Maybe ContentIdentifier -> a
|
||||||
guardSameContentIdentifiers _ _ Nothing = giveup "file not found"
|
guardSameContentIdentifiers _ _ Nothing = giveup "file not found"
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
{- External special remote interface.
|
{- External special remote interface.
|
||||||
-
|
-
|
||||||
- Copyright 2013-2022 Joey Hess <id@joeyh.name>
|
- Copyright 2013-2024 Joey Hess <id@joeyh.name>
|
||||||
-
|
-
|
||||||
- Licensed under the GNU AGPL version 3 or higher.
|
- Licensed under the GNU AGPL version 3 or higher.
|
||||||
-}
|
-}
|
||||||
|
@ -9,7 +9,7 @@
|
||||||
{-# LANGUAGE BangPatterns #-}
|
{-# LANGUAGE BangPatterns #-}
|
||||||
{-# LANGUAGE RankNTypes #-}
|
{-# LANGUAGE RankNTypes #-}
|
||||||
|
|
||||||
module Remote.External (remote) where
|
module Remote.External where
|
||||||
|
|
||||||
import Remote.External.Types
|
import Remote.External.Types
|
||||||
import Remote.External.AsyncExtension
|
import Remote.External.AsyncExtension
|
||||||
|
@ -48,10 +48,10 @@ remote :: RemoteType
|
||||||
remote = specialRemoteType $ RemoteType
|
remote = specialRemoteType $ RemoteType
|
||||||
{ typename = "external"
|
{ typename = "external"
|
||||||
, enumerate = const (findSpecialRemotes "externaltype")
|
, enumerate = const (findSpecialRemotes "externaltype")
|
||||||
, generate = gen
|
, generate = gen remote Nothing
|
||||||
, configParser = remoteConfigParser
|
, configParser = remoteConfigParser Nothing
|
||||||
, setup = externalSetup
|
, setup = externalSetup Nothing Nothing
|
||||||
, exportSupported = checkExportSupported
|
, exportSupported = checkExportSupported Nothing
|
||||||
, importSupported = importUnsupported
|
, importSupported = importUnsupported
|
||||||
, thirdPartyPopulated = False
|
, thirdPartyPopulated = False
|
||||||
}
|
}
|
||||||
|
@ -62,15 +62,15 @@ externaltypeField = Accepted "externaltype"
|
||||||
readonlyField :: RemoteConfigField
|
readonlyField :: RemoteConfigField
|
||||||
readonlyField = Accepted "readonly"
|
readonlyField = Accepted "readonly"
|
||||||
|
|
||||||
gen :: Git.Repo -> UUID -> RemoteConfig -> RemoteGitConfig -> RemoteStateHandle -> Annex (Maybe Remote)
|
gen :: RemoteType -> Maybe ExternalProgram -> Git.Repo -> UUID -> RemoteConfig -> RemoteGitConfig -> RemoteStateHandle -> Annex (Maybe Remote)
|
||||||
gen r u rc gc rs
|
gen rt externalprogram r u rc gc rs
|
||||||
-- readonly mode only downloads urls; does not use external program
|
-- readonly mode only downloads urls; does not use external program
|
||||||
| externaltype == "readonly" = do
|
| externalprogram' == ExternalType "readonly" = do
|
||||||
c <- parsedRemoteConfig remote rc
|
c <- parsedRemoteConfig remote rc
|
||||||
cst <- remoteCost gc c expensiveRemoteCost
|
cst <- remoteCost gc c expensiveRemoteCost
|
||||||
let rmt = mk c cst (pure GloballyAvailable)
|
let rmt = mk c cst (pure GloballyAvailable)
|
||||||
Nothing
|
Nothing
|
||||||
(externalInfo externaltype)
|
(externalInfo externalprogram')
|
||||||
Nothing
|
Nothing
|
||||||
Nothing
|
Nothing
|
||||||
exportUnsupported
|
exportUnsupported
|
||||||
|
@ -83,7 +83,7 @@ gen r u rc gc rs
|
||||||
rmt
|
rmt
|
||||||
| otherwise = do
|
| otherwise = do
|
||||||
c <- parsedRemoteConfig remote rc
|
c <- parsedRemoteConfig remote rc
|
||||||
external <- newExternal externaltype (Just u) c (Just gc)
|
external <- newExternal externalprogram' (Just u) c (Just gc)
|
||||||
(Git.remoteName r) (Just rs)
|
(Git.remoteName r) (Just rs)
|
||||||
Annex.addCleanupAction (RemoteCleanup u) $ stopExternal external
|
Annex.addCleanupAction (RemoteCleanup u) $ stopExternal external
|
||||||
cst <- getCost external r gc c
|
cst <- getCost external r gc c
|
||||||
|
@ -150,21 +150,29 @@ gen r u rc gc rs
|
||||||
, appendonly = False
|
, appendonly = False
|
||||||
, untrustworthy = False
|
, untrustworthy = False
|
||||||
, availability = avail
|
, availability = avail
|
||||||
, remotetype = remote
|
, remotetype = rt
|
||||||
{ exportSupported = cheapexportsupported }
|
{ exportSupported = cheapexportsupported }
|
||||||
, mkUnavailable = gen r u rc
|
, mkUnavailable =
|
||||||
(gc { remoteAnnexExternalType = Just "!dne!" }) rs
|
let dneprogram = case externalprogram of
|
||||||
|
Just (ExternalCommand _ _) -> Just (ExternalType "!dne!")
|
||||||
|
_ -> Nothing
|
||||||
|
dnegc = gc { remoteAnnexExternalType = Just "!dne!" }
|
||||||
|
in gen rt dneprogram r u rc dnegc rs
|
||||||
, getInfo = togetinfo
|
, getInfo = togetinfo
|
||||||
, claimUrl = toclaimurl
|
, claimUrl = toclaimurl
|
||||||
, checkUrl = tocheckurl
|
, checkUrl = tocheckurl
|
||||||
, remoteStateHandle = rs
|
, remoteStateHandle = rs
|
||||||
}
|
}
|
||||||
externaltype = fromMaybe (giveup "missing externaltype") (remoteAnnexExternalType gc)
|
externalprogram' = case externalprogram of
|
||||||
|
Just p -> p
|
||||||
|
Nothing -> ExternalType $
|
||||||
|
fromMaybe (giveup "missing externaltype")
|
||||||
|
(remoteAnnexExternalType gc)
|
||||||
|
|
||||||
externalSetup :: SetupStage -> Maybe UUID -> Maybe CredPair -> RemoteConfig -> RemoteGitConfig -> Annex (RemoteConfig, UUID)
|
externalSetup :: Maybe ExternalProgram -> Maybe (String, String) -> SetupStage -> Maybe UUID -> Maybe CredPair -> RemoteConfig -> RemoteGitConfig -> Annex (RemoteConfig, UUID)
|
||||||
externalSetup _ mu _ c gc = do
|
externalSetup externalprogram setgitconfig _ mu _ c gc = do
|
||||||
u <- maybe (liftIO genUUID) return mu
|
u <- maybe (liftIO genUUID) return mu
|
||||||
pc <- either giveup return $ parseRemoteConfig c lenientRemoteConfigParser
|
pc <- either giveup return $ parseRemoteConfig c (lenientRemoteConfigParser externalprogram)
|
||||||
let readonlyconfig = getRemoteConfigValue readonlyField pc == Just True
|
let readonlyconfig = getRemoteConfigValue readonlyField pc == Just True
|
||||||
let externaltype = if readonlyconfig
|
let externaltype = if readonlyconfig
|
||||||
then "readonly"
|
then "readonly"
|
||||||
|
@ -181,8 +189,9 @@ externalSetup _ mu _ c gc = do
|
||||||
setConfig (remoteAnnexConfig (fromJust (lookupName c)) "readonly") (boolConfig True)
|
setConfig (remoteAnnexConfig (fromJust (lookupName c)) "readonly") (boolConfig True)
|
||||||
return c'
|
return c'
|
||||||
else do
|
else do
|
||||||
pc' <- either giveup return $ parseRemoteConfig c' lenientRemoteConfigParser
|
pc' <- either giveup return $ parseRemoteConfig c' (lenientRemoteConfigParser externalprogram)
|
||||||
external <- newExternal externaltype (Just u) pc' (Just gc) Nothing Nothing
|
let p = fromMaybe (ExternalType externaltype) externalprogram
|
||||||
|
external <- newExternal p (Just u) pc' (Just gc) Nothing Nothing
|
||||||
-- Now that we have an external, ask it to LISTCONFIGS,
|
-- Now that we have an external, ask it to LISTCONFIGS,
|
||||||
-- and re-parse the RemoteConfig strictly, so we can
|
-- and re-parse the RemoteConfig strictly, so we can
|
||||||
-- error out if the user provided an unexpected config.
|
-- error out if the user provided an unexpected config.
|
||||||
|
@ -200,17 +209,20 @@ externalSetup _ mu _ c gc = do
|
||||||
liftIO . atomically . readTMVar . externalConfigChanges
|
liftIO . atomically . readTMVar . externalConfigChanges
|
||||||
return (changes c')
|
return (changes c')
|
||||||
|
|
||||||
gitConfigSpecialRemote u c'' [("externaltype", externaltype)]
|
gitConfigSpecialRemote u c''
|
||||||
|
[ fromMaybe ("externaltype", externaltype) setgitconfig ]
|
||||||
return (M.delete readonlyField c'', u)
|
return (M.delete readonlyField c'', u)
|
||||||
|
|
||||||
checkExportSupported :: ParsedRemoteConfig -> RemoteGitConfig -> Annex Bool
|
checkExportSupported :: Maybe ExternalProgram -> ParsedRemoteConfig -> RemoteGitConfig -> Annex Bool
|
||||||
checkExportSupported c gc = do
|
checkExportSupported Nothing c gc = do
|
||||||
let externaltype = fromMaybe (giveup "Specify externaltype=") $
|
let externaltype = fromMaybe (giveup "Specify externaltype=") $
|
||||||
remoteAnnexExternalType gc <|> getRemoteConfigValue externaltypeField c
|
remoteAnnexExternalType gc <|> getRemoteConfigValue externaltypeField c
|
||||||
if externaltype == "readonly"
|
if externaltype == "readonly"
|
||||||
then return False
|
then return False
|
||||||
else checkExportSupported'
|
else checkExportSupported (Just (ExternalType externaltype)) c gc
|
||||||
=<< newExternal externaltype Nothing c (Just gc) Nothing Nothing
|
checkExportSupported (Just externalprogram) c gc =
|
||||||
|
checkExportSupported'
|
||||||
|
=<< newExternal externalprogram Nothing c (Just gc) Nothing Nothing
|
||||||
|
|
||||||
checkExportSupported' :: External -> Annex Bool
|
checkExportSupported' :: External -> Annex Bool
|
||||||
checkExportSupported' external = go `catchNonAsync` (const (return False))
|
checkExportSupported' external = go `catchNonAsync` (const (return False))
|
||||||
|
@ -658,7 +670,7 @@ startExternal' external = do
|
||||||
n <- succ <$> readTVar (externalLastPid external)
|
n <- succ <$> readTVar (externalLastPid external)
|
||||||
writeTVar (externalLastPid external) n
|
writeTVar (externalLastPid external) n
|
||||||
return n
|
return n
|
||||||
AddonProcess.startExternalAddonProcess basecmd pid >>= \case
|
AddonProcess.startExternalAddonProcess externalcmd externalparams pid >>= \case
|
||||||
Left (AddonProcess.ProgramFailure err) -> do
|
Left (AddonProcess.ProgramFailure err) -> do
|
||||||
unusable err
|
unusable err
|
||||||
Left (AddonProcess.ProgramNotInstalled err) ->
|
Left (AddonProcess.ProgramNotInstalled err) ->
|
||||||
|
@ -666,8 +678,8 @@ startExternal' external = do
|
||||||
(Just rname, Just True) -> unusable $ unlines
|
(Just rname, Just True) -> unusable $ unlines
|
||||||
[ err
|
[ err
|
||||||
, "This remote has annex-readonly=true, and previous versions of"
|
, "This remote has annex-readonly=true, and previous versions of"
|
||||||
, "git-annex would tried to download from it without"
|
, "git-annex would try to download from it without"
|
||||||
, "installing " ++ basecmd ++ ". If you want that, you need to set:"
|
, "installing " ++ externalcmd ++ ". If you want that, you need to set:"
|
||||||
, "git config remote." ++ rname ++ ".annex-externaltype readonly"
|
, "git config remote." ++ rname ++ ".annex-externaltype readonly"
|
||||||
]
|
]
|
||||||
_ -> unusable err
|
_ -> unusable err
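Concretely, for a remote named e.g. myremote (the name is illustrative), that error message asks the user to run:

```sh
git config remote.myremote.annex-externaltype readonly
```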
|
||||||
|
@ -686,7 +698,9 @@ startExternal' external = do
|
||||||
extensions <- startproto st
|
extensions <- startproto st
|
||||||
return (st, extensions)
|
return (st, extensions)
|
||||||
where
|
where
|
||||||
basecmd = "git-annex-remote-" ++ externalType external
|
(externalcmd, externalparams) = case externalProgram external of
|
||||||
|
ExternalType t -> ("git-annex-remote-" ++ t, [])
|
||||||
|
ExternalCommand c ps -> (c, ps)
|
||||||
startproto st = do
|
startproto st = do
|
||||||
receiveMessage st external
|
receiveMessage st external
|
||||||
(const Nothing)
|
(const Nothing)
|
||||||
|
@ -707,13 +721,13 @@ startExternal' external = do
|
||||||
case filter (`notElem` fromExtensionList supportedExtensionList) (fromExtensionList exwanted) of
|
case filter (`notElem` fromExtensionList supportedExtensionList) (fromExtensionList exwanted) of
|
||||||
[] -> return exwanted
|
[] -> return exwanted
|
||||||
exrest -> unusable $ unwords $
|
exrest -> unusable $ unwords $
|
||||||
[ basecmd
|
[ externalcmd
|
||||||
, "requested extensions that this version of git-annex does not support:"
|
, "requested extensions that this version of git-annex does not support:"
|
||||||
] ++ exrest
|
] ++ exrest
|
||||||
|
|
||||||
unusable msg = do
|
unusable msg = do
|
||||||
warning (UnquotedString msg)
|
warning (UnquotedString msg)
|
||||||
giveup ("unable to use external special remote " ++ basecmd)
|
giveup ("unable to use external special remote " ++ externalcmd)
|
||||||
|
|
||||||
stopExternal :: External -> Annex ()
|
stopExternal :: External -> Annex ()
|
||||||
stopExternal external = liftIO $ do
|
stopExternal external = liftIO $ do
|
||||||
|
@ -825,12 +839,13 @@ getWebUrls key = filter supported <$> getUrls key
|
||||||
where
|
where
|
||||||
supported u = snd (getDownloader u) == WebDownloader
|
supported u = snd (getDownloader u) == WebDownloader
|
||||||
|
|
||||||
externalInfo :: ExternalType -> Annex [(String, String)]
|
externalInfo :: ExternalProgram -> Annex [(String, String)]
|
||||||
externalInfo et = return [("externaltype", et)]
|
externalInfo (ExternalType et) = return [("externaltype", et)]
|
||||||
|
externalInfo (ExternalCommand _ _) = return []
|
||||||
|
|
||||||
getInfoM :: External -> Annex [(String, String)]
|
getInfoM :: External -> Annex [(String, String)]
|
||||||
getInfoM external = (++)
|
getInfoM external = (++)
|
||||||
<$> externalInfo (externalType external)
|
<$> externalInfo (externalProgram external)
|
||||||
<*> handleRequest external GETINFO Nothing (collect [])
|
<*> handleRequest external GETINFO Nothing (collect [])
|
||||||
where
|
where
|
||||||
collect l req = case req of
|
collect l req = case req of
|
||||||
|
@ -847,34 +862,41 @@ getInfoM external = (++)
|
||||||
|
|
||||||
{- All unknown configs are passed through in case the external program
|
{- All unknown configs are passed through in case the external program
|
||||||
- uses them. -}
|
- uses them. -}
|
||||||
lenientRemoteConfigParser :: RemoteConfigParser
|
lenientRemoteConfigParser :: Maybe ExternalProgram -> RemoteConfigParser
|
||||||
lenientRemoteConfigParser =
|
lenientRemoteConfigParser externalprogram =
|
||||||
addRemoteConfigParser specialRemoteConfigParsers baseRemoteConfigParser
|
addRemoteConfigParser specialRemoteConfigParsers (baseRemoteConfigParser externalprogram)
|
||||||
|
|
||||||
baseRemoteConfigParser :: RemoteConfigParser
|
baseRemoteConfigParser :: Maybe ExternalProgram -> RemoteConfigParser
|
||||||
baseRemoteConfigParser = RemoteConfigParser
|
baseRemoteConfigParser externalprogram = RemoteConfigParser
|
||||||
{ remoteConfigFieldParsers =
|
{ remoteConfigFieldParsers = if isJust extcommand
|
||||||
[ optionalStringParser externaltypeField
|
then []
|
||||||
(FieldDesc "type of external special remote to use")
|
else
|
||||||
, trueFalseParser readonlyField (Just False)
|
[ optionalStringParser externaltypeField
|
||||||
(FieldDesc "enable readonly mode")
|
(FieldDesc "type of external special remote to use")
|
||||||
]
|
, trueFalseParser readonlyField (Just False)
|
||||||
|
(FieldDesc "enable readonly mode")
|
||||||
|
]
|
||||||
, remoteConfigRestPassthrough = Just
|
, remoteConfigRestPassthrough = Just
|
||||||
( const True
|
( const True
|
||||||
, [("*", FieldDesc "all other parameters are passed to external special remote program")]
|
, [("*", FieldDesc $ "all other parameters are passed to " ++ fromMaybe "external special remote program" extcommand)]
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
where
|
||||||
|
extcommand = case externalprogram of
|
||||||
|
Just (ExternalCommand c _) -> Just c
|
||||||
|
_ -> Nothing
|
||||||
|
|
||||||
{- When the remote supports LISTCONFIGS, only accept the ones it listed.
|
{- When the remote supports LISTCONFIGS, only accept the ones it listed.
|
||||||
- When it does not, accept all configs. -}
|
- When it does not, accept all configs. -}
|
||||||
strictRemoteConfigParser :: External -> Annex RemoteConfigParser
|
strictRemoteConfigParser :: External -> Annex RemoteConfigParser
|
||||||
strictRemoteConfigParser external = listConfigs external >>= \case
|
strictRemoteConfigParser external = listConfigs external >>= \case
|
||||||
Nothing -> return lenientRemoteConfigParser
|
Nothing -> return lcp
|
||||||
Just l -> do
|
Just l -> do
|
||||||
let s = S.fromList (map fst l)
|
let s = S.fromList (map fst l)
|
||||||
let listed f = S.member (fromProposedAccepted f) s
|
let listed f = S.member (fromProposedAccepted f) s
|
||||||
return $ lenientRemoteConfigParser
|
return $ lcp { remoteConfigRestPassthrough = Just (listed, l) }
|
||||||
{ remoteConfigRestPassthrough = Just (listed, l) }
|
where
|
||||||
|
lcp = lenientRemoteConfigParser (Just (externalProgram external))
|
||||||
|
|
||||||
listConfigs :: External -> Annex (Maybe [(Setting, FieldDesc)])
|
listConfigs :: External -> Annex (Maybe [(Setting, FieldDesc)])
|
||||||
listConfigs external = handleRequest external LISTCONFIGS Nothing (collect [])
|
listConfigs external = handleRequest external LISTCONFIGS Nothing (collect [])
|
||||||
|
@ -886,20 +908,21 @@ listConfigs external = handleRequest external LISTCONFIGS Nothing (collect [])
|
||||||
UNSUPPORTED_REQUEST -> result Nothing
|
UNSUPPORTED_REQUEST -> result Nothing
|
||||||
_ -> Nothing
|
_ -> Nothing
|
||||||
|
|
||||||
remoteConfigParser :: RemoteConfig -> Annex RemoteConfigParser
|
remoteConfigParser :: Maybe ExternalProgram -> RemoteConfig -> Annex RemoteConfigParser
|
||||||
remoteConfigParser c
|
remoteConfigParser externalprogram c
|
||||||
-- No need to start the external when there is no config to parse,
|
-- No need to start the external when there is no config to parse,
|
||||||
-- or when everything in the config was already accepted; in those
|
-- or when everything in the config was already accepted; in those
|
||||||
-- cases the lenient parser will do the same thing as the strict
|
-- cases the lenient parser will do the same thing as the strict
|
||||||
-- parser.
|
-- parser.
|
||||||
| M.null (M.filter isproposed c) = return lenientRemoteConfigParser
|
| M.null (M.filter isproposed c) = return (lenientRemoteConfigParser externalprogram)
|
||||||
| otherwise = case parseRemoteConfig c baseRemoteConfigParser of
|
| otherwise = case parseRemoteConfig c (baseRemoteConfigParser externalprogram) of
|
||||||
Left _ -> return lenientRemoteConfigParser
|
Left _ -> return (lenientRemoteConfigParser externalprogram)
|
||||||
Right pc -> case (getRemoteConfigValue externaltypeField pc, getRemoteConfigValue readonlyField pc) of
|
Right pc -> case (getRemoteConfigValue externaltypeField pc, getRemoteConfigValue readonlyField pc) of
|
||||||
(Nothing, _) -> return lenientRemoteConfigParser
|
(Nothing, _) -> return (lenientRemoteConfigParser externalprogram)
|
||||||
(_, Just True) -> return lenientRemoteConfigParser
|
(_, Just True) -> return (lenientRemoteConfigParser externalprogram)
|
||||||
(Just externaltype, _) -> do
|
(Just externaltype, _) -> do
|
||||||
external <- newExternal externaltype Nothing pc Nothing Nothing Nothing
|
let p = fromMaybe (ExternalType externaltype) externalprogram
|
||||||
|
external <- newExternal p Nothing pc Nothing Nothing Nothing
|
||||||
strictRemoteConfigParser external
|
strictRemoteConfigParser external
|
||||||
where
|
where
|
||||||
isproposed (Accepted _) = False
|
isproposed (Accepted _) = False
|
||||||
|
|
19
Remote/External/Types.hs
vendored
|
@ -1,6 +1,6 @@
|
||||||
{- External special remote data types.
|
{- External special remote data types.
|
||||||
-
|
-
|
||||||
- Copyright 2013-2020 Joey Hess <id@joeyh.name>
|
- Copyright 2013-2024 Joey Hess <id@joeyh.name>
|
||||||
-
|
-
|
||||||
- Licensed under the GNU AGPL version 3 or higher.
|
- Licensed under the GNU AGPL version 3 or higher.
|
||||||
-}
|
-}
|
||||||
|
@ -12,7 +12,7 @@
|
||||||
module Remote.External.Types (
|
module Remote.External.Types (
|
||||||
External(..),
|
External(..),
|
||||||
newExternal,
|
newExternal,
|
||||||
ExternalType,
|
ExternalProgram(..),
|
||||||
ExternalState(..),
|
ExternalState(..),
|
||||||
PrepareStatus(..),
|
PrepareStatus(..),
|
||||||
ExtensionList(..),
|
ExtensionList(..),
|
||||||
|
@ -64,7 +64,7 @@ import Text.Read
|
||||||
import qualified Data.ByteString.Short as S (fromShort)
|
import qualified Data.ByteString.Short as S (fromShort)
|
||||||
|
|
||||||
data External = External
|
data External = External
|
||||||
{ externalType :: ExternalType
|
{ externalProgram :: ExternalProgram
|
||||||
, externalUUID :: Maybe UUID
|
, externalUUID :: Maybe UUID
|
||||||
, externalState :: TVar [ExternalState]
|
, externalState :: TVar [ExternalState]
|
||||||
-- ^ Contains states for external special remote processes
|
-- ^ Contains states for external special remote processes
|
||||||
|
@ -77,9 +77,9 @@ data External = External
|
||||||
, externalAsync :: TMVar ExternalAsync
|
, externalAsync :: TMVar ExternalAsync
|
||||||
}
|
}
|
||||||
|
|
||||||
newExternal :: ExternalType -> Maybe UUID -> ParsedRemoteConfig -> Maybe RemoteGitConfig -> Maybe RemoteName -> Maybe RemoteStateHandle -> Annex External
|
newExternal :: ExternalProgram -> Maybe UUID -> ParsedRemoteConfig -> Maybe RemoteGitConfig -> Maybe RemoteName -> Maybe RemoteStateHandle -> Annex External
|
||||||
newExternal externaltype u c gc rn rs = liftIO $ External
|
newExternal p u c gc rn rs = liftIO $ External
|
||||||
<$> pure externaltype
|
<$> pure p
|
||||||
<*> pure u
|
<*> pure u
|
||||||
<*> atomically (newTVar [])
|
<*> atomically (newTVar [])
|
||||||
<*> atomically (newTVar 0)
|
<*> atomically (newTVar 0)
|
||||||
|
@ -89,7 +89,12 @@ newExternal externaltype u c gc rn rs = liftIO $ External
|
||||||
<*> pure rs
|
<*> pure rs
|
||||||
<*> atomically (newTMVar UncheckedExternalAsync)
|
<*> atomically (newTMVar UncheckedExternalAsync)
|
||||||
|
|
||||||
type ExternalType = String
|
data ExternalProgram
|
||||||
|
= ExternalType String
|
||||||
|
-- ^ "git-annex-remote-" is prepended to this to get the program
|
||||||
|
| ExternalCommand String [CommandParam]
|
||||||
|
-- ^ to use a program with a different name, and parameters
|
||||||
|
deriving (Show, Eq)
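In terms of what ends up being executed, the two constructors roughly correspond to the following invocations (program names illustrative):

```sh
# ExternalType "foo"                          -> the conventionally named program is run:
git-annex-remote-foo

# ExternalCommand "rclone" [Param "gitannex"] -> the given command is run with its parameters:
rclone gitannex
```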
|
||||||
|
|
||||||
data ExternalState = ExternalState
|
data ExternalState = ExternalState
|
||||||
{ externalSend :: forall t. (Proto.Sendable t, ToAsyncWrapped t) => t -> IO ()
|
{ externalSend :: forall t. (Proto.Sendable t, ToAsyncWrapped t) => t -> IO ()
|
||||||
|
|
|
@ -37,6 +37,7 @@ import qualified Remote.Ddar
|
||||||
import qualified Remote.GitLFS
|
import qualified Remote.GitLFS
|
||||||
import qualified Remote.HttpAlso
|
import qualified Remote.HttpAlso
|
||||||
import qualified Remote.Borg
|
import qualified Remote.Borg
|
||||||
|
import qualified Remote.Rclone
|
||||||
import qualified Remote.Hook
|
import qualified Remote.Hook
|
||||||
import qualified Remote.External
|
import qualified Remote.External
|
||||||
|
|
||||||
|
@ -59,6 +60,7 @@ remoteTypes = map adjustExportImportRemoteType
|
||||||
, Remote.GitLFS.remote
|
, Remote.GitLFS.remote
|
||||||
, Remote.HttpAlso.remote
|
, Remote.HttpAlso.remote
|
||||||
, Remote.Borg.remote
|
, Remote.Borg.remote
|
||||||
|
, Remote.Rclone.remote
|
||||||
, Remote.Hook.remote
|
, Remote.Hook.remote
|
||||||
, Remote.External.remote
|
, Remote.External.remote
|
||||||
]
|
]
|
||||||
|
|
31
Remote/Rclone.hs
Normal file
|
@ -0,0 +1,31 @@
|
||||||
|
{- Rclone special remote, using "rclone gitannex"
|
||||||
|
-
|
||||||
|
- Copyright 2024 Joey Hess <id@joeyh.name>
|
||||||
|
-
|
||||||
|
- Licensed under the GNU AGPL version 3 or higher.
|
||||||
|
-}
|
||||||
|
|
||||||
|
module Remote.Rclone (remote) where
|
||||||
|
|
||||||
|
import Types
|
||||||
|
import Types.Remote
|
||||||
|
import Remote.Helper.Special
|
||||||
|
import Remote.Helper.ExportImport
|
||||||
|
import Utility.SafeCommand
|
||||||
|
import qualified Remote.External as External
|
||||||
|
import Remote.External.Types
|
||||||
|
|
||||||
|
remote :: RemoteType
|
||||||
|
remote = specialRemoteType $ RemoteType
|
||||||
|
{ typename = "rclone"
|
||||||
|
, enumerate = const (findSpecialRemotes "rclone")
|
||||||
|
, generate = External.gen remote p
|
||||||
|
, configParser = External.remoteConfigParser p
|
||||||
|
, setup = External.externalSetup p setgitconfig
|
||||||
|
, exportSupported = External.checkExportSupported p
|
||||||
|
, importSupported = importUnsupported
|
||||||
|
, thirdPartyPopulated = False
|
||||||
|
}
|
||||||
|
where
|
||||||
|
p = Just $ ExternalCommand "rclone" [Param "gitannex"]
|
||||||
|
setgitconfig = Just ("rclone", "true")
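A sketch of how a remote of this type might be created from the command line; the remote name is illustrative, and any rclone-specific parameters (whatever `rclone gitannex` advertises via LISTCONFIGS) are omitted:

```sh
# Assumes an rclone new enough to provide the "rclone gitannex" subcommand.
git annex initremote myrclone type=rclone encryption=none
# git-annex records remote.myrclone.annex-rclone=true (the setgitconfig above), so the
# remote is later enumerated via findSpecialRemotes "rclone" and started as
# "rclone gitannex" rather than as a git-annex-remote-* program.
```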
|
|
@ -136,6 +136,7 @@ data GitConfig = GitConfig
|
||||||
, annexAllowedIPAddresses :: String
|
, annexAllowedIPAddresses :: String
|
||||||
, annexAllowUnverifiedDownloads :: Bool
|
, annexAllowUnverifiedDownloads :: Bool
|
||||||
, annexMaxExtensionLength :: Maybe Int
|
, annexMaxExtensionLength :: Maybe Int
|
||||||
|
, annexMaxExtensions :: Maybe Int
|
||||||
, annexJobs :: Concurrency
|
, annexJobs :: Concurrency
|
||||||
, annexCacheCreds :: Bool
|
, annexCacheCreds :: Bool
|
||||||
, annexAutoUpgradeRepository :: Bool
|
, annexAutoUpgradeRepository :: Bool
|
||||||
|
@ -244,6 +245,7 @@ extractGitConfig configsource r = GitConfig
|
||||||
, annexAllowUnverifiedDownloads = (== Just "ACKTHPPT") $
|
, annexAllowUnverifiedDownloads = (== Just "ACKTHPPT") $
|
||||||
getmaybe (annexConfig "security.allow-unverified-downloads")
|
getmaybe (annexConfig "security.allow-unverified-downloads")
|
||||||
, annexMaxExtensionLength = getmayberead (annexConfig "maxextensionlength")
|
, annexMaxExtensionLength = getmayberead (annexConfig "maxextensionlength")
|
||||||
|
, annexMaxExtensions = getmayberead (annexConfig "maxextensions")
|
||||||
, annexJobs = fromMaybe NonConcurrent $
|
, annexJobs = fromMaybe NonConcurrent $
|
||||||
parseConcurrency =<< getmaybe (annexConfig "jobs")
|
parseConcurrency =<< getmaybe (annexConfig "jobs")
|
||||||
, annexCacheCreds = getbool (annexConfig "cachecreds") True
|
, annexCacheCreds = getbool (annexConfig "cachecreds") True
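The new annexMaxExtensions field above is read from a plain git config key via annexConfig, alongside the existing annexMaxExtensionLength; a sketch of setting the two (values illustrative, see the git-annex man page for the exact semantics):

```sh
git config annex.maxextensionlength 5   # allow longer extensions such as .abcde
git config annex.maxextensions 2        # cap how many trailing extensions the *E backends keep
```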
|
||||||
|
|
|
@ -60,7 +60,7 @@ data KeyData = Key
|
||||||
|
|
||||||
instance NFData KeyData
|
instance NFData KeyData
|
||||||
|
|
||||||
{- Caching the seralization of a key is an optimization.
|
{- Caching the serialization of a key is an optimization.
|
||||||
-
|
-
|
||||||
- This constructor is not exported, and all smart constructors maintain
|
- This constructor is not exported, and all smart constructors maintain
|
||||||
- the serialization.
|
- the serialization.
|
||||||
|
|
|
@ -27,9 +27,9 @@ data BwRate = BwRate ByteSize Duration
|
||||||
|
|
||||||
-- Parse eg, "0KiB/60s"
|
-- Parse eg, "0KiB/60s"
|
||||||
--
|
--
|
||||||
-- Also, it can be set to "true" (or other git config equivilants)
|
-- Also, it can be set to "true" (or other git config equivalents)
|
||||||
-- to enable ProbeStallDetection.
|
-- to enable ProbeStallDetection.
|
||||||
-- And "false" (and other git config equivilants) explicitly
|
-- And "false" (and other git config equivalents) explicitly
|
||||||
-- disable stall detection.
|
-- disable stall detection.
|
||||||
parseStallDetection :: String -> Either String StallDetection
|
parseStallDetection :: String -> Either String StallDetection
|
||||||
parseStallDetection s = case isTrueFalse s of
|
parseStallDetection s = case isTrueFalse s of
|
||||||
|
|
|
@ -69,7 +69,7 @@ mkGpgCmd Nothing = GpgCmd (fromMaybe "gpg" BuildInfo.gpg)
|
||||||
boolGpgCmd :: GpgCmd -> [CommandParam] -> IO Bool
|
boolGpgCmd :: GpgCmd -> [CommandParam] -> IO Bool
|
||||||
boolGpgCmd (GpgCmd cmd) = boolSystem cmd
|
boolGpgCmd (GpgCmd cmd) = boolSystem cmd
|
||||||
|
|
||||||
-- Generate an argument list to asymetrically encrypt to the given recipients.
|
-- Generate an argument list to asymmetrically encrypt to the given recipients.
|
||||||
pkEncTo :: [String] -> [CommandParam]
|
pkEncTo :: [String] -> [CommandParam]
|
||||||
pkEncTo = concatMap (\r -> [Param "--recipient", Param r])
|
pkEncTo = concatMap (\r -> [Param "--recipient", Param r])
|
||||||
|
|
||||||
|
|
|
@ -253,7 +253,7 @@ describeMatchResult descop l prefix = Just $
|
||||||
go (MatchedOpen:rest) = "(" : go rest
|
go (MatchedOpen:rest) = "(" : go rest
|
||||||
go (MatchedClose:rest) = ")" : go rest
|
go (MatchedClose:rest) = ")" : go rest
|
||||||
|
|
||||||
-- Remove unncessary outermost parens
|
-- Remove unnecessary outermost parens
|
||||||
simplify True (MatchedOpen:rest) = case lastMaybe rest of
|
simplify True (MatchedOpen:rest) = case lastMaybe rest of
|
||||||
Just MatchedClose -> simplify False (dropFromEnd 1 rest)
|
Just MatchedClose -> simplify False (dropFromEnd 1 rest)
|
||||||
_ -> simplify False rest
|
_ -> simplify False rest
|
||||||
|
|
|
@ -48,7 +48,7 @@ absPathFrom dir path = simplifyPath (combine dir path)
|
||||||
- already exists. -}
|
- already exists. -}
|
||||||
absPath :: RawFilePath -> IO RawFilePath
|
absPath :: RawFilePath -> IO RawFilePath
|
||||||
absPath file
|
absPath file
|
||||||
-- Avoid unncessarily getting the current directory when the path
|
-- Avoid unnecessarily getting the current directory when the path
|
||||||
-- is already absolute. absPathFrom uses simplifyPath
|
-- is already absolute. absPathFrom uses simplifyPath
|
||||||
-- so also used here for consistency.
|
-- so also used here for consistency.
|
||||||
| isAbsolute file = return $ simplifyPath file
|
| isAbsolute file = return $ simplifyPath file
|
||||||
|
|
|
@ -32,7 +32,7 @@ import Prelude
|
||||||
- characters, except for ones in surrogate plane. Converting a string that
|
- characters, except for ones in surrogate plane. Converting a string that
|
||||||
- does contain other unicode characters to a ByteString using the
|
- does contain other unicode characters to a ByteString using the
|
||||||
- filesystem encoding (see GHC.IO.Encoding) will throw an exception,
|
- filesystem encoding (see GHC.IO.Encoding) will throw an exception,
|
||||||
- so use this instead to avoid quickcheck tests breaking unncessarily.
|
- so use this instead to avoid quickcheck tests breaking unnecessarily.
|
||||||
-}
|
-}
|
||||||
newtype TestableString = TestableString
|
newtype TestableString = TestableString
|
||||||
{ fromTestableString :: String }
|
{ fromTestableString :: String }
|
||||||
|
@ -46,7 +46,7 @@ instance Arbitrary TestableString where
|
||||||
-
|
-
|
||||||
- No real-world filename can be empty or contain a NUL. So code can
|
- No real-world filename can be empty or contain a NUL. So code can
|
||||||
- well be written that assumes that and using this avoids quickcheck
|
- well be written that assumes that and using this avoids quickcheck
|
||||||
- tests breaking unncessarily.
|
- tests breaking unnecessarily.
|
||||||
-}
|
-}
|
||||||
newtype TestableFilePath = TestableFilePath
|
newtype TestableFilePath = TestableFilePath
|
||||||
{ fromTestableFilePath :: FilePath }
|
{ fromTestableFilePath :: FilePath }
|
||||||
|
|
|
@ -58,7 +58,7 @@ newtype Armoring = Armoring Bool
|
||||||
- This is unfortunately needed because of an infelicity in the SOP
|
- This is unfortunately needed because of an infelicity in the SOP
|
||||||
- standard, as documented in section 9.9 "Be Careful with Special
|
- standard, as documented in section 9.9 "Be Careful with Special
|
||||||
- Designators", when using "@FD:" and similar designators the SOP
|
- Designators", when using "@FD:" and similar designators the SOP
|
||||||
- command may test for the presense of a file with the same name on the
|
- command may test for the presence of a file with the same name on the
|
||||||
- filesystem, and fail with AMBIGUOUS_INPUT.
|
- filesystem, and fail with AMBIGUOUS_INPUT.
|
||||||
-
|
-
|
||||||
- Since we don't want to need to deal with such random failure due to
|
- Since we don't want to need to deal with such random failure due to
|
||||||
|
|
|
@ -0,0 +1,55 @@
|
||||||
|
### Please describe the problem.
|
||||||
|
|
||||||
|
The file extensions of annexed files are dropped if they have more than four characters (like `.blend` files).
|
||||||
|
All `*E` backends are affected but `WORM` seems to work.
|
||||||
|
|
||||||
|
### What steps will reproduce the problem?
|
||||||
|
|
||||||
|
[[!format sh """
|
||||||
|
mkdir foo && cd foo
|
||||||
|
git init
|
||||||
|
git annex init
|
||||||
|
echo '* annex.backend=SHA256E' > .gitattributes
|
||||||
|
echo '* annex.largefiles=(largerthan=0)' >> .gitattributes
|
||||||
|
echo 'foo' > foo.abc
|
||||||
|
echo 'bar' > bar.abcd
|
||||||
|
echo 'baz' > baz.abcde
|
||||||
|
echo 'faz' > faz.abcdef
|
||||||
|
git annex add .
|
||||||
|
ls -l
|
||||||
|
"""]]
|
||||||
|
|
||||||
|
Outputs
|
||||||
|
|
||||||
|
[[!format sh """
|
||||||
|
total 16
|
||||||
|
lrwxrwxrwx 1 foo bar 188 Apr 18 11:52 bar.abcd -> .git/annex/objects/z2/jk/SHA256E-s4--7d865e959b2466918c9863afca942d0fb89d7c9ac0c99bafc3749504ded97730.abcd/SHA256E-s4--7d865e959b2466918c9863afca942d0fb89d7c9ac0c99bafc3749504ded97730.abcd
|
||||||
|
lrwxrwxrwx 1 foo bar 178 Apr 18 11:52 baz.abcde -> .git/annex/objects/MZ/Fq/SHA256E-s4--bf07a7fbb825fc0aae7bf4a1177b2b31fcf8a3feeaf7092761e18c859ee52a9c/SHA256E-s4--bf07a7fbb825fc0aae7bf4a1177b2b31fcf8a3feeaf7092761e18c859ee52a9c
|
||||||
|
lrwxrwxrwx 1 foo bar 178 Apr 18 11:52 faz.abcdef -> .git/annex/objects/6Z/zG/SHA256E-s4--0206bf5fc94a74ae22c2c0e93ad1b578ae7f16cb52fb470cddf1f0d324c6bbf3/SHA256E-s4--0206bf5fc94a74ae22c2c0e93ad1b578ae7f16cb52fb470cddf1f0d324c6bbf3
|
||||||
|
lrwxrwxrwx 1 foo bar 186 Apr 18 11:52 foo.abc -> .git/annex/objects/Mq/J5/SHA256E-s4--b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c.abc/SHA256E-s4--b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c.abc
|
||||||
|
"""]]
|
||||||
|
|
||||||
|
|
||||||
|
### What version of git-annex are you using? On what operating system?
|
||||||
|
|
||||||
|
- Tested with Ubuntu 22.04
|
||||||
|
- git-annex v8 from original ubuntu ppa
|
||||||
|
- git-annex-standalone v10 from neurodebian ppa
|
||||||
|
|
||||||
|
### Please provide any additional information below.
|
||||||
|
|
||||||
|
[[!format sh """
|
||||||
|
$ git annex version
|
||||||
|
git-annex version: 10.20240227-1~ndall+1
|
||||||
|
build flags: Assistant Webapp Pairing Inotify DBus DesktopNotify TorrentParser MagicMime Benchmark Feeds Testsuite S3 WebDAV
|
||||||
|
dependency versions: aws-0.22.1 bloomfilter-2.0.1.0 cryptonite-0.29 DAV-1.3.4 feed-1.3.2.1 ghc-9.0.2 http-client-0.7.13.1 persistent-sqlite-2.13.1.0 torrent-10000.1.1 uuid-1.3.15 yesod-1.6.2.1
|
||||||
|
key/value backends: SHA256E SHA256 SHA512E SHA512 SHA224E SHA224 SHA384E SHA384 SHA3_256E SHA3_256 SHA3_512E SHA3_512 SHA3_224E SHA3_224 SHA3_384E SHA3_384 SKEIN256E SKEIN256 SKEIN512E SKEIN512 BLAKE2B256E BLAKE2B256 BLAKE2B512E BLAKE2B512 BLAKE2B160E BLAKE2B160 BLAKE2B224E BLAKE2B224 BLAKE2B384E BLAKE2B384 BLAKE2BP512E BLAKE2BP512 BLAKE2S256E BLAKE2S256 BLAKE2S160E BLAKE2S160 BLAKE2S224E BLAKE2S224 BLAKE2SP256E BLAKE2SP256 BLAKE2SP224E BLAKE2SP224 SHA1E SHA1 MD5E MD5 WORM URL X*
|
||||||
|
remote types: git gcrypt p2p S3 bup directory rsync web bittorrent webdav adb tahoe glacier ddar git-lfs httpalso borg hook external
|
||||||
|
operating system: linux x86_64
|
||||||
|
supported repository versions: 8 9 10
|
||||||
|
upgrade supported from repository versions: 0 1 2 3 4 5 6 7 8 9 10
|
||||||
|
local repository version: 10
|
||||||
|
"""]]
|
||||||
|
|
||||||
|
> As nobodyinperson says, set annex.maxextensionlength to 5 in order to
|
||||||
|
> support such long extensions. [[done]] --[[Joey]]
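For example, following the transcript above (a sketch; the filename is hypothetical):

[[!format sh """
git config annex.maxextensionlength 5   # allow 5-character extensions like .abcde
git annex add newfile.abcde             # its SHA256E key now keeps the .abcde extension
"""]]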
|
|
@ -0,0 +1,8 @@
|
||||||
|
[[!comment format=mdwn
|
||||||
|
username="nobodyinperson"
|
||||||
|
avatar="http://cdn.libravatar.org/avatar/736a41cd4988ede057bae805d000f4f5"
|
||||||
|
subject="annex.maxextensionlength and annex.maxextensions"
|
||||||
|
date="2024-04-27T19:16:29Z"
|
||||||
|
content="""
|
||||||
|
See the recently added `annex.maxextensionlength` and `annex.maxextensions` configurations: https://git-annex.branchable.com/git-annex/, which should fix your issue.
|
||||||
|
"""]]
|
|
@ -0,0 +1,39 @@
|
||||||
|
[[!comment format=mdwn
|
||||||
|
username="jasonc"
|
||||||
|
nickname="mail"
|
||||||
|
avatar="http://cdn.libravatar.org/avatar/cb07bdfbe978aa83388d64e08a972eb2"
|
||||||
|
subject="Possible simplified scenario"
|
||||||
|
date="2024-04-03T16:48:04Z"
|
||||||
|
content="""
|
||||||
|
Hello, firstly thank you for developing a really useful piece of software. During my initial experimentation I came across what appears to be a variation of this bug, and I think I've distilled it to a minimal reproducible scenario.
|
||||||
|
|
||||||
|
Initialise in the usual way on an NTFS partition, then add a directory special remote (no `encryption`, no `importtree` and no `exporttree`):
|
||||||
|
<pre>
|
||||||
|
git init
|
||||||
|
git annex init local
|
||||||
|
git annex initremote nextdoor type=directory directory=N:\nextdoordir encryption=none
|
||||||
|
</pre>
|
||||||
|
|
||||||
|
In my case I then added and committed the files locally, then moved them to the directory special remote and back again:
|
||||||
|
<pre>
|
||||||
|
git annex add .
|
||||||
|
git commit --all --message=\"first commit\"
|
||||||
|
git annex move . --to nextdoor
|
||||||
|
git annex move . --from nextdoor
|
||||||
|
</pre>
|
||||||
|
|
||||||
|
This completes successfully, however repeating the last two steps a second time triggers the `permission denied (Access is denied.)` failure at the start of the bug report.
|
||||||
|
|
||||||
|
Going through each part step by step:
|
||||||
|
|
||||||
|
* Since NTFS is designated as a \"crippled filesystem\", the annexed objects appear to be read-write by default (no ACL modifications, no ReadOnly attribute).
|
||||||
|
* When the files are moved away to the directory special remote (in my test, the same NTFS partition), they pick up a ReadOnly attribute in the new location, so `Archive+Compression` becomes `ReadOnly+Archive+Compression`.
|
||||||
|
* When the files are then moved back from the directory special remote, the ReadOnly attribute persists.
|
||||||
|
* Repeating the movement then fails, as the file cannot be dropped locally (the UNC path exists, but `DeleteFile` fails).
|
||||||
|
|
||||||
|
If I remove the ReadOnly attributes and try again, the move away is successful. Similarly if I use a networked ext4 location for the directory special remote (and NTFS locally), the same cycle of success then failure can be observed.
|
||||||
|
|
||||||
|
Version information: git `git version 2.44.0.windows.1`, annex `git-annex version: 10.20240130-gad8e32c09d3ec866e0c0654cdcd146bf1aefbc5e` (installer from 2024-02-27), Windows 10 22H2
|
||||||
|
|
||||||
|
If you require logs or other information, please let me know.
|
||||||
|
"""]]
|
|
@ -0,0 +1,46 @@
|
||||||
|
### Please describe the problem.
|
||||||
|
|
||||||
|
`git annex pull` reports `--time-limit=1m` as an invalid option, even though its manpage states that it can use the options from git-annex-common-options, and the manpage for those includes the `--time-limit` option.
|
||||||
|
|
||||||
|
|
||||||
|
### What steps will reproduce the problem?
|
||||||
|
|
||||||
|
Run `git annex pull` while specifying the `--time-limit` option.
|
||||||
|
|
||||||
|
|
||||||
|
### What version of git-annex are you using? On what operating system?
|
||||||
|
|
||||||
|
[[!format sh """
|
||||||
|
$ git annex version
|
||||||
|
git-annex version: 10.20240227
|
||||||
|
build flags: Assistant Webapp Pairing Inotify DBus DesktopNotify TorrentParser MagicMime Feeds Testsuite S3 WebDAV
|
||||||
|
dependency versions: aws-0.24.1 bloomfilter-2.0.1.2 crypton-0.34 DAV-1.3.4 feed-1.3.2.1 ghc-9.6.4 http-client-0.7.17 persistent-sqlite-2.13.3.0 torrent-10000.1.3 uuid-1.3.15 yesod-1.6.2.1
|
||||||
|
key/value backends: SHA256E SHA256 SHA512E SHA512 SHA224E SHA224 SHA384E SHA384 SHA3_256E SHA3_256 SHA3_512E SHA3_512 SHA3_224E SHA3_224 SHA3_384E SHA3_384 SKEIN256E SKEIN256 SKEIN512E SKEIN512 BLAKE2B256E BLAKE2B256 BLAKE2B512E BLAKE2B512 BLAKE2B160E BLAKE2B160 BLAKE2B224E BLAKE2B224 BLAKE2B384E BLAKE2B384 BLAKE2BP512E BLAKE2BP512 BLAKE2S256E BLAKE2S256 BLAKE2S160E BLAKE2S160 BLAKE2S224E BLAKE2S224 BLAKE2SP256E BLAKE2SP256 BLAKE2SP224E BLAKE2SP224 SHA1E SHA1 MD5E MD5 WORM URL X*
|
||||||
|
remote types: git gcrypt p2p S3 bup directory rsync web bittorrent webdav adb tahoe glacier ddar git-lfs httpalso borg hook external
|
||||||
|
operating system: linux x86_64
|
||||||
|
supported repository versions: 8 9 10
|
||||||
|
upgrade supported from repository versions: 0 1 2 3 4 5 6 7 8 9 10
|
||||||
|
local repository version: 10
|
||||||
|
"""]]
|
||||||
|
|
||||||
|
On Ubuntu, but git-annex is installed from a recent nixpkgs.
|
||||||
|
|
||||||
|
|
||||||
|
### Please provide any additional information below.
|
||||||
|
|
||||||
|
[[!format sh """
|
||||||
|
# If you can, paste a complete transcript of the problem occurring here.
|
||||||
|
# If the problem is with the git-annex assistant, paste in .git/annex/daemon.log
|
||||||
|
|
||||||
|
$ git annex pull --time-limit=1m
|
||||||
|
Invalid option `--time-limit=1m'
|
||||||
|
|
||||||
|
Usage: git-annex COMMAND
|
||||||
|
[...]
|
||||||
|
|
||||||
|
# End of transcript or log.
|
||||||
|
"""]]
|
||||||
|
|
||||||
|
### Have you had any luck using git-annex before? (Sometimes we get tired of reading bug reports all day and a lil' positive end note does wonders)
|
||||||
|
|
||||||
|
I am currently working on a web interface that lets non-git-annex users request files to be available in a specific repository (which will be located on the storage cluster of an HPC system). A combination of git annex metadata and an appropriate required content expression should make this essentially trivial, which is nice. The time-limit option would be helpful for the background worker doing all the fetching, though.
|
|
@ -0,0 +1,43 @@
|
||||||
|
### Please describe the problem.
|
||||||
|
|
||||||
|
Files added/modified in an `adjusted(unlocked)` branch get added as locked (symlink) files to the related branch, without honoring the `annex.addunlocked` setting.
|
||||||
|
|
||||||
|
### What steps will reproduce the problem?
|
||||||
|
|
||||||
|
[[!format sh """
|
||||||
|
#!/bin/sh -x
|
||||||
|
set -x
|
||||||
|
test -e testrepo && chmod +w -R testrepo && rm -rf testrepo
|
||||||
|
git -c init.defaultBranch=main init testrepo
|
||||||
|
cd testrepo
|
||||||
|
git annex init
|
||||||
|
git annex config --set annex.addunlocked true
|
||||||
|
echo content > README.md
|
||||||
|
git annex assist
|
||||||
|
git annex adjust --unlock
|
||||||
|
echo "changed on adjusted(unlocked) branch" > README.md
|
||||||
|
git annex assist
|
||||||
|
git switch main
|
||||||
|
file README.md # should be an unlocked file, but is a symlink
|
||||||
|
# README.md: symbolic link to .git/annex/objects/1z/J4/SHA256E-s37--947ef3df7717076c1cdd9c17f5fe90d69347c479e491500dac3051a4a03c6ecb.md/SHA256E-s37--947ef3df7717076c1cdd9c17f5fe90d69347c479e491500dac3051a4a03c6ecb.md
|
||||||
|
"""]]
|
||||||
|
|
||||||
|
### What version of git-annex are you using? On what operating system?
|
||||||
|
|
||||||
|
[[!format sh """
|
||||||
|
# on NixOS
|
||||||
|
> git annex version
|
||||||
|
git-annex version: 10.20240129
|
||||||
|
build flags: Assistant Webapp Pairing Inotify DBus DesktopNotify TorrentParser MagicMime Feeds Testsuite S3 WebDAV
|
||||||
|
dependency versions: aws-0.24.1 bloomfilter-2.0.1.2 crypton-0.32 DAV-1.3.4 feed-1.3.2.1 ghc-9.4.8 http-client-0.7.15 persistent-sqlite-2.13.2.0 torrent-10000.1.3 uuid-1.3.15 yesod-1.6.2.1
|
||||||
|
key/value backends: SHA256E SHA256 SHA512E SHA512 SHA224E SHA224 SHA384E SHA384 SHA3_256E SHA3_256 SHA3_512E SHA3_512 SHA3_224E SHA3_224 SHA3_384E SHA3_384 SKEIN256E SKEIN256 SKEIN512E SKEIN512 BLAKE2B256E BLAKE2B256 BLAKE2B512E BLAKE2B512 BLAKE2B160E BLAKE2B160 BLAKE2B224E BLAKE2B224 BLAKE2B384E BLAKE2B384 BLAKE2BP512E BLAKE2BP512 BLAKE2S256E BLAKE2S256 BLAKE2S160E BLAKE2S160 BLAKE2S224E BLAKE2S224 BLAKE2SP256E BLAKE2SP256 BLAKE2SP224E BLAKE2SP224 SHA1E SHA1 MD5E MD5 WORM URL X*
|
||||||
|
remote types: git gcrypt p2p S3 bup directory rsync web bittorrent webdav adb tahoe glacier ddar git-lfs httpalso borg hook external
|
||||||
|
operating system: linux x86_64
|
||||||
|
supported repository versions: 8 9 10
|
||||||
|
upgrade supported from repository versions: 0 1 2 3 4 5 6 7 8 9 10
|
||||||
|
local repository version: 10
|
||||||
|
"""]]
|
||||||
|
|
||||||
|
### Have you had any luck using git-annex before? (Sometimes we get tired of reading bug reports all day and a lil' positive end note does wonders)
|
||||||
|
|
||||||
|
Hell yeah, git annex rules! 💪
|
|
@ -0,0 +1,106 @@
|
||||||
|
### Please describe the problem.
|
||||||
|
|
||||||
|
Today I noticed odd commits happening such as
|
||||||
|
|
||||||
|
```
|
||||||
|
❯ git show 4a157861f3d27a40b38ae441dfe306e45e448c66
|
||||||
|
commit 4a157861f3d27a40b38ae441dfe306e45e448c66
|
||||||
|
Author: ReproStim User <changeme@example.com>
|
||||||
|
Date: Wed Apr 17 09:22:04 2024 -0400
|
||||||
|
|
||||||
|
git-annex in reprostim@reproiner:/data/reprostim
|
||||||
|
|
||||||
|
diff --git a/Videos/2024.03.17.14.09.12.550_2024.03.17.14.09.18.818.mkv.log b/Videos/2024.03.17.14.09.12.550_2024.03.17.14.09.18.818.mkv.log
|
||||||
|
index fc930f54..92b79020 100644
|
||||||
|
--- a/Videos/2024.03.17.14.09.12.550_2024.03.17.14.09.18.818.mkv.log
|
||||||
|
+++ b/Videos/2024.03.17.14.09.12.550_2024.03.17.14.09.18.818.mkv.log
|
||||||
|
@@ -1 +1 @@
|
||||||
|
-/annex/objects/MD5E-s68799--29541299bea3691f430d855d2fb432fb.mkv.log
|
||||||
|
+/annex/objects/MD5E-s69--08983cc11522233e5d4815e4ef62275a.mkv.log
|
||||||
|
```
|
||||||
|
|
||||||
|
-- today is April but commits are for files in March...
|
||||||
|
|
||||||
|
There is `git annex webapp` running which is configured to offload all content to another host.
|
||||||
|
|
||||||
|
And the actual patch shows that it pretty much annexed the "unlocked link" file after the file was offloaded to the remote host.
|
||||||
|
|
||||||
|
|
||||||
|
I do not have a minimal reproducer yet, but I think it happened while:
|
||||||
|
|
||||||
|
- I had initially .log files which are text going to git
|
||||||
|
- then I added to `.gitattributes`
|
||||||
|
|
||||||
|
```
|
||||||
|
*.log annex.largefiles=anything
|
||||||
|
```
|
||||||
|
|
||||||
|
but it was never committed (I had assumed that the annex webapp/assistant would do that -- it didn't); only now did I commit it.
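A sketch of that commit (message illustrative):

```
git add .gitattributes
git commit -m 'annex *.log files via annex.largefiles'
```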
|
||||||
|
- not sure how this morning was special...
|
||||||
|
|
||||||
|
The most interesting is that if I `annex get` -- I do get correct file...
|
||||||
|
|
||||||
|
It is like an inception!!!
|
||||||
|
|
||||||
|
On the fresh clone, if I look inside that file I see the short key:
|
||||||
|
|
||||||
|
```
|
||||||
|
❯ cat 2024.03.17.14.09.12.550_2024.03.17.14.09.18.818.mkv.log
|
||||||
|
/annex/objects/MD5E-s69--08983cc11522233e5d4815e4ef62275a.mkv.log
|
||||||
|
```
|
||||||
|
|
||||||
|
then, if I `annex get` it -- I get content with long key
|
||||||
|
|
||||||
|
```shell
|
||||||
|
❯ git annex get 2024.03.17.14.09.12.550_2024.03.17.14.09.18.818.mkv.log
|
||||||
|
get 2024.03.17.14.09.12.550_2024.03.17.14.09.18.818.mkv.log (from rolando...)
|
||||||
|
ok
|
||||||
|
(recording state in git...)
|
||||||
|
❯ cat 2024.03.17.14.09.12.550_2024.03.17.14.09.18.818.mkv.log
|
||||||
|
/annex/objects/MD5E-s68799--29541299bea3691f430d855d2fb432fb.mkv.log
|
||||||
|
```
|
||||||
|
|
||||||
|
then upon subsequent get -- I will get the actual content:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
❯ git annex get 2024.03.17.14.09.12.550_2024.03.17.14.09.18.818.mkv.log
|
||||||
|
get 2024.03.17.14.09.12.550_2024.03.17.14.09.18.818.mkv.log (from rolando...)
|
||||||
|
ok
|
||||||
|
(recording state in git...)
|
||||||
|
❯ head -n 1 2024.03.17.14.09.12.550_2024.03.17.14.09.18.818.mkv.log
|
||||||
|
2024-03-17 14:09:12.551 [info] [685899] Session logging begin : reprostim-videocapture 1.5.0.119, session_logger_2024.03.17.14.09.12.550, start_ts=2024.03.17.14.09.12.550
|
||||||
|
```
|
||||||
|
and dropping it would lead me just to the "long key"
|
||||||
|
|
||||||
|
```
|
||||||
|
❯ git annex drop 2024.03.17.14.09.12.550_2024.03.17.14.09.18.818.mkv.log
|
||||||
|
drop 2024.03.17.14.09.12.550_2024.03.17.14.09.18.818.mkv.log (locking rolando...) ok
|
||||||
|
(recording state in git...)
|
||||||
|
❯ cat 2024.03.17.14.09.12.550_2024.03.17.14.09.18.818.mkv.log
|
||||||
|
/annex/objects/MD5E-s68799--29541299bea3691f430d855d2fb432fb.mkv.log
|
||||||
|
```
|
||||||
|
|
||||||
|
and will not be able to come out into reality from the 2nd level of inception:
|
||||||
|
|
||||||
|
```
|
||||||
|
❯ git annex drop 2024.03.17.14.09.12.550_2024.03.17.14.09.18.818.mkv.log
|
||||||
|
❯ cat 2024.03.17.14.09.12.550_2024.03.17.14.09.18.818.mkv.log
|
||||||
|
/annex/objects/MD5E-s68799--29541299bea3691f430d855d2fb432fb.mkv.log
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
### What version of git-annex are you using? On what operating system?
|
||||||
|
|
||||||
|
on original server with webapp: 10.20240227-1~ndall+1
|
||||||
|
|
||||||
|
on the intermediate server through which the transfer of files happens: I think it might be old
|
||||||
|
|
||||||
|
```
|
||||||
|
[bids@rolando VIDS] > git annex version
|
||||||
|
git-annex version: 6.20180808-ga1327779a
|
||||||
|
```
|
||||||
|
|
||||||
|
on laptop where I dive into inception: 10.20240129
|
||||||
|
|
||||||
|
[[!meta author=yoh]]
|
||||||
|
[[!tag projects/repronim]]
|
|
@ -0,0 +1,78 @@
|
||||||
|
### Please describe the problem.
|
||||||
|
|
||||||
|
`git-annex` is misreporting the available disk space on my large (98TiB) APFS volume on MacOS.
|
||||||
|
|
||||||
|
### What steps will reproduce the problem?
|
||||||
|
|
||||||
|
- acquire a large storage array (how large, unclear)
|
||||||
|
- format with APFS on MacOS (no idea if it also occurs on other platforms or file systems)
|
||||||
|
- clone a git-annex repo onto it
|
||||||
|
- look at `git annex info` / try to copy data
|
||||||
|
|
||||||
|
Here's my `df` output:
|
||||||
|
|
||||||
|
```
|
||||||
|
Filesystem Size Used Avail Capacity iused ifree %iused Mounted on
|
||||||
|
/dev/disk5s1 98Ti 2.2Ti 96Ti 3% 54k 1.0T 0% /Volumes/ABYSS
|
||||||
|
```
|
||||||
|
|
||||||
|
And here's the `git annex info`:
|
||||||
|
|
||||||
|
```
|
||||||
|
trusted repositories: 0
|
||||||
|
semitrusted repositories: 8
|
||||||
|
00000000-0000-0000-0000-000000000001 -- web
|
||||||
|
00000000-0000-0000-0000-000000000002 -- bittorrent
|
||||||
|
335d1827-1f6e-40d8-ae70-23ba9dd4b4a6 -- borg
|
||||||
|
4d887674-fb3f-47ee-afea-487f0143950e -- dvc
|
||||||
|
512ca1ff-43b7-4537-bf64-0f55e1ba2e8a -- cci server [origin]
|
||||||
|
71414f81-c837-4838-894d-840d0a2170ff -- [pocket]
|
||||||
|
dd695a7f-4b41-4a7d-ae57-3174f6a839e1 -- bsu server [grue]
|
||||||
|
df875036-d197-46f8-940c-9df5dc7fc2cd -- abyss [here]
|
||||||
|
untrusted repositories: 0
|
||||||
|
transfers in progress: none
|
||||||
|
available local disk space: 3.94 gigabytes (+100 megabytes reserved)
|
||||||
|
local annex keys: 14380
|
||||||
|
local annex size: 4.18 terabytes
|
||||||
|
annexed files in working tree: 13312
|
||||||
|
size of annexed files in working tree: 2.59 terabytes
|
||||||
|
combined annex size of all repositories: 19.09 terabytes (+ 9 unknown size)
|
||||||
|
annex sizes of repositories:
|
||||||
|
5.57 TB: 512ca1ff-43b7-4537-bf64-0f55e1ba2e8a -- cci server [origin]
|
||||||
|
4.18 TB: df875036-d197-46f8-940c-9df5dc7fc2cd -- abyss [here]
|
||||||
|
3.21 TB: 71414f81-c837-4838-894d-840d0a2170ff -- [pocket]
|
||||||
|
2.52 TB: 335d1827-1f6e-40d8-ae70-23ba9dd4b4a6 -- borg
|
||||||
|
1.92 TB: dd695a7f-4b41-4a7d-ae57-3174f6a839e1 -- bsu server [grue]
|
||||||
|
879.87 GB: 00000000-0000-0000-0000-000000000001 -- web
|
||||||
|
819.55 GB: 4d887674-fb3f-47ee-afea-487f0143950e -- dvc
|
||||||
|
backend usage:
|
||||||
|
SHA256: 6991
|
||||||
|
SHA256E: 6321
|
||||||
|
bloom filter size: 32 mebibytes (2.9% full)
|
||||||
|
```
|
||||||
|
|
||||||
|
### What version of git-annex are you using? On what operating system?
|
||||||
|
|
||||||
|
MacOS Sonoma 14.4.1, Git Annex version below:
|
||||||
|
|
||||||
|
```
|
||||||
|
git-annex version: 10.20240227
|
||||||
|
build flags: Assistant Webapp Pairing FsEvents TorrentParser MagicMime Benchmark Feeds Testsuite S3 WebDAV
|
||||||
|
dependency versions: aws-0.24.1 bloomfilter-2.0.1.2 crypton-0.34 DAV-1.3.4 feed-1.3.2.1 ghc-9.6.3 http-client-0.7.16 persistent-sqlite-2.13.3.0 torrent-10000.1.3 uuid-1.3.15 yesod-1.6.2.1
|
||||||
|
key/value backends: SHA256E SHA256 SHA512E SHA512 SHA224E SHA224 SHA384E SHA384 SHA3_256E SHA3_256 SHA3_512E SHA3_512 SHA3_224E SHA3_224 SHA3_384E SHA3_384 SKEIN256E SKEIN256 SKEIN512E SKEIN512 BLAKE2B256E BLAKE2B256 BLAKE2B512E BLAKE2B512 BLAKE2B160E BLAKE2B160 BLAKE2B224E BLAKE2B224 BLAKE2B384E BLAKE2B384 BLAKE2BP512E BLAKE2BP512 BLAKE2S256E BLAKE2S256 BLAKE2S160E BLAKE2S160 BLAKE2S224E BLAKE2S224 BLAKE2SP256E BLAKE2SP256 BLAKE2SP224E BLAKE2SP224 SHA1E SHA1 MD5E MD5 WORM URL X*
|
||||||
|
remote types: git gcrypt p2p S3 bup directory rsync web bittorrent webdav adb tahoe glacier ddar git-lfs httpalso borg hook external
|
||||||
|
operating system: darwin aarch64
|
||||||
|
supported repository versions: 8 9 10
|
||||||
|
upgrade supported from repository versions: 0 1 2 3 4 5 6 7 8 9 10
|
||||||
|
local repository version: 10
|
||||||
|
```
|
||||||
|
|
||||||
|
### Please provide any additional information below.
|
||||||
|
|
||||||
|
I don't know what other file system versions (or operating systems) this may appear in, as this is the only 98TiB volume I have right now.
|
||||||
|
|
||||||
|
I'm happy to do some poking around or test proposed fixes; I have some Haskell experience but it's been a while. My current instinct is that `disk-free-space` is misbehaving on a disk this large, for reasons that are unclear.
|
||||||
|
|
||||||
|
### Have you had any luck using git-annex before? (Sometimes we get tired of reading bug reports all day and a lil' positive end note does wonders)
|
||||||
|
|
||||||
|
Been successfully using it for the last year+, for managing archives of academic research & teaching work and the assets for my web site. Generally quite happy :). Like it enough that I want to use it to schlep a few terabytes (and growing) onto my new Thunderbolt RAID array...
|
|
@ -0,0 +1,16 @@
|
||||||
|
[[!comment format=mdwn
|
||||||
|
username="mdekstrand"
|
||||||
|
avatar="http://cdn.libravatar.org/avatar/0acb8a6c848d39aa53d94bd81239b034"
|
||||||
|
subject="Update after transferring more files"
|
||||||
|
date="2024-04-09T14:45:07Z"
|
||||||
|
content="""
|
||||||
|
After transferring many more files with `--force`, `git annex info` reports an available space of 17.46 terabytes, with following actual space:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ df -h .
|
||||||
|
Filesystem Size Used Avail Capacity iused ifree %iused Mounted on
|
||||||
|
/dev/disk5s1 98Ti 2.3Ti 96Ti 3% 75k 1.0T 0% /Volumes/ABYSS
|
||||||
|
```
|
||||||
|
|
||||||
|
I suspect the problem is either an integer overflow of some kind, or an integer size/precision error reading from the disk space struct in the disk space library.
|
||||||
|
"""]]
|
35
doc/bugs/git_annex_copy_just_does_not_copy_sometimes.mdwn
Normal file
35
doc/bugs/git_annex_copy_just_does_not_copy_sometimes.mdwn
Normal file
|
@ -0,0 +1,35 @@
|
||||||
|
### Please describe the problem.
|
||||||
|
|
||||||
|
Executing git annex copy sometimes does not copy the necessary files.
|
||||||
|
|
||||||
|
### What steps will reproduce the problem?
|
||||||
|
|
||||||
|
I do not know how to reproduce it, because it sometimes happens and sometimes does not. This is the code I used:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ git annex findkeys --not --in origin-storage
|
||||||
|
MD5E-s7265--9885654f68b8e72de9b681c8783b3bf8.yaml
|
||||||
|
|
||||||
|
$ git annex copy --not --in origin-storage --to origin-storage
|
||||||
|
### Note that there is no output
|
||||||
|
|
||||||
|
$ git annex findkeys --not --in origin-storage
|
||||||
|
MD5E-s7265--9885654f68b8e72de9b681c8783b3bf8.yaml
|
||||||
|
## Still there!
|
||||||
|
|
||||||
|
11:00 $ git annex findkeys --not --in origin-storage | git annex copy --batch-keys --to origin-storage
|
||||||
|
copy MD5E-s7265--9885654f68b8e72de9b681c8783b3bf8.yaml (to origin-storage...)
|
||||||
|
ok
|
||||||
|
|
||||||
|
$ git annex findkeys --not --in origin-storage
|
||||||
|
## Now the problem is resolved
|
||||||
|
```
|
||||||
|
|
||||||
|
My expectation is that the second and the fourth commands do the same thing, but they don't.
|
||||||
|
|
||||||
|
### What version of git-annex are you using? On what operating system?
|
||||||
|
|
||||||
|
Rocky OS 9
|
||||||
|
git-annex version: 10.20230926-g44a7b4c9734adfda5912dd82c1aa97c615689f57
|
||||||
|
|
||||||
|
|
|
@ -13,5 +13,5 @@ progress. And all that nontrivial work has in fact been done in order to
|
||||||
support -J. It's just not enabled by default or for -J1.
|
support -J. It's just not enabled by default or for -J1.
|
||||||
|
|
||||||
The reason git-annex has not yet switched to the concurrent output by
|
The reason git-annex has not yet switched to the concurrent output by
|
||||||
default is that it assumes a VT100 compatable terminal.
|
default is that it assumes a VT100 compatible terminal.
|
||||||
"""]]
|
"""]]
|
||||||
|
|
142
doc/bugs/migrate_removes_associated_URLs_with_custom_scheme.mdwn
Normal file
142
doc/bugs/migrate_removes_associated_URLs_with_custom_scheme.mdwn
Normal file
|
@ -0,0 +1,142 @@
|
||||||
|
### Please describe the problem.
|
||||||
|
|
||||||
|
With URLs that are handled by the web remote the URL will be kept through a migration, i.e.
|
||||||
|
|
||||||
|
```
|
||||||
|
git annex addurl --relaxed <some-http-url>
|
||||||
|
git annex get <the-added-file>
|
||||||
|
git annex migrate
|
||||||
|
```
|
||||||
|
|
||||||
|
will migrate the key of the file to be hash based, and keep the URL associated to that key.
|
||||||
|
If I do the same with a URL whose scheme is handled by a custom special remote (this one specifically is what I got the issue with: <https://github.com/matrss/datalad-cds/blob/main/src/datalad_cds/cds_remote.py>, it registers itself for the `cds:` scheme), the URL seems to be dropped from the key (i.e. whereis no longer shows it and git annex can no longer fetch it from the special remote).
|
||||||
|
|
||||||
|
### What steps will reproduce the problem?
|
||||||
|
|
||||||
|
The steps mentioned above, for a URL whose scheme is handled by an (external?) special remote. Specifically, I saw it with datalad-cds; it might happen for other special remotes as well.
|
||||||
|
|
||||||
|
### What version of git-annex are you using? On what operating system?
|
||||||
|
|
||||||
|
```
|
||||||
|
git-annex version: 10.20231129
|
||||||
|
build flags: Assistant Webapp Pairing Inotify DBus DesktopNotify TorrentParser MagicMime Feeds Testsuite S3 WebDAV
|
||||||
|
dependency versions: aws-0.24.1 bloomfilter-2.0.1.2 crypton-0.32 DAV-1.3.4 feed-1.3.2.1 ghc-9.4.8 http-client-0.7.15 persistent-sqlite-2.13.3.0 torrent-10000.1.3 uuid-1.3.15 yesod-1.6.2.1
|
||||||
|
key/value backends: SHA256E SHA256 SHA512E SHA512 SHA224E SHA224 SHA384E SHA384 SHA3_256E SHA3_256 SHA3_512E SHA3_512 SHA3_224E SHA3_224 SHA3_384E SHA3_384 SKEIN256E SKEIN256 SKEIN512E SKEIN512 BLAKE2B256E BLAKE2B256 BLAKE2B512E BLAKE2B512 BLAKE2B160E BLAKE2B160 BLAKE2B224E BLAKE2B224 BLAKE2B384E BLAKE2B384 BLAKE2BP512E BLAKE2BP512 BLAKE2S256E BLAKE2S256 BLAKE2S160E BLAKE2S160 BLAKE2S224E BLAKE2S224 BLAKE2SP256E BLAKE2SP256 BLAKE2SP224E BLAKE2SP224 SHA1E SHA1 MD5E MD5 WORM URL X*
|
||||||
|
remote types: git gcrypt p2p S3 bup directory rsync web bittorrent webdav adb tahoe glacier ddar git-lfs httpalso borg hook external
|
||||||
|
operating system: linux x86_64
|
||||||
|
supported repository versions: 8 9 10
|
||||||
|
upgrade supported from repository versions: 0 1 2 3 4 5 6 7 8 9 10
|
||||||
|
local repository version: 10
|
||||||
|
```
|
||||||
|
|
||||||
|
Installed with nix from nixpkgs on an ubuntu system.
|
||||||
|
|
||||||
|
### Please provide any additional information below.
|
||||||
|
|
||||||
|
It works with the web remote:
|
||||||
|
[[!format sh """
|
||||||
|
# If you can, paste a complete transcript of the problem occurring here.
|
||||||
|
# If the problem is with the git-annex assistant, paste in .git/annex/daemon.log
|
||||||
|
$ datalad create test-ds
|
||||||
|
create(ok): [...] (dataset)
|
||||||
|
$ cd test-ds/
|
||||||
|
$ git annex addurl --relaxed "https://git-annex.branchable.com/"
|
||||||
|
addurl https://git-annex.branchable.com/ (to git-annex.branchable.com_) ok
|
||||||
|
(recording state in git...)
|
||||||
|
$ git annex whereis git-annex.branchable.com_
|
||||||
|
whereis git-annex.branchable.com_ (1 copy)
|
||||||
|
00000000-0000-0000-0000-000000000001 -- web
|
||||||
|
|
||||||
|
web: https://git-annex.branchable.com/
|
||||||
|
ok
|
||||||
|
$ ls -l
|
||||||
|
[...] git-annex.branchable.com_ -> '.git/annex/objects/pJ/v4/URL--https&c%%git-annex.branchable.com%/URL--https&c%%git-annex.branchable.com%'
|
||||||
|
$ git annex get git-annex.branchable.com_
|
||||||
|
get git-annex.branchable.com_ (from web...)
|
||||||
|
ok
|
||||||
|
(recording state in git...)
|
||||||
|
$ git annex migrate
|
||||||
|
migrate git-annex.branchable.com_ (checksum...) ok
|
||||||
|
(recording state in git...)
|
||||||
|
$ ls -l
|
||||||
|
[...] git-annex.branchable.com_ -> .git/annex/objects/31/qv/MD5E-s12462--6b956d66f8352205df79936ada326ec3/MD5E-s12462--6b956d66f8352205df79936ada326ec3
|
||||||
|
$ git annex whereis git-annex.branchable.com_
|
||||||
|
whereis git-annex.branchable.com_ (2 copies)
|
||||||
|
00000000-0000-0000-0000-000000000001 -- web
|
||||||
|
60079e0e-42e4-492e-a7b1-dde764d069eb -- [here]
|
||||||
|
|
||||||
|
web: https://git-annex.branchable.com/
|
||||||
|
ok
|
||||||
|
# End of transcript or log.
|
||||||
|
"""]]
|
||||||
|
|
||||||
|
It doesn't work with the custom special remote:
|
||||||
|
[[!format sh """
|
||||||
|
$ datalad create test-ds
|
||||||
|
create(ok): [...] (dataset)
|
||||||
|
$ cd test-ds/
|
||||||
|
$ datalad download-cds --lazy --nosave --path "2022-01-01.grib" "$(cat <<EOF
|
||||||
|
{
|
||||||
|
"dataset": "reanalysis-era5-complete",
|
||||||
|
"sub-selection": {
|
||||||
|
"class": "ea",
|
||||||
|
"date": "2022-01-01",
|
||||||
|
"expver": "1",
|
||||||
|
"levelist": "1",
|
||||||
|
"levtype": "ml",
|
||||||
|
"param": "130",
|
||||||
|
"stream": "oper",
|
||||||
|
"time": "00:00:00/06:00:00/12:00:00/18:00:00",
|
||||||
|
"type": "an",
|
||||||
|
"grid": ".3/.3"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
EOF
|
||||||
|
)"
|
||||||
|
cds(ok): [...] (dataset)
|
||||||
|
$ ls -l
|
||||||
|
[...] 2022-01-01.grib -> '.git/annex/objects/Mx/4G/URL--cds&cv1-eyJkYXRhc2V0IjoicmVhbmFs-162a71d794c333f5e04b13283421a49a/URL--cds&cv1-eyJkYXRhc2V0IjoicmVhbmFs-162a71d794c333f5e04b13283421a49a'
|
||||||
|
$ git annex whereis 2022-01-01.grib
|
||||||
|
whereis 2022-01-01.grib (1 copy)
|
||||||
|
923e2755-e747-42f4-890a-9c921068fb82 -- [cds]
|
||||||
|
|
||||||
|
cds: {"dataset":"reanalysis-era5-complete","sub-selection":{"class":"ea","date":"2022-01-01","expver":"1","levelist":"1","levtype":"ml","param":"130","stream":"oper","time":"00:00:00/06:00:00/12:00:00/18:00:00","type":"an","grid":".3/.3"}}
|
||||||
|
cds: cds:v1-eyJkYXRhc2V0IjoicmVhbmFseXNpcy1lcmE1LWNvbXBsZXRlIiwic3ViLXNlbGVjdGlvbiI6eyJjbGFzcyI6ImVhIiwiZGF0ZSI6IjIwMjItMDEtMDEiLCJleHB2ZXIiOiIxIiwibGV2ZWxpc3QiOiIxIiwibGV2dHlwZSI6Im1sIiwicGFyYW0iOiIxMzAiLCJzdHJlYW0iOiJvcGVyIiwidGltZSI6IjAwOjAwOjAwLzA2OjAwOjAwLzEyOjAwOjAwLzE4OjAwOjAwIiwidHlwZSI6ImFuIiwiZ3JpZCI6Ii4zLy4zIn19
|
||||||
|
ok
|
||||||
|
$ git config --local remote.cds.annex-security-allow-unverified-downloads ACKTHPPT
|
||||||
|
$ git annex get 2022-01-01.grib
|
||||||
|
get 2022-01-01.grib (from cds...)
|
||||||
|
2024-04-06 11:37:05,250 INFO Welcome to the CDS
|
||||||
|
2024-04-06 11:37:05,251 INFO Sending request to https://cds.climate.copernicus.eu/api/v2/resources/reanalysis-era5-complete
|
||||||
|
2024-04-06 11:37:05,340 INFO Request is queued
|
||||||
|
2024-04-06 11:37:06,400 INFO Request is running
|
||||||
|
2024-04-06 11:37:26,399 INFO Request is completed
|
||||||
|
2024-04-06 11:37:26,399 INFO Downloading https://download-0017.copernicus-climate.eu/cache-compute-0017/cache/data9/adaptor.mars.external-1712396225.5545986-18258-18-822e5b91-cf60-4dbd-a808-a1253d4fe109.grib to .git/annex/tmp/URL--cds&cv1-eyJkYXRhc2V0IjoicmVhbmFs-162a71d794c333f5e04b13283421a49a (5.5M)
|
||||||
|
0%| | 0.00/5.51M [00:00<?, ?B/s] 2%|▏ | 104k/5.51M [00:00<00:06, 866kB/s] 17%|█▋ | 942k/5.51M [00:00<00:00, 5.00MB/s] 57%|█████▋ | 3.15M/5.51M [00:00<00:00, 13.0MB/s] 99%|█████████▉| 5.48M/5.51M [00:00<00:00, 17.3MB/s] 2024-04-06 11:37:27,322 INFO Download rate 6M/s
|
||||||
|
ok
|
||||||
|
(recording state in git...)
|
||||||
|
$ git annex migrate
|
||||||
|
migrate 2022-01-01.grib (checksum...) ok
|
||||||
|
(recording state in git...)
|
||||||
|
$ ls -l
|
||||||
|
[...] 2022-01-01.grib -> .git/annex/objects/KJ/6K/MD5E-s5774880--94a848eefd02d72952c8541c52a93550.grib/MD5E-s5774880--94a848eefd02d72952c8541c52a93550.grib
|
||||||
|
$ git annex whereis 2022-01-01.grib
|
||||||
|
whereis 2022-01-01.grib (1 copy)
|
||||||
|
5dfef0c9-8e18-4ea2-9ee1-646830b5749b -- [here]
|
||||||
|
ok
|
||||||
|
$ git annex drop 2022-01-01.grib
|
||||||
|
drop 2022-01-01.grib (unsafe)
|
||||||
|
Could only verify the existence of 0 out of 1 necessary copy
|
||||||
|
|
||||||
|
Rather than dropping this file, try using: git annex move
|
||||||
|
|
||||||
|
(Use --force to override this check, or adjust numcopies.)
|
||||||
|
failed
|
||||||
|
drop: 1 failed
|
||||||
|
"""]]
|
||||||
|
|
||||||
|
I know that this is sort of abusing the URL handling in git-annex, but it was super easy to implement. You already recommended at some point that I use SETSTATE/GETSTATE from the external special remote protocol instead, but I haven't gotten around to reworking it for that yet.
|
||||||
|
|
||||||
|
### Have you had any luck using git-annex before? (Sometimes we get tired of reading bug reports all day and a lil' positive end note does wonders)
|
||||||
|
|
||||||
|
Yes! It is absolutely great, thank you for it.
|
|
@ -0,0 +1,9 @@
|
||||||
|
[[!comment format=mdwn
|
||||||
|
username="m.risse@77eac2c22d673d5f10305c0bade738ad74055f92"
|
||||||
|
nickname="m.risse"
|
||||||
|
avatar="http://cdn.libravatar.org/avatar/59541f50d845e5f81aff06e88a38b9de"
|
||||||
|
subject="comment 1"
|
||||||
|
date="2024-04-06T10:05:04Z"
|
||||||
|
content="""
|
||||||
|
Looking at this again I am also surprised that I didn't need to set the equivalent of `git config --local remote.cds.annex-security-allow-unverified-downloads ACKTHPPT` for the web special remote when get'ing a file with just a URL key.
|
||||||
|
"""]]
|
|
@ -0,0 +1,8 @@
|
||||||
|
[[!comment format=mdwn
|
||||||
|
username="yarikoptic"
|
||||||
|
avatar="http://cdn.libravatar.org/avatar/f11e9c84cb18d26a1748c33b48c924b4"
|
||||||
|
subject="comment 1"
|
||||||
|
date="2024-04-29T13:13:48Z"
|
||||||
|
content="""
|
||||||
|
> fixed in [10.20230626-58-ga05bc6a314 AKA 10.20230802~99](https://git.kitenet.net/index.cgi/git-annex.git/commit/?id=a05bc6a31459dedd5d199fbc864af2e9d7d1f6d1) --[[yarikoptic]]
|
||||||
|
"""]]
|
|
@ -0,0 +1,8 @@
|
||||||
|
[[!comment format=mdwn
|
||||||
|
username="nobodyinperson"
|
||||||
|
avatar="http://cdn.libravatar.org/avatar/736a41cd4988ede057bae805d000f4f5"
|
||||||
|
subject="Happens on nix on MacOS as well"
|
||||||
|
date="2024-04-13T15:51:54Z"
|
||||||
|
content="""
|
||||||
|
I just had the same problem. An `addurl` test failing while building on MacOS with `nix-shell -p git-annex`. I recorded a video of it coincidentally. I don't know what's with the `security` program it tries to call. I was using a MacOS VM via [Docker-OSX](https://github.com/sickcodes/Docker-OSX?tab=readme-ov-file#big-sur-).
|
||||||
|
"""]]
|
|
@ -1,3 +1,7 @@
|
||||||
|
[[!toc ]]
|
||||||
|
|
||||||
|
## motivations
|
||||||
|
|
||||||
Say we have 2 drives and want to fill them both evenly with files,
|
Say we have 2 drives and want to fill them both evenly with files,
|
||||||
different files in each drive. Currently, preferred content cannot express
|
different files in each drive. Currently, preferred content cannot express
|
||||||
that entirely:
|
that entirely:
|
||||||
|
|
|
@ -72,7 +72,7 @@ on its own line, followed by a newline and the binary data.
|
||||||
The Len value tells how many bytes of data to read.
|
The Len value tells how many bytes of data to read.
|
||||||
|
|
||||||
DATA 3
|
DATA 3
|
||||||
foo1
|
foo
|
||||||
|
|
||||||
Note that there is no newline after the binary data; the next protocol
|
Note that there is no newline after the binary data; the next protocol
|
||||||
message will come immediately after it.
|
message will come immediately after it.
|
||||||
|
|
125
doc/design/p2p_protocol_over_http.mdwn
Normal file
125
doc/design/p2p_protocol_over_http.mdwn
Normal file
|
@ -0,0 +1,125 @@
|
||||||
|
[[!toc ]]
|
||||||
|
|
||||||
|
## motivation
|
||||||
|
|
||||||
|
The [[P2P protocol]] is a custom protocol that git-annex speaks over a ssh
|
||||||
|
connection (mostly). This is a design working on supporting the P2P
|
||||||
|
protocol over HTTP.
|
||||||
|
|
||||||
|
Upload of annex objects to git remotes that use http is currently not
|
||||||
|
supported by git-annex, and this would be a generally very useful addition.
|
||||||
|
|
||||||
|
For use cases such as OpenNeuro's javascript client, ssh is too difficult
|
||||||
|
to support, so they currently use a special remote that talks to a http
|
||||||
|
endpoint in order to upload objects. Implementing this would let them
|
||||||
|
talk to git-annex over http.
|
||||||
|
|
||||||
|
With the [[passthrough_proxy]], this would let clients configure a single
|
||||||
|
http remote that accesses a more complicated network of git-annex
|
||||||
|
repositories.
|
||||||
|
|
||||||
|
## approach 1: encapsulation
|
||||||
|
|
||||||
|
One approach is to encapsulate the P2P protocol inside HTTP. This has the
|
||||||
|
benefit of being simple to think about. It is not very web-native though.
|
||||||
|
|
||||||
|
There would be a single API endpoint. The client connects and sends a
|
||||||
|
request that encapsulates one or more lines in the P2P protocol. The server
|
||||||
|
sends a response that encapsulates one or more lines in the P2P
|
||||||
|
protocol.
|
||||||
|
|
||||||
|
For example (eliding the full HTTP responses, only showing the data):
|
||||||
|
|
||||||
|
> POST /git-annex HTTP/1.0
|
||||||
|
> Content-Type: x-git-annex-p2p
|
||||||
|
> Content-Length: ...
|
||||||
|
>
|
||||||
|
> AUTH 79a5a1f4-07e8-11ef-873d-97f93ca91925
|
||||||
|
< AUTH_SUCCESS ecf6d4ca-07e8-11ef-8990-9b8c1f696bf6
|
||||||
|
|
||||||
|
> POST /git-annex HTTP/1.0
|
||||||
|
> Content-Type: x-git-annex-p2p
|
||||||
|
> Content-Length: ...
|
||||||
|
>
|
||||||
|
> VERSION 1
|
||||||
|
< VERSION 1
|
||||||
|
|
||||||
|
> POST /git-annex HTTP/1.0
|
||||||
|
> Content-Type: x-git-annex-p2p
|
||||||
|
> Content-Length: ...
|
||||||
|
>
|
||||||
|
> CHECKPRESENT SHA1--foo
|
||||||
|
< SUCCESS
|
||||||
|
|
||||||
|
> POST /git-annex HTTP/1.0
|
||||||
|
> Content-Type: x-git-annex-p2p
|
||||||
|
> Content-Length: ...
|
||||||
|
>
|
||||||
|
> PUT bar SHA1--bar
|
||||||
|
< PUT-FROM 0
|
||||||
|
|
||||||
|
> POST /git-annex HTTP/1.0
|
||||||
|
> Content-Type: x-git-annex-p2p
|
||||||
|
> Content-Length: ...
|
||||||
|
>
|
||||||
|
> DATA 3
|
||||||
|
> foo
|
||||||
|
> VALID
|
||||||
|
< SUCCESS
|
||||||
|
|
||||||
|
Note that, since VERSION is negotiated in one request, the HTTP server
|
||||||
|
needs to know that a series of requests are part of the same P2P protocol
|
||||||
|
session. In the example above, it would not have a good way to do that.
|
||||||
|
One solution would be to add a session identifier UUID to each request.
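Purely as an illustration of that idea (none of this is an implemented API; the endpoint, header name, and use of curl below are hypothetical), each encapsulated request could carry the session identifier explicitly:

```sh
# hypothetical sketch: tag each encapsulated P2P request with a session id so
# the server can tie it back to the VERSION negotiated earlier in the session
sid=$(uuidgen)
printf 'VERSION 1\n' | curl -s -X POST \
	-H 'Content-Type: x-git-annex-p2p' \
	-H "X-Git-Annex-Session: $sid" \
	--data-binary @- https://example.com/git-annex
```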
|
||||||
|
|
||||||
|
## approach 2: HTTP API
|
||||||
|
|
||||||
|
Another approach is to define a web-native API with endpoints that
|
||||||
|
correspond to each action in the P2P protocol.
|
||||||
|
|
||||||
|
Something like this:
|
||||||
|
|
||||||
|
> GET /git-annex/v1/AUTH?clientuuid=79a5a1f4-07e8-11ef-873d-97f93ca91925 HTTP/1.0
|
||||||
|
< AUTH_SUCCESS ecf6d4ca-07e8-11ef-8990-9b8c1f696bf6
|
||||||
|
|
||||||
|
> GET /git-annex/v1/CHECKPRESENT?key=SHA1--foo&clientuuid=79a5a1f4-07e8-11ef-873d-97f93ca91925&serveruuid=ecf6d4ca-07e8-11ef-8990-9b8c1f696bf6 HTTP/1.0
|
||||||
|
> SUCCESS
|
||||||
|
|
||||||
|
> GET /git-annex/v1/PUT-FROM?key=SHA1--foo&clientuuid=79a5a1f4-07e8-11ef-873d-97f93ca91925&serveruuid=ecf6d4ca-07e8-11ef-8990-9b8c1f696bf6 HTTP/1.0
|
||||||
|
< PUT-FROM 0
|
||||||
|
|
||||||
|
> POST /git-annex/v1/PUT?key=SHA1--foo&associatedfile=bar&put-from=0&clientuuid=79a5a1f4-07e8-11ef-873d-97f93ca91925&serveruuid=ecf6d4ca-07e8-11ef-8990-9b8c1f696bf6 HTTP/1.0
|
||||||
|
> Content-Type: application/octet-stream
|
||||||
|
> Content-Length: 4
|
||||||
|
> foo1
|
||||||
|
< SUCCESS
|
||||||
|
|
||||||
|
(In the last example above "foo" is the content; there is an additional byte at the end that
|
||||||
|
is 1 for VALID and 0 for INVALID. This seems better than needing an entire
|
||||||
|
other request to indicate validity.)
|
||||||
|
|
||||||
|
This needs a more complex spec. But it's easier for others to implement,
|
||||||
|
especially since it does not need a session identifier, so the HTTP server can
|
||||||
|
be stateless.
|
||||||
|
|
||||||
|
## HTTP GET
|
||||||
|
|
||||||
|
It should be possible to support a regular HTTP get of a key, with
|
||||||
|
no additional parameters, so that annex objects can be served to other clients
|
||||||
|
from this web server.
|
||||||
|
|
||||||
|
> GET /git-annex/key/SHA1--foo HTTP/1.0
|
||||||
|
< foo
|
||||||
|
|
||||||
|
Although this would be a special case, not used by git-annex, because the P2P
|
||||||
|
protocol's GET has the complication of offsets, and of the server sending
|
||||||
|
VALID/INVALID after the content, and of needing to know the client's UUID in
|
||||||
|
order to update the location log.
|
||||||
|
|
||||||
|
## Problem: CONNECT
|
||||||
|
|
||||||
|
The CONNECT message allows both sides of the P2P protocol to send DATA
|
||||||
|
messages in any order. This seems difficult to encapsulate in HTTP.
|
||||||
|
|
||||||
|
Probably this can be left unimplemented; it's probably not needed for an HTTP
|
||||||
|
remote?
|
|
@ -1,3 +1,7 @@
|
||||||
|
[[!toc ]]
|
||||||
|
|
||||||
|
## motivations
|
||||||
|
|
||||||
When [[balanced_preferred_content]] is used, there may be many repositories
|
When [[balanced_preferred_content]] is used, there may be many repositories
|
||||||
in a location -- either a server or a cluster -- and getting any given file
|
in a location -- either a server or a cluster -- and getting any given file
|
||||||
may need to access any of them. Configuring remotes for each repository
|
may need to access any of them. Configuring remotes for each repository
|
||||||
|
@ -32,7 +36,15 @@ cluster.
|
||||||
|
|
||||||
A proxy would not hold the content of files itself. It would be a clone of
|
A proxy would not hold the content of files itself. It would be a clone of
|
||||||
the git repository though, probably. Uploads and downloads would stream
|
the git repository though, probably. Uploads and downloads would stream
|
||||||
through the proxy. The git-annex [[P2P_protocol]] could be relayed in this way.
|
through the proxy.
|
||||||
|
|
||||||
|
## protocol
|
||||||
|
|
||||||
|
The git-annex [[P2P_protocol]] would be relayed via the proxy,
|
||||||
|
which would be a regular git ssh remote.
|
||||||
|
|
||||||
|
There is also the possibility of relaying the P2P protocol over another
|
||||||
|
protocol such as HTTP, see [[P2P_protocol_over_http]].
|
||||||
|
|
||||||
## UUID discovery
|
## UUID discovery
|
||||||
|
|
||||||
|
@ -47,11 +59,11 @@ the cluster.
|
||||||
Could the P2P protocol be extended to let the proxy communicate the UUIDs
|
Could the P2P protocol be extended to let the proxy communicate the UUIDs
|
||||||
of all the repositories behind it?
|
of all the repositories behind it?
|
||||||
|
|
||||||
Once the client git-annex knows the set of UUIDs behind the proxy, it can
|
Once the client git-annex knows the set of UUIDs behind the proxy, it could
|
||||||
instantiate a remote object per uuid, each of which accesses the proxy, but
|
eg instantiate a remote object per UUID, each of which accesses the proxy, but
|
||||||
with a different UUID.
|
with a different UUID.
|
||||||
|
|
||||||
But, git-annx usually only does UUID discovery the first time a ssh remote
|
But, git-annex usually only does UUID discovery the first time a ssh remote
|
||||||
is accessed. So it would need to discover at that point that the remote is
|
is accessed. So it would need to discover at that point that the remote is
|
||||||
a proxy. Then it could do UUID discovery each time git-annex starts up.
|
a proxy. Then it could do UUID discovery each time git-annex starts up.
|
||||||
But that adds significant overhead, git-annex would be making a connection
|
But that adds significant overhead, git-annex would be making a connection
|
||||||
|
@ -64,8 +76,7 @@ git-annex branch?
|
||||||
|
|
||||||
With this approach, git-annex would know as soon as it sees the proxy's
|
With this approach, git-annex would know as soon as it sees the proxy's
|
||||||
UUID that this is a proxy for this other set of UUIDS. (Unless its
|
UUID that this is a proxy for this other set of UUIDS. (Unless its
|
||||||
git-annex branch is not up-to-date.) And then it can instantiate a UUID for
|
git-annex branch is not up-to-date.)
|
||||||
each remote.
|
|
||||||
|
|
||||||
One difficulty with this is that, when the git-annex branch is not up to
|
One difficulty with this is that, when the git-annex branch is not up to
|
||||||
date with changes from the proxy, git-annex may try to access repositories
|
date with changes from the proxy, git-annex may try to access repositories
|
||||||
|
@ -76,17 +87,55 @@ to store data when eg, all the repositories that is knows about are full.
|
||||||
Just getting the git-annex back in sync should recover from either
|
Just getting the git-annex back in sync should recover from either
|
||||||
situation.
|
situation.
|
||||||
|
|
||||||
## streaming to special remotes
|
## user interface
|
||||||
|
|
||||||
As well as being an intermediary to git-annex repositories, the proxy could
|
What to name the instantiated remotes? Probably the best that could
|
||||||
provide access to other special remotes. That could be an object store like
|
be done is to use the proxy's own remote names as suffixes on the client.
|
||||||
S3, which might be internal to the cluster or not. When using a cloud
|
Eg, the proxy's "node1" remote is "proxy-node1".
|
||||||
service like S3, only the proxy needs to know the access credentials.
|
|
||||||
|
|
||||||
Currently git-annex does not support streaming content to special remotes.
|
But the user probably doesn't want to pick which node to send content to.
|
||||||
The remote interface operates on object files stored on disk. See
|
They don't necessarily know anything about the nodes. Ideally the user
|
||||||
[[todo/transitive_transfers]] for discussion of that problem. If proxies
|
would `git-annex copy --to proxy` or `git-annex push` and let it pick
|
||||||
get implemented, that problem should be revisited.
|
which instantiated remote(s) to send to.
|
||||||
|
|
||||||
|
To make `git-annex copy --to proxy` work, `storeKey` could be changed to
|
||||||
|
allow returning a UUID (or UUIDs) where the content was actually stored.
|
||||||
|
That would also allow a single upload to the proxy to fan out and be stored
|
||||||
|
in multiple nodes. The proxy would use preferred content to pick which of
|
||||||
|
its nodes to store on.
|
||||||
|
|
||||||
|
Instantiated remotes would still be needed for `git-annex get` and similar
|
||||||
|
to work.
|
||||||
|
|
||||||
|
To make `git-annex copy --from proxy` work, the proxy would need to pick
|
||||||
|
a node and stream content from it. That's doable, but how to handle a case
|
||||||
|
where a node gets corrupted? The best it could do is mark that node as no
|
||||||
|
longer containing the content (as if a fsck failed) and try another one
|
||||||
|
next time. This complication might not be necessary. Consider that
|
||||||
|
while `git-annex copy --to foo` followed later by `git-annex copy --from foo`
|
||||||
|
will usually work, it doesn't work when eg first copying to a transfer
|
||||||
|
remote, which then sends the content elsewhere and drops its copy.
|
||||||
|
|
||||||
|
What about dropping? `git-annex drop --from proxy` could be made to work,
|
||||||
|
by having `removeKey` return a list of UUIDs that the content was dropped
|
||||||
|
from. What should that do if it's able to drop from some nodes but not
|
||||||
|
others? Perhaps it would need to be able to return a list of UUIDs that
|
||||||
|
content was dropped from but still indicate it overall failed to drop.
|
||||||
|
(Note that it's entirely possible that dropping from one node of the proxy
|
||||||
|
involves lockContent on another node of the proxy in order to satisfy
|
||||||
|
numcopies.)
|
||||||
|
|
||||||
|
A command like `git-annex push` would see all the instantiated remotes and
|
||||||
|
would pick one to send content to. Seems like the proxy might choose to
|
||||||
|
`storeKey` the content on other node(s) than the requested one. Which would
|
||||||
|
be fine. But, `git-annex push` would still do considerable extra work in
|
||||||
|
iterating over all the instantiated remotes. So it might be better to make
|
||||||
|
such commands not operate on instantiated remotes for sending content but
|
||||||
|
only on the proxy.
|
||||||
|
|
||||||
|
Commands like `git-annex push` and `git-annex pull`
|
||||||
|
should also skip the instantiated remotes when pushing or pulling the git
|
||||||
|
repo, because that would be extra work that accomplishes nothing.
|
||||||
|
|
||||||
## speed
|
## speed
|
||||||
|
|
||||||
|
@ -107,6 +156,18 @@ content. Eg, analize what files are typically requested, and store another
|
||||||
copy of those on the proxy. Perhaps prioritize storing smaller files, where
|
copy of those on the proxy. Perhaps prioritize storing smaller files, where
|
||||||
latency tends to swamp transfer speed.
|
latency tends to swamp transfer speed.
|
||||||
|
|
||||||
|
## streaming to special remotes
|
||||||
|
|
||||||
|
As well as being an intermediary to git-annex repositories, the proxy could
|
||||||
|
provide access to other special remotes. That could be an object store like
|
||||||
|
S3, which might be internal to the cluster or not. When using a cloud
|
||||||
|
service like S3, only the proxy needs to know the access credentials.
|
||||||
|
|
||||||
|
Currently git-annex does not support streaming content to special remotes.
|
||||||
|
The remote interface operates on object files stored on disk. See
|
||||||
|
[[todo/transitive_transfers]] for discussion of that problem. If proxies
|
||||||
|
get implemented, that problem should be revisited.
|
||||||
|
|
||||||
## encryption
|
## encryption
|
||||||
|
|
||||||
When the proxy is in front of a special remote that uses encryption, where
|
When the proxy is in front of a special remote that uses encryption, where
|
||||||
|
@ -125,3 +186,82 @@ implementation for this.
|
||||||
|
|
||||||
There's potentially a layering problem here, because exactly how encryption
|
There's potentially a layering problem here, because exactly how encryption
|
||||||
(or chunking) works can vary depending on the type of special remote.
|
(or chunking) works can vary depending on the type of special remote.
|
||||||
|
|
||||||
|
## cycles
|
||||||
|
|
||||||
|
What if repo A is a proxy and has repo B as a remote. Meanwhile, repo B is
|
||||||
|
a proxy and has repo A as a remote?
|
||||||
|
|
||||||
|
An upload to repo A will start by checking if repo B wants the content and if so,
|
||||||
|
start an upload to repo B. Then the same happens on repo B, leading it to
|
||||||
|
start an upload to repo A.
|
||||||
|
|
||||||
|
At this point, it might be possible for git-annex to detect the cycle,
|
||||||
|
if the proxy uses a transfer lock file. If repo B or repo A had some other
|
||||||
|
remote that is not part of a cycle, they could deposit the upload there and
|
||||||
|
the upload would still succeed. Otherwise the upload would fail, which is
|
||||||
|
probably the best that can be done with such a broken configuration.
|
||||||
|
|
||||||
|
So, it seems like proxies will need to take transfer locks for uploads,
|
||||||
|
even though the content is being proxied to elsewhere.
|
||||||
|
|
||||||
|
Dropping could have similar cycles with content presence locking, which
|
||||||
|
needs to be thought through as well. A cycle of the actual dropContent
|
||||||
|
operation might also be possible.
|
||||||
|
|
||||||
|
## exporttree=yes
|
||||||
|
|
||||||
|
Could the proxy be in front of a special remote that uses exporttree=yes?
|
||||||
|
|
||||||
|
Some possible approaches:
|
||||||
|
|
||||||
|
* Proxy caches files until all the files in the configured
|
||||||
|
annex-tracking-branch are available, then exports them all to the special
|
||||||
|
remote. Not ideal at all.
|
||||||
|
* Proxy exports each file to the special remote as it is received.
|
||||||
|
It records an incomplete tree export after each export.
|
||||||
|
Once all files in the configured annex-tracking-branch have been sent,
|
||||||
|
it records a completed tree export. This seems possible, it's similar
|
||||||
|
to `git-annex export --to=remote` recovering after having been
|
||||||
|
interrupted.
|
||||||
|
* Proxy storeExport and all related export/import actions. This would need
|
||||||
|
a large expansion of the P2P protocol.
|
||||||
|
|
||||||
|
The first two approaches need some way to communicate the
|
||||||
|
configured annex-tracking-branch over the P2P protocol. Or to communicate
|
||||||
|
the tree that it currently points to.
|
||||||
|
|
||||||
|
The first two approaches also have a complication when a key is sent to
|
||||||
|
the proxy that is not part of the configured annex-tracking-branch. What
|
||||||
|
does the proxy do with it?
|
||||||
|
|
||||||
|
## possible enhancement: indirect uploads
|
||||||
|
|
||||||
|
(Thanks to Chris Markiewicz for this idea.)
|
||||||
|
|
||||||
|
When a client wants to upload an object, the proxy could indicate that the
|
||||||
|
upload should not be sent to it, but instead be PUT to a HTTP url that it
|
||||||
|
provides to the client.
|
||||||
|
|
||||||
|
An example use case involves
|
||||||
|
[presigned S3 urls](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-presigned-url.html).
|
||||||
|
When one of the proxy's nodes is a S3 bucket, having the client upload
|
||||||
|
directly to S3 would avoid needing double traffic through the proxy's
|
||||||
|
network.
|
||||||
|
|
||||||
|
This would need a special remote that generates the presigned S3 url.
|
||||||
|
Probably an external, so the external special remote protocol would need to
|
||||||
|
be updated as well as the P2P protocol.
|
||||||
|
|
||||||
|
Since an upload to a proxy can be distributed to multiple nodes, should
|
||||||
|
the proxy be able to indicate more than one url that the client
|
||||||
|
should upload to? Also the proxy might want an upload to still be sent to
|
||||||
|
it in addition to url(s). Of course the downside is that the client would
|
||||||
|
need to upload more than once, which eliminates one benefit of the proxy.
|
||||||
|
So it might be reasonable to only support one url, but what if the proxy
|
||||||
|
has multiple remotes that want to provide urls, how does it pick which one
|
||||||
|
wins?
|
||||||
|
|
||||||
|
Is only an URL enough for the client to be able to upload to wherever? It
|
||||||
|
may be that the HTTP verb is also necessary. Consider POST vs PUT. Some
|
||||||
|
services might need additional HTTP headers.
|
||||||
|
|
|
@ -13,7 +13,7 @@ about the new remote yet, and crashed (and was restarted knowing about it,
|
||||||
so successfully sent any other files). So got sidetracked on fixing that.
|
so successfully sent any other files). So got sidetracked on fixing that.
|
||||||
|
|
||||||
Also did some work to make the gpg bundled with git-annex on OSX be
|
Also did some work to make the gpg bundled with git-annex on OSX be
|
||||||
compatable with the config files written by MacGPG. At first I was going to
|
compatible with the config files written by MacGPG. At first I was going to
|
||||||
hack it to not crash on the options it didn't support, but it turned out
|
hack it to not crash on the options it didn't support, but it turned out
|
||||||
that upgrading to version 1.4.14 actually fixed the problem that was making
|
that upgrading to version 1.4.14 actually fixed the problem that was making
|
||||||
it build without support for DNS.
|
it build without support for DNS.
|
||||||
|
|
|
@ -40,7 +40,7 @@ complicate it, and the hidden service side would need to listen on a unix
|
||||||
socket, instead of the regular http port. It might be worth it to use http
|
socket, instead of the regular http port. It might be worth it to use http
|
||||||
for tor, if it could be reused for git-annex http servers not on the tor
|
for tor, if it could be reused for git-annex http servers not on the tor
|
||||||
network. But, then I'd have to make the http server support git pull and
|
network. But, then I'd have to make the http server support git pull and
|
||||||
push over http in a way that's compatable with how git uses http, including
|
push over http in a way that's compatible with how git uses http, including
|
||||||
authentication. Which is a whole nother ball of complexity. So, I'm leaning
|
authentication. Which is a whole nother ball of complexity. So, I'm leaning
|
||||||
instead to using a simple custom protocol something like:
|
instead to using a simple custom protocol something like:
|
||||||
|
|
||||||
|
|
|
@ -79,7 +79,7 @@ repository with access to the encrypted data stored in the special remote.
|
||||||
By default `gpg` is used for shared encryption, but it is also possible to
|
By default `gpg` is used for shared encryption, but it is also possible to
|
||||||
use other programs that implement the Stateless OpenPGP command line
|
use other programs that implement the Stateless OpenPGP command line
|
||||||
interface. For example, to use Sequoia PGP's `sqop` command, configured to
|
interface. For example, to use Sequoia PGP's `sqop` command, configured to
|
||||||
be backwards compatable with `gpg`:
|
be backwards compatible with `gpg`:
|
||||||
|
|
||||||
git config annex.shared-sop-command sqop
|
git config annex.shared-sop-command sqop
|
||||||
git config annex.shared-sop-profile rfc4880
|
git config annex.shared-sop-profile rfc4880
|
||||||
|
|
35
doc/forum/Alternative_modes_for_annex_repos.mdwn
Normal file
35
doc/forum/Alternative_modes_for_annex_repos.mdwn
Normal file
|
@ -0,0 +1,35 @@
|
||||||
|
I have given a lot of thought and experimentation to how git-annex could be used for large projects where there is a desire to distribute files to many users, but where only a minority of users would actually push key changes.
|
||||||
|
|
||||||
|
The first option would be to have an annexless mode, where a local repo either has no uuid, or where the git-annex branch is not stored in the default namespace.
|
||||||
|
|
||||||
|
This is for cases where the client only cares that a file exists in the repo and that it has been verified.
|
||||||
|
|
||||||
|
One possibility could be `git-annex-get --no-init`, which would not init a local repo, but would get and verify a file. Whether content is present would simply be determined by whether the file exists. Only upon making a change would the repo need to be fully inited.
|
||||||
|
|
||||||
|
Or even better, in a restricted environment where git-annex is not available, this case is simple enough that getting a key from a url could be done with a shellscript. The url could be extracted from the upstream git-annex branch without checking it out, and the symlinks used for verification. However, there is a chance that the upstream git-annex branch may not be stable (like if it is not propagated after a mirror), so one could "shrinkwrap" keys and store their remote url locations in a .gitattributes file or somewhere else in the same branch. If key changes are desired, it can be fairly effortlessly upgraded to an actual git-annex repo.
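For concreteness, a minimal sketch of that shellscript idea (assuming bash, the usual md5-based layout of the git-annex branch where registered URLs live in `xxx/yyy/KEY.log.web`, and that the last field of each log line is the URL -- all of which would need checking):

```sh
# sketch: fetch an annexed file's content by its registered URL, without an
# initialized annex, reading the upstream git-annex branch directly
f=somefile                                    # hypothetical annexed symlink
key=$(basename "$(readlink "$f")")            # key name from the symlink target
d=$(printf '%s' "$key" | md5sum | cut -c1-6)  # hashdir used by the git-annex branch
url=$(git cat-file -p "git-annex:${d:0:3}/${d:3:3}/$key.log.web" | awk '{u=$NF} END {print u}')
curl -fLo "$f.download" "$url"
# crude verification: compare the downloaded size against the -s<size> field of the key
```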
|
||||||
|
|
||||||
|
A step up from a completely annexless repo would be a hypothetical local-only git-annex repo, where git-annex only uses a git-annex branch locally.
|
||||||
|
|
||||||
|
There could be a `git-annex-init --local` option which creates a `local/git-annex` branch, for local tracking, but would not sync to the server by default.
|
||||||
|
|
||||||
|
In this mode, the upstream git-annex branch would just be pulled and kept read-only, and `local/git-annex` would keep local differences. The `local/git-annex` would just use the union driver to combine upstream changes with local changes. Upgrading to a full git-annex repo would be as easy as creating a new `git-annex` branch at the same commit id as `local/git-annex`
|
||||||
|
|
||||||
|
|
||||||
|
So, in summary, I have considered two modes:
|
||||||
|
|
||||||
|
Fully annex-less mode, which is simple enough to be implemented completely without git-annex, useful for restricted environments. Optionally, it can use a kind of shrinkwrapping to externalize key URLs to a file in the branch to guarantee that the fetch location is stored.
|
||||||
|
|
||||||
|
Second is local mode, where a `local/git-annex` branch is downstream from a git-annex branch, and in order to sync changes back to the server, the repo is upgraded.
|
||||||
|
|
||||||
|
Both of these modes could easily be upgraded to a full git-annex repo on demand.
|
||||||
|
|
||||||
|
I think this is useful when considering large scale usage.
|
||||||
|
|
||||||
|
|
||||||
|
Most of this functionality is something that is probably best suited for a wrapper.
|
||||||
|
|
||||||
|
In terms of any potential core changes to git-annex, it may be as simple as having a GIT_ANNEX_BRANCH environment variable, analogous to the GIT_DIR variable for git.
|
||||||
|
|
||||||
|
Has anyone given any thought to scenarios like this?
|
||||||
|
|
||||||
|
I think there are cases where developers use git-lfs where something like this might be a better fit. It would also help with making git-annex repos more generally available and portable.
|
11
doc/forum/Cheapest_test_for_an_initialized_annex__63__.mdwn
Normal file
11
doc/forum/Cheapest_test_for_an_initialized_annex__63__.mdwn
Normal file
|
@ -0,0 +1,11 @@
|
||||||
|
I need to implement some functionality that should work in similar ways when faced with a plain Git vs a git-annex repo. In this context I am looking for the cheapest appropriate test for an initialized annex.
|
||||||
|
|
||||||
|
I have looked at
|
||||||
|
|
||||||
|
- `git config annex.uuid` (~3ms)
|
||||||
|
- `git rev-parse git-annex` (~3ms)
|
||||||
|
- `git annex info --fast -q` (~50ms)
|
||||||
|
|
||||||
|
Are the fast ones sufficient to guarantee that no subsequent call to a git-annex command would yield `git-annex: First run: git-annex init`?
|
||||||
|
|
||||||
|
Thanks!
|
|
@ -0,0 +1,10 @@
|
||||||
|
[[!comment format=mdwn
|
||||||
|
username="aurtzy"
|
||||||
|
avatar="http://cdn.libravatar.org/avatar/7d44f9b718a534ada04551ee8c72e07f"
|
||||||
|
subject="comment 1"
|
||||||
|
date="2024-04-25T17:35:49Z"
|
||||||
|
content="""
|
||||||
|
`git config annex.uuid` seems reliable enough to me - the uuid is stored in `.git/config` and doesn't exist if it's not annex-inited (assuming no malicious behavior).
|
||||||
|
|
||||||
|
If you're looking for the very cheapest, perhaps something like a file-exists check on `.git/annex` would work? I don't know if there are any edge cases with this one, though.
|
||||||
|
"""]]
|
|
@ -0,0 +1,11 @@
|
||||||
|
[[!comment format=mdwn
|
||||||
|
username="joey"
|
||||||
|
subject="""comment 2"""
|
||||||
|
date="2024-04-30T19:18:58Z"
|
||||||
|
content="""
|
||||||
|
annex.uuid is the right way.
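For example, a minimal sketch of such a check:

```sh
# a non-empty annex.uuid in .git/config means the repo has been annex-inited
if [ -n "$(git config --get annex.uuid)" ]; then
	echo "initialized git-annex repo"
else
	echo "plain git repo (or not yet annex-inited)"
fi
```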
|
||||||
|
|
||||||
|
I would not trust checking existence of .git/annex directory. There may be
|
||||||
|
some situation where the repo is initialized, then uninitialized, but that
|
||||||
|
directory still exists. Or similar.
|
||||||
|
"""]]
|
|
@ -0,0 +1,3 @@
|
||||||
|
Is it possible to disable the automatic switching to the `adjusted/main(unlocked)` branch?
|
||||||
|
|
||||||
|
Context: For our measurement campaign, we have a policy to only use unlocked files anyway, and git-annex always initially switching to that adjusted branch is a bit annoying, as we have to manually switch back to the default branch - whose name is not always the same. Because when you `git annex assist` in an empty repo, it first pushes the `synced/main` branch, which makes it the default on the (Gitea) remote, so subsequent `git clone`s use that as the default branch. I'd like to avoid the adjusted branch if possible, as I have hit some performance / merge conflict situations with it in the past.
|
|
@ -0,0 +1,52 @@
|
||||||
|
I'm working with Datalad, but I suspect that my problems stem from not fully understanding how git annex works.
|
||||||
|
|
||||||
|
I’ve been trying to set up a dataset that primarily lives on a web server, but needs to be clone-able by other people. The annex files are visible and downloadable from the server’s website. In particular, the files I’m concerned about here are in a subdataset.
|
||||||
|
|
||||||
|
I used `datalad addurls` to add the URL of each file on the server to each file in the annex. When I run `git annex whereis filename`, it shows that the file lives on the server in the server’s local copy of the dataset, and that it lives on the web, with a correct URL. In fact, if I click on that URL and open it in a browser, it downloads my file.
|
||||||
|
|
||||||
|
The dataset lives on Github, but the annex does not. When I make a clone of the superdataset on my personal computer, I get messages like
|
||||||
|
|
||||||
|
```
|
||||||
|
[INFO ] Unable to parse git config from origin
|
||||||
|
[INFO ] Remote origin does not have git-annex installed; setting annex-ignore
|
||||||
|
| This could be a problem with the git-annex installation on the remote. Please make sure that git-annex-shell is available in PATH when you ssh into the remote. Once you have fixed the git-annex installation, run: git annex enableremote origin
|
||||||
|
install(ok): /home/erin/Documents/DHA/carcas (dataset)
|
||||||
|
```
|
||||||
|
|
||||||
|
Then when I'm in the dataset `carcas-models` that has the annex and I run `datalad get models
|
||||||
|
/Alpaca\ 3rd\ Carpal\ L.glb`, I get this error message:
|
||||||
|
|
||||||
|
```
|
||||||
|
get(error): models/Alpaca 3rd Carpal L.glb (file) [no known url
|
||||||
|
no known url
|
||||||
|
no known url]
|
||||||
|
```
|
||||||
|
|
||||||
|
I suspect my problem is with how I set things up with git annex, because when I try `git annex get models/Alpaca\ 3rd\ Carpal\ L.glb`, I get the error:
|
||||||
|
|
||||||
|
```
|
||||||
|
get models/Alpaca 3rd Carpal L.glb (from web...)
|
||||||
|
no known url
|
||||||
|
|
||||||
|
Unable to access these remotes: web
|
||||||
|
|
||||||
|
Maybe add some of these git remotes (git remote add ...):
|
||||||
|
095e299d-037e-4172-87e0-bbd7183a6613 -- CARCAS models on the 3dviewers server
|
||||||
|
|
||||||
|
(Note that these git remotes have annex-ignore set: origin)
|
||||||
|
failed
|
||||||
|
get: 1 failed
|
||||||
|
```
|
||||||
|
|
||||||
|
I'm confused about how to debug this, because when I run `git annex whereis models/Alpaca\ 3rd\ Carpal\ L.glb`, everything looks correct:
|
||||||
|
|
||||||
|
```
|
||||||
|
whereis models/Alpaca 3rd Carpal L.glb (2 copies)
|
||||||
|
00000000-0000-0000-0000-000000000001 -- web
|
||||||
|
095e299d-037e-4172-87e0-bbd7183a6613 -- CARCAS models on the 3dviewers server
|
||||||
|
|
||||||
|
web: https://3dviewer.sites.carleton.edu/carcas/carcas-models/models/Alpaca%203rd%20Carpal%20L.glb
|
||||||
|
ok
|
||||||
|
```
|
||||||
|
|
||||||
|
What's the correct way to set up this use case? I don't think that I want the server to be a special remote, because the hidden files like .gitattributes aren't visible. I want to be able to put more files on the server, add their URLs based on where they are on the server, and push to github so that other people can get these files if they want.
|
|
@ -0,0 +1,16 @@
|
||||||
|
[[!comment format=mdwn
|
||||||
|
username="unqueued"
|
||||||
|
avatar="http://cdn.libravatar.org/avatar/3bcbe0c9e9825637ad7efa70f458640d"
|
||||||
|
subject="comment 10"
|
||||||
|
date="2024-04-10T12:26:49Z"
|
||||||
|
content="""
|
||||||
|
For my two cents, I have found git-annex to be a simple enough format that I have only needed basic helper scripts.
|
||||||
|
|
||||||
|
But many operations can be done with one or a few lines of code.
|
||||||
|
|
||||||
|
Git can do much of the heavy lifting for you in terms of looking stuff up from the git-annex branch, and I find the formats to be quite regular and easy to parse.
|
||||||
|
|
||||||
|
I am thinking of bringing some of this together into a PHP library.
|
||||||
|
|
||||||
|
But maybe I should just post my pure git-annex bash/perl one-liners.
|
||||||
|
"""]]
|
15
doc/forum/When_to_reuse_UUIDs_and_avoiding_UUID_clutter.mdwn
Normal file
15
doc/forum/When_to_reuse_UUIDs_and_avoiding_UUID_clutter.mdwn
Normal file
|
@ -0,0 +1,15 @@
|
||||||
|
I wanted to discuss cases for UUID reuse
|
||||||
|
|
||||||
|
One reason is to mutate a special remote type: for example, turning a directory special remote into an rsync special remote and vice versa, passing along the uuid argument to initremote. Changing the directory layout is not hard. And you may wish to re-layout your .git/annex/objects directory to a different directory prefix and upload it to a cloud provider. If it supports an rclone remote that has hashing, you can verify it without having to redownload.
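If I remember right, newer git-annex versions can also express this kind of uuid sharing with `initremote --sameas`; a sketch (the remote names, path, and url are made up, and whether --sameas fits a given setup would need checking):

```sh
# sketch: make an rsync special remote that shares the uuid / data store of an
# existing directory special remote, instead of re-initializing from scratch
git annex initremote mydir type=directory directory=/mnt/annexstore encryption=none
git annex initremote mydir-rsync --sameas=mydir type=rsync rsyncurl=example.com:/mnt/annexstore
```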
|
||||||
|
|
||||||
|
Another good reason to reuse a uuid is to avoid uuid namespace clutter.
|
||||||
|
If you know ahead of time that you are storing data in repos that may later be merged, it makes sense to have a template annex repo to base a new repo off of, as well as store common settings and uuids.
|
||||||
|
|
||||||
|
For example, I have a uuid space for multimedia annexes (peertube, youtube, podcasts, etc).
|
||||||
|
|
||||||
|
The template comes preloaded with a uuid.log and remote.log. If my hostname is in the uuid.log, I reinit with that.
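A sketch of that reinit step (this assumes the description recorded in uuid.log is just the hostname, so the parsing is approximate):

```sh
# look up this host's uuid in the template's uuid.log and reinit with it
uuid=$(git cat-file -p git-annex:uuid.log | awk -v h="$(hostname)" '$2 == h {print $1; exit}')
if [ -n "$uuid" ]; then
	git annex reinit "$uuid"
fi
```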
|
||||||
|
|
||||||
|
If I must merge unrelated histories with conflicting name/uuid values, I first prefix my names with something. After a merge, I can do some gymnastics to make sure that the proper keys are marked present for the respective uuid/name that I have chosen, and make the obsolete uuid/name dead. Simply making them dead is not enough, because even if a special remote uuid is marked dead, if the name is the same it will still cause a conflict, so prefixing uuid/name collisions is important.
|
||||||
|
|
||||||
|
I currently have several annex template repos for different purposes (disk images, multimedia, etc). I have been meaning to automate this process more.
|
||||||
|
|
|
@ -0,0 +1,16 @@
|
||||||
|
[[!comment format=mdwn
|
||||||
|
username="unqueued"
|
||||||
|
avatar="http://cdn.libravatar.org/avatar/3bcbe0c9e9825637ad7efa70f458640d"
|
||||||
|
subject="comment 3"
|
||||||
|
date="2024-04-10T12:46:53Z"
|
||||||
|
content="""
|
||||||
|
I would just clone the repo to the new machine, do `git annex init`, then rsync the contents of `.git/annex/objects`, and then do `git annex fsck --all` to have it recheck every key it knows about.
|
||||||
|
|
||||||
|
Alternatively, if you're concerned that there might be keys that weren't properly recorded somehow, in your new repo, after `.git/annex/objects` has been transferred, you can create an ingestion directory with a flat layout of the copied keys:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
mkdir ingest && find .git/annex/objects -type f -exec mv {} ingest/ \; && git annex reinject --known ingest/*
|
||||||
|
```
|
||||||
|
|
||||||
|
Finally, if you just want to rebuild it from scratch, do cp with the `-cL` options: on macOS this makes a reflink copy and follows the symlinks. Then delete the target .git dir and re-create it.
|
||||||
|
"""]]
|
|
@ -6,7 +6,7 @@
|
||||||
content="""
|
content="""
|
||||||
Sorry it took so long to get back to you.
|
Sorry it took so long to get back to you.
|
||||||
|
|
||||||
You do not necessarily have to have git-annex installed on your web server. But it will open up one of the nicest ways to use git-annex with a server, which is to put a bare git repository on the server, and let git-annex send the contents of large files to that repository. It's fine to install any old version of git-annex on the server, they're all forwards and backwards compatable.
|
You do not necessarily have to have git-annex installed on your web server. But it will open up one of the nicest ways to use git-annex with a server, which is to put a bare git repository on the server, and let git-annex send the contents of large files to that repository. It's fine to install any old version of git-annex on the server, they're all forwards and backwards compatible.
|
||||||
|
|
||||||
In any case, you need git-annex installed on any computers where you want to access the repository, certainly.
|
In any case, you need git-annex installed on any computers where you want to access the repository, certainly.
|
||||||
|
|
||||||
|
|
|
@ -0,0 +1,15 @@
|
||||||
|
I would like to retain a copy of keys which are referenced in a tagged commit.
|
||||||
|
|
||||||
|
- is there some easy way to figure out the total size of keys across all such tagged commits? `git-annex info` seems to report each commit/tree separately. I need a total size.
|
||||||
|
(I will need to assess it over hundreds of repos)
|
||||||
|
|
||||||
|
- ATM I do not think it is possible directly with [preferred-content expressions](https://git-annex.branchable.com/git-annex-preferred-content/).
|
||||||
|
Do you think it would be feasible to develop support for retaining content based on the properties of the commit?
|
||||||
|
|
||||||
|
Or what other "workflow" would you recommend? e.g. I guess upon tagging I could add to all keys some metadata field (e.g. `released`, maybe with the value of the tags where it was released) and then set preferred-content based on having that metadata field?
|
||||||
|
|
||||||
|
|
||||||
|
Target use-case -- the backup remote for dandisets will soon cease to exist, and I need to figure out some backup strategy. For that I want to first assess how much of the tagged data (higher priority to keep) there is to retain.
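Not a single command, but the kind of rough total I have in mind for the first question above (a sketch; it assumes `git annex find --branch` and the `${bytesize}` format variable are available, and keys of unknown size are simply skipped):

```sh
# rough sketch: sum the sizes of the unique keys referenced by any tag
git tag | while read -r t; do
	git annex find --branch="$t" --format='${key} ${bytesize}\n'
done | sort -u | awk '$2 ~ /^[0-9]+$/ {s+=$2} END {print s " bytes"}'
```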
|
||||||
|
|
||||||
|
[[!meta author=yoh]]
|
||||||
|
[[!tag projects/dandi]]
|
|
@ -0,0 +1,8 @@
|
||||||
|
[[!comment format=mdwn
|
||||||
|
username="yarikoptic"
|
||||||
|
avatar="http://cdn.libravatar.org/avatar/f11e9c84cb18d26a1748c33b48c924b4"
|
||||||
|
subject="comment 7"
|
||||||
|
date="2024-04-25T21:11:56Z"
|
||||||
|
content="""
|
||||||
|
Thank you Joey! I hope that release is coming some time soon?
|
||||||
|
"""]]
|
|
@ -34,6 +34,9 @@ files that it does not match will instead be added with `git add`.
|
||||||
|
|
||||||
Use this option to specify a commit message.
|
Use this option to specify a commit message.
|
||||||
|
|
||||||
|
If multiple -m options are given, their values are concatenated
|
||||||
|
as separate paragraphs.
|
||||||
|
|
||||||
* `--content-of=path` `-C path`
|
* `--content-of=path` `-C path`
|
||||||
|
|
||||||
Only add, pull, and push files in the given path.
|
Only add, pull, and push files in the given path.
|
||||||
|
|
|
@@ -107,6 +107,9 @@ the tree of files on the remote, even when importing into a subdirectory.

 Use this option to specify a commit message for the changes that have
 been made to the special remote since the last import from it.

+If multiple -m options are given, their values are concatenated
+as separate paragraphs.
+
 # IMPORTING FROM A DIRECTORY

 When run with a path, `git annex import` **moves** files from somewhere outside
@@ -56,7 +56,7 @@ See [[git-annex-preferred-content]](1).

 Only pull with the remotes with the lowest annex-cost value configured.

 When a list of remotes (or remote groups) is provided, it picks from
-amoung those, otherwise it picks from amoung all remotes.
+among those, otherwise it picks from among all remotes.

 * `--only-annex` `-a`, `--not-only-annex`
@@ -70,7 +70,7 @@ See [[git-annex-preferred-content]](1).

 When this is combined with --no-content, only the git-annex branch
 will be pulled.

-* `--no-content, `-g`, `--content`
+* `--no-content`, `-g`, `--content`

 Use `--no-content` or `-g` to avoid downloading (and dropping)
 the content of annexed files, and also prevent doing any migrations of
@@ -67,7 +67,7 @@ See [[git-annex-preferred-content]](1).

 * `--only-annex` `-a`, `--not-only-annex`

-Only pull the git-annex branch and annexed content from remotes,
+Only push the git-annex branch and annexed content to remotes,
 not other git branches.

 The `annex.synconlyannex` configuration can be set to true to make
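For instance, with `origin` standing in for whichever remote is being pushed to:

    # Push only the git-annex branch and annexed content; other git
    # branches are left alone.
    git annex push --only-annex origin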
@@ -75,7 +75,7 @@ See [[git-annex-preferred-content]](1).

 `--not-only-annex`.

 When this is combined with --no-content, only the git-annex branch
-will be pulled.
+will be pushed.

 * `--no-content`, `-g`, `--content`
@@ -51,6 +51,9 @@ when syncing with repositories that have preferred content configured.

 Use this option to specify a commit message.

+If multiple -m options are given, their values are concatenated
+as separate paragraphs.
+
 * `--pull`, `--no-pull`

 Use this option to disable pulling.
@@ -0,0 +1,10 @@
+[[!comment format=mdwn
+ username="rshalaev"
+ avatar="http://cdn.libravatar.org/avatar/d7f181d338cbcef7418faa01f0441e86"
+ subject="How to find last available version of a file?"
+ date="2024-04-29T14:07:40Z"
+ content="""
+The repo that contains the latest/current version of a file is not accessible. Can git annex whereis find the last available version of a file in other repos (or a specific repo)?
+
+I could loop through the commit log, running whereis for each commit until an earlier version of the file is found, but perhaps there is a better way to do it with a single command?
+"""]]
@@ -0,0 +1,11 @@
+[[!comment format=mdwn
+ username="joey"
+ subject="""Re: How to find last available version of a file?"""
+ date="2024-04-30T18:52:12Z"
+ content="""
+There's not currently a way to do that without some scripting to get
+the keys, and then `git-annex whereis --key`.
+
+I think this idea is worth doing something about, so I made this todo:
+[[todo/wherewas]].
+"""]]
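A sketch of that kind of scripting, assuming the `lookupkey --ref` option (mentioned in the 10.20230926 changelog further down) and that `whereis` exits nonzero when no copy of a key is known; the path is a placeholder and the loop is untested:

    # Walk a file's history, newest first, and report the most recent
    # version whose content is still known to exist somewhere.
    f='path/to/file'
    git log --format=%H -- "$f" | while read -r commit; do
        key=$(git annex lookupkey --ref "$commit:$f") || continue
        if git annex whereis --key "$key" >/dev/null 2>&1; then
            echo "last available version of $f is from commit $commit"
            git annex whereis --key "$key"
            break
        fi
    done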
@@ -873,8 +873,16 @@ repository, using [[git-annex-config]]. See its man page for a list.)
 and also when generating a view branch.

 The default length is 4, which allows extensions like "jpeg". The dot before
-the extension is not counted part of its length. At most two extensions
-at the end of a filename will be preserved, e.g. .gz or .tar.gz .
+the extension is not counted part of its length.
+
+* `annex.maxextensions`
+
+Maximum number of filename extensions to preserve when using a backend
+that preserves filename extensions, and also when generating a view
+branch.
+
+The default is 2, which allows for compound extensions like ".tar.gz".
+When set to 1, it will only preserve the last extension, eg ".gz".

 * `annex.diskreserve`
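For instance, to keep only the last extension in a given repository:

    # Key names will then preserve ".gz" but not ".tar.gz".
    git config annex.maxextensions 1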
@@ -1805,6 +1813,11 @@ Remotes are configured using these settings in `.git/config`.

 Used to identify Amazon Glacier special remotes.
 Normally this is automatically set up by `git annex initremote`.

+* `remote.<name>.annex-rclone`
+
+Used to identify rclone special remotes.
+Normally this is automatically set up by `git annex initremote`.
+
 * `remote.<name>.annex-web`

 Used to identify web special remotes.
@@ -1843,7 +1856,7 @@ Remotes are configured using these settings in `.git/config`.

 * `remote.<name>.annex-externaltype`

-Used external special remotes to record the type of the remote.
+Used by external special remotes to record the type of the remote.

 Eg, if this is set to "foo", git-annex will run a "git-annex-remote-foo"
 program to communicate with the external special remote.
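As an illustration, with a made-up remote name `mycloud` and external type `foo`: after `git annex initremote mycloud type=external externaltype=foo ...`, the recorded type can be read back with:

    # Shows which external special remote program git-annex will run
    # for this remote (here, git-annex-remote-foo).
    git config remote.mycloud.annex-externaltype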
@@ -1,17 +0,0 @@
-git-annex 10.20230926 released with [[!toggle text="these changes"]]
-[[!toggleable text=""" * Fix more breakage caused by git's fix for CVE-2022-24765, this time
-   involving a remote (either local or ssh) that is a repository not owned
-   by the current user.
- * Fix using git remotes that are bare when git is configured with
-   safe.bareRepository = explicit.
- * Fix linker optimisation in linux standalone tarballs.
- * adb: Avoid some problems with unusual characters in exporttree
-   filenames that confuse adb shell commands.
- * push: When on an adjusted branch, propagate changes to parent branch
-   before updating export remotes.
- * lookupkey: Added --ref option.
- * enableremote: Avoid overwriting existing git remote when passed the uuid
-   of a specialremote that was earlier initialized with the same name.
- * Support being built with crypton rather than the no-longer maintained
-   cryptonite.
- * Removed the vendored git-lfs and the GitLfs build flag."""]]
doc/news/version_10.20240430.mdwn (24 lines, Normal file)
@@ -0,0 +1,24 @@
+git-annex 10.20240430 released with [[!toggle text="these changes"]]
+[[!toggleable text=""" * Bug fix: While redundant concurrent transfers were already
+   prevented in most cases, it failed to prevent the case where
+   two different repositories were sending the same content to
+   the same repository.
+ * addurl, importfeed: Added --verifiable option, which improves
+   the safety of --fast or --relaxed by letting the content of
+   annexed files be verified with a checksum that is calculated
+   on a later download from the web. This will become the default later.
+ * Added rclone special remote, which can be used without needing
+   to install the git-annex-remote-rclone program. This needs
+   a forthcoming version of rclone (1.67.0), which supports
+   "rclone gitannex".
+ * sync, assist, import: Allow -m option to be specified multiple
+   times, to provide additional paragraphs for the commit message.
+ * reregisterurl: New command that can change an url from being
+   used by a special remote to being used by the web remote.
+ * annex.maxextensions configuration controls how many filename
+   extensions to preserve.
+ * find: Fix --help for --copies.
+   Thanks, Gergely Risko
+ * Windows: Fix escaping output to terminal when using old
+   versions of MinTTY.
+ * Added dependency on unbounded-delays."""]]
@@ -18,7 +18,7 @@ looks like a cache directory with a missing or empty base file, so it
 decides to clean it up. In the meantime the first process has written
 base and other files and so the rm fails. Also, the first process may
 succeed and end up running git-annex with some locale files missing
-(if the rm happened to delete those), resulting in incompatable
+(if the rm happened to delete those), resulting in incompatible
 system locales being used.

 So, it ought to defer cleaning up old caches until after it's made sure its
@@ -26,6 +26,7 @@ the git history is not stored in them.
 * [[git]]
 * [[httpalso]]
 * [[borg]]
+* [[rclone]]

 The above special remotes are built into git-annex, and can be used
 to tie git-annex into many cloud services.
@@ -63,7 +63,7 @@ the S3 remote.
 affect new objects sent to the remote, but not objects already
 stored there.

-* `host` - Specify in order to use a different, S3 compatable
+* `host` - Specify in order to use a different, S3 compatible
  service.

 * `region` - Specify the region to use. Only makes sense to use when
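To make `host` concrete, pointing the S3 special remote at a self-hosted, S3-compatible service might look roughly like this; the remote name, hostname, and bucket are placeholders, and real setups may need further parameters (encryption keys, port, request style) depending on the service:

    # Use a non-Amazon, S3-compatible endpoint.
    git annex initremote mys3 type=S3 encryption=none \
        host=s3.example.org bucket=annex-data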
Some files were not shown because too many files have changed in this diff.