Merge branch 'master' into adjustedbranch

commit 7811556a5b

40 changed files with 608 additions and 74 deletions
@@ -47,8 +47,14 @@ tryLockExclusive :: Maybe FileMode -> LockFile -> Annex (Maybe LockHandle)
tryLockExclusive m f = tryPidLock m f $ Posix.tryLockExclusive m f

checkLocked :: LockFile -> Annex (Maybe Bool)
checkLocked f = Posix.checkLocked f
	`pidLockCheck` Pid.checkLocked
checkLocked f = Posix.checkLocked f `pidLockCheck` checkpid
  where
	checkpid pidlock = do
		v <- Pid.checkLocked pidlock
		case v of
			-- Only return true when the posix lock file exists.
			Just _ -> Posix.checkLocked f
			Nothing -> return Nothing

getLockStatus :: LockFile -> Annex LockStatus
getLockStatus f = Posix.getLockStatus f
@@ -88,6 +94,6 @@ tryPidLock m f posixlock = liftIO . go =<< pidLockFile

-- The posix lock file is created even when using pid locks, in order to
-- avoid complicating any code that might expect to be able to see that
-- lock file.
-- lock file. But, it's not locked.
dummyPosixLock :: Maybe FileMode -> LockFile -> IO ()
dummyPosixLock m f = closeFd =<< openLockFile ReadLock m f
@@ -201,7 +201,7 @@ checkInsaneLustre dest = do
--
-- Uses a 1 second wait-loop.
--
-- May wait untie timeout if the lock file is stale and is on a network file
-- May wait until timeout if the lock file is stale and is on a network file
-- system, or on a system where the side lock cannot be taken.
waitLock :: Seconds -> LockFile -> IO LockHandle
waitLock (Seconds timeout) lockfile = go timeout
@@ -7,15 +7,13 @@
 - the lock will be released, despite the first thread still having the
 - lockfile open.
 -
 - Or, if a process is already holding an exclusive lock on a file, an
 - Or, if a process is already holding an exclusive lock on a file, and
 - re-opens it and tries to take another exclusive lock, it won't block
 - on the first lock.
 -
 - To avoid these problems, this implements a lock pool. This keeps track
 - of which lock files are being used by the process, and avoids
 - re-opening them. Instead, if a lockfile is in use by the current
 - process, STM is used to handle further concurrent uses of that lock
 - file.
 - of which lock files are being used by the process, using STM to handle
 - inter-process locking.
 -
 - Note that, like Utility.LockFile, this does *not* attempt to be a
 - portability shim; the native locking of the OS is used.
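To make the lock pool design described in that comment concrete, here is a minimal, hedged sketch of an intra-process lock pool built on STM. It is not git-annex's actual Utility.LockPool code; `Pool`, `Mode`, `newPool`, and `acquire` are illustrative names only.

```haskell
-- Hedged sketch only: a process-wide table of the lock files this
-- process holds. Concurrent threads arbitrate in STM instead of
-- re-opening the lock file and tripping over POSIX fcntl semantics.
import Control.Concurrent.STM
import qualified Data.Map.Strict as M

type LockFile = FilePath
data Mode = SharedLock | ExclusiveLock deriving Eq

-- Map from lock file to the mode it is held in and how many users
-- this process currently has.
type Pool = TMVar (M.Map LockFile (Mode, Integer))

newPool :: IO Pool
newPool = newTMVarIO M.empty

-- Blocks (via retry) until the requested mode is compatible with the
-- lock file's existing in-process users, then records the new user.
acquire :: Pool -> LockFile -> Mode -> STM ()
acquire pool file mode = do
    m <- takeTMVar pool
    entry <- case M.lookup file m of
        Just (mode', n)
            | mode == SharedLock && mode' == SharedLock -> return (mode, succ n)
            | n > 0 -> retry
        _ -> return (mode, 1)
    putTMVar pool (M.insert file entry m)
```

A shared request piles onto existing shared users; anything else blocks in `retry` until the pool entry goes away, which is the arbitration the module comment describes.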
@@ -7,20 +7,24 @@

{-# LANGUAGE CPP #-}

module Utility.LockPool.LockHandle where
module Utility.LockPool.LockHandle (
	LockHandle,
	FileLockOps(..),
	dropLock,
#ifndef mingw32_HOST_OS
	checkSaneLock,
#endif
	makeLockHandle,
	tryMakeLockHandle,
) where

import qualified Utility.LockPool.STM as P
#ifndef mingw32_HOST_OS
import Utility.LockPool.STM (LockFile)
#endif

import Control.Concurrent.STM
import Control.Exception

data LockHandle = LockHandle
	{ poolHandle :: P.LockHandle
	, fileLockOps :: FileLockOps
	}
data LockHandle = LockHandle P.LockHandle FileLockOps

data FileLockOps = FileLockOps
	{ fDropLock :: IO ()
@@ -30,7 +34,7 @@ data FileLockOps = FileLockOps
	}

dropLock :: LockHandle -> IO ()
dropLock h = P.releaseLock (poolHandle h) (fDropLock (fileLockOps h))
dropLock (LockHandle ph _) = P.releaseLock ph

#ifndef mingw32_HOST_OS
checkSaneLock :: LockFile -> LockHandle -> IO Bool
@@ -40,26 +44,30 @@ checkSaneLock lockfile (LockHandle _ flo) = fCheckSaneLock flo lockfile
-- Take a lock, by first updating the lock pool, and then taking the file
-- lock. If taking the file lock fails for any reason, take care to
-- release the lock in the lock pool.
makeLockHandle :: STM P.LockHandle -> IO FileLockOps -> IO LockHandle
makeLockHandle pa fa = bracketOnError setup cleanup go
makeLockHandle :: P.LockPool -> LockFile -> (P.LockPool -> LockFile -> STM P.LockHandle) -> (LockFile -> IO FileLockOps) -> IO LockHandle
makeLockHandle pool file pa fa = bracketOnError setup cleanup go
  where
	setup = atomically pa
	cleanup ph = P.releaseLock ph (return ())
	go ph = do
		fo <- fa
		return $ LockHandle ph fo
	setup = atomically (pa pool file)
	cleanup ph = P.releaseLock ph
	go ph = mkLockHandle pool file ph =<< fa file

tryMakeLockHandle :: STM (Maybe P.LockHandle) -> IO (Maybe FileLockOps) -> IO (Maybe LockHandle)
tryMakeLockHandle pa fa = bracketOnError setup cleanup go
tryMakeLockHandle :: P.LockPool -> LockFile -> (P.LockPool -> LockFile -> STM (Maybe P.LockHandle)) -> (LockFile -> IO (Maybe FileLockOps)) -> IO (Maybe LockHandle)
tryMakeLockHandle pool file pa fa = bracketOnError setup cleanup go
  where
	setup = atomically pa
	setup = atomically (pa pool file)
	cleanup Nothing = return ()
	cleanup (Just ph) = P.releaseLock ph (return ())
	cleanup (Just ph) = P.releaseLock ph
	go Nothing = return Nothing
	go (Just ph) = do
		mfo <- fa
		mfo <- fa file
		case mfo of
			Nothing -> do
				cleanup (Just ph)
				return Nothing
			Just fo -> return $ Just $ LockHandle ph fo
			Just fo -> Just <$> mkLockHandle pool file ph fo

mkLockHandle :: P.LockPool -> LockFile -> P.LockHandle -> FileLockOps -> IO LockHandle
mkLockHandle pool file ph fo = do
	atomically $ P.registerCloseLockFile pool file (fDropLock fo)
	return $ LockHandle ph fo
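The ordering described in the comment above (reserve the pool entry first, then take the file lock, and undo the reservation if that throws) is essentially a `bracketOnError` pattern. A hedged, generic sketch with hypothetical `takeSlot`/`releaseSlot`/`takeFileLock` parameters standing in for the STM and file-locking actions:

```haskell
-- Sketch of the acquisition pattern: reserve the in-process pool slot
-- first; if taking the real file lock then throws, bracketOnError
-- guarantees the slot reservation is released again.
import Control.Concurrent.STM (STM, atomically)
import Control.Exception (bracketOnError)

acquireBoth
    :: STM slot          -- reserve a slot in the lock pool
    -> (slot -> IO ())   -- undo the reservation
    -> IO lock           -- take the actual file lock (may throw)
    -> IO (slot, lock)
acquireBoth takeSlot releaseSlot takeFileLock =
    bracketOnError (atomically takeSlot) releaseSlot $ \slot -> do
        l <- takeFileLock
        return (slot, l)
```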
@@ -32,15 +32,17 @@ import Prelude

-- Takes a pid lock, blocking until the lock is available or the timeout.
waitLock :: Seconds -> LockFile -> IO LockHandle
waitLock timeout file = makeLockHandle
	(P.waitTakeLock P.lockPool file LockExclusive)
	(mk <$> F.waitLock timeout file)
waitLock timeout file = makeLockHandle P.lockPool file
	-- LockShared for STM lock, because a pid lock can be the top-level
	-- lock with various other STM level locks gated behind it.
	(\p f -> P.waitTakeLock p f LockShared)
	(\f -> mk <$> F.waitLock timeout f)

-- Tries to take a pid lock, but does not block.
tryLock :: LockFile -> IO (Maybe LockHandle)
tryLock file = tryMakeLockHandle
	(P.tryTakeLock P.lockPool file LockShared)
	(fmap mk <$> F.tryLock file)
tryLock file = tryMakeLockHandle P.lockPool file
	(\p f -> P.tryTakeLock p f LockShared)
	(\f -> fmap mk <$> F.tryLock f)

checkLocked :: LockFile -> IO (Maybe Bool)
checkLocked file = P.getLockStatus P.lockPool file
@@ -33,27 +33,27 @@ import Prelude

-- Takes a shared lock, blocking until the lock is available.
lockShared :: Maybe FileMode -> LockFile -> IO LockHandle
lockShared mode file = makeLockHandle
	(P.waitTakeLock P.lockPool file LockShared)
	(mk <$> F.lockShared mode file)
lockShared mode file = makeLockHandle P.lockPool file
	(\p f -> P.waitTakeLock p f LockShared)
	(\f -> mk <$> F.lockShared mode f)

-- Takes an exclusive lock, blocking until the lock is available.
lockExclusive :: Maybe FileMode -> LockFile -> IO LockHandle
lockExclusive mode file = makeLockHandle
	(P.waitTakeLock P.lockPool file LockExclusive)
	(mk <$> F.lockExclusive mode file)
lockExclusive mode file = makeLockHandle P.lockPool file
	(\p f -> P.waitTakeLock p f LockExclusive)
	(\f -> mk <$> F.lockExclusive mode f)

-- Tries to take a shared lock, but does not block.
tryLockShared :: Maybe FileMode -> LockFile -> IO (Maybe LockHandle)
tryLockShared mode file = tryMakeLockHandle
	(P.tryTakeLock P.lockPool file LockShared)
	(fmap mk <$> F.tryLockShared mode file)
tryLockShared mode file = tryMakeLockHandle P.lockPool file
	(\p f -> P.tryTakeLock p f LockShared)
	(\f -> fmap mk <$> F.tryLockShared mode f)

-- Tries to take an exclusive lock, but does not block.
tryLockExclusive :: Maybe FileMode -> LockFile -> IO (Maybe LockHandle)
tryLockExclusive mode file = tryMakeLockHandle
	(P.tryTakeLock P.lockPool file LockExclusive)
	(fmap mk <$> F.tryLockExclusive mode file)
tryLockExclusive mode file = tryMakeLockHandle P.lockPool file
	(\p f -> P.tryTakeLock p f LockExclusive)
	(\f -> fmap mk <$> F.tryLockExclusive mode f)

-- Returns Nothing when the file doesn't exist, for cases where
-- that is different from it not being locked.
@@ -15,8 +15,12 @@ module Utility.LockPool.STM (
	tryTakeLock,
	getLockStatus,
	releaseLock,
	CloseLockFile,
	registerCloseLockFile,
) where

import Utility.Monad

import System.IO.Unsafe (unsafePerformIO)
import qualified Data.Map.Strict as M
import Control.Concurrent.STM
@@ -36,7 +40,9 @@ type LockHandle = TMVar (LockPool, LockFile)

type LockCount = Integer

data LockStatus = LockStatus LockMode LockCount
data LockStatus = LockStatus LockMode LockCount CloseLockFile

type CloseLockFile = IO ()

-- This TMVar is normally kept full.
type LockPool = TMVar (M.Map LockFile LockStatus)
@@ -59,11 +65,11 @@ waitTakeLock :: LockPool -> LockFile -> LockMode -> STM LockHandle
waitTakeLock pool file mode = do
	m <- takeTMVar pool
	v <- case M.lookup file m of
		Just (LockStatus mode' n)
		Just (LockStatus mode' n closelockfile)
			| mode == LockShared && mode' == LockShared ->
				return $ LockStatus mode (succ n)
				return $ LockStatus mode (succ n) closelockfile
			| n > 0 -> retry -- wait for lock
		_ -> return $ LockStatus mode 1
		_ -> return $ LockStatus mode 1 noop
	putTMVar pool (M.insert file v m)
	newTMVar (pool, file)
@@ -74,6 +80,16 @@ tryTakeLock pool file mode =
	`orElse`
	return Nothing

-- Call after waitTakeLock or tryTakeLock, to register a CloseLockFile
-- action to run when releasing the lock.
registerCloseLockFile :: LockPool -> LockFile -> CloseLockFile -> STM ()
registerCloseLockFile pool file closelockfile = do
	m <- takeTMVar pool
	putTMVar pool (M.update go file m)
  where
	go (LockStatus mode n closelockfile') = Just $
		LockStatus mode n (closelockfile' >> closelockfile)

-- Checks if a lock is being held. If it's held by the current process,
-- runs the getdefault action; otherwise runs the checker action.
--
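Registered close actions are composed with `>>`, so a second `registerCloseLockFile` call adds to, rather than replaces, the first. A hedged illustration with made-up `IO` actions:

```haskell
-- Illustration only: composing close actions the way the go helper
-- above does. Releasing the last user of the lock would then run both
-- actions, in the order they were registered.
main :: IO ()
main = do
    let closePosixFd = putStrLn "close posix lock fd"
        dropPidLock  = putStrLn "drop pid lock"
        combined     = closePosixFd >> dropPidLock
    combined
```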
@@ -87,7 +103,7 @@ getLockStatus pool file getdefault checker = do
	v <- atomically $ do
		m <- takeTMVar pool
		let threadlocked = case M.lookup file m of
			Just (LockStatus _ n) | n > 0 -> True
			Just (LockStatus _ n _) | n > 0 -> True
			_ -> False
		if threadlocked
			then do
@@ -99,25 +115,24 @@ getLockStatus pool file getdefault checker = do
		Just restore -> bracket_ (return ()) restore checker

-- Only runs action to close underlying lock file when this is the last
-- user of the lock, and when the handle has not already been closed.
-- user of the lock, and when the lock has not already been closed.
--
-- Note that the lock pool is left empty while the closelockfile action
-- Note that the lock pool is left empty while the CloseLockFile action
-- is run, to avoid race with another thread trying to open the same lock
-- file.
releaseLock :: LockHandle -> IO () -> IO ()
releaseLock h closelockfile = go =<< atomically (tryTakeTMVar h)
releaseLock :: LockHandle -> IO ()
releaseLock h = go =<< atomically (tryTakeTMVar h)
  where
	go (Just (pool, file)) = do
		(m, unused) <- atomically $ do
		(m, closelockfile) <- atomically $ do
			m <- takeTMVar pool
			return $ case M.lookup file m of
				Just (LockStatus mode n)
					| n == 1 -> (M.delete file m, True)
				Just (LockStatus mode n closelockfile)
					| n == 1 -> (M.delete file m, closelockfile)
					| otherwise ->
						(M.insert file (LockStatus mode (pred n)) m, False)
				Nothing -> (m, True)
		when unused
			closelockfile
						(M.insert file (LockStatus mode (pred n) closelockfile) m, noop)
				Nothing -> (m, noop)
		closelockfile
		atomically $ putTMVar pool m
	-- The LockHandle was already closed.
	go Nothing = return ()
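The bookkeeping inside `releaseLock` can be viewed as one small pure decision. A hedged sketch (the `releaseEntry` helper is hypothetical, not a git-annex function):

```haskell
-- Given the pool entry for a lock file (mode, user count, close action),
-- decide the new entry and what to run: the close action only when the
-- last in-process user releases; otherwise just decrement the count.
releaseEntry :: Maybe (mode, Integer, IO ()) -> (Maybe (mode, Integer, IO ()), IO ())
releaseEntry (Just (m, n, close))
    | n == 1    = (Nothing, close)
    | otherwise = (Just (m, pred n, close), return ())
releaseEntry Nothing = (Nothing, return ())
```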
@@ -22,9 +22,9 @@ import Utility.LockPool.STM (LockFile, LockMode(..))
{- Tries to lock a file with a shared lock, which allows other processes to
 - also lock it shared. Fails if the file is exclusively locked. -}
lockShared :: LockFile -> IO (Maybe LockHandle)
lockShared file = tryMakeLockHandle
	(P.tryTakeLock P.lockPool file LockShared)
	(fmap mk <$> F.lockShared file)
lockShared file = tryMakeLockHandle P.lockPool file
	(\p f -> P.tryTakeLock p f LockShared)
	(\f -> fmap mk <$> F.lockShared f)

{- Tries to take an exclusive lock on a file. Fails if another process has
 - a shared or exclusive lock.
@@ -33,9 +33,9 @@ lockShared file = tryMakeLockHandle
 - read or write by any other process. So for advisory locking of a file's
 - content, a separate LockFile should be used. -}
lockExclusive :: LockFile -> IO (Maybe LockHandle)
lockExclusive file = tryMakeLockHandle
	(P.tryTakeLock P.lockPool file LockExclusive)
	(fmap mk <$> F.lockExclusive file)
lockExclusive file = tryMakeLockHandle P.lockPool file
	(\p f -> P.tryTakeLock p f LockExclusive)
	(\f -> fmap mk <$> F.lockExclusive f)

{- If the initial lock fails, this is a BUSY wait, and does not
 - guarentee FIFO order of waiters. In other news, Windows is a POS. -}
debian/changelog: 5 changes (vendored)
@@ -1,6 +1,11 @@
git-annex (6.20160230) UNRELEASED; urgency=medium

  * metadata: Added -r to remove all current values of a field.
  * Fix data loss that can occur when annex.pidlock is set in a repository.
  * Fix bug preventing moving files to/from a repository with annex.pidlock set.
  * Fix shared lock file FD leak.
  * Fix metadata hook behavior when multiple files are added at once.
    Thanks, Klaus Ethgen.

 -- Joey Hess <id@joeyh.name>  Mon, 29 Feb 2016 13:00:30 -0400
@@ -0,0 +1,40 @@
### Please describe the problem.
massive repo, max cpu using

git annex add .

had to interrupt the job as it was processing 1 small file per 5 seconds after about 3h run.

I am running it on the root of a large (currently 1TB) exFAT-based drive used for archiving

The repo grew to 28G.

Is this a regular issue with exFAT? I've done quite a bit of searching. I'll do more.

### What steps will reproduce the problem?
- install on El Capitan (latest) via homebrew
- create 1TB exFAT file store
- follow walk through to setup annex locally and on external
- add

### What version of git-annex are you using? On what operating system?
git-annex version: 6.20160126
build flags: Assistant Webapp Pairing Testsuite S3(multipartupload)(storageclasses) WebDAV FsEvents XMPP ConcurrentOutput TorrentParser Feeds Quvi
key/value backends: SHA256E SHA256 SHA512E SHA512 SHA224E SHA224 SHA384E SHA384 SHA3_256E SHA3_256 SHA3_512E SHA3_512 SHA3_224E SHA3_224 SHA3_384E SHA3_384 SKEIN256E SKEIN256 SKEIN512E SKEIN512 SHA1E SHA1 MD5E MD5 WORM URL
remote types: git gcrypt S3 bup directory rsync web bittorrent webdav tahoe glacier ddar hook external

El Capitan 10.11.3


### Please provide any additional information below.

[[!format sh """
# If you can, paste a complete transcript of the problem occurring here.
# If the problem is with the git-annex assistant, paste in .git/annex/daemon.log


# End of transcript or log.
"""]]

### Have you had any luck using git-annex before? (Sometimes we get tired of reading bug reports all day and a lil' positive end note does wonders)
I'd love to say I have. You'll hear my shout of joy when I do.

@@ -94,5 +94,3 @@ ok


"""]]

> provisionally [[done]]. --[[Joey]]
@@ -0,0 +1,24 @@
[[!comment format=mdwn
username="bvaa"
subject="similar problem"
date="2016-03-01T08:12:27Z"
content="""
I have a similar problem on Windows 7 64bit trying to add files that are around 5GB in size. I tried repository version 5 and 6 with same results.

```
$ git annex add bigfile
add bigfile ok
(recording state in git...)

$ git annex status
fatal: Cannot handle files this big
```
git-annex version: 6.20160229-g37a89cc
build flags: Assistant Webapp Pairing Testsuite S3(multipartupload) WebDAV ConcurrentOutput TorrentParser Feeds Quvi
key/value backends: SHA256E SHA256 SHA512E SHA512 SHA224E SHA224 SHA384E SHA384 SHA3_256E SHA3_256 SHA3_512E SHA3_512 SHA3_224E SHA3_224 SHA3_384E SHA3_384 SKEIN256E SKEIN256 SKEIN512E SKEIN512 SHA1E SHA1 MD5E MD5 WORM URL
remote types: git gcrypt S3 bup directory rsync web bittorrent webdav tahoe glacier ddar hook external
local repository version: 5
supported repository versions: 5 6
upgrade supported from repository versions: 2 3 4 5

"""]]
@@ -0,0 +1,20 @@
[[!comment format=mdwn
username="joey"
subject="""comment 3"""
date="2016-03-01T14:41:45Z"
content="""
git (not git-annex) will throw this error if a file size is greater than
`size_t`.

This bug report seemed to originally concern git add being run on such a
file, but I can't see how git-annex would do that; it doesn't add large
files to git.

I think that in the case of git-annex status, when it runs git status, that
looks at work tree files, and so falls over if they're large, even if
what's checked into git is a nice small git-annex symlink. This would also
probably affect other places where git looks at worktree files, perhaps git
diff (in v6 repo mode).

Reopening bug report.
"""]]
@@ -0,0 +1,31 @@
### Please describe the problem.

Ideally annex should detect all "paranormal" cases such as running on an NFS mounted partition, but according to [https://git-annex.branchable.com/bugs/huge_multiple_copies_of___39__.nfs__42____39___and___39__.panfs__42____39___being_created/](https://git-annex.branchable.com/bugs/huge_multiple_copies_of___39__.nfs__42____39___and___39__.panfs__42____39___being_created/) it does not. Happily ignorant, we were running annex (5.20151116-g76139a9) on an NFS mounted partition until we filled up the 2TB of space allocated to us with .nfs* files. Well -- apparently according to the above we should have tried pidlock... trying now, but it doesn't work :-/

[[!format sh """
*$> git clone smaug:/tmp/123 123-clone && cd 123-clone && git config annex.pidlock true && echo 124 > 124.dat && git annex add 124.dat && git commit -m 'added 124' && git annex move --to=origin 124.dat
Initialized empty Git repository in /home/yhalchen/123-clone/.git/
remote: Counting objects: 22, done.
remote: Compressing objects: 100% (16/16), done.
remote: Total 22 (delta 3), reused 0 (delta 0)
Receiving objects: 100% (22/22), done.
Resolving deltas: 100% (3/3), done.
total 1
1 123.dat@ 1 README.txt
(merging origin/git-annex into git-annex...)
(recording state in git...)
add 124.dat ok
(recording state in git...)
[master 0f1092a] added 124
1 files changed, 1 insertions(+), 0 deletions(-)
create mode 120000 124.dat
move 124.dat (checking origin...) git-annex: content is locked

$> echo $?
1

"""]]

BTW, running move in our old, now somewhat screwed up annex results in a differently expressed error: [http://www.onerussian.com/tmp/2016-02-29.png](http://www.onerussian.com/tmp/2016-02-29.png)

[[!meta author=yoh]]
@@ -0,0 +1,20 @@
[[!comment format=mdwn
username="joey"
subject="""comment 10"""
date="2016-03-01T20:52:38Z"
content="""
	2456732 openat(AT_FDCWD, ".git/annex/ssh/", O_RDONLY|O_NONBLOCK|O_DIRECTORY|O_CLOEXEC) = -1 ENOENT (No such file or directory)
	2456732 mkdir(".git/annex/ssh", 0777) = 0
	2456732 open(".git/annex/ssh/smaug.lock", O_RDONLY|O_CREAT, 0666) = 11
	2456732 fcntl(11, F_GETFD) = 0
	2456732 fcntl(11, F_SETFD, FD_CLOEXEC) = 0
	2456732 close(11) = 0

Backs up what I thought git-annex should be doing; it's not fcntl locking that file.

Ah, I'll bet it's not git-annex at all this time.
It runs ssh with -S .git/annex/ssh/smaug, and ssh probably
does its own locking around setting up that control socket.

If so, disabling annex.sshcaching will avoid the problem.
"""]]
@@ -0,0 +1,7 @@
[[!comment format=mdwn
username="https://me.yahoo.com/a/EbvxpTI_xP9Aod7Mg4cwGhgjrCrdM5s-#7c0f4"
subject="comment 11"
date="2016-03-01T22:40:44Z"
content="""
would it then make sense for annex not to use sshcaching when operating under pidlock, unless some NFS-specific flag is used to tease it apart?
"""]]
@@ -0,0 +1,7 @@
[[!comment format=mdwn
username="joey"
subject="""comment 1"""
date="2016-03-01T14:36:25Z"
content="""
FYI, I think you could remove the .nfs files to free up space.
"""]]

@@ -0,0 +1,10 @@
[[!comment format=mdwn
username="joey"
subject="""comment 2"""
date="2016-03-01T15:35:48Z"
content="""
Oddly, I cannot reproduce this, although I can reproduce the behavior in
<http://git-annex.branchable.com/bugs/thread_blocked_indefinitely_in_an_STM_transaction__while_moving_within__a_local_clone/>

(smaug:/tmp/123 has permissions that do not let me access it.)
"""]]
@@ -0,0 +1,9 @@
[[!comment format=mdwn
username="joey"
subject="""comment 3"""
date="2016-03-01T16:52:16Z"
content="""
I've fixed the STM transaction bug. Need either more info to reproduce this
bug, or you could test and see if it still occurs when git-annex is
upgraded to ad888a6b760e8f9d31f8d99c51912bcdaa7fb0c1
"""]]

@@ -0,0 +1,9 @@
[[!comment format=mdwn
username="https://me.yahoo.com/a/EbvxpTI_xP9Aod7Mg4cwGhgjrCrdM5s-#7c0f4"
subject="more info"
date="2016-03-01T17:31:29Z"
content="""
If we could remove those .nfs* files, it would indeed be not that bad, but we can't.

smaug:/tmp/123 -- sorry about the permissions, but it is a regular annex, nothing special, so the bug should show itself with other repos as well I think. I gave you access to it now, and there is also a /tmp/123.tar.gz archive of it just in case.
"""]]
@@ -0,0 +1,31 @@
[[!comment format=mdwn
username="https://me.yahoo.com/a/EbvxpTI_xP9Aod7Mg4cwGhgjrCrdM5s-#7c0f4"
subject="recent snapshot seems to have fixed it"
date="2016-03-01T18:52:27Z"
content="""
[[!format sh \"\"\"
$> git clone smaug:/tmp/123 123-clone && cd 123-clone && git config annex.pidlock true && echo 124 > 124.dat && git annex add 124.dat && git commit -m 'added 124' && git annex move --to=origin 124.dat
Cloning into '123-clone'...
remote: Counting objects: 22, done.
remote: Compressing objects: 100% (16/16), done.
remote: Total 22 (delta 3), reused 0 (delta 0)
Receiving objects: 100% (22/22), done.
Resolving deltas: 100% (3/3), done.
Checking connectivity... done.
total 1
1 123.dat@ 1 README.txt
(merging origin/git-annex into git-annex...)
(recording state in git...)
add 124.dat ok
(recording state in git...)
[master 6eca577] added 124
1 file changed, 1 insertion(+)
create mode 120000 124.dat
move 124.dat (checking origin...) ok
(recording state in git...)

$> git annex version
git-annex version: 6.20160301+gitg647fffd-1~ndall+1

\"\"\"]]
"""]]
@@ -0,0 +1,7 @@
[[!comment format=mdwn
username="https://me.yahoo.com/a/EbvxpTI_xP9Aod7Mg4cwGhgjrCrdM5s-#7c0f4"
subject="comment 6"
date="2016-03-01T18:54:15Z"
content="""
but then I found ./.git/annex/ssh/.nfs000000000000f41600003608.lock left behind (removable, luckily for me ;) )
"""]]

@@ -0,0 +1,7 @@
[[!comment format=mdwn
username="https://me.yahoo.com/a/EbvxpTI_xP9Aod7Mg4cwGhgjrCrdM5s-#7c0f4"
subject="comment 7"
date="2016-03-01T18:58:20Z"
content="""
and those keep breeding with each subsequent --move
"""]]
@@ -0,0 +1,20 @@
[[!comment format=mdwn
username="joey"
subject="""comment 8"""
date="2016-03-01T20:17:35Z"
content="""
That ssh lock file is created by this code:

	-- The posix lock file is created even when using pid locks, in order to
	-- avoid complicating any code that might expect to be able to see that
	-- lock file. But, it's not locked.
	dummyPosixLock :: Maybe FileMode -> LockFile -> IO ()
	dummyPosixLock m f = closeFd =<< openLockFile ReadLock m f

But, that does not ever actually take a lock on the file, so
NFS should not make its .nfs thing in this case. Unless NFS does it when a
FD is simply opened with close-on-exec set.

Can you get a strace of the creation of files under .git/annex/ssh/
that result in these .nfs things?
"""]]
@@ -0,0 +1,7 @@
[[!comment format=mdwn
username="https://me.yahoo.com/a/EbvxpTI_xP9Aod7Mg4cwGhgjrCrdM5s-#7c0f4"
subject="comment 9"
date="2016-03-01T20:43:17Z"
content="""
ok -- see on smaug /mnt/nfs/scrap/datalad/test_nfs/123-clone-move.strace . Now you can experiment there as well -- the entire /mnt/btrfs/scrap is mounted also via nfs (under /mnt/nfs/scrap)
"""]]

doc/bugs/git-annex_confuses_Git_with_nested_submodules.mdwn: new file, 37 lines
@@ -0,0 +1,37 @@
### Please describe the problem.
The way git-annex deals with submodules (replacing the .git file in the submodule with a link to the corresponding gitdir of the submodule) seems to confuse Git when creating another submodule in an annex-init'ed submodule.

### What steps will reproduce the problem?
	% mkdir some ; cd some; git init
	Initialized empty Git repository in /tmp/some/.git/
	% git submodule add /src/somegitrepo sub_lvl1
	Cloning into 'sub_lvl1'...
	done.
	% cd sub_lvl1
	% git annex init
	init (merging origin/git-annex into git-annex...)
	(recording state in git...)
	ok
	(recording state in git...)
	% git submodule add /src/somegitrepo sub_lvl2
	Cloning into 'sub_lvl2'...
	done.
	fatal: Could not chdir to '../../../sub_lvl2': No such file or directory
	Unable to checkout submodule 'sub_lvl2'

### What version of git-annex are you using? On what operating system?
	% apt-cache policy git-annex-standalone
	git-annex-standalone:
	  Installed: 6.20160213+gitg9597a21-1~ndall+1

Debian stretch, git-annex from NeuroDebian.

### Have you had any luck using git-annex before? (Sometimes we get tired of reading bug reports all day and a lil' positive end note does wonders)

Yes, lots! Using it for some of its original use cases for more than five years now -- I was actually surprised to learn, just now, that the oldest commit in my music repository is exactly 5 years and 6 days old. Thanks for longevity and reliability!

More recently I am exploring the use of git annex for managing datasets and their dependencies, i.e. going from raw to some processed state over multiple levels, where each level is a useful starting point for some analysis, and each previous level is a dependency (input) to the next. With just one level above "raw" this has massively improved collaboration workflows in student/teacher settings for me. Deeper nesting levels would allow for even more interesting applications, but see above ;-) I think Git seems needlessly confused, but I don't fully grasp what is happening yet. I'd appreciate any insight you may have. Although it is Git that shows the undesired behavior, it seems it is git-annex that ultimately confused it. Hence I came here first.

BTW: What a nice idea to ask for something like this in a bug report.
@@ -0,0 +1,16 @@
[[!comment format=mdwn
username="joey"
subject="""comment 1"""
date="2016-03-01T20:25:13Z"
content="""
Reproduced this.

This really does feel like a git bug. git is supposed to treat "gitlink"
files and .git symlinks the same. While modern versions of git set up
gitlink files for submodules, older versions of git used .git symlinks, and
git should still support that.

Looks like the problem can be worked around, by setting
`GIT_DIR`. In your example, `GIT_DIR=../.git/modules/sub_lvl1/ git
submodule add /src/somegitrepo sub_lvl2`
"""]]
@@ -0,0 +1,17 @@
[[!comment format=mdwn
username="joey"
subject="""comment 2"""
date="2016-03-01T20:36:43Z"
content="""
Here's a more minimal test case, not involving git-annex at all:

	git init gitdir
	mkdir worktree
	cd worktree
	ln -s ../gitdir/.git .git
	git submodule add /any/git/repo sub

	fatal: Could not chdir to '../../../sub': No such file or directory

I have forwarded that test case to the git ML.
"""]]
@@ -0,0 +1,29 @@
[[!comment format=mdwn
username="joey"
subject="""comment 3"""
date="2016-03-02T16:48:24Z"
content="""
[git bug report](http://news.gmane.org/find-root.php?message_id=20160301204218.GA4083%40kitenet.net)

So far, the git devs admit this is a problem, but don't seem too keen on fixing
it, even though it breaks backwards compatibility with repositories git
submodule add created (circa 2012).

It might be that git-annex init could work around git's bugginess by,
instead of making submodule/.git a symlink to ../.git/modules/dir, making
submodule/.git be the git directory, and converting ../.git/modules/dir
to a symlink. In very limited testing, that setup seems to work.

I don't know if all the submodule stuff would work, perhaps it would break moving
submodules etc. And, since git likes to chdir around (not the best idea),
if it expected to be able to chdir from .git/modules to dir and chdir .. to
get back, changing that to a symlink would defeat it.

BTW, I found another way, unrelated to git-annex or symlinks at all,
that git submodule add's broken path handling makes it fall over with
nested submodules.
<http://news.gmane.org/find-root.php?message_id=20160302165240.GA17654%40kitenet.net>.

(It's almost like myrepos was a better idea than this submodule stuff, or
something...)
"""]]
@@ -0,0 +1,9 @@
[[!comment format=mdwn
username="mih"
subject="Thanks"
date="2016-03-02T19:30:49Z"
content="""
Thanks for investigating this further.

One aspect that may make switching the location of the .git directory into the worktree of the submodule less desirable is this: With the actual .git in ../.git/modules/... one can easily rm -rf the submodule, deinit it, and re-init/update from the (still present) ../.git/modules/... at a later point in time. Especially, when a submodule is a more complicated beast (e.g. with multiple configured remotes) the required steps to regenerate the same setup get more complex.
"""]]
@@ -0,0 +1,40 @@
relates to having pidlock true

[[!format sh """
$> mkdir 123; cd 123; git init; git annex init; git config annex.pidlock true && echo "123" > 123.dat; git annex add 123.dat; git commit -m 'added';
W: git-annex repositories not (yet) supported in the prompt
Initialized empty Git repository in /tmp/123/.git/
init ok
(recording state in git...)
add 123.dat ok
(recording state in git...)
[master (root-commit) 9449f1b] added
1 file changed, 1 insertion(+)
create mode 120000 123.dat

$> git clone . ../123-clone && git remote add clone ../123-clone && git fetch clone && cd ../123-clone && git config annex.pidlock true && cd - && git annex move --to=clone .
Cloning into '../123-clone'...
done.
From ../123-clone
* [new branch] master -> clone/master
move 123.dat git-annex: thread blocked indefinitely in an STM transaction

$> echo $?
1

$> git annex version
git-annex version: 6.20160226+gitg01f1de0-1~ndall+1
build flags: Assistant Webapp Pairing Testsuite S3(multipartupload) WebDAV Inotify DBus DesktopNotify XMPP ConcurrentOutput TorrentParser MagicMime Feeds Quvi
key/value backends: SHA256E SHA256 SHA512E SHA512 SHA224E SHA224 SHA384E SHA384 SHA3_256E SHA3_256 SHA3_512E SHA3_512 SHA3_224E SHA3_224 SHA3_384E SHA3_384 SKEIN256E SKEIN256 SKEIN512E SKEIN512 SHA1E SHA1 MD5E MD5 WORM URL
remote types: git gcrypt S3 bup directory rsync web bittorrent webdav tahoe glacier ddar hook external
local repository version: 5
supported repository versions: 5 6
upgrade supported from repository versions: 0 1 2 4 5

"""]]

and it works ok without pidlock enabled

[[!meta author=yoh]]

> [[fixed|done]] --[[Joey]]
@@ -0,0 +1,8 @@
[[!comment format=mdwn
username="joey"
subject="""comment 1"""
date="2016-03-01T15:40:12Z"
content="""
I can reproduce this. But, when I change the origin remote to use ssh, it
works around the problem.
"""]]

@@ -0,0 +1,11 @@
[[!comment format=mdwn
username="joey"
subject="""comment 2"""
date="2016-03-01T16:11:37Z"
content="""
A worse problem with annex.pidlock is that it completely broke checking
whether a key is present in the repository. That could lead to data loss
when eg, moving --to a repo with annex.pidlock set.

I've fixed that related bug.
"""]]
@@ -0,0 +1,17 @@
[[!comment format=mdwn
username="joey"
subject="""comment 3"""
date="2016-03-01T16:21:31Z"
content="""
Analysis: What's crashing is Utility.LockPool.PidLock.waitLock after a call
to Utility.LockPool.PidLock.tryLock. The former takes an exclusive STM lock
of the pid lock file; the latter takes a shared STM lock.

Since the pid lock stands in for multiple more fine-grained locks, waitLock
will be called while a lock from tryLock (or a previous waitLock perhaps)
is still open.

The fix seems as simple as making waitLock take a shared STM lock of the
pid lock file, leaving the exclusive lock for the later, more fine-grained
STM lock checking that's done after taking the pid lock.
"""]]
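A hedged, self-contained model of the failure mode described in that analysis (illustrative code, not git-annex's): if the pid lock's STM-level lock behaves exclusively, a second acquisition by the same process blocks forever, and with no other thread able to wake it the GHC runtime surfaces exactly the reported error.

```haskell
import Control.Concurrent.STM

main :: IO ()
main = do
    -- Model an exclusive STM-level lock as a TMVar that is full when free.
    lock <- newTMVarIO ()
    -- A tryLock-style acquisition succeeds and leaves the lock held...
    atomically $ takeTMVar lock
    -- ...so a waitLock-style acquisition in the same process retries forever;
    -- GHC aborts it with "thread blocked indefinitely in an STM transaction".
    -- Making waitLock take a shared STM lock avoids ever reaching this state.
    atomically $ takeTMVar lock
    putStrLn "never reached"
```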
doc/devblog/day_368__leap.mdwn: new file, 9 lines

@@ -0,0 +1,9 @@
Pushed out a release today, could not resist the leap day in the version
number, and also there were enough bug fixes accumulated to make it worth
doing.

I now have `git-annex sync` working inside adjusted branches, so pulls
get adjusted appropriately before being merged into the adjusted branch.
Seems to mostly work well, though I did just find one bug in it. Only
propagating adjusted commits remains to be done to finish my adjusted
branches prototype.
doc/forum/How_to_shrink_transfer_repo__63__.mdwn: new file, 27 lines

@@ -0,0 +1,27 @@
Hello,

I have two repositories (Asaru and Horus) that are both ```group=client``` and ```wanted=standard```. The other one, Astarte, is ```group=transfer``` and ```wanted=standard```. Pretty standard I think.

```
repository mode: direct
trusted repositories: 0
semitrusted repositories: 5
	00000000-0000-0000-0000-000000000001 -- web
	00000000-0000-0000-0000-000000000002 -- bittorrent
	58001764-966d-4076-ae99-4ef6de25df39 -- Asaru [here]
	8165bdf1-907e-4bbe-9c35-22fbf6f8cb00 -- Astarte [astarte]
	cca0c3c8-593a-4395-936c-1093f0f762e8 -- Horus
untrusted repositories: 0
```

I always sync on the two client repos like this: ```git annex add . && git annex sync --content```. The transfer repo is growing larger and larger. ```git annex dropunused N``` says that it ```could only verify the existence of 0 out of 1 necessary copies```.

What is the best way to clean up the transfer repo?

1. Make the two client repos trusted? The three repos have been created manually, not through the assistant. Is that what the assistant does, too?
2. Try to get the two client repos in touch with each other and try to use ```dropunused --from=astarte```?

What is the recommended way for that?

Thanks,
Florian
@@ -0,0 +1,8 @@
[[!comment format=mdwn
username="grawity@2ea26be48562f66fcb9b66307da72b1e2e37453f"
nickname="grawity"
subject="comment 2"
date="2016-02-29T17:25:17Z"
content="""
Hmm, I still think that avoiding duplicating uuids would be smarter behavior, but the host symlinks will do just fine. Thanks for the suggestion.
"""]]
doc/forum/Undo_git_merge_git-annex.mdwn: new file, 3 lines

@@ -0,0 +1,3 @@
After accidentally typing git merge git-annex, I am now wondering how to clean up the resulting chaos...

Any tips?
@@ -1,4 +1,4 @@
#! /bin/sh
#!/bin/sh
#
# Copyright (C) 2014 Joey Hess <id@joeyh.name>
# Copyright (C) 2016 Klaus Ethgen <Klaus@Ethgen.ch>
@@ -112,7 +112,7 @@ if [ -n "$*" ]; then
		process "$f"
	done
else
	for f in "$(git diff-index --name-only --cached $against)"; do
	git diff-index --name-only --cached $against | while read f; do
		process "$f"
	done
fi
@@ -0,0 +1,22 @@
[[!comment format=mdwn
username="grawity@2ea26be48562f66fcb9b66307da72b1e2e37453f"
nickname="grawity"
subject="comment 2"
date="2016-03-01T07:10:55Z"
content="""
Thanks, but you missed my point entirely... I wasn't asking for a mode that would delete data without checking. I was asking for the complete opposite – a mode that would _inject an extra copy_ of the data without checking.

Yeah, I guess I could `annex add` the files, then un-annex them, and _then_ `annex import --clean-duplicates`, but that's a somewhat long-winded approach, needing twice the space and twice the time.

(...speaking of losing data, it seems that `git annex reinject` is perfectly happy to delete files if I accidentally give it the wrong target. I.e. after failing content verification, it still throws away the source.)

---

It doesn't have to be part of git-annex; I could _script_ this feature myself, though there aren't nearly enough plumbing commands either. (For example, a command to hash a file and give its key (like `git hash-object`), or a command to find all paths for a key.)

Having an equivalent of `git hash-object -w` (inject an arbitrary object) would make it even easier, but I couldn't find anything like that either.

---

Anyway, let's cancel this todo, I'll find other ways.
"""]]