{- git-annex command
 -
 - Copyright 2010, 2013 Joey Hess <id@joeyh.name>
 -
 - Licensed under the GNU GPL version 3 or higher.
 -}

{-# LANGUAGE CPP #-}
module Command.Add where

import Common.Annex
import Command
import Types.KeySource
import Backend
import Logs.Location
import Annex.Content
import Annex.Content.Direct
import Annex.Perms
import Annex.Link
import Annex.MetaData
import qualified Annex
import qualified Annex.Queue
#ifdef WITH_CLIBS
#ifndef __ANDROID__
import Utility.Touch
#endif
#endif
import Config
import Utility.InodeCache
import Annex.FileMatcher
import Annex.ReplaceFile
import Utility.Tmp

import Control.Exception (IOException)

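{- The add command. It refuses to run in a bare repository, and it
 - accepts the file matching options. -}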
cmd :: [Command]
cmd = [notBareRepo $ withOptions addOptions $
	command "add" paramPaths seek SectionCommon "add files to annex"]
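{- Besides the file matching options, --include-dotfiles makes add act
 - on dotfiles, which are otherwise skipped. -}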
addOptions :: [Option]
addOptions = includeDotFilesOption : fileMatchingOptions
includeDotFilesOption :: Option
includeDotFilesOption = flagOption [] "include-dotfiles" "don't skip dotfiles"
{- Add acts on both files not checked into git yet, and unlocked files.
 -
 - In direct mode, it acts on any files that have changed. -}
seek :: CommandSeek
seek ps = do
	matcher <- largeFilesMatcher
	let go a = flip a ps $ \file -> ifM (checkFileMatcher matcher file <||> Annex.getState Annex.force)
		( start file
		, startSmall file
		)
	skipdotfiles <- not <$> Annex.getFlag (optionName includeDotFilesOption)
	go $ withFilesNotInGit skipdotfiles
	ifM isDirect
		( go withFilesMaybeModified
		, go withFilesUnlocked
		)
{- Pass file off to git-add. -}
startSmall :: FilePath -> CommandStart
startSmall file = do
	showStart "add" file
	showNote "non-large file; adding content to git repository"
	next $ do
		params <- forceParams
		Annex.Queue.addCommand "add" (params++[Param "--"]) [file]
		next $ return True
{- The add subcommand annexes a file, generating a key for it using a
 - backend, and then moving it into the annex directory and setting up
 - the symlink pointing to its content. -}
start :: FilePath -> CommandStart
start file = ifAnnexed file addpresent add
  where
	add = do
		ms <- liftIO $ catchMaybeIO $ getSymbolicLinkStatus file
		case ms of
			Nothing -> stop
			Just s
				| isSymbolicLink s || not (isRegularFile s) -> stop
				| otherwise -> do
					showStart "add" file
					next $ perform file
	addpresent key = ifM isDirect
		( do
			ms <- liftIO $ catchMaybeIO $ getSymbolicLinkStatus file
			case ms of
				Just s | isSymbolicLink s -> fixup key
				_ -> ifM (goodContent key file) ( stop , add )
		, fixup key
		)
	fixup key = do
		-- the annexed symlink is present but not yet added to git
		showStart "add" file
		liftIO $ removeFile file
		whenM isDirect $
			void $ addAssociatedFile key file
		next $ next $ cleanup file key Nothing =<< inAnnex key
{- The file that's being added is locked down before a key is generated,
 - to prevent it from being modified in between. This lock down is not
 - perfect at best (and pretty weak at worst). For example, it does not
 - guard against files that are already opened for write by another process.
 - So a KeySource is returned. Its inodeCache can be used to detect any
 - changes that might be made to the file after it was locked down.
 -
 - When possible, the file is hard linked to a temp directory. This guards
 - against some changes, like deletion or overwrite of the file, and
 - allows lsof checks to be done more efficiently when adding a lot of files.
 -
 - Lockdown can fail if a file gets deleted, and Nothing will be returned.
 -}
lockDown :: FilePath -> Annex (Maybe KeySource)
lockDown = either
	(\e -> warning (show e) >> return Nothing)
	(return . Just)
	<=< lockDown'
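{- Like lockDown, but returns the exception, rather than warning
 - and returning Nothing on failure. -}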
lockDown' :: FilePath -> Annex (Either IOException KeySource)
lockDown' file = ifM crippledFileSystem
	( withTSDelta $ liftIO . tryIO . nohardlink
	, tryIO $ do
		tmp <- fromRepo gitAnnexTmpMiscDir
		createAnnexDirectory tmp
		go tmp
	)
  where
	{- In indirect mode, the write bit is removed from the file as part
	 - of lock down to guard against further writes, and because objects
	 - in the annex have their write bit disabled anyway.
	 -
	 - Freezing the content early also lets us fail early when
	 - someone else owns the file.
	 -
	 - This is not done in direct mode, because files there need to
	 - remain writable at all times.
	 -}
	go tmp = do
		unlessM isDirect $
			freezeContent file
		withTSDelta $ \delta -> liftIO $ do
			(tmpfile, h) <- openTempFile tmp $
				relatedTemplate $ takeFileName file
			hClose h
			nukeFile tmpfile
			withhardlink delta tmpfile `catchIO` const (nohardlink delta)
	nohardlink delta = do
		cache <- genInodeCache file delta
		return KeySource
			{ keyFilename = file
			, contentLocation = file
			, inodeCache = cache
			}
	withhardlink delta tmpfile = do
		createLink file tmpfile
		cache <- genInodeCache tmpfile delta
		return KeySource
			{ keyFilename = file
			, contentLocation = tmpfile
			, inodeCache = cache
			}
{- Ingests a locked down file into the annex.
 -
 - In direct mode, leaves the file alone, and just updates bookkeeping
 - information.
 -}
ingest :: Maybe KeySource -> Annex (Maybe Key, Maybe InodeCache)
ingest Nothing = return (Nothing, Nothing)
ingest (Just source) = withTSDelta $ \delta -> do
	backend <- chooseBackend $ keyFilename source
	k <- genKey source backend
	let src = contentLocation source
	ms <- liftIO $ catchMaybeIO $ getFileStatus src
	mcache <- maybe (pure Nothing) (liftIO . toInodeCache delta src) ms
	case (mcache, inodeCache source) of
		(_, Nothing) -> go k mcache ms
		(Just newc, Just c) | compareStrong c newc -> go k mcache ms
		_ -> failure "changed while it was being added"
  where
	go k mcache ms = ifM isDirect
		( godirect k mcache ms
		, goindirect k mcache ms
		)

	goindirect (Just (key, _)) mcache ms = do
		catchNonAsync (moveAnnex key $ contentLocation source)
			(undo (keyFilename source) key)
		maybe noop (genMetaData key (keyFilename source)) ms
		liftIO $ nukeFile $ keyFilename source
		return (Just key, mcache)
	goindirect _ _ _ = failure "failed to generate a key"

	godirect (Just (key, _)) (Just cache) ms = do
		addInodeCache key cache
		maybe noop (genMetaData key (keyFilename source)) ms
		finishIngestDirect key source
		return (Just key, Just cache)
	godirect _ _ _ = failure "failed to generate a key"

	failure msg = do
		warning $ keyFilename source ++ " " ++ msg
		when (contentLocation source /= keyFilename source) $
			liftIO $ nukeFile $ contentLocation source
		return (Nothing, Nothing)
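{- Direct mode bookkeeping for an ingested file: records the associated
 - file, removes the temporary hard link, and fills in any other
 - associated files that use the same key. -}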
finishIngestDirect :: Key -> KeySource -> Annex ()
finishIngestDirect key source = do
	void $ addAssociatedFile key $ keyFilename source
	when (contentLocation source /= keyFilename source) $
		liftIO $ nukeFile $ contentLocation source

	{- Copy to any other locations using the same key. -}
	otherfs <- filter (/= keyFilename source) <$> associatedFiles key
	forM_ otherfs $
		addContentWhenNotPresent key (keyFilename source)
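{- Locks down the file and ingests it into the annex, stopping if no
 - key could be generated. -}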
perform :: FilePath -> CommandPerform
perform file = lockDown file >>= ingest >>= go
  where
	go (Just key, cache) = next $ cleanup file key cache True
	go (Nothing, _) = stop
{- On error, put the file back so it doesn't seem to have vanished.
 - This can be called before or after the symlink is in place. -}
undo :: FilePath -> Key -> SomeException -> Annex a
undo file key e = do
	whenM (inAnnex key) $ do
		liftIO $ nukeFile file
		catchNonAsync (fromAnnex key file) tryharder
		logStatus key InfoMissing
	throwM e
  where
	-- fromAnnex could fail if the file ownership is weird
	tryharder :: SomeException -> Annex ()
	tryharder _ = do
		src <- calcRepo $ gitAnnexLocation key
		liftIO $ moveFile src file
{- Creates the symlink to the annexed content, returns the link target. -}
link :: FilePath -> Key -> Maybe InodeCache -> Annex String
link file key mcache = flip catchNonAsync (undo file key) $ do
	l <- calcRepo $ gitAnnexLink file key
	replaceFile file $ makeAnnexLink l

	-- touch symlink to have same time as the original file,
	-- as provided in the InodeCache
	case mcache of
#if defined(WITH_CLIBS) && ! defined(__ANDROID__)
		Just c -> liftIO $ touch file (TimeSpec $ inodeCacheToMtime c) False
#else
		Just _ -> noop
#endif
		Nothing -> noop

	return l
{- Creates the symlink to the annexed content, and stages it in git.
 -
 - As long as the filesystem supports symlinks, we use
 - git add, rather than directly staging the symlink to git.
 - Using git add is best because it allows the queuing to work
 - and is faster (staging the symlink runs hash-object commands each time).
 - Also, using git add allows it to skip gitignored files, unless forced
 - to include them.
 -}
addLink :: FilePath -> Key -> Maybe InodeCache -> Annex ()
addLink file key mcache = ifM (coreSymlinks <$> Annex.getGitConfig)
	( do
		_ <- link file key mcache
		params <- forceParams
		Annex.Queue.addCommand "add" (params++[Param "--"]) [file]
	, do
		l <- link file key mcache
		addAnnexLink l file
	)
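{- Parameters to pass to git add. When the user ran git-annex with
 - --force, git add is told -f, so it will add gitignored files. -}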
forceParams :: Annex [CommandParam]
forceParams = ifM (Annex.getState Annex.force)
	( return [Param "-f"]
	, return []
	)
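{- Stages the symlink in git, and records the key's presence in the
 - location log when its content is present. In direct mode, when the
 - content is present, the symlink is staged directly rather than via
 - git add. -}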
cleanup :: FilePath -> Key -> Maybe InodeCache -> Bool -> CommandCleanup
cleanup file key mcache hascontent = do
	ifM (isDirect <&&> pure hascontent)
		( do
			l <- calcRepo $ gitAnnexLink file key
			stageSymlink file =<< hashSymlink l
		, addLink file key mcache
		)
	when hascontent $
		logStatus key InfoPresent
	return True