{- git-annex file content managing
 -
 - Copyright 2010-2014 Joey Hess <id@joeyh.name>
 -
 - Licensed under the GNU GPL version 3 or higher.
 -}
|
2010-10-14 07:40:26 +00:00
|
|
|
|
|
2013-05-11 20:03:00 +00:00
|
|
|
|
{-# LANGUAGE CPP #-}
|
|
|
|
|
|
2011-10-04 04:40:47 +00:00
|
|
|
|
module Annex.Content (
|
2011-01-16 20:05:05 +00:00
|
|
|
|
inAnnex,
|
2015-04-09 19:34:47 +00:00
|
|
|
|
inAnnex',
|
2011-11-09 22:33:15 +00:00
|
|
|
|
inAnnexSafe,
|
2013-07-18 17:30:12 +00:00
|
|
|
|
inAnnexCheck,
|
2011-11-09 22:33:15 +00:00
|
|
|
|
lockContent,
|
2011-01-16 20:05:05 +00:00
|
|
|
|
getViaTmp,
|
2015-10-01 18:13:53 +00:00
|
|
|
|
getViaTmp',
|
|
|
|
|
checkDiskSpaceToGet,
|
other 80% of avoding verification when hard linking to objects in shared repo
In c6632ee5c8e66c26ef18317f56ae02bae1e7e280, it actually only handled
uploading objects to a shared repository. To avoid verification when
downloading objects from a shared repository, was a lot harder.
On the plus side, if the process of downloading a file from a remote
is able to verify its content on the side, the remote can indicate this
now, and avoid the extra post-download verification.
As of yet, I don't have any remotes (except Git) using this ability.
Some more work would be needed to support it in special remotes.
It would make sense for tahoe to implicitly verify things downloaded from it;
as long as you trust your tahoe server (which typically runs locally),
there's cryptographic integrity. OTOH, despite bup being based on shas,
a bup repo under an attacker's control could have the git ref used for an
object changed, and so a bup repo shouldn't implicitly verify. Indeed,
tahoe seems unique in being trustworthy enough to implicitly verify.
2015-10-02 17:56:42 +00:00
|
|
|
|
VerifyConfig(..),
|
|
|
|
|
Types.Remote.unVerified,
|
2014-07-29 22:40:40 +00:00
|
|
|
|
prepTmp,
|
2011-04-28 00:06:07 +00:00
|
|
|
|
withTmp,
|
2011-03-22 21:27:04 +00:00
|
|
|
|
checkDiskSpace,
|
2011-01-16 20:05:05 +00:00
|
|
|
|
moveAnnex,
|
2012-12-08 21:03:39 +00:00
|
|
|
|
sendAnnex,
|
2013-01-10 15:45:44 +00:00
|
|
|
|
prepSendAnnex,
|
2011-01-16 20:05:05 +00:00
|
|
|
|
removeAnnex,
|
|
|
|
|
fromAnnex,
|
|
|
|
|
moveBad,
|
2014-03-07 16:43:56 +00:00
|
|
|
|
KeyLocation(..),
|
2011-06-23 01:19:52 +00:00
|
|
|
|
getKeysPresent,
|
2012-01-02 18:20:20 +00:00
|
|
|
|
saveState,
|
|
|
|
|
downloadUrl,
|
2012-01-19 21:05:39 +00:00
|
|
|
|
preseedTmp,
|
2012-04-21 18:06:36 +00:00
|
|
|
|
freezeContent,
|
|
|
|
|
thawContent,
|
2013-10-10 21:27:00 +00:00
|
|
|
|
dirKeys,
|
2013-11-24 01:58:39 +00:00
|
|
|
|
withObjectLoc,
|
2015-06-02 18:20:38 +00:00
|
|
|
|
staleKeysPrune,
|
2011-01-16 20:05:05 +00:00
|
|
|
|
) where
|
2010-10-14 07:40:26 +00:00
|
|
|
|
|
2012-03-11 22:04:58 +00:00
|
|
|
|
import System.IO.Unsafe (unsafeInterleaveIO)
|
2015-06-02 18:20:38 +00:00
|
|
|
|
import qualified Data.Set as S
|
2011-11-10 01:45:03 +00:00
|
|
|
|
|
2011-10-05 20:02:51 +00:00
|
|
|
|
import Common.Annex
|
2011-10-15 20:21:08 +00:00
|
|
|
|
import Logs.Location
|
2015-05-12 19:19:08 +00:00
|
|
|
|
import Logs.Transfer
|
2011-06-30 17:16:57 +00:00
|
|
|
|
import qualified Git
|
2010-10-14 07:40:26 +00:00
|
|
|
|
import qualified Annex
|
2011-10-04 04:40:47 +00:00
|
|
|
|
import qualified Annex.Queue
|
|
|
|
|
import qualified Annex.Branch
|
2012-03-22 21:09:54 +00:00
|
|
|
|
import Utility.DiskFree
|
2011-09-23 22:13:24 +00:00
|
|
|
|
import Utility.FileMode
|
2013-09-28 18:35:21 +00:00
|
|
|
|
import qualified Annex.Url as Url
|
2011-06-02 01:56:04 +00:00
|
|
|
|
import Types.Key
|
2011-07-06 00:36:43 +00:00
|
|
|
|
import Utility.DataUnits
|
2012-01-19 21:05:39 +00:00
|
|
|
|
import Utility.CopyFile
|
2011-03-28 01:43:25 +00:00
|
|
|
|
import Config
|
2012-04-21 18:06:36 +00:00
|
|
|
|
import Git.SharedRepository
|
2012-04-21 20:59:49 +00:00
|
|
|
|
import Annex.Perms
|
2013-04-02 17:13:42 +00:00
|
|
|
|
import Annex.Link
|
2012-12-07 21:28:23 +00:00
|
|
|
|
import Annex.Content.Direct
|
2013-05-17 19:59:37 +00:00
|
|
|
|
import Annex.ReplaceFile
|
2015-05-18 20:23:07 +00:00
|
|
|
|
import Utility.LockPool
|
2015-04-04 00:38:56 +00:00
|
|
|
|
import Messages.Progress
|
Do verification of checksums of annex objects downloaded from remotes.
* When annex objects are received into git repositories, their checksums are
verified then too.
* To get the old, faster, behavior of not verifying checksums, set
annex.verify=false, or remote.<name>.annex-verify=false.
* setkey, rekey: These commands also now verify that the provided file
matches the key, unless annex.verify=false.
* reinject: Already verified content; this can now be disabled by
setting annex.verify=false.
recvkey and reinject already did verification, so removed now duplicate
code from them. fsck still does its own verification, which is ok since it
does not use getViaTmp, so verification doesn't happen twice when using fsck
--from.
2015-10-01 19:54:37 +00:00
|
|
|
|
import qualified Types.Remote
|
|
|
|
|
import qualified Types.Backend
|
|
|
|
|
import qualified Backend
|
2014-01-28 20:01:19 +00:00
|
|
|
|
|
2011-11-09 22:33:15 +00:00
|
|
|
|
{- Checks if a given key's content is currently present. -}
inAnnex :: Key -> Annex Bool
inAnnex k = inAnnexCheck k (liftIO . doesFileExist)
|
|
|
|
|
|
|
|
|
|
{- Runs an arbitrary check on a key's content. -}
inAnnexCheck :: Key -> (FilePath -> Annex Bool) -> Annex Bool
inAnnexCheck k f = inAnnex' id False f k
|
2012-12-07 21:28:23 +00:00
|
|
|
|
|
|
|
|
|
{- Generic inAnnex, handling both indirect and direct mode.
 -
 - In direct mode, at least one of the associated files must pass the
 - check. Additionally, the file must be unmodified.
 -}
inAnnex' :: (a -> Bool) -> a -> (FilePath -> Annex a) -> Key -> Annex a
inAnnex' isgood bad check key = withObjectLoc key indirect direct
  where
	-- A remote repository's content cannot be checked locally.
	indirect loc = do
		whenM (fromRepo Git.repoIsUrl) $
			error "inAnnex cannot check remote repo"
		check loc
	direct [] = return bad
	direct (loc:rest) = do
		r <- check loc
		if not (isgood r)
			then direct rest
			else ifM (goodContent key loc)
				( return r
				, direct rest
				)
|
2011-11-09 22:33:15 +00:00
|
|
|
|
|
|
|
|
|
{- A safer check; the key's content must not only be present, but
 - is not in the process of being removed. -}
inAnnexSafe :: Key -> Annex (Maybe Bool)
inAnnexSafe key = inAnnex' (fromMaybe True) (Just False) go key
  where
	-- Result encoding: Nothing means locked (a removal may be in
	-- progress), Just True means present and unlocked, Just False
	-- means not present.
	is_locked = Nothing
	is_unlocked = Just True
	is_missing = Just False

	-- When a separate content lock file is in use (see
	-- contentLockFile), check that; otherwise check the content
	-- file itself.
	go contentfile = maybe (checkindirect contentfile) (checkdirect contentfile)
		=<< contentLockFile key

#ifndef mingw32_HOST_OS
	checkindirect contentfile = liftIO $ checkOr is_missing contentfile
	{- In direct mode, the content file must exist, but
	 - the lock file generally won't exist unless a removal is in
	 - process. -}
	checkdirect contentfile lockfile = liftIO $
		ifM (doesFileExist contentfile)
			( checkOr is_unlocked lockfile
			, return is_missing
			)
	-- d is the result to use when checkLocked cannot determine
	-- a lock state (Nothing).
	checkOr d lockfile = do
		v <- checkLocked lockfile
		return $ case v of
			Nothing -> d
			Just True -> is_locked
			Just False -> is_unlocked
#else
	-- On Windows, try to take a shared lock on the file itself;
	-- success means no exclusive (removal) lock is held.
	checkindirect f = liftIO $ ifM (doesFileExist f)
		( do
			v <- lockShared f
			case v of
				Nothing -> return is_locked
				Just lockhandle -> do
					dropLock lockhandle
					return is_unlocked
		, return is_missing
		)
	{- In Windows, see if we can take a shared lock. If so,
	 - remove the lock file to clean up after ourselves. -}
	checkdirect contentfile lockfile =
		ifM (liftIO $ doesFileExist contentfile)
			( modifyContent lockfile $ liftIO $ do
				v <- lockShared lockfile
				case v of
					Nothing -> return is_locked
					Just lockhandle -> do
						dropLock lockhandle
						-- Best-effort cleanup of the
						-- lock file we just released.
						void $ tryIO $ nukeFile lockfile
						return is_unlocked
			, return is_missing
			)
#endif
|
2014-01-28 20:01:19 +00:00
|
|
|
|
|
|
|
|
|
{- Direct mode and especially Windows has to use a separate lock
 - file from the content, since locking the actual content file
 - would interfere with the user's use of it. -}
contentLockFile :: Key -> Annex (Maybe FilePath)
#ifndef mingw32_HOST_OS
-- In indirect mode on POSIX, the content file itself is locked,
-- so no separate lock file is used (Nothing).
contentLockFile key = ifM isDirect
	( Just <$> calcRepo (gitAnnexContentLock key)
	, return Nothing
	)
#else
-- Windows always uses a separate lock file.
contentLockFile key = Just <$> calcRepo (gitAnnexContentLock key)
#endif
|
2010-10-16 17:59:48 +00:00
|
|
|
|
|
2014-08-21 00:08:45 +00:00
|
|
|
|
{- Evidence that lockContent is holding the lock on a key's content;
 - passed to the action it runs. -}
newtype ContentLock = ContentLock Key
|
|
|
|
|
|
2011-11-10 02:15:33 +00:00
|
|
|
|
{- Content is exclusively locked while running an action that might remove
 - it. (If the content is not present, no locking is done.)
 -}
lockContent :: Key -> (ContentLock -> Annex a) -> Annex a
lockContent key a = do
	contentfile <- calcRepo $ gitAnnexLocation key
	lockfile <- contentLockFile key
	-- bracket guarantees the lock is released even if the action
	-- throws an exception.
	bracket
		(lock contentfile lockfile)
		(unlock lockfile)
		(const $ a $ ContentLock key)
  where
	alreadylocked = error "content is locked"
	-- Best-effort removal of a separate lock file after unlocking.
	cleanuplockfile lockfile = modifyContent lockfile $
		void $ liftIO $ tryIO $
			nukeFile lockfile
#ifndef mingw32_HOST_OS
	{- Since content files are stored with the write bit disabled, have
	 - to fiddle with permissions to open for an exclusive lock. -}
	lock contentfile Nothing = trylock $ bracket_
		(thawContent contentfile)
		(freezeContent contentfile)
		(maybe alreadylocked return
			=<< liftIO (tryLockExclusive Nothing contentfile))
	lock _ (Just lockfile) = trylock $ do
		mode <- annexFileMode
		maybe alreadylocked return
			=<< modifyContent lockfile
				(liftIO $ tryLockExclusive (Just mode) lockfile)
	unlock mlockfile lck = do
		maybe noop cleanuplockfile mlockfile
		liftIO $ dropLock lck
	failedtolock e = error $ "failed to lock content: " ++ show e
	trylock locker = locker `catchIO` failedtolock
#else
	lock _ (Just lockfile) = do
		-- Ensure the lock file exists before trying to lock it.
		modifyContent lockfile $
			void $ liftIO $ tryIO $
				writeFile lockfile ""
		maybe alreadylocked (return . Just)
			=<< liftIO (lockExclusive lockfile)
	-- never reached; windows always uses a separate lock file
	lock _ Nothing = return Nothing
	unlock mlockfile mlockhandle = do
		liftIO $ maybe noop dropLock mlockhandle
		maybe noop cleanuplockfile mlockfile
#endif
|
2011-11-10 01:45:03 +00:00
|
|
|
|
|
2015-10-01 18:13:53 +00:00
|
|
|
|
{- Runs an action, passing it the temp file to get,
 - and if the action succeeds, verifies the file matches
 - the key and moves the file into the annex as a key's content. -}
getViaTmp :: VerifyConfig -> Key -> (FilePath -> Annex (Bool, Types.Remote.Verification)) -> Annex Bool
getViaTmp v key action =
	checkDiskSpaceToGet key False (getViaTmp' v key action)
|
2013-01-10 15:45:44 +00:00
|
|
|
|
|
|
|
|
|
{- Like getViaTmp, but does not check that there is enough disk space
 - for the incoming key. For use when the key content is already on disk
 - and not being copied into place. -}
getViaTmp' :: VerifyConfig -> Key -> (FilePath -> Annex (Bool, Types.Remote.Verification)) -> Annex Bool
getViaTmp' v key action = do
	tmpfile <- prepTmp key
	(gotit, verification) <- action tmpfile
	if not gotit
		-- On transfer failure, the tmp file is left behind, in
		-- case the caller wants to resume its transfer.
		then return False
		else ifM (verifyKeyContent v verification key tmpfile)
			( do
				moveAnnex key tmpfile
				logStatus key InfoPresent
				return True
			, do
				warning "verification of content failed"
				liftIO $ nukeFile tmpfile
				return False
			)
|
2014-01-04 19:08:06 +00:00
|
|
|
|
|
Do verification of checksums of annex objects downloaded from remotes.
* When annex objects are received into git repositories, their checksums are
verified then too.
* To get the old, faster, behavior of not verifying checksums, set
annex.verify=false, or remote.<name>.annex-verify=false.
* setkey, rekey: These commands also now verify that the provided file
matches the key, unless annex.verify=false.
* reinject: Already verified content; this can now be disabled by
setting annex.verify=false.
recvkey and reinject already did verification, so removed now duplicate
code from them. fsck still does its own verification, which is ok since it
does not use getViaTmp, so verification doesn't happen twice when using fsck
--from.
2015-10-01 19:54:37 +00:00
|
|
|
|
{- Verifies that a file is the expected content of a key.
 -
 - Configuration can prevent verification, for either a
 - particular remote or always.
 -
 - Most keys have a known size, and if so, the file size is checked.
 -
 - When the key's backend allows verifying the content (eg via checksum),
 - it is checked.
 -}
verifyKeyContent :: VerifyConfig -> Types.Remote.Verification -> Key -> FilePath -> Annex Bool
verifyKeyContent _ Types.Remote.Verified _ _ = return True
verifyKeyContent v Types.Remote.UnVerified k f = ifM (shouldVerify v)
	( checksize <&&> checkcontent
	, return True
	)
  where
	-- Cheap check first: compare the file size against the key's
	-- recorded size, when it has one.
	checksize = case Types.Key.keySize k of
		Nothing -> return True
		Just expected -> do
			actual <- liftIO $ catchDefaultIO 0 $ getFileSize f
			return (actual == expected)
	-- Run the backend's content verifier, if the backend has one.
	checkcontent = maybe (return True) (\verifier -> verifier k f)
		(Types.Backend.verifyKeyContent =<< Backend.maybeLookupBackendName (Types.Key.keyBackendName k))
|
Do verification of checksums of annex objects downloaded from remotes.
* When annex objects are received into git repositories, their checksums are
verified then too.
* To get the old, faster, behavior of not verifying checksums, set
annex.verify=false, or remote.<name>.annex-verify=false.
* setkey, rekey: These commands also now verify that the provided file
matches the key, unless annex.verify=false.
* reinject: Already verified content; this can now be disabled by
setting annex.verify=false.
recvkey and reinject already did verification, so removed now duplicate
code from them. fsck still does its own verification, which is ok since it
does not use getViaTmp, so verification doesn't happen twice when using fsck
--from.
2015-10-01 19:54:37 +00:00
|
|
|
|
|
other 80% of avoding verification when hard linking to objects in shared repo
In c6632ee5c8e66c26ef18317f56ae02bae1e7e280, it actually only handled
uploading objects to a shared repository. To avoid verification when
downloading objects from a shared repository, was a lot harder.
On the plus side, if the process of downloading a file from a remote
is able to verify its content on the side, the remote can indicate this
now, and avoid the extra post-download verification.
As of yet, I don't have any remotes (except Git) using this ability.
Some more work would be needed to support it in special remotes.
It would make sense for tahoe to implicitly verify things downloaded from it;
as long as you trust your tahoe server (which typically runs locally),
there's cryptographic integrity. OTOH, despite bup being based on shas,
a bup repo under an attacker's control could have the git ref used for an
object changed, and so a bup repo shouldn't implicitly verify. Indeed,
tahoe seems unique in being trustworthy enough to implicitly verify.
2015-10-02 17:56:42 +00:00
|
|
|
|
{- Controls whether downloaded content gets verified; AlwaysVerify and
 - NoVerify force it on or off, DefaultVerify consults the annex.verify
 - git config, and RemoteVerify additionally consults the remote's
 - annex-verify config (see shouldVerify). -}
data VerifyConfig = AlwaysVerify | NoVerify | RemoteVerify Remote | DefaultVerify
|
Do verification of checksums of annex objects downloaded from remotes.
* When annex objects are received into git repositories, their checksums are
verified then too.
* To get the old, faster, behavior of not verifying checksums, set
annex.verify=false, or remote.<name>.annex-verify=false.
* setkey, rekey: These commands also now verify that the provided file
matches the key, unless annex.verify=false.
* reinject: Already verified content; this can now be disabled by
setting annex.verify=false.
recvkey and reinject already did verification, so removed now duplicate
code from them. fsck still does its own verification, which is ok since it
does not use getViaTmp, so verification doesn't happen twice when using fsck
--from.
2015-10-01 19:54:37 +00:00
|
|
|
|
|
other 80% of avoding verification when hard linking to objects in shared repo
In c6632ee5c8e66c26ef18317f56ae02bae1e7e280, it actually only handled
uploading objects to a shared repository. To avoid verification when
downloading objects from a shared repository, was a lot harder.
On the plus side, if the process of downloading a file from a remote
is able to verify its content on the side, the remote can indicate this
now, and avoid the extra post-download verification.
As of yet, I don't have any remotes (except Git) using this ability.
Some more work would be needed to support it in special remotes.
It would make sense for tahoe to implicitly verify things downloaded from it;
as long as you trust your tahoe server (which typically runs locally),
there's cryptographic integrity. OTOH, despite bup being based on shas,
a bup repo under an attacker's control could have the git ref used for an
object changed, and so a bup repo shouldn't implicitly verify. Indeed,
tahoe seems unique in being trustworthy enough to implicitly verify.
2015-10-02 17:56:42 +00:00
|
|
|
|
{- Decides whether a VerifyConfig calls for verification, consulting
 - git config as needed. -}
shouldVerify :: VerifyConfig -> Annex Bool
shouldVerify v = case v of
	AlwaysVerify -> return True
	NoVerify -> return False
	DefaultVerify -> annexVerify <$> Annex.getGitConfig
	-- Both the global and the per-remote setting must allow it.
	RemoteVerify r -> shouldVerify DefaultVerify
		<&&> pure (remoteAnnexVerify (Types.Remote.gitconfig r))
|
|
|
|
|
|
2015-10-01 18:13:53 +00:00
|
|
|
|
{- Checks if there is enough free disk space to download a key
 - to its temp file.
 -
 - When the temp file already exists, count the space it is using as
 - free, since the download will overwrite it or resume.
 -
 - When there's enough free space, runs the download action.
 -}
checkDiskSpaceToGet :: Key -> a -> Annex a -> Annex a
checkDiskSpaceToGet key unabletoget getkey = do
	tmp <- fromRepo $ gitAnnexTmpObjectLocation key
	exists <- liftIO $ doesFileExist tmp
	partial <- if exists
		then liftIO $ getFileSize tmp
		else return 0
	ifM (checkDiskSpace Nothing key partial True)
		( do
			-- A pre-existing tmp file may not have been left
			-- writable, so thaw it before resuming into it.
			when exists $ thawContent tmp
			getkey
		, return unabletoget
		)
|
2011-03-22 21:27:04 +00:00
|
|
|
|
|
2013-01-10 15:45:44 +00:00
|
|
|
|
{- Returns the temp file location for a key, making sure its parent
 - directory exists. -}
prepTmp :: Key -> Annex FilePath
prepTmp key = do
	tmp <- fromRepo $ gitAnnexTmpObjectLocation key
	createAnnexDirectory (parentDir tmp)
	return tmp
|
|
|
|
|
|
2014-07-29 21:17:41 +00:00
|
|
|
|
{- Creates a temp file for a key, runs an action on it, and cleans up
 - the temp file. If the action throws an exception, the temp file is
 - left behind, which allows for resuming.
 -}
withTmp :: Key -> (FilePath -> Annex a) -> Annex a
withTmp key action = do
	tmp <- prepTmp key
	res <- action tmp
	liftIO $ nukeFile tmp
	return res
|
|
|
|
|
|
2011-03-22 21:27:04 +00:00
|
|
|
|
{- Checks that there is disk space available to store a given key,
 - in a destination (or the annex) printing a warning if not.
 -
 - If the destination is on the same filesystem as the annex,
 - checks for any other running downloads, removing the amount of data still
 - to be downloaded from the free space. This way, we avoid overcommitting
 - when doing concurrent downloads.
 -}
checkDiskSpace :: Maybe FilePath -> Key -> Integer -> Bool -> Annex Bool
checkDiskSpace destination key alreadythere samefilesystem = ifM (Annex.getState Annex.force)
	( return True
	, do
		-- We can't get inprogress and free at the same
		-- time, and both can be changing, so there's a
		-- small race here. Err on the side of caution
		-- by getting inprogress first, so if it takes
		-- a while, we'll see any decrease in the free
		-- disk space.
		inprogress <- if samefilesystem
			then sizeOfDownloadsInProgress (/= key)
			else pure 0
		free <- liftIO . getDiskFree =<< dir
		case (free, fromMaybe 1 (keySize key)) of
			(Just have, need) -> do
				reserve <- annexDiskReserve <$> Annex.getGitConfig
				let delta = need + reserve - have - alreadythere + inprogress
				let ok = delta <= 0
				unless ok $
					needmorespace delta
				return ok
			_ -> return True
	)
  where
	dir = maybe (fromRepo gitAnnexDir) return destination
	needmorespace n =
		warning $ "not enough free space, need " ++
			roughSize storageUnits True n ++
			" more" ++ forcemsg
	forcemsg = " (use --force to override this check or adjust annex.diskreserve)"
|
2011-03-22 21:27:04 +00:00
|
|
|
|
|
2012-12-07 18:40:31 +00:00
|
|
|
|
{- Moves a key's content into .git/annex/objects/
 -
 - In direct mode, moves it to the associated file, or files.
 -
 - What if the key there already has content? This could happen for
 - various reasons; perhaps the same content is being annexed again.
 - Perhaps there has been a hash collision generating the keys.
 -
 - The current strategy is to assume that in this case it's safe to delete
 - one of the two copies of the content; and the one already in the annex
 - is left there, assuming it's the original, canonical copy.
 -
 - I considered being more paranoid, and checking that both files had
 - the same content. Decided against it because A) users explicitly choose
 - a backend based on its hashing properties and so if they're dealing
 - with colliding files it's their own fault and B) adding such a check
 - would not catch all cases of colliding keys. For example, perhaps
 - a remote has a key; if it's then added again with different content then
 - the overall system now has two different pieces of content for that
 - key, and one of them will probably get deleted later. So, adding the
 - check here would only raise expectations that git-annex cannot truly
 - meet.
 -}
moveAnnex :: Key -> FilePath -> Annex ()
moveAnnex key src = withObjectLoc key storeobject storedirect
  where
	storeobject dest = ifM (liftIO $ doesFileExist dest)
		( alreadyhave
		, modifyContent dest $ do
			liftIO $ moveFile src dest
			freezeContent dest
		)
	storeindirect = storeobject =<< calcRepo (gitAnnexLocation key)

	{- In direct mode, the associated file's content may be locally
	 - modified. In that case, it's preserved. However, the content
	 - we're moving into the annex may be the only extant copy, so
	 - it's important we not lose it. So, when the key's content
	 - cannot be moved to any associated file, it's stored in indirect
	 - mode.
	 -}
	storedirect = storedirect' storeindirect
	storedirect' fallback [] = fallback
	storedirect' fallback (f:fs) = do
		thawContent src
		v <- isAnnexLink f
		if Just key == v
			then do
				updateInodeCache key src
				replaceFile f $ liftIO . moveFile src
				chmodContent f
				forM_ fs $
					addContentWhenNotPresent key f
			else ifM (goodContent key f)
				( storedirect' alreadyhave fs
				, storedirect' fallback fs
				)

	alreadyhave = liftIO $ removeFile src
|
2011-07-15 07:12:05 +00:00
|
|
|
|
|
2012-12-08 21:03:39 +00:00
|
|
|
|
{- Runs an action to transfer an object's content.
 -
 - In direct mode, it's possible for the file to change as it's being sent.
 - If this happens, runs the rollback action and returns False. The
 - rollback action should remove the data that was transferred.
 -}
sendAnnex :: Key -> Annex () -> (FilePath -> Annex Bool) -> Annex Bool
sendAnnex key rollback sendobject = go =<< prepSendAnnex key
  where
	go Nothing = return False
	go (Just (f, checksuccess)) = do
		r <- sendobject f
		ifM checksuccess
			( return r
			, do
				rollback
				return False
			)
|
|
|
|
|
|
|
|
|
|
{- Returns a file that contains an object's content,
 - and a check to run after the transfer is complete.
 -
 - In direct mode, it's possible for the file to change as it's being sent,
 - and the check detects this case and returns False.
 -
 - Note that the returned check action is, in some cases, run in the
 - Annex monad of the remote that is receiving the object, rather than
 - the sender. So it cannot rely on Annex state.
 -}
prepSendAnnex :: Key -> Annex (Maybe (FilePath, Annex Bool))
prepSendAnnex key = withObjectLoc key indirect direct
  where
	indirect f = return $ Just (f, return True)
	direct [] = return Nothing
	direct (f:fs) = do
		cache <- recordedInodeCache key
		-- check that we have a good file
		ifM (sameInodeCache f cache)
			( return $ Just (f, sameInodeCache f cache)
			, direct fs
			)
|
|
|
|
|
|
|
|
|
|
{- Performs an action, passing it the location to use for a key's content.
 -
 - In direct mode, the associated files will be passed. But, if there are
 - no associated files for a key, the indirect mode action will be
 - performed instead. -}
withObjectLoc :: Key -> (FilePath -> Annex a) -> ([FilePath] -> Annex a) -> Annex a
withObjectLoc key indirect direct = ifM isDirect
	( do
		fs <- associatedFiles key
		if null fs
			then goindirect
			else direct fs
	, goindirect
	)
  where
	goindirect = indirect =<< calcRepo (gitAnnexLocation key)
|
2012-12-08 21:03:39 +00:00
|
|
|
|
|
2013-11-15 18:52:03 +00:00
|
|
|
|
{- Runs a cleanup action on a key's object location, then removes any
 - now-empty parent directories (up to 3 levels). -}
cleanObjectLoc :: Key -> Annex () -> Annex ()
cleanObjectLoc key cleaner = do
	file <- calcRepo $ gitAnnexLocation key
	void $ tryIO $ thawContentDir file
	cleaner
	liftIO $ removeparents file (3 :: Int)
  where
	removeparents _ 0 = noop
	removeparents file n = do
		let dir = parentDir file
		-- Stop as soon as a directory cannot be removed
		-- (eg, it is not empty).
		maybe noop (const $ removeparents dir (n-1))
			<=< catchMaybeIO $ removeDirectory dir
|
2012-02-24 20:30:47 +00:00
|
|
|
|
|
2012-12-07 21:28:23 +00:00
|
|
|
|
{- Removes a key's file from .git/annex/objects/
 -
 - In direct mode, deletes the associated files or files, and replaces
 - them with symlinks.
 -}
removeAnnex :: ContentLock -> Annex ()
removeAnnex (ContentLock key) = withObjectLoc key remove removedirect
  where
	remove file = cleanObjectLoc key $ do
		secureErase file
		liftIO $ nukeFile file
		removeInodeCache key
	removedirect fs = do
		cache <- recordedInodeCache key
		removeInodeCache key
		mapM_ (resetfile cache) fs
	-- Only reset a direct mode file whose content is unmodified,
	-- per the recorded inode cache.
	resetfile cache f = whenM (sameInodeCache f cache) $ do
		l <- calcRepo $ gitAnnexLink f key
		secureErase f
		replaceFile f $ makeAnnexLink l
|
2010-11-08 23:26:37 +00:00
|
|
|
|
|
2014-01-24 16:58:52 +00:00
|
|
|
|
{- Runs the secure erase command if set, otherwise does nothing.
 - File may or may not be deleted at the end; caller is responsible for
 - making sure it's deleted. -}
secureErase :: FilePath -> Annex ()
secureErase file = maybe noop go =<< annexSecureEraseCommand <$> Annex.getGitConfig
  where
	go basecmd = void $ liftIO $
		boolSystem "sh" [Param "-c", Param $ gencmd basecmd]
	gencmd = massReplace [ ("%file", shellEscape file) ]
|
|
|
|
|
|
2010-11-08 23:26:37 +00:00
|
|
|
|
{- Moves a key's file out of .git/annex/objects/ -}
fromAnnex :: Key -> FilePath -> Annex ()
fromAnnex key dest = cleanObjectLoc key $ do
	file <- calcRepo $ gitAnnexLocation key
	thawContent file
	liftIO $ moveFile file dest
|
2010-11-08 20:47:36 +00:00
|
|
|
|
|
2010-11-13 18:59:27 +00:00
|
|
|
|
{- Moves a key out of .git/annex/objects/ into .git/annex/bad, and
 - returns the file it was moved to. -}
moveBad :: Key -> Annex FilePath
moveBad key = do
	src <- calcRepo $ gitAnnexLocation key
	bad <- fromRepo gitAnnexBadDir
	let dest = bad </> takeFileName src
	createAnnexDirectory (parentDir dest)
	cleanObjectLoc key $
		liftIO $ moveFile src dest
	logStatus key InfoMissing
	return dest
|
|
|
|
|
|
2014-03-07 16:43:56 +00:00
|
|
|
|
-- | Where a key's content may be found: only under .git/annex/objects
-- (InAnnex), or additionally in the direct mode work tree (InRepository).
data KeyLocation = InAnnex | InRepository
|
|
|
|
|
|
|
|
|
|
{- List of keys whose content exists in the specified location.
 -
 - InAnnex only lists keys under .git/annex/objects,
 - while InRepository, in direct mode, also finds keys located in the
 - work tree.
 -
 - Note that InRepository has to check whether direct mode files
 - have goodContent.
 -}
getKeysPresent :: KeyLocation -> Annex [Key]
getKeysPresent keyloc = do
	direct <- isDirect
	dir <- fromRepo gitAnnexObjectDir
	s <- getstate direct
	depth <- gitAnnexLocationDepth <$> Annex.getGitConfig
	liftIO $ walk s direct depth dir
  where
	walk s direct depth dir = do
		contents <- catchDefaultIO [] (dirContents dir)
		if depth < 2
			then do
				contents' <- filterM (present s direct) contents
				let keys = mapMaybe (fileKey . takeFileName) contents'
				continue keys []
			else do
				let deeper = walk s direct (depth - 1)
				continue [] (map deeper contents)
	continue keys [] = return keys
	continue keys (a:as) = do
		{- Force lazy traversal with unsafeInterleaveIO. -}
		morekeys <- unsafeInterleaveIO a
		continue (morekeys++keys) as

	present _ False d = presentInAnnex d
	present s True d = presentDirect s d <||> presentInAnnex d

	presentInAnnex = doesFileExist . contentfile
	contentfile d = d </> takeFileName d

	presentDirect s d = case keyloc of
		InAnnex -> return False
		InRepository -> case fileKey (takeFileName d) of
			Nothing -> return False
			Just k -> Annex.eval s $
				anyM (goodContent k) =<< associatedFiles k

	{- In order to run Annex monad actions within unsafeInterleaveIO,
	 - the current state is taken and reused. No changes made to this
	 - state will be preserved.
	 -
	 - As an optimisation, call inodesChanged to prime the state with
	 - a cached value that will be used in the call to goodContent.
	 -}
	getstate direct = do
		when direct $
			void inodesChanged
		Annex.getState id
|
|
|
|
|
|
2012-01-28 19:41:52 +00:00
|
|
|
|
{- Things to do to record changes to content when shutting down.
 -
 - It's acceptable to avoid committing changes to the branch,
 - especially if performing a short-lived action.
 -}
saveState :: Bool -> Annex ()
saveState nocommit = doSideAction $ do
	Annex.Queue.flush
	unless nocommit $
		whenM (annexAlwaysCommit <$> Annex.getGitConfig) $
			Annex.Branch.commit "update"
|
2012-01-02 18:20:20 +00:00
|
|
|
|
|
|
|
|
|
{- Downloads content from any of a list of urls. -}
downloadUrl :: [Url.URLString] -> FilePath -> Annex Bool
downloadUrl urls file = go =<< annexWebDownloadCommand <$> Annex.getGitConfig
  where
	-- No custom download command configured; use the built-in
	-- downloader, quietly when progress display is disabled.
	go Nothing = do
		a <- ifM commandProgressDisabled
			( return Url.downloadQuiet
			, return Url.download
			)
		Url.withUrlOptions $ \uo ->
			anyM (\u -> a u file uo) urls
	go (Just basecmd) = anyM (downloadcmd basecmd) urls
	downloadcmd basecmd url =
		progressCommand "sh" [Param "-c", Param $ gencmd url basecmd]
			<&&> liftIO (doesFileExist file)
	gencmd url = massReplace
		[ ("%file", shellEscape file)
		, ("%url", shellEscape url)
		]
|
2012-01-19 21:05:39 +00:00
|
|
|
|
|
|
|
|
|
{- Copies a key's content, when present, to a temp file.
 - This is used to speed up some rsyncs. -}
preseedTmp :: Key -> FilePath -> Annex Bool
preseedTmp key file = go =<< inAnnex key
  where
	go False = return False
	go True = do
		ok <- copy
		when ok $ thawContent file
		return ok
	copy = ifM (liftIO $ doesFileExist file)
		( return True
		, do
			s <- calcRepo $ gitAnnexLocation key
			liftIO $ ifM (doesFileExist s)
				( copyFileExternal CopyTimeStamps s file
				, return False
				)
		)
|
2012-04-21 18:06:36 +00:00
|
|
|
|
|
2013-05-06 21:30:57 +00:00
|
|
|
|
{- Blocks writing to an annexed file, and modifies file permissions to
 - allow reading it, per core.sharedRepository setting. -}
freezeContent :: FilePath -> Annex ()
freezeContent file = unlessM crippledFileSystem $
	withShared go
  where
	go GroupShared = liftIO $ modifyFileMode file $
		removeModes writeModes .
		addModes [ownerReadMode, groupReadMode]
	go AllShared = liftIO $ modifyFileMode file $
		removeModes writeModes .
		addModes readModes
	go _ = liftIO $ modifyFileMode file $
		removeModes writeModes .
		addModes [ownerReadMode]
|
2012-04-21 18:06:36 +00:00
|
|
|
|
|
2013-09-03 17:35:49 +00:00
|
|
|
|
{- Adjusts read mode of annexed file per core.sharedRepository setting. -}
chmodContent :: FilePath -> Annex ()
chmodContent file = unlessM crippledFileSystem $
	withShared go
  where
	go GroupShared = liftIO $ modifyFileMode file $
		addModes [ownerReadMode, groupReadMode]
	go AllShared = liftIO $ modifyFileMode file $
		addModes readModes
	go _ = liftIO $ modifyFileMode file $
		addModes [ownerReadMode]
|
|
|
|
|
|
2012-04-21 18:06:36 +00:00
|
|
|
|
{- Allows writing to an annexed file that freezeContent was called on
 - before. -}
thawContent :: FilePath -> Annex ()
thawContent file = unlessM crippledFileSystem $
	withShared go
  where
	go GroupShared = liftIO $ groupWriteRead file
	go AllShared = liftIO $ groupWriteRead file
	go _ = liftIO $ allowWrite file
|
2013-10-10 21:27:00 +00:00
|
|
|
|
|
|
|
|
|
{- Finds files directly inside a directory like gitAnnexBadDir
 - (not in subdirectories) and returns the corresponding keys. -}
dirKeys :: (Git.Repo -> FilePath) -> Annex [Key]
dirKeys dirspec = do
	dir <- fromRepo dirspec
	ifM (liftIO $ doesDirectoryExist dir)
		( do
			contents <- liftIO $ getDirectoryContents dir
			files <- liftIO $ filterM doesFileExist $
				map (dir </>) contents
			return $ mapMaybe (fileKey . takeFileName) files
		, return []
		)
|
|
|
|
|
|
2015-06-02 18:20:38 +00:00
|
|
|
|
{- Looks in the specified directory for bad/tmp keys, and returns a list
 - of those that might still have value, or might be stale and removable.
 -
 - Also, stale keys that can be proven to have no value
 - (ie, their content is already present) are deleted.
 -}
staleKeysPrune :: (Git.Repo -> FilePath) -> Bool -> Annex [Key]
staleKeysPrune dirspec nottransferred = do
	contents <- dirKeys dirspec

	dups <- filterM inAnnex contents
	let stale = contents `exclude` dups

	dir <- fromRepo dirspec
	liftIO $ forM_ dups $ \t -> removeFile $ dir </> keyFile t

	if nottransferred
		then do
			inprogress <- S.fromList . map (transferKey . fst)
				<$> getTransfers
			return $ filter (`S.notMember` inprogress) stale
		else return stale
|
|
|
|
|
|
|
|
|
|
{- Finds items in the first, smaller list, that are not
 - present in the second, larger list.
 -
 - Constructing a single set, of the list that tends to be
 - smaller, appears more efficient in both memory and CPU
 - than constructing and taking the S.difference of two sets. -}
exclude :: Ord a => [a] -> [a] -> [a]
exclude [] _ = [] -- optimisation
exclude smaller larger = S.toList $ remove larger $ S.fromList smaller
  where
	remove a b = foldl (flip S.delete) b a
|