2011-01-16 20:05:05 +00:00
|
|
|
|
{- git-annex file content managing
|
2010-10-27 20:53:54 +00:00
|
|
|
|
-
|
2019-08-27 16:59:57 +00:00
|
|
|
|
- Copyright 2010-2019 Joey Hess <id@joeyh.name>
|
2010-10-27 20:53:54 +00:00
|
|
|
|
-
|
2019-03-13 19:48:14 +00:00
|
|
|
|
- Licensed under the GNU AGPL version 3 or higher.
|
2010-10-27 20:53:54 +00:00
|
|
|
|
-}
|
2010-10-14 07:40:26 +00:00
|
|
|
|
|
2013-05-11 20:03:00 +00:00
|
|
|
|
{-# LANGUAGE CPP #-}
|
|
|
|
|
|
2011-10-04 04:40:47 +00:00
|
|
|
|
module Annex.Content (
|
2011-01-16 20:05:05 +00:00
|
|
|
|
inAnnex,
|
2015-04-09 19:34:47 +00:00
|
|
|
|
inAnnex',
|
2011-11-09 22:33:15 +00:00
|
|
|
|
inAnnexSafe,
|
2013-07-18 17:30:12 +00:00
|
|
|
|
inAnnexCheck,
|
2018-12-04 16:20:34 +00:00
|
|
|
|
objectFileExists,
|
2015-10-08 18:27:37 +00:00
|
|
|
|
lockContentShared,
|
2015-10-09 19:48:02 +00:00
|
|
|
|
lockContentForRemoval,
|
|
|
|
|
ContentRemovalLock,
|
2018-06-21 17:34:11 +00:00
|
|
|
|
RetrievalSecurityPolicy(..),
|
2011-01-16 20:05:05 +00:00
|
|
|
|
getViaTmp,
|
2018-03-13 18:18:30 +00:00
|
|
|
|
getViaTmpFromDisk,
|
2015-10-01 18:13:53 +00:00
|
|
|
|
checkDiskSpaceToGet,
|
2014-07-29 22:40:40 +00:00
|
|
|
|
prepTmp,
|
2011-04-28 00:06:07 +00:00
|
|
|
|
withTmp,
|
2011-03-22 21:27:04 +00:00
|
|
|
|
checkDiskSpace,
|
2017-11-30 20:08:30 +00:00
|
|
|
|
needMoreDiskSpace,
|
2011-01-16 20:05:05 +00:00
|
|
|
|
moveAnnex,
|
2015-12-22 20:22:28 +00:00
|
|
|
|
populatePointerFile,
|
2015-12-27 19:59:59 +00:00
|
|
|
|
linkToAnnex,
|
|
|
|
|
linkFromAnnex,
|
2015-12-04 18:20:32 +00:00
|
|
|
|
LinkAnnexResult(..),
|
2015-12-15 17:02:33 +00:00
|
|
|
|
unlinkAnnex,
|
2015-12-11 19:13:36 +00:00
|
|
|
|
checkedCopyFile,
|
2016-01-07 18:51:28 +00:00
|
|
|
|
linkOrCopy,
|
|
|
|
|
linkOrCopy',
|
2012-12-08 21:03:39 +00:00
|
|
|
|
sendAnnex,
|
2013-01-10 15:45:44 +00:00
|
|
|
|
prepSendAnnex,
|
2011-01-16 20:05:05 +00:00
|
|
|
|
removeAnnex,
|
|
|
|
|
moveBad,
|
2014-03-07 16:43:56 +00:00
|
|
|
|
KeyLocation(..),
|
2019-08-27 16:59:57 +00:00
|
|
|
|
listKeys,
|
2012-01-02 18:20:20 +00:00
|
|
|
|
saveState,
|
|
|
|
|
downloadUrl,
|
2012-01-19 21:05:39 +00:00
|
|
|
|
preseedTmp,
|
2013-10-10 21:27:00 +00:00
|
|
|
|
dirKeys,
|
2013-11-24 01:58:39 +00:00
|
|
|
|
withObjectLoc,
|
2015-06-02 18:20:38 +00:00
|
|
|
|
staleKeysPrune,
|
2017-11-29 17:49:52 +00:00
|
|
|
|
pruneTmpWorkDirBefore,
|
2015-12-11 14:42:18 +00:00
|
|
|
|
isUnmodified,
|
2018-10-25 20:38:04 +00:00
|
|
|
|
isUnmodifiedCheap,
|
2016-04-20 17:21:56 +00:00
|
|
|
|
verifyKeyContent,
|
|
|
|
|
VerifyConfig(..),
|
|
|
|
|
Verification(..),
|
|
|
|
|
unVerified,
|
2017-11-29 17:49:52 +00:00
|
|
|
|
withTmpWorkDir,
|
2011-01-16 20:05:05 +00:00
|
|
|
|
) where
|
2010-10-14 07:40:26 +00:00
|
|
|
|
|
2012-03-11 22:04:58 +00:00
|
|
|
|
import System.IO.Unsafe (unsafeInterleaveIO)
|
2015-06-02 18:20:38 +00:00
|
|
|
|
import qualified Data.Set as S
|
2011-11-10 01:45:03 +00:00
|
|
|
|
|
2016-01-20 20:36:33 +00:00
|
|
|
|
import Annex.Common
|
2011-10-15 20:21:08 +00:00
|
|
|
|
import Logs.Location
|
2016-08-03 16:37:12 +00:00
|
|
|
|
import Types.Transfer
|
2015-05-12 19:19:08 +00:00
|
|
|
|
import Logs.Transfer
|
2011-06-30 17:16:57 +00:00
|
|
|
|
import qualified Git
|
2010-10-14 07:40:26 +00:00
|
|
|
|
import qualified Annex
|
2011-10-04 04:40:47 +00:00
|
|
|
|
import qualified Annex.Queue
|
|
|
|
|
import qualified Annex.Branch
|
2011-09-23 22:13:24 +00:00
|
|
|
|
import Utility.FileMode
|
2013-09-28 18:35:21 +00:00
|
|
|
|
import qualified Annex.Url as Url
|
2012-01-19 21:05:39 +00:00
|
|
|
|
import Utility.CopyFile
|
2015-11-17 01:00:54 +00:00
|
|
|
|
import Utility.Metered
|
2016-01-05 21:22:19 +00:00
|
|
|
|
import Git.FilePath
|
2012-04-21 20:59:49 +00:00
|
|
|
|
import Annex.Perms
|
2013-04-02 17:13:42 +00:00
|
|
|
|
import Annex.Link
|
2015-11-12 22:05:45 +00:00
|
|
|
|
import Annex.LockPool
|
2020-04-17 18:36:45 +00:00
|
|
|
|
import Annex.WorkerPool
|
2015-04-04 00:38:56 +00:00
|
|
|
|
import Messages.Progress
|
2018-06-21 17:34:11 +00:00
|
|
|
|
import Types.Remote (unVerified, Verification(..), RetrievalSecurityPolicy(..))
|
Do verification of checksums of annex objects downloaded from remotes.
* When annex objects are received into git repositories, their checksums are
verified then too.
* To get the old, faster, behavior of not verifying checksums, set
annex.verify=false, or remote.<name>.annex-verify=false.
* setkey, rekey: These commands also now verify that the provided file
matches the key, unless annex.verify=false.
* reinject: Already verified content; this can now be disabled by
setting annex.verify=false.
recvkey and reinject already did verification, so removed now duplicate
code from them. fsck still does its own verification, which is ok since it
does not use getViaTmp, so verification doesn't happen twice when using fsck
--from.
2015-10-01 19:54:37 +00:00
|
|
|
|
import qualified Types.Remote
|
|
|
|
|
import qualified Types.Backend
|
|
|
|
|
import qualified Backend
|
2015-12-11 17:56:12 +00:00
|
|
|
|
import qualified Database.Keys
|
2015-10-08 20:55:11 +00:00
|
|
|
|
import Types.NumCopies
|
annex.securehashesonly
Cryptographically secure hashes can be forced to be used in a repository,
by setting annex.securehashesonly. This does not prevent the git repository
from containing files with insecure hashes, but it does prevent the content
of such files from being pulled into .git/annex/objects from another
repository.
We want to make sure that at no point does git-annex accept content into
.git/annex/objects that is hashed with an insecure key. Here's how it
was done:
* .git/annex/objects/xx/yy/KEY/ is kept frozen, so nothing can be
written to it normally
* So every place that writes content must call, thawContent or modifyContent.
We can audit for these, and be sure we've considered all cases.
* The main functions are moveAnnex, and linkToAnnex; these were made to
check annex.securehashesonly, and are the main security boundary
for annex.securehashesonly.
* Most other calls to modifyContent deal with other files in the KEY
directory (inode cache etc). The other ones that mess with the content
are:
- Annex.Direct.toDirectGen, in which content already in the
annex directory is moved to the direct mode file, so not relevant.
- fix and lock, which don't add new content
- Command.ReKey.linkKey, which manually unlocks it to make a
copy.
* All other calls to thawContent appear safe.
Made moveAnnex return a Bool, so checked all callsites and made them
deal with a failure in appropriate ways.
linkToAnnex simply returns LinkAnnexFailed; all callsites already deal
with it failing in appropriate ways.
This commit was sponsored by Riku Voipio.
2017-02-27 17:01:32 +00:00
|
|
|
|
import Types.Key
|
2015-10-08 20:55:11 +00:00
|
|
|
|
import Annex.UUID
|
2015-12-09 19:42:16 +00:00
|
|
|
|
import Annex.InodeSentinal
|
2015-12-09 21:00:37 +00:00
|
|
|
|
import Utility.InodeCache
|
2018-08-22 18:41:09 +00:00
|
|
|
|
import Annex.Content.LowLevel
|
|
|
|
|
import Annex.Content.PointerFile
|
use fine-grained WorkerStages when transferring and verifying
This means that Command.Move and Command.Get don't need to
manually set the stage, and is a lot cleaner conceptually.
Also, this makes Command.Sync.syncFile use the worker pool better.
In the scenario where it first downloads content and then uploads it to
some other remotes, it will start in TransferStage, then enter VerifyStage
and then go back to TransferStage for each transfer to the remotes.
Before, it entered CleanupStage after the download, and stayed in it for
the upload, so too many transfer jobs could run at the same time.
Note that, in Remote.Git, it uses runTransfer and also verifyKeyContent
inside onLocal. That has a Annex state for the remote, with no worker pool.
So the resulting calls to enteringStage won't block in there.
While Remote.Git.copyToRemote does do checksum verification, I
realized that should not use a verification slot in the WorkerPool
to do it. Because, it's reading back from eg, a removable disk to checksum.
That will contend with other writes to that disk. It's best to treat
that checksum verification as just part of the transfer. So, removed the todo
item about that, as there's nothing needing to be done.
2019-06-19 17:09:26 +00:00
|
|
|
|
import Types.WorkerPool
|
2019-12-11 18:12:22 +00:00
|
|
|
|
import qualified Utility.RawFilePath as R
|
2014-01-28 20:01:19 +00:00
|
|
|
|
|
2019-12-18 20:45:03 +00:00
|
|
|
|
import qualified System.FilePath.ByteString as P
|
|
|
|
|
|
2011-11-09 22:33:15 +00:00
|
|
|
|
{- Checks if a given key's content is currently present. -}
inAnnex :: Key -> Annex Bool
inAnnex k = inAnnexCheck k (liftIO . R.doesPathExist)
|
2013-07-18 17:30:12 +00:00
|
|
|
|
|
|
|
|
|
{- Runs an arbitrary check on a key's content. -}
inAnnexCheck :: Key -> (RawFilePath -> Annex Bool) -> Annex Bool
inAnnexCheck k f = inAnnex' id False f k
|
2012-12-07 21:28:23 +00:00
|
|
|
|
|
2018-10-16 17:30:04 +00:00
|
|
|
|
{- inAnnex that performs an arbitrary check of the key's content. -}
inAnnex' :: (a -> Bool) -> a -> (RawFilePath -> Annex a) -> Key -> Annex a
inAnnex' isgood bad check key = withObjectLoc key $ \loc -> do
	r <- check loc
	if not (isgood r)
		then return bad
		-- When annex.thin is set, the object file could be
		-- modified; make sure it's not. (Suppress any messages
		-- about checksumming, to avoid them cluttering the
		-- display.)
		else ifM (annexThin <$> Annex.getGitConfig)
			( ifM (doQuietAction $ isUnmodified key loc)
				( return r
				, return bad
				)
			, return r
			)
|
2011-11-09 22:33:15 +00:00
|
|
|
|
|
2018-12-04 16:20:34 +00:00
|
|
|
|
{- Like inAnnex, checks if the object file for a key exists,
 - but there are no guarantees it has the right content. -}
objectFileExists :: Key -> Annex Bool
objectFileExists key = liftIO . R.doesPathExist =<< calcRepo (gitAnnexLocation key)
|
2018-12-04 16:20:34 +00:00
|
|
|
|
|
2011-11-09 22:33:15 +00:00
|
|
|
|
{- A safer check; the key's content must not only be present, but
 - is not in the process of being removed. -}
inAnnexSafe :: Key -> Annex (Maybe Bool)
inAnnexSafe key =
	inAnnex' (fromMaybe True) (Just False) (go . fromRawFilePath) key
  where
	-- Nothing = locked (removal in progress);
	-- Just True = present and unlocked; Just False = not present.
	is_locked = Nothing
	is_unlocked = Just True
	is_missing = Just False

	go contentfile = flip checklock contentfile =<< contentLockFile key

#ifndef mingw32_HOST_OS
	checklock Nothing contentfile = checkOr is_missing contentfile
	{- The content file must exist, but the lock file generally
	 - won't exist unless a removal is in process. -}
	checklock (Just lockfile) contentfile =
		ifM (liftIO $ doesFileExist contentfile)
			( checkOr is_unlocked lockfile
			, return is_missing
			)
	checkOr d lockfile = checkLocked lockfile >>= return . \case
		Nothing -> d
		Just True -> is_locked
		Just False -> is_unlocked
#else
	checklock Nothing contentfile = liftIO $ ifM (doesFileExist contentfile)
		( lockShared contentfile >>= \case
			Nothing -> return is_locked
			Just lockhandle -> do
				dropLock lockhandle
				return is_unlocked
		, return is_missing
		)
	{- In Windows, see if we can take a shared lock. If so,
	 - remove the lock file to clean up after ourselves. -}
	checklock (Just lockfile) contentfile =
		ifM (liftIO $ doesFileExist contentfile)
			( modifyContent lockfile $ liftIO $
				lockShared lockfile >>= \case
					Nothing -> return is_locked
					Just lockhandle -> do
						dropLock lockhandle
						void $ tryIO $ nukeFile lockfile
						return is_unlocked
			, return is_missing
			)
#endif
|
2014-01-28 20:01:19 +00:00
|
|
|
|
|
2019-08-27 16:59:57 +00:00
|
|
|
|
{- Windows has to use a separate lock file from the content, since
 - locking the actual content file would interfere with the user's
 - use of it. -}
contentLockFile :: Key -> Annex (Maybe FilePath)
#ifndef mingw32_HOST_OS
contentLockFile _ = pure Nothing
#else
contentLockFile key = Just <$> calcRepo (gitAnnexContentLock key)
#endif
|
2010-10-16 17:59:48 +00:00
|
|
|
|
|
2015-10-08 18:27:37 +00:00
|
|
|
|
{- Prevents the content from being removed while the action is running.
 - Uses a shared lock.
 -
 - If locking fails, or the content is not present, throws an exception
 - rather than running the action.
 -}
lockContentShared :: Key -> (VerifiedCopy -> Annex a) -> Annex a
lockContentShared key a = lockContentUsing lock key $ ifM (inAnnex key)
	( do
		u <- getUUID
		withVerifiedCopy LockedCopy u (return True) a
	, giveup "failed to lock content: not present"
	)
  where
#ifndef mingw32_HOST_OS
	lock contentfile Nothing = tryLockShared Nothing contentfile
	lock _ (Just lockfile) = posixLocker tryLockShared lockfile
#else
	lock = winLocker lockShared
#endif
|
|
|
|
|
|
|
|
|
|
{- Exclusively locks content, while performing an action that
 - might remove it.
 -
 - If locking fails, throws an exception rather than running the action.
 -}
lockContentForRemoval :: Key -> (ContentRemovalLock -> Annex a) -> Annex a
lockContentForRemoval key a = lockContentUsing lock key $
	a (ContentRemovalLock key)
  where
#ifndef mingw32_HOST_OS
	{- Since content files are stored with the write bit disabled, have
	 - to fiddle with permissions to open for an exclusive lock. -}
	lock contentfile Nothing = bracket_
		(thawContent contentfile)
		(freezeContent contentfile)
		(tryLockExclusive Nothing contentfile)
	lock _ (Just lockfile) = posixLocker tryLockExclusive lockfile
#else
	lock = winLocker lockExclusive
#endif
|
|
|
|
|
|
|
|
|
|
{- Passed the object content file, and maybe a separate lock file to use,
 - when the content file itself should not be locked. -}
type ContentLocker = FilePath -> Maybe LockFile -> Annex (Maybe LockHandle)

#ifndef mingw32_HOST_OS
posixLocker :: (Maybe FileMode -> LockFile -> Annex (Maybe LockHandle)) -> LockFile -> Annex (Maybe LockHandle)
posixLocker takelock lockfile = do
	mode <- annexFileMode
	modifyContent lockfile $
		takelock (Just mode) lockfile
#else
winLocker :: (LockFile -> IO (Maybe LockHandle)) -> ContentLocker
winLocker takelock _ (Just lockfile) = do
	modifyContent lockfile $
		void $ liftIO $ tryIO $
			writeFile lockfile ""
	liftIO $ takelock lockfile
-- never reached; windows always uses a separate lock file
winLocker _ _ Nothing = return Nothing
#endif
|
|
|
|
|
|
|
|
|
|
-- | Takes the content lock for a key (using the provided ContentLocker),
-- runs the action while it is held, and releases it afterwards,
-- cleaning up any separate lock file.
lockContentUsing :: ContentLocker -> Key -> Annex a -> Annex a
lockContentUsing locker key a = do
	contentfile <- fromRawFilePath <$> calcRepo (gitAnnexLocation key)
	lockfile <- contentLockFile key
	bracket
		(lock contentfile lockfile)
		(unlock lockfile)
		(const a)
  where
	alreadylocked = giveup "content is locked"
	failedtolock e = giveup $ "failed to lock content: " ++ show e

	lock contentfile lockfile =
		(maybe alreadylocked return
			=<< locker contentfile lockfile)
		`catchIO` failedtolock

#ifndef mingw32_HOST_OS
	unlock mlockfile lck = do
		maybe noop cleanuplockfile mlockfile
		liftIO $ dropLock lck
#else
	unlock mlockfile lck = do
		-- Can't delete a locked file on Windows
		liftIO $ dropLock lck
		maybe noop cleanuplockfile mlockfile
#endif

	cleanuplockfile lockfile = modifyContent lockfile $
		void $ liftIO $ tryIO $
			nukeFile lockfile
|
|
|
|
|
|
2015-10-01 18:13:53 +00:00
|
|
|
|
{- Runs an action, passing it the temp file to get,
 - and if the action succeeds, verifies the file matches
 - the key and moves the file into the annex as a key's content. -}
getViaTmp :: RetrievalSecurityPolicy -> VerifyConfig -> Key -> (FilePath -> Annex (Bool, Verification)) -> Annex Bool
getViaTmp rsp v key action = checkDiskSpaceToGet key False $
	getViaTmpFromDisk rsp v key action
|
2013-01-10 15:45:44 +00:00
|
|
|
|
|
|
|
|
|
{- Like getViaTmp, but does not check that there is enough disk space
 - for the incoming key. For use when the key content is already on disk
 - and not being copied into place. -}
getViaTmpFromDisk :: RetrievalSecurityPolicy -> VerifyConfig -> Key -> (FilePath -> Annex (Bool, Verification)) -> Annex Bool
getViaTmpFromDisk rsp v key action = checkallowed $ do
	tmpfile <- prepTmp key
	resuming <- liftIO $ doesFileExist tmpfile
	(ok, verification) <- action tmpfile
	-- When the temp file already had content, we don't know if
	-- that content is good or not, so only trust the action's
	-- verification if it Verified it in passing. Otherwise, force
	-- verification even if the VerifyConfig normally disables it.
	let verification' = if resuming
		then case verification of
			Verified -> Verified
			_ -> MustVerify
		else verification
	if ok
		then ifM (verifyKeyContent rsp v verification' key tmpfile)
			( ifM (pruneTmpWorkDirBefore tmpfile (moveAnnex key))
				( do
					logStatus key InfoPresent
					return True
				, return False
				)
			, do
				warning "verification of content failed"
				-- The bad content is not retained, because
				-- a retry should not try to resume from it
				-- since it's apparently corrupted.
				-- Also, the bad content could be any data,
				-- including perhaps the content of another
				-- file than the one that was requested,
				-- and so it's best not to keep it on disk.
				pruneTmpWorkDirBefore tmpfile (liftIO . nukeFile)
				return False
			)
		-- On transfer failure, the tmp file is left behind, in case
		-- caller wants to resume its transfer
		else return False
  where
	-- Avoid running the action to get the content when the
	-- RetrievalSecurityPolicy would cause verification to always fail.
	checkallowed a = case rsp of
		RetrievalAllKeysSecure -> a
		RetrievalVerifiableKeysSecure
			| isVerifiable (fromKey keyVariety key) -> a
			| otherwise -> ifM (annexAllowUnverifiedDownloads <$> Annex.getGitConfig)
				( a
				, warnUnverifiableInsecure key >> return False
				)
|
2014-01-04 19:08:06 +00:00
|
|
|
|
|
Do verification of checksums of annex objects downloaded from remotes.
* When annex objects are received into git repositories, their checksums are
verified then too.
* To get the old, faster, behavior of not verifying checksums, set
annex.verify=false, or remote.<name>.annex-verify=false.
* setkey, rekey: These commands also now verify that the provided file
matches the key, unless annex.verify=false.
* reinject: Already verified content; this can now be disabled by
setting annex.verify=false.
recvkey and reinject already did verification, so removed now duplicate
code from them. fsck still does its own verification, which is ok since it
does not use getViaTmp, so verification doesn't happen twice when using fsck
--from.
2015-10-01 19:54:37 +00:00
|
|
|
|
{- Verifies that a file is the expected content of a key.
|
2018-06-21 17:34:11 +00:00
|
|
|
|
-
|
2015-10-02 16:38:02 +00:00
|
|
|
|
- Configuration can prevent verification, for either a
|
2018-06-21 17:34:11 +00:00
|
|
|
|
- particular remote or always, unless the RetrievalSecurityPolicy
|
|
|
|
|
- requires verification.
|
Do verification of checksums of annex objects downloaded from remotes.
* When annex objects are received into git repositories, their checksums are
verified then too.
* To get the old, faster, behavior of not verifying checksums, set
annex.verify=false, or remote.<name>.annex-verify=false.
* setkey, rekey: These commands also now verify that the provided file
matches the key, unless annex.verify=false.
* reinject: Already verified content; this can now be disabled by
setting annex.verify=false.
recvkey and reinject already did verification, so removed now duplicate
code from them. fsck still does its own verification, which is ok since it
does not use getViaTmp, so verification doesn't happen twice when using fsck
--from.
2015-10-01 19:54:37 +00:00
|
|
|
|
-
|
|
|
|
|
- Most keys have a known size, and if so, the file size is checked.
|
|
|
|
|
-
|
2018-06-21 17:34:11 +00:00
|
|
|
|
- When the key's backend allows verifying the content (via checksum),
|
2015-10-02 16:38:02 +00:00
|
|
|
|
- it is checked.
|
2018-06-21 17:34:11 +00:00
|
|
|
|
-
|
|
|
|
|
- If the RetrievalSecurityPolicy requires verification and the key's
|
|
|
|
|
- backend doesn't support it, the verification will fail.
|
Do verification of checksums of annex objects downloaded from remotes.
* When annex objects are received into git repositories, their checksums are
verified then too.
* To get the old, faster, behavior of not verifying checksums, set
annex.verify=false, or remote.<name>.annex-verify=false.
* setkey, rekey: These commands also now verify that the provided file
matches the key, unless annex.verify=false.
* reinject: Already verified content; this can now be disabled by
setting annex.verify=false.
recvkey and reinject already did verification, so removed now duplicate
code from them. fsck still does its own verification, which is ok since it
does not use getViaTmp, so verification doesn't happen twice when using fsck
--from.
2015-10-01 19:54:37 +00:00
|
|
|
|
-}
|
2018-06-21 17:34:11 +00:00
|
|
|
|
-- | Verifies that the content of a file matches a key.
--
-- When the 'Verification' indicates the content was already verified
-- as part of its transfer, no further work is done.
--
-- Under 'RetrievalVerifiableKeysSecure', a key whose variety is not
-- verifiable is refused, unless annex.security.allow-unverified-downloads
-- is configured to bypass the safety check.
--
-- Both the size (when the key records one) and the backend's content
-- check (when the backend has one) must pass. Verification runs in
-- 'VerifyStage' of the worker pool.
verifyKeyContent :: RetrievalSecurityPolicy -> VerifyConfig -> Verification -> Key -> FilePath -> Annex Bool
verifyKeyContent rsp v verification k f = case (rsp, verification) of
	(_, Verified) -> return True
	(RetrievalVerifiableKeysSecure, _)
		| isVerifiable (fromKey keyVariety k) -> verify
		| otherwise -> ifM (annexAllowUnverifiedDownloads <$> Annex.getGitConfig)
			( verify
			, warnUnverifiableInsecure k >> return False
			)
	(_, UnVerified) -> ifM (shouldVerify v)
		( verify
		, return True
		)
	(_, MustVerify) -> verify
  where
	verify = enteringStage VerifyStage $ verifysize <&&> verifycontent
	verifysize = case fromKey keySize k of
		Nothing -> return True
		Just size -> do
			-- A missing file reads as size 0, which will not
			-- match any nonzero recorded size.
			size' <- liftIO $ catchDefaultIO 0 $ getFileSize f
			return (size' == size)
	verifycontent = case Types.Backend.verifyKeyContent =<< Backend.maybeLookupBackendVariety (fromKey keyVariety k) of
		-- Backend without a content check; nothing more to do.
		Nothing -> return True
		Just verifier -> verifier k f
|
Do verification of checksums of annex objects downloaded from remotes.
* When annex objects are received into git repositories, their checksums are
verified then too.
* To get the old, faster, behavior of not verifying checksums, set
annex.verify=false, or remote.<name>.annex-verify=false.
* setkey, rekey: These commands also now verify that the provided file
matches the key, unless annex.verify=false.
* reinject: Already verified content; this can now be disabled by
setting annex.verify=false.
recvkey and reinject already did verification, so removed now duplicate
code from them. fsck still does its own verification, which is ok since it
does not use getViaTmp, so verification doesn't happen twice when using fsck
--from.
2015-10-01 19:54:37 +00:00
|
|
|
|
|
2018-06-21 17:34:11 +00:00
|
|
|
|
-- | Warns that a key of an unverifiable variety cannot be securely
-- downloaded from the current remote, and how to bypass the check.
warnUnverifiableInsecure :: Key -> Annex ()
warnUnverifiableInsecure k = warning $ unwords
	[ "Getting " ++ kv ++ " keys with this remote is not secure;"
	, "the content cannot be verified to be correct."
	, "(Use annex.security.allow-unverified-downloads to bypass"
	, "this safety check.)"
	]
  where
	kv = decodeBS (formatKeyVariety (fromKey keyVariety k))
|
2018-06-21 17:34:11 +00:00
|
|
|
|
|
other 80% of avoiding verification when hard linking to objects in shared repo
In c6632ee5c8e66c26ef18317f56ae02bae1e7e280, it actually only handled
uploading objects to a shared repository. To avoid verification when
downloading objects from a shared repository, was a lot harder.
On the plus side, if the process of downloading a file from a remote
is able to verify its content on the side, the remote can indicate this
now, and avoid the extra post-download verification.
As of yet, I don't have any remotes (except Git) using this ability.
Some more work would be needed to support it in special remotes.
It would make sense for tahoe to implicitly verify things downloaded from it;
as long as you trust your tahoe server (which typically runs locally),
there's cryptographic integrity. OTOH, despite bup being based on shas,
a bup repo under an attacker's control could have the git ref used for an
object changed, and so a bup repo shouldn't implicitly verify. Indeed,
tahoe seems unique in being trustworthy enough to implicitly verify.
2015-10-02 17:56:42 +00:00
|
|
|
|
-- | How to decide whether content received from a remote gets verified.
data VerifyConfig = AlwaysVerify | NoVerify | RemoteVerify Remote | DefaultVerify
|
Do verification of checksums of annex objects downloaded from remotes.
* When annex objects are received into git repositories, their checksums are
verified then too.
* To get the old, faster, behavior of not verifying checksums, set
annex.verify=false, or remote.<name>.annex-verify=false.
* setkey, rekey: These commands also now verify that the provided file
matches the key, unless annex.verify=false.
* reinject: Already verified content; this can now be disabled by
setting annex.verify=false.
recvkey and reinject already did verification, so removed now duplicate
code from them. fsck still does its own verification, which is ok since it
does not use getViaTmp, so verification doesn't happen twice when using fsck
--from.
2015-10-01 19:54:37 +00:00
|
|
|
|
|
other 80% of avoiding verification when hard linking to objects in shared repo
In c6632ee5c8e66c26ef18317f56ae02bae1e7e280, it actually only handled
uploading objects to a shared repository. To avoid verification when
downloading objects from a shared repository, was a lot harder.
On the plus side, if the process of downloading a file from a remote
is able to verify its content on the side, the remote can indicate this
now, and avoid the extra post-download verification.
As of yet, I don't have any remotes (except Git) using this ability.
Some more work would be needed to support it in special remotes.
It would make sense for tahoe to implicitly verify things downloaded from it;
as long as you trust your tahoe server (which typically runs locally),
there's cryptographic integrity. OTOH, despite bup being based on shas,
a bup repo under an attacker's control could have the git ref used for an
object changed, and so a bup repo shouldn't implicitly verify. Indeed,
tahoe seems unique in being trustworthy enough to implicitly verify.
2015-10-02 17:56:42 +00:00
|
|
|
|
-- | Resolves a 'VerifyConfig' to a decision, consulting annex.verify
-- and remote.<name>.annex-verify git configuration as needed.
shouldVerify :: VerifyConfig -> Annex Bool
shouldVerify AlwaysVerify = return True
shouldVerify NoVerify = return False
shouldVerify DefaultVerify = annexVerify <$> Annex.getGitConfig
shouldVerify (RemoteVerify r) =
	(shouldVerify DefaultVerify
		<&&> pure (remoteAnnexVerify (Types.Remote.gitconfig r)))
	-- Export remotes are not key/value stores, so always verify
	-- content from them even when verification is disabled.
	<||> Types.Remote.isExportSupported r
|
Do verification of checksums of annex objects downloaded from remotes.
* When annex objects are received into git repositories, their checksums are
verified then too.
* To get the old, faster, behavior of not verifying checksums, set
annex.verify=false, or remote.<name>.annex-verify=false.
* setkey, rekey: These commands also now verify that the provided file
matches the key, unless annex.verify=false.
* reinject: Already verified content; this can now be disabled by
setting annex.verify=false.
recvkey and reinject already did verification, so removed now duplicate
code from them. fsck still does its own verification, which is ok since it
does not use getViaTmp, so verification doesn't happen twice when using fsck
--from.
2015-10-01 19:54:37 +00:00
|
|
|
|
|
2015-10-01 18:13:53 +00:00
|
|
|
|
{- Checks if there is enough free disk space to download a key
 - to its temp file.
 -
 - When the temp file already exists, count the space it is using as
 - free, since the download will overwrite it or resume.
 -
 - When there's enough free space, runs the download action;
 - otherwise returns the fallback value.
 -}
checkDiskSpaceToGet :: Key -> a -> Annex a -> Annex a
checkDiskSpaceToGet key unabletoget getkey = do
	tmp <- fromRepo $ gitAnnexTmpObjectLocation key
	e <- liftIO $ doesFileExist tmp
	alreadythere <- liftIO $ if e
		then getFileSize tmp
		else return 0
	ifM (checkDiskSpace Nothing key alreadythere True)
		( do
			-- The tmp file may not have been left writable
			when e $ thawContent tmp
			getkey
		, return unabletoget
		)
|
2011-03-22 21:27:04 +00:00
|
|
|
|
|
2013-01-10 15:45:44 +00:00
|
|
|
|
-- | Gets the temp file location for a key, ensuring its parent
-- directory exists.
prepTmp :: Key -> Annex FilePath
prepTmp key = do
	tmp <- fromRepo $ gitAnnexTmpObjectLocation key
	createAnnexDirectory (parentDir tmp)
	return tmp
|
|
|
|
|
|
2017-11-29 17:49:52 +00:00
|
|
|
|
{- Prepares a temp file for a key, runs an action on it, and cleans up
 - the temp file. If the action throws an exception, the temp file is
 - left behind, which allows for resuming.
 -
 - (Intentionally not bracketed: leaving the temp file on exception is
 - the resume mechanism.)
 -}
withTmp :: Key -> (FilePath -> Annex a) -> Annex a
withTmp key action = do
	tmp <- prepTmp key
	res <- action tmp
	pruneTmpWorkDirBefore tmp (liftIO . nukeFile)
	return res
|
|
|
|
|
|
2012-12-07 18:40:31 +00:00
|
|
|
|
{- Moves a key's content into .git/annex/objects/
 -
 - When a key has associated pointer files, the object is hard
 - linked (or copied) to the files, and the object file is left thawed.
 -
 - What if the key there already has content? This could happen for
 - various reasons; perhaps the same content is being annexed again.
 - Perhaps there has been a hash collision generating the keys.
 -
 - The current strategy is to assume that in this case it's safe to delete
 - one of the two copies of the content; and the one already in the annex
 - is left there, assuming it's the original, canonical copy.
 -
 - I considered being more paranoid, and checking that both files had
 - the same content. Decided against it because A) users explicitly choose
 - a backend based on its hashing properties and so if they're dealing
 - with colliding files it's their own fault and B) adding such a check
 - would not catch all cases of colliding keys. For example, perhaps
 - a remote has a key; if it's then added again with different content then
 - the overall system now has two different pieces of content for that
 - key, and one of them will probably get deleted later. So, adding the
 - check here would only raise expectations that git-annex cannot truly
 - meet.
 -
 - May return false, when a particular variety of key is not being
 - accepted into the repository. Will display a warning message in this
 - case. May also throw exceptions in some cases.
 -}
moveAnnex :: Key -> FilePath -> Annex Bool
moveAnnex key src = ifM (checkSecureHashes key)
	( do
		withObjectLoc key storeobject
		return True
	, return False
	)
  where
	storeobject dest = ifM (liftIO $ R.doesPathExist dest)
		-- Object already present; keep the canonical copy.
		( alreadyhave
		, modifyContent dest' $ do
			freezeContent src
			liftIO $ moveFile src dest'
			g <- Annex.gitRepo
			fs <- map (`fromTopFilePath` g)
				<$> Database.Keys.getAssociatedFiles key
			unless (null fs) $ do
				ics <- mapM (populatePointerFile (Restage True) key dest) fs
				Database.Keys.storeInodeCaches' key [dest] (catMaybes ics)
		)
	  where
		dest' = fromRawFilePath dest
	alreadyhave = liftIO $ removeFile src
|
2011-07-15 07:12:05 +00:00
|
|
|
|
|
annex.securehashesonly
Cryptographically secure hashes can be forced to be used in a repository,
by setting annex.securehashesonly. This does not prevent the git repository
from containing files with insecure hashes, but it does prevent the content
of such files from being pulled into .git/annex/objects from another
repository.
We want to make sure that at no point does git-annex accept content into
.git/annex/objects that is hashed with an insecure key. Here's how it
was done:
* .git/annex/objects/xx/yy/KEY/ is kept frozen, so nothing can be
written to it normally
* So every place that writes content must call, thawContent or modifyContent.
We can audit for these, and be sure we've considered all cases.
* The main functions are moveAnnex, and linkToAnnex; these were made to
check annex.securehashesonly, and are the main security boundary
for annex.securehashesonly.
* Most other calls to modifyContent deal with other files in the KEY
directory (inode cache etc). The other ones that mess with the content
are:
- Annex.Direct.toDirectGen, in which content already in the
annex directory is moved to the direct mode file, so not relevant.
- fix and lock, which don't add new content
- Command.ReKey.linkKey, which manually unlocks it to make a
copy.
* All other calls to thawContent appear safe.
Made moveAnnex return a Bool, so checked all callsites and made them
deal with a failure in appropriate ways.
linkToAnnex simply returns LinkAnnexFailed; all callsites already deal
with it failing in appropriate ways.
This commit was sponsored by Riku Voipio.
2017-02-27 17:01:32 +00:00
|
|
|
|
-- | Checks whether a key's variety is allowed into the annex objects.
--
-- Cryptographically secure key varieties always pass. Other varieties
-- are refused (with a warning) when annex.securehashesonly is set.
-- This is the security boundary for annex.securehashesonly; every
-- code path that adds content to the annex must call it.
checkSecureHashes :: Key -> Annex Bool
checkSecureHashes key
	| cryptographicallySecure (fromKey keyVariety key) = return True
	| otherwise = ifM (annexSecureHashesOnly <$> Annex.getGitConfig)
		( do
			warning $ "annex.securehashesonly blocked adding " ++ decodeBS (formatKeyVariety (fromKey keyVariety key)) ++ " key to annex objects"
			return False
		, return True
		)
|
|
|
|
|
|
2015-12-27 19:59:59 +00:00
|
|
|
|
-- | Outcome of linking to/from the annex object file.
data LinkAnnexResult = LinkAnnexOk | LinkAnnexFailed | LinkAnnexNoop
|
|
|
|
|
|
|
|
|
|
{- Populates the annex object file by hard linking or copying a source
 - file to it.
 -
 - Refused (LinkAnnexFailed) when checkSecureHashes does not accept
 - the key's variety.
 -}
linkToAnnex :: Key -> FilePath -> Maybe InodeCache -> Annex LinkAnnexResult
linkToAnnex key src srcic = ifM (checkSecureHashes key)
	( do
		dest <- fromRawFilePath <$> calcRepo (gitAnnexLocation key)
		modifyContent dest $ linkAnnex To key src srcic dest Nothing
	, return LinkAnnexFailed
	)
|
2015-12-27 19:59:59 +00:00
|
|
|
|
|
|
|
|
|
{- Makes a destination file be a link or copy from the annex object.
 -
 - An inode cache for the annex object is generated first, and passed
 - along to linkAnnex, which uses it to detect modification of the
 - object while the link/copy is made.
 -}
linkFromAnnex :: Key -> FilePath -> Maybe FileMode -> Annex LinkAnnexResult
linkFromAnnex key dest destmode = do
	src <- calcRepo (gitAnnexLocation key)
	srcic <- withTSDelta (liftIO . genInodeCache src)
	linkAnnex From key (fromRawFilePath src) srcic dest destmode
-- Direction of a linkAnnex operation, relative to the annex object.
data FromTo = From | To

{- Hard links or copies from or to the annex object location.
 - Updates inode cache.
 -
 - Freezes or thaws the destination appropriately.
 -
 - When a hard link is made, the annex object necessarily has to be thawed
 - too. So, adding an object to the annex with a hard link can prevent
 - losing the content if the source file is deleted, but does not
 - guard against modifications.
 -
 - Nothing is done if the destination file already exists.
 -}
linkAnnex :: FromTo -> Key -> FilePath -> Maybe InodeCache -> FilePath -> Maybe FileMode -> Annex LinkAnnexResult
linkAnnex _ _ _ Nothing _ _ = return LinkAnnexFailed
linkAnnex fromto key src (Just srcic) dest destmode =
	withTSDelta (liftIO . genInodeCache dest') >>= \case
		-- Destination already exists; record inode caches and do
		-- nothing else.
		Just destic -> do
			cs <- Database.Keys.getInodeCaches key
			if null cs
				then Database.Keys.addInodeCaches key [srcic, destic]
				else Database.Keys.addInodeCaches key [srcic]
			return LinkAnnexNoop
		Nothing -> linkOrCopy key src dest destmode >>= \case
			Nothing -> failed
			Just r -> do
				case fromto of
					From -> thawContent dest
					To -> case r of
						-- A copy is an independent file,
						-- so freeze it; a hard link shares
						-- the (already thawed) object.
						Copied -> freezeContent dest
						Linked -> noop
				checksrcunchanged
  where
	dest' = toRawFilePath dest
	failed = do
		Database.Keys.addInodeCaches key [srcic]
		return LinkAnnexFailed
	-- The source could have been modified while the link/copy was
	-- being made; verify it against the starting inode cache, and
	-- undo the link/copy if it changed.
	checksrcunchanged = withTSDelta (liftIO . genInodeCache (toRawFilePath src)) >>= \case
		Just srcic' | compareStrong srcic srcic' -> do
			destic <- withTSDelta (liftIO . genInodeCache dest')
			Database.Keys.addInodeCaches key $
				catMaybes [destic, Just srcic]
			return LinkAnnexOk
		_ -> do
			liftIO $ nukeFile dest
			failed
{- Removes the annex object file for a key. Lowlevel.
 -
 - Uses modifyContent since the object directory is kept frozen, and
 - securely erases the content before removing the file.
 -}
unlinkAnnex :: Key -> Annex ()
unlinkAnnex key = do
	obj <- fromRawFilePath <$> calcRepo (gitAnnexLocation key)
	modifyContent obj $ do
		secureErase obj
		liftIO $ nukeFile obj
{- Runs an action to transfer an object's content.
 -
 - In some cases, it's possible for the file to change as it's being sent.
 - If this happens, runs the rollback action and throws an exception.
 - The rollback action should remove the data that was transferred.
 -}
sendAnnex :: Key -> Annex () -> (FilePath -> Annex a) -> Annex a
sendAnnex key rollback sendobject = go =<< prepSendAnnex key
  where
	go (Just (f, checksuccess)) = do
		r <- sendobject f
		-- checksuccess detects whether the content changed while
		-- it was being sent.
		unlessM checksuccess $ do
			rollback
			giveup "content changed while it was being sent"
		return r
	go Nothing = giveup "content not available to send"
{- Returns a file that contains an object's content,
 - and a check to run after the transfer is complete.
 -
 - When a file is unlocked, it's possble for its content to
 - change as it's being sent. The check detects this case
 - and returns False.
 -
 - Note that the returned check action is, in some cases, run in the
 - Annex monad of the remote that is receiving the object, rather than
 - the sender. So it cannot rely on Annex state.
 -}
prepSendAnnex :: Key -> Annex (Maybe (FilePath, Annex Bool))
prepSendAnnex key = withObjectLoc key $ \f -> do
	cache <- Database.Keys.getInodeCaches key
	cache' <- if null cache
		-- Since no inode cache is in the database, this
		-- object is not currently unlocked. But that could
		-- change while the transfer is in progress, so
		-- generate an inode cache for the starting
		-- content.
		then maybeToList <$>
			withTSDelta (liftIO . genInodeCache f)
		else pure cache
	return $ if null cache'
		then Nothing
		else Just (fromRawFilePath f, sameInodeCache f cache')
{- Performs an action, passing it the location to use for a key's content. -}
withObjectLoc :: Key -> (RawFilePath -> Annex a) -> Annex a
withObjectLoc key a = a =<< calcRepo (gitAnnexLocation key)
{- Runs a cleaner action on a key's object location, then removes the
 - now-empty parent directories (the xx/yy/KEY hierarchy, 3 levels).
 -
 - The content directory is thawed first so the cleaner can remove
 - things from it; a failure to thaw is ignored (best effort).
 -}
cleanObjectLoc :: Key -> Annex () -> Annex ()
cleanObjectLoc key cleaner = do
	file <- fromRawFilePath <$> calcRepo (gitAnnexLocation key)
	void $ tryIO $ thawContentDir file
	cleaner
	liftIO $ removeparents file (3 :: Int)
  where
	removeparents _ 0 = noop
	removeparents file n = do
		let dir = parentDir file
		-- Stop as soon as a directory fails to be removed
		-- (eg, because it is not empty).
		maybe noop (const $ removeparents dir (n-1))
			<=< catchMaybeIO $ removeDirectory dir
{- Removes a key's file from .git/annex/objects/
 -
 - Holding the ContentRemovalLock is required, so concurrent users of
 - the content cannot race this removal. Associated pointer files are
 - reset when unmodified, and the key's inode caches are dropped.
 -}
removeAnnex :: ContentRemovalLock -> Annex ()
removeAnnex (ContentRemovalLock key) = withObjectLoc key $ \file ->
	cleanObjectLoc key $ do
		let file' = fromRawFilePath file
		secureErase file'
		liftIO $ nukeFile file'
		g <- Annex.gitRepo
		mapM_ (\f -> void $ tryIO $ resetpointer $ fromTopFilePath f g)
			=<< Database.Keys.getAssociatedFiles key
		Database.Keys.removeInodeCaches key
  where
	-- Check associated pointer file for modifications, and reset if
	-- it's unmodified.
	resetpointer file = ifM (isUnmodified key file)
		( depopulatePointerFile key file
		-- Modified file, so leave it alone.
		-- If it was a hard link to the annex object,
		-- that object might have been frozen as part of the
		-- removal process, so thaw it.
		, void $ tryIO $ thawContent $ fromRawFilePath file
		)
{- Check if a file contains the unmodified content of the key.
 -
 - The expensive way to tell is to do a verification of its content.
 - The cheaper way is to see if the InodeCache for the key matches the
 - file. -}
isUnmodified :: Key -> RawFilePath -> Annex Bool
isUnmodified key f = go =<< geti
  where
	go Nothing = return False
	-- Try the cheap inode-cache comparison first, falling back to
	-- full content verification.
	go (Just fc) = isUnmodifiedCheap' key fc <||> expensivecheck fc
	expensivecheck fc = ifM (verifyKeyContent RetrievalAllKeysSecure AlwaysVerify UnVerified key (fromRawFilePath f))
		( do
			-- The file could have been modified while it was
			-- being verified. Detect that.
			ifM (geti >>= maybe (return False) (compareInodeCaches fc))
				( do
					-- Update the InodeCache to avoid
					-- performing this expensive check again.
					Database.Keys.addInodeCaches key [fc]
					return True
				, return False
				)
		, return False
		)
	geti = withTSDelta (liftIO . genInodeCache f)
{- Cheap check if a file contains the unmodified content of the key,
 - only checking the InodeCache of the key.
 -
 - Note that, on systems not supporting high-resolution mtimes,
 - this may report a false positive when repeated edits are made to a file
 - within a small time window (eg 1 second).
 -}
isUnmodifiedCheap :: Key -> RawFilePath -> Annex Bool
isUnmodifiedCheap key f = maybe (return False) (isUnmodifiedCheap' key)
	=<< withTSDelta (liftIO . genInodeCache f)

-- | Variant of 'isUnmodifiedCheap' that takes an already-generated
-- InodeCache for the file, and compares it against the key's known caches.
isUnmodifiedCheap' :: Key -> InodeCache -> Annex Bool
isUnmodifiedCheap' key fc =
	anyM (compareInodeCaches fc) =<< Database.Keys.getInodeCaches key
{- Moves a key out of .git/annex/objects/ into .git/annex/bad, and
 - returns the file it was moved to. -}
moveBad :: Key -> Annex FilePath
moveBad key = do
	src <- fromRawFilePath <$> calcRepo (gitAnnexLocation key)
	bad <- fromRepo gitAnnexBadDir
	let dest = bad </> takeFileName src
	createAnnexDirectory (parentDir dest)
	cleanObjectLoc key $
		liftIO $ moveFile src dest
	-- Record that this repository no longer has the content.
	logStatus key InfoMissing
	return dest
-- Which keys to list from .git/annex/objects.
data KeyLocation = InAnnex | InAnywhere

{- InAnnex only lists keys with content in .git/annex/objects.
 - InAnywhere lists all keys that have directories in
 - .git/annex/objects, whether or not the content is present.
 -}
listKeys :: KeyLocation -> Annex [Key]
listKeys keyloc = do
	dir <- fromRepo gitAnnexObjectDir
	{- In order to run Annex monad actions within unsafeInterleaveIO,
	 - the current state is taken and reused. No changes made to this
	 - state will be preserved.
	 -}
	s <- Annex.getState id
	depth <- gitAnnexLocationDepth <$> Annex.getGitConfig
	liftIO $ walk s depth dir
  where
	walk s depth dir = do
		contents <- catchDefaultIO [] (dirContents dir)
		if depth < 2
			-- At the bottom of the hash directory hierarchy;
			-- the entries are key directories.
			then do
				contents' <- filterM (present s) contents
				let keys = mapMaybe (fileKey . P.takeFileName . toRawFilePath) contents'
				continue keys []
			else do
				let deeper = walk s (depth - 1)
				continue [] (map deeper contents)
	continue keys [] = return keys
	continue keys (a:as) = do
		{- Force lazy traversal with unsafeInterleaveIO. -}
		morekeys <- unsafeInterleaveIO a
		continue (morekeys++keys) as

	inanywhere = case keyloc of
		InAnywhere -> True
		_ -> False

	present _ _ | inanywhere = pure True
	present _ d = presentInAnnex d

	presentInAnnex = doesFileExist . contentfile
	contentfile d = d </> takeFileName d
{- Things to do to record changes to content when shutting down.
 -
 - It's acceptable to avoid committing changes to the branch,
 - especially if performing a short-lived action.
 -}
saveState :: Bool -> Annex ()
saveState nocommit = doSideAction $ do
	Annex.Queue.flush
	Database.Keys.closeDb
	unless nocommit $
		whenM (annexAlwaysCommit <$> Annex.getGitConfig) $
			Annex.Branch.commit =<< Annex.Branch.commitMessage
{- Downloads content from any of a list of urls, displaying a progress
 - meter. Tries each url in turn until one succeeds. -}
downloadUrl :: Key -> MeterUpdate -> [Url.URLString] -> FilePath -> Url.UrlOptions -> Annex Bool
downloadUrl k p urls file uo =
	-- Poll the file to handle configurations where an external
	-- download command is used.
	meteredFile file (Just p) k $
		anyM (\u -> Url.download p u file uo) urls
{- Copies a key's content, when present, to a temp file.
 - This is used to speed up some rsyncs. -}
preseedTmp :: Key -> FilePath -> Annex Bool
preseedTmp key file = go =<< inAnnex key
  where
	go False = return False
	go True = do
		ok <- copy
		-- The temp file will be modified, so thaw it.
		when ok $ thawContent file
		return ok
	copy = ifM (liftIO $ doesFileExist file)
		( return True
		, do
			s <- fromRawFilePath <$> (calcRepo $ gitAnnexLocation key)
			liftIO $ ifM (doesFileExist s)
				( copyFileExternal CopyTimeStamps s file
				, return False
				)
		)
{- Finds files directly inside a directory like gitAnnexBadDir
 - (not in subdirectories) and returns the corresponding keys. -}
dirKeys :: (Git.Repo -> FilePath) -> Annex [Key]
dirKeys dirspec = do
	dir <- fromRepo dirspec
	ifM (liftIO $ doesDirectoryExist dir)
		( do
			contents <- liftIO $ getDirectoryContents dir
			files <- liftIO $ filterM doesFileExist $
				map (dir </>) contents
			return $ mapMaybe (fileKey . P.takeFileName . toRawFilePath) files
		, return []
		)
{- Looks in the specified directory for bad/tmp keys, and returns a list
 - of those that might still have value, or might be stale and removable.
 -
 - Also, stale keys that can be proven to have no value
 - (ie, their content is already present) are deleted.
 -}
staleKeysPrune :: (Git.Repo -> FilePath) -> Bool -> Annex [Key]
staleKeysPrune dirspec nottransferred = do
	contents <- dirKeys dirspec

	-- Keys whose content is already in the annex are duplicates,
	-- and can be deleted; the rest are stale.
	dups <- filterM inAnnex contents
	let stale = contents `exclude` dups

	dir <- fromRepo dirspec
	forM_ dups $ \k ->
		pruneTmpWorkDirBefore (dir </> fromRawFilePath (keyFile k))
			(liftIO . removeFile)

	if nottransferred
		then do
			-- Exclude keys that are currently being transferred.
			inprogress <- S.fromList . map (transferKey . fst)
				<$> getTransfers
			return $ filter (`S.notMember` inprogress) stale
		else return stale
{- Prune the work dir associated with the specified content file,
 - before performing an action that deletes the file, or moves it away.
 -
 - This preserves the invariant that the workdir never exists without
 - the content file.
 -}
pruneTmpWorkDirBefore :: FilePath -> (FilePath -> Annex a) -> Annex a
pruneTmpWorkDirBefore f action = do
	let workdir = gitAnnexTmpWorkDir f
	liftIO $ whenM (doesDirectoryExist workdir) $
		removeDirectoryRecursive workdir
	action f
{- Runs an action, passing it a temporary work directory where
 - it can write files while receiving the content of a key.
 -
 - Preserves the invariant that the workdir never exists without the
 - content file, by creating an empty content file first.
 -
 - On exception, or when the action returns Nothing,
 - the temporary work directory is retained (unless
 - empty), so anything in it can be used on resume.
 -}
withTmpWorkDir :: Key -> (FilePath -> Annex (Maybe a)) -> Annex (Maybe a)
withTmpWorkDir key action = do
	-- Create the object file if it does not exist. This way,
	-- staleKeysPrune only has to look for object files, and can
	-- clean up gitAnnexTmpWorkDir for those it finds.
	obj <- prepTmp key
	unlessM (liftIO $ doesFileExist obj) $ do
		liftIO $ writeFile obj ""
		setAnnexFilePerm obj
	let tmpdir = gitAnnexTmpWorkDir obj
	createAnnexDirectory tmpdir
	res <- action tmpdir
	case res of
		Just _ -> liftIO $ removeDirectoryRecursive tmpdir
		-- removeDirectory only succeeds when the workdir is
		-- empty; otherwise it is retained for resuming.
		Nothing -> liftIO $ void $ tryIO $ removeDirectory tmpdir
	return res
{- Finds items in the first, smaller list, that are not
 - present in the second, larger list.
 -
 - Constructing a single set, of the list that tends to be
 - smaller, appears more efficient in both memory and CPU
 - than constructing and taking the S.difference of two sets.
 -
 - Note that the result comes out of a Set, so it is in ascending
 - order and contains no duplicates.
 -}
exclude :: Ord a => [a] -> [a] -> [a]
exclude [] _ = [] -- optimisation
exclude smaller larger = S.toList $ remove larger $ S.fromList smaller
  where
	-- Delete each element of the list from the set in turn.
	remove a b = foldl (flip S.delete) b a