{- git-annex file content managing
 -
 - Copyright 2010-2024 Joey Hess <id@joeyh.name>
 -
 - Licensed under the GNU AGPL version 3 or higher.
 -}

{-# LANGUAGE CPP #-}
{-# LANGUAGE OverloadedStrings #-}

module Annex.Content (
	inAnnex,
	inAnnex',
	inAnnexSafe,
	inAnnexCheck,
	objectFileExists,
	lockContentShared,
	lockContentForRemoval,
	ContentRemovalLock,
	RetrievalSecurityPolicy(..),
	getViaTmp,
	getViaTmpFromDisk,
	verificationOfContentFailed,
	checkDiskSpaceToGet,
	checkSecureHashes,
	prepTmp,
	withTmp,
	checkDiskSpace,
	needMoreDiskSpace,
	moveAnnex,
	populatePointerFile,
	linkToAnnex,
	linkFromAnnex,
	linkFromAnnex',
	LinkAnnexResult(..),
	unlinkAnnex,
	checkedCopyFile,
	linkOrCopy,
	linkOrCopy',
	sendAnnex,
	prepSendAnnex,
	prepSendAnnex',
	removeAnnex,
	moveBad,
	KeyLocation(..),
	listKeys,
	listKeys',
	saveState,
	downloadUrl,
	preseedTmp,
	dirKeys,
	withObjectLoc,
	staleKeysPrune,
	pruneTmpWorkDirBefore,
	isUnmodified,
	isUnmodifiedCheap,
	verifyKeyContentPostRetrieval,
	verifyKeyContent,
	VerifyConfig,
	VerifyConfigA(..),
	Verification(..),
	unVerified,
	withTmpWorkDir,
	KeyStatus(..),
	isKeyUnlockedThin,
	getKeyStatus,
	getKeyFileStatus,
	cleanObjectDirs,
	contentSize,
) where

import System.IO.Unsafe (unsafeInterleaveIO)
import qualified Data.Set as S

import Annex.Common
import Annex.Content.Presence
import Annex.Content.LowLevel
import Annex.Content.PointerFile
import Annex.Verify
import qualified Git
import qualified Annex
import qualified Annex.Queue
import qualified Annex.Branch
import qualified Annex.Url as Url
import qualified Backend
import qualified Database.Keys
import Git.FilePath
import Annex.Perms
import Annex.Link
import Annex.LockPool
import Annex.LockFile
import Annex.UUID
import Annex.InodeSentinal
import Annex.ReplaceFile
import Annex.AdjustedBranch (adjustedBranchRefresh)
import Annex.DirHashes
import Messages.Progress
import Types.Remote (RetrievalSecurityPolicy(..), VerifyConfigA(..))
import Types.NumCopies
import Types.Key
import Types.Transfer
import Logs.Transfer
import Logs.Location
import Utility.InodeCache
import Utility.CopyFile
import Utility.Metered
import Utility.HumanTime
import Utility.TimeStamp
#ifndef mingw32_HOST_OS
import Utility.FileMode
#endif
import qualified Utility.RawFilePath as R

import qualified System.FilePath.ByteString as P
import System.PosixCompat.Files (isSymbolicLink, linkCount)
import Data.Time.Clock.POSIX
{- Prevents the content from being removed while the action is running.
 - Uses a shared lock.
 -
 - If locking fails, or the content is not present, throws an exception
 - rather than running the action.
 -
 - When a Duration is provided, the content is prevented from being removed
 - for that amount of time, even if the current process is terminated.
 - (This is only done when using a separate lock file from the content
 - file, eg in v10 and higher repositories.)
 -}
lockContentShared :: Key -> Maybe Duration -> (VerifiedCopy -> Annex a) -> Annex a
lockContentShared key mduration a = do
	retention <- case mduration of
		Nothing -> pure Nothing
		Just duration -> do
			rt <- calcRepo (gitAnnexContentRetentionTimestamp key)
			now <- liftIO getPOSIXTime
			pure $ Just
				( rt
				, now + fromIntegral (durationSeconds duration)
				)
	lockContentUsing (lock retention) key notpresent $
		ifM (inAnnex key)
			( do
				u <- getUUID
				withVerifiedCopy LockedCopy u (return (Right True)) a
			, notpresent
			)
  where
	notpresent = giveup $ "failed to lock content: not present"
#ifndef mingw32_HOST_OS
	lock retention _ (Just lockfile) =
		( posixLocker tryLockShared lockfile >>= \case
			Just lck -> do
				writeretention retention
				return (Just lck)
			Nothing -> return Nothing
		, Just $ posixLocker tryLockExclusive lockfile >>= \case
			Just lck -> do
				dropretention retention
				return (Just lck)
			Nothing -> return Nothing
		)
	lock _ contentfile Nothing =
		( tryLockShared Nothing contentfile
		, Nothing
		)
#else
	lock retention obj lckf =
		let (locker, postunlock) = winLocker lockShared obj lckf
		in
			( locker >>= \case
				Just lck -> do
					writeretention retention
					return (Just lck)
				Nothing -> return Nothing
			, Just $ \lckfile -> do
				maybe noop (\pu -> pu lckfile) postunlock
				lockdropretention obj lckf retention
			)

	lockdropretention _ _ Nothing = noop
	lockdropretention obj lckf retention = do
		-- In order to dropretention, have to
		-- take an exclusive lock.
		let (exlocker, expostunlock) =
			winLocker lockExclusive obj lckf
		exlocker >>= \case
			Nothing -> noop
			Just lck -> do
				dropretention retention
				liftIO $ dropLock lck
		case (expostunlock, lckf) of
			(Just pu, Just f) -> pu f
			_ -> noop
#endif

	writeretention Nothing = noop
	writeretention (Just (rt, retentionts)) =
		writeContentRetentionTimestamp key rt retentionts

	-- When this is called, an exclusive lock has been taken, so no other
	-- processes can be writing to the retention time stamp file.
	-- The timestamp in the file may have been written by this
	-- call to lockContentShared or a later call. Only delete the file
	-- in the former case.
	dropretention Nothing = noop
	dropretention (Just (rt, retentionts)) =
		readContentRetentionTimestamp rt >>= \case
			Just ts | ts == retentionts ->
				removeRetentionTimeStamp key rt
			_ -> noop
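{- A usage sketch, not code from this module: a P2P server could hold a
 - shared lock with a retention Duration, so that if the serving process is
 - killed before it unlocks, the content still cannot be dropped until the
 - Duration expires. serveLockContent and answerPeer are hypothetical names,
 - and the Duration constructor is assumed to wrap a number of seconds.
 -
 - > serveLockContent :: Key -> Annex ()
 - > serveLockContent key =
 - >	lockContentShared key (Just (Duration 600)) $ \verifiedcopy ->
 - >		answerPeer verifiedcopy
 -}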
{- Exclusively locks content, including checking the retention timestamp,
 - while performing an action that might remove it.
 -
 - If locking fails, throws an exception rather than running the action.
 -
 - When the content file itself is used as the lock file,
 - and locking fails because the content is not present, runs the
 - fallback action instead. However, the content is not guaranteed to be
 - present when this succeeds.
 -}
lockContentForRemoval :: Key -> Annex a -> (ContentRemovalLock -> Annex a) -> Annex a
lockContentForRemoval key fallback a = lockContentUsing lock key fallback $
	a (ContentRemovalLock key)
  where
#ifndef mingw32_HOST_OS
	lock _ (Just lockfile) =
		( checkRetentionTimestamp key
			(posixLocker tryLockExclusive lockfile)
		, Nothing
		)
	{- No lock file, so the content file itself is locked.
	 - Since content files are stored with the write bit
	 - disabled, have to fiddle with permissions to open
	 - for an exclusive lock. -}
	lock contentfile Nothing =
		let lck = bracket_
			(thawContent contentfile)
			(freezeContent contentfile)
			(tryLockExclusive Nothing contentfile)
		in (lck, Nothing)
#else
	lock obj lckf =
		let (exlocker, expostunlock) =
			winLocker lockExclusive obj lckf
		in (checkRetentionTimestamp key exlocker, expostunlock)
#endif
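{- A usage sketch, not code from this module: a caller removes a key's
 - content only while holding the exclusive ContentRemovalLock, falling back
 - to the given action when the content file is already gone. dropLocal is a
 - hypothetical name; removeAnnex is assumed to accept the ContentRemovalLock
 - passed to the callback.
 -
 - > dropLocal :: Key -> Annex () -> Annex ()
 - > dropLocal key whennotpresent =
 - >	lockContentForRemoval key whennotpresent $ \lck ->
 - >		removeAnnex lck
 -}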
{- Passed the object content file, and maybe a separate lock file to use,
 - when the content file itself should not be locked. -}
type ContentLocker
	= RawFilePath
	-> Maybe LockFile
	->
		( Annex (Maybe LockHandle)
		-- ^ Takes the lock, which may be shared or exclusive.
#ifndef mingw32_HOST_OS
		, Maybe (Annex (Maybe LockHandle))
		-- ^ When the above takes a shared lock, this is used
		-- to take an exclusive lock, after dropping the shared lock,
		-- and prior to deleting the lock file, in order to
		-- ensure that no other processes also have a shared lock.
#else
		, Maybe (RawFilePath -> Annex ())
		-- ^ On Windows, this is called after the lock is dropped,
		-- but before the lock file is cleaned up.
#endif
		)
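{- For illustration only: a minimal ContentLocker that takes a shared lock
 - on the separate lock file and needs no second action. It mirrors the
 - non-Windows lock helpers above; sharedLocker is a hypothetical name.
 -
 - > sharedLocker :: ContentLocker
 - > sharedLocker _contentfile (Just lockfile) =
 - >	( posixLocker tryLockShared lockfile
 - >	, Nothing
 - >	)
 - > sharedLocker _contentfile Nothing = (return Nothing, Nothing)
 -}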
#ifndef mingw32_HOST_OS
posixLocker :: (Maybe ModeSetter -> LockFile -> Annex (Maybe LockHandle)) -> LockFile -> Annex (Maybe LockHandle)
posixLocker takelock lockfile = do
	mode <- annexFileMode
	modifyContentDirWhenExists lockfile $
		takelock (Just mode) lockfile
#else
winLocker :: (LockFile -> IO (Maybe LockHandle)) -> ContentLocker
winLocker takelock _ (Just lockfile) =
	let lck = do
		modifyContentDir lockfile $
			void $ liftIO $ tryIO $
				writeFile (fromRawFilePath lockfile) ""
		liftIO $ takelock lockfile
	in (lck, Nothing)
-- never reached; windows always uses a separate lock file
winLocker _ _ Nothing = (return Nothing, Nothing)
#endif
{- The fallback action is run if the ContentLocker throws an IO exception
 - and the content is not present. It's not guaranteed to always run when
 - the content is not present, because the content file is not always
 - the file that is locked. -}
lockContentUsing :: ContentLocker -> Key -> Annex a -> Annex a -> Annex a
lockContentUsing contentlocker key fallback a = withContentLockFile key $ \mlockfile -> do
	contentfile <- calcRepo (gitAnnexLocation key)
	let (locker, sharedtoexclusive) = contentlocker contentfile mlockfile
	bracket
		(lock locker mlockfile)
		(either (const noop) (unlock sharedtoexclusive mlockfile))
		go
  where
	alreadylocked = giveup "content is locked"
	failedtolock e = giveup $ "failed to lock content: " ++ show e

#ifndef mingw32_HOST_OS
	lock locker mlockfile =
#else
	lock locker _mlockfile =
#endif
		tryIO $ locker >>= \case
			Nothing -> alreadylocked
			Just h ->
#ifndef mingw32_HOST_OS
				case mlockfile of
					Nothing -> return h
					Just lockfile ->
						ifM (checkSaneLock lockfile h)
							( return h
							, alreadylocked
							)
#else
				return h
#endif

	go (Right _) = a
	go (Left e) = ifM (inAnnex key)
		( failedtolock e
		, fallback
		)

#ifndef mingw32_HOST_OS
	unlock sharedtoexclusive mlockfile lck = case (sharedtoexclusive, mlockfile) of
		-- We have a shared lock, so other processes may also
		-- have shared locks of the same lock file. To avoid
		-- deleting the lock file when there are other shared
		-- locks, try to convert to an exclusive lock, and only
		-- delete it when that succeeds.
		--
		-- Since other processes might be doing the same,
		-- a race is possible where we open the lock file
		-- and then another process takes the exclusive lock and
		-- deletes it, leaving us with an invalid lock. To avoid
		-- that race, checkSaneLock is used after taking the lock
		-- here, and above.
		(Just exclusivelocker, Just lockfile) -> do
			liftIO $ dropLock lck
			exclusivelocker >>= \case
				Nothing -> return ()
				Just h -> do
					whenM (checkSaneLock lockfile h) $ do
						cleanuplockfile lockfile
					liftIO $ dropLock h
		-- We have an exclusive lock, so no other process can have
		-- the lock file locked, and so it's safe to remove it, as
		-- long as all lock attempts use checkSaneLock.
		_ -> do
			maybe noop cleanuplockfile mlockfile
			liftIO $ dropLock lck
#else
	unlock postunlock mlockfile lck = do
		-- Can't delete a locked file on Windows,
		-- so close our lock first. If there are other shared
		-- locks, they will prevent the lock file deletion from
		-- happening.
		liftIO $ dropLock lck
		case mlockfile of
			Nothing -> noop -- never reached
			Just lockfile -> do
				maybe noop (\pu -> pu lockfile) postunlock
				cleanuplockfile lockfile
#endif

	cleanuplockfile lockfile = void $ tryNonAsync $ do
		thawContentDir lockfile
		liftIO $ removeWhenExistsWith R.removeLink lockfile
		cleanObjectDirs lockfile
|
2015-10-08 18:27:37 +00:00
|
|
|
|
2015-10-01 18:13:53 +00:00
|
|
|
{- Runs an action, passing it the temp file to get,
 - and if the action succeeds, verifies the file matches
 - the key and moves the file into the annex as a key's content. -}
getViaTmp :: RetrievalSecurityPolicy -> VerifyConfig -> Key -> AssociatedFile -> Maybe FileSize -> (RawFilePath -> Annex (Bool, Verification)) -> Annex Bool
getViaTmp rsp v key af sz action =
	checkDiskSpaceToGet key sz False $
		getViaTmpFromDisk rsp v key af action

{- Like getViaTmp, but does not check that there is enough disk space
 - for the incoming key. For use when the key content is already on disk
 - and not being copied into place. -}
getViaTmpFromDisk :: RetrievalSecurityPolicy -> VerifyConfig -> Key -> AssociatedFile -> (RawFilePath -> Annex (Bool, Verification)) -> Annex Bool
getViaTmpFromDisk rsp v key af action = checkallowed $ do
	tmpfile <- prepTmp key
	resuming <- liftIO $ R.doesPathExist tmpfile
	(ok, verification) <- action tmpfile
	-- When the temp file already had content, we don't know if
	-- that content is good or not, so only trust it if the action
	-- Verified it in passing. Otherwise, force verification even
	-- if the VerifyConfig normally disables it.
	let verification' = if resuming
		then case verification of
			Verified -> Verified
			_ -> MustVerify
		else verification
	if ok
		then ifM (verifyKeyContentPostRetrieval rsp v verification' key tmpfile)
			( pruneTmpWorkDirBefore tmpfile (moveAnnex key af)
			, do
				verificationOfContentFailed tmpfile
				return False
			)
		-- On transfer failure, the tmp file is left behind, in case
		-- caller wants to resume its transfer
		else return False
  where
	-- Avoid running the action to get the content when the
	-- RetrievalSecurityPolicy would cause verification to always fail.
	checkallowed a = case rsp of
		RetrievalAllKeysSecure -> a
		RetrievalVerifiableKeysSecure -> ifM (isVerifiable key)
			( a
			, ifM (annexAllowUnverifiedDownloads <$> Annex.getGitConfig)
				( a
				, warnUnverifiableInsecure key >> return False
				)
			)

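{- Usage sketch (not a real call site; someVerifyConfig and
 - fetchFromRemote are hypothetical placeholders): a retrieval action is
 - handed the temp file, reports whether it succeeded and whether it
 - verified the content in passing, and getViaTmp then verifies as
 - needed and moves the object into the annex.
 -
 -   downloadKey :: Key -> AssociatedFile -> Annex Bool
 -   downloadKey key af =
 -   	getViaTmp RetrievalAllKeysSecure someVerifyConfig key af Nothing $ \tmpfile -> do
 -   		ok <- fetchFromRemote key tmpfile
 -   		return (ok, UnVerified)
 -
 - Returning UnVerified (rather than Verified) leaves post-retrieval
 - verification to run according to the VerifyConfig.
 -}
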
{- When the content of a file that was successfully transferred from a remote
 - fails to verify, use this to display a message so the user knows why it
 - failed, and to clean up the corrupted content.
 -
 - The bad content is not retained, because the transfer of it succeeded.
 - So it's not incomplete and a resume using it will not work. While
 - some protocols like rsync could recover such a bad content file,
 - they are assumed to not write out bad data to a file in the first place.
 - Most protocols, including the P2P protocol, pick up downloads where they
 - left off, and so if the bad content were not deleted, repeated downloads
 - would continue to fail.
 -}
verificationOfContentFailed :: RawFilePath -> Annex ()
verificationOfContentFailed tmpfile = do
	warning "Verification of content failed"
	pruneTmpWorkDirBefore tmpfile
		(liftIO . removeWhenExistsWith R.removeLink)

{- Checks if there is enough free disk space to download a key
 - to its temp file.
 -
 - When the temp file already exists, count the space it is using as
 - free, since the download will overwrite it or resume.
 -
 - When there's enough free space, runs the download action.
 -}
checkDiskSpaceToGet :: Key -> Maybe FileSize -> a -> Annex a -> Annex a
checkDiskSpaceToGet key sz unabletoget getkey = do
	tmp <- fromRepo (gitAnnexTmpObjectLocation key)
	e <- liftIO $ doesFileExist (fromRawFilePath tmp)
	alreadythere <- liftIO $ if e
		then getFileSize tmp
		else return 0
	ifM (checkDiskSpace sz Nothing key alreadythere True)
		( do
			-- The tmp file may not have been left writable
			when e $ thawContent tmp
			getkey
		, return unabletoget
		)

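{- Usage sketch (hypothetical caller; runSomeDownload is a placeholder):
 - the third argument is the value returned when there is not enough
 - free disk space; otherwise the download action runs.
 -
 -   getWithSpaceCheck :: Key -> Maybe FileSize -> Annex Bool
 -   getWithSpaceCheck key sz =
 -   	checkDiskSpaceToGet key sz False $ do
 -   		tmpfile <- prepTmp key
 -   		runSomeDownload key tmpfile
 -}
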
prepTmp :: Key -> Annex RawFilePath
prepTmp key = do
	tmp <- fromRepo $ gitAnnexTmpObjectLocation key
	createAnnexDirectory (parentDir tmp)
	return tmp

{- Prepares a temp file for a key, runs an action on it, and cleans up
 - the temp file. If the action throws an exception, the temp file is
 - left behind, which allows for resuming.
 -}
withTmp :: Key -> (RawFilePath -> Annex a) -> Annex a
withTmp key action = do
	tmp <- prepTmp key
	res <- action tmp
	pruneTmpWorkDirBefore tmp (liftIO . removeWhenExistsWith R.removeLink)
	return res

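{- Usage sketch (hypothetical caller; produceContent and uploadFile are
 - placeholders): withTmp is useful when a key's content is only needed
 - on disk for the duration of an action; the temp file is removed
 - afterwards unless the action throws an exception.
 -
 -   withTmp key $ \tmpfile -> do
 -   	produceContent key tmpfile
 -   	uploadFile tmpfile
 -}
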
{- Moves a key's content into .git/annex/objects/
 -
 - When a key has associated pointer files, the object is hard
 - linked (or copied) to the files, and the object file is left thawed.
 -
 - What if the key there already has content? This could happen for
 - various reasons; perhaps the same content is being annexed again.
 - Perhaps there has been a hash collision generating the keys.
 -
 - The current strategy is to assume that in this case it's safe to delete
 - one of the two copies of the content; and the one already in the annex
 - is left there, assuming it's the original, canonical copy.
 -
 - I considered being more paranoid, and checking that both files had
 - the same content. Decided against it because A) users explicitly choose
 - a backend based on its hashing properties and so if they're dealing
 - with colliding files it's their own fault and B) adding such a check
 - would not catch all cases of colliding keys. For example, perhaps
 - a remote has a key; if it's then added again with different content then
 - the overall system now has two different pieces of content for that
 - key, and one of them will probably get deleted later. So, adding the
 - check here would only raise expectations that git-annex cannot truly
 - meet.
 -
 - May return False when a particular variety of key is not being
 - accepted into the repository. Will display a warning message in this
 - case. May also throw exceptions in some cases.
 -}
moveAnnex :: Key -> AssociatedFile -> RawFilePath -> Annex Bool
moveAnnex key af src = ifM (checkSecureHashes' key)
	( do
		withObjectLoc key storeobject
		return True
	, return False
	)
  where
	storeobject dest = ifM (liftIO $ R.doesPathExist dest)
		( alreadyhave
		, adjustedBranchRefresh af $ modifyContentDir dest $ do
			liftIO $ moveFile src dest
			-- Freeze the object file now that it is in place.
			-- Waiting until now to freeze it allows for freeze
			-- hooks that prevent moving the file.
			freezeContent dest
			g <- Annex.gitRepo
			fs <- map (`fromTopFilePath` g)
				<$> Database.Keys.getAssociatedFiles key
			unless (null fs) $ do
				destic <- withTSDelta $
					liftIO . genInodeCache dest
				ics <- mapM (populatePointerFile (Restage True) key dest) fs
				Database.Keys.addInodeCaches key
					(catMaybes (destic:ics))
		)
	alreadyhave = liftIO $ R.removeLink src

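{- Usage sketch (hypothetical caller): an ingest path that has the
 - content in a temp file calls moveAnnex and must check the result,
 - since annex.securehashesonly can cause the key to be rejected.
 -
 -   ingest :: Key -> AssociatedFile -> RawFilePath -> Annex ()
 -   ingest key af tmpfile =
 -   	unlessM (moveAnnex key af tmpfile) $
 -   		giveup "key not accepted into the annex"
 -}
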
checkSecureHashes :: Key -> Annex (Maybe String)
checkSecureHashes key = ifM (Backend.isCryptographicallySecureKey key)
	( return Nothing
	, ifM (annexSecureHashesOnly <$> Annex.getGitConfig)
		( return $ Just $ "annex.securehashesonly blocked adding " ++ decodeBS (formatKeyVariety (fromKey keyVariety key)) ++ " key"
		, return Nothing
		)
	)

checkSecureHashes' :: Key -> Annex Bool
checkSecureHashes' key = checkSecureHashes key >>= \case
	Nothing -> return True
	Just msg -> do
		warning $ UnquotedString $ msg ++ " to annex objects"
		return False

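{- Note: moveAnnex (above) and linkToAnnex (below) are the main paths by
 - which new content enters .git/annex/objects, and both gate on
 - checkSecureHashes', so with annex.securehashesonly set, content for
 - insecurely hashed keys is never accepted into the repository.
 -}
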
data LinkAnnexResult = LinkAnnexOk | LinkAnnexFailed | LinkAnnexNoop
	deriving (Eq)

{- Populates the annex object file by hard linking or copying a source
 - file to it. -}
linkToAnnex :: Key -> RawFilePath -> Maybe InodeCache -> Annex LinkAnnexResult
linkToAnnex key src srcic = ifM (checkSecureHashes' key)
	( do
		dest <- calcRepo (gitAnnexLocation key)
		modifyContentDir dest $ linkAnnex To key src srcic dest Nothing
	, return LinkAnnexFailed
	)

{- Makes a destination file be a link or copy from the annex object.
 -
 - linkAnnex stats the file after copying it to add to the inode
 - cache. But dest may be a file in the working tree, which could
 - get modified immediately after being populated. To avoid such a
 - race, call linkAnnex on a temporary file and move it into place
 - afterwards. Note that a consequence of this is that, if the file
 - already exists, it will be overwritten.
 -}
linkFromAnnex :: Key -> RawFilePath -> Maybe FileMode -> Annex LinkAnnexResult
linkFromAnnex key dest destmode =
	replaceFile' (const noop) (fromRawFilePath dest) (== LinkAnnexOk) $ \tmp ->
		linkFromAnnex' key tmp destmode

{- This is only safe to use when dest is not a worktree file. -}
linkFromAnnex' :: Key -> RawFilePath -> Maybe FileMode -> Annex LinkAnnexResult
linkFromAnnex' key dest destmode = do
	src <- calcRepo (gitAnnexLocation key)
	srcic <- withTSDelta (liftIO . genInodeCache src)
	linkAnnex From key src srcic dest destmode

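{- Usage sketch (hypothetical caller): populating a worktree file from
 - the annex object goes through linkFromAnnex, which does the
 - temp-file-and-rename dance via replaceFile' so a concurrent writer
 - cannot invalidate the inode cache that gets recorded.
 -
 -   populatePointer :: Key -> RawFilePath -> Annex Bool
 -   populatePointer key f =
 -   	(/= LinkAnnexFailed) <$> linkFromAnnex key f Nothing
 -}
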
data FromTo = From | To

{- Hard links or copies from or to the annex object location.
 - Updates inode cache.
 -
 - Freezes or thaws the destination appropriately.
 -
 - When a hard link is made, the annex object necessarily has to be thawed
 - too. So, adding an object to the annex with a hard link can prevent
 - losing the content if the source file is deleted, but does not
 - guard against modifications.
 -
 - Nothing is done if the destination file already exists.
 -}
linkAnnex :: FromTo -> Key -> RawFilePath -> Maybe InodeCache -> RawFilePath -> Maybe FileMode -> Annex LinkAnnexResult
linkAnnex _ _ _ Nothing _ _ = return LinkAnnexFailed
linkAnnex fromto key src (Just srcic) dest destmode =
	withTSDelta (liftIO . genInodeCache dest) >>= \case
		Just destic -> do
			cs <- Database.Keys.getInodeCaches key
			if null cs
				then Database.Keys.addInodeCaches key [srcic, destic]
				else Database.Keys.addInodeCaches key [srcic]
			return LinkAnnexNoop
		Nothing -> linkOrCopy key src dest destmode >>= \case
			Nothing -> failed
			Just r -> do
				case fromto of
					From -> thawContent dest
					To -> case r of
						Copied -> freezeContent dest
						Linked -> noop
				checksrcunchanged
  where
	failed = do
		Database.Keys.addInodeCaches key [srcic]
		return LinkAnnexFailed
	checksrcunchanged = withTSDelta (liftIO . genInodeCache src) >>= \case
		Just srcic' | compareStrong srcic srcic' -> do
			destic <- withTSDelta (liftIO . genInodeCache dest)
			Database.Keys.addInodeCaches key $
				catMaybes [destic, Just srcic]
			return LinkAnnexOk
		_ -> do
			liftIO $ removeWhenExistsWith R.removeLink dest
			failed

{- Removes the annex object file for a key. Lowlevel. -}
unlinkAnnex :: Key -> Annex ()
unlinkAnnex key = do
	obj <- calcRepo (gitAnnexLocation key)
	modifyContentDir obj $ do
		secureErase obj
		liftIO $ removeWhenExistsWith R.removeLink obj

{- Runs an action to transfer an object's content. The action is also
 - passed the size of the object.
 -
 - In some cases, it's possible for the file to change as it's being sent.
 - If this happens, runs the rollback action and throws an exception.
 - The rollback action should remove the data that was transferred.
 -}
sendAnnex :: Key -> Maybe FilePath -> Annex () -> (FilePath -> FileSize -> Annex a) -> Annex a
sendAnnex key o rollback sendobject = go =<< prepSendAnnex' key o
  where
	go (Just (f, sz, check)) = do
		r <- sendobject f sz
		check >>= \case
			Nothing -> return r
			Just err -> do
				rollback
				giveup err
	go Nothing = giveup "content not available to send"

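{- Usage sketch (hypothetical caller; uploadFile and removeFromRemote
 - are placeholders): upload a key's content, rolling back the partial
 - upload if the object changed while it was being sent.
 -
 -   uploadKey :: Key -> Annex Bool
 -   uploadKey key =
 -   	sendAnnex key Nothing (removeFromRemote key) $ \f sz ->
 -   		uploadFile f sz
 -}
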
{- Returns a file that contains an object's content,
 - and a check to run after the transfer is complete.
 -
 - When a file is unlocked, it's possible for its content to
 - change as it's being sent. The check detects this case
 - and returns False.
 -
 - Note that the returned check action is, in some cases, run in the
 - Annex monad of the remote that is receiving the object, rather than
 - the sender. So it cannot rely on Annex state.
 -}
prepSendAnnex :: Key -> Maybe FilePath -> Annex (Maybe (FilePath, FileSize, Annex Bool))
prepSendAnnex key Nothing = withObjectLoc key $ \f -> do
	let retval c cs = return $ Just
		( fromRawFilePath f
		, inodeCacheFileSize c
		, sameInodeCache f cs
		)
	cache <- Database.Keys.getInodeCaches key
	if null cache
		-- Since no inode cache is in the database, this
		-- object is not currently unlocked. But that could
		-- change while the transfer is in progress, so
		-- generate an inode cache for the starting
		-- content.
		then maybe (return Nothing) (\fc -> retval fc [fc])
			=<< withTSDelta (liftIO . genInodeCache f)
		-- Verify that the object is not modified. Usually this
		-- only has to check the inode cache, but if the cache
		-- is somehow stale, it will fall back to verifying its
		-- content.
		else withTSDelta (liftIO . genInodeCache f) >>= \case
			Just fc -> ifM (isUnmodified' key f fc cache)
				( retval fc (fc:cache)
				, return Nothing
				)
			Nothing -> return Nothing
-- If the provided object file is the annex object file, handle as above.
prepSendAnnex key (Just o) = withObjectLoc key $ \aof ->
	let o' = toRawFilePath o
	in if aof == o'
		then prepSendAnnex key Nothing
		else do
			withTSDelta (liftIO . genInodeCache o') >>= \case
				Nothing -> return Nothing
				Just c -> return $ Just
					( o
					, inodeCacheFileSize c
					, sameInodeCache o' [c]
					)

prepSendAnnex' :: Key -> Maybe FilePath -> Annex (Maybe (FilePath, FileSize, Annex (Maybe String)))
prepSendAnnex' key o = prepSendAnnex key o >>= \case
	Just (f, sz, checksuccess) ->
		let checksuccess' = ifM checksuccess
			( return Nothing
			, return (Just "content changed while it was being sent")
			)
		in return (Just (f, sz, checksuccess'))
	Nothing -> return Nothing

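{- Sketch of the check protocol (hypothetical caller; streamToPeer is a
 - placeholder): after streaming the returned file, run the check
 - action; a Just message means the content changed mid-transfer and
 - the transfer must be treated as failed.
 -
 -   prepSendAnnex' key Nothing >>= \case
 -   	Nothing -> giveup "content not available to send"
 -   	Just (f, sz, check) -> do
 -   		streamToPeer f sz
 -   		check >>= maybe noop giveup
 -}
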
cleanObjectLoc :: Key -> Annex () -> Annex ()
cleanObjectLoc key cleaner = do
	file <- calcRepo (gitAnnexLocation key)
	void $ tryIO $ thawContentDir file
	{- Thawing is not necessary when the file was frozen only
	 - by removing write perms. But if there is a thaw hook, it may do
	 - something else that is necessary to allow the file to be
	 - deleted.
	 -}
	whenM hasThawHook $
		void $ tryIO $ thawContent file

	cleaner
	cleanObjectDirs file

{- Given a filename inside the object directory, tries to remove the object
 - directory, as well as the object hash directories.
 -
 - Does nothing if the object directory is not empty, and does not
 - throw an exception if it's unable to remove a directory. -}
cleanObjectDirs :: RawFilePath -> Annex ()
cleanObjectDirs f = do
	HashLevels n <- objectHashLevels <$> Annex.getGitConfig
	liftIO $ go f (succ n)
  where
	go _ 0 = noop
	go file n = do
		let dir = parentDir file
		maybe noop (const $ go dir (n-1))
			<=< catchMaybeIO $ tryWhenExists $
				removeDirectory (fromRawFilePath dir)

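{- Illustration (layout assumed from the default two hash levels): for
 - an object stored under .git/annex/objects/xx/yy/KEY/, cleanObjectDirs
 - is passed a file inside that directory and walks upwards, removing
 - KEY/, then yy/, then xx/, stopping at the first directory that is not
 - empty.
 -}
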
{- Removes a key's file from .git/annex/objects/ -}
removeAnnex :: ContentRemovalLock -> Annex ()
removeAnnex (ContentRemovalLock key) = withObjectLoc key $ \file ->
	cleanObjectLoc key $ do
		secureErase file
		liftIO $ removeWhenExistsWith R.removeLink file
		g <- Annex.gitRepo
		mapM_ (\f -> void $ tryIO $ resetpointer $ fromTopFilePath f g)
			=<< Database.Keys.getAssociatedFiles key
		Database.Keys.removeInodeCaches key
  where
	-- Check associated pointer file for modifications, and reset if
	-- it's unmodified.
	resetpointer file = unlessM (liftIO $ isSymbolicLink <$> R.getSymbolicLinkStatus file) $
		ifM (isUnmodified key file)
			( adjustedBranchRefresh (AssociatedFile (Just file)) $
				depopulatePointerFile key file
			-- Modified file, so leave it alone.
			-- If it was a hard link to the annex object,
			-- that object might have been frozen as part of the
			-- removal process, so thaw it.
			, void $ tryIO $ thawContent file
			)

{- Moves a key out of .git/annex/objects/ into .git/annex/bad, and
|
2010-11-13 19:42:56 +00:00
|
|
|
- returns the file it was moved to. -}
|
2022-06-22 20:47:34 +00:00
|
|
|
moveBad :: Key -> Annex RawFilePath
|
2010-11-13 18:59:27 +00:00
|
|
|
moveBad key = do
|
2020-10-29 18:20:57 +00:00
|
|
|
src <- calcRepo (gitAnnexLocation key)
|
2011-11-11 05:52:58 +00:00
|
|
|
bad <- fromRepo gitAnnexBadDir
|
2020-10-29 18:20:57 +00:00
|
|
|
let dest = bad P.</> P.takeFileName src
|
2015-01-09 17:11:56 +00:00
|
|
|
createAnnexDirectory (parentDir dest)
|
2013-11-15 18:52:03 +00:00
|
|
|
cleanObjectLoc key $
|
2022-06-22 20:47:34 +00:00
|
|
|
liftIO $ moveFile src dest
|
2024-08-23 20:35:12 +00:00
|
|
|
logStatus NoLiveUpdate key InfoMissing
|
2022-06-22 20:47:34 +00:00
|
|
|
return dest
|
2010-11-13 18:59:27 +00:00
|
|
|
|
2019-08-27 16:59:57 +00:00
|
|
|
data KeyLocation = InAnnex | InAnywhere
|
2014-03-07 16:43:56 +00:00
|
|
|
|
2019-08-27 16:59:57 +00:00
|
|
|
{- InAnnex only lists keys with content in .git/annex/objects.
|
|
|
|
- InAnywhere lists all keys that have directories in
|
|
|
|
- .git/annex/objects, whether or not the content is present.
|
2014-03-07 16:43:56 +00:00
|
|
|
-}
|
2019-08-27 16:59:57 +00:00
|
|
|
listKeys :: KeyLocation -> Annex [Key]
|
2022-02-21 18:45:11 +00:00
|
|
|
listKeys keyloc = listKeys' keyloc (const (pure True))
|
|
|
|
|
|
|
|
{- Due to use of unsafeInterleaveIO, the passed filter action
|
|
|
|
- will be run in a copy of the Annex state, so any changes it
|
|
|
|
- makes to the state will not be preserved. -}
|
|
|
|
listKeys' :: KeyLocation -> (Key -> Annex Bool) -> Annex [Key]
|
|
|
|
listKeys' keyloc want = do
|
2013-02-15 21:58:49 +00:00
|
|
|
dir <- fromRepo gitAnnexObjectDir
|
2019-08-27 16:59:57 +00:00
|
|
|
s <- Annex.getState id
|
2022-02-21 18:45:11 +00:00
|
|
|
r <- Annex.getRead id
|
2015-06-11 19:14:42 +00:00
|
|
|
depth <- gitAnnexLocationDepth <$> Annex.getGitConfig
|
2022-02-21 18:45:11 +00:00
|
|
|
liftIO $ walk (s, r) depth (fromRawFilePath dir)
|
2012-12-13 04:24:19 +00:00
|
|
|
where
|
2019-08-27 16:59:57 +00:00
|
|
|
walk s depth dir = do
|
2012-12-13 04:24:19 +00:00
|
|
|
contents <- catchDefaultIO [] (dirContents dir)
|
2015-06-11 19:14:42 +00:00
|
|
|
if depth < 2
|
2013-02-15 21:58:49 +00:00
|
|
|
then do
|
2022-02-21 18:45:11 +00:00
|
|
|
contents' <- filterM present contents
|
|
|
|
keys <- filterM (Annex.eval s . want) $
|
|
|
|
mapMaybe (fileKey . P.takeFileName . toRawFilePath) contents'
|
2013-02-15 21:58:49 +00:00
|
|
|
continue keys []
|
2012-12-13 04:24:19 +00:00
|
|
|
else do
|
2019-08-27 16:59:57 +00:00
|
|
|
let deeper = walk s (depth - 1)
|
2012-12-13 04:24:19 +00:00
|
|
|
continue [] (map deeper contents)
|
|
|
|
continue keys [] = return keys
|
|
|
|
continue keys (a:as) = do
|
|
|
|
{- Force lazy traversal with unsafeInterleaveIO. -}
|
|
|
|
morekeys <- unsafeInterleaveIO a
|
|
|
|
continue (morekeys++keys) as
|
2011-06-23 01:19:52 +00:00
|
|
|
|
2015-12-15 19:56:37 +00:00
|
|
|
inanywhere = case keyloc of
|
|
|
|
InAnywhere -> True
|
|
|
|
_ -> False
|
|
|
|
|
2022-02-21 18:45:11 +00:00
|
|
|
present _ | inanywhere = pure True
|
|
|
|
present d = presentInAnnex d
|
2014-03-07 16:43:56 +00:00
|
|
|
|
|
|
|
presentInAnnex = doesFileExist . contentfile
|
2013-02-15 21:58:49 +00:00
|
|
|
contentfile d = d </> takeFileName d
|
|
|
|
|
2012-01-28 19:41:52 +00:00
|
|
|
{- Things to do to record changes to content when shutting down.
|
|
|
|
-
|
|
|
|
- It's acceptable to avoid committing changes to the branch,
|
|
|
|
- especially if performing a short-lived action.
|
|
|
|
-}
|
|
|
|
saveState :: Bool -> Annex ()
|
2012-09-16 00:46:38 +00:00
|
|
|
saveState nocommit = doSideAction $ do
|
2012-04-27 17:23:52 +00:00
|
|
|
Annex.Queue.flush
|
avoid flushing keys db queue after each Annex action
The flush was only done Annex.run' to make sure that the queue was flushed
before git-annex exits. But, doing it there means that as soon as one
change gets queued, it gets flushed soon after, which contributes to
excessive writes to the database, slowing git-annex down.
(This does not yet speed git-annex up, but it is a stepping stone to
doing so.)
Database queues do not autoflush when garbage collected, so have to
be flushed explicitly. I don't think it's possible to make them
autoflush (except perhaps if git-annex switched to using ResourceT..).
The comment in Database.Keys.closeDb used to be accurate, since the
automatic flushing did mean that all writes reached the database even
when closeDb was not called. But now, closeDb or flushDb needs to be
called before stopping using an Annex state. So, removed that comment.
In Remote.Git, change to using quiesce everywhere that it used to use
stopCoProcesses. This means that uses on onLocal in there are just as
slow as before. I considered only calling closeDb on the local git remotes
when git-annex exits. But, the reason that Remote.Git calls stopCoProcesses
in each onLocal is so as not to leave git processes running that have files
open on the remote repo, when it's on removable media. So, it seemed to make
sense to also closeDb after each one, since sqlite may also keep files
open. Although that has not seemed to cause problems with removable
media so far. It was also just easier to quiesce in each onLocal than
once at the end. This does likely leave performance on the floor, so
could be revisited.
In Annex.Content.saveState, there was no reason to close the db,
flushing it is enough.
The rest of the changes are from auditing for Annex.new, and making
sure that quiesce is called, after any action that might possibly need
it.
After that audit, I'm pretty sure that the change to Annex.run' is
safe. The only concern might be that this does let more changes get
queued for write to the db, and if git-annex is interrupted, those will be
lost. But interrupting git-annex can obviously already prevent it from
writing the most recent change to the db, so it must recover from such
lost data... right?
Sponsored-by: Dartmouth College's Datalad project
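The flush requirement described above can be illustrated with a small,
hypothetical sketch (this is not the real Database.Keys queue or its API):
writes accumulate in memory and nothing flushes them when the queue is
garbage collected, so whoever owns the queue must flush it explicitly
before the state is discarded.

import Control.Exception (finally)
import Data.IORef

-- Hypothetical buffered write queue.
newtype WriteQueue = WriteQueue (IORef [String])

newQueue :: IO WriteQueue
newQueue = WriteQueue <$> newIORef []

-- Queueing is cheap; nothing reaches the backing store yet.
queueWrite :: WriteQueue -> String -> IO ()
queueWrite (WriteQueue r) w = modifyIORef' r (w:)

-- Flushing writes out everything queued so far, oldest first.
flushQueue :: WriteQueue -> IO ()
flushQueue (WriteQueue r) = do
    ws <- atomicModifyIORef' r (\ws -> ([], ws))
    mapM_ putStrLn (reverse ws) -- stand-in for the actual database write

-- The owner must flush before discarding the queue, even on exceptions.
withQueue :: (WriteQueue -> IO a) -> IO a
withQueue use = do
    q <- newQueue
    use q `finally` flushQueue q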
2022-10-12 17:50:46 +00:00
|
|
|
Database.Keys.flushDb
|
2012-09-16 00:46:38 +00:00
|
|
|
unless nocommit $
|
2013-01-01 17:52:47 +00:00
|
|
|
whenM (annexAlwaysCommit <$> Annex.getGitConfig) $
|
2018-08-02 18:06:06 +00:00
|
|
|
Annex.Branch.commit =<< Annex.Branch.commitMessage
|
2012-01-02 18:20:20 +00:00
|
|
|
|
2018-12-30 16:29:49 +00:00
|
|
|
{- Downloads content from any of a list of urls, displaying a progress
|
2021-09-01 19:28:22 +00:00
|
|
|
- meter.
|
|
|
|
-
|
|
|
|
- Only displays an error message if all the urls fail to download.
|
|
|
|
- When listfailedurls is set, lists each url and why it failed.
|
|
|
|
- Otherwise, only displays one error message, from one of the urls
|
|
|
|
- that failed.
|
|
|
|
-}
|
|
|
|
downloadUrl :: Bool -> Key -> MeterUpdate -> Maybe IncrementalVerifier -> [Url.URLString] -> FilePath -> Url.UrlOptions -> Annex Bool
|
|
|
|
downloadUrl listfailedurls k p iv urls file uo =
|
2018-04-06 21:00:46 +00:00
|
|
|
-- Poll the file to handle configurations where an external
|
|
|
|
-- download command is used.
|
2024-01-19 18:26:21 +00:00
|
|
|
meteredFile (toRawFilePath file) (Just p) k (go urls [])
|
2020-09-02 15:35:07 +00:00
|
|
|
where
|
2024-01-19 18:11:27 +00:00
|
|
|
go (u:us) errs p' = Url.download' p' iv u file uo >>= \case
|
2020-09-02 15:35:07 +00:00
|
|
|
Right () -> return True
|
2021-08-18 18:49:01 +00:00
|
|
|
Left err -> do
|
|
|
|
-- If the incremental verifier was fed anything
|
|
|
|
-- while the download that failed ran, it's unable
|
|
|
|
-- to be used for the other urls.
|
|
|
|
case iv of
|
|
|
|
Just iv' ->
|
2021-11-09 16:29:09 +00:00
|
|
|
liftIO $ positionIncrementalVerifier iv' >>= \case
|
|
|
|
Just n | n > 0 -> unableIncrementalVerifier iv'
|
2021-08-18 18:49:01 +00:00
|
|
|
_ -> noop
|
|
|
|
Nothing -> noop
|
2024-01-19 18:11:27 +00:00
|
|
|
go us ((u, err) : errs) p'
|
|
|
|
go [] [] _ = return False
|
|
|
|
go [] errs@((_, err):_) _ = do
|
2021-09-01 19:28:22 +00:00
|
|
|
if listfailedurls
|
filter out control characters in warning messages
Converted warning and similar to use StringContainingQuotedPath. Most
warnings are static strings, some do refer to filepaths that need to be
quoted, and others don't need quoting.
Note that, since quote filters out control characters of even
UnquotedString, this makes all warnings safe, even when an attacker
sneaks in a control character in some other way.
When json is being output, no quoting is done, since json gets its own
quoting.
This does, as a side effect, make warning messages in json output not
be indented. The indentation is only needed to offset warning messages
underneath the display of the file they apply to, so that's ok.
Sponsored-by: Brett Eisenberg on Patreon
2023-04-10 18:47:32 +00:00
|
|
|
then warning $ UnquotedString $
|
|
|
|
unlines $ flip map errs $ \(u, err') ->
|
|
|
|
u ++ " " ++ err'
|
|
|
|
else warning $ UnquotedString err
|
2021-09-01 19:28:22 +00:00
|
|
|
return False
|
2012-01-19 21:05:39 +00:00
|
|
|
|
|
|
|
{- Copies a key's content, when present, to a temp file.
|
|
|
|
- This is used to speed up some rsyncs. -}
|
2012-01-20 17:23:11 +00:00
|
|
|
preseedTmp :: Key -> FilePath -> Annex Bool
|
|
|
|
preseedTmp key file = go =<< inAnnex key
|
2012-12-13 04:24:19 +00:00
|
|
|
where
|
|
|
|
go False = return False
|
|
|
|
go True = do
|
|
|
|
ok <- copy
|
2020-11-06 18:10:58 +00:00
|
|
|
when ok $ thawContent (toRawFilePath file)
|
2012-12-13 04:24:19 +00:00
|
|
|
return ok
|
|
|
|
copy = ifM (liftIO $ doesFileExist file)
|
2013-02-15 21:58:49 +00:00
|
|
|
( return True
|
|
|
|
, do
|
2019-12-11 18:12:22 +00:00
|
|
|
s <- fromRawFilePath <$> (calcRepo $ gitAnnexLocation key)
|
2015-07-30 17:40:17 +00:00
|
|
|
liftIO $ ifM (doesFileExist s)
|
|
|
|
( copyFileExternal CopyTimeStamps s file
|
|
|
|
, return False
|
|
|
|
)
|
2013-02-15 21:58:49 +00:00
|
|
|
)
|
2012-04-21 18:06:36 +00:00
|
|
|
|
2013-10-10 21:27:00 +00:00
|
|
|
{- Finds files directly inside a directory like gitAnnexBadDir
|
|
|
|
- (not in subdirectories) and returns the corresponding keys. -}
|
2020-10-29 18:20:57 +00:00
|
|
|
dirKeys :: (Git.Repo -> RawFilePath) -> Annex [Key]
|
2013-10-10 21:27:00 +00:00
|
|
|
dirKeys dirspec = do
|
2020-10-29 18:20:57 +00:00
|
|
|
dir <- fromRawFilePath <$> fromRepo dirspec
|
2013-10-10 21:27:00 +00:00
|
|
|
ifM (liftIO $ doesDirectoryExist dir)
|
|
|
|
( do
|
|
|
|
contents <- liftIO $ getDirectoryContents dir
|
|
|
|
files <- liftIO $ filterM doesFileExist $
|
|
|
|
map (dir </>) contents
|
2019-12-18 20:45:03 +00:00
|
|
|
return $ mapMaybe (fileKey . P.takeFileName . toRawFilePath) files
|
2013-10-10 21:27:00 +00:00
|
|
|
, return []
|
|
|
|
)
|
|
|
|
|
2015-06-02 18:20:38 +00:00
|
|
|
{- Looks in the specified directory for bad/tmp keys, and returns a list
|
|
|
|
- of those that might still have value, or might be stale and removable.
|
|
|
|
-
|
|
|
|
- Also, stale keys that can be proven to have no value
|
|
|
|
- (ie, their content is already present) are deleted.
|
|
|
|
-}
|
2020-10-29 18:20:57 +00:00
|
|
|
staleKeysPrune :: (Git.Repo -> RawFilePath) -> Bool -> Annex [Key]
|
2015-06-02 18:20:38 +00:00
|
|
|
staleKeysPrune dirspec nottransferred = do
|
|
|
|
contents <- dirKeys dirspec
|
|
|
|
|
|
|
|
dups <- filterM inAnnex contents
|
|
|
|
let stale = contents `exclude` dups
|
|
|
|
|
|
|
|
dir <- fromRepo dirspec
|
2017-11-29 17:49:52 +00:00
|
|
|
forM_ dups $ \k ->
|
2020-10-29 18:20:57 +00:00
|
|
|
pruneTmpWorkDirBefore (dir P.</> keyFile k)
|
|
|
|
(liftIO . R.removeLink)
|
2015-06-02 18:20:38 +00:00
|
|
|
|
|
|
|
if nottransferred
|
|
|
|
then do
|
|
|
|
inprogress <- S.fromList . map (transferKey . fst)
|
|
|
|
<$> getTransfers
|
|
|
|
return $ filter (`S.notMember` inprogress) stale
|
|
|
|
else return stale
|
|
|
|
|
2017-11-29 17:49:52 +00:00
|
|
|
{- Prune the work dir associated with the specified content file,
|
|
|
|
- before performing an action that deletes the file, or moves it away.
|
|
|
|
-
|
|
|
|
- This preserves the invariant that the workdir never exists without
|
|
|
|
- the content file.
|
|
|
|
-}
|
2020-10-29 18:20:57 +00:00
|
|
|
pruneTmpWorkDirBefore :: RawFilePath -> (RawFilePath -> Annex a) -> Annex a
|
2017-11-29 17:49:52 +00:00
|
|
|
pruneTmpWorkDirBefore f action = do
|
2020-10-29 18:20:57 +00:00
|
|
|
let workdir = fromRawFilePath $ gitAnnexTmpWorkDir f
|
2017-11-29 17:49:52 +00:00
|
|
|
liftIO $ whenM (doesDirectoryExist workdir) $
|
|
|
|
removeDirectoryRecursive workdir
|
|
|
|
action f
|
|
|
|
|
|
|
|
{- Runs an action, passing it a temporary work directory where
|
|
|
|
- it can write files while receiving the content of a key.
|
|
|
|
-
|
2018-06-28 16:58:11 +00:00
|
|
|
- Preserves the invariant that the workdir never exists without the
|
|
|
|
- content file, by creating an empty content file first.
|
|
|
|
-
|
2017-11-30 17:45:43 +00:00
|
|
|
- On exception, or when the action returns Nothing,
|
2018-06-28 16:58:11 +00:00
|
|
|
- the temporary work directory is retained (unless
|
|
|
|
- empty), so anything in it can be used on resume.
|
2017-11-29 17:49:52 +00:00
|
|
|
-}
|
2020-10-29 18:20:57 +00:00
|
|
|
withTmpWorkDir :: Key -> (RawFilePath -> Annex (Maybe a)) -> Annex (Maybe a)
|
2017-11-29 19:49:05 +00:00
|
|
|
withTmpWorkDir key action = do
|
2017-11-29 17:49:52 +00:00
|
|
|
-- Create the object file if it does not exist. This way,
|
|
|
|
-- staleKeysPrune only has to look for object files, and can
|
|
|
|
-- clean up gitAnnexTmpWorkDir for those it finds.
|
2017-11-29 19:49:05 +00:00
|
|
|
obj <- prepTmp key
|
2020-10-29 18:20:57 +00:00
|
|
|
let obj' = fromRawFilePath obj
|
|
|
|
unlessM (liftIO $ doesFileExist obj') $ do
|
|
|
|
liftIO $ writeFile obj' ""
|
2020-11-06 18:10:58 +00:00
|
|
|
setAnnexFilePerm obj
|
2017-11-29 17:49:52 +00:00
|
|
|
let tmpdir = gitAnnexTmpWorkDir obj
|
2020-03-05 18:56:47 +00:00
|
|
|
createAnnexDirectory tmpdir
|
2017-11-29 17:49:52 +00:00
|
|
|
res <- action tmpdir
|
2017-11-29 19:49:05 +00:00
|
|
|
case res of
|
2020-10-29 18:20:57 +00:00
|
|
|
Just _ -> liftIO $ removeDirectoryRecursive (fromRawFilePath tmpdir)
|
|
|
|
Nothing -> liftIO $ void $ tryIO $ removeDirectory (fromRawFilePath tmpdir)
|
2017-11-29 17:49:52 +00:00
|
|
|
return res
|
|
|
|
|
2015-06-02 18:20:38 +00:00
|
|
|
{- Finds items in the first, smaller list, that are not
|
|
|
|
- present in the second, larger list.
|
|
|
|
-
|
|
|
|
- Constructing a single set, of the list that tends to be
|
|
|
|
- smaller, appears more efficient in both memory and CPU
|
|
|
|
- than constructing and taking the S.difference of two sets. -}
|
|
|
|
exclude :: Ord a => [a] -> [a] -> [a]
|
|
|
|
exclude [] _ = [] -- optimisation
|
|
|
|
exclude smaller larger = S.toList $ remove larger $ S.fromList smaller
|
|
|
|
where
|
|
|
|
remove a b = foldl (flip S.delete) b a
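For illustration (hypothetical values), the smaller list's members that
are missing from the larger list are what survive:

-- exclude [1,3,5] [1,2,3,4] == [5]  -- only 5 is absent from the larger list
-- exclude [1,3]   [1,2,3,4] == []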
|
2022-01-13 17:24:50 +00:00
|
|
|
|
|
|
|
data KeyStatus
|
|
|
|
= KeyMissing
|
|
|
|
| KeyPresent
|
|
|
|
| KeyUnlockedThin
|
|
|
|
-- ^ An annex.thin worktree file is hard linked to the object.
|
|
|
|
| KeyLockedThin
|
|
|
|
-- ^ The object has hard links, but the file being fscked
|
|
|
|
-- is not the one that hard links to it.
|
|
|
|
deriving (Show)
|
|
|
|
|
|
|
|
isKeyUnlockedThin :: KeyStatus -> Bool
|
|
|
|
isKeyUnlockedThin KeyUnlockedThin = True
|
|
|
|
isKeyUnlockedThin KeyLockedThin = False
|
|
|
|
isKeyUnlockedThin KeyPresent = False
|
|
|
|
isKeyUnlockedThin KeyMissing = False
|
|
|
|
|
|
|
|
getKeyStatus :: Key -> Annex KeyStatus
|
|
|
|
getKeyStatus key = catchDefaultIO KeyMissing $ do
|
|
|
|
afs <- not . null <$> Database.Keys.getAssociatedFiles key
|
|
|
|
obj <- calcRepo (gitAnnexLocation key)
|
|
|
|
multilink <- ((> 1) . linkCount <$> liftIO (R.getFileStatus obj))
|
|
|
|
return $ if multilink && afs
|
|
|
|
then KeyUnlockedThin
|
|
|
|
else KeyPresent
|
|
|
|
|
2022-06-22 20:20:08 +00:00
|
|
|
getKeyFileStatus :: Key -> RawFilePath -> Annex KeyStatus
|
2022-01-13 17:24:50 +00:00
|
|
|
getKeyFileStatus key file = do
|
|
|
|
s <- getKeyStatus key
|
|
|
|
case s of
|
|
|
|
KeyUnlockedThin -> catchDefaultIO KeyUnlockedThin $
|
2022-06-22 20:20:08 +00:00
|
|
|
ifM (isJust <$> isAnnexLink file)
|
2022-01-13 17:24:50 +00:00
|
|
|
( return KeyLockedThin
|
|
|
|
, return KeyUnlockedThin
|
|
|
|
)
|
|
|
|
_ -> return s
|
|
|
|
|
2023-12-08 20:22:14 +00:00
|
|
|
{- Gets the size of the content of a key when it is present.
|
|
|
|
- Useful when the key does not have keySize set.
|
|
|
|
-
|
|
|
|
- When the object file appears possibly modified with annex.thin set, does
|
|
|
|
- not do an expensive verification that the content is good, just returns
|
|
|
|
- Nothing.
|
|
|
|
-}
|
|
|
|
contentSize :: Key -> Annex (Maybe FileSize)
|
|
|
|
contentSize key = catchDefaultIO Nothing $
|
|
|
|
withObjectLoc key $ \loc ->
|
|
|
|
withTSDelta (liftIO . genInodeCache loc) >>= \case
|
|
|
|
Just ic -> ifM (unmodified ic)
|
|
|
|
( return (Just (inodeCacheFileSize ic))
|
|
|
|
, return Nothing
|
|
|
|
)
|
|
|
|
Nothing -> return Nothing
|
|
|
|
where
|
|
|
|
unmodified ic =
|
|
|
|
ifM (annexThin <$> Annex.getGitConfig)
|
|
|
|
( isUnmodifiedCheap' key ic
|
|
|
|
, return True
|
|
|
|
)
|
add content retention files
This allows lockContentShared to lock content for eg, 10 minutes and
if the process then gets terminated before it can unlock, the content
will remain locked for that amount of time.
The Windows implementation is not yet tested.
In P2P.Annex, a duration of 10 minutes is used. This way, when p2pstdio
or remotedaemon is serving the P2P protocol, and is asked to
LOCKCONTENT, and that process gets killed, the content will not be
subject to deletion. This is not a perfect solution to
doc/todo/P2P_locking_connection_drop_safety.mdwn yet, but it gets most
of the way there, without needing any P2P protocol changes.
This is only done in v10 and higher repositories (or on Windows). It
might be possible to backport it to v8 or earlier, but it would
complicate locking even further, and without a separate lock file, might
be hard. I think that by the time this fix reaches a given user, they
will probably have been running git-annex 10.x long enough that their v8
repositories will have upgraded to v10 after the 1 year wait. And it's
not as if git-annex hasn't already been subject to this problem (though
I have not heard of any data loss caused by it) for 6 years already, so
waiting another fraction of a year on top of however long it takes this
fix to reach users is unlikely to be a problem.
2024-07-03 18:44:38 +00:00
|
|
|
|
|
|
|
{- Avoids writing a timestamp when the file already contains a later
|
|
|
|
- timestamp. The file is written atomically, so when it contained an
|
|
|
|
- earlier timestamp, a reader will always see one or the other timestamp.
|
|
|
|
-}
|
|
|
|
writeContentRetentionTimestamp :: Key -> RawFilePath -> POSIXTime -> Annex ()
|
|
|
|
writeContentRetentionTimestamp key rt t = do
|
|
|
|
lckfile <- calcRepo (gitAnnexContentRetentionTimestampLock key)
|
|
|
|
modifyContentDirWhenExists lckfile $ bracket (lock lckfile) unlock $ \_ ->
|
|
|
|
readContentRetentionTimestamp rt >>= \case
|
|
|
|
Just ts | ts >= t -> return ()
|
|
|
|
_ -> replaceFile (const noop) (fromRawFilePath rt) $ \tmp ->
|
|
|
|
liftIO $ writeFile (fromRawFilePath tmp) $ show t
|
|
|
|
where
|
|
|
|
lock = takeExclusiveLock
|
|
|
|
unlock = liftIO . dropLock
|
|
|
|
|
|
|
|
{- Does not need locking because the file is written atomically. -}
|
|
|
|
readContentRetentionTimestamp :: RawFilePath -> Annex (Maybe POSIXTime)
|
|
|
|
readContentRetentionTimestamp rt =
|
|
|
|
liftIO $ join <$> tryWhenExists
|
|
|
|
(parsePOSIXTime <$> readFile (fromRawFilePath rt))
|
|
|
|
|
|
|
|
{- Checks if the retention timestamp is in the future; if so, returns
|
|
|
|
- Nothing.
|
|
|
|
-
|
|
|
|
- If the retention timestamp is in the past, the retention timestamp file
|
|
|
|
- is deleted. This cleans up stale retention timestamps.
|
|
|
|
-
|
|
|
|
- The locker should take a lock that prevents any other processes from
|
|
|
|
- writing to the retention timestamp. So the retention timestamp lock
|
|
|
|
- is not used here and can also be deleted when deleting the retention
|
|
|
|
- timestamp file.
|
|
|
|
-}
|
|
|
|
checkRetentionTimestamp :: Key -> Annex (Maybe LockHandle) -> Annex (Maybe LockHandle)
|
|
|
|
checkRetentionTimestamp key locker = do
|
|
|
|
rt <- calcRepo (gitAnnexContentRetentionTimestamp key)
|
|
|
|
readContentRetentionTimestamp rt >>= \case
|
|
|
|
Nothing -> locker
|
|
|
|
Just ts -> do
|
|
|
|
now <- liftIO getPOSIXTime
|
|
|
|
if now > ts
|
|
|
|
then locker >>= \case
|
|
|
|
Nothing -> return Nothing
|
|
|
|
Just lock -> do
|
|
|
|
removeRetentionTimeStamp key rt
|
|
|
|
return (Just lock)
|
|
|
|
else return Nothing
|
|
|
|
|
|
|
|
{- Remove the retention timestamp and its lock file. Another lock must
|
|
|
|
- be held, that prevents anything else writing to the file at the same
|
|
|
|
- time. -}
|
|
|
|
removeRetentionTimeStamp :: Key -> RawFilePath -> Annex ()
|
|
|
|
removeRetentionTimeStamp key rt = modifyContentDirWhenExists rt $ do
|
|
|
|
liftIO $ removeWhenExistsWith R.removeLink rt
|
|
|
|
rtl <- calcRepo (gitAnnexContentRetentionTimestampLock key)
|
|
|
|
liftIO $ removeWhenExistsWith R.removeLink rtl
|