{- git-annex general metadata storage log and per-remote metadata storage log.
 -
 - A line of the log will look like "timestamp field [+-]value [...]"
 -
 - (In the per-remote log, each field is prefixed with "uuid:")
 -
 - Note that unset values are preserved. Consider this case:
 -
 - We have:
 -
 - 100 foo +x
 - 200 foo -x
 -
 - An unmerged remote has:
 -
 - 150 foo +x
 -
 - After union merge, because the foo -x was preserved, we know that
 - after the other remote redundantly set foo +x, it was unset,
 - and so foo currently has no value.
 -
 - Copyright 2014-2020 Joey Hess <id@joeyh.name>
 -
 - Licensed under the GNU AGPL version 3 or higher.
 -}
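
{- To make the example above concrete: after the union merge, the log
 - contains, in ascending order:
 -
 - 100 foo +x
 - 150 foo +x
 - 200 foo -x
 -
 - Read oldest to newest, the newest entry (200 foo -x) wins, so foo
 - ends up unset. Had the -x line been dropped as redundant, the merge
 - would instead have wrongly resurrected foo=x.
 -}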

module Logs.MetaData (
	getCurrentMetaData,
	parseCurrentMetaData,
	getCurrentRemoteMetaData,
	addMetaData,
	addRemoteMetaData,
	addMetaDataClocked,
	currentMetaData,
	copyMetaData,
) where

import Annex.Common
import Types.MetaData
import Types.RemoteState
import Annex.MetaData.StandardFields
import Annex.VectorClock
import qualified Annex.Branch
import qualified Annex
import Logs
import Utility.TimeStamp
import Logs.MetaData.Pure

import qualified Data.Set as S
import qualified Data.Map as M
import qualified Data.ByteString.Lazy as L

{- Go through the log from oldest to newest, and combine it all
 - into a single MetaData representing the current state.
 -
 - Automatically generates a lastchanged metadata for each field that's
 - currently set, based on timestamps in the log.
 -}
getCurrentMetaData :: Key -> Annex MetaData
getCurrentMetaData = getCurrentMetaData' metaDataLogFile
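
-- For a field that is currently set, the generated metadata includes
-- a per-field lastchanged field (named via mkLastChangedField), plus
-- the overall "lastchanged" field, both derived from log timestamps.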

getCurrentMetaData' :: (GitConfig -> Key -> RawFilePath) -> Key -> Annex MetaData
getCurrentMetaData' getlogfile k = do
	config <- Annex.getGitConfig
	parseCurrentMetaData <$> Annex.Branch.get (getlogfile config k)

parseCurrentMetaData :: L.ByteString -> MetaData
parseCurrentMetaData content =
	let ls = S.toAscList $ parseLog content
	    loggedmeta = logToCurrentMetaData ls
	in currentMetaData $ unionMetaData loggedmeta
		(lastchanged ls loggedmeta)
  where
	lastchanged [] _ = emptyMetaData
	lastchanged ls (MetaData currentlyset) =
		let m = foldl' (flip M.union) M.empty (map genlastchanged ls)
		in MetaData $
			-- Add an overall lastchanged using the newest log
			-- item (log is in ascending order).
			M.insert lastChangedField (lastchangedval $ Prelude.last ls) $
			M.mapKeys mkLastChangedField $
			-- Only include fields that are currently set.
			m `M.intersection` currentlyset
	-- Makes each field have the timestamp as its value.
	genlastchanged l =
		let MetaData m = value l
		    ts = lastchangedval l
		in M.map (const ts) m
	lastchangedval l = S.singleton $ toMetaValue $ encodeBS $ showts $
		case changed l of
			VectorClock t -> t
			Unknown -> 0
	showts = formatPOSIXTime "%F@%H-%M-%S"
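	-- (showts produces values like "2014-02-13@01-12-22".)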

getCurrentRemoteMetaData :: RemoteStateHandle -> Key -> Annex RemoteMetaData
getCurrentRemoteMetaData (RemoteStateHandle u) k = extractRemoteMetaData u <$>
	getCurrentMetaData' remoteMetaDataLogFile k

{- Adds in some metadata, which can override existing values, or unset
 - them, but otherwise leaves any existing metadata as-is. -}
addMetaData :: Key -> MetaData -> Annex ()
addMetaData = addMetaData' (Annex.Branch.RegardingUUID []) metaDataLogFile
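
-- A minimal usage sketch in comment form (the value "work" is made up,
-- and this assumes the MetaField -> MetaValue -> MetaData -> MetaData
-- shape of updateMetaData from Types.MetaData):
--
-- > tagKey :: Key -> MetaField -> Annex ()
-- > tagKey k field = addMetaData k $
-- > 	updateMetaData field (toMetaValue (encodeBS "work")) emptyMetaData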
|
2018-08-31 16:23:22 +00:00
|
|
|
|
start implementing hidden git-annex repositories
This adds a separate journal, which does not currently get committed to
an index, but is planned to be committed to .git/annex/index-private.
Changes that are regarding a UUID that is private will get written to
this journal, and so will not be published into the git-annex branch.
All log writing should have been made to indicate the UUID it's
regarding, though I've not verified this yet.
Currently, no UUIDs are treated as private yet, a way to configure that
is needed.
The implementation is careful to not add any additional IO work when
privateUUIDsKnown is False. It will skip looking at the private journal
at all. So this should be free, or nearly so, unless the feature is
used. When it is used, all branch reads will be about twice as expensive.
It is very lucky -- or very prudent design -- that Annex.Branch.change
and maybeChange are the only ways to change a file on the branch,
and Annex.Branch.set is only internal use. That let Annex.Branch.get
always yield any private information that has been recorded, without
the risk that Annex.Branch.set might be called, with a non-private UUID,
and end up leaking the private information into the git-annex branch.
And, this relies on the way git-annex union merges the git-annex branch.
When reading a file, there can be a public and a private version, and
they are just concacenated together. That will be handled the same as if
there were two diverged git-annex branches that got union merged.
2021-04-20 18:32:41 +00:00
|
|
|
addMetaData' :: Annex.Branch.RegardingUUID -> (GitConfig -> Key -> RawFilePath) -> Key -> MetaData -> Annex ()
|
|
|
|
addMetaData' ru getlogfile k metadata =
|
|
|
|
addMetaDataClocked' ru getlogfile k metadata =<< currentVectorClock

{- Reusing the same CandidateVectorClock when making changes to the metadata
 - of multiple keys is a nice optimisation. The same metadata lines
 - will tend to be generated across the different log files, and so
 - git will be able to pack the data more efficiently. -}
addMetaDataClocked :: Key -> MetaData -> CandidateVectorClock -> Annex ()
addMetaDataClocked = addMetaDataClocked' (Annex.Branch.RegardingUUID []) metaDataLogFile
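
-- A sketch of that reuse (illustrative only; ks is a hypothetical list
-- of keys that should all get the same metadata m):
--
-- > setAll :: [Key] -> MetaData -> Annex ()
-- > setAll ks m = do
-- > 	c <- currentVectorClock
-- > 	forM_ ks $ \k -> addMetaDataClocked k m c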

addMetaDataClocked' :: Annex.Branch.RegardingUUID -> (GitConfig -> Key -> RawFilePath) -> Key -> MetaData -> CandidateVectorClock -> Annex ()
addMetaDataClocked' ru getlogfile k d@(MetaData m) c
	| d == emptyMetaData = noop
	| otherwise = do
		config <- Annex.getGitConfig
		Annex.Branch.change ru (getlogfile config k) $ \b ->
			let s = parseLog b
			    c' = advanceVectorClock c (map changed (S.toList s))
			    ent = LogEntry c' metadata
			in buildLog $ simplifyLog $ S.insert ent s
  where
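	-- Any lastchanged fields in the requested change are stripped
	-- out; they are generated when the log is read, and are never
	-- stored in it.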
	metadata = MetaData $ M.filterWithKey (\f _ -> not (isLastChangedField f)) m

addRemoteMetaData :: Key -> RemoteStateHandle -> MetaData -> Annex ()
addRemoteMetaData k (RemoteStateHandle u) m =
	addMetaData' (Annex.Branch.RegardingUUID [u]) remoteMetaDataLogFile k $ fromRemoteMetaData $
		RemoteMetaData u m

getMetaDataLog :: Key -> Annex (Log MetaData)
getMetaDataLog key = do
	config <- Annex.getGitConfig
	readLog $ metaDataLogFile config key

{- Copies the metadata from the old key to the new key.
 -
 - The exact content of the metadata file is copied, so that the timestamps
 - remain the same, and because this is more space-efficient in the git
 - repository.
 -
 - Any metadata already attached to the new key is not preserved.
 -
 - Returns True when metadata was copied.
 -}
copyMetaData :: Key -> Key -> Annex Bool
copyMetaData oldkey newkey
	| oldkey == newkey = return False
	| otherwise = do
		l <- getMetaDataLog oldkey
		if logToCurrentMetaData (S.toAscList l) == emptyMetaData
			then return False
			else do
				config <- Annex.getGitConfig
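				-- Overwrite whatever metadata log the new
				-- key had; the old key's log is copied
				-- verbatim in its place.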
				Annex.Branch.change
					(Annex.Branch.RegardingUUID [])
					(metaDataLogFile config newkey)
					(const $ buildLog l)
				return True

readLog :: RawFilePath -> Annex (Log MetaData)
readLog = parseLog <$$> Annex.Branch.get