{- git-annex command
 -
 - Copyright 2014-2016 Joey Hess <id@joeyh.name>
 -
 - Licensed under the GNU AGPL version 3 or higher.
 -}

module Command.MetaData where

import Command
import Annex.MetaData
import Annex.VectorClock
import Logs.MetaData
import Annex.WorkTree
import Messages.JSON (JSONActionItem(..), AddJSONActionItemFields(..))
import Types.Messages
import Utility.Aeson
import Limit

import qualified Data.Set as S
import qualified Data.Map as M
import qualified Data.Text as T
import qualified Data.ByteString.Char8 as B8
import qualified Data.ByteString.Lazy.UTF8 as BU
import Control.Concurrent

cmd :: Command
cmd = withGlobalOptions [jsonOptions, annexedMatchingOptions] $
	command "metadata" SectionMetaData
		"sets or gets metadata of a file"
		paramPaths (seek <$$> optParser)
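
-- Example invocations (illustrative; the file, tag, and field
-- names are invented):
--
--   git annex metadata --tag favorite somefile
--   git annex metadata --set author=joey somefile
--   git annex metadata --get author somefile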

data MetaDataOptions = MetaDataOptions
	{ forFiles :: CmdParams
	, getSet :: GetSet
	, keyOptions :: Maybe KeyOptions
	, batchOption :: BatchMode
	}

data GetSet = Get MetaField | GetAll | Set [ModMeta]

optParser :: CmdParamsDesc -> Parser MetaDataOptions
optParser desc = MetaDataOptions
	<$> cmdParams desc
	<*> ((Get <$> getopt) <|> (Set <$> some modopts) <|> pure GetAll)
	<*> optional parseKeyOptions
	<*> parseBatchOption False
  where
	getopt = option (eitherReader (mkMetaField . T.pack))
		( long "get" <> short 'g' <> metavar paramField
		<> help "get single metadata field"
		)
	modopts = option (eitherReader parseModMeta)
		( long "set" <> short 's' <> metavar "FIELD[+-]=VALUE"
		<> help "set or unset metadata value"
		)
		<|> (AddMeta tagMetaField . toMetaValue . encodeBS <$> strOption
			( long "tag" <> short 't' <> metavar "TAG"
			<> help "set a tag"
			))
		<|> (DelMeta tagMetaField . Just . toMetaValue . encodeBS <$> strOption
			( long "untag" <> short 'u' <> metavar "TAG"
			<> help "remove a tag"
			))
		<|> option (eitherReader (\f -> DelMeta <$> mkMetaField (T.pack f) <*> pure Nothing))
			( long "remove" <> short 'r' <> metavar "FIELD"
			<> help "remove all values of a field"
			)
		<|> flag' DelAllMeta
			( long "remove-all"
			<> help "remove all metadata"
			)
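
-- Rough sketch of the ModMeta values the options above produce
-- ("foo" is an invented tag; <field f> stands for the parsed field):
--
--   --tag foo    => AddMeta tagMetaField (toMetaValue (encodeBS "foo"))
--   --untag foo  => DelMeta tagMetaField (Just (toMetaValue (encodeBS "foo")))
--   --remove f   => DelMeta <field f> Nothing
--   --remove-all => DelAllMeta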
seek :: MetaDataOptions -> CommandSeek
seek o = case batchOption o of
	NoBatch -> do
		c <- currentVectorClock
		let ww = WarnUnmatchLsFiles
		let seeker = AnnexedFileSeeker
			{ startAction = start c o
			, checkContentPresent = Nothing
			, usesLocationLog = False
			}
		let seekaction = case getSet o of
			Get _ -> withFilesInGitAnnex ww
			GetAll -> withFilesInGitAnnex ww
			Set _ -> withFilesInGitAnnexNonRecursive ww
				"Not recursively setting metadata. Use --force to do that."
		withKeyOptions (keyOptions o) False seeker
			(commandAction . startKeys c o)
			(seekaction seeker)
			=<< workTreeItems ww (forFiles o)
	Batch fmt -> withMessageState $ \s -> case outputType s of
		JSONOutput _ -> ifM limited
			( giveup "combining --batch with file matching options is not currently supported"
			, batchOnly (keyOptions o) (forFiles o) $
				batchInput fmt parseJSONInput
					(commandAction . batchCommandStart . startBatch)
			)
		_ -> giveup "--batch is currently only supported in --json mode"
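
-- Sketch of a batch session as wired up above (illustrative; names and
-- the exact reply fields are invented). Each JSON input line gets one
-- JSON reply line, or a blank line when the input file is not annexed:
--
--   $ git annex metadata --batch --json
--   {"file":"foo.jpg","fields":{"author":["joey"]}}
--   {"command":"metadata","file":"foo.jpg","fields":{...},"success":true}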
start :: CandidateVectorClock -> MetaDataOptions -> SeekInput -> RawFilePath -> Key -> CommandStart
start c o si file k = startKeys c o (si, k, mkActionItem (k, afile))
  where
	afile = AssociatedFile (Just file)

startKeys :: CandidateVectorClock -> MetaDataOptions -> (SeekInput, Key, ActionItem) -> CommandStart
startKeys c o (si, k, ai) = case getSet o of
	Get f -> startingCustomOutput k $ do
		l <- S.toList . currentMetaDataValues f <$> getCurrentMetaData k
		liftIO $ forM_ l $
			B8.putStrLn . fromMetaValue
		next $ return True
	_ -> starting "metadata" ai si $
		perform c o k

perform :: CandidateVectorClock -> MetaDataOptions -> Key -> CommandPerform
perform c o k = case getSet o of
	Set ms -> do
		oldm <- getCurrentMetaData k
		let m = combineMetaData $ map (modMeta oldm) ms
		addMetaDataClocked k m c
		next $ cleanup k
	_ -> next $ cleanup k
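
-- Worked example for the Set branch (field and values invented): with
-- current metadata author=joey, applying the modifications
-- [AddMeta "tag" "x", DelMeta "author" Nothing] via modMeta yields one
-- MetaData diff each, and combineMetaData unions them into a single
-- change that adds tag=x and unsets every current author value.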
cleanup :: Key -> CommandCleanup
cleanup k = do
	m <- getCurrentMetaData k
	case toJSON' (AddJSONActionItemFields m) of
		Object o -> maybeShowJSON $ AesonObject o
		_ -> noop
	showLongNote $ unlines $ concatMap showmeta $
		map unwrapmeta (fromMetaData m)
	return True
  where
	unwrapmeta (f, v) = (fromMetaField f, map fromMetaValue (S.toList v))
	showmeta (f, vs) = map ((T.unpack f ++ "=") ++) (map decodeBS vs)
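
-- The long note above prints one field=value line per value, for example
-- (values invented):
--
--   author=joey
--   tag=favorite
--   tag=todo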
parseJSONInput :: String -> Annex (Either String (Either RawFilePath Key, MetaData))
parseJSONInput i = case eitherDecode (BU.fromString i) of
	Left e -> return (Left e)
	Right v -> do
		let m = case itemFields v of
			Nothing -> emptyMetaData
			Just m' -> m'
		case (itemKey v, itemFile v) of
			(Just k, _) -> return $
				Right (Right k, m)
			(Nothing, Just f) -> do
				f' <- liftIO $ relPathCwdToFile (toRawFilePath f)
				return $ Right (Left f', m)
			(Nothing, Nothing) -> return $
				Left "JSON input is missing either file or key"
startBatch :: (SeekInput, (Either RawFilePath Key, MetaData)) -> CommandStart
startBatch (si, (i, (MetaData m))) = case i of
	Left f -> do
		mk <- lookupKey f
		case mk of
			Just k -> go k (mkActionItem (k, AssociatedFile (Just f)))
			Nothing -> return Nothing
	Right k -> go k (mkActionItem k)
  where
	go k ai = starting "metadata" ai si $ do
		let o = MetaDataOptions
			{ forFiles = []
			, getSet = if MetaData m == emptyMetaData
				then GetAll
				else Set $ map mkModMeta (M.toList m)
			, keyOptions = Nothing
			, batchOption = NoBatch
			}
		t <- currentVectorClock
		-- It would be bad if two batch mode changes used exactly
		-- the same timestamp, since the order of adds and removals
		-- of the same metadata value would then be indeterminate.
		-- To guarantee that never happens, delay 1 microsecond,
		-- so the timestamp will always be different. This is
		-- probably less expensive than cleaner methods,
		-- such as taking from a list of increasing timestamps.
		liftIO $ threadDelay 1
		perform t o k
	mkModMeta (f, s)
		| S.null s = DelMeta f Nothing
		| otherwise = SetMeta f s
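
-- Note on the mapping above: an input field with an empty value list, eg
-- {"fields":{"author":[]}}, becomes DelMeta and removes the field, while
-- a non-empty list becomes SetMeta and replaces its values; input with no
-- fields at all is treated as a GetAll.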