1acdd18ea8
* Deal with clock skew, both forwards and backwards, when logging information to the git-annex branch.
* GIT_ANNEX_VECTOR_CLOCK can now be set to a fixed value (eg 1) rather than needing to be advanced each time a new change is made.
* Misuse of GIT_ANNEX_VECTOR_CLOCK will no longer confuse git-annex.

When changing a file in the git-annex branch, the vector clock to use is now determined by first looking at the current time (or GIT_ANNEX_VECTOR_CLOCK when set), and comparing it to the newest vector clock already in use in that file. If a newer timestamp is already in use, the clock is advanced one second past it instead.

When the clock is set to a time in the past, this avoids logging with an old timestamp, which would risk that log line later being ignored in favor of a "newer" line that is not really newer.

When a log entry has been made with a clock that was set far ahead in the future, this avoids newer information being logged with an older timestamp and so being ignored in favor of that future-timestamped information. Once all clocks are fixed, this will result in the vector clocks being incremented, until enough time has passed that the current time gets back ahead of the vector clock value, and then it will return to usual operation.

(This latter situation is not ideal, but it seems the best that can be done. The issue with it is that, since all writers will be incrementing the last vector clock they saw, there's no way to tell when one writer made a write significantly later in time than another, so the earlier write might arbitrarily be picked when merging. This problem is why git-annex uses timestamps in the first place, rather than pure vector clocks.)

Advancing forward by 1 second is somewhat arbitrary. setDead advances a timestamp by just 1 picosecond, and the vector clock could too, but then it would interfere with setDead, which wants to be overruled by any change. It could use 2 picoseconds or something, but that seems weird. It could just as well advance forward by a minute or whatever, but then it would be harder for real time to catch up with the vector clock when forward clock skew has happened.

A complication is that many log files contain several different pieces of information, and it may be best to only use vector clocks for the same piece of information. For example, a key's location log file contains InfoPresent/InfoMissing for each UUID, and it only looks at the vector clocks for the UUID that is being changed, not other UUIDs. Exactly where the dividing line is can be hard to determine, though. Consider metadata logs, where a field "tag" can have multiple values set at different times. Should it advance forward past the last tag? Probably. What about when a different field is set; should it look at the clocks of other fields? Perhaps not, but currently it does, and this does not seem like it will cause any problems.

Another one I'm not entirely sure about is the export log, which is keyed by (fromuuid, touuid). So if multiple repos are exporting to the same remote, different vector clocks can be used for that remote. That is probably ok, because it does not try to determine what order things occurred in when there was an export conflict.

Sponsored-by: Jochen Bartl on Patreon
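The rule described above can be summarized as: take the candidate clock (current time, or GIT_ANNEX_VECTOR_CLOCK when set); if any vector clock already recorded for the piece of information being changed is at least as new, use that newest clock advanced by one second instead. A minimal sketch of that rule, assuming a vector clock is just a POSIX timestamp; the names Clock and advanceClock here are illustrative, not the actual git-annex API:

import Data.Fixed (Pico)

-- Illustrative sketch only, not the git-annex implementation: a vector
-- clock is modeled as a plain POSIX timestamp in seconds.
newtype Clock = Clock Pico
	deriving (Eq, Ord, Show)

-- Pick the clock to log with, given the candidate clock (the current
-- time, or GIT_ANNEX_VECTOR_CLOCK when set) and the clocks already in
-- use for the piece of information being changed.
advanceClock :: Clock -> [Clock] -> Clock
advanceClock candidate [] = candidate
advanceClock candidate used
	| candidate > newest = candidate
	-- a newer (or equal) clock is already in use; advance one second
	-- past it rather than logging with an older timestamp
	| otherwise = oneSecondPast newest
  where
	newest = maximum used
	oneSecondPast (Clock t) = Clock (t + 1)

Under this rule, with GIT_ANNEX_VECTOR_CLOCK=1 every write keeps using clock 1 until some line already carries a newer clock, at which point writes start advancing one second past it, as described above.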
127 lines · 3.6 KiB · Haskell
{- git-annex presence log, pure operations
 -
 - Copyright 2010-2019 Joey Hess <id@joeyh.name>
 -
 - Licensed under the GNU AGPL version 3 or higher.
 -}

module Logs.Presence.Pure where

import Annex.Common
import Annex.VectorClock
import Logs.Line
import Utility.QuickCheck

import qualified Data.Map as M
import qualified Data.ByteString.Lazy as L
import qualified Data.ByteString as S
import qualified Data.ByteString.Char8 as C8
import qualified Data.Attoparsec.ByteString.Lazy as A
import Data.Attoparsec.ByteString.Char8 (char, anyChar)
import Data.ByteString.Builder

newtype LogInfo = LogInfo { fromLogInfo :: S.ByteString }
	deriving (Show, Eq, Ord)

data LogLine = LogLine
	{ date :: VectorClock
	, status :: LogStatus
	, info :: LogInfo
	} deriving (Eq, Show)

data LogStatus = InfoPresent | InfoMissing | InfoDead
	deriving (Eq, Show, Bounded, Enum)

parseLog :: L.ByteString -> [LogLine]
parseLog = fromMaybe [] . A.maybeResult . A.parse logParser

logParser :: A.Parser [LogLine]
logParser = parseLogLines $ LogLine
	<$> vectorClockParser
	<* char ' '
	<*> statusParser
	<* char ' '
	<*> (LogInfo <$> A.takeByteString)

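{- For illustration (comment added, not part of the original module):
 - a presence log line is a vector clock, a status character, and the
 - logged info, separated by single spaces. For a location log the info
 - is a repository UUID, so a line looks roughly like:
 -
 -   1317929100.042921s 1 e605dca6-446a-11e0-8b2f-002170d25c55
 -
 - The exact clock format comes from Annex.VectorClock. parseLog and
 - buildLog are expected to round-trip such lines, which is what
 - prop_parse_build_presence_log at the end of this module checks.
 -}
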
statusParser :: A.Parser LogStatus
statusParser = do
	c <- anyChar
	case c of
		'1' -> return InfoPresent
		'0' -> return InfoMissing
		'X' -> return InfoDead
		_ -> fail "unknown status character"

parseStatus :: String -> Maybe LogStatus
parseStatus "1" = Just InfoPresent
parseStatus "0" = Just InfoMissing
parseStatus "X" = Just InfoDead
parseStatus _ = Nothing

buildLog :: [LogLine] -> Builder
buildLog = mconcat . map genline
  where
	genline (LogLine c s (LogInfo i)) =
		buildVectorClock c <> sp <> genstatus s <> sp <> byteString i <> nl
	sp = charUtf8 ' '
	nl = charUtf8 '\n'
	genstatus InfoPresent = charUtf8 '1'
	genstatus InfoMissing = charUtf8 '0'
	genstatus InfoDead = charUtf8 'X'

{- Given a log, returns only the info that is still in effect. -}
getLog :: L.ByteString -> [LogInfo]
getLog = map info . filterPresent . parseLog

{- Returns the info from LogLines that are in effect. -}
filterPresent :: [LogLine] -> [LogLine]
filterPresent = filter (\l -> InfoPresent == status l) . compactLog

{- Compacts a set of logs, returning a subset that contains the current
 - status. -}
compactLog :: [LogLine] -> [LogLine]
compactLog = mapLog . logMap

type LogMap = M.Map LogInfo LogLine

mapLog :: LogMap -> [LogLine]
mapLog = M.elems

logMap :: [LogLine] -> LogMap
logMap = foldr insertNewerLogLine M.empty

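{- For illustration (comment added, not part of the original module):
 - logMap keeps only one LogLine per LogInfo, preferring the one with the
 - newest vector clock, so compactLog of, roughly,
 -
 -   1287290776.000000s 1 e605dca6-...
 -   1317929100.000000s 0 e605dca6-...
 -
 - keeps only the second line. This is why the clock skew handling
 - described in the commit message matters: a change logged with an old
 - timestamp would lose out here to a stale line carrying a newer one.
 -}
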
insertBetter :: (LogLine -> Bool) -> LogLine -> LogMap -> Maybe LogMap
insertBetter betterthan l m
	| better = Just (M.insert i l m)
	| otherwise = Nothing
  where
	better = maybe True betterthan (M.lookup i m)
	i = info l

{- Inserts a log into a map of logs, if the log has newer
 - information than the other logs in the map for the same info. -}
insertNewerLogLine :: LogLine -> LogMap -> LogMap
insertNewerLogLine l m = fromMaybe m $ insertBetter newer l m
  where
	newer l' = date l' <= date l

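{- Note (comment added for clarity): because insertNewerLogLine uses <=
 - above, a line whose vector clock equals the existing one also replaces
 - it, so a change logged with the same clock value still takes effect. -}
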
{- Inserts the log unless there's already one in the map with
 - the same status for its info, in which case there's no need to
 - change anything, to avoid log churn. -}
insertNewStatus :: LogLine -> LogMap -> Maybe LogMap
insertNewStatus l m = insertBetter diffstatus l m
  where
	diffstatus l' = status l' /= status l

instance Arbitrary LogLine where
	arbitrary = LogLine
		<$> arbitrary
		<*> elements [minBound..maxBound]
		<*> (LogInfo <$> arbinfo)
	  where
		arbinfo = (encodeBS <$> arbitrary) `suchThat`
			(\b -> C8.notElem '\n' b && C8.notElem '\r' b)

prop_parse_build_presence_log :: [LogLine] -> Bool
prop_parse_build_presence_log l =
	parseLog (toLazyByteString (buildLog l)) == l