{- P2P protocol
 -
 - See doc/design/p2p_protocol.mdwn
 -
 - Copyright 2016-2024 Joey Hess <id@joeyh.name>
 -
 - Licensed under the GNU AGPL version 3 or higher.
 -}
|
|
|
|
|
2016-12-02 20:39:01 +00:00
|
|
|
{-# LANGUAGE DeriveFunctor, TemplateHaskell, FlexibleContexts #-}
|
|
|
|
{-# LANGUAGE TypeSynonymInstances, FlexibleInstances, RankNTypes #-}
|
2024-06-27 16:20:22 +00:00
|
|
|
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
|
2016-12-02 20:39:01 +00:00
|
|
|
{-# OPTIONS_GHC -fno-warn-orphans #-}
|
2016-11-17 21:19:04 +00:00
|
|
|
|
2016-11-22 18:34:49 +00:00
|
|
|
module P2P.Protocol where
|
2016-11-17 21:19:04 +00:00
|
|
|
|
|
|
|
import qualified Utility.SimpleProtocol as Proto
|
2018-03-13 18:18:30 +00:00
|
|
|
import Types (Annex)
|
2016-11-17 21:19:04 +00:00
|
|
|
import Types.Key
|
|
|
|
import Types.UUID
|
2021-02-10 16:41:05 +00:00
|
|
|
import Types.Transfer
|
2021-08-18 17:19:02 +00:00
|
|
|
import Types.Remote (Verification(..))
|
|
|
|
import Utility.Hash (IncrementalVerifier(..))
|
2016-11-22 18:18:34 +00:00
|
|
|
import Utility.AuthToken
|
2016-11-17 21:19:04 +00:00
|
|
|
import Utility.Applicative
|
|
|
|
import Utility.PartialPrelude
|
2016-12-07 17:37:35 +00:00
|
|
|
import Utility.Metered
|
2019-11-26 19:27:22 +00:00
|
|
|
import Utility.FileSystemEncoding
|
2024-07-03 21:54:01 +00:00
|
|
|
import Utility.MonotonicClock
|
2016-12-02 20:39:01 +00:00
|
|
|
import Git.FilePath
|
2016-12-09 18:52:38 +00:00
|
|
|
import Annex.ChangedRefs (ChangedRefs)
|
toward SafeDropProof expiry checking
Added Maybe POSIXTime to SafeDropProof, which gets set when the proof is
based on a LockedCopy. If there are several LockedCopies, it uses the
closest expiry time. That is not optimal, it may be that the proof
expires based on one LockedCopy but another one has not expired. But
that seems unlikely to really happen, and anyway the user can just
re-run a drop if it fails due to expiry.
Pass the SafeDropProof to removeKey, which is responsible for checking
it for expiry in situations where that could be a problem. Which really
only means in Remote.Git.
Made Remote.Git check expiry when dropping from a local remote.
Checking expiry when dropping from a P2P remote is not yet implemented.
P2P.Protocol.remove has SafeDropProof plumbed through to it for that
purpose.
Fixing the remaining 2 build warnings should complete this work.
Note that the use of a POSIXTime here means that if the clock gets set
forward while git-annex is in the middle of a drop, it may say that
dropping took too long. That seems ok. Less ok is that if the clock gets
turned back a sufficient amount (eg 5 minutes), proof expiry won't be
noticed. It might be better to use the Monotonic clock, but that doesn't
advance when a laptop is suspended, and while there is the linux
Boottime clock, that is not available on other systems. Perhaps a
combination of POSIXTime and the Monotonic clock could detect laptop
suspension and also detect clock being turned back?
There is a potential future flag day where
p2pDefaultLockContentRetentionDuration is not assumed, but is probed
using the P2P protocol, and peers that don't support it can no longer
produce a LockedCopy. Until that happens, when git-annex is
communicating with older peers there is a risk of data loss when
a ssh connection closes during LOCKCONTENT.
2024-07-04 16:23:46 +00:00
|
|
|
import Types.NumCopies
|
2016-11-17 21:19:04 +00:00
|
|
|
|
|
|
|
import Control.Monad
|
|
|
|
import Control.Monad.Free
|
|
|
|
import Control.Monad.Free.TH
|
Add content locking to P2P protocol
Is content locking needed in the P2P protocol? Based on re-reading
bugs/concurrent_drop--from_presence_checking_failures.mdwn,
I think so: Peers can form cycles, and multiple peers can all be trying
to drop the same content.
So, added content locking to the protocol, with some difficulty.
The implementation is fine as far as it goes, but note the warning
comment for lockContentWhile -- if the connection to the peer is dropped
unexpectedly, the peer will then unlock the content, and yet the local
side will still think it's locked.
To be honest I'm not sure if Remote.Git's lockKey for ssh remotes
doesn't have the same problem. It checks that the
"ssh remote git-annex-shell lockcontent"
process has not exited, but if the connection closes afer that check,
the lockcontent command will unlock it, and yet the local side will
still think it's locked.
Probably this needs to be fixed by eg, making lockcontent catch any
execptions due to the connection closing, and in that case, wait a
significantly long time before dropping the lock.
This commit was sponsored by Anthony DeRobertis on Patreon.
2016-11-18 05:32:24 +00:00
|
|
|
import Control.Monad.Catch
|
2016-11-19 20:30:57 +00:00
|
|
|
import System.Exit (ExitCode(..))
|
2016-11-20 16:08:16 +00:00
|
|
|
import System.IO
|
2023-03-02 14:53:12 +00:00
|
|
|
import qualified System.FilePath.ByteString as P
|
|
|
|
import qualified Data.ByteString as B
|
2016-11-17 21:19:04 +00:00
|
|
|
import qualified Data.ByteString.Lazy as L
|
2024-06-27 16:20:22 +00:00
|
|
|
import qualified Data.Set as S
|
2016-12-02 20:39:01 +00:00
|
|
|
import Data.Char
|
2024-07-09 18:26:30 +00:00
|
|
|
import Data.Maybe
|
2024-07-04 17:42:09 +00:00
|
|
|
import Data.Time.Clock.POSIX
|
2016-12-08 17:58:03 +00:00
|
|
|
import Control.Applicative
|
2024-07-08 01:20:50 +00:00
|
|
|
import Control.DeepSeq
|
2016-12-08 17:58:03 +00:00
|
|
|
import Prelude
|
2016-11-17 21:19:04 +00:00
|
|
|
|
|
|
|
-- | A byte offset into content, as used by GET and PUT-FROM to
-- resume interrupted transfers.
newtype Offset = Offset Integer
    deriving (Show, Eq, NFData, Num, Real, Ord, Enum, Integral)
|
2016-11-17 21:19:04 +00:00
|
|
|
|
|
|
|
-- | A length in bytes (eg of content being transferred).
newtype Len = Len Integer
    deriving (Show)
|
|
|
|
|
2018-03-12 17:43:19 +00:00
|
|
|
-- | Version of the P2P protocol, negotiated with the VERSION message.
newtype ProtocolVersion = ProtocolVersion Integer
    deriving (Show, Eq, Ord)
|
|
|
|
|
|
|
|
-- | The version assumed before any VERSION negotiation has happened.
defaultProtocolVersion :: ProtocolVersion
defaultProtocolVersion = ProtocolVersion 0
|
|
|
|
|
|
|
|
-- | The newest protocol version this implementation supports.
maxProtocolVersion :: ProtocolVersion
maxProtocolVersion = ProtocolVersion 3
|
2018-03-12 17:43:19 +00:00
|
|
|
|
2024-07-23 18:58:49 +00:00
|
|
|
-- In order from newest to oldest.
allProtocolVersions :: [ProtocolVersion]
allProtocolVersions = map ProtocolVersion [3, 2, 1, 0]
|
|
|
|
|
2020-12-09 17:46:42 +00:00
|
|
|
-- | An AssociatedFile wrapped for transport over the protocol;
-- its Serializable instance escapes whitespace (see below).
newtype ProtoAssociatedFile = ProtoAssociatedFile AssociatedFile
    deriving (Show)
|
|
|
|
|
2018-03-06 18:58:24 +00:00
|
|
|
-- | Service as used by the connect message in gitremote-helpers(1)
data Service = UploadPack | ReceivePack
    deriving (Show)
|
|
|
|
|
2018-03-13 18:18:30 +00:00
|
|
|
-- | Whether content remained valid while it was being sent.
data Validity = Valid | Invalid
    deriving (Show)
|
2024-06-27 16:20:22 +00:00
|
|
|
|
|
|
|
-- | Set of UUIDs sent in a BYPASS message.
newtype Bypass = Bypass (S.Set UUID)
    deriving (Show, Monoid, Semigroup)
|
2018-03-13 18:18:30 +00:00
|
|
|
|
2016-11-17 21:19:04 +00:00
|
|
|
-- | Messages in the protocol. The peer that makes the connection
-- always initiates requests, and the other peer makes responses to them.
data Message
    = AUTH UUID AuthToken -- uuid of the peer that is authenticating
    | AUTH_SUCCESS UUID -- uuid of the remote peer
    | AUTH_FAILURE
    | VERSION ProtocolVersion
    | CONNECT Service
    | CONNECTDONE ExitCode
    | NOTIFYCHANGE
    | CHANGED ChangedRefs
    | CHECKPRESENT Key
    | LOCKCONTENT Key
    | UNLOCKCONTENT
    | REMOVE Key
    | REMOVE_BEFORE MonotonicTimestamp Key
    | GETTIMESTAMP
    | GET Offset ProtoAssociatedFile Key
    | PUT ProtoAssociatedFile Key
    | PUT_FROM Offset
    | ALREADY_HAVE
    | ALREADY_HAVE_PLUS [UUID]
    | SUCCESS
    | SUCCESS_PLUS [UUID]
    | FAILURE
    | FAILURE_PLUS [UUID]
    | BYPASS Bypass
    | DATA Len -- followed by bytes of data
    | VALIDITY Validity
    | TIMESTAMP MonotonicTimestamp
    | ERROR String
    deriving (Show)
|
|
|
|
|
2016-11-20 16:08:16 +00:00
|
|
|
-- Serialization to the wire format. The command words below are part
-- of the protocol and must not be changed.
instance Proto.Sendable Message where
    formatMessage (AUTH uuid authtoken) = ["AUTH", Proto.serialize uuid, Proto.serialize authtoken]
    formatMessage (AUTH_SUCCESS uuid) = ["AUTH-SUCCESS", Proto.serialize uuid]
    formatMessage AUTH_FAILURE = ["AUTH-FAILURE"]
    formatMessage (VERSION v) = ["VERSION", Proto.serialize v]
    formatMessage (CONNECT service) = ["CONNECT", Proto.serialize service]
    formatMessage (CONNECTDONE exitcode) = ["CONNECTDONE", Proto.serialize exitcode]
    formatMessage NOTIFYCHANGE = ["NOTIFYCHANGE"]
    formatMessage (CHANGED refs) = ["CHANGED", Proto.serialize refs]
    formatMessage (CHECKPRESENT key) = ["CHECKPRESENT", Proto.serialize key]
    formatMessage (LOCKCONTENT key) = ["LOCKCONTENT", Proto.serialize key]
    formatMessage UNLOCKCONTENT = ["UNLOCKCONTENT"]
    formatMessage (REMOVE key) = ["REMOVE", Proto.serialize key]
    formatMessage (REMOVE_BEFORE ts key) = ["REMOVE-BEFORE", Proto.serialize ts, Proto.serialize key]
    formatMessage GETTIMESTAMP = ["GETTIMESTAMP"]
    formatMessage (GET offset af key) = ["GET", Proto.serialize offset, Proto.serialize af, Proto.serialize key]
    formatMessage (PUT af key) = ["PUT", Proto.serialize af, Proto.serialize key]
    formatMessage (PUT_FROM offset) = ["PUT-FROM", Proto.serialize offset]
    formatMessage ALREADY_HAVE = ["ALREADY-HAVE"]
    formatMessage (ALREADY_HAVE_PLUS uuids) = ("ALREADY-HAVE-PLUS":map Proto.serialize uuids)
    formatMessage SUCCESS = ["SUCCESS"]
    formatMessage (SUCCESS_PLUS uuids) = ("SUCCESS-PLUS":map Proto.serialize uuids)
    formatMessage FAILURE = ["FAILURE"]
    formatMessage (FAILURE_PLUS uuids) = ("FAILURE-PLUS":map Proto.serialize uuids)
    formatMessage (BYPASS (Bypass uuids)) = ("BYPASS":map Proto.serialize (S.toList uuids))
    formatMessage (DATA len) = ["DATA", Proto.serialize len]
    formatMessage (VALIDITY Valid) = ["VALID"]
    formatMessage (VALIDITY Invalid) = ["INVALID"]
    formatMessage (TIMESTAMP ts) = ["TIMESTAMP", Proto.serialize ts]
    formatMessage (ERROR err) = ["ERROR", Proto.serialize err]
|
|
|
|
|
|
|
|
-- Parsing of the wire format; inverse of the Sendable instance.
-- Unknown commands fall through to parseFail.
instance Proto.Receivable Message where
    parseCommand "AUTH" = Proto.parse2 AUTH
    parseCommand "AUTH-SUCCESS" = Proto.parse1 AUTH_SUCCESS
    parseCommand "AUTH-FAILURE" = Proto.parse0 AUTH_FAILURE
    parseCommand "VERSION" = Proto.parse1 VERSION
    parseCommand "CONNECT" = Proto.parse1 CONNECT
    parseCommand "CONNECTDONE" = Proto.parse1 CONNECTDONE
    parseCommand "NOTIFYCHANGE" = Proto.parse0 NOTIFYCHANGE
    parseCommand "CHANGED" = Proto.parse1 CHANGED
    parseCommand "CHECKPRESENT" = Proto.parse1 CHECKPRESENT
    parseCommand "LOCKCONTENT" = Proto.parse1 LOCKCONTENT
    parseCommand "UNLOCKCONTENT" = Proto.parse0 UNLOCKCONTENT
    parseCommand "REMOVE" = Proto.parse1 REMOVE
    parseCommand "REMOVE-BEFORE" = Proto.parse2 REMOVE_BEFORE
    parseCommand "GETTIMESTAMP" = Proto.parse0 GETTIMESTAMP
    parseCommand "GET" = Proto.parse3 GET
    parseCommand "PUT" = Proto.parse2 PUT
    parseCommand "PUT-FROM" = Proto.parse1 PUT_FROM
    parseCommand "ALREADY-HAVE" = Proto.parse0 ALREADY_HAVE
    parseCommand "ALREADY-HAVE-PLUS" = Proto.parseList ALREADY_HAVE_PLUS
    parseCommand "SUCCESS" = Proto.parse0 SUCCESS
    parseCommand "SUCCESS-PLUS" = Proto.parseList SUCCESS_PLUS
    parseCommand "FAILURE" = Proto.parse0 FAILURE
    parseCommand "FAILURE-PLUS" = Proto.parseList FAILURE_PLUS
    parseCommand "BYPASS" = Proto.parseList (BYPASS . Bypass . S.fromList)
    parseCommand "DATA" = Proto.parse1 DATA
    parseCommand "VALID" = Proto.parse0 (VALIDITY Valid)
    parseCommand "INVALID" = Proto.parse0 (VALIDITY Invalid)
    parseCommand "TIMESTAMP" = Proto.parse1 TIMESTAMP
    parseCommand "ERROR" = Proto.parse1 ERROR
    parseCommand _ = Proto.parseFail
|
|
|
|
|
2018-03-12 17:43:19 +00:00
|
|
|
instance Proto.Serializable ProtocolVersion where
    serialize (ProtocolVersion n) = show n
    deserialize = ProtocolVersion <$$> readish
|
|
|
|
|
2016-11-20 16:08:16 +00:00
|
|
|
instance Proto.Serializable Offset where
    serialize (Offset n) = show n
    deserialize = Offset <$$> readish
|
|
|
|
|
|
|
|
instance Proto.Serializable Len where
    serialize (Len n) = show n
    deserialize = Len <$$> readish
|
|
|
|
|
2024-07-03 20:59:22 +00:00
|
|
|
instance Proto.Serializable MonotonicTimestamp where
    serialize (MonotonicTimestamp n) = show n
    deserialize = MonotonicTimestamp <$$> readish
|
|
|
|
|
2016-11-20 16:08:16 +00:00
|
|
|
instance Proto.Serializable Service where
    serialize UploadPack = "git-upload-pack"
    serialize ReceivePack = "git-receive-pack"
    deserialize "git-upload-pack" = Just UploadPack
    deserialize "git-receive-pack" = Just ReceivePack
    deserialize _ = Nothing
|
|
|
|
|
2020-12-09 17:46:42 +00:00
|
|
|
-- | Since ProtoAssociatedFile is not the last thing in a protocol line,
-- its serialization cannot contain any whitespace. This is handled
-- by replacing whitespace with '%' (and '%' with '%%')
--
-- When deserializing an AssociatedFile from a peer, that escaping is
-- reversed. Unfortunately, an input tab will be deescaped to a space
-- though. And it's sanitized, to avoid any control characters that might
-- cause problems when it's displayed to the user.
--
-- These mungings are ok, because a ProtoAssociatedFile is normally
-- only displayed to the user and so does not need to match a file on disk.
-- It may also be used in checking preferred content, which is very
-- unlikely to care about spaces vs tabs or control characters.
instance Proto.Serializable ProtoAssociatedFile where
    serialize (ProtoAssociatedFile (AssociatedFile Nothing)) = ""
    serialize (ProtoAssociatedFile (AssociatedFile (Just af))) =
        decodeBS $ toInternalGitPath $ encodeBS $ concatMap esc $ fromRawFilePath af
      where
        esc '%' = "%%"
        esc c
            | isSpace c = "%"
            | otherwise = [c]

    deserialize s = case fromInternalGitPath $ toRawFilePath $ deesc [] s of
        f
            | B.null f -> Just $ ProtoAssociatedFile $ AssociatedFile Nothing
            | P.isRelative f -> Just $ ProtoAssociatedFile $
                AssociatedFile $ Just f
            | otherwise -> Nothing
      where
        deesc b [] = reverse b
        deesc b ('%':'%':cs) = deesc ('%':b) cs
        -- A lone '%' encodes whitespace; deescape it to a space,
        -- as documented above. (Was '_', which contradicted both
        -- the documentation and the esc function's encoding.)
        deesc b ('%':cs) = deesc (' ':b) cs
        deesc b (c:cs)
            | isControl c = deesc ('_':b) cs
            | otherwise = deesc (c:b) cs
|
|
|
|
|
2016-11-20 16:08:16 +00:00
|
|
|
-- | Free monad for the protocol, combining net communication,
-- and local actions.
data ProtoF c = Net (NetF c) | Local (LocalF c)
    deriving (Functor)

type Proto = Free ProtoF
|
|
|
|
|
|
|
|
-- | Lift a net action into the combined protocol monad.
net :: Net a -> Proto a
net = hoistFree Net
|
|
|
|
|
|
|
|
-- | Lift a local action into the combined protocol monad.
local :: Local a -> Proto a
local = hoistFree Local
|
|
|
|
|
|
|
|
-- | Network-side primitives of the protocol's free monad.
data NetF c
    = SendMessage Message c
    | ReceiveMessage (Maybe Message -> c)
    | SendBytes Len L.ByteString MeterUpdate c
    -- ^ Sends exactly Len bytes of data. (Any more or less will
    -- confuse the receiver.)
    | ReceiveBytes Len MeterUpdate (L.ByteString -> c)
    -- ^ Streams bytes from peer. Stops once Len are read,
    -- or if connection is lost. This allows resuming
    -- interrupted transfers.
    | CheckAuthToken UUID AuthToken (Bool -> c)
    | RelayService Service c
    -- ^ Runs a service, relays its output to the peer, and data
    -- from the peer to it.
    | Relay RelayHandle RelayHandle (ExitCode -> c)
    -- ^ Reads from the first RelayHandle, and sends the data to a
    -- peer, while at the same time accepting input from the peer
    -- which is sent to the second RelayHandle. Continues until
    -- the peer sends an ExitCode.
    | SetProtocolVersion ProtocolVersion c
    -- ^ Called when a new protocol version has been negotiated.
    | GetProtocolVersion (ProtocolVersion -> c)
    | GetMonotonicTimestamp (MonotonicTimestamp -> c)
    deriving (Functor)

type Net = Free NetF

-- | A Handle used when relaying data between a service and a peer.
newtype RelayHandle = RelayHandle Handle
|
|
|
|
|
|
|
|
data LocalF c
|
2016-12-02 17:47:42 +00:00
|
|
|
= TmpContentSize Key (Len -> c)
|
|
|
|
-- ^ Gets size of the temp file where received content may have
|
|
|
|
-- been stored. If not present, returns 0.
|
2016-12-06 19:05:44 +00:00
|
|
|
| FileSize FilePath (Len -> c)
|
|
|
|
-- ^ Gets size of the content of a file. If not present, returns 0.
|
2016-12-02 17:47:42 +00:00
|
|
|
| ContentSize Key (Maybe Len -> c)
|
|
|
|
-- ^ Gets size of the content of a key, when the full content is
|
|
|
|
-- present.
|
2024-07-01 14:42:27 +00:00
|
|
|
| ReadContent Key AssociatedFile (Maybe FilePath) Offset (L.ByteString -> Proto Validity -> Proto (Maybe [UUID])) (Maybe [UUID] -> c)
|
2016-12-08 23:56:02 +00:00
|
|
|
-- ^ Reads the content of a key and sends it to the callback.
|
Fixed some other potential hangs in the P2P protocol
Finishes the start made in 983c9d5a53189f71797591692c0ed675f5bd1c16, by
handling the case where `transfer` fails for some other reason, and so the
ReadContent callback does not get run. I don't know of a case where
`transfer` does fail other than the locking dealt with in that commit, but
it's good to have a guarantee.
StoreContent and StoreContentTo had a similar problem.
Things like `getViaTmp` may decide not to run the transfer action.
And `transfer` could certianly fail, if another transfer of the same
object was in progress. (Or a different object when annex.pidlock is set.)
If the transfer action was not run, the content of the object would
not all get consumed, and so would get interpreted as protocol commands,
which would not go well.
My approach to fixing all of these things is to set a TVar only
once all the data in the transfer is known to have been read/written.
This way the internals of `transfer`, `getViaTmp` etc don't matter.
So in ReadContent, it checks if the transfer completed.
If not, as long as it didn't throw an exception, send empty and Invalid
data to the callback. On an exception the state of the protocol is unknown
so it has to raise ProtoFailureException and close the connection,
same as before.
In StoreContent, if the transfer did not complete
some portion of the DATA has been read, so the protocol is in an unknown
state and it has to close the conection as well.
(The ProtoFailureMessage used here matches the one in Annex.Transfer, which
is the most likely reason. Not ideal to duplicate it..)
StoreContent did not ever close the protocol connection before. So this is
a protocol change, but only in an exceptional circumstance, and it's not
going to break anything, because clients already need to deal with the
connection breaking at any point.
The way this new behavior looks (here origin has annex.pidlock = true so will
only accept one upload to it at a time):
git annex copy --to origin -J2
copy x (to origin...) ok
copy y (to origin...)
Lost connection (fd:25: hGetChar: end of file)
This work is supported by the NIH-funded NICEMAN (ReproNim TR&D3) project.
2018-11-06 18:44:00 +00:00
|
|
|
-- Must run the callback, or terminate the protocol connection.
|
|
|
|
--
|
Fix a P2P protocol hang
When readContent got Nothing from prepSendAnnex, it did not run its
callback, and the callback is what sends the DATA reply.
sendContent checks with contentSize that the object file is present, but
that doesn't really guarantee that prepSendAnnex won't return Nothing.
So, it was possible for a P2P protocol GET to not receive a response,
and appear to hang. When what it's really doing is waiting for the next
protocol command.
This seems most likely to happen when the annex is in direct mode, and the
file being requested has been modified. It could also happen in an indirect
mode repository if genInodeCache somehow failed. Perhaps due to a race
with a drop of the content file.
Fixed by making readContent behave the way its spec said it should,
and run the callback with L.empty in this case.
Note that, it's finee for readContent to send any amount of data
to the callback, including L.empty. sendBytes deals with that
by making sure it sends exactly the specified number of bytes,
aborting the protocol if it's too short. So, when L.empty is sent,
the protocol will end up aborting.
This work is supported by the NIH-funded NICEMAN (ReproNim TR&D3) project.
2018-11-02 17:41:50 +00:00
|
|
|
-- May send any amount of data, including L.empty if the content is
|
|
|
|
-- not available. The callback must deal with that.
|
Fixed some other potential hangs in the P2P protocol
Finishes the start made in 983c9d5a53189f71797591692c0ed675f5bd1c16, by
handling the case where `transfer` fails for some other reason, and so the
ReadContent callback does not get run. I don't know of a case where
`transfer` does fail other than the locking dealt with in that commit, but
it's good to have a guarantee.
StoreContent and StoreContentTo had a similar problem.
Things like `getViaTmp` may decide not to run the transfer action.
And `transfer` could certianly fail, if another transfer of the same
object was in progress. (Or a different object when annex.pidlock is set.)
If the transfer action was not run, the content of the object would
not all get consumed, and so would get interpreted as protocol commands,
which would not go well.
My approach to fixing all of these things is to set a TVar only
once all the data in the transfer is known to have been read/written.
This way the internals of `transfer`, `getViaTmp` etc don't matter.
So in ReadContent, it checks if the transfer completed.
If not, as long as it didn't throw an exception, send empty and Invalid
data to the callback. On an exception the state of the protocol is unknown
so it has to raise ProtoFailureException and close the connection,
same as before.
In StoreContent, if the transfer did not complete
some portion of the DATA has been read, so the protocol is in an unknown
state and it has to close the conection as well.
(The ProtoFailureMessage used here matches the one in Annex.Transfer, which
is the most likely reason. Not ideal to duplicate it..)
StoreContent did not ever close the protocol connection before. So this is
a protocol change, but only in an exceptional circumstance, and it's not
going to break anything, because clients already need to deal with the
connection breaking at any point.
The way this new behavior looks (here origin has annex.pidlock = true so will
only accept one upload to it at a time):
git annex copy --to origin -J2
copy x (to origin...) ok
copy y (to origin...)
Lost connection (fd:25: hGetChar: end of file)
This work is supported by the NIH-funded NICEMAN (ReproNim TR&D3) project.
2018-11-06 18:44:00 +00:00
|
|
|
--
|
Fix a P2P protocol hang
When readContent got Nothing from prepSendAnnex, it did not run its
callback, and the callback is what sends the DATA reply.
sendContent checks with contentSize that the object file is present, but
that doesn't really guarantee that prepSendAnnex won't return Nothing.
So, it was possible for a P2P protocol GET to not receive a response,
and appear to hang. When what it's really doing is waiting for the next
protocol command.
This seems most likely to happen when the annex is in direct mode, and the
file being requested has been modified. It could also happen in an indirect
mode repository if genInodeCache somehow failed. Perhaps due to a race
with a drop of the content file.
Fixed by making readContent behave the way its spec said it should,
and run the callback with L.empty in this case.
Note that, it's finee for readContent to send any amount of data
to the callback, including L.empty. sendBytes deals with that
by making sure it sends exactly the specified number of bytes,
aborting the protocol if it's too short. So, when L.empty is sent,
the protocol will end up aborting.
This work is supported by the NIH-funded NICEMAN (ReproNim TR&D3) project.
2018-11-02 17:41:50 +00:00
|
|
|
-- And the content may change while it's being sent.
|
2018-03-13 18:18:30 +00:00
|
|
|
-- The callback is passed a validity check that it can run after
|
|
|
|
-- sending the content to detect when this happened.
|
|
|
|
| StoreContent Key AssociatedFile Offset Len (Proto L.ByteString) (Proto (Maybe Validity)) (Bool -> c)
|
2016-12-06 19:05:44 +00:00
|
|
|
-- ^ Stores content to the key's temp file starting at an offset.
|
2016-12-02 17:47:42 +00:00
|
|
|
-- Once the whole content of the key has been stored, moves the
|
2016-12-06 19:05:44 +00:00
|
|
|
-- temp file into place as the content of the key, and returns True.
|
|
|
|
--
|
Fixed some other potential hangs in the P2P protocol
Finishes the start made in 983c9d5a53189f71797591692c0ed675f5bd1c16, by
handling the case where `transfer` fails for some other reason, and so the
ReadContent callback does not get run. I don't know of a case where
`transfer` does fail other than the locking dealt with in that commit, but
it's good to have a guarantee.
StoreContent and StoreContentTo had a similar problem.
Things like `getViaTmp` may decide not to run the transfer action.
And `transfer` could certianly fail, if another transfer of the same
object was in progress. (Or a different object when annex.pidlock is set.)
If the transfer action was not run, the content of the object would
not all get consumed, and so would get interpreted as protocol commands,
which would not go well.
My approach to fixing all of these things is to set a TVar only
once all the data in the transfer is known to have been read/written.
This way the internals of `transfer`, `getViaTmp` etc don't matter.
So in ReadContent, it checks if the transfer completed.
If not, as long as it didn't throw an exception, send empty and Invalid
data to the callback. On an exception the state of the protocol is unknown
so it has to raise ProtoFailureException and close the connection,
same as before.
In StoreContent, if the transfer did not complete
some portion of the DATA has been read, so the protocol is in an unknown
state and it has to close the conection as well.
(The ProtoFailureMessage used here matches the one in Annex.Transfer, which
is the most likely reason. Not ideal to duplicate it..)
StoreContent did not ever close the protocol connection before. So this is
a protocol change, but only in an exceptional circumstance, and it's not
going to break anything, because clients already need to deal with the
connection breaking at any point.
The way this new behavior looks (here origin has annex.pidlock = true so will
only accept one upload to it at a time):
git annex copy --to origin -J2
copy x (to origin...) ok
copy y (to origin...)
Lost connection (fd:25: hGetChar: end of file)
This work is supported by the NIH-funded NICEMAN (ReproNim TR&D3) project.
2018-11-06 18:44:00 +00:00
|
|
|
-- Must consume the whole lazy ByteString, or if unable to do
|
|
|
|
-- so, terminate the protocol connection.
|
|
|
|
--
|
2018-03-13 18:18:30 +00:00
|
|
|
-- If the validity check is provided and fails, the content was
|
2023-03-14 02:39:16 +00:00
|
|
|
-- changed while it was being sent, so verification of the
|
2018-03-13 18:18:30 +00:00
|
|
|
-- received content should be forced.
|
2016-11-18 01:27:16 +00:00
|
|
|
--
|
|
|
|
-- Note: The ByteString may not contain the entire remaining content
|
2016-12-02 17:47:42 +00:00
|
|
|
-- of the key. Only once the temp file size == Len has the whole
|
2016-11-18 01:27:16 +00:00
|
|
|
-- content been transferred.
|
2021-02-09 21:03:27 +00:00
|
|
|
| StoreContentTo FilePath (Maybe IncrementalVerifier) Offset Len (Proto L.ByteString) (Proto (Maybe Validity)) ((Bool, Verification) -> c)
|
2018-03-13 18:18:30 +00:00
|
|
|
-- ^ Like StoreContent, but stores the content to a temp file.
|
2024-07-10 20:06:39 +00:00
|
|
|
| SendContentWith (L.ByteString -> Annex (Maybe Validity -> Annex Bool)) (Proto L.ByteString) (Proto (Maybe Validity)) (Bool -> c)
|
|
|
|
-- ^ Reads content from the Proto L.ByteString and sends it to the
|
|
|
|
-- callback. The callback must consume the whole lazy ByteString,
|
|
|
|
-- before it returns a validity checker.
|
2016-11-20 16:08:16 +00:00
|
|
|
| SetPresent Key UUID c
|
|
|
|
| CheckContentPresent Key (Bool -> c)
|
2016-11-18 01:37:49 +00:00
|
|
|
-- ^ Checks if the whole content of the key is locally present.
|
2024-07-03 20:59:22 +00:00
|
|
|
| RemoveContent Key (Maybe MonotonicTimestamp) (Bool -> c)
|
2016-12-02 17:47:42 +00:00
|
|
|
-- ^ If the content is not present, still succeeds.
|
2016-11-18 01:48:59 +00:00
|
|
|
-- May fail if not enough copies to safely drop, etc.
|
2024-07-03 20:59:22 +00:00
|
|
|
-- After locking the content for removal, checks if it's later
|
|
|
|
-- than the MonotonicTimestamp, and fails.
|
2016-11-20 16:08:16 +00:00
|
|
|
| TryLockContent Key (Bool -> Proto ()) c
|
Add content locking to P2P protocol
Is content locking needed in the P2P protocol? Based on re-reading
bugs/concurrent_drop--from_presence_checking_failures.mdwn,
I think so: Peers can form cycles, and multiple peers can all be trying
to drop the same content.
So, added content locking to the protocol, with some difficulty.
The implementation is fine as far as it goes, but note the warning
comment for lockContentWhile -- if the connection to the peer is dropped
unexpectedly, the peer will then unlock the content, and yet the local
side will still think it's locked.
To be honest I'm not sure if Remote.Git's lockKey for ssh remotes
doesn't have the same problem. It checks that the
"ssh remote git-annex-shell lockcontent"
process has not exited, but if the connection closes afer that check,
the lockcontent command will unlock it, and yet the local side will
still think it's locked.
Probably this needs to be fixed by eg, making lockcontent catch any
execptions due to the connection closing, and in that case, wait a
significantly long time before dropping the lock.
This commit was sponsored by Anthony DeRobertis on Patreon.
2016-11-18 05:32:24 +00:00
|
|
|
-- ^ Try to lock the content of a key, preventing it
|
2016-12-02 18:49:22 +00:00
|
|
|
-- from being deleted, while running the provided protocol
|
make sure that lockContentShared is always paired with an inAnnex check
lockContentShared had a screwy caveat that it didn't verify that the content
was present when locking it, but in the most common case, eg indirect mode,
it failed to lock when the content is not present.
That led to a few callers forgetting to check inAnnex when using it,
but the potential data loss was unlikely to be noticed because it only
affected direct mode I think.
Fix data loss bug when the local repository uses direct mode, and a
locally modified file is dropped from a remote repsitory. The bug
caused the modified file to be counted as a copy of the original file.
(This is not a severe bug because in such a situation, dropping
from the remote and then modifying the file is allowed and has the same
end result.)
And, in content locking over tor, when the remote repository is
in direct mode, it neglected to check that the content was actually
present when locking it. This could cause git annex drop to remove
the only copy of a file when it thought the tor remote had a copy.
So, make lockContentShared do its own inAnnex check. This could perhaps
be optimised for direct mode, to avoid the check then, since locking
the content necessarily verifies it exists there, but I have not bothered
with that.
This commit was sponsored by Jeff Goeke-Smith on Patreon.
2018-03-07 18:13:02 +00:00
|
|
|
-- action. If unable to lock the content, or the content is not
|
|
|
|
-- present, runs the protocol action with False.
|
2016-12-09 18:52:38 +00:00
|
|
|
| WaitRefChange (ChangedRefs -> c)
|
2018-03-13 01:46:58 +00:00
|
|
|
-- ^ Waits for one or more git refs to change and returns them.
|
2020-12-11 16:03:40 +00:00
|
|
|
| UpdateMeterTotalSize Meter TotalSize c
|
2018-03-13 01:46:58 +00:00
|
|
|
-- ^ Updates the total size of a Meter, for cases where the size is
|
|
|
|
-- not known until the data is being received.
|
2018-03-13 18:18:30 +00:00
|
|
|
| RunValidityCheck (Annex Validity) (Validity -> c)
|
|
|
|
-- ^ Runs a deferred validity check.
|
2024-07-04 17:42:09 +00:00
|
|
|
| GetLocalCurrentTime (POSIXTime -> c)
|
|
|
|
-- ^ Gets the local time.
|
2016-11-17 21:19:04 +00:00
|
|
|
deriving (Functor)
|
|
|
|
|
2016-11-20 16:08:16 +00:00
|
|
|
type Local = Free LocalF
|
2016-11-17 21:19:04 +00:00
|
|
|
|
2016-11-20 16:08:16 +00:00
|
|
|
-- Generate sendMessage etc functions for all free monad constructors.
|
|
|
|
$(makeFree ''NetF)
|
|
|
|
$(makeFree ''LocalF)
|
2016-11-17 21:19:04 +00:00
|
|
|
|
2018-03-12 17:43:19 +00:00
|
|
|
auth :: UUID -> AuthToken -> Proto () -> Proto (Maybe UUID)
|
|
|
|
auth myuuid t a = do
|
2016-11-20 16:08:16 +00:00
|
|
|
net $ sendMessage (AUTH myuuid t)
|
2018-03-12 17:43:19 +00:00
|
|
|
postAuth a
|
2018-03-08 18:02:18 +00:00
|
|
|
|
2018-03-12 17:43:19 +00:00
|
|
|
postAuth :: Proto () -> Proto (Maybe UUID)
|
|
|
|
postAuth a = do
|
2016-11-20 16:08:16 +00:00
|
|
|
r <- net receiveMessage
|
2016-11-17 21:19:04 +00:00
|
|
|
case r of
|
2018-03-12 17:43:19 +00:00
|
|
|
Just (AUTH_SUCCESS theiruuid) -> do
|
|
|
|
a
|
|
|
|
return $ Just theiruuid
|
|
|
|
Just AUTH_FAILURE -> return Nothing
|
2016-11-17 21:19:04 +00:00
|
|
|
_ -> do
|
2016-11-20 16:08:16 +00:00
|
|
|
net $ sendMessage (ERROR "auth failed")
|
2016-11-17 21:19:04 +00:00
|
|
|
return Nothing
|
|
|
|
|
2018-03-12 17:43:19 +00:00
|
|
|
negotiateProtocolVersion :: ProtocolVersion -> Proto ()
|
|
|
|
negotiateProtocolVersion preferredversion = do
|
|
|
|
net $ sendMessage (VERSION preferredversion)
|
|
|
|
r <- net receiveMessage
|
|
|
|
case r of
|
2018-03-12 19:19:40 +00:00
|
|
|
Just (VERSION v) -> net $ setProtocolVersion v
|
2018-03-12 17:43:19 +00:00
|
|
|
-- Old server doesn't know about the VERSION command.
|
2024-07-26 17:54:37 +00:00
|
|
|
Just (ERROR _) -> net $ setProtocolVersion (ProtocolVersion 0)
|
2018-03-12 17:43:19 +00:00
|
|
|
_ -> net $ sendMessage (ERROR "expected VERSION")
|
|
|
|
|
2024-06-27 16:20:22 +00:00
|
|
|
sendBypass :: Bypass -> Proto ()
|
|
|
|
sendBypass bypass@(Bypass s)
|
|
|
|
| S.null s = return ()
|
|
|
|
| otherwise = do
|
|
|
|
ver <- net getProtocolVersion
|
|
|
|
if ver >= ProtocolVersion 2
|
|
|
|
then net $ sendMessage (BYPASS bypass)
|
|
|
|
else return ()
|
|
|
|
|
2024-06-28 17:42:25 +00:00
|
|
|
checkPresent :: Key -> Proto (Either String Bool)
|
2016-11-18 01:56:02 +00:00
|
|
|
checkPresent key = do
|
2016-11-20 16:08:16 +00:00
|
|
|
net $ sendMessage (CHECKPRESENT key)
|
2024-06-28 17:42:25 +00:00
|
|
|
checkSuccess'
|
2016-11-18 01:56:02 +00:00
|
|
|
|
Add content locking to P2P protocol
Is content locking needed in the P2P protocol? Based on re-reading
bugs/concurrent_drop--from_presence_checking_failures.mdwn,
I think so: Peers can form cycles, and multiple peers can all be trying
to drop the same content.
So, added content locking to the protocol, with some difficulty.
The implementation is fine as far as it goes, but note the warning
comment for lockContentWhile -- if the connection to the peer is dropped
unexpectedly, the peer will then unlock the content, and yet the local
side will still think it's locked.
To be honest I'm not sure if Remote.Git's lockKey for ssh remotes
doesn't have the same problem. It checks that the
"ssh remote git-annex-shell lockcontent"
process has not exited, but if the connection closes after that check,
the lockcontent command will unlock it, and yet the local side will
still think it's locked.
Probably this needs to be fixed by eg, making lockcontent catch any
exceptions due to the connection closing, and in that case, wait a
significantly long time before dropping the lock.
This commit was sponsored by Anthony DeRobertis on Patreon.
2016-11-18 05:32:24 +00:00
|
|
|
{- Locks content to prevent it from being dropped, while running an action.
|
|
|
|
-
|
|
|
|
- Note that this only guarantees that the content is locked as long as the
|
|
|
|
- connection to the peer remains up. If the connection is unexpectedly
|
|
|
|
- dropped, the peer will then unlock the content.
|
|
|
|
-}
|
|
|
|
lockContentWhile
|
|
|
|
:: MonadMask m
|
2016-12-06 19:05:44 +00:00
|
|
|
=> (forall r. r -> Proto r -> m r)
|
Add content locking to P2P protocol
Is content locking needed in the P2P protocol? Based on re-reading
bugs/concurrent_drop--from_presence_checking_failures.mdwn,
I think so: Peers can form cycles, and multiple peers can all be trying
to drop the same content.
So, added content locking to the protocol, with some difficulty.
The implementation is fine as far as it goes, but note the warning
comment for lockContentWhile -- if the connection to the peer is dropped
unexpectedly, the peer will then unlock the content, and yet the local
side will still think it's locked.
To be honest I'm not sure if Remote.Git's lockKey for ssh remotes
doesn't have the same problem. It checks that the
"ssh remote git-annex-shell lockcontent"
process has not exited, but if the connection closes after that check,
the lockcontent command will unlock it, and yet the local side will
still think it's locked.
Probably this needs to be fixed by eg, making lockcontent catch any
exceptions due to the connection closing, and in that case, wait a
significantly long time before dropping the lock.
This commit was sponsored by Anthony DeRobertis on Patreon.
2016-11-18 05:32:24 +00:00
|
|
|
-> Key
|
2016-12-06 19:05:44 +00:00
|
|
|
-> (Bool -> m a)
|
|
|
|
-> m a
|
Add content locking to P2P protocol
Is content locking needed in the P2P protocol? Based on re-reading
bugs/concurrent_drop--from_presence_checking_failures.mdwn,
I think so: Peers can form cycles, and multiple peers can all be trying
to drop the same content.
So, added content locking to the protocol, with some difficulty.
The implementation is fine as far as it goes, but note the warning
comment for lockContentWhile -- if the connection to the peer is dropped
unexpectedly, the peer will then unlock the content, and yet the local
side will still think it's locked.
To be honest I'm not sure if Remote.Git's lockKey for ssh remotes
doesn't have the same problem. It checks that the
"ssh remote git-annex-shell lockcontent"
process has not exited, but if the connection closes after that check,
the lockcontent command will unlock it, and yet the local side will
still think it's locked.
Probably this needs to be fixed by eg, making lockcontent catch any
exceptions due to the connection closing, and in that case, wait a
significantly long time before dropping the lock.
This commit was sponsored by Anthony DeRobertis on Patreon.
2016-11-18 05:32:24 +00:00
|
|
|
lockContentWhile runproto key a = bracket setup cleanup a
|
|
|
|
where
|
2016-12-06 19:05:44 +00:00
|
|
|
setup = runproto False $ do
|
2016-11-20 16:08:16 +00:00
|
|
|
net $ sendMessage (LOCKCONTENT key)
|
Add content locking to P2P protocol
Is content locking needed in the P2P protocol? Based on re-reading
bugs/concurrent_drop--from_presence_checking_failures.mdwn,
I think so: Peers can form cycles, and multiple peers can all be trying
to drop the same content.
So, added content locking to the protocol, with some difficulty.
The implementation is fine as far as it goes, but note the warning
comment for lockContentWhile -- if the connection to the peer is dropped
unexpectedly, the peer will then unlock the content, and yet the local
side will still think it's locked.
To be honest I'm not sure if Remote.Git's lockKey for ssh remotes
doesn't have the same problem. It checks that the
"ssh remote git-annex-shell lockcontent"
process has not exited, but if the connection closes after that check,
the lockcontent command will unlock it, and yet the local side will
still think it's locked.
Probably this needs to be fixed by eg, making lockcontent catch any
exceptions due to the connection closing, and in that case, wait a
significantly long time before dropping the lock.
This commit was sponsored by Anthony DeRobertis on Patreon.
2016-11-18 05:32:24 +00:00
|
|
|
checkSuccess
|
2016-12-06 19:05:44 +00:00
|
|
|
cleanup True = runproto () $ net $ sendMessage UNLOCKCONTENT
|
Add content locking to P2P protocol
Is content locking needed in the P2P protocol? Based on re-reading
bugs/concurrent_drop--from_presence_checking_failures.mdwn,
I think so: Peers can form cycles, and multiple peers can all be trying
to drop the same content.
So, added content locking to the protocol, with some difficulty.
The implementation is fine as far as it goes, but note the warning
comment for lockContentWhile -- if the connection to the peer is dropped
unexpectedly, the peer will then unlock the content, and yet the local
side will still think it's locked.
To be honest I'm not sure if Remote.Git's lockKey for ssh remotes
doesn't have the same problem. It checks that the
"ssh remote git-annex-shell lockcontent"
process has not exited, but if the connection closes after that check,
the lockcontent command will unlock it, and yet the local side will
still think it's locked.
Probably this needs to be fixed by eg, making lockcontent catch any
exceptions due to the connection closing, and in that case, wait a
significantly long time before dropping the lock.
This commit was sponsored by Anthony DeRobertis on Patreon.
2016-11-18 05:32:24 +00:00
|
|
|
cleanup False = return ()
|
|
|
|
|
toward SafeDropProof expiry checking
Added Maybe POSIXTime to SafeDropProof, which gets set when the proof is
based on a LockedCopy. If there are several LockedCopies, it uses the
closest expiry time. That is not optimal, it may be that the proof
expires based on one LockedCopy but another one has not expired. But
that seems unlikely to really happen, and anyway the user can just
re-run a drop if it fails due to expiry.
Pass the SafeDropProof to removeKey, which is responsible for checking
it for expiry in situations where that could be a problem. Which really
only means in Remote.Git.
Made Remote.Git check expiry when dropping from a local remote.
Checking expiry when dropping from a P2P remote is not yet implemented.
P2P.Protocol.remove has SafeDropProof plumbed through to it for that
purpose.
Fixing the remaining 2 build warnings should complete this work.
Note that the use of a POSIXTime here means that if the clock gets set
forward while git-annex is in the middle of a drop, it may say that
dropping took too long. That seems ok. Less ok is that if the clock gets
turned back a sufficient amount (eg 5 minutes), proof expiry won't be
noticed. It might be better to use the Monotonic clock, but that doesn't
advance when a laptop is suspended, and while there is the linux
Boottime clock, that is not available on other systems. Perhaps a
combination of POSIXTime and the Monotonic clock could detect laptop
suspension and also detect clock being turned back?
There is a potential future flag day where
p2pDefaultLockContentRetentionDuration is not assumed, but is probed
using the P2P protocol, and peers that don't support it can no longer
produce a LockedCopy. Until that happens, when git-annex is
communicating with older peers there is a risk of data loss when
a ssh connection closes during LOCKCONTENT.
2024-07-04 16:23:46 +00:00
|
|
|
remove :: Maybe SafeDropProof -> Key -> Proto (Either String Bool, Maybe [UUID])
|
2024-07-04 17:42:09 +00:00
|
|
|
remove proof key =
|
|
|
|
case safeDropProofEndTime =<< proof of
|
|
|
|
Nothing -> removeanytime
|
|
|
|
Just endtime -> do
|
|
|
|
ver <- net getProtocolVersion
|
|
|
|
if ver >= ProtocolVersion 3
|
|
|
|
then removeBefore endtime key
|
|
|
|
-- Peer is too old to support REMOVE-BEFORE
|
|
|
|
else removeanytime
|
|
|
|
where
|
|
|
|
removeanytime = do
|
|
|
|
net $ sendMessage (REMOVE key)
|
|
|
|
checkSuccessFailurePlus
|
|
|
|
|
2024-07-10 14:23:10 +00:00
|
|
|
getTimestamp :: Proto (Either String MonotonicTimestamp)
|
|
|
|
getTimestamp = do
|
|
|
|
net $ sendMessage GETTIMESTAMP
|
|
|
|
net receiveMessage >>= \case
|
|
|
|
Just (TIMESTAMP ts) -> return (Right ts)
|
|
|
|
Just (ERROR err) -> return (Left err)
|
|
|
|
_ -> do
|
|
|
|
net $ sendMessage (ERROR "expected TIMESTAMP")
|
|
|
|
return (Left "protocol error")
|
|
|
|
|
2024-07-25 14:11:09 +00:00
|
|
|
removeBefore :: POSIXTime -> Key -> Proto (Either String Bool, Maybe [UUID])
|
|
|
|
removeBefore endtime key = getTimestamp >>= \case
|
|
|
|
Right remotetime ->
|
|
|
|
canRemoveBefore endtime remotetime (local getLocalCurrentTime) >>= \case
|
|
|
|
Just remoteendtime ->
|
|
|
|
removeBeforeRemoteEndTime remoteendtime key
|
|
|
|
Nothing ->
|
|
|
|
return (Right False, Nothing)
|
|
|
|
Left err -> return (Left err, Nothing)
|
|
|
|
|
2024-07-04 17:42:09 +00:00
|
|
|
{- The endtime is the last local time at which the key can be removed.
|
|
|
|
- To tell the remote how long it has to remove the key, get its current
|
|
|
|
- timestamp, and add to it the number of seconds from the current local
|
|
|
|
- time until the endtime.
|
|
|
|
-
|
|
|
|
- Order of retrieving timestamps matters. Getting the local time after the
|
|
|
|
- remote timestamp means that, if there is some delay in getting the
|
|
|
|
- response from the remote, that is reflected in the local time, and so
|
|
|
|
- reduces the allowed time.
|
|
|
|
-}
|
2024-07-25 14:11:09 +00:00
|
|
|
canRemoveBefore :: Monad m => POSIXTime -> MonotonicTimestamp -> m POSIXTime -> m (Maybe MonotonicTimestamp)
|
|
|
|
canRemoveBefore endtime remotetime getlocaltime = do
|
|
|
|
localtime <- getlocaltime
|
|
|
|
let timeleft = endtime - localtime
|
|
|
|
let timeleft' = MonotonicTimestamp (floor timeleft)
|
|
|
|
let remoteendtime = remotetime + timeleft'
|
|
|
|
return $ if timeleft <= 0
|
|
|
|
then Nothing
|
|
|
|
else Just remoteendtime
|
2016-11-18 01:48:59 +00:00
|
|
|
|
2024-07-10 14:03:26 +00:00
|
|
|
removeBeforeRemoteEndTime :: MonotonicTimestamp -> Key -> Proto (Either String Bool, Maybe [UUID])
|
|
|
|
removeBeforeRemoteEndTime remoteendtime key = do
|
|
|
|
net $ sendMessage $
|
|
|
|
REMOVE_BEFORE remoteendtime key
|
|
|
|
checkSuccessFailurePlus
|
|
|
|
|
2021-02-09 21:03:27 +00:00
|
|
|
get :: FilePath -> Key -> Maybe IncrementalVerifier -> AssociatedFile -> Meter -> MeterUpdate -> Proto (Bool, Verification)
|
|
|
|
get dest key iv af m p =
|
2020-12-09 17:46:42 +00:00
|
|
|
receiveContent (Just m) p sizer storer $ \offset ->
|
|
|
|
GET offset (ProtoAssociatedFile af) key
|
2016-12-06 19:05:44 +00:00
|
|
|
where
|
|
|
|
sizer = fileSize dest
|
2021-02-09 21:03:27 +00:00
|
|
|
storer = storeContentTo dest iv
|
2016-11-17 21:19:04 +00:00
|
|
|
|
2024-06-18 16:07:01 +00:00
|
|
|
put :: Key -> AssociatedFile -> MeterUpdate -> Proto (Maybe [UUID])
|
2024-07-22 14:20:18 +00:00
|
|
|
put key af p = put' key af $ \offset ->
|
|
|
|
sendContent key af Nothing offset p
|
|
|
|
|
|
|
|
put' :: Key -> AssociatedFile -> (Offset -> Proto (Maybe [UUID])) -> Proto (Maybe [UUID])
|
|
|
|
put' key af sender = do
|
2020-12-09 17:46:42 +00:00
|
|
|
net $ sendMessage (PUT (ProtoAssociatedFile af) key)
|
2016-11-20 16:08:16 +00:00
|
|
|
r <- net receiveMessage
|
2016-11-17 21:19:04 +00:00
|
|
|
case r of
|
2024-07-22 14:20:18 +00:00
|
|
|
Just (PUT_FROM offset) -> sender offset
|
2024-06-18 16:07:01 +00:00
|
|
|
Just ALREADY_HAVE -> return (Just [])
|
|
|
|
Just (ALREADY_HAVE_PLUS uuids) -> return (Just uuids)
|
2016-11-17 21:19:04 +00:00
|
|
|
_ -> do
|
2018-03-12 17:43:19 +00:00
|
|
|
net $ sendMessage (ERROR "expected PUT_FROM or ALREADY_HAVE")
|
2024-06-18 16:07:01 +00:00
|
|
|
return Nothing
|
2016-11-17 21:19:04 +00:00
|
|
|
|
2024-07-22 19:02:08 +00:00
|
|
|
-- The protocol does not have a way to get the PUT offset
|
|
|
|
-- without sending DATA, so send an empty bytestring and indicate
|
|
|
|
-- it is not valid.
|
|
|
|
getPutOffset :: Key -> AssociatedFile -> Proto (Either [UUID] Offset)
|
|
|
|
getPutOffset key af = do
|
|
|
|
net $ sendMessage (PUT (ProtoAssociatedFile af) key)
|
|
|
|
r <- net receiveMessage
|
|
|
|
case r of
|
|
|
|
Just (PUT_FROM offset) -> do
|
|
|
|
void $ sendContent' nullMeterUpdate (Len 0) L.empty $
|
|
|
|
return Invalid
|
|
|
|
return (Right offset)
|
|
|
|
Just ALREADY_HAVE -> return (Left [])
|
|
|
|
Just (ALREADY_HAVE_PLUS uuids) -> return (Left uuids)
|
|
|
|
_ -> do
|
|
|
|
net $ sendMessage (ERROR "expected PUT_FROM or ALREADY_HAVE")
|
|
|
|
return (Left [])
|
|
|
|
|
2016-12-02 19:34:15 +00:00
|
|
|
data ServerHandler a
|
|
|
|
= ServerGot a
|
|
|
|
| ServerContinue
|
|
|
|
| ServerUnexpected
|
|
|
|
|
|
|
|
-- Server loop, getting messages from the client and handling them
|
|
|
|
serverLoop :: (Message -> Proto (ServerHandler a)) -> Proto (Maybe a)
|
2024-07-09 18:26:30 +00:00
|
|
|
serverLoop a = serveOneMessage a serverLoop
|
|
|
|
|
|
|
|
-- Get one message from the client and handle it.
|
|
|
|
serveOneMessage
|
|
|
|
:: (Message -> Proto (ServerHandler a))
|
|
|
|
-> ((Message -> Proto (ServerHandler a)) -> Proto (Maybe a))
|
|
|
|
-> Proto (Maybe a)
|
|
|
|
serveOneMessage a cont = do
|
2018-03-12 17:43:19 +00:00
|
|
|
mcmd <- net receiveMessage
|
|
|
|
case mcmd of
|
2016-12-02 19:34:15 +00:00
|
|
|
-- When the client sends ERROR to the server, the server
|
|
|
|
-- gives up, since it's not clear what state the client
|
|
|
|
-- is in, and so not possible to recover.
|
2018-03-12 17:43:19 +00:00
|
|
|
Just (ERROR _) -> return Nothing
|
2023-03-14 02:39:16 +00:00
|
|
|
-- When the client sends an unparsable message, the server
|
2024-07-09 18:26:30 +00:00
|
|
|
-- responds with an error message, and continues. This allows
|
2018-03-12 17:43:19 +00:00
|
|
|
-- expanding the protocol with new messages.
|
|
|
|
Nothing -> do
|
|
|
|
net $ sendMessage (ERROR "unknown command")
|
2024-07-09 18:26:30 +00:00
|
|
|
cont a
|
2018-03-12 17:43:19 +00:00
|
|
|
Just cmd -> do
|
2016-12-02 19:34:15 +00:00
|
|
|
v <- a cmd
|
|
|
|
case v of
|
|
|
|
ServerGot r -> return (Just r)
|
2024-07-09 18:26:30 +00:00
|
|
|
ServerContinue -> cont a
|
2016-12-02 19:34:15 +00:00
|
|
|
-- If the client sends an unexpected message,
|
|
|
|
-- the server will respond with ERROR, and
|
|
|
|
-- always continues processing messages.
|
|
|
|
--
|
|
|
|
-- Since the protocol is not versioned, this
|
|
|
|
-- is necessary to handle protocol changes
|
|
|
|
-- robustly, since the client can detect when
|
|
|
|
-- it's talking to a server that does not
|
|
|
|
-- support some new feature, and fall back.
|
|
|
|
ServerUnexpected -> do
|
|
|
|
net $ sendMessage (ERROR "unexpected command")
|
2024-07-09 18:26:30 +00:00
|
|
|
cont a
|
2016-12-02 19:34:15 +00:00
|
|
|
|
|
|
|
-- | Serve the protocol, with an unauthenticated peer. Once the peer
|
|
|
|
-- successfully authenticates, returns their UUID.
|
|
|
|
serveAuth :: UUID -> Proto (Maybe UUID)
|
|
|
|
serveAuth myuuid = serverLoop handler
|
|
|
|
where
|
|
|
|
handler (AUTH theiruuid authtoken) = do
|
|
|
|
ok <- net $ checkAuthToken theiruuid authtoken
|
|
|
|
if ok
|
|
|
|
then do
|
|
|
|
net $ sendMessage (AUTH_SUCCESS myuuid)
|
|
|
|
return (ServerGot theiruuid)
|
|
|
|
else do
|
|
|
|
net $ sendMessage AUTH_FAILURE
|
|
|
|
return ServerContinue
|
|
|
|
handler _ = return ServerUnexpected
|
|
|
|
|
2018-05-25 17:17:56 +00:00
|
|
|
data ServerMode
|
|
|
|
= ServeReadOnly
|
|
|
|
-- ^ Allow reading, but not writing.
|
|
|
|
| ServeAppendOnly
|
|
|
|
-- ^ Allow reading, and storing new objects, but not deleting objects.
|
|
|
|
| ServeReadWrite
|
|
|
|
-- ^ Full read and write access.
|
2024-07-09 17:37:55 +00:00
|
|
|
deriving (Show, Eq, Ord)
|
2018-03-07 17:15:55 +00:00
|
|
|
|
2016-12-02 19:34:15 +00:00
|
|
|
-- | Serve the protocol, with a peer that has authenticated.
|
2018-03-07 17:15:55 +00:00
|
|
|
serveAuthed :: ServerMode -> UUID -> Proto ()
|
2024-07-09 18:26:30 +00:00
|
|
|
serveAuthed servermode myuuid = void $ serverLoop $
|
|
|
|
serverHandler servermode myuuid
|
|
|
|
|
|
|
|
-- | Serve a single command in the protocol, the same as serveAuthed,
|
|
|
|
-- but without looping to handle the next command.
|
|
|
|
serveOneCommandAuthed :: ServerMode -> UUID -> Proto ()
|
|
|
|
serveOneCommandAuthed servermode myuuid = fromMaybe () <$>
|
|
|
|
serveOneMessage (serverHandler servermode myuuid)
|
|
|
|
(const $ pure Nothing)
|
|
|
|
|
|
|
|
serverHandler :: ServerMode -> UUID -> Message -> Proto (ServerHandler ())
|
|
|
|
serverHandler servermode myuuid = handler
|
2016-11-17 21:19:04 +00:00
|
|
|
where
|
2018-03-12 17:43:19 +00:00
|
|
|
handler (VERSION theirversion) = do
|
|
|
|
let v = min theirversion maxProtocolVersion
|
2018-03-12 19:19:40 +00:00
|
|
|
net $ setProtocolVersion v
|
2018-03-12 17:43:19 +00:00
|
|
|
net $ sendMessage (VERSION v)
|
|
|
|
return ServerContinue
|
2016-12-02 19:34:15 +00:00
|
|
|
handler (LOCKCONTENT key) = do
|
|
|
|
local $ tryLockContent key $ \locked -> do
|
Add content locking to P2P protocol
Is content locking needed in the P2P protocol? Based on re-reading
bugs/concurrent_drop--from_presence_checking_failures.mdwn,
I think so: Peers can form cycles, and multiple peers can all be trying
to drop the same content.
So, added content locking to the protocol, with some difficulty.
The implementation is fine as far as it goes, but note the warning
comment for lockContentWhile -- if the connection to the peer is dropped
unexpectedly, the peer will then unlock the content, and yet the local
side will still think it's locked.
To be honest I'm not sure if Remote.Git's lockKey for ssh remotes
doesn't have the same problem. It checks that the
"ssh remote git-annex-shell lockcontent"
process has not exited, but if the connection closes after that check,
the lockcontent command will unlock it, and yet the local side will
still think it's locked.
Probably this needs to be fixed by eg, making lockcontent catch any
exceptions due to the connection closing, and in that case, wait a
significantly long time before dropping the lock.
This commit was sponsored by Anthony DeRobertis on Patreon.
2016-11-18 05:32:24 +00:00
|
|
|
sendSuccess locked
|
|
|
|
when locked $ do
|
2016-11-20 16:08:16 +00:00
|
|
|
r' <- net receiveMessage
|
Add content locking to P2P protocol
Is content locking needed in the P2P protocol? Based on re-reading
bugs/concurrent_drop--from_presence_checking_failures.mdwn,
I think so: Peers can form cycles, and multiple peers can all be trying
to drop the same content.
So, added content locking to the protocol, with some difficulty.
The implementation is fine as far as it goes, but note the warning
comment for lockContentWhile -- if the connection to the peer is dropped
unexpectedly, the peer will then unlock the content, and yet the local
side will still think it's locked.
To be honest I'm not sure if Remote.Git's lockKey for ssh remotes
doesn't have the same problem. It checks that the
"ssh remote git-annex-shell lockcontent"
process has not exited, but if the connection closes after that check,
the lockcontent command will unlock it, and yet the local side will
still think it's locked.
Probably this needs to be fixed by eg, making lockcontent catch any
exceptions due to the connection closing, and in that case, wait a
significantly long time before dropping the lock.
This commit was sponsored by Anthony DeRobertis on Patreon.
2016-11-18 05:32:24 +00:00
|
|
|
case r' of
|
2018-03-12 17:43:19 +00:00
|
|
|
Just UNLOCKCONTENT -> return ()
|
2016-11-20 16:08:16 +00:00
|
|
|
_ -> net $ sendMessage (ERROR "expected UNLOCKCONTENT")
|
2016-12-02 19:34:15 +00:00
|
|
|
return ServerContinue
|
|
|
|
handler (CHECKPRESENT key) = do
|
|
|
|
sendSuccess =<< local (checkContentPresent key)
|
|
|
|
return ServerContinue
|
2024-06-11 16:04:58 +00:00
|
|
|
handler (REMOVE key) =
|
2024-07-03 20:59:22 +00:00
|
|
|
handleremove key Nothing
|
|
|
|
handler (REMOVE_BEFORE ts key) =
|
|
|
|
handleremove key (Just ts)
|
|
|
|
handler GETTIMESTAMP = do
|
|
|
|
ts <- net getMonotonicTimestamp
|
|
|
|
net $ sendMessage $ TIMESTAMP ts
|
|
|
|
return ServerContinue
|
2024-06-11 16:04:58 +00:00
|
|
|
handler (PUT (ProtoAssociatedFile af) key) =
|
|
|
|
checkPUTServerMode servermode $ \case
|
|
|
|
Nothing -> handleput af key
|
|
|
|
Just notallowed -> do
|
|
|
|
notallowed
|
|
|
|
return ServerContinue
|
2020-12-09 17:46:42 +00:00
|
|
|
handler (GET offset (ProtoAssociatedFile af) key) = do
|
2024-07-01 14:42:27 +00:00
|
|
|
void $ sendContent key af Nothing offset nullMeterUpdate
|
2016-11-18 00:54:14 +00:00
|
|
|
-- setPresent not called because the peer may have
|
2016-12-02 18:49:22 +00:00
|
|
|
-- requested the data but not permanently stored it.
|
2016-12-02 19:34:15 +00:00
|
|
|
return ServerContinue
|
|
|
|
handler (CONNECT service) = do
|
2016-12-09 21:11:16 +00:00
|
|
|
-- After connecting to git, there may be unconsumed data
|
|
|
|
-- from the git processes hanging around (even if they
|
|
|
|
-- exited successfully), so stop serving this connection.
|
2024-06-11 16:04:58 +00:00
|
|
|
let endit = return $ ServerGot ()
|
|
|
|
checkCONNECTServerMode service servermode $ \case
|
|
|
|
Nothing -> do
|
|
|
|
net $ relayService service
|
|
|
|
endit
|
|
|
|
Just notallowed -> do
|
|
|
|
notallowed
|
|
|
|
endit
|
2016-12-09 18:52:38 +00:00
|
|
|
handler NOTIFYCHANGE = do
|
|
|
|
refs <- local waitRefChange
|
|
|
|
net $ sendMessage (CHANGED refs)
|
|
|
|
return ServerContinue
|
2024-06-27 16:20:22 +00:00
|
|
|
handler (BYPASS _) = return ServerContinue
|
2016-12-02 19:34:15 +00:00
|
|
|
handler _ = return ServerUnexpected
|
2016-11-17 21:19:04 +00:00
|
|
|
|
2018-05-25 17:17:56 +00:00
|
|
|
handleput af key = do
|
|
|
|
have <- local $ checkContentPresent key
|
|
|
|
if have
|
|
|
|
then net $ sendMessage ALREADY_HAVE
|
|
|
|
else do
|
|
|
|
let sizer = tmpContentSize key
|
2021-02-10 16:41:05 +00:00
|
|
|
let storer = storeContent key af
|
|
|
|
v <- receiveContent Nothing nullMeterUpdate sizer storer PUT_FROM
|
|
|
|
when (observeBool v) $
|
2018-05-25 17:17:56 +00:00
|
|
|
local $ setPresent key myuuid
|
|
|
|
return ServerContinue
|
2024-07-03 20:59:22 +00:00
|
|
|
|
|
|
|
handleremove key mts =
|
|
|
|
checkREMOVEServerMode servermode $ \case
|
|
|
|
Nothing -> do
|
|
|
|
sendSuccess =<< local (removeContent key mts)
|
|
|
|
return ServerContinue
|
|
|
|
Just notallowed -> do
|
|
|
|
notallowed
|
|
|
|
return ServerContinue
|
2018-05-25 17:17:56 +00:00
|
|
|
|
2024-06-11 16:04:58 +00:00
|
|
|
sendReadOnlyError :: Proto ()
|
|
|
|
sendReadOnlyError = net $ sendMessage $
|
|
|
|
ERROR "this repository is read-only; write access denied"
|
|
|
|
|
|
|
|
sendAppendOnlyError :: Proto ()
|
|
|
|
sendAppendOnlyError = net $ sendMessage $
|
|
|
|
ERROR "this repository is append-only; removal denied"
|
|
|
|
|
|
|
|
checkPUTServerMode :: Monad m => ServerMode -> (Maybe (Proto ()) -> m a) -> m a
|
|
|
|
checkPUTServerMode servermode a =
|
|
|
|
case servermode of
|
|
|
|
ServeReadWrite -> a Nothing
|
|
|
|
ServeAppendOnly -> a Nothing
|
|
|
|
ServeReadOnly -> a (Just sendReadOnlyError)
|
|
|
|
|
|
|
|
checkREMOVEServerMode :: Monad m => ServerMode -> (Maybe (Proto ()) -> m a) -> m a
|
|
|
|
checkREMOVEServerMode servermode a =
|
|
|
|
case servermode of
|
|
|
|
ServeReadWrite -> a Nothing
|
|
|
|
ServeAppendOnly -> a (Just sendAppendOnlyError)
|
|
|
|
ServeReadOnly -> a (Just sendReadOnlyError)
|
|
|
|
|
|
|
|
checkCONNECTServerMode :: Monad m => Service -> ServerMode -> (Maybe (Proto ()) -> m a) -> m a
|
|
|
|
checkCONNECTServerMode service servermode a =
|
|
|
|
case (servermode, service) of
|
|
|
|
(ServeReadWrite, _) -> a Nothing
|
|
|
|
(ServeAppendOnly, UploadPack) -> a Nothing
|
|
|
|
-- git protocol could be used to overwrite
|
|
|
|
-- refs or something, so don't allow
|
|
|
|
(ServeAppendOnly, ReceivePack) -> a (Just sendReadOnlyError)
|
|
|
|
(ServeReadOnly, UploadPack) -> a Nothing
|
|
|
|
(ServeReadOnly, ReceivePack) -> a (Just sendReadOnlyError)
|
|
|
|
|
2024-07-01 14:42:27 +00:00
|
|
|
sendContent :: Key -> AssociatedFile -> Maybe FilePath -> Offset -> MeterUpdate -> Proto (Maybe [UUID])
|
|
|
|
sendContent key af o offset@(Offset n) p = go =<< local (contentSize key)
|
2016-12-08 23:56:02 +00:00
|
|
|
where
|
|
|
|
go (Just (Len totallen)) = do
|
|
|
|
let len = totallen - n
|
|
|
|
if len <= 0
|
2018-03-13 18:18:30 +00:00
|
|
|
then sender (Len 0) L.empty (return Valid)
|
2024-07-01 14:42:27 +00:00
|
|
|
else local $ readContent key af o offset $
|
2016-12-08 23:56:02 +00:00
|
|
|
sender (Len len)
|
2020-12-01 20:05:55 +00:00
|
|
|
-- Content not available to send. Indicate this by sending
|
2024-07-22 19:02:08 +00:00
|
|
|
-- empty data and indicate it's invalid.
|
2020-12-01 20:05:55 +00:00
|
|
|
go Nothing = sender (Len 0) L.empty (return Invalid)
|
2024-07-22 14:20:18 +00:00
|
|
|
|
|
|
|
sender = sendContent' p'
|
|
|
|
|
|
|
|
p' = offsetMeterUpdate p (toBytesProcessed n)
|
|
|
|
|
|
|
|
sendContent' :: MeterUpdate -> Len -> L.ByteString -> Proto Validity -> Proto (Maybe [UUID])
|
|
|
|
sendContent' p len content validitycheck = do
|
|
|
|
net $ sendMessage (DATA len)
|
|
|
|
net $ sendBytes len content p
|
|
|
|
ver <- net getProtocolVersion
|
|
|
|
when (ver >= ProtocolVersion 1) $
|
|
|
|
net . sendMessage . VALIDITY =<< validitycheck
|
|
|
|
checkSuccessPlus
|
2016-11-17 21:19:04 +00:00
|
|
|
|
2018-03-13 18:18:30 +00:00
|
|
|
{- Receives content from the peer and hands it to the provided storer
 - action.
 -
 - The sizer action yields the length of content already present
 - locally; that is sent to the peer (via the message built by mkmsg)
 - as the offset to start sending from.
 - NOTE(review): assumed resume-after-interruption semantics; confirm
 - against callers.
 -
 - When a Meter is provided, its total size is updated to the sum of
 - the local length and the length the peer announces in its DATA
 - reply. -}
receiveContent
	:: Observable t
	=> Maybe Meter
	-> MeterUpdate
	-> Local Len
	-> (Offset -> Len -> Proto L.ByteString -> Proto (Maybe Validity) -> Local t)
	-> (Offset -> Message)
	-> Proto t
receiveContent mm p sizer storer mkmsg = do
	Len n <- local sizer
	-- The meter starts out at the resume offset.
	let p' = offsetMeterUpdate p (toBytesProcessed n)
	let offset = Offset n
	net $ sendMessage (mkmsg offset)
	r <- net receiveMessage
	case r of
		Just (DATA len@(Len l)) -> do
			-- Total size is what is already present locally
			-- plus what the peer is about to send.
			local $ case mm of
				Nothing -> return ()
				Just m -> updateMeterTotalSize m (TotalSize (n+l))
			ver <- net getProtocolVersion
			-- Protocol version 1 added a VALIDITY message,
			-- sent after the data. Any other message at that
			-- point yields Nothing, after complaining to the
			-- peer.
			let validitycheck = if ver >= ProtocolVersion 1
				then net receiveMessage >>= \case
					Just (VALIDITY v) -> return (Just v)
					_ -> do
						net $ sendMessage (ERROR "expected VALID or INVALID")
						return Nothing
				else return Nothing
			-- The storer itself runs the actions that
			-- receive the bytes and the validity check, so
			-- it controls when each happens.
			v <- local $ storer offset len
				(net (receiveBytes len p'))
				validitycheck
			-- Tell the peer whether storing succeeded.
			sendSuccess (observeBool v)
			return v
		Just (ERROR _err) ->
			return observeFailure
		_ -> do
			net $ sendMessage (ERROR "expected DATA")
			return observeFailure
|
2016-11-17 21:19:04 +00:00
|
|
|
|
2016-11-18 01:48:59 +00:00
|
|
|
-- Waits for the peer's success/failure response, treating a protocol
-- ERROR as failure.
checkSuccess :: Proto Bool
checkSuccess = checkSuccess' >>= \case
	Right b -> return b
	Left _ -> return False
|
|
|
|
|
|
|
|
-- Waits for the peer's response to a request. A protocol ERROR is
-- returned as Left with its message; SUCCESS and FAILURE as Right.
-- Any other message is treated as failure, after complaining to the
-- peer.
checkSuccess' :: Proto (Either String Bool)
checkSuccess' = net receiveMessage >>= \case
	Just SUCCESS -> return (Right True)
	Just FAILURE -> return (Right False)
	Just (ERROR err) -> return (Left err)
	_ -> do
		net $ sendMessage (ERROR "expected SUCCESS or FAILURE")
		return (Right False)
|
2016-11-18 01:48:59 +00:00
|
|
|
|
2024-06-18 16:07:01 +00:00
|
|
|
-- Like checkSuccessFailurePlus, but collapsed to only indicate
-- success: the UUID list from a successful response, or Nothing on
-- failure or protocol error.
checkSuccessPlus :: Proto (Maybe [UUID])
checkSuccessPlus = collapse <$> checkSuccessFailurePlus
  where
	-- Idiomatic fmap replaces the former `>>= return . \case`
	-- anti-pattern; behavior is unchanged.
	collapse (Right True, v) = v
	collapse (Right False, _) = Nothing
	collapse (Left _, _) = Nothing
|
2024-06-23 13:28:18 +00:00
|
|
|
|
2024-06-28 18:07:23 +00:00
|
|
|
-- Waits for the peer's response to a request, also collecting the
-- UUID list from the PLUS variants of the response messages.
--
-- Protocol version 2 added SUCCESS-PLUS and FAILURE-PLUS; with older
-- peers this falls back to the plain success/failure check, and a
-- successful response is treated as an empty UUID list.
checkSuccessFailurePlus :: Proto (Either String Bool, Maybe [UUID])
checkSuccessFailurePlus = do
	ver <- net getProtocolVersion
	if ver >= ProtocolVersion 2
		then net receiveMessage >>= \case
			Just SUCCESS -> return (Right True, Just [])
			Just (SUCCESS_PLUS l) -> return (Right True, Just l)
			Just FAILURE -> return (Right False, Nothing)
			Just (FAILURE_PLUS l) -> return (Right False, Just l)
			Just (ERROR err) -> return (Left err, Nothing)
			_ -> do
				net $ sendMessage (ERROR "expected SUCCESS or SUCCESS-PLUS or FAILURE or FAILURE-PLUS")
				return (Right False, Nothing)
		else do
			ok <- checkSuccess
			return $ if ok
				then (Right True, Just [])
				else (Right False, Nothing)
|
2024-06-18 16:07:01 +00:00
|
|
|
|
2016-11-18 02:06:59 +00:00
|
|
|
-- Tells the peer whether its request was handled successfully.
sendSuccess :: Bool -> Proto ()
sendSuccess ok = net $ sendMessage $
	if ok then SUCCESS else FAILURE
|
2016-11-18 02:06:59 +00:00
|
|
|
|
2016-12-09 20:02:43 +00:00
|
|
|
-- Asks the peer to notify when refs change, and waits for a CHANGED
-- message with the changed refs. Any other reply yields Nothing,
-- after complaining to the peer.
notifyChange :: Proto (Maybe ChangedRefs)
notifyChange = do
	net $ sendMessage NOTIFYCHANGE
	net receiveMessage >>= \case
		Just (CHANGED rs) -> return (Just rs)
		_ -> do
			net $ sendMessage (ERROR "expected CHANGED")
			return Nothing
|
|
|
|
|
2016-11-20 16:08:16 +00:00
|
|
|
-- Asks the peer to connect to the given service, then relays data
-- between the given handles and the peer, returning the service's
-- exit code.
connect :: Service -> Handle -> Handle -> Proto ExitCode
connect service hin hout =
	net (sendMessage (CONNECT service))
		>> net (relay (RelayHandle hin) (RelayHandle hout))
|
|
|
|
|
|
|
|
-- Data flowing through a relayed connection.
data RelayData
	= RelayToPeer L.ByteString -- ^ bytes to send on to the peer
	| RelayFromPeer L.ByteString -- ^ bytes received from the peer
	| RelayDone ExitCode -- ^ the relayed connection is done, with its exit code
	deriving (Show)
|
|
|
|
|
|
|
|
-- Reads the next message from the peer in a relayed connection,
-- converting it into RelayData. An unexpected message ends the relay
-- with exit code 1, after complaining to the peer.
relayFromPeer :: Net RelayData
relayFromPeer = receiveMessage >>= \case
	Just (CONNECTDONE exitcode) -> return (RelayDone exitcode)
	Just (DATA len) -> RelayFromPeer <$> receiveBytes len nullMeterUpdate
	_ -> do
		sendMessage $ ERROR "expected DATA or CONNECTDONE"
		return $ RelayDone $ ExitFailure 1
|
|
|
|
|
|
|
|
-- Sends RelayData on to the peer. Data that came from the peer is
-- not echoed back.
relayToPeer :: RelayData -> Net ()
relayToPeer = \case
	RelayDone exitcode -> sendMessage (CONNECTDONE exitcode)
	RelayToPeer b -> do
		let len = Len (fromIntegral (L.length b))
		sendMessage (DATA len)
		sendBytes len b nullMeterUpdate
	RelayFromPeer _ -> return ()
|
2024-07-01 14:42:27 +00:00
|
|
|
|