{- P2P protocol proxying
 -
 - Copyright 2024 Joey Hess <id@joeyh.name>
 -
 - Licensed under the GNU AGPL version 3 or higher.
 -}

{-# LANGUAGE RankNTypes, FlexibleContexts, ScopedTypeVariables #-}
{-# LANGUAGE BangPatterns #-}

module P2P.Proxy where

import Annex.Common
import qualified Annex
import P2P.Protocol
import P2P.IO
import Utility.Metered
import Utility.MonotonicClock
import Git.FilePath
import Types.Concurrency
import Annex.Concurrent
import qualified Remote

import Data.Either
import Control.Concurrent.STM
import Control.Concurrent.Async
import qualified Control.Concurrent.MSem as MSem
import qualified Data.ByteString.Lazy as L
import qualified Data.Set as S
import qualified Data.Map as M
import Data.Unique
import GHC.Conc

type ProtoCloser = Annex ()

data ClientSide = ClientSide RunState P2PConnection

newtype RemoteSideId = RemoteSideId Unique
	deriving (Eq, Ord)

data RemoteSide = RemoteSide
	{ remote :: Remote
	, remoteConnect :: Annex (Maybe (RunState, P2PConnection, ProtoCloser))
	, remoteTMVar :: TMVar (RunState, P2PConnection, ProtoCloser)
	, remoteSideId :: RemoteSideId
	}

mkRemoteSide :: Remote -> Annex (Maybe (RunState, P2PConnection, ProtoCloser)) -> Annex RemoteSide
mkRemoteSide r remoteconnect = RemoteSide
	<$> pure r
	<*> pure remoteconnect
	<*> liftIO (atomically newEmptyTMVar)
	<*> liftIO (RemoteSideId <$> newUnique)

runRemoteSide :: RemoteSide -> Proto a -> Annex (Either ProtoFailure a)
runRemoteSide remoteside a =
	liftIO (atomically $ tryReadTMVar $ remoteTMVar remoteside) >>= \case
		Just (runst, conn, _closer) -> liftIO $ runNetProto runst conn a
		Nothing -> remoteConnect remoteside >>= \case
			Just (runst, conn, closer) -> do
				liftIO $ atomically $ putTMVar
					(remoteTMVar remoteside)
					(runst, conn, closer)
				liftIO $ runNetProto runst conn a
			Nothing -> giveup "Unable to connect to remote."

closeRemoteSide :: RemoteSide -> Annex ()
closeRemoteSide remoteside =
	liftIO (atomically $ tryTakeTMVar $ remoteTMVar remoteside) >>= \case
		Just (_, _, closer) -> closer
		Nothing -> return ()

{- Selects what remotes to proxy to for top-level P2P protocol
 - actions.
 -}
data ProxySelector = ProxySelector
	{ proxyCHECKPRESENT :: Key -> Annex (Maybe RemoteSide)
	, proxyLOCKCONTENT :: Key -> Annex (Maybe RemoteSide)
	, proxyUNLOCKCONTENT :: Annex (Maybe RemoteSide)
	, proxyREMOVE :: Key -> Annex [RemoteSide]
	-- ^ remove from all of these remotes
	, proxyGETTIMESTAMP :: Annex [RemoteSide]
	-- ^ should send every remote that proxyREMOVE can
	-- ever return for any key
	, proxyGET :: Key -> Annex (Maybe RemoteSide)
	, proxyPUT :: AssociatedFile -> Key -> Annex [RemoteSide]
	-- ^ put to some/all of these remotes
	}

singleProxySelector :: RemoteSide -> ProxySelector
singleProxySelector r = ProxySelector
	{ proxyCHECKPRESENT = const (pure (Just r))
	, proxyLOCKCONTENT = const (pure (Just r))
	, proxyUNLOCKCONTENT = pure (Just r)
	, proxyREMOVE = const (pure [r])
	, proxyGETTIMESTAMP = pure [r]
	, proxyGET = const (pure (Just r))
	, proxyPUT = const (const (pure [r]))
	}

{- To keep this module limited to P2P protocol actions,
 - all other actions that a proxy needs to do are provided
 - here. -}
data ProxyMethods = ProxyMethods
	{ removedContent :: UUID -> Key -> Annex ()
	-- ^ called when content is removed from a repository
	, addedContent :: UUID -> Key -> Annex ()
	-- ^ called when content is added to a repository
	}

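{- A minimal sketch (hypothetical, not part of this module) of building
 - a ProxyMethods value. Real callers hook these callbacks up to
 - location log updates; the nullProxyMethods below simply ignores the
 - notifications, which can be enough for a test harness.
 -
 - > nullProxyMethods :: ProxyMethods
 - > nullProxyMethods = ProxyMethods
 - >     { removedContent = \_uuid _key -> noop
 - >     , addedContent = \_uuid _key -> noop
 - >     }
 -}
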
{- Type of function that takes an error handler, which is
 - used to handle a ProtoFailure when receiving a message
 - from the client or remote.
 -}
type ProtoErrorHandled r =
	(forall t. ((t -> Annex r) -> Annex (Either ProtoFailure t) -> Annex r)) -> Annex r

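{- A minimal sketch (hypothetical, not part of this module) of an error
 - handler with the shape that ProtoErrorHandled expects: it runs the
 - protocol action, passes a successful result on to the continuation,
 - and otherwise gives up with the failure description (assuming
 - P2P.Protocol's describeProtoFailure).
 -
 - > simpleErrHandler :: (t -> Annex r) -> Annex (Either ProtoFailure t) -> Annex r
 - > simpleErrHandler cont runproto = runproto >>= \case
 - >     Right v -> cont v
 - >     Left e -> giveup (describeProtoFailure e)
 -}
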
{- This is the first thing run when proxying with a client.
 - The client has already authenticated. Most clients will send a
 - VERSION message, although version 0 clients will not and will send
 - some other message, which is returned to handle later.
 -
 - But before the client will send VERSION, it needs to see AUTH_SUCCESS.
 - So send that, although the connection with the remote is not actually
 - brought up yet.
 -}
getClientProtocolVersion
	:: UUID
	-> ClientSide
	-> (Maybe (ProtocolVersion, Maybe Message) -> Annex r)
	-> ProtoErrorHandled r
getClientProtocolVersion remoteuuid (ClientSide clientrunst clientconn) cont protoerrhandler =
	protoerrhandler cont $ client $ getClientProtocolVersion' remoteuuid
  where
	client = liftIO . runNetProto clientrunst clientconn

getClientProtocolVersion'
	:: UUID
	-> Proto (Maybe (ProtocolVersion, Maybe Message))
getClientProtocolVersion' remoteuuid = do
	net $ sendMessage (AUTH_SUCCESS remoteuuid)
	msg <- net receiveMessage
	case msg of
		Nothing -> return Nothing
		Just (VERSION v) ->
			-- If the client sends a newer version than we
			-- understand, reduce it; we need to parse the
			-- protocol too.
			let v' = min v maxProtocolVersion
			in return (Just (v', Nothing))
		Just othermsg -> return
			(Just (defaultProtocolVersion, Just othermsg))

{- Send negotiated protocol version to the client.
 - With a version 0 client, preserves the other protocol message
 - received in getClientProtocolVersion. -}
sendClientProtocolVersion
	:: ClientSide
	-> Maybe Message
	-> ProtocolVersion
	-> (Maybe Message -> Annex r)
	-> ProtoErrorHandled r
sendClientProtocolVersion (ClientSide clientrunst clientconn) othermsg protocolversion cont protoerrhandler =
	case othermsg of
		Nothing -> protoerrhandler (\() -> cont Nothing) $
			client $ net $ sendMessage $ VERSION protocolversion
		Just _ -> cont othermsg
  where
	client = liftIO . runNetProto clientrunst clientconn

{- When speaking to a version 2 client, get the BYPASS message which may be
 - sent immediately after VERSION. Returns any other message to be handled
 - later. -}
getClientBypass
	:: ClientSide
	-> ProtocolVersion
	-> Maybe Message
	-> ((Bypass, Maybe Message) -> Annex r)
	-> ProtoErrorHandled r
getClientBypass (ClientSide clientrunst clientconn) (ProtocolVersion protocolversion) Nothing cont protoerrhandler
	| protocolversion < 2 = cont (Bypass S.empty, Nothing)
	| otherwise = protoerrhandler cont $
		client $ net receiveMessage >>= return . \case
			Just (BYPASS bypass) -> (bypass, Nothing)
			Just othermsg -> (Bypass S.empty, Just othermsg)
			Nothing -> (Bypass S.empty, Nothing)
  where
	client = liftIO . runNetProto clientrunst clientconn
getClientBypass _ _ (Just othermsg) cont _ =
	-- Pass along non-BYPASS message from version 0 client.
	cont (Bypass S.empty, (Just othermsg))

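{- A sketch (hypothetical, not part of this module) of how the handshake
 - functions above chain together before proxying begins. It assumes
 - values such as remoteuuid, clientside, proxymethods, proxystate,
 - servermode, proxyselector and concurrencyconfig are already in scope,
 - plus an error handler like the simpleErrHandler sketched earlier.
 - Note that getClientProtocolVersion already caps the version at
 - maxProtocolVersion.
 -
 - > runProxyHandshake = getClientProtocolVersion remoteuuid clientside
 - >     (\case
 - >         Nothing -> return ()
 - >         Just (protocolversion, othermsg) ->
 - >             sendClientProtocolVersion clientside othermsg protocolversion
 - >                 (\othermsg' -> getClientBypass clientside protocolversion othermsg'
 - >                     (\(_bypass, othermsg'') -> proxy (return ()) proxymethods
 - >                         proxystate servermode clientside remoteuuid
 - >                         proxyselector concurrencyconfig protocolversion
 - >                         othermsg'' simpleErrHandler)
 - >                     simpleErrHandler)
 - >                 simpleErrHandler)
 - >     simpleErrHandler
 -}
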
data ProxyState = ProxyState
	{ proxyRemoteLatestTimestamps :: TVar (M.Map RemoteSideId MonotonicTimestamp)
	, proxyRemoteLatestLocalTimestamp :: TVar (Maybe MonotonicTimestamp)
	}

mkProxyState :: IO ProxyState
mkProxyState = ProxyState
	<$> newTVarIO mempty
	<*> newTVarIO Nothing

{- Proxy between the client and the remote. This picks up after
 - sendClientProtocolVersion.
 -}
proxy
	:: Annex r
	-> ProxyMethods
	-> ProxyState
	-> ServerMode
	-> ClientSide
	-> UUID
	-> ProxySelector
	-> ConcurrencyConfig
	-> ProtocolVersion
	-- ^ Protocol version being spoken between the proxy and the
	-- client. When there are multiple remotes, some may speak an
	-- earlier version.
	-> Maybe Message
	-- ^ non-VERSION message that was received from the client when
	-- negotiating protocol version, and has not been responded to yet
	-> ProtoErrorHandled r
proxy proxydone proxymethods proxystate servermode (ClientSide clientrunst clientconn) remoteuuid proxyselector concurrencyconfig (ProtocolVersion protocolversion) othermsg protoerrhandler = do
	case othermsg of
		Nothing -> proxynextclientmessage ()
		Just message -> proxyclientmessage (Just message)
  where
	client = liftIO . runNetProto clientrunst clientconn

	proxynextclientmessage () = protoerrhandler proxyclientmessage $
		client (net receiveMessage)

	servermodechecker c a = c servermode $ \case
		Nothing -> a
		Just notallowed ->
			protoerrhandler proxynextclientmessage $
				client notallowed

	proxyclientmessage Nothing = proxydone
	proxyclientmessage (Just message) = case message of
		CHECKPRESENT k -> proxyCHECKPRESENT proxyselector k >>= \case
			Just remoteside ->
				proxyresponse remoteside message
					(const proxynextclientmessage)
			Nothing ->
				protoerrhandler proxynextclientmessage $
					client $ net $ sendMessage FAILURE
		LOCKCONTENT k -> proxyLOCKCONTENT proxyselector k >>= \case
			Just remoteside ->
				proxyresponse remoteside message
					(const proxynextclientmessage)
			Nothing ->
				protoerrhandler proxynextclientmessage $
					client $ net $ sendMessage FAILURE
		UNLOCKCONTENT -> proxyUNLOCKCONTENT proxyselector >>= \case
			Just remoteside ->
				proxynoresponse remoteside message
					proxynextclientmessage
			Nothing -> proxynextclientmessage ()
		REMOVE k -> do
			remotesides <- proxyREMOVE proxyselector k
			servermodechecker checkREMOVEServerMode $
				handleREMOVE remotesides k message
		REMOVE_BEFORE _ k -> do
			remotesides <- proxyREMOVE proxyselector k
			servermodechecker checkREMOVEServerMode $
				handleREMOVE remotesides k message
		GETTIMESTAMP -> do
			remotesides <- proxyGETTIMESTAMP proxyselector
			handleGETTIMESTAMP remotesides
		GET _ _ k -> proxyGET proxyselector k >>= \case
			Just remoteside -> handleGET remoteside message
			Nothing ->
				protoerrhandler proxynextclientmessage $
					client $ net $ sendMessage $
						ERROR "content not present"
		PUT paf k -> do
			af <- getassociatedfile paf
			remotesides <- proxyPUT proxyselector af k
			servermodechecker checkPUTServerMode $
				handlePUT remotesides k message
		BYPASS _ -> proxynextclientmessage ()
		-- These messages involve the git repository, not the
		-- annex. So they affect the git repository of the proxy,
		-- not the remote.
		CONNECT service ->
			servermodechecker (checkCONNECTServerMode service) $
				-- P2P protocol does not continue after
				-- relaying from git.
				protoerrhandler (\() -> proxydone) $
					client $ net $ relayService service
		NOTIFYCHANGE -> protoerr
		-- Messages that the client should only send after one of
		-- the messages above.
		SUCCESS -> protoerr
		SUCCESS_PLUS _ -> protoerr
		FAILURE -> protoerr
		FAILURE_PLUS _ -> protoerr
		DATA _ -> protoerr
		VALIDITY _ -> protoerr
		-- If the client errors out, give up.
		ERROR msg -> giveup $ "client error: " ++ msg
		-- Messages that only the server should send.
		CONNECTDONE _ -> protoerr
		CHANGED _ -> protoerr
		AUTH_SUCCESS _ -> protoerr
		AUTH_FAILURE -> protoerr
		PUT_FROM _ -> protoerr
		ALREADY_HAVE -> protoerr
		ALREADY_HAVE_PLUS _ -> protoerr
		TIMESTAMP _ -> protoerr
		-- Early messages that the client should not send now.
		AUTH _ _ -> protoerr
		VERSION _ -> protoerr

	-- Send a message to the remote, send its response back to the
	-- client, and pass it to the continuation.
	proxyresponse remoteside message a =
		getresponse (runRemoteSide remoteside) message $ \resp ->
			protoerrhandler (a resp) $
				client $ net $ sendMessage resp

	-- Send a message to the remote that it will not respond to.
	proxynoresponse remoteside message a =
		protoerrhandler a $
			runRemoteSide remoteside $ net $ sendMessage message

	-- Send a message to the endpoint and get back its response.
	getresponse endpoint message handleresp =
		protoerrhandler (withresp handleresp) $
			endpoint $ net $ do
				sendMessage message
				receiveMessage

	withresp a (Just resp) = a resp
	-- Whichever of the remote or client the message was read from
	-- hung up.
	withresp _ Nothing = proxydone

	-- Read a message from one party, send it to the other,
	-- and then pass the message to the continuation.
	relayonemessage from to cont =
		flip protoerrhandler (from $ net $ receiveMessage) $
			withresp $ \message ->
				protoerrhandler (cont message) $
					to $ net $ sendMessage message

	protoerr = do
		_ <- client $ net $ sendMessage (ERROR "protocol error X")
		giveup "protocol error M"

	-- When there is a single remote, reply with its timestamp,
	-- to avoid needing timestamp translation.
	handleGETTIMESTAMP (remoteside:[]) = do
		liftIO $ atomically $ do
			writeTVar (proxyRemoteLatestTimestamps proxystate)
				mempty
			writeTVar (proxyRemoteLatestLocalTimestamp proxystate)
				Nothing
		proxyresponse remoteside GETTIMESTAMP
			(const proxynextclientmessage)
	-- When there are multiple remotes, reply with our local timestamp,
	-- and do timestamp translation when sending REMOVE-BEFORE.
	handleGETTIMESTAMP remotesides = do
		-- Order of getting timestamps matters.
		-- Getting the local time after the time of the remotes
		-- means that if there is some delay in getting the time
		-- from a remote, that is reflected in the local time,
		-- and so reduces the allowed time.
		remotetimes <- (M.fromList . mapMaybe join) <$> getremotetimes
		localtime <- liftIO currentMonotonicTimestamp
		liftIO $ atomically $ do
			writeTVar (proxyRemoteLatestTimestamps proxystate)
				remotetimes
			writeTVar (proxyRemoteLatestLocalTimestamp proxystate)
				(Just localtime)
		protoerrhandler proxynextclientmessage $
			client $ net $ sendMessage (TIMESTAMP localtime)
	  where
		getremotetimes = forMC concurrencyconfig remotesides $ \r ->
			runRemoteSideOrSkipFailed r $ do
				net $ sendMessage GETTIMESTAMP
				net receiveMessage >>= return . \case
					Just (TIMESTAMP ts) ->
						Just (remoteSideId r, ts)
					_ -> Nothing

	proxyTimestamp ts _ _ Nothing = ts -- not proxying timestamps
	proxyTimestamp ts r tsm (Just correspondinglocaltime) =
		case M.lookup (remoteSideId r) tsm of
			Just oldts -> oldts + (ts - correspondinglocaltime)
			Nothing -> ts -- not reached

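	-- A worked example of the translation above, with illustrative
	-- numbers: if at GETTIMESTAMP time a remote reported monotonic
	-- time 100 while the proxy's local clock read 1000, then a client
	-- REMOVE_BEFORE timestamp of 1005 (5 seconds of allowed time on
	-- the proxy's clock) is translated to 100 + (1005 - 1000) = 105
	-- on that remote's clock.
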
	handleREMOVE [] _ _ =
		-- When no places are provided to remove from,
		-- don't report a successful remote.
		protoerrhandler proxynextclientmessage $
			client $ net $ sendMessage FAILURE
	handleREMOVE remotesides k message = do
		tsm <- liftIO $ readTVarIO $
			proxyRemoteLatestTimestamps proxystate
		oldlocaltime <- liftIO $ readTVarIO $
			proxyRemoteLatestLocalTimestamp proxystate
		v <- forMC concurrencyconfig remotesides $ \r ->
			runRemoteSideOrSkipFailed r $ do
				case message of
					REMOVE_BEFORE ts _ -> do
						v <- net getProtocolVersion
						if v < ProtocolVersion 3
							then net $ sendMessage $
								REMOVE k
							else net $ sendMessage $
								REMOVE_BEFORE (proxyTimestamp ts r tsm oldlocaltime) k
					_ -> net $ sendMessage message
				net receiveMessage >>= return . \case
					Just SUCCESS ->
						Just ((True, Nothing), [Remote.uuid (remote r)])
					Just (SUCCESS_PLUS us) ->
						Just ((True, Nothing), Remote.uuid (remote r):us)
					Just FAILURE ->
						Just ((False, Nothing), [])
					Just (FAILURE_PLUS us) ->
						Just ((False, Nothing), us)
					Just (ERROR err) ->
						Just ((False, Just err), [])
					_ -> Nothing
		let v' = map join v
		let us = concatMap snd $ catMaybes v'
		mapM_ (\u -> removedContent proxymethods u k) us
		protoerrhandler proxynextclientmessage $
			client $ net $ sendMessage $
				let nonplussed = all (== remoteuuid) us
					|| protocolversion < 2
				in if all (maybe False (fst . fst)) v'
					then if nonplussed
						then SUCCESS
						else SUCCESS_PLUS us
					else if nonplussed
						then case mapMaybe (snd . fst) (catMaybes v') of
							[] -> FAILURE
							(err:_) -> ERROR err
						else FAILURE_PLUS us

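	-- Illustrative example of the response aggregation above: with a
	-- protocol version 2 client, if two nodes whose UUIDs u1 and u2
	-- both differ from the proxied remote's UUID reply SUCCESS, the
	-- client is sent SUCCESS_PLUS [u1, u2]; a version 0 or 1 client
	-- is sent plain SUCCESS instead.
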
	handleGET remoteside message = getresponse (runRemoteSide remoteside) message $
		withDATA (relayGET remoteside) $ \case
			ERROR err -> protoerrhandler proxynextclientmessage $
				client $ net $ sendMessage (ERROR err)
			_ -> protoerr

	handlePUT (remoteside:[]) k message
		| Remote.uuid (remote remoteside) == remoteuuid =
			getresponse (runRemoteSide remoteside) message $ \resp -> case resp of
				ALREADY_HAVE -> protoerrhandler proxynextclientmessage $
					client $ net $ sendMessage resp
				ALREADY_HAVE_PLUS _ -> protoerrhandler proxynextclientmessage $
					client $ net $ sendMessage resp
				PUT_FROM _ ->
					getresponse client resp $
						withDATA
							(relayPUT remoteside k)
							(const protoerr)
				_ -> protoerr
	handlePUT [] _ _ =
		protoerrhandler proxynextclientmessage $
			client $ net $ sendMessage ALREADY_HAVE
	handlePUT remotesides k message =
		handlePutMulti remotesides k message

	withDATA a _ message@(DATA len) = a len message
	withDATA _ a message = a message

	relayGET remoteside len = relayDATAStart client $
		relayDATACore len (runRemoteSide remoteside) client $
			relayDATAFinish (runRemoteSide remoteside) client $
				relayonemessage client (runRemoteSide remoteside) $
					const proxynextclientmessage

	relayPUT remoteside k len = relayDATAStart (runRemoteSide remoteside) $
		relayDATACore len client (runRemoteSide remoteside) $
			relayDATAFinish client (runRemoteSide remoteside) $
				relayonemessage (runRemoteSide remoteside) client finished
	  where
		finished resp () = do
			void $ relayPUTRecord k remoteside resp
			proxynextclientmessage ()

	relayPUTRecord k remoteside SUCCESS = do
		addedContent proxymethods (Remote.uuid (remote remoteside)) k
		return $ Just [Remote.uuid (remote remoteside)]
	relayPUTRecord k remoteside (SUCCESS_PLUS us) = do
		let us' = (Remote.uuid (remote remoteside)) : us
		forM_ us' $ \u ->
			addedContent proxymethods u k
		return $ Just us'
	relayPUTRecord _ _ _ =
		return Nothing

	handlePutMulti remotesides k message = do
		let initiate remoteside = do
			resp <- runRemoteSideOrSkipFailed remoteside $ net $ do
				sendMessage message
				receiveMessage
			case resp of
				Just (Just (PUT_FROM (Offset offset))) ->
					return $ Right $
						Right (remoteside, offset)
				Just (Just ALREADY_HAVE) ->
					return $ Right $ Left remoteside
				Just (Just _) -> protoerr
				Just Nothing -> return (Left ())
				Nothing -> return (Left ())
		let alreadyhave = \case
			Right (Left _) -> True
			_ -> False
		l <- forMC concurrencyconfig remotesides initiate
		if all alreadyhave l
			then if protocolversion < 2
				then protoerrhandler proxynextclientmessage $
					client $ net $ sendMessage ALREADY_HAVE
				else protoerrhandler proxynextclientmessage $
					client $ net $ sendMessage $ ALREADY_HAVE_PLUS $
						filter (/= remoteuuid) $
							map (Remote.uuid . remote) (lefts (rights l))
			else if null (rights l)
				-- no response from any remote
				then proxydone
				else do
					let l' = rights (rights l)
					let minoffset = minimum (map snd l')
					getresponse client (PUT_FROM (Offset minoffset)) $
						withDATA (relayPUTMulti minoffset l' k)
							(const protoerr)

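	-- Illustrative example of the offset choice above: if one remote
	-- replies PUT_FROM (Offset 0) and another PUT_FROM (Offset 300),
	-- the client is asked to resume from offset 0, the minimum, so a
	-- single data stream from the client can satisfy every remote.
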
	relayPUTMulti minoffset remotes k (Len datalen) _ = do
		let totallen = datalen + minoffset
		-- Tell each remote how much data to expect, depending
		-- on the remote's offset.
		rs <- forMC concurrencyconfig remotes $ \r@(remoteside, remoteoffset) ->
			runRemoteSideOrSkipFailed remoteside $ do
				net $ sendMessage $ DATA $ Len $
					totallen - remoteoffset
				return r
		protoerrhandler (send (catMaybes rs) minoffset) $
			client $ net $ receiveBytes (Len datalen) nullMeterUpdate
	  where
		chunksize = fromIntegral defaultChunkSize

		-- Stream the lazy bytestring out to the remotes in chunks.
		-- Only start sending to a remote once past its desired
		-- offset.
		send rs n b = do
			let (chunk, b') = L.splitAt chunksize b
			let chunklen = fromIntegral (L.length chunk)
			let !n' = n + chunklen
			rs' <- forMC concurrencyconfig rs $ \r@(remoteside, remoteoffset) ->
				if n >= remoteoffset
					then runRemoteSideOrSkipFailed remoteside $ do
						net $ sendBytes (Len chunklen) chunk nullMeterUpdate
						return r
					else if (n' > remoteoffset)
						then do
							let chunkoffset = remoteoffset - n
							let subchunklen = chunklen - chunkoffset
							let subchunk = L.drop (fromIntegral chunkoffset) chunk
							runRemoteSideOrSkipFailed remoteside $ do
								net $ sendBytes (Len subchunklen) subchunk nullMeterUpdate
								return r
						else return (Just r)
			if L.null b'
				then sent (catMaybes rs')
				else send (catMaybes rs') n' b'

		sent [] = proxydone
		sent rs = relayDATAFinishMulti k (map fst rs)

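	-- A worked example of the chunking above, with illustrative
	-- numbers: with minoffset 0, chunksize 100, and a remote whose
	-- remoteoffset is 250, the first two chunks (bytes 0-199) are
	-- skipped for that remote; in the third chunk (n = 200, n' = 300)
	-- chunkoffset is 50, so only its trailing 50 bytes are sent; every
	-- later chunk is sent in full.
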
	runRemoteSideOrSkipFailed remoteside a =
		runRemoteSide remoteside a >>= \case
			Right v -> return (Just v)
			Left _ -> do
				-- This connection to the remote is
				-- unrecoverable at this point, so close it.
				closeRemoteSide remoteside
				return Nothing

	relayDATAStart x receive message =
		protoerrhandler (\() -> receive) $
			x $ net $ sendMessage message

	relayDATACore len x y a = protoerrhandler send $
		x $ net $ receiveBytes len nullMeterUpdate
	  where
		send b = protoerrhandler a $
			y $ net $ sendBytes len b nullMeterUpdate

	relayDATAFinish x y sendsuccessfailure ()
		| protocolversion == 0 = sendsuccessfailure
		-- Protocol version 1 has a VALID or
		-- INVALID message after the data.
		| otherwise = relayonemessage x y (\_ () -> sendsuccessfailure)

	relayDATAFinishMulti k rs
		| protocolversion == 0 =
			finish $ net receiveMessage
		| otherwise =
			flip protoerrhandler (client $ net $ receiveMessage) $
				withresp $ \message ->
					finish $ do
						-- Relay VALID or INVALID message
						-- only to remotes that support
						-- protocol version 1.
						net getProtocolVersion >>= \case
							ProtocolVersion 0 -> return ()
							_ -> net $ sendMessage message
						net receiveMessage
	  where
		finish a = do
			storeduuids <- forMC concurrencyconfig rs $ \r ->
				runRemoteSideOrSkipFailed r a >>= \case
					Just (Just resp) ->
						relayPUTRecord k r resp
					_ -> return Nothing
			protoerrhandler proxynextclientmessage $
				client $ net $ sendMessage $
					case concat (catMaybes storeduuids) of
						[] -> FAILURE
						us
							| protocolversion < 2 -> SUCCESS
							| otherwise -> SUCCESS_PLUS us

	-- The associated file received from the P2P protocol
	-- is relative to the top of the git repository. But this process
	-- may be running with a different cwd.
	getassociatedfile (ProtoAssociatedFile (AssociatedFile (Just f))) =
		AssociatedFile . Just
			<$> fromRepo (fromTopFilePath (asTopFilePath f))
	getassociatedfile (ProtoAssociatedFile (AssociatedFile Nothing)) =
		return $ AssociatedFile Nothing

data ConcurrencyConfig = ConcurrencyConfig Int (MSem.MSem Int)

noConcurrencyConfig :: Annex ConcurrencyConfig
noConcurrencyConfig = liftIO $ ConcurrencyConfig 1 <$> MSem.new 1

getConcurrencyConfig :: Annex ConcurrencyConfig
getConcurrencyConfig = (annexJobs <$> Annex.getGitConfig) >>= \case
	NonConcurrent -> noConcurrencyConfig
	Concurrent n -> go n
	ConcurrentPerCpu -> go =<< liftIO getNumProcessors
  where
	go n = do
		c <- liftIO getNumCapabilities
		when (n > c) $
			liftIO $ setNumCapabilities n
		setConcurrency (ConcurrencyGitConfig (Concurrent n))
		msem <- liftIO $ MSem.new n
		return (ConcurrencyConfig n msem)

forMC :: ConcurrencyConfig -> [a] -> (a -> Annex b) -> Annex [b]
forMC _ (x:[]) a = do
	r <- a x
	return [r]
forMC (ConcurrencyConfig n msem) xs a
	| n < 2 = forM xs a
	| otherwise = do
		runners <- forM xs $ \x ->
			forkState $ bracketIO
				(MSem.wait msem)
				(const $ MSem.signal msem)
				(const $ a x)
		mapM id =<< liftIO (forConcurrently runners id)
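
{- A usage sketch (hypothetical, not part of this module): checking for
 - a key on several remote sides concurrently, bounded by the configured
 - job limit (assuming P2P.Protocol's checkPresent).
 -
 - > checkOnAll :: ConcurrencyConfig -> Key -> [RemoteSide] -> Annex [Either ProtoFailure Bool]
 - > checkOnAll concurrencyconfig k remotesides =
 - >     forMC concurrencyconfig remotesides $ \r ->
 - >         runRemoteSide r (checkPresent k)
 -}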