{- P2P protocol over HTTP
 -
 - https://git-annex.branchable.com/design/p2p_protocol_over_http/
 -
 - Copyright 2024 Joey Hess <id@joeyh.name>
 -
 - Licensed under the GNU AGPL version 3 or higher.
 -}

{-# LANGUAGE DataKinds #-}
{-# LANGUAGE TypeOperators #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE TypeApplications #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE BangPatterns #-}

module P2P.Http (
	module P2P.Http,
	module P2P.Http.Types,
	module P2P.Http.State,
) where

import Annex.Common
import P2P.Http.Types
import P2P.Http.State
import P2P.Protocol hiding (Offset, Bypass, auth)
import P2P.IO
import P2P.Annex
import Annex.WorkerPool
import Annex.Concurrent
import Types.WorkerPool
import Types.Direction
import Utility.Metered

import Servant
import Servant.Client.Streaming
import qualified Servant.Types.SourceT as S
import qualified Data.ByteString as B
import qualified Data.ByteString.Lazy as L
import qualified Data.ByteString.Lazy.Internal as LI
import Data.Char
import Control.Concurrent.STM
import Control.Concurrent.Async
import Control.Concurrent
import System.IO.Unsafe

type P2PHttpAPI
	= "git-annex" :> SU :> PV3 :> "key" :> GetAPI
	:<|> "git-annex" :> SU :> PV2 :> "key" :> GetAPI
	:<|> "git-annex" :> SU :> PV1 :> "key" :> GetAPI
	:<|> "git-annex" :> SU :> PV0 :> "key" :> GetAPI
	:<|> "git-annex" :> SU :> PV3 :> "checkpresent" :> CheckPresentAPI
	:<|> "git-annex" :> SU :> PV2 :> "checkpresent" :> CheckPresentAPI
	:<|> "git-annex" :> SU :> PV1 :> "checkpresent" :> CheckPresentAPI
	:<|> "git-annex" :> SU :> PV0 :> "checkpresent" :> CheckPresentAPI
	:<|> "git-annex" :> SU :> PV3 :> "remove" :> RemoveAPI RemoveResultPlus
	:<|> "git-annex" :> SU :> PV2 :> "remove" :> RemoveAPI RemoveResultPlus
	:<|> "git-annex" :> SU :> PV1 :> "remove" :> RemoveAPI RemoveResult
	:<|> "git-annex" :> SU :> PV0 :> "remove" :> RemoveAPI RemoveResult
	:<|> "git-annex" :> SU :> PV3 :> "remove-before" :> RemoveBeforeAPI
	:<|> "git-annex" :> SU :> PV3 :> "gettimestamp" :> GetTimestampAPI
	:<|> "git-annex" :> SU :> PV3 :> "put" :> PutAPI PutResultPlus
	:<|> "git-annex" :> SU :> PV2 :> "put" :> PutAPI PutResultPlus
	:<|> "git-annex" :> SU :> PV1 :> "put" :> PutAPI PutResult
	:<|> "git-annex" :> SU :> PV0 :> "put" :> PutAPI PutResult
	:<|> "git-annex" :> SU :> PV3 :> "putoffset"
		:> PutOffsetAPI PutOffsetResultPlus
	:<|> "git-annex" :> SU :> PV2 :> "putoffset"
		:> PutOffsetAPI PutOffsetResultPlus
	:<|> "git-annex" :> SU :> PV1 :> "putoffset"
		:> PutOffsetAPI PutOffsetResult
	:<|> "git-annex" :> SU :> PV3 :> "lockcontent" :> LockContentAPI
	:<|> "git-annex" :> SU :> PV2 :> "lockcontent" :> LockContentAPI
	:<|> "git-annex" :> SU :> PV1 :> "lockcontent" :> LockContentAPI
	:<|> "git-annex" :> SU :> PV0 :> "lockcontent" :> LockContentAPI
	:<|> "git-annex" :> SU :> PV3 :> "keeplocked" :> KeepLockedAPI
	:<|> "git-annex" :> SU :> PV2 :> "keeplocked" :> KeepLockedAPI
	:<|> "git-annex" :> SU :> PV1 :> "keeplocked" :> KeepLockedAPI
	:<|> "git-annex" :> SU :> PV0 :> "keeplocked" :> KeepLockedAPI
	:<|> "git-annex" :> SU :> "key" :> GetGenericAPI

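-- An informal sketch of how these routes appear on the wire (the
-- normative description is the design document linked above, and the
-- SU, PVn and parameter combinators are defined in P2P.Http.Types):
-- assuming SU captures the server repository's UUID and PV3 matches a
-- "v3" path segment, a protocol version 3 key download would be
-- requested with something like
--
--   GET /git-annex/<serveruuid>/v3/key/<key>
--
-- and the other endpoints (checkpresent, remove, put, ...) hang off the
-- same per-UUID, per-version prefix.
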
p2pHttpAPI :: Proxy P2PHttpAPI
p2pHttpAPI = Proxy

p2pHttpApp :: P2PHttpServerState -> Application
p2pHttpApp = serve p2pHttpAPI . serveP2pHttp

serveP2pHttp :: P2PHttpServerState -> Server P2PHttpAPI
serveP2pHttp st
	= serveGet st
	:<|> serveGet st
	:<|> serveGet st
	:<|> serveGet st
	:<|> serveCheckPresent st
	:<|> serveCheckPresent st
	:<|> serveCheckPresent st
	:<|> serveCheckPresent st
	:<|> serveRemove st id
	:<|> serveRemove st id
	:<|> serveRemove st dePlus
	:<|> serveRemove st dePlus
	:<|> serveRemoveBefore st
	:<|> serveGetTimestamp st
	:<|> servePut st id
	:<|> servePut st id
	:<|> servePut st dePlus
	:<|> servePut st dePlus
	:<|> servePutOffset st id
	:<|> servePutOffset st id
	:<|> servePutOffset st dePlus
	:<|> serveLockContent st
	:<|> serveLockContent st
	:<|> serveLockContent st
	:<|> serveLockContent st
	:<|> serveKeepLocked st
	:<|> serveKeepLocked st
	:<|> serveKeepLocked st
	:<|> serveKeepLocked st
	:<|> serveGetGeneric st

type GetGenericAPI
	= CaptureKey
	:> IsSecure
	:> AuthHeader
	:> StreamGet NoFraming OctetStream
		(Headers '[DataLengthHeader] (SourceIO B.ByteString))

serveGetGeneric
	:: P2PHttpServerState
	-> B64UUID ServerSide
	-> B64Key
	-> IsSecure
	-> Maybe Auth
	-> Handler (Headers '[DataLengthHeader] (S.SourceT IO B.ByteString))
serveGetGeneric st su@(B64UUID u) k =
	-- Use V0 because it does not alter the returned data to indicate
	-- invalid content.
	serveGet st su V0 k cu [] Nothing Nothing
  where
	-- Reuse the server UUID as the client UUID.
	cu = B64UUID u :: B64UUID ClientSide

type GetAPI
	= CaptureKey
	:> CU Required
	:> BypassUUIDs
	:> AssociatedFileParam
	:> OffsetParam
	:> IsSecure
	:> AuthHeader
	:> StreamGet NoFraming OctetStream
		(Headers '[DataLengthHeader] (SourceIO B.ByteString))

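-- How serveGet works: an annex worker thread runs the P2P protocol to
-- receive the key's content, and hands it over to the servant response
-- stream through a small TMVar handshake: the worker puts the content
-- (and its length) into bsv and then blocks on endv; the streamer
-- signals endv once it reaches the last chunk, reads the Validity the
-- worker reports via validityv, and finally signals finalv so that
-- waitfinal can reap the worker and release the P2P connection.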
serveGet
	:: APIVersion v
	=> P2PHttpServerState
	-> B64UUID ServerSide
	-> v
	-> B64Key
	-> B64UUID ClientSide
	-> [B64UUID Bypass]
	-> Maybe B64FilePath
	-> Maybe Offset
	-> IsSecure
	-> Maybe Auth
	-> Handler (Headers '[DataLengthHeader] (S.SourceT IO B.ByteString))
serveGet st su apiver (B64Key k) cu bypass baf startat sec auth = do
	conn <- getP2PConnection apiver st cu su bypass sec auth ReadAction id
	bsv <- liftIO newEmptyTMVarIO
	endv <- liftIO newEmptyTMVarIO
	validityv <- liftIO newEmptyTMVarIO
	finalv <- liftIO newEmptyTMVarIO
	annexworker <- liftIO $ async $ inAnnexWorker st $ do
		let storer _offset len = sendContentWith $ \bs -> do
			liftIO $ atomically $ putTMVar bsv (len, bs)
			liftIO $ atomically $ takeTMVar endv
			liftIO $ signalFullyConsumedByteString $
				connOhdl $ serverP2PConnection conn
			return $ \v -> do
				liftIO $ atomically $ putTMVar validityv v
				return True
		enteringStage (TransferStage Upload) $
			runFullProto (clientRunState conn) (clientP2PConnection conn) $
				void $ receiveContent Nothing nullMeterUpdate
					sizer storer getreq
	void $ liftIO $ forkIO $ waitfinal endv finalv conn annexworker
	(Len len, bs) <- liftIO $ atomically $ takeTMVar bsv
	bv <- liftIO $ newMVar (filter (not . B.null) (L.toChunks bs))
	szv <- liftIO $ newMVar 0
	let streamer = S.SourceT $ \s -> s =<< return
		(stream (bv, szv, len, endv, validityv, finalv))
	return $ addHeader (DataLength len) streamer
  where
	stream (bv, szv, len, endv, validityv, finalv) =
		S.fromActionStep B.null $
			modifyMVar bv $ nextchunk szv $
				checkvalidity szv len endv validityv finalv

	nextchunk szv checkvalid (b:[]) = do
		updateszv szv b
		ifM checkvalid
			( return ([], b)
			-- The key's content is invalid, but
			-- the amount of data is the same as the
			-- DataLengthHeader indicated. Truncate
			-- the response by one byte to indicate
			-- to the client that it's not valid.
			, return ([], B.take (B.length b - 1) b)
			)
	nextchunk szv checkvalid (b:bs) = do
		updateszv szv b
		return (bs, b)
	nextchunk _szv checkvalid [] = do
		void checkvalid
		-- Result ignored because 0 bytes of data are sent,
		-- so even if the key is invalid, if that's the
		-- amount of data that the DataLengthHeader indicates,
		-- we've successfully served an empty key.
		return ([], mempty)

	updateszv szv b = modifyMVar szv $ \sz ->
		let !sz' = sz + fromIntegral (B.length b)
		in return (sz', ())

	-- Returns False when the key's content is invalid, but the
	-- amount of data sent was the same as indicated by the
	-- DataLengthHeader.
	checkvalidity szv len endv validityv finalv =
		ifM (atomically $ isEmptyTMVar endv)
			( do
				atomically $ putTMVar endv ()
				validity <- atomically $ takeTMVar validityv
				sz <- takeMVar szv
				atomically $ putTMVar finalv ()
				return $ case validity of
					Nothing -> True
					Just Valid -> True
					Just Invalid -> sz /= len
			, pure True
			)

	waitfinal endv finalv conn annexworker = do
		-- Wait for everything to be transferred before
		-- stopping the annexworker. The validityv will usually
		-- be written to at the end. If the client disconnects
		-- early that does not happen, so catch STM exception.
		liftIO $ void $ tryNonAsync $ atomically $ takeTMVar finalv
		-- Make sure the annexworker is not left blocked on endv
		-- if the client disconnected early.
		void $ liftIO $ atomically $ tryPutTMVar endv ()
		void $ tryNonAsync $ wait annexworker
		void $ tryNonAsync $ releaseP2PConnection conn

	sizer = pure $ Len $ case startat of
		Just (Offset o) -> fromIntegral o
		Nothing -> 0

	getreq offset = P2P.Protocol.GET offset (ProtoAssociatedFile af) k

	af = AssociatedFile $ case baf of
		Just (B64FilePath f) -> Just f
		Nothing -> Nothing

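-- A debugging aid: downloads a key and dumps the response body to the
-- file "got" in the current directory. A minimal usage sketch, assuming
-- a ClientEnv has been built (e.g. with mkClientEnv from
-- Servant.Client.Streaming) against the server's base url:
--
-- > clientGet env (ProtocolVersion 3) key su cu [] Nothing Nothing Nothing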
clientGet
	:: ClientEnv
	-> ProtocolVersion
	-> B64Key
	-> B64UUID ServerSide
	-> B64UUID ClientSide
	-> [B64UUID Bypass]
	-> Maybe B64FilePath
	-> Maybe Offset
	-> Maybe Auth
	-> IO ()
clientGet clientenv (ProtocolVersion ver) k su cu bypass af o auth =
	withClientM (cli k cu bypass af o auth) clientenv $ \case
		Left err -> throwM err
		Right respheaders -> do
			let dl = case lookupResponseHeader @DataLengthHeader' respheaders of
				Header h -> h
				_ -> error "missing data length header"
			liftIO $ print ("datalength", dl :: DataLength)
			b <- S.unSourceT (getResponse respheaders) gatherByteString
			liftIO $ print "got it all, writing to file 'got'"
			L.writeFile "got" b
  where
	cli = case ver of
		3 -> v3 su V3
		2 -> v2 su V2
		1 -> v1 su V1
		0 -> v0 su V0
		_ -> error "unsupported protocol version"

	v3 :<|> v2 :<|> v1 :<|> v0 :<|> _ = client p2pHttpAPI

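-- Gathers a servant stream into a lazy ByteString. unsafeInterleaveIO
-- makes each chunk be read from the stream only when the consumer
-- demands it, so the whole body need not be buffered in memory at once.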
gatherByteString :: S.StepT IO B.ByteString -> IO L.ByteString
gatherByteString = unsafeInterleaveIO . go
  where
	go S.Stop = return LI.Empty
	go (S.Error err) = giveup err
	go (S.Skip s) = go s
	go (S.Effect ms) = ms >>= go
	go (S.Yield v s) = LI.Chunk v <$> unsafeInterleaveIO (go s)

type CheckPresentAPI
	= KeyParam
	:> CU Required
	:> BypassUUIDs
	:> IsSecure
	:> AuthHeader
	:> Post '[JSON] CheckPresentResult

serveCheckPresent
	:: APIVersion v
	=> P2PHttpServerState
	-> B64UUID ServerSide
	-> v
	-> B64Key
	-> B64UUID ClientSide
	-> [B64UUID Bypass]
	-> IsSecure
	-> Maybe Auth
	-> Handler CheckPresentResult
serveCheckPresent st su apiver (B64Key k) cu bypass sec auth = do
	res <- withP2PConnection apiver st cu su bypass sec auth ReadAction id
		$ \conn -> liftIO $ proxyClientNetProto conn $ checkPresent k
	case res of
		Right b -> return (CheckPresentResult b)
		Left err -> throwError $ err500 { errBody = encodeBL err }

clientCheckPresent
	:: ClientEnv
	-> ProtocolVersion
	-> B64Key
	-> B64UUID ClientSide
	-> B64UUID ServerSide
	-> [B64UUID Bypass]
	-> Maybe Auth
	-> IO Bool
clientCheckPresent clientenv (ProtocolVersion ver) key cu su bypass auth =
	withClientM (cli su key cu bypass auth) clientenv $ \case
		Left err -> throwM err
		Right (CheckPresentResult res) -> return res
  where
	cli = case ver of
		3 -> flip v3 V3
		2 -> flip v2 V2
		1 -> flip v1 V1
		0 -> flip v0 V0
		_ -> error "unsupported protocol version"

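	-- Skip the four GetAPI routes to reach the checkpresent client
	-- functions for each protocol version.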
	_ :<|> _ :<|> _ :<|> _ :<|>
		v3 :<|> v2 :<|> v1 :<|> v0 :<|> _ = client p2pHttpAPI

type RemoveAPI result
	= KeyParam
	:> CU Required
	:> BypassUUIDs
	:> IsSecure
	:> AuthHeader
	:> Post '[JSON] result

serveRemove
	:: APIVersion v
	=> P2PHttpServerState
	-> (RemoveResultPlus -> t)
	-> B64UUID ServerSide
	-> v
	-> B64Key
	-> B64UUID ClientSide
	-> [B64UUID Bypass]
	-> IsSecure
	-> Maybe Auth
	-> Handler t
serveRemove st resultmangle su apiver (B64Key k) cu bypass sec auth = do
	res <- withP2PConnection apiver st cu su bypass sec auth RemoveAction id
		$ \conn ->
			liftIO $ proxyClientNetProto conn $ remove Nothing k
	case res of
		(Right b, plusuuids) -> return $ resultmangle $
			RemoveResultPlus b (map B64UUID (fromMaybe [] plusuuids))
		(Left err, _) -> throwError $
			err500 { errBody = encodeBL err }

clientRemove
	:: ClientEnv
	-> ProtocolVersion
	-> B64Key
	-> B64UUID ClientSide
	-> B64UUID ServerSide
	-> [B64UUID Bypass]
	-> Maybe Auth
	-> IO RemoveResultPlus
clientRemove clientenv (ProtocolVersion ver) key cu su bypass auth =
	withClientM cli clientenv $ \case
		Left err -> throwM err
		Right res -> return res
  where
	cli = case ver of
		3 -> v3 su V3 key cu bypass auth
		2 -> v2 su V2 key cu bypass auth
		1 -> plus <$> v1 su V1 key cu bypass auth
		0 -> plus <$> v0 su V0 key cu bypass auth
		_ -> error "unsupported protocol version"

	_ :<|> _ :<|> _ :<|> _ :<|>
		_ :<|> _ :<|> _ :<|> _ :<|>
		v3 :<|> v2 :<|> v1 :<|> v0 :<|> _ = client p2pHttpAPI

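-- remove-before and gettimestamp only exist in version 3 of the
-- protocol, so only a V3 route is provided for each.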
type RemoveBeforeAPI
	= KeyParam
	:> CU Required
	:> BypassUUIDs
	:> QueryParam' '[Required] "timestamp" Timestamp
	:> IsSecure
	:> AuthHeader
	:> Post '[JSON] RemoveResultPlus

serveRemoveBefore
	:: APIVersion v
	=> P2PHttpServerState
	-> B64UUID ServerSide
	-> v
	-> B64Key
	-> B64UUID ClientSide
	-> [B64UUID Bypass]
	-> Timestamp
	-> IsSecure
	-> Maybe Auth
	-> Handler RemoveResultPlus
serveRemoveBefore st su apiver (B64Key k) cu bypass (Timestamp ts) sec auth = do
	res <- withP2PConnection apiver st cu su bypass sec auth RemoveAction id
		$ \conn ->
			liftIO $ proxyClientNetProto conn $
				removeBeforeRemoteEndTime ts k
	case res of
		(Right b, plusuuids) -> return $
			RemoveResultPlus b (map B64UUID (fromMaybe [] plusuuids))
		(Left err, _) -> throwError $
			err500 { errBody = encodeBL err }

clientRemoveBefore
	:: ClientEnv
	-> ProtocolVersion
	-> B64Key
	-> B64UUID ClientSide
	-> B64UUID ServerSide
	-> [B64UUID Bypass]
	-> Timestamp
	-> Maybe Auth
	-> IO RemoveResultPlus
clientRemoveBefore clientenv (ProtocolVersion ver) key cu su bypass ts auth =
	withClientM (cli su key cu bypass ts auth) clientenv $ \case
		Left err -> throwM err
		Right res -> return res
  where
	cli = case ver of
		3 -> flip v3 V3
		_ -> error "unsupported protocol version"

	_ :<|> _ :<|> _ :<|> _ :<|>
		_ :<|> _ :<|> _ :<|> _ :<|>
		_ :<|> _ :<|> _ :<|> _ :<|>
		v3 :<|> _ = client p2pHttpAPI

type GetTimestampAPI
	= CU Required
	:> BypassUUIDs
	:> IsSecure
	:> AuthHeader
	:> Post '[JSON] GetTimestampResult

serveGetTimestamp
	:: APIVersion v
	=> P2PHttpServerState
	-> B64UUID ServerSide
	-> v
	-> B64UUID ClientSide
	-> [B64UUID Bypass]
	-> IsSecure
	-> Maybe Auth
	-> Handler GetTimestampResult
serveGetTimestamp st su apiver cu bypass sec auth = do
	res <- withP2PConnection apiver st cu su bypass sec auth ReadAction id
		$ \conn ->
			liftIO $ proxyClientNetProto conn getTimestamp
	case res of
		Right ts -> return $ GetTimestampResult (Timestamp ts)
		Left err -> throwError $
			err500 { errBody = encodeBL err }

clientGetTimestamp
	:: ClientEnv
	-> ProtocolVersion
	-> B64UUID ClientSide
	-> B64UUID ServerSide
	-> [B64UUID Bypass]
	-> Maybe Auth
	-> IO GetTimestampResult
clientGetTimestamp clientenv (ProtocolVersion ver) cu su bypass auth =
	withClientM (cli su cu bypass auth) clientenv $ \case
		Left err -> throwM err
		Right res -> return res
  where
	cli = case ver of
		3 -> flip v3 V3
		_ -> error "unsupported protocol version"

	_ :<|> _ :<|> _ :<|> _ :<|>
		_ :<|> _ :<|> _ :<|> _ :<|>
		_ :<|> _ :<|> _ :<|> _ :<|>
		_ :<|>
		v3 :<|> _ = client p2pHttpAPI

type PutAPI result
	= DataLengthHeaderRequired
	:> KeyParam
	:> CU Required
	:> BypassUUIDs
	:> AssociatedFileParam
	:> OffsetParam
	:> StreamBody NoFraming OctetStream (SourceIO B.ByteString)
	:> IsSecure
	:> AuthHeader
	:> Post '[JSON] result

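-- How servePut works: the client's body stream is gathered into a lazy
-- ByteString whose length is checked against the DataLength header as it
-- is consumed; a mismatch records Invalid in validityv, and the P2P
-- protocol action run by the annex worker consults that, via
-- runValidityCheck, when storing the content.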
servePut
	:: APIVersion v
	=> P2PHttpServerState
	-> (PutResultPlus -> t)
	-> B64UUID ServerSide
	-> v
	-> DataLength
	-> B64Key
	-> B64UUID ClientSide
	-> [B64UUID Bypass]
	-> Maybe B64FilePath
	-> Maybe Offset
	-> S.SourceT IO B.ByteString
	-> IsSecure
	-> Maybe Auth
	-> Handler t
servePut st resultmangle su apiver (DataLength len) (B64Key k) cu bypass baf moffset stream sec auth = do
	validityv <- liftIO newEmptyTMVarIO
	let validitycheck = local $ runValidityCheck $
		liftIO $ atomically $ readTMVar validityv
	content <- liftIO $ S.unSourceT stream (gather validityv)
	res <- withP2PConnection' apiver st cu su bypass sec auth WriteAction
		(\st -> st { connectionWaitVar = False }) $ \conn ->
			liftIO $ inAnnexWorker st $
				enteringStage (TransferStage Download) $
					runFullProto (clientRunState conn) (clientP2PConnection conn) $
						protoaction content validitycheck
	case res of
		Right (Right (Just plusuuids)) -> return $ resultmangle $
			PutResultPlus True (map B64UUID plusuuids)
		Right (Right Nothing) -> return $ resultmangle $
			PutResultPlus False []
		Right (Left protofail) -> throwError $
			err500 { errBody = encodeBL (describeProtoFailure protofail) }
		Left err -> throwError $
			err500 { errBody = encodeBL (show err) }
  where
	protoaction content validitycheck = put' k af $ \offset' ->
		let offsetdelta = offset' - offset
		in case compare offset' offset of
			EQ -> sendContent' nullMeterUpdate (Len len)
				content validitycheck
			GT -> sendContent' nullMeterUpdate
				(Len (len - fromIntegral offsetdelta))
				(L.drop (fromIntegral offsetdelta) content)
				validitycheck
			LT -> sendContent' nullMeterUpdate
				(Len 0)
				mempty
				(return Invalid)

	offset = case moffset of
		Just (Offset o) -> o
		Nothing -> 0

	af = AssociatedFile $ case baf of
		Just (B64FilePath f) -> Just f
		Nothing -> Nothing

	-- Streams the ByteString from the client. Avoids returning a longer
	-- or shorter than expected ByteString by truncating or padding;
	-- in such cases the data is not Valid.
	gather validityv = unsafeInterleaveIO . go 0
	  where
		go n S.Stop
			| n == len = do
				atomically $ writeTMVar validityv Valid
				return LI.Empty
			| otherwise = do
				atomically $ writeTMVar validityv Invalid
				padout n
		go n (S.Error _err) = do
			atomically $ writeTMVar validityv Invalid
			padout n
		go n (S.Skip s) = go n s
		go n (S.Effect ms) = ms >>= go n
		go n (S.Yield v s) =
			let !n' = n + fromIntegral (B.length v)
			in if n' > len
				then do
					atomically $ writeTMVar validityv Invalid
					return $ LI.Chunk
						(B.take (fromIntegral (len - n')) v)
						LI.Empty
				else LI.Chunk v <$> unsafeInterleaveIO (go n' s)

		padout n = return $ LI.Chunk
			(B.replicate (fromIntegral (len - n))
				(fromIntegral (ord 'X')))
			LI.Empty

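-- How clientPut works: the file is streamed to the server while a
-- separate Annex thread (checkerthread) runs the caller's validitycheck.
-- The streaming source signals checkv when it reaches the final chunk
-- and then blocks on checkresultv; if the content has become invalid but
-- the length still matches what the server expects, the final chunk is
-- truncated by one byte so the server sees the content as invalid.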
clientPut
	:: ClientEnv
	-> ProtocolVersion
	-> B64Key
	-> B64UUID ServerSide
	-> B64UUID ClientSide
	-> [B64UUID Bypass]
	-> Maybe Auth
	-> Maybe Offset
	-> AssociatedFile
	-> FilePath
	-> FileSize
	-> Annex Bool
	-> Annex PutResultPlus
clientPut clientenv (ProtocolVersion ver) k su cu bypass auth moffset af contentfile contentfilesize validitycheck = do
	checkv <- liftIO newEmptyTMVarIO
	checkresultv <- liftIO newEmptyTMVarIO
	let checker = do
		liftIO $ atomically $ takeTMVar checkv
		validitycheck >>= liftIO . atomically . putTMVar checkresultv
	checkerthread <- liftIO . async =<< forkState checker
	liftIO (withClientM (cli (stream checkv checkresultv)) clientenv return) >>= \case
		Left err -> do
			void $ liftIO $ atomically $ tryPutTMVar checkv ()
			join $ liftIO (wait checkerthread)
			throwM err
		Right res -> do
			join $ liftIO (wait checkerthread)
			return res
  where
	stream checkv checkresultv = S.SourceT $ \a -> do
		-- TODO seek to offset when requested
		bl <- L.readFile contentfile
		v <- newMVar (0, filter (not . B.null) (L.toChunks bl))
		a (go v)
	  where
		go v = S.fromActionStep B.null $
			modifyMVar v $ \case
				(n, (b:[])) -> do
					let !n' = n + B.length b
					ifM (checkvalid n)
						( return ((n', []), b)
						-- The key's content is invalid, but
						-- the amount of data is the same as the
						-- DataLengthHeader indicates. Truncate
						-- the stream by one byte to indicate
						-- to the server that it's not valid.
						, return ((n' - 1, []), B.take (B.length b - 1) b)
						)
				(n, []) -> do
					void $ checkvalid n
					return ((n, []), mempty)
				(n, (b:bs)) ->
					let !n' = n + B.length b
					in return ((n', bs), b)

		checkvalid n = do
			void $ liftIO $ atomically $ tryPutTMVar checkv ()
			valid <- liftIO $ atomically $ readTMVar checkresultv
			if not valid
				then return (n /= fromIntegral nlen)
				else return True

	baf = case af of
		AssociatedFile Nothing -> Nothing
		AssociatedFile (Just f) -> Just (B64FilePath f)

	len = DataLength nlen

	nlen = case moffset of
		Nothing -> contentfilesize
		Just (Offset o) -> contentfilesize - fromIntegral o

	cli src = case ver of
		3 -> v3 su V3 len k cu bypass baf moffset src auth
		2 -> v2 su V2 len k cu bypass baf moffset src auth
		1 -> plus <$> v1 su V1 len k cu bypass baf moffset src auth
		0 -> plus <$> v0 su V0 len k cu bypass baf moffset src auth
		_ -> error "unsupported protocol version"

	_ :<|> _ :<|> _ :<|> _ :<|>
		_ :<|> _ :<|> _ :<|> _ :<|>
		_ :<|> _ :<|> _ :<|> _ :<|>
		_ :<|>
		_ :<|>
		v3 :<|> v2 :<|> v1 :<|> v0 :<|> _ = client p2pHttpAPI

type PutOffsetAPI result
	= KeyParam
	:> CU Required
	:> BypassUUIDs
	:> Post '[JSON] result

servePutOffset
	:: APIVersion v
	=> P2PHttpServerState
	-> (PutOffsetResultPlus -> t)
	-> B64UUID ServerSide
	-> v
	-> B64Key
	-> B64UUID ClientSide
	-> [B64UUID Bypass]
	-> Handler t
servePutOffset st resultmangle su apiver (B64Key k) cu bypass = undefined

clientPutOffset
	:: B64UUID ServerSide
	-> ProtocolVersion
	-> B64Key
	-> B64UUID ClientSide
	-> [B64UUID Bypass]
	-> ClientM PutOffsetResultPlus
clientPutOffset su (ProtocolVersion ver) = case ver of
	3 -> v3 su V3
	2 -> v2 su V2
	_ -> error "unsupported protocol version"
  where
	_ :<|> _ :<|> _ :<|> _ :<|>
		_ :<|> _ :<|> _ :<|> _ :<|>
		_ :<|> _ :<|> _ :<|> _ :<|>
		_ :<|>
		_ :<|>
		_ :<|> _ :<|> _ :<|> _ :<|>
		v3 :<|> v2 :<|> _ = client p2pHttpAPI

type LockContentAPI
	= KeyParam
	:> CU Required
	:> BypassUUIDs
	:> Post '[JSON] LockResult

serveLockContent
	:: APIVersion v
	=> P2PHttpServerState
	-> B64UUID ServerSide
	-> v
	-> B64Key
	-> B64UUID ClientSide
	-> [B64UUID Bypass]
	-> Handler LockResult
serveLockContent = undefined -- TODO

clientLockContent
	:: B64UUID ServerSide
	-> ProtocolVersion
	-> B64Key
	-> B64UUID ClientSide
	-> [B64UUID Bypass]
	-> ClientM LockResult
clientLockContent su (ProtocolVersion ver) = case ver of
	3 -> v3 su V3
	2 -> v2 su V2
	1 -> v1 su V1
	0 -> v0 su V0
	_ -> error "unsupported protocol version"
  where
	_ :<|> _ :<|> _ :<|> _ :<|>
		_ :<|> _ :<|> _ :<|> _ :<|>
		_ :<|> _ :<|> _ :<|> _ :<|>
		_ :<|>
		_ :<|>
		_ :<|> _ :<|> _ :<|> _ :<|>
		_ :<|> _ :<|> _ :<|>
		v3 :<|> v2 :<|> v1 :<|> v0 :<|> _ = client p2pHttpAPI

type KeepLockedAPI
	= LockIDParam
	:> CU Required
	:> BypassUUIDs
	:> Header "Connection" ConnectionKeepAlive
	:> Header "Keep-Alive" KeepAlive
	:> StreamBody NewlineFraming JSON (SourceIO UnlockRequest)
	:> Post '[JSON] LockResult

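-- keeplocked is the long-polling half of content locking: lockcontent is
-- intended to take a time-limited retention lock, and a keeplocked call
-- then keeps it held for as long as the client streams UnlockRequests.
-- The lock is dropped when the stream ends, errors out, or an
-- UnlockRequest True arrives.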
serveKeepLocked
	:: APIVersion v
	=> P2PHttpServerState
	-> B64UUID ServerSide
	-> v
	-> LockID
	-> B64UUID ClientSide
	-> [B64UUID Bypass]
	-> Maybe ConnectionKeepAlive
	-> Maybe KeepAlive
	-> S.SourceT IO UnlockRequest
	-> Handler LockResult
serveKeepLocked st su apiver lckid cu _ _ _ unlockrequeststream = do
	_ <- liftIO $ S.unSourceT unlockrequeststream go
	return (LockResult False Nothing)
  where
	go S.Stop = dropLock lckid st
	go (S.Error _err) = dropLock lckid st
	go (S.Skip s) = go s
	go (S.Effect ms) = ms >>= go
	go (S.Yield (UnlockRequest False) s) = go s
	go (S.Yield (UnlockRequest True) _) = dropLock lckid st

clientKeepLocked'
|
2024-07-11 15:19:20 +00:00
|
|
|
:: B64UUID ServerSide
|
|
|
|
-> ProtocolVersion
|
2024-07-09 00:18:55 +00:00
|
|
|
-> LockID
|
convert lockcontent api to http long polling
Websockets would work, but the problem with using them for this is that
each lockcontent call is a separate websocket connection. And that's an
actual TCP connection. One TCP connection per file dropped would be too
expensive. With http long polling, regular http pipelining can be used,
so it will reuse a TCP connection.
Unfortunately, at least with servant, bi-directional streams with long
polling don't result in true bidirectional full duplex communication.
Servant processes the whole client body stream before generating the server
body stream. I think it's entirely possible to do full bi-directional
communication over http, but it would need changes to servant.
And, there's no way for the client to tell if the server successfully
locked the content, since the server will keep processing the client
stream no matter what.:
So, added a new api endpoint, keeplocked. lockcontent will lock the key
for 10 minutes with retention lock, and then a call to keeplocked will
keep it locked for as long as needed. This does mean that there will
need to be a Map of locks by key, and I will probably want to add
some kind of lock identifier that lockcontent returns.
2024-07-08 14:40:38 +00:00
|
|
|
-> B64UUID ClientSide
|
|
|
|
-> [B64UUID Bypass]
|
|
|
|
-> Maybe ConnectionKeepAlive
|
|
|
|
-> Maybe KeepAlive
|
|
|
|
-> S.SourceT IO UnlockRequest
|
|
|
|
-> ClientM LockResult
|
2024-07-11 15:19:20 +00:00
|
|
|
clientKeepLocked' su (ProtocolVersion ver) = case ver of
|
|
|
|
3 -> v3 su V3
|
|
|
|
2 -> v2 su V2
|
|
|
|
1 -> v1 su V1
|
|
|
|
0 -> v0 su V0
|
convert lockcontent api to http long polling
Websockets would work, but the problem with using them for this is that
each lockcontent call is a separate websocket connection. And that's an
actual TCP connection. One TCP connection per file dropped would be too
expensive. With http long polling, regular http pipelining can be used,
so it will reuse a TCP connection.
Unfortunately, at least with servant, bi-directional streams with long
polling don't result in true bidirectional full duplex communication.
Servant processes the whole client body stream before generating the server
body stream. I think it's entirely possible to do full bi-directional
communication over http, but it would need changes to servant.
And, there's no way for the client to tell if the server successfully
locked the content, since the server will keep processing the client
stream no matter what.:
So, added a new api endpoint, keeplocked. lockcontent will lock the key
for 10 minutes with retention lock, and then a call to keeplocked will
keep it locked for as long as needed. This does mean that there will
need to be a Map of locks by key, and I will probably want to add
some kind of lock identifier that lockcontent returns.
2024-07-08 14:40:38 +00:00
|
|
|
_ -> error "unsupported protocol version"
|
2024-07-08 01:20:50 +00:00
|
|
|
where
|
|
|
|
_ :<|> _ :<|> _ :<|> _ :<|>
|
|
|
|
_ :<|> _ :<|> _ :<|> _ :<|>
|
|
|
|
_ :<|> _ :<|> _ :<|> _ :<|>
|
|
|
|
_ :<|>
|
|
|
|
_ :<|>
|
|
|
|
_ :<|> _ :<|> _ :<|> _ :<|>
|
|
|
|
_ :<|> _ :<|> _ :<|>
|
2024-07-08 14:40:38 +00:00
|
|
|
_ :<|> _ :<|> _ :<|> _ :<|>
|
|
|
|
v3 :<|> v2 :<|> v1 :<|> v0 :<|> _ = client p2pHttpAPI
|
2024-07-08 01:20:50 +00:00
|
|
|
|
2024-07-09 17:37:55 +00:00
|
|
|
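{- Opens a long-polling keeplocked connection to the server and keeps
 - it open for as long as the TMVar keeps being filled with True.
 - Filling it with False sends a final UnlockRequest True, which lets
 - the server drop the lock, and ends the call.
 -}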
clientKeepLocked
|
2024-07-08 14:40:38 +00:00
|
|
|
:: ClientEnv
|
2024-07-09 17:37:55 +00:00
|
|
|
-> ProtocolVersion
|
2024-07-09 00:18:55 +00:00
|
|
|
-> LockID
|
2024-07-08 14:40:38 +00:00
|
|
|
-> B64UUID ClientSide
|
|
|
|
-> B64UUID ServerSide
|
|
|
|
-> [B64UUID Bypass]
|
|
|
|
-> TMVar Bool
|
|
|
|
-> IO ()
|
2024-07-09 17:37:55 +00:00
|
|
|
clientKeepLocked clientenv protover lckid cu su bypass keeplocked = do
|
2024-07-11 15:19:20 +00:00
|
|
|
let cli = clientKeepLocked' su protover lckid cu bypass
|
2024-07-08 14:40:38 +00:00
|
|
|
(Just connectionKeepAlive) (Just keepAlive)
|
|
|
|
(S.fromStepT unlocksender)
|
|
|
|
withClientM cli clientenv $ \case
|
|
|
|
Left err -> throwM err
|
2024-07-09 00:18:55 +00:00
|
|
|
Right (LockResult _ _) ->
|
2024-07-08 14:40:38 +00:00
|
|
|
liftIO $ print "end of lock connection to server"
|
|
|
|
where
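-- Yields an UnlockRequest False to keep the lock held, then blocks
-- on the TMVar: True loops back to keep the lock held, False yields
-- a final UnlockRequest True and stops the stream.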
|
|
|
|
unlocksender =
|
|
|
|
S.Yield (UnlockRequest False) $ S.Effect $ do
|
|
|
|
liftIO $ print "sent keep locked request"
|
|
|
|
return $ S.Effect $ do
|
|
|
|
stilllock <- liftIO $ atomically $ takeTMVar keeplocked
|
|
|
|
if stilllock
|
|
|
|
then return unlocksender
|
|
|
|
else do
|
|
|
|
liftIO $ print "sending unlock request"
|
|
|
|
return $ S.Yield (UnlockRequest True) S.Stop
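{- A minimal sketch (not part of git-annex) of how a caller could
 - drive clientKeepLocked. The function exampleKeepLocked is
 - hypothetical, and the ClientEnv, ProtocolVersion, LockID and UUIDs
 - are assumed to have been obtained elsewhere, eg from an earlier
 - lockcontent call.
 -}
exampleKeepLocked
        :: ClientEnv
        -> ProtocolVersion
        -> LockID
        -> B64UUID ClientSide
        -> B64UUID ServerSide
        -> IO ()
exampleKeepLocked env ver lck cu su = do
        -- While the TMVar stays empty, the keeplocked call blocks in
        -- unlocksender and the server retains the lock.
        keeplocked <- newEmptyTMVarIO
        locker <- async $ clientKeepLocked env ver lck cu su [] keeplocked
        -- ... do whatever work needs the content to stay locked ...
        -- Then signal that the lock is no longer needed; this makes
        -- unlocksender send UnlockRequest True and end the call.
        atomically $ putTMVar keeplocked False
        wait locker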
|
|
|
|
|
2024-07-09 14:12:36 +00:00
|
|
|
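{- The path captures and query parameters that the p2pHttpAPI routes
 - are composed from: the protocol version path segment, the server
 - and client UUIDs, and the per-request parameters that follow.
 -}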
type PV3 = Capture "v3" V3
|
|
|
|
|
|
|
|
type PV2 = Capture "v2" V2
|
|
|
|
|
|
|
|
type PV1 = Capture "v1" V1
|
|
|
|
|
|
|
|
type PV0 = Capture "v0" V0
|
|
|
|
|
2024-07-11 15:19:20 +00:00
|
|
|
type SU = Capture "serveruuid" (B64UUID ServerSide)
|
2024-07-07 18:48:20 +00:00
|
|
|
|
2024-07-11 15:19:20 +00:00
|
|
|
type CU req = QueryParam' '[req] "clientuuid" (B64UUID ClientSide)
|
2024-07-08 01:20:50 +00:00
|
|
|
|
|
|
|
type BypassUUIDs = QueryParams "bypass" (B64UUID Bypass)
|
2024-07-07 16:59:12 +00:00
|
|
|
|
|
|
|
type CaptureKey = Capture "key" B64Key
|
|
|
|
|
2024-07-07 18:48:20 +00:00
|
|
|
type KeyParam = QueryParam' '[Required] "key" B64Key
|
2024-07-07 16:59:12 +00:00
|
|
|
|
|
|
|
type AssociatedFileParam = QueryParam "associatedfile" B64FilePath
|
|
|
|
|
2024-07-07 18:48:20 +00:00
|
|
|
type OffsetParam = QueryParam "offset" Offset
|
2024-07-07 16:59:12 +00:00
|
|
|
|
2024-07-11 15:57:55 +00:00
|
|
|
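-- The X-git-annex-data-length header carries a DataLength for the
-- data being transferred; the Required variant is used on routes
-- where the header must be present.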
type DataLengthHeader = Header DataLengthHeader' DataLength
|
2024-07-10 20:06:39 +00:00
|
|
|
|
2024-07-22 14:20:18 +00:00
|
|
|
type DataLengthHeaderRequired = Header' '[Required] DataLengthHeader' DataLength
|
|
|
|
|
2024-07-10 20:06:39 +00:00
|
|
|
type DataLengthHeader' = "X-git-annex-data-length"
|
2024-07-07 16:59:12 +00:00
|
|
|
|
2024-07-09 00:18:55 +00:00
|
|
|
type LockIDParam = QueryParam' '[Required] "lockid" LockID
|
2024-07-09 21:30:55 +00:00
|
|
|
|
|
|
|
type AuthHeader = Header "Authorization" Auth
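{- A minimal sketch of how these aliases compose into a Servant route;
 - ExampleAPI and its layout are hypothetical and not one of the
 - routes in p2pHttpAPI.
 -}
type ExampleAPI
        = SU :> PV3 :> "example"
                :> KeyParam
                :> CU Required
                :> BypassUUIDs
                :> AuthHeader
                :> Post '[JSON] LockResult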
|