{- P2P protocol over HTTP
 -
 - https://git-annex.branchable.com/design/p2p_protocol_over_http/
 -
 - Copyright 2024 Joey Hess <id@joeyh.name>
 -
 - Licensed under the GNU AGPL version 3 or higher.
 -}

{-# LANGUAGE DataKinds #-}
{-# LANGUAGE TypeOperators #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE TypeApplications #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE BangPatterns #-}

module P2P.Http (
	module P2P.Http,
	module P2P.Http.Types,
	module P2P.Http.State,
) where

import Annex.Common
import P2P.Http.Types
import P2P.Http.State
import P2P.Protocol hiding (Offset, Bypass, auth)
import P2P.IO
import P2P.Annex
import Annex.WorkerPool
import Types.WorkerPool
import Types.Direction
import Utility.Metered

import Servant
import Servant.Client.Streaming
import qualified Servant.Types.SourceT as S
import qualified Data.ByteString as B
import qualified Data.ByteString.Lazy as L
import qualified Data.ByteString.Lazy.Internal as LI
import Control.Concurrent.STM
import Control.Concurrent.Async
import Control.Concurrent
import System.IO.Unsafe

type P2PHttpAPI
	= "git-annex" :> SU :> PV3 :> "key" :> GetAPI
	:<|> "git-annex" :> SU :> PV2 :> "key" :> GetAPI
	:<|> "git-annex" :> SU :> PV1 :> "key" :> GetAPI
	:<|> "git-annex" :> SU :> PV0 :> "key" :> GetAPI
	:<|> "git-annex" :> SU :> PV3 :> "checkpresent" :> CheckPresentAPI
	:<|> "git-annex" :> SU :> PV2 :> "checkpresent" :> CheckPresentAPI
	:<|> "git-annex" :> SU :> PV1 :> "checkpresent" :> CheckPresentAPI
	:<|> "git-annex" :> SU :> PV0 :> "checkpresent" :> CheckPresentAPI
	:<|> "git-annex" :> SU :> PV3 :> "remove" :> RemoveAPI RemoveResultPlus
	:<|> "git-annex" :> SU :> PV2 :> "remove" :> RemoveAPI RemoveResultPlus
	:<|> "git-annex" :> SU :> PV1 :> "remove" :> RemoveAPI RemoveResult
	:<|> "git-annex" :> SU :> PV0 :> "remove" :> RemoveAPI RemoveResult
	:<|> "git-annex" :> SU :> PV3 :> "remove-before" :> RemoveBeforeAPI
	:<|> "git-annex" :> SU :> PV3 :> "gettimestamp" :> GetTimestampAPI
	:<|> "git-annex" :> SU :> PV3 :> "put" :> DataLengthHeader
		:> PutAPI PutResultPlus
	:<|> "git-annex" :> SU :> PV2 :> "put" :> DataLengthHeader
		:> PutAPI PutResultPlus
	:<|> "git-annex" :> SU :> PV1 :> "put" :> DataLengthHeader
		:> PutAPI PutResult
	:<|> "git-annex" :> SU :> PV0 :> "put"
		:> PutAPI PutResult
	:<|> "git-annex" :> SU :> PV3 :> "putoffset"
		:> PutOffsetAPI PutOffsetResultPlus
	:<|> "git-annex" :> SU :> PV2 :> "putoffset"
		:> PutOffsetAPI PutOffsetResultPlus
	:<|> "git-annex" :> SU :> PV1 :> "putoffset"
		:> PutOffsetAPI PutOffsetResult
	:<|> "git-annex" :> SU :> PV3 :> "lockcontent" :> LockContentAPI
	:<|> "git-annex" :> SU :> PV2 :> "lockcontent" :> LockContentAPI
	:<|> "git-annex" :> SU :> PV1 :> "lockcontent" :> LockContentAPI
	:<|> "git-annex" :> SU :> PV0 :> "lockcontent" :> LockContentAPI
	:<|> "git-annex" :> SU :> PV3 :> "keeplocked" :> KeepLockedAPI
	:<|> "git-annex" :> SU :> PV2 :> "keeplocked" :> KeepLockedAPI
	:<|> "git-annex" :> SU :> PV1 :> "keeplocked" :> KeepLockedAPI
	:<|> "git-annex" :> SU :> PV0 :> "keeplocked" :> KeepLockedAPI
	:<|> "git-annex" :> SU :> "key" :> GetGenericAPI

p2pHttpAPI :: Proxy P2PHttpAPI
p2pHttpAPI = Proxy

p2pHttpApp :: P2PHttpServerState -> Application
p2pHttpApp = serve p2pHttpAPI . serveP2pHttp

serveP2pHttp :: P2PHttpServerState -> Server P2PHttpAPI
serveP2pHttp st
	= serveGet st
	:<|> serveGet st
	:<|> serveGet st
	:<|> serveGet st
	:<|> serveCheckPresent st
	:<|> serveCheckPresent st
	:<|> serveCheckPresent st
	:<|> serveCheckPresent st
	:<|> serveRemove st id
	:<|> serveRemove st id
	:<|> serveRemove st dePlus
	:<|> serveRemove st dePlus
	:<|> serveRemoveBefore st
	:<|> serveGetTimestamp st
	:<|> servePut st id
	:<|> servePut st id
	:<|> servePut st dePlus
	:<|> (\su v -> servePut st dePlus su v Nothing)
	:<|> servePutOffset st id
	:<|> servePutOffset st id
	:<|> servePutOffset st dePlus
	:<|> serveLockContent st
	:<|> serveLockContent st
	:<|> serveLockContent st
	:<|> serveLockContent st
	:<|> serveKeepLocked st
	:<|> serveKeepLocked st
	:<|> serveKeepLocked st
	:<|> serveKeepLocked st
	:<|> serveGetGeneric st

type GetGenericAPI
	= CaptureKey
	:> IsSecure
	:> AuthHeader
	:> StreamGet NoFraming OctetStream
		(Headers '[DataLengthHeader] (SourceIO B.ByteString))

serveGetGeneric
	:: P2PHttpServerState
	-> B64UUID ServerSide
	-> B64Key
	-> IsSecure
	-> Maybe Auth
	-> Handler (Headers '[DataLengthHeader] (S.SourceT IO B.ByteString))
serveGetGeneric st su@(B64UUID u) k =
	-- Use V0 because it does not alter the returned data to indicate
	-- Invalid content.
	serveGet st su V0 k cu [] Nothing Nothing
  where
	-- Reuse server UUID as client UUID.
	cu = B64UUID u :: B64UUID ClientSide

type GetAPI
	= CaptureKey
	:> CU Required
	:> BypassUUIDs
	:> AssociatedFileParam
	:> OffsetParam
	:> IsSecure
	:> AuthHeader
	:> StreamGet NoFraming OctetStream
		(Headers '[DataLengthHeader] (SourceIO B.ByteString))

serveGet
	:: APIVersion v
	=> P2PHttpServerState
	-> B64UUID ServerSide
	-> v
	-> B64Key
	-> B64UUID ClientSide
	-> [B64UUID Bypass]
	-> Maybe B64FilePath
	-> Maybe Offset
	-> IsSecure
	-> Maybe Auth
	-> Handler (Headers '[DataLengthHeader] (S.SourceT IO B.ByteString))
serveGet st su apiver (B64Key k) cu bypass baf startat sec auth = do
	conn <- getP2PConnection apiver st cu su bypass sec auth ReadAction
	-- These TMVars coordinate the annex worker thread, which runs the
	-- P2P protocol to get the content, with the thread that streams
	-- the response body to the http client.
	bsv <- liftIO newEmptyTMVarIO
	endv <- liftIO newEmptyTMVarIO
	validityv <- liftIO newEmptyTMVarIO
	finalv <- liftIO newEmptyTMVarIO
	annexworker <- liftIO $ async $ inAnnexWorker st $ do
		let storer _offset len = sendContentWith $ \bs -> do
			liftIO $ atomically $ putTMVar bsv (len, bs)
			liftIO $ atomically $ takeTMVar endv
			liftIO $ signalFullyConsumedByteString $
				connOhdl $ serverP2PConnection conn
			return $ \v -> do
				liftIO $ atomically $ putTMVar validityv v
				return True
		v <- enteringStage (TransferStage Upload) $
			runFullProto (clientRunState conn) (clientP2PConnection conn) $
				void $ receiveContent Nothing nullMeterUpdate
					sizer storer getreq
		return v
	void $ liftIO $ forkIO $ waitfinal endv finalv conn annexworker
	(Len len, bs) <- liftIO $ atomically $ takeTMVar bsv
	bv <- liftIO $ newMVar (L.toChunks bs)
	szv <- liftIO $ newMVar 0
	let streamer = S.SourceT $ \s -> s =<< return
		(stream (bv, szv, len, endv, validityv, finalv))
	return $ addHeader len streamer
  where
	stream (bv, szv, len, endv, validityv, finalv) =
		S.fromActionStep B.null $
			modifyMVar bv $ nextchunk szv $
				checkvalidity szv len endv validityv finalv

	nextchunk szv atend (b:bs)
		| not (B.null b) = do
			modifyMVar szv $ \sz ->
				let !sz' = sz + fromIntegral (B.length b)
				in return (sz', ())
			return (bs, b)
		| otherwise = nextchunk szv atend bs
	nextchunk _szv atend [] = do
		endbit <- atend
		return ([], endbit)

	checkvalidity szv len endv validityv finalv =
		ifM (atomically $ isEmptyTMVar endv)
			( do
				atomically $ putTMVar endv ()
				validity <- atomically $ takeTMVar validityv
				sz <- takeMVar szv
				atomically $ putTMVar finalv ()
				-- When the key's content is invalid,
				-- indicate that to the client by padding
				-- the response if necessary, so it is not
				-- the same length as indicated by the
				-- DataLengthHeader.
				return $ case validity of
					Nothing -> mempty
					Just Valid -> mempty
					Just Invalid
						| sz == len -> "X"
						| otherwise -> mempty
			, pure mempty
			)

	waitfinal endv finalv conn annexworker = do
		-- Wait for everything to be transferred before
		-- stopping the annexworker. The validityv will usually
		-- be written to at the end. If the client disconnects
		-- early that does not happen, so catch the STM exception.
		liftIO $ void $ tryNonAsync $ atomically $ takeTMVar finalv
		-- Make sure the annexworker is not left blocked on endv
		-- if the client disconnected early.
		void $ liftIO $ atomically $ tryPutTMVar endv ()
		void $ tryNonAsync $ wait annexworker
		void $ tryNonAsync $ releaseP2PConnection conn

	sizer = pure $ Len $ case startat of
		Just (Offset o) -> fromIntegral o
		Nothing -> 0

	getreq offset = P2P.Protocol.GET offset (ProtoAssociatedFile af) k

	af = AssociatedFile $ case baf of
		Just (B64FilePath f) -> Just f
		Nothing -> Nothing

clientGet
	:: ClientEnv
	-> ProtocolVersion
	-> B64Key
	-> B64UUID ServerSide
	-> B64UUID ClientSide
	-> [B64UUID Bypass]
	-> Maybe B64FilePath
	-> Maybe Offset
	-> Maybe Auth
	-> IO ()
clientGet clientenv ver k su cu bypass af o auth =
	withClientM (clientGet' su ver k cu bypass af o auth) clientenv $ \case
		Left err -> throwM err
		Right respheaders -> do
			let dl = case lookupResponseHeader @DataLengthHeader' respheaders of
				Header h -> h
				_ -> error "missing data length header"
			liftIO $ print ("datalength", dl :: Integer)
			b <- S.unSourceT (getResponse respheaders) gatherByteString
			liftIO $ print "got it all, writing to file 'got'"
			L.writeFile "got" b

gatherByteString :: S.StepT IO B.ByteString -> IO L.ByteString
gatherByteString = unsafeInterleaveIO . go
  where
	go S.Stop = return LI.Empty
	go (S.Error err) = giveup err
	go (S.Skip s) = go s
	go (S.Effect ms) = ms >>= go
	go (S.Yield v s) = LI.Chunk v <$> unsafeInterleaveIO (go s)

clientGet'
	:: B64UUID ServerSide
	-> ProtocolVersion
	-> B64Key
	-> B64UUID ClientSide
	-> [B64UUID Bypass]
	-> Maybe B64FilePath
	-> Maybe Offset
	-> Maybe Auth
	-> ClientM (Headers '[DataLengthHeader] (S.SourceT IO B.ByteString))
clientGet' su (ProtocolVersion ver) = case ver of
	3 -> v3 su V3
	2 -> v2 su V2
	1 -> v1 su V1
	0 -> v0 su V0
	_ -> error "unsupported protocol version"
  where
	v3 :<|> v2 :<|> v1 :<|> v0 :<|> _ = client p2pHttpAPI

type CheckPresentAPI
	= KeyParam
	:> CU Required
	:> BypassUUIDs
	:> IsSecure
	:> AuthHeader
	:> Post '[JSON] CheckPresentResult

serveCheckPresent
	:: APIVersion v
	=> P2PHttpServerState
	-> B64UUID ServerSide
	-> v
	-> B64Key
	-> B64UUID ClientSide
	-> [B64UUID Bypass]
	-> IsSecure
	-> Maybe Auth
	-> Handler CheckPresentResult
serveCheckPresent st su apiver (B64Key k) cu bypass sec auth = do
	res <- withP2PConnection apiver st cu su bypass sec auth ReadAction
		$ \conn -> liftIO $ proxyClientNetProto conn $ checkPresent k
	case res of
		Right b -> return (CheckPresentResult b)
		Left err -> throwError $ err500 { errBody = encodeBL err }

clientCheckPresent
	:: ClientEnv
	-> ProtocolVersion
	-> B64Key
	-> B64UUID ClientSide
	-> B64UUID ServerSide
	-> [B64UUID Bypass]
	-> Maybe Auth
	-> IO Bool
clientCheckPresent clientenv (ProtocolVersion ver) key cu su bypass auth =
	withClientM (cli su key cu bypass auth) clientenv $ \case
		Left err -> throwM err
		Right (CheckPresentResult res) -> return res
  where
	cli = case ver of
		3 -> flip v3 V3
		2 -> flip v2 V2
		1 -> flip v1 V1
		0 -> flip v0 V0
		_ -> error "unsupported protocol version"

	_ :<|> _ :<|> _ :<|> _ :<|>
		v3 :<|> v2 :<|> v1 :<|> v0 :<|> _ = client p2pHttpAPI

type RemoveAPI result
	= KeyParam
	:> CU Required
	:> BypassUUIDs
	:> IsSecure
	:> AuthHeader
	:> Post '[JSON] result

serveRemove
	:: APIVersion v
	=> P2PHttpServerState
	-> (RemoveResultPlus -> t)
	-> B64UUID ServerSide
	-> v
	-> B64Key
	-> B64UUID ClientSide
	-> [B64UUID Bypass]
	-> IsSecure
	-> Maybe Auth
	-> Handler t
serveRemove st resultmangle su apiver (B64Key k) cu bypass sec auth = do
	res <- withP2PConnection apiver st cu su bypass sec auth RemoveAction
		$ \conn ->
			liftIO $ proxyClientNetProto conn $ remove Nothing k
	case res of
		(Right b, plusuuids) -> return $ resultmangle $
			RemoveResultPlus b (map B64UUID (fromMaybe [] plusuuids))
		(Left err, _) -> throwError $
			err500 { errBody = encodeBL err }

clientRemove
	:: ClientEnv
	-> ProtocolVersion
	-> B64Key
	-> B64UUID ClientSide
	-> B64UUID ServerSide
	-> [B64UUID Bypass]
	-> Maybe Auth
	-> IO RemoveResultPlus
clientRemove clientenv (ProtocolVersion ver) key cu su bypass auth =
	withClientM cli clientenv $ \case
		Left err -> throwM err
		Right res -> return res
  where
	cli = case ver of
		3 -> v3 su V3 key cu bypass auth
		2 -> v2 su V2 key cu bypass auth
		1 -> plus <$> v1 su V1 key cu bypass auth
		0 -> plus <$> v0 su V0 key cu bypass auth
		_ -> error "unsupported protocol version"

	_ :<|> _ :<|> _ :<|> _ :<|>
		_ :<|> _ :<|> _ :<|> _ :<|>
		v3 :<|> v2 :<|> v1 :<|> v0 :<|> _ = client p2pHttpAPI

type RemoveBeforeAPI
	= KeyParam
	:> CU Required
	:> BypassUUIDs
	:> QueryParam' '[Required] "timestamp" Timestamp
	:> IsSecure
	:> AuthHeader
	:> Post '[JSON] RemoveResultPlus

serveRemoveBefore
	:: APIVersion v
	=> P2PHttpServerState
	-> B64UUID ServerSide
	-> v
	-> B64Key
	-> B64UUID ClientSide
	-> [B64UUID Bypass]
	-> Timestamp
	-> IsSecure
	-> Maybe Auth
	-> Handler RemoveResultPlus
serveRemoveBefore st su apiver (B64Key k) cu bypass (Timestamp ts) sec auth = do
	res <- withP2PConnection apiver st cu su bypass sec auth RemoveAction
		$ \conn ->
			liftIO $ proxyClientNetProto conn $
				removeBeforeRemoteEndTime ts k
	case res of
		(Right b, plusuuids) -> return $
			RemoveResultPlus b (map B64UUID (fromMaybe [] plusuuids))
		(Left err, _) -> throwError $
			err500 { errBody = encodeBL err }

clientRemoveBefore
	:: ClientEnv
	-> ProtocolVersion
	-> B64Key
	-> B64UUID ClientSide
	-> B64UUID ServerSide
	-> [B64UUID Bypass]
	-> Timestamp
	-> Maybe Auth
	-> IO RemoveResultPlus
clientRemoveBefore clientenv (ProtocolVersion ver) key cu su bypass ts auth =
	withClientM (cli su key cu bypass ts auth) clientenv $ \case
		Left err -> throwM err
		Right res -> return res
  where
	cli = case ver of
		3 -> flip v3 V3
		_ -> error "unsupported protocol version"

	_ :<|> _ :<|> _ :<|> _ :<|>
		_ :<|> _ :<|> _ :<|> _ :<|>
		_ :<|> _ :<|> _ :<|> _ :<|>
		v3 :<|> _ = client p2pHttpAPI

type GetTimestampAPI
	= CU Required
	:> BypassUUIDs
	:> IsSecure
	:> AuthHeader
	:> Post '[JSON] GetTimestampResult

serveGetTimestamp
	:: APIVersion v
	=> P2PHttpServerState
	-> B64UUID ServerSide
	-> v
	-> B64UUID ClientSide
	-> [B64UUID Bypass]
	-> IsSecure
	-> Maybe Auth
	-> Handler GetTimestampResult
serveGetTimestamp st su apiver cu bypass sec auth = do
	res <- withP2PConnection apiver st cu su bypass sec auth ReadAction
		$ \conn ->
			liftIO $ proxyClientNetProto conn getTimestamp
	case res of
		Right ts -> return $ GetTimestampResult (Timestamp ts)
		Left err -> throwError $
			err500 { errBody = encodeBL err }

clientGetTimestamp
	:: ClientEnv
	-> ProtocolVersion
	-> B64UUID ClientSide
	-> B64UUID ServerSide
	-> [B64UUID Bypass]
	-> Maybe Auth
	-> IO GetTimestampResult
clientGetTimestamp clientenv (ProtocolVersion ver) cu su bypass auth =
	withClientM (cli su cu bypass auth) clientenv $ \case
		Left err -> throwM err
		Right res -> return res
  where
	cli = case ver of
		3 -> flip v3 V3
		_ -> error "unsupported protocol version"

	_ :<|> _ :<|> _ :<|> _ :<|>
		_ :<|> _ :<|> _ :<|> _ :<|>
		_ :<|> _ :<|> _ :<|> _ :<|>
		_ :<|>
		v3 :<|> _ = client p2pHttpAPI

type PutAPI result
	= KeyParam
	:> CU Required
	:> BypassUUIDs
	:> AssociatedFileParam
	:> OffsetParam
	:> Header' '[Required] "X-git-annex-data-length" DataLength
	:> StreamBody NoFraming OctetStream (SourceIO B.ByteString)
	:> Post '[JSON] result

servePut
	:: APIVersion v
	=> P2PHttpServerState
	-> (PutResultPlus -> t)
	-> B64UUID ServerSide
	-> v
	-> Maybe Integer
	-> B64Key
	-> B64UUID ClientSide
	-> [B64UUID Bypass]
	-> Maybe B64FilePath
	-> Maybe Offset
	-> DataLength
	-> S.SourceT IO B.ByteString
	-> Handler t
servePut = undefined -- TODO

clientPut
	:: ProtocolVersion
	-> Maybe Integer
	-> B64Key
	-> B64UUID ClientSide
	-> B64UUID ServerSide
	-> [B64UUID Bypass]
	-> Maybe B64FilePath
	-> Maybe Offset
	-> DataLength
	-> S.SourceT IO B.ByteString
	-> ClientM PutResultPlus
clientPut (ProtocolVersion ver) sz k cu su bypass af o l src = case ver of
	3 -> v3 su V3 sz k cu bypass af o l src
	2 -> v2 su V2 sz k cu bypass af o l src
	1 -> plus <$> v1 su V1 sz k cu bypass af o l src
	0 -> plus <$> v0 su V0 k cu bypass af o l src
	_ -> error "unsupported protocol version"
  where
	_ :<|> _ :<|> _ :<|> _ :<|>
		_ :<|> _ :<|> _ :<|> _ :<|>
		_ :<|> _ :<|> _ :<|> _ :<|>
		_ :<|>
		_ :<|>
		v3 :<|> v2 :<|> v1 :<|> v0 :<|> _ = client p2pHttpAPI

type PutOffsetAPI result
	= KeyParam
	:> CU Required
	:> BypassUUIDs
	:> Post '[JSON] result

servePutOffset
	:: APIVersion v
	=> P2PHttpServerState
	-> (PutOffsetResultPlus -> t)
	-> B64UUID ServerSide
	-> v
	-> B64Key
	-> B64UUID ClientSide
	-> [B64UUID Bypass]
	-> Handler t
servePutOffset = undefined -- TODO

clientPutOffset
	:: B64UUID ServerSide
	-> ProtocolVersion
	-> B64Key
	-> B64UUID ClientSide
	-> [B64UUID Bypass]
	-> ClientM PutOffsetResultPlus
clientPutOffset su (ProtocolVersion ver) = case ver of
	3 -> v3 su V3
	2 -> v2 su V2
	_ -> error "unsupported protocol version"
  where
	_ :<|> _ :<|> _ :<|> _ :<|>
		_ :<|> _ :<|> _ :<|> _ :<|>
		_ :<|> _ :<|> _ :<|> _ :<|>
		_ :<|>
		_ :<|>
		_ :<|> _ :<|> _ :<|> _ :<|>
		v3 :<|> v2 :<|> _ = client p2pHttpAPI

type LockContentAPI
	= KeyParam
	:> CU Required
	:> BypassUUIDs
	:> Post '[JSON] LockResult

serveLockContent
	:: APIVersion v
	=> P2PHttpServerState
	-> B64UUID ServerSide
	-> v
	-> B64Key
	-> B64UUID ClientSide
	-> [B64UUID Bypass]
	-> Handler LockResult
serveLockContent = undefined -- TODO

clientLockContent
	:: B64UUID ServerSide
	-> ProtocolVersion
	-> B64Key
	-> B64UUID ClientSide
	-> [B64UUID Bypass]
	-> ClientM LockResult
clientLockContent su (ProtocolVersion ver) = case ver of
	3 -> v3 su V3
	2 -> v2 su V2
	1 -> v1 su V1
	0 -> v0 su V0
	_ -> error "unsupported protocol version"
  where
	_ :<|> _ :<|> _ :<|> _ :<|>
		_ :<|> _ :<|> _ :<|> _ :<|>
		_ :<|> _ :<|> _ :<|> _ :<|>
		_ :<|>
		_ :<|>
		_ :<|> _ :<|> _ :<|> _ :<|>
		_ :<|> _ :<|> _ :<|>
		v3 :<|> v2 :<|> v1 :<|> v0 :<|> _ = client p2pHttpAPI

{- The keeplocked endpoint uses http long polling rather than websockets.
 - Websockets would work, but then each lockcontent call would be a separate
 - websocket connection, and so an actual TCP connection; one TCP connection
 - per file dropped would be too expensive. With http long polling, regular
 - http pipelining can be used, so a TCP connection gets reused.
 -
 - Unfortunately, at least with servant, bi-directional streams with long
 - polling do not result in true full duplex communication: servant
 - processes the whole client body stream before generating the server body
 - stream. Full bi-directional communication over http seems entirely
 - possible, but it would need changes to servant. And there is no way for
 - the client to tell whether the server successfully locked the content,
 - since the server keeps processing the client stream no matter what.
 -
 - So keeplocked is a separate api endpoint: lockcontent locks the key with
 - a retention lock for 10 minutes, and a call to keeplocked then keeps it
 - locked for as long as needed. This does mean the server needs a Map of
 - locks by key, and lockcontent will probably want to return some kind of
 - lock identifier.
 -}

type KeepLockedAPI
	= LockIDParam
	:> CU Required
	:> BypassUUIDs
	:> Header "Connection" ConnectionKeepAlive
	:> Header "Keep-Alive" KeepAlive
	:> StreamBody NewlineFraming JSON (SourceIO UnlockRequest)
	:> Post '[JSON] LockResult

serveKeepLocked
	:: APIVersion v
	=> P2PHttpServerState
	-> B64UUID ServerSide
	-> v
	-> LockID
	-> B64UUID ClientSide
	-> [B64UUID Bypass]
	-> Maybe ConnectionKeepAlive
	-> Maybe KeepAlive
	-> S.SourceT IO UnlockRequest
	-> Handler LockResult
serveKeepLocked st su apiver lckid cu _ _ _ unlockrequeststream = do
	_ <- liftIO $ S.unSourceT unlockrequeststream go
	return (LockResult False Nothing)
  where
	go S.Stop = dropLock lckid st
	go (S.Error _err) = dropLock lckid st
	go (S.Skip s) = go s
	go (S.Effect ms) = ms >>= go
	go (S.Yield (UnlockRequest False) s) = go s
	go (S.Yield (UnlockRequest True) _) = dropLock lckid st

clientKeepLocked'
	:: B64UUID ServerSide
	-> ProtocolVersion
	-> LockID
	-> B64UUID ClientSide
	-> [B64UUID Bypass]
	-> Maybe ConnectionKeepAlive
	-> Maybe KeepAlive
	-> S.SourceT IO UnlockRequest
	-> ClientM LockResult
clientKeepLocked' su (ProtocolVersion ver) = case ver of
	3 -> v3 su V3
	2 -> v2 su V2
	1 -> v1 su V1
	0 -> v0 su V0
	_ -> error "unsupported protocol version"
  where
	_ :<|> _ :<|> _ :<|> _ :<|>
		_ :<|> _ :<|> _ :<|> _ :<|>
		_ :<|> _ :<|> _ :<|> _ :<|>
		_ :<|>
		_ :<|>
		_ :<|> _ :<|> _ :<|> _ :<|>
		_ :<|> _ :<|> _ :<|>
		_ :<|> _ :<|> _ :<|> _ :<|>
		v3 :<|> v2 :<|> v1 :<|> v0 :<|> _ = client p2pHttpAPI

clientKeepLocked
	:: ClientEnv
	-> ProtocolVersion
	-> LockID
	-> B64UUID ClientSide
	-> B64UUID ServerSide
	-> [B64UUID Bypass]
	-> TMVar Bool
	-> IO ()
clientKeepLocked clientenv protover lckid cu su bypass keeplocked = do
	let cli = clientKeepLocked' su protover lckid cu bypass
		(Just connectionKeepAlive) (Just keepAlive)
		(S.fromStepT unlocksender)
	withClientM cli clientenv $ \case
		Left err -> throwM err
		Right (LockResult _ _) ->
			liftIO $ print "end of lock connection to server"
  where
	unlocksender =
		S.Yield (UnlockRequest False) $ S.Effect $ do
			liftIO $ print "sent keep locked request"
			return $ S.Effect $ do
				stilllock <- liftIO $ atomically $ takeTMVar keeplocked
				if stilllock
					then return unlocksender
					else do
						liftIO $ print "sending unlock request"
						return $ S.Yield (UnlockRequest True) S.Stop

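{- An illustrative sketch of how a caller might combine clientLockContent
 - and clientKeepLocked; it is not part of this module's API. It assumes
 - that the second field of LockResult carries the LockID when the lock
 - succeeded, and that the server speaks protocol version 3; adjust to the
 - real types as needed.
 -
 - > withKeyLocked :: ClientEnv -> B64Key -> B64UUID ClientSide
 - > 	-> B64UUID ServerSide -> IO a -> IO (Maybe a)
 - > withKeyLocked env k cu su whilelocked = do
 - > 	let ver = ProtocolVersion 3
 - > 	res <- withClientM (clientLockContent su ver k cu []) env return
 - > 	case res of
 - > 		Right (LockResult True (Just lckid)) -> do
 - > 			-- The TMVar drives clientKeepLocked: True keeps the
 - > 			-- long-polling connection (and so the lock) open,
 - > 			-- False makes it send the final UnlockRequest.
 - > 			keeplocked <- newEmptyTMVarIO
 - > 			locker <- async $
 - > 				clientKeepLocked env ver lckid cu su [] keeplocked
 - > 			r <- whilelocked
 - > 			atomically $ putTMVar keeplocked False
 - > 			wait locker
 - > 			return (Just r)
 - > 		_ -> return Nothing
 -}
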
type PV3 = Capture "v3" V3

type PV2 = Capture "v2" V2

type PV1 = Capture "v1" V1

type PV0 = Capture "v0" V0

type SU = Capture "serveruuid" (B64UUID ServerSide)

type CU req = QueryParam' '[req] "clientuuid" (B64UUID ClientSide)

type BypassUUIDs = QueryParams "bypass" (B64UUID Bypass)

type CaptureKey = Capture "key" B64Key

type KeyParam = QueryParam' '[Required] "key" B64Key

type AssociatedFileParam = QueryParam "associatedfile" B64FilePath

type OffsetParam = QueryParam "offset" Offset

type DataLengthHeader = Header DataLengthHeader' Integer

type DataLengthHeader' = "X-git-annex-data-length"

type LockIDParam = QueryParam' '[Required] "lockid" LockID

type AuthHeader = Header "Authorization" Auth