{- WebDAV remotes.
 -
 - Copyright 2012-2021 Joey Hess <id@joeyh.name>
 -
 - Licensed under the GNU AGPL version 3 or higher.
 -}

{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RankNTypes #-}

module Remote.WebDAV (remote, davCreds, configUrl) where

import Network.Protocol.HTTP.DAV
import qualified Data.Map as M
import qualified Data.ByteString.Lazy as L
import qualified Data.ByteString.UTF8 as B8
import qualified Data.ByteString.Lazy.UTF8 as L8
import Network.HTTP.Client (HttpException(..), RequestBody)
import qualified Network.HTTP.Client as HTTP
import Network.HTTP.Client (HttpExceptionContent(..), responseStatus)
import Network.HTTP.Types
import System.IO.Error
import Control.Monad.Catch
import Control.Monad.IO.Class (MonadIO)
import Control.Concurrent.STM hiding (check)

import Annex.Common
import Types.Remote
import Types.Export
import qualified Git
import qualified Annex
import Config
import Config.Cost
import Annex.SpecialRemote.Config
import Remote.Helper.Special
import Remote.Helper.Http
import Remote.Helper.ExportImport
import qualified Remote.Helper.Chunked.Legacy as Legacy
import Creds
import Utility.Metered
import Utility.Url (URLString, matchStatusCodeException, matchHttpExceptionContent)
import Annex.Verify
import Annex.UUID
import Remote.WebDAV.DavLocation
import Types.ProposedAccepted

remote :: RemoteType
remote = specialRemoteType $ RemoteType
	{ typename = "webdav"
	, enumerate = const (findSpecialRemotes "webdav")
	, generate = gen
	, configParser = mkRemoteConfigParser
		[ optionalStringParser urlField
			(FieldDesc "(required) url to the WebDAV directory")
		, optionalStringParser davcredsField HiddenField
		]
	, setup = webdavSetup
	, exportSupported = exportIsSupported
	, importSupported = importUnsupported
	, thirdPartyPopulated = False
	}

urlField :: RemoteConfigField
urlField = Accepted "url"

davcredsField :: RemoteConfigField
davcredsField = Accepted "davcreds"
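
{- Example configuration (illustrative values only; the remote name,
 - url, and credentials below are hypothetical):
 -
 -   WEBDAV_USERNAME=user WEBDAV_PASSWORD=pass \
 -     git annex initremote mydav type=webdav \
 -       url=https://dav.example.com/annex encryption=shared
 -
 - url= is required (see webdavSetup); the credentials are read from
 - the environment variables named in davCreds below.
 -}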

gen :: Git.Repo -> UUID -> RemoteConfig -> RemoteGitConfig -> RemoteStateHandle -> Annex (Maybe Remote)
gen r u rc gc rs = do
	c <- parsedRemoteConfig remote rc
	new
		<$> pure c
		<*> remoteCost gc c expensiveRemoteCost
		<*> mkDavHandleVar c gc u
  where
	new c cst hdl = Just $ specialRemote c
		(store hdl chunkconfig)
		(retrieve hdl chunkconfig)
		(remove hdl)
		(checkKey hdl chunkconfig)
		this
	  where
		this = Remote
			{ uuid = u
			, cost = cst
			, name = Git.repoDescribe r
			, storeKey = storeKeyDummy
			, retrieveKeyFile = retrieveKeyFileDummy
			, retrieveKeyFileCheap = Nothing
			-- HttpManagerRestricted is used here, so this is
			-- secure.
			, retrievalSecurityPolicy = RetrievalAllKeysSecure
			, removeKey = removeKeyDummy
			, lockContent = Nothing
			, checkPresent = checkPresentDummy
			, checkPresentCheap = False
			, exportActions = ExportActions
				{ storeExport = storeExportDav hdl
				, retrieveExport = retrieveExportDav hdl
				, checkPresentExport = checkPresentExportDav hdl this
				, removeExport = removeExportDav hdl
				, versionedExport = False
				, removeExportDirectory = Just $
					removeExportDirectoryDav hdl
				, renameExport = renameExportDav hdl
				}
			, importActions = importUnsupported
			, whereisKey = Nothing
			, remoteFsck = Nothing
			, repairRepo = Nothing
			, config = c
			, getRepo = return r
			, gitconfig = gc
			, localpath = Nothing
			, readonly = False
			, appendonly = False
			, untrustworthy = False
			, availability = pure GloballyAvailable
			, remotetype = remote
			, mkUnavailable = gen r u (M.insert urlField (Proposed "http://!dne!/") rc) gc rs
			, getInfo = includeCredsInfo c (davCreds u) $
				[("url", fromMaybe "unknown" $ getRemoteConfigValue urlField c)]
			, claimUrl = Nothing
			, checkUrl = Nothing
			, remoteStateHandle = rs
			}
		chunkconfig = getChunkConfig c
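
-- Note: specialRemote (from Remote.Helper.Special) wraps the plain
-- store/retrieve/remove/checkKey actions above, layering encryption and
-- chunking on top of them according to the remote's configuration, so
-- those actions only have to deal with raw content.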

webdavSetup :: SetupStage -> Maybe UUID -> Maybe CredPair -> RemoteConfig -> RemoteGitConfig -> Annex (RemoteConfig, UUID)
webdavSetup ss mu mcreds c gc = do
	u <- maybe (liftIO genUUID) return mu
	url <- maybe (giveup "Specify url=")
		(return . fromProposedAccepted)
		(M.lookup urlField c)
	(c', encsetup) <- encryptionSetup c gc
	pc <- either giveup return . parseRemoteConfig c' =<< configParser remote c'
	creds <- maybe (getCreds pc gc u) (return . Just) mcreds
	testDav url creds
	gitConfigSpecialRemote u c' [("webdav", "true")]
	c'' <- setRemoteCredPair ss encsetup pc gc (davCreds u) creds
	return (c'', u)

store :: DavHandleVar -> ChunkConfig -> Storer
store hv (LegacyChunks chunksize) = fileStorer $ \k f p ->
	withDavHandle hv $ \dav -> do
		annexrunner <- Annex.makeRunner
		liftIO $ withMeteredFile f p $ storeLegacyChunked annexrunner chunksize k dav
store hv _ = httpStorer $ \k reqbody ->
	withDavHandle hv $ \dav -> liftIO $ goDAV dav $ do
		let tmp = keyTmpLocation k
		let dest = keyLocation k
		storeHelper dav tmp dest reqbody
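
-- Storing uploads to a temporary location and then moves the object
-- into place (see finalizeStore), so an interrupted upload does not
-- leave a truncated object at the final location.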

storeHelper :: DavHandle -> DavLocation -> DavLocation -> RequestBody -> DAVT IO ()
storeHelper dav tmp dest reqbody = do
	maybe noop (void . mkColRecursive) (locationParent tmp)
	debugDav $ "putContent " ++ tmp
	inLocation tmp $
		putContentM' (contentType, reqbody)
	finalizeStore dav tmp dest

finalizeStore :: DavHandle -> DavLocation -> DavLocation -> DAVT IO ()
finalizeStore dav tmp dest = do
	debugDav $ "delContent " ++ dest
	inLocation dest $ void $ safely $ delContentM
	maybe noop (void . mkColRecursive) (locationParent dest)
	moveDAV (baseURL dav) tmp dest

retrieve :: DavHandleVar -> ChunkConfig -> Retriever
retrieve hv cc = fileRetriever' $ \d k p iv ->
	withDavHandle hv $ \dav -> case cc of
		LegacyChunks _ -> do
			-- Not doing incremental verification for chunks.
			liftIO $ maybe noop unableIncrementalVerifier iv
			retrieveLegacyChunked (fromRawFilePath d) k p dav
		_ -> liftIO $ goDAV dav $
			retrieveHelper (keyLocation k) (fromRawFilePath d) p iv
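
-- The Maybe IncrementalVerifier is fed by httpBodyRetriever as the
-- response body is downloaded, allowing the content to be checksummed
-- during the transfer rather than in a separate pass afterwards.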

retrieveHelper :: DavLocation -> FilePath -> MeterUpdate -> Maybe IncrementalVerifier -> DAVT IO ()
retrieveHelper loc d p iv = do
	debugDav $ "retrieve " ++ loc
	inLocation loc $
		withContentM $ httpBodyRetriever d p iv

remove :: DavHandleVar -> Remover
remove hv k = withDavHandle hv $ \dav -> liftIO $ goDAV dav $
	-- Delete the key's whole directory, including any
	-- legacy chunked files, etc, in a single action.
	removeHelper (keyDir k)

removeHelper :: DavLocation -> DAVT IO ()
removeHelper d = do
	debugDav $ "delContent " ++ d
	v <- safely $ inLocation d delContentM
	case v of
		Just _ -> return ()
		Nothing -> do
			v' <- existsDAV d
			case v' of
				Right False -> return ()
				_ -> giveup "failed to remove content from remote"

checkKey :: DavHandleVar -> ChunkConfig -> CheckPresent
checkKey hv chunkconfig k = withDavHandle hv $ \dav ->
	case chunkconfig of
		LegacyChunks _ -> checkKeyLegacyChunked dav k
		_ -> do
			v <- liftIO $ goDAV dav $
				existsDAV (keyLocation k)
			either giveup return v

storeExportDav :: DavHandleVar -> FilePath -> Key -> ExportLocation -> MeterUpdate -> Annex ()
storeExportDav hdl f k loc p = case exportLocation loc of
	Right dest -> withDavHandle hdl $ \h -> runExport h $ \dav -> do
		reqbody <- liftIO $ httpBodyStorer f p
		storeHelper dav (exportTmpLocation loc k) dest reqbody
	Left err -> giveup err
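
-- Note that AlwaysVerify is used below: files in an export can be
-- changed at any time by other writers to the WebDAV server, so the
-- downloaded content cannot be assumed to still correspond to the key.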

retrieveExportDav :: DavHandleVar -> Key -> ExportLocation -> FilePath -> MeterUpdate -> Annex Verification
retrieveExportDav hdl k loc d p = case exportLocation loc of
	Right src -> verifyKeyContentIncrementally AlwaysVerify k $ \iv ->
		withDavHandle hdl $ \h -> runExport h $ \_dav ->
			retrieveHelper src d p iv
	Left err -> giveup err

checkPresentExportDav :: DavHandleVar -> Remote -> Key -> ExportLocation -> Annex Bool
checkPresentExportDav hdl _ _k loc = case exportLocation loc of
	Right p -> withDavHandle hdl $ \h -> liftIO $ do
		v <- goDAV h $ existsDAV p
		either giveup return v
	Left err -> giveup err

removeExportDav :: DavHandleVar -> Key -> ExportLocation -> Annex ()
removeExportDav hdl _k loc = case exportLocation loc of
	Right p -> withDavHandle hdl $ \h -> runExport h $ \_dav ->
		removeHelper p
	-- When the exportLocation is not legal for webdav,
	-- the content is certainly not stored there, so it's ok for
	-- removal to succeed. This allows recovery after failure to store
	-- content there, as the user can rename the problem file and
	-- this will be called to make sure it's gone.
	Left _err -> return ()

removeExportDirectoryDav :: DavHandleVar -> ExportDirectory -> Annex ()
removeExportDirectoryDav hdl dir = withDavHandle hdl $ \h -> runExport h $ \_dav -> do
	let d = fromRawFilePath $ fromExportDirectory dir
	debugDav $ "delContent " ++ d
	inLocation d delContentM

renameExportDav :: DavHandleVar -> Key -> ExportLocation -> ExportLocation -> Annex (Maybe ())
renameExportDav hdl _k src dest = case (exportLocation src, exportLocation dest) of
	(Right srcl, Right destl) -> withDavHandle hdl $ \h -> do
		-- Several webdav servers have buggy handling of renames,
		-- and fail to rename in some circumstances.
		-- Since after a failure it's not clear where the file ended
		-- up, recover by deleting both the source and destination.
		-- The file will later be re-uploaded to the destination,
		-- so this deletion is ok.
		let go = runExport h $ \dav -> do
			maybe noop (void . mkColRecursive) (locationParent destl)
			moveDAV (baseURL dav) srcl destl
			return (Just ())
		let recover = do
			void $ runExport h $ \_dav -> safely $
				inLocation srcl delContentM
			void $ runExport h $ \_dav -> safely $
				inLocation destl delContentM
			return Nothing
		catchNonAsync go (const recover)
	(Left err, _) -> giveup err
	(_, Left err) -> giveup err

runExport :: DavHandle -> (DavHandle -> DAVT IO a) -> Annex a
runExport h a = liftIO (goDAV h (a h))

configUrl :: ParsedRemoteConfig -> Maybe URLString
configUrl c = fixup <$> getRemoteConfigValue urlField c
  where
	-- box.com DAV url changed
	fixup = replace "https://www.box.com/dav/" boxComUrl

boxComUrl :: URLString
boxComUrl = "https://dav.box.com/dav/"

type DavUser = B8.ByteString
type DavPass = B8.ByteString

baseURL :: DavHandle -> URLString
baseURL (DavHandle _ _ _ u) = u

toDavUser :: String -> DavUser
toDavUser = B8.fromString

toDavPass :: String -> DavPass
toDavPass = B8.fromString

{- Test if a WebDAV store is usable, by writing to a test file, and then
 - deleting the file.
 -
 - Also ensures that the path of the url exists, trying to create it if not.
 -
 - Throws an error if store is not usable.
 -}
testDav :: URLString -> Maybe CredPair -> Annex ()
testDav url (Just (u, p)) = do
	showAction "testing WebDAV server"
	test $ liftIO $ evalDAVT url $ do
		prepDAV user pass
		makeParentDirs
		inLocation (tmpLocation "test") $ do
			putContentM (Nothing, L8.fromString "test")
			delContentM
  where
	test a = liftIO $
		either (\e -> throwIO $ "WebDAV test failed: " ++ show e)
			(const noop)
			=<< tryNonAsync a
	user = toDavUser u
	pass = toDavPass p
testDav _ Nothing = giveup "Need to configure webdav username and password."

{- Tries to make all the parent directories in the WebDAV url's path,
 - right down to the root.
 -
 - Ignores any failures, which can occur for reasons including the WebDAV
 - server only serving up WebDAV in a subdirectory. -}
makeParentDirs :: DAVT IO ()
makeParentDirs = go
  where
	go = do
		l <- getDAVLocation
		case locationParent l of
			Nothing -> noop
			Just p -> void $ safely $ inDAVLocation (const p) go
		void $ safely mkCol

{- Checks if the directory exists. If not, tries to create its
 - parent directories, all the way down to the root, and finally creates
 - it. -}
mkColRecursive :: DavLocation -> DAVT IO Bool
mkColRecursive d = go =<< existsDAV d
  where
	go (Right True) = return True
	go _ = do
		debugDav $ "mkCol " ++ d
		ifM (inLocation d mkCol)
			( return True
			, do
				case locationParent d of
					Nothing -> makeParentDirs
					Just parent -> void (mkColRecursive parent)
				inLocation d mkCol
			)

getCreds :: ParsedRemoteConfig -> RemoteGitConfig -> UUID -> Annex (Maybe CredPair)
getCreds c gc u = getRemoteCredPairFor "webdav" c gc (davCreds u)

davCreds :: UUID -> CredPairStorage
davCreds u = CredPairStorage
	{ credPairFile = fromUUID u
	, credPairEnvironment = ("WEBDAV_USERNAME", "WEBDAV_PASSWORD")
	, credPairRemoteField = davcredsField
	}

{- Content-Type to use for files uploaded to WebDAV. -}
contentType :: Maybe B8.ByteString
contentType = Just $ B8.fromString "application/octet-stream"

throwIO :: String -> IO a
throwIO msg = ioError $ mkIOError userErrorType msg Nothing Nothing

moveDAV :: URLString -> DavLocation -> DavLocation -> DAVT IO ()
moveDAV baseurl src dest = do
	debugDav $ "moveContent " ++ src ++ " " ++ newurl
	inLocation src $ moveContentM (B8.fromString newurl)
  where
	newurl = locationUrl baseurl dest
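
{- Checks whether a location exists on the server. A 404 response, or a
 - redirect loop (TooManyRedirects), is treated as not present; any other
 - exception is returned as a Left, rather than thrown. -}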

existsDAV :: DavLocation -> DAVT IO (Either String Bool)
existsDAV l = do
	debugDav $ "getProps " ++ l
	inLocation l check `catchNonAsync` (\e -> return (Left $ show e))
  where
	check = do
		-- Some DAV services only support depth of 1, and
		-- more depth is certainly not needed to check if a
		-- location exists.
		setDepth (Just Depth1)
		catchJust missinghttpstatus
			(getPropsM >> ispresent True)
			(const $ ispresent False)
	ispresent = return . Right
	missinghttpstatus e =
		matchStatusCodeException (== notFound404) e
		<|> matchHttpExceptionContent toomanyredirects e
	toomanyredirects (TooManyRedirects _) = True
	toomanyredirects _ = False

safely :: DAVT IO a -> DAVT IO (Maybe a)
safely = eitherToMaybe <$$> tryNonAsync

choke :: IO (Either String a) -> IO a
choke f = do
	x <- f
	case x of
		Left e -> giveup e
		Right r -> return r

data DavHandle = DavHandle DAVContext DavUser DavPass URLString

type DavHandleVar = TVar (Either (Annex (Either String DavHandle)) (Either String DavHandle))
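
-- The DavHandleVar starts out Left, holding the Annex action that
-- constructs the handle; the first use runs that action and caches the
-- result as Right, so the handle is only constructed once, on demand.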

{- Prepares a DavHandle for later use. Does not connect to the server or do
 - anything else expensive. -}
mkDavHandleVar :: ParsedRemoteConfig -> RemoteGitConfig -> UUID -> Annex DavHandleVar
mkDavHandleVar c gc u = liftIO $ newTVarIO $ Left $ do
	mcreds <- getCreds c gc u
	case (mcreds, configUrl c) of
		(Just (user, pass), Just baseurl) -> do
			ctx <- mkDAVContext baseurl
			let h = DavHandle ctx (toDavUser user) (toDavPass pass) baseurl
			return (Right h)
		_ -> return $ Left "webdav credentials not available"

withDavHandle :: DavHandleVar -> (DavHandle -> Annex a) -> Annex a
withDavHandle hv a = liftIO (readTVarIO hv) >>= \case
	Right hdl -> either giveup a hdl
	Left mkhdl -> do
		hdl <- mkhdl
		liftIO $ atomically $ writeTVar hv (Right hdl)
		either giveup a hdl
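
{- Runs a DAV action using the handle's context and credentials.
 - Failures are turned into errors (see prettifyExceptions and choke). -}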

goDAV :: DavHandle -> DAVT IO a -> IO a
goDAV (DavHandle ctx user pass _) a = choke $ run $ prettifyExceptions $ do
	prepDAV user pass
	a
  where
	run = fst <$$> runDAVContext ctx

{- Catch StatusCodeException and trim it to only the statusMessage part,
 - eliminating a lot of noise, which can include the whole request that
 - failed. The rethrown exception is no longer a StatusCodeException. -}
prettifyExceptions :: DAVT IO a -> DAVT IO a
prettifyExceptions a = catchJust (matchStatusCodeException (const True)) a go
  where
	go (HttpExceptionRequest req (StatusCodeException response message)) = giveup $ unwords
		[ "DAV failure:"
		, show (responseStatus response)
		, show (message)
		, "HTTP request:"
		, show (HTTP.method req)
		, show (HTTP.path req)
		]
	go e = throwM e

prepDAV :: DavUser -> DavPass -> DAVT IO ()
prepDAV user pass = do
	setResponseTimeout Nothing -- disable default (5 second!) timeout
	setCreds user pass

--
-- Legacy chunking code, to be removed eventually.
--

storeLegacyChunked :: (Annex () -> IO ()) -> ChunkSize -> Key -> DavHandle -> L.ByteString -> IO ()
storeLegacyChunked annexrunner chunksize k dav b =
	Legacy.storeChunks k tmp dest storer recorder finalizer
  where
	storehttp l b' = void $ goDAV dav $ do
		maybe noop (void . mkColRecursive) (locationParent l)
		debugDav $ "putContent " ++ l
		inLocation l $ putContentM (contentType, b')
	storer locs = Legacy.storeChunked annexrunner chunksize locs storehttp b
	recorder l s = storehttp l (L8.fromString s)
	finalizer tmp' dest' = goDAV dav $
		finalizeStore dav tmp' (fromJust $ locationParent dest')

	tmp = addTrailingPathSeparator $ keyTmpLocation k
	dest = keyLocation k

retrieveLegacyChunked :: FilePath -> Key -> MeterUpdate -> DavHandle -> Annex ()
retrieveLegacyChunked d k p dav = liftIO $
	withStoredFilesLegacyChunked k dav onerr $ \locs ->
		Legacy.meteredWriteFileChunks p d locs $ \l ->
			goDAV dav $ do
				debugDav $ "getContent " ++ l
				inLocation l $
					snd <$> getContentM
  where
	onerr = giveup "download failed"
|
2014-08-08 17:17:24 +00:00
|
|
|
|
|
|
|
checkKeyLegacyChunked :: DavHandle -> CheckPresent
|
|
|
|
checkKeyLegacyChunked dav k = liftIO $
|
2023-04-10 17:38:14 +00:00
|
|
|
either giveup id <$> withStoredFilesLegacyChunked k dav onerr check
|
2014-08-08 17:17:24 +00:00
|
|
|
where
|
|
|
|
check [] = return $ Right True
|
|
|
|
check (l:ls) = do
|
|
|
|
v <- goDAV dav $ existsDAV l
|
|
|
|
if v == Right True
|
|
|
|
then check ls
|
|
|
|
else return v
|
|
|
|
|
|
|
|
{- Failed to read the chunkcount file; see if it's missing,
|
|
|
|
- or if there's a problem accessing it,
|
|
|
|
- or perhaps this was an intermittent error. -}
|
|
|
|
onerr f = do
|
|
|
|
v <- goDAV dav $ existsDAV f
|
|
|
|
return $ if v == Right True
|
|
|
|
then Left $ "failed to read " ++ f
|
|
|
|
else v
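
{- Legacy chunked data is stored as a set of chunk files, along with a
 - chunkcount file recording how many to expect. When the chunkcount file
 - cannot be read, the chunks are probed for directly instead. -}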

withStoredFilesLegacyChunked
	:: Key
	-> DavHandle
	-> (DavLocation -> IO a)
	-> ([DavLocation] -> IO a)
	-> IO a
withStoredFilesLegacyChunked k dav onerr a = do
	let chunkcount = keyloc ++ Legacy.chunkCount
	v <- goDAV dav $ safely $ do
		debugDav $ "getContent " ++ chunkcount
		inLocation chunkcount $
			snd <$> getContentM
	case v of
		Just s -> a $ Legacy.listChunks keyloc $ L8.toString s
		Nothing -> do
			chunks <- Legacy.probeChunks keyloc $ \f ->
				(== Right True) <$> goDAV dav (existsDAV f)
			if null chunks
				then onerr chunkcount
				else a chunks
  where
	keyloc = keyLocation k
|
2017-10-07 18:11:32 +00:00
|
|
|
|
|
|
|
debugDav :: MonadIO m => String -> DAVT m ()
|
2021-04-05 17:40:31 +00:00
|
|
|
debugDav msg = liftIO $ debug "Remote.WebDAV" msg
|