{- Url downloading.
 -
 - Copyright 2011-2023 Joey Hess <id@joeyh.name>
 -
 - License: BSD-2-clause
 -}

{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RankNTypes #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE BangPatterns #-}

module Utility.Url (
  newManager,
  URLString,
  UserAgent,
  Scheme,
  mkScheme,
  allowedScheme,
  UrlDownloader(..),
  NonHttpUrlDownloader(..),
  UrlOptions(..),
  defUrlOptions,
  mkUrlOptions,
  check,
  checkBoth,
  exists,
  UrlInfo(..),
  getUrlInfo,
  assumeUrlExists,
  download,
  downloadConduit,
  sinkResponseFile,
  downloadPartial,
  matchStatusCodeException,
  matchHttpExceptionContent,
  BasicAuth(..),
  GetBasicAuth,
  noBasicAuth,
  applyBasicAuth',
  extractFromResourceT,
  conduitUrlSchemes,
) where

import Common
import Utility.Debug
import Utility.Metered
import Network.HTTP.Client.Restricted
import Utility.IPAddress
import qualified Utility.RawFilePath as R
import Utility.Hash (IncrementalVerifier(..))
import Utility.Url.Parse

import Network.URI
import Network.HTTP.Types
import qualified Data.CaseInsensitive as CI
import qualified Data.ByteString as B
import qualified Data.ByteString.UTF8 as B8
import qualified Data.ByteString.Lazy as L
import qualified Data.Set as S
import Control.Exception (throwIO, evaluate)
import Control.Monad.Trans.Resource
import Control.Monad.IO.Class (MonadIO)
import Control.DeepSeq
import Network.HTTP.Conduit
import Network.HTTP.Client
import Network.HTTP.Simple (getResponseHeader)
import Network.Socket
import Network.BSD (getProtocolNumber)
import Data.Either
import Data.Conduit
import Text.Read

type URLString = String

type Headers = [String]

type UserAgent = String

newtype Scheme = Scheme (CI.CI String)
  deriving (Eq, Ord)

mkScheme :: String -> Scheme
mkScheme = Scheme . CI.mk

fromScheme :: Scheme -> String
fromScheme (Scheme s) = CI.original s

data UrlOptions = UrlOptions
  { userAgent :: Maybe UserAgent
  , reqHeaders :: Headers
  , urlDownloader :: UrlDownloader
  , applyRequest :: Request -> Request
  , httpManager :: Manager
  , allowedSchemes :: S.Set Scheme
  , disallowedSchemeMessage :: Maybe (URI -> String)
  , getBasicAuth :: GetBasicAuth
  }

data UrlDownloader
  = DownloadWithConduit NonHttpUrlDownloader
  | DownloadWithCurl [CommandParam]

data NonHttpUrlDownloader
  = DownloadWithCurlRestricted Restriction

defUrlOptions :: IO UrlOptions
defUrlOptions = UrlOptions
  <$> pure Nothing
  <*> pure []
  <*> pure (DownloadWithConduit (DownloadWithCurlRestricted mempty))
  <*> pure id
  <*> newManager tlsManagerSettings
  <*> pure conduitUrlSchemes
  <*> pure Nothing
  <*> pure noBasicAuth

conduitUrlSchemes :: S.Set Scheme
conduitUrlSchemes = S.fromList $ map mkScheme ["http", "https", "ftp"]
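
{- A minimal usage sketch (not part of the module; the url below is
 - hypothetical): build default options and check whether a url exists.
 -
 - > do
 - >   uo <- defUrlOptions
 - >   r <- exists "https://example.com/file.tar.gz" uo
 - >   case r of
 - >     Left err -> putStrLn ("not allowed: " ++ err)
 - >     Right present -> print present
 -}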

mkUrlOptions :: Maybe UserAgent -> Headers -> UrlDownloader -> Manager -> S.Set Scheme -> Maybe (URI -> String) -> GetBasicAuth -> UrlOptions
mkUrlOptions defuseragent reqheaders urldownloader =
  UrlOptions useragent reqheaders urldownloader applyrequest
  where
    applyrequest = \r -> r { requestHeaders = requestHeaders r ++ addedheaders }
    addedheaders = uaheader ++ otherheaders
    useragent = maybe defuseragent (Just . B8.toString . snd)
      (headMaybe uafromheaders)
    uaheader = case useragent of
      Nothing -> []
      Just ua -> [(hUserAgent, B8.fromString ua)]
    (uafromheaders, otherheaders) = partition (\(h, _) -> h == hUserAgent)
      (map toheader reqheaders)
    toheader s =
      let (h, v) = separate (== ':') s
          h' = CI.mk (B8.fromString h)
      in case v of
        (' ':v') -> (h', B8.fromString v')
        _ -> (h', B8.fromString v)

curlParams :: UrlOptions -> [CommandParam] -> [CommandParam]
curlParams uo ps = ps ++ uaparams ++ headerparams ++ addedparams ++ schemeparams
  where
    uaparams = case userAgent uo of
      Nothing -> []
      Just ua -> [Param "--user-agent", Param ua]
    headerparams = concatMap (\h -> [Param "-H", Param h]) (reqHeaders uo)
    addedparams = case urlDownloader uo of
      DownloadWithConduit _ -> []
      DownloadWithCurl l -> l
    schemeparams =
      [ Param "--proto"
      , Param $ intercalate "," ("-all" : schemelist)
      ]
    schemelist = map fromScheme $ S.toList $ allowedSchemes uo

checkPolicy :: UrlOptions -> URI -> IO (Either String a) -> IO (Either String a)
checkPolicy uo u a
  | allowedScheme uo u = a
  | otherwise = return $ Left $ case disallowedSchemeMessage uo of
      Nothing -> "Configuration does not allow accessing " ++ show u
      Just f -> f u

unsupportedUrlScheme :: URI -> String
unsupportedUrlScheme u = "Unsupported url scheme " ++ show u

allowedScheme :: UrlOptions -> URI -> Bool
allowedScheme uo u = uscheme `S.member` allowedSchemes uo
  where
    uscheme = mkScheme $ takeWhile (/=':') (uriScheme u)

{- Checks that an url exists and could be successfully downloaded,
 - also checking that its size, if available, matches a specified size.
 -
 - The Left error is returned if policy or the restricted http manager
 - does not allow accessing the url or the url scheme is not supported.
 -}
checkBoth :: URLString -> Maybe Integer -> UrlOptions -> IO (Either String Bool)
checkBoth url expected_size uo = fmap go <$> check url expected_size uo
  where
    go v = fst v && snd v

check :: URLString -> Maybe Integer -> UrlOptions -> IO (Either String (Bool, Bool))
check url expected_size uo = fmap go <$> getUrlInfo url uo
  where
    go (UrlInfo False _ _) = (False, False)
    go (UrlInfo True Nothing _) = (True, True)
    go (UrlInfo True s _) = case expected_size of
      Just _ -> (True, expected_size == s)
      Nothing -> (True, True)

exists :: URLString -> UrlOptions -> IO (Either String Bool)
exists url uo = fmap urlExists <$> getUrlInfo url uo
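
{- A usage sketch (the url and expected size are made-up examples):
 - checkBoth verifies both existence and size, while exists only checks
 - presence.
 -
 - > do
 - >   uo <- defUrlOptions
 - >   ok <- checkBoth "https://example.com/file.iso" (Just 1048576) uo
 - >   either giveup print ok
 -}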

data UrlInfo = UrlInfo
  { urlExists :: Bool
  , urlSize :: Maybe Integer
  , urlSuggestedFile :: Maybe FilePath
  }
  deriving (Show)

assumeUrlExists :: UrlInfo
assumeUrlExists = UrlInfo True Nothing Nothing

{- Checks that an url exists and could be successfully downloaded,
 - also returning its size and suggested filename if available.
 -
 - The Left error is returned if policy or the restricted http manager
 - does not allow accessing the url or the url scheme is not supported.
 -}
getUrlInfo :: URLString -> UrlOptions -> IO (Either String UrlInfo)
getUrlInfo url uo = case parseURIRelaxed url of
    Just u -> checkPolicy uo u (go u)
    Nothing -> return (Right dne)
  where
    go :: URI -> IO (Either String UrlInfo)
    go u = case (urlDownloader uo, parseRequestRelaxed u) of
      (DownloadWithConduit (DownloadWithCurlRestricted r), Just req) ->
        existsconduit r req
      (DownloadWithConduit (DownloadWithCurlRestricted r), Nothing)
        | isfileurl u -> Right <$> existsfile u
        | isftpurl u -> (Right <$> existscurlrestricted r u url ftpport)
          `catchNonAsync` (const $ return $ Right dne)
        | otherwise -> return $ Left $ unsupportedUrlScheme u
      (DownloadWithCurl _, _)
        | isfileurl u -> Right <$> existsfile u
        | otherwise -> Right <$> existscurl u (basecurlparams url)

    dne = UrlInfo False Nothing Nothing
    found sz f = return $ UrlInfo True sz f

    isfileurl u = uriScheme u == "file:"
    isftpurl u = uriScheme u == "ftp:"

    ftpport = 21

    basecurlparams u = curlParams uo $
      [ Param "-s"
      , Param "--head"
      , Param "-L", Param u
      , Param "-w", Param "%{http_code}"
      ]

    extractlencurl s = case lastMaybe $ filter ("Content-Length:" `isPrefixOf`) (lines s) of
      Just l -> case lastMaybe $ words l of
        Just sz -> readish sz
        _ -> Nothing
      _ -> Nothing

    extractlen = readish . B8.toString
      <=< lookup hContentLength . responseHeaders

    extractfilename = contentDispositionFilename . B8.toString
      <=< lookup hContentDisposition . responseHeaders

    existsconduit r req =
      let a = catchcrossprotoredir r (existsconduit' req uo)
      in catchJust matchconnectionrestricted a retconnectionrestricted

    matchconnectionrestricted he@(HttpExceptionRequest _ (InternalException ie)) =
      case fromException ie of
        Just (ConnectionRestricted _why) -> Just he
        _ -> Nothing
    matchconnectionrestricted _ = Nothing

    retconnectionrestricted he@(HttpExceptionRequest _ (InternalException ie)) =
      case fromException ie of
        Just (ConnectionRestricted why) -> return (Left why)
        _ -> throwM he
    retconnectionrestricted he = throwM he

    existsconduit' req uo' = do
      let req' = headRequest (applyRequest uo req)
      debug "Utility.Url" (show req')
      join $ runResourceT $ do
        resp <- http req' (httpManager uo)
        if responseStatus resp == ok200
          then do
            len <- extractFromResourceT (extractlen resp)
            fn <- extractFromResourceT (extractfilename resp)
            return $ found len fn
          else if responseStatus resp == unauthorized401
            then return $ getBasicAuth uo' (show (getUri req)) >>= \case
              Nothing -> return dne
              Just (ba, signalsuccess) -> do
                ui <- existsconduit'
                  (applyBasicAuth' ba req)
                  (uo' { getBasicAuth = noBasicAuth })
                signalsuccess (urlExists ui)
                return ui
            else return $ return dne

    existscurl u curlparams = do
      output <- catchDefaultIO "" $
        readProcess "curl" $ toCommand curlparams
      let len = extractlencurl output
      let good = found len Nothing
      let isftp = or
            [ "ftp" `isInfixOf` uriScheme u
            -- Check to see if http redirected to ftp.
            , "Location: ftp://" `isInfixOf` output
            ]
      case lastMaybe (lines output) of
        Just ('2':_:_) -> good
        -- don't try to parse ftp status codes; if curl
        -- got a length, it's good
        _ | isftp && isJust len -> good
        _ -> return dne

    existscurlrestricted r u url' defport = existscurl u
      =<< curlRestrictedParams r u defport (basecurlparams url')

    existsfile u = do
      let f = toRawFilePath (unEscapeString (uriPath u))
      s <- catchMaybeIO $ R.getSymbolicLinkStatus f
      case s of
        Just stat -> do
          sz <- getFileSize' f stat
          found (Just sz) Nothing
        Nothing -> return dne

    -- When http server redirects to a protocol which conduit does not
    -- support, it will throw a StatusCodeException with found302
    -- and a Response with the redir Location.
    catchcrossprotoredir r a =
      catchJust (matchStatusCodeException (== found302))
        (Right <$> a)
        (followredir r)

    followredir r (HttpExceptionRequest _ (StatusCodeException resp _)) =
      case headMaybe $ map decodeBS $ getResponseHeader hLocation resp of
        Just url' -> case parseURIRelaxed url' of
          -- only follow http to ftp redirects;
          -- http to file redirect would not be secure,
          -- and http-conduit follows http to http.
          Just u' | isftpurl u' ->
            checkPolicy uo u' $ Right <$>
              existscurlrestricted r u' url' ftpport
          _ -> return (Right dne)
        Nothing -> return (Right dne)
    followredir _ _ = return (Right dne)

-- Parse eg: attachment; filename="fname.ext"
-- per RFC 2616
contentDispositionFilename :: String -> Maybe FilePath
contentDispositionFilename s
  | "attachment; filename=\"" `isPrefixOf` s && "\"" `isSuffixOf` s =
      Just $ dropFromEnd 1 $ drop 1 $ dropWhile (/= '"') s
  | otherwise = Nothing
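
{- A small behavioral sketch of the parser above (the header values are
 - made-up examples, not from a real response):
 -
 - > contentDispositionFilename "attachment; filename=\"fname.ext\""
 - >   == Just "fname.ext"
 - > contentDispositionFilename "inline"
 - >   == Nothing
 -}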

headRequest :: Request -> Request
headRequest r = r
  { method = methodHead
  -- remove default Accept-Encoding header, to get actual,
  -- not gzip compressed size.
  , requestHeaders = (hAcceptEncoding, B.empty) :
      filter (\(h, _) -> h /= hAcceptEncoding)
        (requestHeaders r)
  }

{- Download a perhaps large file, with auto-resume of incomplete downloads.
 -
 - When the download fails, returns an error message.
 -}
download :: MeterUpdate -> Maybe IncrementalVerifier -> URLString -> FilePath -> UrlOptions -> IO (Either String ())
download = download' False
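
{- A minimal usage sketch (the url and destination are hypothetical;
 - this assumes nullMeterUpdate from Utility.Metered as a no-op progress
 - callback, and passes no incremental verifier):
 -
 - > do
 - >   uo <- defUrlOptions
 - >   r <- download nullMeterUpdate Nothing
 - >     "https://example.com/big.tar.gz" "/tmp/big.tar.gz" uo
 - >   either (putStrLn . ("failed: " ++)) (const (putStrLn "done")) r
 -}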

download' :: Bool -> MeterUpdate -> Maybe IncrementalVerifier -> URLString -> FilePath -> UrlOptions -> IO (Either String ())
download' nocurlerror meterupdate iv url file uo =
  catchJust matchHttpException go showhttpexception
    `catchNonAsync` (dlfailed . show)
  where
    go = case parseURIRelaxed url of
      Just u -> checkPolicy uo u $
        case (urlDownloader uo, parseRequestRelaxed u) of
          (DownloadWithConduit (DownloadWithCurlRestricted r), Just req) -> catchJust
            (matchStatusCodeException (== found302))
            (downloadConduit meterupdate iv req file uo >> return (Right ()))
            (followredir r)
          (DownloadWithConduit (DownloadWithCurlRestricted r), Nothing)
            | isfileurl u -> downloadfile u
            | isftpurl u -> downloadcurlrestricted r u url ftpport
            | otherwise -> dlfailed $ unsupportedUrlScheme u
          (DownloadWithCurl _, _)
            | isfileurl u -> downloadfile u
            | otherwise -> downloadcurl url basecurlparams
      Nothing -> do
        liftIO $ debug "Utility.Url" url
        dlfailed "invalid url"

    isfileurl u = uriScheme u == "file:"
    isftpurl u = uriScheme u == "ftp:"

    ftpport = 21

    showhttpexception he = dlfailed $ case he of
      HttpExceptionRequest _ (StatusCodeException r _) ->
        B8.toString $ statusMessage $ responseStatus r
      HttpExceptionRequest _ (InternalException ie) ->
        case fromException ie of
          Nothing -> show ie
          Just (ConnectionRestricted why) -> why
      HttpExceptionRequest _ other -> show other
      _ -> show he

    dlfailed msg = do
      noverification
      return $ Left $ "download failed: " ++ msg

    basecurlparams = curlParams uo
      [ if nocurlerror
          then Param "-S"
          else Param "-sS"
      , Param "-f"
      , Param "-L"
      , Param "-C", Param "-"
      ]

    downloadcurl rawurl curlparams = do
      noverification
      -- curl does not create destination file
      -- if the url happens to be empty, so pre-create.
      unlessM (doesFileExist file) $
        writeFile file ""
      ifM (boolSystem "curl" (curlparams ++ [Param "-o", File file, File rawurl]))
        ( return $ Right ()
        , return $ Left "download failed"
        )

    downloadcurlrestricted r u rawurl defport =
      downloadcurl rawurl =<< curlRestrictedParams r u defport basecurlparams

    downloadfile u = do
      noverification
      let src = unEscapeString (uriPath u)
      withMeteredFile src meterupdate $
        L.writeFile file
      return $ Right ()

    -- Conduit does not support ftp, so will throw an exception on a
    -- redirect to a ftp url; fall back to curl.
    followredir r ex@(HttpExceptionRequest _ (StatusCodeException resp _)) =
      case headMaybe $ map decodeBS $ getResponseHeader hLocation resp of
        Just url' -> case parseURIRelaxed url' of
          Just u' | isftpurl u' ->
            checkPolicy uo u' $
              downloadcurlrestricted r u' url' ftpport
          _ -> throwIO ex
        Nothing -> throwIO ex
    followredir _ ex = throwIO ex

    noverification = maybe noop unableIncrementalVerifier iv

{- Download a perhaps large file using conduit, with auto-resume
 - of incomplete downloads.
 -
 - A Request can be configured to throw exceptions for non-2xx http
 - status codes, or not. That configuration is overridden by this,
 - and if it is unable to download, it throws an exception containing
 - a user-visible explanation of the problem. (However, exceptions
 - thrown for reasons other than http status codes will still be thrown
 - as usual.)
 -}
downloadConduit :: MeterUpdate -> Maybe IncrementalVerifier -> Request -> FilePath -> UrlOptions -> IO ()
downloadConduit meterupdate iv req file uo =
  catchMaybeIO (getFileSize (toRawFilePath file)) >>= \case
    Just sz | sz > 0 -> resumedownload sz
    _ -> join $ runResourceT $ do
      liftIO $ debug "Utility.Url" (show req')
      resp <- http req' (httpManager uo)
      if responseStatus resp == ok200
        then do
          store zeroBytesProcessed WriteMode resp
          return (return ())
        else do
          rf <- extractFromResourceT (respfailure resp)
          if responseStatus resp == unauthorized401
            then return $ getBasicAuth uo (show (getUri req')) >>= \case
              Nothing -> giveup rf
              Just ba -> retryauthed ba
            else return $ giveup rf
  where
    req' = applyRequest uo $ req
      -- Override http-client's default decompression of gzip
      -- compressed files. We want the unmodified file content.
      { requestHeaders = (hAcceptEncoding, "identity") :
          filter ((/= hAcceptEncoding) . fst)
            (requestHeaders req)
      , decompress = const False
      -- Avoid throwing exceptions for non-2xx http status codes,
      -- since we rely on parsing the Response to handle
      -- several such codes.
      , checkResponse = \_ _ -> return ()
      }

    -- Resume download from where a previous download was interrupted,
    -- when supported by the http server. The server may also opt to
    -- send the whole file rather than resuming.
    resumedownload sz = join $ runResourceT $ do
      let req'' = req' { requestHeaders = resumeFromHeader sz : requestHeaders req' }
      liftIO $ debug "Utility.Url" (show req'')
      resp <- http req'' (httpManager uo)
      if responseStatus resp == partialContent206
        then do
          store (toBytesProcessed sz) AppendMode resp
          return (return ())
        else if responseStatus resp == ok200
          then do
            store zeroBytesProcessed WriteMode resp
            return (return ())
          else if alreadydownloaded sz resp
            then do
              liftIO noverification
              return (return ())
            else do
              rf <- extractFromResourceT (respfailure resp)
              if responseStatus resp == unauthorized401
                then return $ getBasicAuth uo (show (getUri req'')) >>= \case
                  Nothing -> giveup rf
                  Just ba -> retryauthed ba
                else return $ giveup rf

    alreadydownloaded sz resp
      | responseStatus resp /= requestedRangeNotSatisfiable416 = False
      | otherwise = case lookup hContentRange (responseHeaders resp) of
          -- This could be improved by fixing
          -- https://github.com/aristidb/http-types/issues/87
          Just crh -> crh == B8.fromString ("bytes */" ++ show sz)
          -- Some http servers send no Content-Range header when
          -- the range extends beyond the end of the file.
          -- There is no way to distinguish between the file
          -- being the same size on the http server, vs
          -- it being shorter than the file we already have.
          -- So assume we have the whole content of the file
          -- already, the same as wget and curl do.
          Nothing -> True

    store initialp mode resp =
      sinkResponseFile meterupdate iv initialp file mode resp

    respfailure = B8.toString . statusMessage . responseStatus

    retryauthed (ba, signalsuccess) = do
      r <- tryNonAsync $ downloadConduit
        meterupdate iv
        (applyBasicAuth' ba req)
        file
        (uo { getBasicAuth = noBasicAuth })
      case r of
        Right () -> signalsuccess True
        Left e -> do
          () <- signalsuccess False
          throwM e

    noverification = maybe noop unableIncrementalVerifier iv

{- Sinks a Response's body to a file. The file can either be appended to
 - (AppendMode), or written from the start of the response (WriteMode).
 - Updates the meter and incremental verifier as data is received,
 - when not appending.
 -
 - Note that the responseStatus is not checked by this function.
 -}
sinkResponseFile
  :: MonadResource m
  => MeterUpdate
  -> Maybe IncrementalVerifier
  -> BytesProcessed
  -> FilePath
  -> IOMode
  -> Response (ConduitM () B8.ByteString m ())
  -> m ()
sinkResponseFile meterupdate iv initialp file mode resp = do
    ui <- case (iv, mode) of
      (Just iv', AppendMode) -> do
        liftIO $ unableIncrementalVerifier iv'
        return (const noop)
      (Just iv', _) -> return (updateIncrementalVerifier iv')
      (Nothing, _) -> return (const noop)
    (fr, fh) <- allocate (openBinaryFile file mode) hClose
    runConduit $ responseBody resp .| go ui initialp fh
    release fr
  where
    go ui sofar fh = await >>= \case
      Nothing -> return ()
      Just bs -> do
        let sofar' = addBytesProcessed sofar (B.length bs)
        liftIO $ do
          void $ meterupdate sofar'
          () <- ui bs
          B.hPut fh bs
        go ui sofar' fh

{- Downloads at least the specified number of bytes from an url. -}
downloadPartial :: URLString -> UrlOptions -> Int -> IO (Maybe L.ByteString)
downloadPartial url uo n = case parseURIRelaxed url of
    Nothing -> return Nothing
    Just u -> go u `catchNonAsync` const (return Nothing)
  where
    go u = case parseRequestRelaxed u of
      Nothing -> return Nothing
      Just req -> do
        let req' = applyRequest uo req
        liftIO $ debug "Utility.Url" (show req')
        withResponse req' (httpManager uo) $ \resp ->
          if responseStatus resp == ok200
            then Just <$> brReadSome (responseBody resp) n
            else return Nothing
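
{- A usage sketch (hypothetical url): fetch roughly the first kilobyte of
 - a file, eg to sniff its type, without downloading the whole thing.
 -
 - > do
 - >   uo <- defUrlOptions
 - >   mb <- downloadPartial "https://example.com/file.bin" uo 1024
 - >   print (fmap L.length mb)
 -}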

{- Generate a http-conduit Request for an URI. This is able
 - to deal with some urls that parseRequest would usually reject.
 -}
parseRequestRelaxed :: MonadThrow m => URI -> m Request
parseRequestRelaxed u = case uriAuthority u of
  Just ua
    -- parseURI can handle an empty port value, but
    -- parseRequest cannot. So remove the ':' to
    -- make it work.
    | uriPort ua == ":" -> parseRequest $ show $
        u { uriAuthority = Just $ ua { uriPort = "" } }
  _ -> parseRequest (show u)
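
{- For example (hypothetical url), "http://example.com:/path" parses as a
 - URI with an empty port; the helper above strips the stray ':' so that
 - parseRequest accepts it:
 -
 - > parseURIRelaxed "http://example.com:/path" >>= parseRequestRelaxed
 -}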

hAcceptEncoding :: CI.CI B.ByteString
hAcceptEncoding = "Accept-Encoding"

hContentDisposition :: CI.CI B.ByteString
hContentDisposition = "Content-Disposition"

hContentRange :: CI.CI B.ByteString
hContentRange = "Content-Range"

resumeFromHeader :: FileSize -> Header
resumeFromHeader sz = (hRange, renderByteRanges [ByteRangeFrom sz])

{- Use with eg:
 -
 - > catchJust (matchStatusCodeException (== notFound404))
 -}
matchStatusCodeException :: (Status -> Bool) -> HttpException -> Maybe HttpException
matchStatusCodeException want = matchStatusCodeHeadersException (\s _h -> want s)

matchStatusCodeHeadersException :: (Status -> ResponseHeaders -> Bool) -> HttpException -> Maybe HttpException
matchStatusCodeHeadersException want e@(HttpExceptionRequest _ (StatusCodeException r _))
  | want (responseStatus r) (responseHeaders r) = Just e
  | otherwise = Nothing
matchStatusCodeHeadersException _ _ = Nothing

{- Use with eg:
 -
 - > catchJust matchHttpException
 -}
matchHttpException :: HttpException -> Maybe HttpException
matchHttpException = Just

matchHttpExceptionContent :: (HttpExceptionContent -> Bool) -> HttpException -> Maybe HttpException
matchHttpExceptionContent want e@(HttpExceptionRequest _ hec)
  | want hec = Just e
  | otherwise = Nothing
matchHttpExceptionContent _ _ = Nothing

{- Constructs parameters that prevent curl from accessing any IP addresses
 - blocked by the Restriction. These are added to the input parameters,
 - which should tell curl what to do.
 -
 - This has to disable redirects because it looks up the IP addresses
 - of the host and after limiting to those allowed by the Restriction,
 - makes curl resolve the host to those IP addresses. It doesn't make sense
 - to use this for http anyway, only for ftp or perhaps other protocols
 - supported by curl.
 -
 - Throws an exception if the Restriction blocks all addresses, or
 - if the dns lookup fails. A malformed url will also cause an exception.
 -}
curlRestrictedParams :: Restriction -> URI -> Int -> [CommandParam] -> IO [CommandParam]
curlRestrictedParams r u defport ps = case uriAuthority u of
    Nothing -> giveup "malformed url"
    Just uath -> case uriPort uath of
      "" -> go (uriRegName uath) defport
      -- ignore an empty port, same as
      -- parseRequestRelaxed does.
      ":" -> go (uriRegName uath) defport
      -- strict parser because the port we provide to curl
      -- needs to match the port in the url
      (':':s) -> case readMaybe s :: Maybe Int of
        Just p -> go (uriRegName uath) p
        Nothing -> giveup "malformed url"
      _ -> giveup "malformed url"
  where
    go hostname p = do
      proto <- getProtocolNumber "tcp"
      let serv = show p
      let hints = defaultHints
            { addrFlags = [AI_ADDRCONFIG]
            , addrProtocol = proto
            , addrSocketType = Stream
            }
      addrs <- getAddrInfo (Just hints) (Just hostname) (Just serv)
      case partitionEithers (map checkrestriction addrs) of
        ((e:_es), []) -> throwIO e
        (_, as)
          | null as -> giveup $
              "cannot resolve host " ++ hostname
          | otherwise -> return $
              (limitresolve p) as ++ ps
    checkrestriction addr = maybe (Right addr) Left $
      checkAddressRestriction r addr
    limitresolve p addrs =
      [ Param "--resolve"
      , Param $ "*:" ++ show p ++ ":" ++ intercalate ":"
          (mapMaybe (bracketaddr <$$> extractIPAddress . addrAddress) addrs)
      -- Don't let a ftp server provide an IP address.
      , Param "--ftp-skip-pasv-ip"
      -- Prevent all http redirects.
      , Param "--max-redirs", Param "0"
      ]
    bracketaddr a = "[" ++ a ++ "]"

data BasicAuth = BasicAuth
  { basicAuthUser :: String
  , basicAuthPassword :: String
  }

-- Note that this is only used when using conduit, not curl.
--
-- The returned IO action is run after trying to use the BasicAuth,
-- indicating if the password worked.
type GetBasicAuth = URLString -> IO (Maybe (BasicAuth, Bool -> IO ()))

noBasicAuth :: GetBasicAuth
noBasicAuth = const $ pure Nothing
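
{- A sketch of a GetBasicAuth that supplies fixed credentials for one
 - hypothetical host and ignores the success callback:
 -
 - > fixedAuth :: GetBasicAuth
 - > fixedAuth u
 - >   | "https://example.com/" `isPrefixOf` u =
 - >       pure $ Just (BasicAuth "user" "secret", const (pure ()))
 - >   | otherwise = pure Nothing
 -}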

applyBasicAuth' :: BasicAuth -> Request -> Request
applyBasicAuth' ba = applyBasicAuth
  (encodeBS (basicAuthUser ba))
  (encodeBS (basicAuthPassword ba))

{- Make sure whatever is returned is fully evaluated. Avoids any possible
 - issues with laziness deferring processing until a time when the resource
 - has been freed. -}
extractFromResourceT :: (MonadIO m, NFData a) => a -> ResourceT m a
extractFromResourceT v = do
  liftIO $ evaluate (rnf v)
  return v