git-annex version 6.20180626
-----BEGIN PGP SIGNATURE----- iQIzBAABCgAdFiEEKKUAw1IH6rcvbA8l2xLbD/BfjzgFAlstCaQACgkQ2xLbD/Bf jzh5nxAAn7D9soTI0ex6AVDDo2CjOyTTDVrIcl2h5XizfuUD3ev5P0TR3BZmzpAb MI6uaZ8kxqZ/eGAsBTyH9PsV7QVYIdht9t89ytP4xWyTQiOgjyJeA6PnJl4zVK9z Y8Of3mlylaz+97+sndljpsvy/KHENrHI7HHd+qxAu7wKysJxG6fJB7CjremkjaCI zAwg3mIy72ZKyuR/8hL9puJN9fdfw1ulkzQR+he007e/HkurPCwgRAOYW/Aa2tpY Oigdb9a6/0nl/VnOS8ZyHrSPRrhLH9c4IBmsdC1Xt5NDVmID/sWgD9uPF9dsHSMF OM25QdSlJ5cSNg+/XCpmmhC9MjgKkuVNpZ/fWBaHFs6KYgGhtZcAayQdz5AmMS2N HTPWB1IxZiV5TQHQpLbdH/q3RfNtRq1G1tc24zpd/zdhzijeTM6D8n4No6LXNq8X 7U0qcrp9TdLOpBCTf6Jrg/7qFaXddHoEW1e3KrsOmB0hlYHuNxfY4bs0+ROeXGOT 00koezcbF8kEI0ekoDvJjtVqaUq+608YjJZ5v7dE0vbtTj0KGbl5EHwC9atUluCX MHyTDY89uq68g4HIDytL001ZLvE3EUGJc4jh3+OMDzuZSKB5uwJIIky+qIaQu34K QJrZuyAIY0sVFV6LUX9nwqTW6Nnx/bB+kZ6k0+gx+Lpf7pUpE+o= =kex4 -----END PGP SIGNATURE----- gpgsig -----BEGIN PGP SIGNATURE----- iQIzBAABCgAdFiEEKKUAw1IH6rcvbA8l2xLbD/BfjzgFAlsxnX4ACgkQ2xLbD/Bf jzjK1xAAnJ58ZxLyTYlCZRcKiR81UHS/Mk6+SDAjRIRbT0SsY+6gSP55XKjrcuOb Jatp+6cNNSgk2lBpn37mq+rYIqboFh9moDRK7JSh1mDHCVtIwdARGblFRfuwaWPi xHnu+Pj43+SP7OF+8qP8/kDM+js3iMS+0gvBBz8pQN/yJDROXii6u0eONOd7vbER iRY9QpJdj5lp3hjaWfXt5iJC0re0eOAY4eUSHPsFIASysShnn33dFPOZ2hbhRKjR unQHUVIUE+ehmW3w9qIqn+9v2kca7laGK11cvzYRpmu/9rrvpf+RF1h42S8822dP CKHvxDkBGbyqTA+F9/6zpU1i9/ARgHFDpScRcdq7ZJi9FbWabKDklHCsgxwrkdXb +FXgb7N5Sa4+eVDNUf4rxldtLPX53nrtZ3IqrGiCWApCvbysNyP5kE0nix02l9z2 xzY2vlpicx7TOMoO9mZesSFNgRzuFAbbya/zDJrz+xfgSRYXRYg58yTpmhpTFvSI h3Fw6+MYvehvRdAweLtoQt2p/UV2MAWrTpNzFoqgf2OCQOiH97ACDHn8Yki9rnQi NuMsqv9WOYQs4SaygDZMKemgAxftf3uaXiBW0RzHHwwWnDjHhqsEioOvOhNNyZbz U3OjKrH1JZlkNHlIBQD4BsWGLlIct66ZTU3k2OxPEp+mpEG/Xi4= =p+cW -----END PGP SIGNATURE----- Merge tag '6.20180626' - previously embargoed security release
This commit is contained in:
commit
3160cadba3
83 changed files with 1909 additions and 648 deletions
|
@ -1,6 +1,6 @@
|
|||
{- git-annex file content managing
|
||||
-
|
||||
- Copyright 2010-2017 Joey Hess <id@joeyh.name>
|
||||
- Copyright 2010-2018 Joey Hess <id@joeyh.name>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
@ -15,6 +15,7 @@ module Annex.Content (
|
|||
lockContentShared,
|
||||
lockContentForRemoval,
|
||||
ContentRemovalLock,
|
||||
RetrievalSecurityPolicy(..),
|
||||
getViaTmp,
|
||||
getViaTmpFromDisk,
|
||||
checkDiskSpaceToGet,
|
||||
|
@ -78,7 +79,7 @@ import qualified Annex.Content.Direct as Direct
|
|||
import Annex.ReplaceFile
|
||||
import Annex.LockPool
|
||||
import Messages.Progress
|
||||
import Types.Remote (unVerified, Verification(..))
|
||||
import Types.Remote (unVerified, Verification(..), RetrievalSecurityPolicy(..))
|
||||
import qualified Types.Remote
|
||||
import qualified Types.Backend
|
||||
import qualified Backend
|
||||
|
@ -293,15 +294,15 @@ lockContentUsing locker key a = do
|
|||
{- Runs an action, passing it the temp file to get,
|
||||
- and if the action succeeds, verifies the file matches
|
||||
- the key and moves the file into the annex as a key's content. -}
|
||||
getViaTmp :: VerifyConfig -> Key -> (FilePath -> Annex (Bool, Verification)) -> Annex Bool
|
||||
getViaTmp v key action = checkDiskSpaceToGet key False $
|
||||
getViaTmpFromDisk v key action
|
||||
getViaTmp :: RetrievalSecurityPolicy -> VerifyConfig -> Key -> (FilePath -> Annex (Bool, Verification)) -> Annex Bool
|
||||
getViaTmp rsp v key action = checkDiskSpaceToGet key False $
|
||||
getViaTmpFromDisk rsp v key action
|
||||
|
||||
{- Like getViaTmp, but does not check that there is enough disk space
|
||||
- for the incoming key. For use when the key content is already on disk
|
||||
- and not being copied into place. -}
|
||||
getViaTmpFromDisk :: VerifyConfig -> Key -> (FilePath -> Annex (Bool, Verification)) -> Annex Bool
|
||||
getViaTmpFromDisk v key action = do
|
||||
getViaTmpFromDisk :: RetrievalSecurityPolicy -> VerifyConfig -> Key -> (FilePath -> Annex (Bool, Verification)) -> Annex Bool
|
||||
getViaTmpFromDisk rsp v key action = checkallowed $ do
|
||||
tmpfile <- prepTmp key
|
||||
resuming <- liftIO $ doesFileExist tmpfile
|
||||
(ok, verification) <- action tmpfile
|
||||
|
@ -315,7 +316,7 @@ getViaTmpFromDisk v key action = do
|
|||
_ -> MustVerify
|
||||
else verification
|
||||
if ok
|
||||
then ifM (verifyKeyContent v verification' key tmpfile)
|
||||
then ifM (verifyKeyContent rsp v verification' key tmpfile)
|
||||
( ifM (pruneTmpWorkDirBefore tmpfile (moveAnnex key))
|
||||
( do
|
||||
logStatus key InfoPresent
|
||||
|
@ -324,30 +325,59 @@ getViaTmpFromDisk v key action = do
|
|||
)
|
||||
, do
|
||||
warning "verification of content failed"
|
||||
-- The bad content is not retained, because
|
||||
-- a retry should not try to resume from it
|
||||
-- since it's apparently corrupted.
|
||||
-- Also, the bad content could be any data,
|
||||
-- including perhaps the content of another
|
||||
-- file than the one that was requested,
|
||||
-- and so it's best not to keep it on disk.
|
||||
pruneTmpWorkDirBefore tmpfile (liftIO . nukeFile)
|
||||
return False
|
||||
)
|
||||
-- On transfer failure, the tmp file is left behind, in case
|
||||
-- caller wants to resume its transfer
|
||||
else return False
|
||||
where
|
||||
-- Avoid running the action to get the content when the
|
||||
-- RetrievalSecurityPolicy would cause verification to always fail.
|
||||
checkallowed a = case rsp of
|
||||
RetrievalAllKeysSecure -> a
|
||||
RetrievalVerifiableKeysSecure
|
||||
| isVerifiable (keyVariety key) -> a
|
||||
| otherwise -> ifM (annexAllowUnverifiedDownloads <$> Annex.getGitConfig)
|
||||
( a
|
||||
, warnUnverifiableInsecure key >> return False
|
||||
)
|
||||
|
||||
{- Verifies that a file is the expected content of a key.
|
||||
-
|
||||
- Configuration can prevent verification, for either a
|
||||
- particular remote or always.
|
||||
- particular remote or always, unless the RetrievalSecurityPolicy
|
||||
- requires verification.
|
||||
-
|
||||
- Most keys have a known size, and if so, the file size is checked.
|
||||
-
|
||||
- When the key's backend allows verifying the content (eg via checksum),
|
||||
- When the key's backend allows verifying the content (via checksum),
|
||||
- it is checked.
|
||||
-
|
||||
- If the RetrievalSecurityPolicy requires verification and the key's
|
||||
- backend doesn't support it, the verification will fail.
|
||||
-}
|
||||
verifyKeyContent :: VerifyConfig -> Verification -> Key -> FilePath -> Annex Bool
|
||||
verifyKeyContent v verification k f = case verification of
|
||||
Verified -> return True
|
||||
UnVerified -> ifM (shouldVerify v)
|
||||
verifyKeyContent :: RetrievalSecurityPolicy -> VerifyConfig -> Verification -> Key -> FilePath -> Annex Bool
|
||||
verifyKeyContent rsp v verification k f = case (rsp, verification) of
|
||||
(_, Verified) -> return True
|
||||
(RetrievalVerifiableKeysSecure, _)
|
||||
| isVerifiable (keyVariety k) -> verify
|
||||
| otherwise -> ifM (annexAllowUnverifiedDownloads <$> Annex.getGitConfig)
|
||||
( verify
|
||||
, warnUnverifiableInsecure k >> return False
|
||||
)
|
||||
(_, UnVerified) -> ifM (shouldVerify v)
|
||||
( verify
|
||||
, return True
|
||||
)
|
||||
MustVerify -> verify
|
||||
(_, MustVerify) -> verify
|
||||
where
|
||||
verify = verifysize <&&> verifycontent
|
||||
verifysize = case keySize k of
|
||||
|
@ -359,6 +389,16 @@ verifyKeyContent v verification k f = case verification of
|
|||
Nothing -> return True
|
||||
Just verifier -> verifier k f
|
||||
|
||||
warnUnverifiableInsecure :: Key -> Annex ()
|
||||
warnUnverifiableInsecure k = warning $ unwords
|
||||
[ "Getting " ++ kv ++ " keys with this remote is not secure;"
|
||||
, "the content cannot be verified to be correct."
|
||||
, "(Use annex.security.allow-unverified-downloads to bypass"
|
||||
, "this safety check.)"
|
||||
]
|
||||
where
|
||||
kv = formatKeyVariety (keyVariety k)
|
||||
|
||||
data VerifyConfig = AlwaysVerify | NoVerify | RemoteVerify Remote | DefaultVerify
|
||||
|
||||
shouldVerify :: VerifyConfig -> Annex Bool
|
||||
|
@ -827,7 +867,7 @@ isUnmodified key f = go =<< geti
|
|||
go (Just fc) = cheapcheck fc <||> expensivecheck fc
|
||||
cheapcheck fc = anyM (compareInodeCaches fc)
|
||||
=<< Database.Keys.getInodeCaches key
|
||||
expensivecheck fc = ifM (verifyKeyContent AlwaysVerify UnVerified key f)
|
||||
expensivecheck fc = ifM (verifyKeyContent RetrievalAllKeysSecure AlwaysVerify UnVerified key f)
|
||||
-- The file could have been modified while it was
|
||||
-- being verified. Detect that.
|
||||
( geti >>= maybe (return False) (compareInodeCaches fc)
|
||||
|
@ -943,18 +983,8 @@ downloadUrl k p urls file =
|
|||
-- Poll the file to handle configurations where an external
|
||||
-- download command is used.
|
||||
meteredFile file (Just p) k $
|
||||
go =<< annexWebDownloadCommand <$> Annex.getGitConfig
|
||||
where
|
||||
go Nothing = Url.withUrlOptions $ \uo ->
|
||||
liftIO $ anyM (\u -> Url.download p u file uo) urls
|
||||
go (Just basecmd) = anyM (downloadcmd basecmd) urls
|
||||
downloadcmd basecmd url =
|
||||
progressCommand "sh" [Param "-c", Param $ gencmd url basecmd]
|
||||
<&&> liftIO (doesFileExist file)
|
||||
gencmd url = massReplace
|
||||
[ ("%file", shellEscape file)
|
||||
, ("%url", shellEscape url)
|
||||
]
|
||||
Url.withUrlOptions $ \uo ->
|
||||
liftIO $ anyM (\u -> Url.download p u file uo) urls
|
||||
|
||||
{- Copies a key's content, when present, to a temp file.
|
||||
- This is used to speed up some rsyncs. -}
|
||||
|
|
66
Annex/Url.hs
66
Annex/Url.hs
|
@ -1,5 +1,5 @@
|
|||
{- Url downloading, with git-annex user agent and configured http
|
||||
- headers and curl options.
|
||||
- headers, security restrictions, etc.
|
||||
-
|
||||
- Copyright 2013-2018 Joey Hess <id@joeyh.name>
|
||||
-
|
||||
|
@ -11,13 +11,18 @@ module Annex.Url (
|
|||
withUrlOptions,
|
||||
getUrlOptions,
|
||||
getUserAgent,
|
||||
httpAddressesUnlimited,
|
||||
) where
|
||||
|
||||
import Annex.Common
|
||||
import qualified Annex
|
||||
import Utility.Url as U
|
||||
import Utility.IPAddress
|
||||
import Utility.HttpManagerRestricted
|
||||
import qualified BuildInfo
|
||||
|
||||
import Network.Socket
|
||||
|
||||
defaultUserAgent :: U.UserAgent
|
||||
defaultUserAgent = "git-annex/" ++ BuildInfo.packageversion
|
||||
|
||||
|
@ -34,15 +39,62 @@ getUrlOptions = Annex.getState Annex.urloptions >>= \case
|
|||
{ Annex.urloptions = Just uo }
|
||||
return uo
|
||||
where
|
||||
mk = mkUrlOptions
|
||||
<$> getUserAgent
|
||||
<*> headers
|
||||
<*> options
|
||||
<*> liftIO (U.newManager U.managerSettings)
|
||||
mk = do
|
||||
(urldownloader, manager) <- checkallowedaddr
|
||||
mkUrlOptions
|
||||
<$> getUserAgent
|
||||
<*> headers
|
||||
<*> pure urldownloader
|
||||
<*> pure manager
|
||||
<*> (annexAllowedUrlSchemes <$> Annex.getGitConfig)
|
||||
|
||||
headers = annexHttpHeadersCommand <$> Annex.getGitConfig >>= \case
|
||||
Just cmd -> lines <$> liftIO (readProcess "sh" ["-c", cmd])
|
||||
Nothing -> annexHttpHeaders <$> Annex.getGitConfig
|
||||
options = map Param . annexWebOptions <$> Annex.getGitConfig
|
||||
|
||||
checkallowedaddr = words . annexAllowedHttpAddresses <$> Annex.getGitConfig >>= \case
|
||||
["all"] -> do
|
||||
-- Only allow curl when all are allowed,
|
||||
-- as its interface does not allow preventing
|
||||
-- it from accessing specific IP addresses.
|
||||
curlopts <- map Param . annexWebOptions <$> Annex.getGitConfig
|
||||
let urldownloader = if null curlopts
|
||||
then U.DownloadWithCurl curlopts
|
||||
else U.DownloadWithConduit
|
||||
manager <- liftIO $ U.newManager U.managerSettings
|
||||
return (urldownloader, manager)
|
||||
allowedaddrs -> do
|
||||
addrmatcher <- liftIO $
|
||||
(\l v -> any (\f -> f v) l) . catMaybes
|
||||
<$> mapM makeAddressMatcher allowedaddrs
|
||||
-- Default to not allowing access to loopback
|
||||
-- and private IP addresses to avoid data
|
||||
-- leakage.
|
||||
let isallowed addr
|
||||
| addrmatcher addr = True
|
||||
| isLoopbackAddress addr = False
|
||||
| isPrivateAddress addr = False
|
||||
| otherwise = True
|
||||
let connectionrestricted = addrConnectionRestricted
|
||||
("Configuration of annex.security.allowed-http-addresses does not allow accessing address " ++)
|
||||
let r = Restriction
|
||||
{ addressRestriction = \addr ->
|
||||
if isallowed (addrAddress addr)
|
||||
then Nothing
|
||||
else Just (connectionrestricted addr)
|
||||
}
|
||||
(settings, pr) <- liftIO $
|
||||
restrictManagerSettings r U.managerSettings
|
||||
case pr of
|
||||
Nothing -> return ()
|
||||
Just ProxyRestricted -> toplevelWarning True
|
||||
"http proxy settings not used due to annex.security.allowed-http-addresses configuration"
|
||||
manager <- liftIO $ U.newManager settings
|
||||
return (U.DownloadWithConduit, manager)
|
||||
|
||||
httpAddressesUnlimited :: Annex Bool
|
||||
httpAddressesUnlimited =
|
||||
("all" == ) . annexAllowedHttpAddresses <$> Annex.getGitConfig
|
||||
|
||||
withUrlOptions :: (U.UrlOptions -> Annex a) -> Annex a
|
||||
withUrlOptions a = a =<< getUrlOptions
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
{- youtube-dl integration for git-annex
|
||||
-
|
||||
- Copyright 2017 Joey Hess <id@joeyh.name>
|
||||
- Copyright 2017-2018 Joey Hess <id@joeyh.name>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
@ -11,7 +11,7 @@ module Annex.YoutubeDl (
|
|||
youtubeDlSupported,
|
||||
youtubeDlCheck,
|
||||
youtubeDlFileName,
|
||||
youtubeDlFileName',
|
||||
youtubeDlFileNameHtmlOnly,
|
||||
) where
|
||||
|
||||
import Annex.Common
|
||||
|
@ -27,6 +27,12 @@ import Logs.Transfer
|
|||
import Network.URI
|
||||
import Control.Concurrent.Async
|
||||
|
||||
-- youtube-dl is can follow redirects to anywhere, including potentially
|
||||
-- localhost or a private address. So, it's only allowed to be used if the
|
||||
-- user has allowed access to all addresses.
|
||||
youtubeDlAllowed :: Annex Bool
|
||||
youtubeDlAllowed = httpAddressesUnlimited
|
||||
|
||||
-- Runs youtube-dl in a work directory, to download a single media file
|
||||
-- from the url. Reutrns the path to the media file in the work directory.
|
||||
--
|
||||
|
@ -41,8 +47,14 @@ import Control.Concurrent.Async
|
|||
-- (Note that we can't use --output to specifiy the file to download to,
|
||||
-- due to <https://github.com/rg3/youtube-dl/issues/14864>)
|
||||
youtubeDl :: URLString -> FilePath -> Annex (Either String (Maybe FilePath))
|
||||
youtubeDl url workdir
|
||||
| supportedScheme url = ifM (liftIO $ inPath "youtube-dl")
|
||||
youtubeDl url workdir = ifM httpAddressesUnlimited
|
||||
( withUrlOptions $ youtubeDl' url workdir
|
||||
, return (Right Nothing)
|
||||
)
|
||||
|
||||
youtubeDl' :: URLString -> FilePath -> UrlOptions -> Annex (Either String (Maybe FilePath))
|
||||
youtubeDl' url workdir uo
|
||||
| supportedScheme uo url = ifM (liftIO $ inPath "youtube-dl")
|
||||
( runcmd >>= \case
|
||||
Right True -> workdirfiles >>= \case
|
||||
(f:[]) -> return (Right (Just f))
|
||||
|
@ -107,7 +119,13 @@ youtubeDlMaxSize workdir = ifM (Annex.getState Annex.force)
|
|||
|
||||
-- Download a media file to a destination,
|
||||
youtubeDlTo :: Key -> URLString -> FilePath -> Annex Bool
|
||||
youtubeDlTo key url dest = do
|
||||
youtubeDlTo key url dest = ifM youtubeDlAllowed
|
||||
( youtubeDlTo' key url dest
|
||||
, return False
|
||||
)
|
||||
|
||||
youtubeDlTo' :: Key -> URLString -> FilePath -> Annex Bool
|
||||
youtubeDlTo' key url dest = do
|
||||
res <- withTmpWorkDir key $ \workdir ->
|
||||
youtubeDl url workdir >>= \case
|
||||
Right (Just mediafile) -> do
|
||||
|
@ -134,8 +152,14 @@ youtubeDlSupported url = either (const False) id <$> youtubeDlCheck url
|
|||
|
||||
-- Check if youtube-dl can find media in an url.
|
||||
youtubeDlCheck :: URLString -> Annex (Either String Bool)
|
||||
youtubeDlCheck url
|
||||
| supportedScheme url = catchMsgIO $ htmlOnly url False $ do
|
||||
youtubeDlCheck url = ifM youtubeDlAllowed
|
||||
( withUrlOptions $ youtubeDlCheck' url
|
||||
, return (Right False)
|
||||
)
|
||||
|
||||
youtubeDlCheck' :: URLString -> UrlOptions -> Annex (Either String Bool)
|
||||
youtubeDlCheck' url uo
|
||||
| supportedScheme uo url = catchMsgIO $ htmlOnly url False $ do
|
||||
opts <- youtubeDlOpts [ Param url, Param "--simulate" ]
|
||||
liftIO $ snd <$> processTranscript "youtube-dl" (toCommand opts) Nothing
|
||||
| otherwise = return (Right False)
|
||||
|
@ -144,18 +168,28 @@ youtubeDlCheck url
|
|||
--
|
||||
-- (This is not always identical to the filename it uses when downloading.)
|
||||
youtubeDlFileName :: URLString -> Annex (Either String FilePath)
|
||||
youtubeDlFileName url
|
||||
| supportedScheme url = flip catchIO (pure . Left . show) $
|
||||
htmlOnly url nomedia (youtubeDlFileName' url)
|
||||
| otherwise = return nomedia
|
||||
youtubeDlFileName url = ifM youtubeDlAllowed
|
||||
( withUrlOptions go
|
||||
, return nomedia
|
||||
)
|
||||
where
|
||||
go uo
|
||||
| supportedScheme uo url = flip catchIO (pure . Left . show) $
|
||||
htmlOnly url nomedia (youtubeDlFileNameHtmlOnly' url uo)
|
||||
| otherwise = return nomedia
|
||||
nomedia = Left "no media in url"
|
||||
|
||||
-- Does not check if the url contains htmlOnly; use when that's already
|
||||
-- been verified.
|
||||
youtubeDlFileName' :: URLString -> Annex (Either String FilePath)
|
||||
youtubeDlFileName' url
|
||||
| supportedScheme url = flip catchIO (pure . Left . show) go
|
||||
youtubeDlFileNameHtmlOnly :: URLString -> Annex (Either String FilePath)
|
||||
youtubeDlFileNameHtmlOnly url = ifM youtubeDlAllowed
|
||||
( withUrlOptions $ youtubeDlFileNameHtmlOnly' url
|
||||
, return (Left "no media in url")
|
||||
)
|
||||
|
||||
youtubeDlFileNameHtmlOnly' :: URLString -> UrlOptions -> Annex (Either String FilePath)
|
||||
youtubeDlFileNameHtmlOnly' url uo
|
||||
| supportedScheme uo url = flip catchIO (pure . Left . show) go
|
||||
| otherwise = return nomedia
|
||||
where
|
||||
go = do
|
||||
|
@ -189,12 +223,13 @@ youtubeDlOpts addopts = do
|
|||
opts <- map Param . annexYoutubeDlOptions <$> Annex.getGitConfig
|
||||
return (opts ++ addopts)
|
||||
|
||||
supportedScheme :: URLString -> Bool
|
||||
supportedScheme url = case uriScheme <$> parseURIRelaxed url of
|
||||
supportedScheme :: UrlOptions -> URLString -> Bool
|
||||
supportedScheme uo url = case parseURIRelaxed url of
|
||||
Nothing -> False
|
||||
-- avoid ugly message from youtube-dl about not supporting file:
|
||||
Just "file:" -> False
|
||||
-- ftp indexes may look like html pages, and there's no point
|
||||
-- involving youtube-dl in a ftp download
|
||||
Just "ftp:" -> False
|
||||
Just _ -> True
|
||||
Just u -> case uriScheme u of
|
||||
-- avoid ugly message from youtube-dl about not supporting file:
|
||||
"file:" -> False
|
||||
-- ftp indexes may look like html pages, and there's no point
|
||||
-- involving youtube-dl in a ftp download
|
||||
"ftp:" -> False
|
||||
_ -> allowedScheme uo u
|
||||
|
|
40
CHANGELOG
40
CHANGELOG
|
@ -1,4 +1,39 @@
|
|||
git-annex (6.20180530) UNRELEASED; urgency=medium
|
||||
* Include uname in standalone builds.
|
||||
|
||||
git-annex (6.20180626) upstream; urgency=high
|
||||
|
||||
Security fix release for CVE-2018-10857 and CVE-2018-10859
|
||||
https://git-annex.branchable.com/security/CVE-2018-10857_and_CVE-2018-10859/
|
||||
|
||||
* Refuse to download content, that cannot be verified with a hash,
|
||||
from encrypted special remotes (for CVE-2018-10859),
|
||||
and from all external special remotes and glacier (for CVE-2018-10857).
|
||||
In particular, URL and WORM keys stored on such remotes won't
|
||||
be downloaded. If this affects your files, you can run
|
||||
`git-annex migrate` on the affected files, to convert them
|
||||
to use a hash.
|
||||
* Added annex.security.allow-unverified-downloads, which can override
|
||||
the above.
|
||||
* Added annex.security.allowed-url-schemes setting, which defaults
|
||||
to only allowing http, https, and ftp URLs. Note especially that file:/
|
||||
is no longer enabled by default.
|
||||
* Removed annex.web-download-command, since its interface does not allow
|
||||
supporting annex.security.allowed-url-schemes across redirects.
|
||||
If you used this setting, you may want to instead use annex.web-options
|
||||
to pass options to curl.
|
||||
* git-annex will refuse to download content from http servers on
|
||||
localhost, or any private IP addresses, to prevent accidental
|
||||
exposure of internal data. This can be overridden with the
|
||||
annex.security.allowed-http-addresses setting.
|
||||
* Local http proxies will not be used unless allowed by the
|
||||
annex.security.allowed-http-addresses setting.
|
||||
* Since the interfaces to curl and youtube-dl do not have a way to
|
||||
prevent them from accessing localhost or private IP addresses,
|
||||
they default to not being used for url downloads.
|
||||
Only when annex.security.allowed-http-addresses=all will curl and
|
||||
youtube-dl be used.
|
||||
|
||||
Non-security fix changes:
|
||||
|
||||
* Fix build with ghc 8.4+, which broke due to the Semigroup Monoid change.
|
||||
* version: Show operating system and repository version list
|
||||
|
@ -17,9 +52,8 @@ git-annex (6.20180530) UNRELEASED; urgency=medium
|
|||
* When content has been lost from an export remote and
|
||||
git-annex fsck --from remote has noticed it's gone, re-running
|
||||
git-annex export or git-annex sync --content will re-upload it.
|
||||
* Include uname in standalone builds.
|
||||
|
||||
-- Joey Hess <id@joeyh.name> Wed, 30 May 2018 11:49:08 -0400
|
||||
-- Joey Hess <id@joeyh.name> Fri, 22 Jun 2018 10:36:22 -0400
|
||||
|
||||
git-annex (6.20180529) upstream; urgency=medium
|
||||
|
||||
|
|
46
COPYRIGHT
46
COPYRIGHT
|
@ -24,6 +24,11 @@ Copyright: 2011 Bas van Dijk & Roel van Dijk
|
|||
2012, 2013 Joey Hess <id@joeyh.name>
|
||||
License: BSD-2-clause
|
||||
|
||||
Files: Utility/HttpManagerRestricted.hs
|
||||
Copyright: 2018 Joey Hess <id@joeyh.name>
|
||||
2013 Michael Snoyman
|
||||
License: MIT
|
||||
|
||||
Files: Utility/*
|
||||
Copyright: 2012-2018 Joey Hess <id@joeyh.name>
|
||||
License: BSD-2-clause
|
||||
|
@ -51,26 +56,7 @@ Copyright: © 2005-2011 by John Resig, Branden Aaron & Jörn Zaefferer
|
|||
License: MIT or GPL-2
|
||||
The full text of version 2 of the GPL is distributed in
|
||||
/usr/share/common-licenses/GPL-2 on Debian systems. The text of the MIT
|
||||
license follows:
|
||||
.
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
.
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
.
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
license is in the MIT section below.
|
||||
|
||||
Files: static/*/bootstrap* static/*/glyphicons-halflings*
|
||||
Copyright: 2012-2014 Twitter, Inc.
|
||||
|
@ -153,6 +139,26 @@ License: BSD-2-clause
|
|||
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
SUCH DAMAGE.
|
||||
|
||||
License: MIT
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
.
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
.
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
License: AGPL-3+
|
||||
GNU AFFERO GENERAL PUBLIC LICENSE
|
||||
|
|
|
@ -277,7 +277,7 @@ downloadWeb o url urlinfo file =
|
|||
-- Ask youtube-dl what filename it will download
|
||||
-- first, and check if that is already an annexed file,
|
||||
-- to avoid unnecessary work in that case.
|
||||
| otherwise = youtubeDlFileName' url >>= \case
|
||||
| otherwise = youtubeDlFileNameHtmlOnly url >>= \case
|
||||
Right dest -> ifAnnexed dest
|
||||
(alreadyannexed dest)
|
||||
(dl dest)
|
||||
|
|
|
@ -109,7 +109,7 @@ getKey' key afile = dispatch
|
|||
| Remote.hasKeyCheap r =
|
||||
either (const False) id <$> Remote.hasKey r key
|
||||
| otherwise = return True
|
||||
docopy r witness = getViaTmp (RemoteVerify r) key $ \dest ->
|
||||
docopy r witness = getViaTmp (Remote.retrievalSecurityPolicy r) (RemoteVerify r) key $ \dest ->
|
||||
download (Remote.uuid r) key afile stdRetry
|
||||
(\p -> do
|
||||
showAction $ "from " ++ Remote.name r
|
||||
|
|
|
@ -207,7 +207,7 @@ fromPerform src removewhen key afile = do
|
|||
where
|
||||
go = notifyTransfer Download afile $
|
||||
download (Remote.uuid src) key afile stdRetry $ \p ->
|
||||
getViaTmp (RemoteVerify src) key $ \t ->
|
||||
getViaTmp (Remote.retrievalSecurityPolicy src) (RemoteVerify src) key $ \t ->
|
||||
Remote.retrieveKeyFile src key afile t p
|
||||
dispatch _ _ False = stop -- failed
|
||||
dispatch RemoveNever _ True = next $ return True -- copy complete
|
||||
|
|
|
@ -213,7 +213,7 @@ storeReceived f = do
|
|||
warning $ "Received a file " ++ f ++ " that is not a git-annex key. Deleting this file."
|
||||
liftIO $ nukeFile f
|
||||
Just k -> void $
|
||||
getViaTmpFromDisk AlwaysVerify k $ \dest -> unVerified $
|
||||
getViaTmpFromDisk RetrievalVerifiableKeysSecure AlwaysVerify k $ \dest -> unVerified $
|
||||
liftIO $ catchBoolIO $ do
|
||||
rename f dest
|
||||
return True
|
||||
|
|
|
@ -83,7 +83,7 @@ linkKey file oldkey newkey = ifM (isJust <$> isAnnexLink file)
|
|||
- This avoids hard linking to content linked to an
|
||||
- unlocked file, which would leave the new key unlocked
|
||||
- and vulnerable to corruption. -}
|
||||
( getViaTmpFromDisk DefaultVerify newkey $ \tmp -> unVerified $ do
|
||||
( getViaTmpFromDisk RetrievalAllKeysSecure DefaultVerify newkey $ \tmp -> unVerified $ do
|
||||
oldobj <- calcRepo (gitAnnexLocation oldkey)
|
||||
linkOrCopy' (return True) newkey oldobj tmp Nothing
|
||||
, do
|
||||
|
|
|
@ -13,6 +13,7 @@ import Annex.Action
|
|||
import Annex
|
||||
import Utility.Rsync
|
||||
import Types.Transfer
|
||||
import Types.Remote (RetrievalSecurityPolicy(..))
|
||||
import Command.SendKey (fieldTransfer)
|
||||
import qualified CmdLine.GitAnnexShell.Fields as Fields
|
||||
|
||||
|
@ -31,7 +32,9 @@ start key = fieldTransfer Download key $ \_p -> do
|
|||
fromunlocked <- (isJust <$> Fields.getField Fields.unlocked)
|
||||
<||> (isJust <$> Fields.getField Fields.direct)
|
||||
let verify = if fromunlocked then AlwaysVerify else DefaultVerify
|
||||
ifM (getViaTmp verify key go)
|
||||
-- This matches the retrievalSecurityPolicy of Remote.Git
|
||||
let rsp = RetrievalAllKeysSecure
|
||||
ifM (getViaTmp rsp verify key go)
|
||||
( do
|
||||
-- forcibly quit after receiving one key,
|
||||
-- and shutdown cleanly
|
||||
|
|
|
@ -45,7 +45,7 @@ startSrcDest (src:dest:[])
|
|||
showStart "reinject" dest
|
||||
next $ ifAnnexed dest go stop
|
||||
where
|
||||
go key = ifM (verifyKeyContent DefaultVerify UnVerified key src)
|
||||
go key = ifM (verifyKeyContent RetrievalAllKeysSecure DefaultVerify UnVerified key src)
|
||||
( perform src key
|
||||
, error "failed"
|
||||
)
|
||||
|
|
|
@ -33,7 +33,7 @@ perform file key = do
|
|||
-- the file might be on a different filesystem, so moveFile is used
|
||||
-- rather than simply calling moveAnnex; disk space is also
|
||||
-- checked this way.
|
||||
ok <- getViaTmp DefaultVerify key $ \dest -> unVerified $
|
||||
ok <- getViaTmp RetrievalAllKeysSecure DefaultVerify key $ \dest -> unVerified $
|
||||
if dest /= file
|
||||
then liftIO $ catchBoolIO $ do
|
||||
moveFile file dest
|
||||
|
|
|
@ -179,7 +179,7 @@ test st r k =
|
|||
Just b -> case Backend.verifyKeyContent b of
|
||||
Nothing -> return True
|
||||
Just verifier -> verifier k (key2file k)
|
||||
get = getViaTmp (RemoteVerify r) k $ \dest ->
|
||||
get = getViaTmp (Remote.retrievalSecurityPolicy r) (RemoteVerify r) k $ \dest ->
|
||||
Remote.retrieveKeyFile r k (AssociatedFile Nothing)
|
||||
dest nullMeterUpdate
|
||||
store = Remote.storeKey r k (AssociatedFile Nothing) nullMeterUpdate
|
||||
|
@ -220,7 +220,7 @@ testExportTree st (Just _) ea k1 k2 =
|
|||
retrieveexport k = withTmpFile "exported" $ \tmp h -> do
|
||||
liftIO $ hClose h
|
||||
ifM (Remote.retrieveExport ea k testexportlocation tmp nullMeterUpdate)
|
||||
( verifyKeyContent AlwaysVerify UnVerified k tmp
|
||||
( verifyKeyContent RetrievalAllKeysSecure AlwaysVerify UnVerified k tmp
|
||||
, return False
|
||||
)
|
||||
checkpresentexport k = Remote.checkPresentExport ea k testexportlocation
|
||||
|
@ -238,10 +238,10 @@ testUnavailable st r k =
|
|||
, check (`notElem` [Right True, Right False]) "checkPresent" $
|
||||
Remote.checkPresent r k
|
||||
, check (== Right False) "retrieveKeyFile" $
|
||||
getViaTmp (RemoteVerify r) k $ \dest ->
|
||||
getViaTmp (Remote.retrievalSecurityPolicy r) (RemoteVerify r) k $ \dest ->
|
||||
Remote.retrieveKeyFile r k (AssociatedFile Nothing) dest nullMeterUpdate
|
||||
, check (== Right False) "retrieveKeyFileCheap" $
|
||||
getViaTmp (RemoteVerify r) k $ \dest -> unVerified $
|
||||
getViaTmp (Remote.retrievalSecurityPolicy r) (RemoteVerify r) k $ \dest -> unVerified $
|
||||
Remote.retrieveKeyFileCheap r k (AssociatedFile Nothing) dest
|
||||
]
|
||||
where
|
||||
|
|
|
@ -60,7 +60,7 @@ toPerform key file remote = go Upload file $
|
|||
fromPerform :: Key -> AssociatedFile -> Remote -> CommandPerform
|
||||
fromPerform key file remote = go Upload file $
|
||||
download (uuid remote) key file stdRetry $ \p ->
|
||||
getViaTmp (RemoteVerify remote) key $
|
||||
getViaTmp (retrievalSecurityPolicy remote) (RemoteVerify remote) key $
|
||||
\t -> Remote.retrieveKeyFile remote key file t p
|
||||
|
||||
go :: Direction -> AssociatedFile -> (NotifyWitness -> Annex Bool) -> CommandPerform
|
||||
|
|
|
@ -42,7 +42,7 @@ start = do
|
|||
return ok
|
||||
| otherwise = notifyTransfer direction file $
|
||||
download (Remote.uuid remote) key file stdRetry $ \p ->
|
||||
getViaTmp (RemoteVerify remote) key $ \t -> do
|
||||
getViaTmp (Remote.retrievalSecurityPolicy remote) (RemoteVerify remote) key $ \t -> do
|
||||
r <- Remote.retrieveKeyFile remote key file t p
|
||||
-- Make sure we get the current
|
||||
-- associated files data for the key,
|
||||
|
|
26
NEWS
26
NEWS
|
@ -1,3 +1,29 @@
|
|||
git-annex (6.20180626) upstream; urgency=high
|
||||
|
||||
A security fix has changed git-annex to refuse to download content from
|
||||
some special remotes when the content cannot be verified with a hash check.
|
||||
In particular URL and WORM keys stored on such remotes won't be downloaded.
|
||||
See the documentation of the annex.security.allow-unverified-downloads
|
||||
configuration for how to deal with this if it affects your files.
|
||||
|
||||
A security fix has changed git-annex to only support http, https, and ftp
|
||||
URL schemes by default. You can enable other URL schemes, at your own risk,
|
||||
using annex.security.allowed-url-schemes.
|
||||
|
||||
A related security fix prevents git-annex from connecting to http
|
||||
servers (and proxies) on localhost or private networks. This can
|
||||
be overridden, at your own risk, using annex.security.allowed-http-addresses.
|
||||
|
||||
Setting annex.web-options no longer is enough to make curl be used,
|
||||
and youtube-dl is also no longer used by default. See the
|
||||
documentation of annex.security.allowed-http-addresses for
|
||||
details and how to enable them.
|
||||
|
||||
The annex.web-download-command configuration has been removed,
|
||||
use annex.web-options instead.
|
||||
|
||||
-- Joey Hess <id@joeyh.name> Fri, 15 Jun 2018 17:54:23 -0400
|
||||
|
||||
git-annex (6.20180309) upstream; urgency=medium
|
||||
|
||||
Note that, due to not using rsync to transfer files over ssh
|
||||
|
|
|
@ -22,6 +22,7 @@ import P2P.Protocol
|
|||
import P2P.IO
|
||||
import Logs.Location
|
||||
import Types.NumCopies
|
||||
import Types.Remote (RetrievalSecurityPolicy(..))
|
||||
import Utility.Metered
|
||||
|
||||
import Control.Monad.Free
|
||||
|
@ -63,9 +64,12 @@ runLocal runst runner a = case a of
|
|||
Right Nothing -> runner (next False)
|
||||
Left e -> return (Left (show e))
|
||||
StoreContent k af o l getb validitycheck next -> do
|
||||
-- This is the same as the retrievalSecurityPolicy of
|
||||
-- Remote.P2P and Remote.Git.
|
||||
let rsp = RetrievalAllKeysSecure
|
||||
ok <- flip catchNonAsync (const $ return False) $
|
||||
transfer download k af $ \p ->
|
||||
getViaTmp DefaultVerify k $ \tmp -> do
|
||||
getViaTmp rsp DefaultVerify k $ \tmp -> do
|
||||
storefile tmp o l getb validitycheck p
|
||||
runner (next ok)
|
||||
StoreContentTo dest o l getb validitycheck next -> do
|
||||
|
|
|
@ -12,6 +12,7 @@ module Remote (
|
|||
storeKey,
|
||||
retrieveKeyFile,
|
||||
retrieveKeyFileCheap,
|
||||
retrievalSecurityPolicy,
|
||||
removeKey,
|
||||
hasKey,
|
||||
hasKeyCheap,
|
||||
|
|
|
@ -48,6 +48,7 @@ gen r u c gc = do
|
|||
, storeKey = storeKeyDummy
|
||||
, retrieveKeyFile = retreiveKeyFileDummy
|
||||
, retrieveKeyFileCheap = \_ _ _ -> return False
|
||||
, retrievalSecurityPolicy = RetrievalAllKeysSecure
|
||||
, removeKey = removeKeyDummy
|
||||
, lockContent = Nothing
|
||||
, checkPresent = checkPresentDummy
|
||||
|
|
|
@ -59,6 +59,8 @@ gen r _ c gc =
|
|||
, storeKey = uploadKey
|
||||
, retrieveKeyFile = downloadKey
|
||||
, retrieveKeyFileCheap = downloadKeyCheap
|
||||
-- Bittorrent does its own hash checks.
|
||||
, retrievalSecurityPolicy = RetrievalAllKeysSecure
|
||||
, removeKey = dropKey
|
||||
, lockContent = Nothing
|
||||
, checkPresent = checkKey
|
||||
|
|
|
@ -59,6 +59,9 @@ gen r u c gc = do
|
|||
, storeKey = storeKeyDummy
|
||||
, retrieveKeyFile = retreiveKeyFileDummy
|
||||
, retrieveKeyFileCheap = retrieveCheap buprepo
|
||||
-- Bup uses git, which cryptographically verifies content
|
||||
-- (with SHA1, but sufficiently for this).
|
||||
, retrievalSecurityPolicy = RetrievalAllKeysSecure
|
||||
, removeKey = removeKeyDummy
|
||||
, lockContent = Nothing
|
||||
, checkPresent = checkPresentDummy
|
||||
|
|
|
@ -58,6 +58,9 @@ gen r u c gc = do
|
|||
, storeKey = storeKeyDummy
|
||||
, retrieveKeyFile = retreiveKeyFileDummy
|
||||
, retrieveKeyFileCheap = retrieveCheap
|
||||
-- ddar communicates over ssh, not subject to http redirect
|
||||
-- type attacks
|
||||
, retrievalSecurityPolicy = RetrievalAllKeysSecure
|
||||
, removeKey = removeKeyDummy
|
||||
, lockContent = Nothing
|
||||
, checkPresent = checkPresentDummy
|
||||
|
|
|
@ -58,6 +58,7 @@ gen r u c gc = do
|
|||
, storeKey = storeKeyDummy
|
||||
, retrieveKeyFile = retreiveKeyFileDummy
|
||||
, retrieveKeyFileCheap = retrieveKeyFileCheapM dir chunkconfig
|
||||
, retrievalSecurityPolicy = RetrievalAllKeysSecure
|
||||
, removeKey = removeKeyDummy
|
||||
, lockContent = Nothing
|
||||
, checkPresent = checkPresentDummy
|
||||
|
|
|
@ -109,6 +109,11 @@ gen r u c gc
|
|||
, storeKey = storeKeyDummy
|
||||
, retrieveKeyFile = retreiveKeyFileDummy
|
||||
, retrieveKeyFileCheap = \_ _ _ -> return False
|
||||
-- External special remotes use many http libraries
|
||||
-- and have no protection against redirects to
|
||||
-- local private web servers, or in some cases
|
||||
-- to file:// urls.
|
||||
, retrievalSecurityPolicy = RetrievalVerifiableKeysSecure
|
||||
, removeKey = removeKeyDummy
|
||||
, lockContent = Nothing
|
||||
, checkPresent = checkPresentDummy
|
||||
|
|
|
@ -113,6 +113,7 @@ gen' r u c gc = do
|
|||
, storeKey = storeKeyDummy
|
||||
, retrieveKeyFile = retreiveKeyFileDummy
|
||||
, retrieveKeyFileCheap = \_ _ _ -> return False
|
||||
, retrievalSecurityPolicy = RetrievalAllKeysSecure
|
||||
, removeKey = removeKeyDummy
|
||||
, lockContent = Nothing
|
||||
, checkPresent = checkPresentDummy
|
||||
|
|
|
@ -161,6 +161,7 @@ gen r u c gc
|
|||
, storeKey = copyToRemote new st
|
||||
, retrieveKeyFile = copyFromRemote new st
|
||||
, retrieveKeyFileCheap = copyFromRemoteCheap new st
|
||||
, retrievalSecurityPolicy = RetrievalAllKeysSecure
|
||||
, removeKey = dropKey new st
|
||||
, lockContent = Just (lockKey new st)
|
||||
, checkPresent = inAnnex new st
|
||||
|
@ -625,10 +626,11 @@ copyToRemote' repo r (State connpool duc _) key file meterupdate
|
|||
ensureInitialized
|
||||
copier <- mkCopier hardlink params
|
||||
let verify = Annex.Content.RemoteVerify r
|
||||
let rsp = RetrievalAllKeysSecure
|
||||
runTransfer (Transfer Download u key) file stdRetry $ \p ->
|
||||
let p' = combineMeterUpdate meterupdate p
|
||||
in Annex.Content.saveState True `after`
|
||||
Annex.Content.getViaTmp verify key
|
||||
Annex.Content.getViaTmp rsp verify key
|
||||
(\dest -> copier object dest p' (liftIO checksuccessio))
|
||||
)
|
||||
copyremotefallback p = Annex.Content.sendAnnex key noop $ \object -> do
|
||||
|
|
|
@ -55,6 +55,11 @@ gen r u c gc = new <$> remoteCost gc veryExpensiveRemoteCost
|
|||
, storeKey = storeKeyDummy
|
||||
, retrieveKeyFile = retreiveKeyFileDummy
|
||||
, retrieveKeyFileCheap = retrieveCheap this
|
||||
-- glacier-cli does not follow redirects and does
|
||||
-- not support file://, as far as we know, but
|
||||
-- there's no guarantee that will continue to be
|
||||
-- the case, so require verifiable keys.
|
||||
, retrievalSecurityPolicy = RetrievalVerifiableKeysSecure
|
||||
, removeKey = removeKeyDummy
|
||||
, lockContent = Nothing
|
||||
, checkPresent = checkPresentDummy
|
||||
|
|
|
@ -162,6 +162,14 @@ specialRemote' cfg c preparestorer prepareretriever prepareremover preparecheckp
|
|||
(retrieveKeyFileCheap baser k f d)
|
||||
-- retrieval of encrypted keys is never cheap
|
||||
(\_ -> return False)
|
||||
-- When encryption is used, the remote could provide
|
||||
-- some other content encrypted by the user, and trick
|
||||
-- git-annex into decrypting it, leaking the decryption
|
||||
-- into the git-annex repository. Verifiable keys
|
||||
-- are the main protection against this attack.
|
||||
, retrievalSecurityPolicy = if isencrypted
|
||||
then RetrievalVerifiableKeysSecure
|
||||
else retrievalSecurityPolicy baser
|
||||
, removeKey = \k -> cip >>= removeKeyGen k
|
||||
, checkPresent = \k -> cip >>= checkPresentGen k
|
||||
, cost = if isencrypted
|
||||
|
|
|
@ -49,6 +49,9 @@ gen r u c gc = do
|
|||
, storeKey = storeKeyDummy
|
||||
, retrieveKeyFile = retreiveKeyFileDummy
|
||||
, retrieveKeyFileCheap = retrieveCheap hooktype
|
||||
-- A hook could use http and be vulnerable to
|
||||
-- redirect to file:// attacks, etc.
|
||||
, retrievalSecurityPolicy = RetrievalVerifiableKeysSecure
|
||||
, removeKey = removeKeyDummy
|
||||
, lockContent = Nothing
|
||||
, checkPresent = checkPresentDummy
|
||||
|
|
|
@ -53,6 +53,7 @@ chainGen addr r u c gc = do
|
|||
, storeKey = store (const protorunner)
|
||||
, retrieveKeyFile = retrieve (const protorunner)
|
||||
, retrieveKeyFileCheap = \_ _ _ -> return False
|
||||
, retrievalSecurityPolicy = RetrievalAllKeysSecure
|
||||
, removeKey = remove protorunner
|
||||
, lockContent = Just $ lock withconn runProtoConn u
|
||||
, checkPresent = checkpresent protorunner
|
||||
|
|
|
@ -72,6 +72,7 @@ gen r u c gc = do
|
|||
, storeKey = storeKeyDummy
|
||||
, retrieveKeyFile = retreiveKeyFileDummy
|
||||
, retrieveKeyFileCheap = retrieveCheap o
|
||||
, retrievalSecurityPolicy = RetrievalAllKeysSecure
|
||||
, removeKey = removeKeyDummy
|
||||
, lockContent = Nothing
|
||||
, checkPresent = checkPresentDummy
|
||||
|
|
|
@ -84,6 +84,9 @@ gen r u c gc = do
|
|||
, storeKey = storeKeyDummy
|
||||
, retrieveKeyFile = retreiveKeyFileDummy
|
||||
, retrieveKeyFileCheap = retrieveCheap
|
||||
-- HttpManagerRestricted is used here, so this is
|
||||
-- secure.
|
||||
, retrievalSecurityPolicy = RetrievalAllKeysSecure
|
||||
, removeKey = removeKeyDummy
|
||||
, lockContent = Nothing
|
||||
, checkPresent = checkPresentDummy
|
||||
|
|
|
@ -73,6 +73,8 @@ gen r u c gc = do
|
|||
, storeKey = store u hdl
|
||||
, retrieveKeyFile = retrieve u hdl
|
||||
, retrieveKeyFileCheap = \_ _ _ -> return False
|
||||
-- Tahoe cryptographically verifies content.
|
||||
, retrievalSecurityPolicy = RetrievalAllKeysSecure
|
||||
, removeKey = remove
|
||||
, lockContent = Nothing
|
||||
, checkPresent = checkKey u hdl
|
||||
|
|
|
@ -48,6 +48,9 @@ gen r _ c gc =
|
|||
, storeKey = uploadKey
|
||||
, retrieveKeyFile = downloadKey
|
||||
, retrieveKeyFileCheap = downloadKeyCheap
|
||||
-- HttpManagerRestricted is used here, so this is
|
||||
-- secure.
|
||||
, retrievalSecurityPolicy = RetrievalAllKeysSecure
|
||||
, removeKey = dropKey
|
||||
, lockContent = Nothing
|
||||
, checkPresent = checkKey
|
||||
|
|
|
@ -72,6 +72,9 @@ gen r u c gc = new <$> remoteCost gc expensiveRemoteCost
|
|||
, storeKey = storeKeyDummy
|
||||
, retrieveKeyFile = retreiveKeyFileDummy
|
||||
, retrieveKeyFileCheap = retrieveCheap
|
||||
-- HttpManagerRestricted is used here, so this is
|
||||
-- secure.
|
||||
, retrievalSecurityPolicy = RetrievalAllKeysSecure
|
||||
, removeKey = removeKeyDummy
|
||||
, lockContent = Nothing
|
||||
, checkPresent = checkPresentDummy
|
||||
|
|
6
Test.hs
6
Test.hs
|
@ -1714,10 +1714,12 @@ test_add_subdirs = intmpclonerepo $ do
|
|||
test_addurl :: Assertion
|
||||
test_addurl = intmpclonerepo $ do
|
||||
-- file:// only; this test suite should not hit the network
|
||||
let filecmd c ps = git_annex c ("-cannex.security.allowed-url-schemes=file" : ps)
|
||||
f <- absPath "myurl"
|
||||
let url = replace "\\" "/" ("file:///" ++ dropDrive f)
|
||||
writeFile f "foo"
|
||||
git_annex "addurl" [url] @? ("addurl failed on " ++ url)
|
||||
not <$> git_annex "addurl" [url] @? "addurl failed to fail on file url"
|
||||
filecmd "addurl" [url] @? ("addurl failed on " ++ url)
|
||||
let dest = "addurlurldest"
|
||||
git_annex "addurl" ["--file", dest, url] @? ("addurl failed on " ++ url ++ " with --file")
|
||||
filecmd "addurl" ["--file", dest, url] @? ("addurl failed on " ++ url ++ " with --file")
|
||||
doesFileExist dest @? (dest ++ " missing after addurl --file")
|
||||
|
|
|
@ -33,8 +33,10 @@ import Config.DynamicConfig
|
|||
import Utility.HumanTime
|
||||
import Utility.Gpg (GpgCmd, mkGpgCmd)
|
||||
import Utility.ThreadScheduler (Seconds(..))
|
||||
import Utility.Url (Scheme, mkScheme)
|
||||
|
||||
import Control.Concurrent.STM
|
||||
import qualified Data.Set as S
|
||||
|
||||
-- | A configurable value, that may not be fully determined yet because
|
||||
-- the global git config has not yet been loaded.
|
||||
|
@ -71,7 +73,6 @@ data GitConfig = GitConfig
|
|||
, annexWebOptions :: [String]
|
||||
, annexYoutubeDlOptions :: [String]
|
||||
, annexAriaTorrentOptions :: [String]
|
||||
, annexWebDownloadCommand :: Maybe String
|
||||
, annexCrippledFileSystem :: Bool
|
||||
, annexLargeFiles :: Maybe String
|
||||
, annexAddSmallFiles :: Bool
|
||||
|
@ -93,6 +94,9 @@ data GitConfig = GitConfig
|
|||
, annexSecureHashesOnly :: Bool
|
||||
, annexRetry :: Maybe Integer
|
||||
, annexRetryDelay :: Maybe Seconds
|
||||
, annexAllowedUrlSchemes :: S.Set Scheme
|
||||
, annexAllowedHttpAddresses :: String
|
||||
, annexAllowUnverifiedDownloads :: Bool
|
||||
, coreSymlinks :: Bool
|
||||
, coreSharedRepository :: SharedRepository
|
||||
, receiveDenyCurrentBranch :: DenyCurrentBranch
|
||||
|
@ -133,7 +137,6 @@ extractGitConfig r = GitConfig
|
|||
, annexWebOptions = getwords (annex "web-options")
|
||||
, annexYoutubeDlOptions = getwords (annex "youtube-dl-options")
|
||||
, annexAriaTorrentOptions = getwords (annex "aria-torrent-options")
|
||||
, annexWebDownloadCommand = getmaybe (annex "web-download-command")
|
||||
, annexCrippledFileSystem = getbool (annex "crippledfilesystem") False
|
||||
, annexLargeFiles = getmaybe (annex "largefiles")
|
||||
, annexAddSmallFiles = getbool (annex "addsmallfiles") True
|
||||
|
@ -159,6 +162,13 @@ extractGitConfig r = GitConfig
|
|||
, annexRetry = getmayberead (annex "retry")
|
||||
, annexRetryDelay = Seconds
|
||||
<$> getmayberead (annex "retrydelay")
|
||||
, annexAllowedUrlSchemes = S.fromList $ map mkScheme $
|
||||
maybe ["http", "https", "ftp"] words $
|
||||
getmaybe (annex "security.allowed-url-schemes")
|
||||
, annexAllowedHttpAddresses = fromMaybe "" $
|
||||
getmaybe (annex "security.allowed-http-addresses")
|
||||
, annexAllowUnverifiedDownloads = (== Just "ACKTHPPT") $
|
||||
getmaybe (annex "security.allow-unverified-downloads")
|
||||
, coreSymlinks = getbool "core.symlinks" True
|
||||
, coreSharedRepository = getSharedRepository r
|
||||
, receiveDenyCurrentBranch = getDenyCurrentBranch r
|
||||
|
|
19
Types/Key.hs
19
Types/Key.hs
|
@ -1,6 +1,6 @@
|
|||
{- git-annex Key data type
|
||||
-
|
||||
- Copyright 2011-2017 Joey Hess <id@joeyh.name>
|
||||
- Copyright 2011-2018 Joey Hess <id@joeyh.name>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
@ -88,6 +88,23 @@ cryptographicallySecure (Blake2sKey _ _) = True
|
|||
cryptographicallySecure (Blake2spKey _ _) = True
|
||||
cryptographicallySecure _ = False
|
||||
|
||||
{- Is the Key variety backed by a hash, which allows verifying content?
|
||||
- It does not have to be cryptographically secure against eg birthday
|
||||
- attacks.
|
||||
-}
|
||||
isVerifiable :: KeyVariety -> Bool
|
||||
isVerifiable (SHA2Key _ _) = True
|
||||
isVerifiable (SHA3Key _ _) = True
|
||||
isVerifiable (SKEINKey _ _) = True
|
||||
isVerifiable (Blake2bKey _ _) = True
|
||||
isVerifiable (Blake2sKey _ _) = True
|
||||
isVerifiable (Blake2spKey _ _) = True
|
||||
isVerifiable (SHA1Key _) = True
|
||||
isVerifiable (MD5Key _) = True
|
||||
isVerifiable WORMKey = False
|
||||
isVerifiable URLKey = False
|
||||
isVerifiable (OtherKey _) = False
|
||||
|
||||
formatKeyVariety :: KeyVariety -> String
|
||||
formatKeyVariety v = case v of
|
||||
SHA2Key sz e -> adde e (addsz sz "SHA")
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
-
|
||||
- Most things should not need this, using Types instead
|
||||
-
|
||||
- Copyright 2011-2017 Joey Hess <id@joeyh.name>
|
||||
- Copyright 2011-2018 Joey Hess <id@joeyh.name>
|
||||
-
|
||||
- Licensed under the GNU GPL version 3 or higher.
|
||||
-}
|
||||
|
@ -18,6 +18,7 @@ module Types.Remote
|
|||
, Availability(..)
|
||||
, Verification(..)
|
||||
, unVerified
|
||||
, RetrievalSecurityPolicy(..)
|
||||
, isExportSupported
|
||||
, ExportActions(..)
|
||||
)
|
||||
|
@ -85,6 +86,8 @@ data RemoteA a = Remote
|
|||
-- Retrieves a key's contents to a tmp file, if it can be done cheaply.
|
||||
-- It's ok to create a symlink or hardlink.
|
||||
, retrieveKeyFileCheap :: Key -> AssociatedFile -> FilePath -> a Bool
|
||||
-- Security policy for reteiving keys from this remote.
|
||||
, retrievalSecurityPolicy :: RetrievalSecurityPolicy
|
||||
-- Removes a key's contents (succeeds if the contents are not present)
|
||||
, removeKey :: Key -> a Bool
|
||||
-- Uses locking to prevent removal of a key's contents,
|
||||
|
@ -165,6 +168,32 @@ unVerified a = do
|
|||
ok <- a
|
||||
return (ok, UnVerified)
|
||||
|
||||
-- Security policy indicating what keys can be safely retrieved from a
|
||||
-- remote.
|
||||
data RetrievalSecurityPolicy
|
||||
= RetrievalVerifiableKeysSecure
|
||||
-- ^ Transfer of keys whose content can be verified
|
||||
-- with a hash check is secure; transfer of unverifiable keys is
|
||||
-- not secure and should not be allowed.
|
||||
--
|
||||
-- This is used eg, when HTTP to a remote could be redirected to a
|
||||
-- local private web server or even a file:// url, causing private
|
||||
-- data from it that is not the intended content of a key to make
|
||||
-- its way into the git-annex repository.
|
||||
--
|
||||
-- It's also used when content is stored encrypted on a remote,
|
||||
-- which could replace it with a different encrypted file, and
|
||||
-- trick git-annex into decrypting it and leaking the decryption
|
||||
-- into the git-annex repository.
|
||||
--
|
||||
-- It's not (currently) used when the remote could alter the
|
||||
-- content stored on it, because git-annex does not provide
|
||||
-- strong guarantees about the content of keys that cannot be
|
||||
-- verified with a hash check.
|
||||
-- (But annex.securehashesonly does provide such guarantees.)
|
||||
| RetrievalAllKeysSecure
|
||||
-- ^ Any key can be securely retrieved.
|
||||
|
||||
isExportSupported :: RemoteA a -> a Bool
|
||||
isExportSupported r = exportSupported (remotetype r) (config r) (gitconfig r)
|
||||
|
||||
|
|
232
Utility/HttpManagerRestricted.hs
Normal file
232
Utility/HttpManagerRestricted.hs
Normal file
|
@ -0,0 +1,232 @@
|
|||
{- | Restricted Manager for http-client-tls
|
||||
-
|
||||
- Copyright 2018 Joey Hess <id@joeyh.name>
|
||||
-
|
||||
- Portions from http-client-tls Copyright (c) 2013 Michael Snoyman
|
||||
-
|
||||
- License: MIT
|
||||
-}
|
||||
|
||||
{-# LANGUAGE ScopedTypeVariables, DeriveDataTypeable, LambdaCase, PatternGuards #-}
|
||||
{-# LANGUAGE CPP #-}
|
||||
|
||||
module Utility.HttpManagerRestricted (
|
||||
restrictManagerSettings,
|
||||
Restriction(..),
|
||||
ConnectionRestricted(..),
|
||||
addrConnectionRestricted,
|
||||
ProxyRestricted(..),
|
||||
IPAddrString,
|
||||
) where
|
||||
|
||||
import Network.HTTP.Client
|
||||
import Network.HTTP.Client.Internal
|
||||
(ManagerSettings(..), Connection, runProxyOverride, makeConnection)
|
||||
import Network.Socket
|
||||
import Network.BSD (getProtocolNumber)
|
||||
import Control.Exception
|
||||
import qualified Network.Connection as NC
|
||||
import qualified Data.ByteString.UTF8 as BU
|
||||
import Data.Default
|
||||
import Data.Typeable
|
||||
import Control.Applicative
|
||||
|
||||
data Restriction = Restriction
|
||||
{ addressRestriction :: AddrInfo -> Maybe ConnectionRestricted
|
||||
}
|
||||
|
||||
-- | An exception used to indicate that the connection was restricted.
|
||||
data ConnectionRestricted = ConnectionRestricted String
|
||||
deriving (Show, Typeable)
|
||||
|
||||
instance Exception ConnectionRestricted
|
||||
|
||||
type IPAddrString = String
|
||||
|
||||
-- | Constructs a ConnectionRestricted, passing the function a string
|
||||
-- containing the IP address.
|
||||
addrConnectionRestricted :: (IPAddrString -> String) -> AddrInfo -> ConnectionRestricted
|
||||
addrConnectionRestricted mkmessage =
|
||||
ConnectionRestricted . mkmessage . showSockAddress . addrAddress
|
||||
|
||||
data ProxyRestricted = ProxyRestricted
|
||||
deriving (Show)
|
||||
|
||||
-- | Adjusts a ManagerSettings to enforce a Restriction. The restriction
|
||||
-- will be checked each time a Request is made, and for each redirect
|
||||
-- followed.
|
||||
--
|
||||
-- The http proxy is also checked against the Restriction, and if
|
||||
-- access to it is blocked, the http proxy will not be used.
|
||||
restrictManagerSettings
|
||||
:: Restriction
|
||||
-> ManagerSettings
|
||||
-> IO (ManagerSettings, Maybe ProxyRestricted)
|
||||
restrictManagerSettings cfg base = restrictProxy cfg $ base
|
||||
{ managerRawConnection = restrictedRawConnection cfg
|
||||
, managerTlsConnection = restrictedTlsConnection cfg
|
||||
#if MIN_VERSION_http_client(0,5,0)
|
||||
, managerWrapException = wrapOurExceptions base
|
||||
#else
|
||||
, managerWrapIOException = wrapOurExceptions base
|
||||
#endif
|
||||
}
|
||||
|
||||
restrictProxy
|
||||
:: Restriction
|
||||
-> ManagerSettings
|
||||
-> IO (ManagerSettings, Maybe ProxyRestricted)
|
||||
restrictProxy cfg base = do
|
||||
http_proxy_addr <- getproxyaddr False
|
||||
https_proxy_addr <- getproxyaddr True
|
||||
let (http_proxy, http_r) = mkproxy http_proxy_addr
|
||||
let (https_proxy, https_r) = mkproxy https_proxy_addr
|
||||
let ms = managerSetInsecureProxy http_proxy $
|
||||
managerSetSecureProxy https_proxy base
|
||||
return (ms, http_r <|> https_r)
|
||||
where
|
||||
-- This does not use localhost because http-client may choose
|
||||
-- not to use the proxy for localhost.
|
||||
testnetip = "198.51.100.1"
|
||||
dummyreq https = parseRequest_ $
|
||||
"http" ++ (if https then "s" else "") ++ "://" ++ testnetip
|
||||
|
||||
getproxyaddr https = extractproxy >>= \case
|
||||
Nothing -> return Nothing
|
||||
Just p -> do
|
||||
proto <- getProtocolNumber "tcp"
|
||||
let serv = show (proxyPort p)
|
||||
let hints = defaultHints
|
||||
{ addrFlags = [AI_ADDRCONFIG]
|
||||
, addrProtocol = proto
|
||||
, addrSocketType = Stream
|
||||
}
|
||||
let h = BU.toString $ proxyHost p
|
||||
getAddrInfo (Just hints) (Just h) (Just serv) >>= \case
|
||||
[] -> return Nothing
|
||||
(addr:_) -> return $ Just addr
|
||||
where
|
||||
-- These contortions are necessary until this issue
|
||||
-- is fixed:
|
||||
-- https://github.com/snoyberg/http-client/issues/355
|
||||
extractproxy = do
|
||||
let po = if https
|
||||
then managerProxySecure base
|
||||
else managerProxyInsecure base
|
||||
f <- runProxyOverride po https
|
||||
return $ proxy $ f $ dummyreq https
|
||||
|
||||
mkproxy Nothing = (noProxy, Nothing)
|
||||
mkproxy (Just proxyaddr) = case addressRestriction cfg proxyaddr of
|
||||
Nothing -> (addrtoproxy (addrAddress proxyaddr), Nothing)
|
||||
Just _ -> (noProxy, Just ProxyRestricted)
|
||||
|
||||
addrtoproxy addr = case addr of
|
||||
SockAddrInet pn _ -> mk pn
|
||||
SockAddrInet6 pn _ _ _ -> mk pn
|
||||
_ -> noProxy
|
||||
where
|
||||
mk pn = useProxy Network.HTTP.Client.Proxy
|
||||
{ proxyHost = BU.fromString (showSockAddress addr)
|
||||
, proxyPort = fromIntegral pn
|
||||
}
|
||||
|
||||
#if MIN_VERSION_http_client(0,5,0)
|
||||
wrapOurExceptions :: ManagerSettings -> Request -> IO a -> IO a
|
||||
wrapOurExceptions base req a =
|
||||
let wrapper se
|
||||
| Just (_ :: ConnectionRestricted) <- fromException se =
|
||||
toException $ HttpExceptionRequest req $
|
||||
InternalException se
|
||||
| otherwise = se
|
||||
in managerWrapException base req (handle (throwIO . wrapper) a)
|
||||
#else
|
||||
wrapOurExceptions :: ManagerSettings -> IO a -> IO a
|
||||
wrapOurExceptions base a =
|
||||
let wrapper se = case fromException se of
|
||||
Just (_ :: ConnectionRestricted) ->
|
||||
-- Not really a TLS exception, but there is no
|
||||
-- way to put SomeException in the
|
||||
-- InternalIOException this old version uses.
|
||||
toException $ TlsException se
|
||||
Nothing -> se
|
||||
in managerWrapIOException base (handle (throwIO . wrapper) a)
|
||||
#endif
|
||||
|
||||
restrictedRawConnection :: Restriction -> IO (Maybe HostAddress -> String -> Int -> IO Connection)
|
||||
restrictedRawConnection cfg = getConnection cfg Nothing
|
||||
|
||||
restrictedTlsConnection :: Restriction -> IO (Maybe HostAddress -> String -> Int -> IO Connection)
|
||||
restrictedTlsConnection cfg = getConnection cfg $
|
||||
-- It's not possible to access the TLSSettings
|
||||
-- used in the base ManagerSettings. So, use the default
|
||||
-- value, which is the same thing http-client-tls defaults to.
|
||||
-- Since changing from the default settings can only make TLS
|
||||
-- less secure, this is not a big problem.
|
||||
Just def
|
||||
|
||||
|
||||
|
||||
-- Based on Network.HTTP.Client.TLS.getTlsConnection.
|
||||
--
|
||||
-- Checks the Restriction
|
||||
--
|
||||
-- Does not support SOCKS.
|
||||
getConnection :: Restriction -> Maybe NC.TLSSettings -> IO (Maybe HostAddress -> String -> Int -> IO Connection)
|
||||
getConnection cfg tls = do
|
||||
context <- NC.initConnectionContext
|
||||
return $ \_ha h p -> bracketOnError
|
||||
(go context h p)
|
||||
NC.connectionClose
|
||||
convertConnection
|
||||
where
|
||||
go context h p = do
|
||||
let connparams = NC.ConnectionParams
|
||||
{ NC.connectionHostname = h
|
||||
, NC.connectionPort = fromIntegral p
|
||||
, NC.connectionUseSecure = tls
|
||||
, NC.connectionUseSocks = Nothing -- unsupprted
|
||||
}
|
||||
proto <- getProtocolNumber "tcp"
|
||||
let serv = show p
|
||||
let hints = defaultHints
|
||||
{ addrFlags = [AI_ADDRCONFIG]
|
||||
, addrProtocol = proto
|
||||
, addrSocketType = Stream
|
||||
}
|
||||
addrs <- getAddrInfo (Just hints) (Just h) (Just serv)
|
||||
bracketOnError
|
||||
(firstSuccessful $ map tryToConnect addrs)
|
||||
close
|
||||
(\sock -> NC.connectFromSocket context sock connparams)
|
||||
where
|
||||
tryToConnect addr = case addressRestriction cfg addr of
|
||||
Nothing -> bracketOnError
|
||||
(socket (addrFamily addr) (addrSocketType addr) (addrProtocol addr))
|
||||
close
|
||||
(\sock -> connect sock (addrAddress addr) >> return sock)
|
||||
Just r -> throwIO r
|
||||
firstSuccessful [] = throwIO $ NC.HostNotResolved h
|
||||
firstSuccessful (a:as) = a `catch` \(e ::IOException) ->
|
||||
case as of
|
||||
[] -> throwIO e
|
||||
_ -> firstSuccessful as
|
||||
|
||||
-- Copied from Network.HTTP.Client.TLS, unfortunately not exported.
|
||||
convertConnection :: NC.Connection -> IO Connection
|
||||
convertConnection conn = makeConnection
|
||||
(NC.connectionGetChunk conn)
|
||||
(NC.connectionPut conn)
|
||||
-- Closing an SSL connection gracefully involves writing/reading
|
||||
-- on the socket. But when this is called the socket might be
|
||||
-- already closed, and we get a @ResourceVanished@.
|
||||
(NC.connectionClose conn `Control.Exception.catch` \(_ :: IOException) -> return ())
|
||||
|
||||
-- For ipv4 and ipv6, the string will contain only the IP address,
|
||||
-- omitting the port that the Show instance includes.
|
||||
showSockAddress :: SockAddr -> IPAddrString
|
||||
showSockAddress a@(SockAddrInet _ _) =
|
||||
takeWhile (/= ':') $ show a
|
||||
showSockAddress a@(SockAddrInet6 _ _ _ _) =
|
||||
takeWhile (/= ']') $ drop 1 $ show a
|
||||
showSockAddress a = show a
|
93
Utility/IPAddress.hs
Normal file
93
Utility/IPAddress.hs
Normal file
|
@ -0,0 +1,93 @@
|
|||
{- IP addresses
|
||||
-
|
||||
- Copyright 2018 Joey Hess <id@joeyh.name>
|
||||
-
|
||||
- License: BSD-2-clause
|
||||
-}
|
||||
|
||||
module Utility.IPAddress where
|
||||
|
||||
import Utility.Exception
|
||||
|
||||
import Network.Socket
|
||||
import Data.Word
|
||||
import Control.Applicative
|
||||
import Prelude
|
||||
|
||||
{- Check if an IP address is a loopback address; connecting to it
|
||||
- may connect back to the local host. -}
|
||||
isLoopbackAddress :: SockAddr -> Bool
|
||||
isLoopbackAddress (SockAddrInet _ ipv4) = case hostAddressToTuple ipv4 of
|
||||
-- localhost
|
||||
(127,_,_,_) -> True
|
||||
-- current network; functions equivilant to loopback
|
||||
(0,_,_, _) -> True
|
||||
_ -> False
|
||||
isLoopbackAddress (SockAddrInet6 _ _ ipv6 _) = case hostAddress6ToTuple ipv6 of
|
||||
-- localhost
|
||||
(0,0,0,0,0,0,0,1) -> True
|
||||
-- unspecified address; functions equivilant to loopback
|
||||
(0,0,0,0,0,0,0,0) -> True
|
||||
v -> maybe False
|
||||
(isLoopbackAddress . SockAddrInet 0)
|
||||
(embeddedIpv4 v)
|
||||
isLoopbackAddress _ = False
|
||||
|
||||
{- Check if an IP address is not globally routed, and is used
|
||||
- for private communication, eg on a LAN. -}
|
||||
isPrivateAddress :: SockAddr -> Bool
|
||||
isPrivateAddress (SockAddrInet _ ipv4) = case hostAddressToTuple ipv4 of
|
||||
-- lan
|
||||
(10,_,_,_) -> True
|
||||
(172,n,_,_) | n >= 16 && n <= 31 -> True -- 172.16.0.0/12
|
||||
(192,168,_,_) -> True
|
||||
-- carrier-grade NAT
|
||||
(100,n,0,0) | n >= 64 && n <= 127 -> True -- 100.64.0.0/10
|
||||
-- link-local
|
||||
(169,254,_,_) -> True
|
||||
_ -> False
|
||||
isPrivateAddress (SockAddrInet6 _ _ ipv6 _) = case hostAddress6ToTuple ipv6 of
|
||||
v@(n,_,_,_,_,_,_,_)
|
||||
-- local to lan or private between orgs
|
||||
| n >= 0xfc00 && n <= 0xfdff -> True -- fc00::/7
|
||||
-- link-local
|
||||
| n >= 0xfe80 && n <= 0xfebf -> True -- fe80::/10
|
||||
| otherwise -> maybe False
|
||||
(isPrivateAddress . SockAddrInet 0)
|
||||
(embeddedIpv4 v)
|
||||
isPrivateAddress _ = False
|
||||
|
||||
embeddedIpv4 :: (Word16, Word16, Word16, Word16, Word16, Word16, Word16, Word16) -> Maybe HostAddress
|
||||
embeddedIpv4 v = case v of
|
||||
-- IPv4 mapped address (::ffff:0:0/96)
|
||||
(0,0,0,0,0,0xffff,a,b) -> Just (toipv4 a b)
|
||||
-- IPV4 translated address (::ffff:0:ipv4)
|
||||
(0,0,0,0,0xffff,0,a,b) -> Just (toipv4 a b)
|
||||
-- IPV4/IPV6 translation (64:ff9b::ipv4)
|
||||
(0x64,0xff9b,0,0,0,0,a,b) -> Just (toipv4 a b)
|
||||
_ -> Nothing
|
||||
where
|
||||
toipv4 a b = htonl $ fromIntegral a * (2^halfipv4bits) + fromIntegral b
|
||||
halfipv4bits = 16 :: Word32
|
||||
|
||||
{- Given a string containing an IP address, make a function that will
|
||||
- match that address in a SockAddr. Nothing when the address cannot be
|
||||
- parsed.
|
||||
-
|
||||
- This does not involve any DNS lookups.
|
||||
-}
|
||||
makeAddressMatcher :: String -> IO (Maybe (SockAddr -> Bool))
|
||||
makeAddressMatcher s = go
|
||||
<$> catchDefaultIO [] (getAddrInfo (Just hints) (Just s) Nothing)
|
||||
where
|
||||
hints = defaultHints
|
||||
{ addrSocketType = Stream
|
||||
, addrFlags = [AI_NUMERICHOST]
|
||||
}
|
||||
|
||||
go [] = Nothing
|
||||
go l = Just $ \sockaddr -> any (match sockaddr) (map addrAddress l)
|
||||
|
||||
match (SockAddrInet _ a) (SockAddrInet _ b) = a == b
|
||||
match (SockAddrInet6 _ _ a _) (SockAddrInet6 _ _ b _) = a == b
|
||||
match _ _ = False
|
155
Utility/Url.hs
155
Utility/Url.hs
|
@ -15,6 +15,10 @@ module Utility.Url (
|
|||
managerSettings,
|
||||
URLString,
|
||||
UserAgent,
|
||||
Scheme,
|
||||
mkScheme,
|
||||
allowedScheme,
|
||||
UrlDownloader(..),
|
||||
UrlOptions(..),
|
||||
defUrlOptions,
|
||||
mkUrlOptions,
|
||||
|
@ -34,6 +38,7 @@ module Utility.Url (
|
|||
|
||||
import Common
|
||||
import Utility.Metered
|
||||
import Utility.HttpManagerRestricted
|
||||
|
||||
import Network.URI
|
||||
import Network.HTTP.Types
|
||||
|
@ -41,9 +46,10 @@ import qualified Data.CaseInsensitive as CI
|
|||
import qualified Data.ByteString as B
|
||||
import qualified Data.ByteString.UTF8 as B8
|
||||
import qualified Data.ByteString.Lazy as L
|
||||
import qualified Data.Set as S
|
||||
import Control.Monad.Trans.Resource
|
||||
import Network.HTTP.Conduit
|
||||
import Network.HTTP.Client (brRead, withResponse)
|
||||
import Network.HTTP.Client
|
||||
import Data.Conduit
|
||||
|
||||
#if ! MIN_VERSION_http_client(0,5,0)
|
||||
|
@ -65,12 +71,22 @@ type Headers = [String]
|
|||
|
||||
type UserAgent = String
|
||||
|
||||
newtype Scheme = Scheme (CI.CI String)
|
||||
deriving (Eq, Ord)
|
||||
|
||||
mkScheme :: String -> Scheme
|
||||
mkScheme = Scheme . CI.mk
|
||||
|
||||
fromScheme :: Scheme -> String
|
||||
fromScheme (Scheme s) = CI.original s
|
||||
|
||||
data UrlOptions = UrlOptions
|
||||
{ userAgent :: Maybe UserAgent
|
||||
, reqHeaders :: Headers
|
||||
, urlDownloader :: UrlDownloader
|
||||
, applyRequest :: Request -> Request
|
||||
, httpManager :: Manager
|
||||
, allowedSchemes :: S.Set Scheme
|
||||
}
|
||||
|
||||
data UrlDownloader
|
||||
|
@ -84,20 +100,12 @@ defUrlOptions = UrlOptions
|
|||
<*> pure DownloadWithConduit
|
||||
<*> pure id
|
||||
<*> newManager managerSettings
|
||||
<*> pure (S.fromList $ map mkScheme ["http", "https", "ftp"])
|
||||
|
||||
mkUrlOptions :: Maybe UserAgent -> Headers -> [CommandParam] -> Manager -> UrlOptions
|
||||
mkUrlOptions defuseragent reqheaders reqparams manager =
|
||||
mkUrlOptions :: Maybe UserAgent -> Headers -> UrlDownloader -> Manager -> S.Set Scheme -> UrlOptions
|
||||
mkUrlOptions defuseragent reqheaders urldownloader manager =
|
||||
UrlOptions useragent reqheaders urldownloader applyrequest manager
|
||||
where
|
||||
urldownloader = if null reqparams
|
||||
#if MIN_VERSION_cryptonite(0,6,0)
|
||||
then DownloadWithConduit
|
||||
#else
|
||||
-- Work around for old cryptonite bug that broke tls.
|
||||
-- https://github.com/vincenthz/hs-tls/issues/109
|
||||
then DownloadWithCurl reqparams
|
||||
#endif
|
||||
else DownloadWithCurl reqparams
|
||||
applyrequest = \r -> r { requestHeaders = requestHeaders r ++ addedheaders }
|
||||
addedheaders = uaheader ++ otherheaders
|
||||
useragent = maybe defuseragent (Just . B8.toString . snd)
|
||||
|
@ -115,7 +123,7 @@ mkUrlOptions defuseragent reqheaders reqparams manager =
|
|||
_ -> (h', B8.fromString v)
|
||||
|
||||
curlParams :: UrlOptions -> [CommandParam] -> [CommandParam]
|
||||
curlParams uo ps = ps ++ uaparams ++ headerparams ++ addedparams
|
||||
curlParams uo ps = ps ++ uaparams ++ headerparams ++ addedparams ++ schemeparams
|
||||
where
|
||||
uaparams = case userAgent uo of
|
||||
Nothing -> []
|
||||
|
@ -124,6 +132,31 @@ curlParams uo ps = ps ++ uaparams ++ headerparams ++ addedparams
|
|||
addedparams = case urlDownloader uo of
|
||||
DownloadWithConduit -> []
|
||||
DownloadWithCurl l -> l
|
||||
schemeparams =
|
||||
[ Param "--proto"
|
||||
, Param $ intercalate "," ("-all" : schemelist)
|
||||
]
|
||||
schemelist = map fromScheme $ S.toList $ allowedSchemes uo
|
||||
|
||||
checkPolicy :: UrlOptions -> URI -> a -> IO a -> IO a
|
||||
checkPolicy uo u onerr a
|
||||
| allowedScheme uo u = a
|
||||
| otherwise = do
|
||||
hPutStrLn stderr $
|
||||
"Configuration does not allow accessing " ++ show u
|
||||
hFlush stderr
|
||||
return onerr
|
||||
|
||||
unsupportedUrlScheme :: URI -> IO ()
|
||||
unsupportedUrlScheme u = do
|
||||
hPutStrLn stderr $
|
||||
"Unsupported url scheme" ++ show u
|
||||
hFlush stderr
|
||||
|
||||
allowedScheme :: UrlOptions -> URI -> Bool
|
||||
allowedScheme uo u = uscheme `S.member` allowedSchemes uo
|
||||
where
|
||||
uscheme = mkScheme $ takeWhile (/=':') (uriScheme u)
|
||||
|
||||
{- Checks that an url exists and could be successfully downloaded,
|
||||
- also checking that its size, if available, matches a specified size. -}
|
||||
|
@ -158,32 +191,26 @@ assumeUrlExists = UrlInfo True Nothing Nothing
|
|||
- also returning its size and suggested filename if available. -}
|
||||
getUrlInfo :: URLString -> UrlOptions -> IO UrlInfo
|
||||
getUrlInfo url uo = case parseURIRelaxed url of
|
||||
Just u -> case (urlDownloader uo, parseUrlConduit (show u)) of
|
||||
(DownloadWithConduit, Just req) -> catchJust
|
||||
-- When http redirects to a protocol which
|
||||
-- conduit does not support, it will throw
|
||||
-- a StatusCodeException with found302.
|
||||
(matchStatusCodeException (== found302))
|
||||
(existsconduit req)
|
||||
(const (existscurl u))
|
||||
`catchNonAsync` (const dne)
|
||||
-- http-conduit does not support file:, ftp:, etc urls,
|
||||
-- so fall back to reading files and using curl.
|
||||
_
|
||||
| uriScheme u == "file:" -> do
|
||||
let f = unEscapeString (uriPath u)
|
||||
s <- catchMaybeIO $ getFileStatus f
|
||||
case s of
|
||||
Just stat -> do
|
||||
sz <- getFileSize' f stat
|
||||
found (Just sz) Nothing
|
||||
Nothing -> dne
|
||||
| otherwise -> existscurl u
|
||||
Nothing -> dne
|
||||
Just u -> checkPolicy uo u dne $
|
||||
case (urlDownloader uo, parseUrlConduit (show u)) of
|
||||
(DownloadWithConduit, Just req) ->
|
||||
existsconduit req
|
||||
`catchNonAsync` (const $ return dne)
|
||||
(DownloadWithConduit, Nothing)
|
||||
| isfileurl u -> existsfile u
|
||||
| otherwise -> do
|
||||
unsupportedUrlScheme u
|
||||
return dne
|
||||
(DownloadWithCurl _, _)
|
||||
| isfileurl u -> existsfile u
|
||||
| otherwise -> existscurl u
|
||||
Nothing -> return dne
|
||||
where
|
||||
dne = return $ UrlInfo False Nothing Nothing
|
||||
dne = UrlInfo False Nothing Nothing
|
||||
found sz f = return $ UrlInfo True sz f
|
||||
|
||||
isfileurl u = uriScheme u == "file:"
|
||||
|
||||
curlparams = curlParams uo $
|
||||
[ Param "-s"
|
||||
, Param "--head"
|
||||
|
@ -213,7 +240,7 @@ getUrlInfo url uo = case parseURIRelaxed url of
|
|||
then found
|
||||
(extractlen resp)
|
||||
(extractfilename resp)
|
||||
else dne
|
||||
else return dne
|
||||
|
||||
existscurl u = do
|
||||
output <- catchDefaultIO "" $
|
||||
|
@ -230,7 +257,16 @@ getUrlInfo url uo = case parseURIRelaxed url of
|
|||
-- don't try to parse ftp status codes; if curl
|
||||
-- got a length, it's good
|
||||
_ | isftp && isJust len -> good
|
||||
_ -> dne
|
||||
_ -> return dne
|
||||
|
||||
existsfile u = do
|
||||
let f = unEscapeString (uriPath u)
|
||||
s <- catchMaybeIO $ getFileStatus f
|
||||
case s of
|
||||
Just stat -> do
|
||||
sz <- getFileSize' f stat
|
||||
found (Just sz) Nothing
|
||||
Nothing -> return dne
|
||||
|
||||
-- Parse eg: attachment; filename="fname.ext"
|
||||
-- per RFC 2616
|
||||
|
@ -252,10 +288,6 @@ headRequest r = r
|
|||
}
|
||||
|
||||
{- Download a perhaps large file, with auto-resume of incomplete downloads.
|
||||
-
|
||||
- By default, conduit is used for the download, except for file: urls,
|
||||
- which are copied. If the url scheme is not supported by conduit, falls
|
||||
- back to using curl.
|
||||
-
|
||||
- Displays error message on stderr when download failed.
|
||||
-}
|
||||
|
@ -265,22 +297,21 @@ download meterupdate url file uo =
|
|||
`catchNonAsync` showerr
|
||||
where
|
||||
go = case parseURIRelaxed url of
|
||||
Just u -> case (urlDownloader uo, parseUrlConduit (show u)) of
|
||||
(DownloadWithConduit, Just req) -> catchJust
|
||||
-- When http redirects to a protocol which
|
||||
-- conduit does not support, it will throw
|
||||
-- a StatusCodeException with found302.
|
||||
(matchStatusCodeException (== found302))
|
||||
(downloadconduit req)
|
||||
(const downloadcurl)
|
||||
_
|
||||
| uriScheme u == "file:" -> do
|
||||
let src = unEscapeString (uriPath u)
|
||||
withMeteredFile src meterupdate $
|
||||
L.writeFile file
|
||||
return True
|
||||
| otherwise -> downloadcurl
|
||||
Just u -> checkPolicy uo u False $
|
||||
case (urlDownloader uo, parseUrlConduit (show u)) of
|
||||
(DownloadWithConduit, Just req) ->
|
||||
downloadconduit req
|
||||
(DownloadWithConduit, Nothing)
|
||||
| isfileurl u -> downloadfile u
|
||||
| otherwise -> do
|
||||
unsupportedUrlScheme u
|
||||
return False
|
||||
(DownloadWithCurl _, _)
|
||||
| isfileurl u -> downloadfile u
|
||||
| otherwise -> downloadcurl
|
||||
Nothing -> return False
|
||||
|
||||
isfileurl u = uriScheme u == "file:"
|
||||
|
||||
downloadconduit req = catchMaybeIO (getFileSize file) >>= \case
|
||||
Nothing -> runResourceT $ do
|
||||
|
@ -333,6 +364,10 @@ download meterupdate url file uo =
|
|||
let msg = case he of
|
||||
HttpExceptionRequest _ (StatusCodeException _ msgb) ->
|
||||
B8.toString msgb
|
||||
HttpExceptionRequest _ (InternalException ie) ->
|
||||
case fromException ie of
|
||||
Nothing -> show ie
|
||||
Just (ConnectionRestricted why) -> why
|
||||
HttpExceptionRequest _ other -> show other
|
||||
_ -> show he
|
||||
#else
|
||||
|
@ -365,6 +400,12 @@ download meterupdate url file uo =
|
|||
, Param "-C", Param "-"
|
||||
]
|
||||
boolSystem "curl" (ps ++ [Param "-o", File file, File url])
|
||||
|
||||
downloadfile u = do
|
||||
let src = unEscapeString (uriPath u)
|
||||
withMeteredFile src meterupdate $
|
||||
L.writeFile file
|
||||
return True
|
||||
|
||||
{- Sinks a Response's body to a file. The file can either be opened in
|
||||
- WriteMode or AppendMode. Updates the meter as data is received.
|
||||
|
|
|
@ -53,6 +53,9 @@ contents that were checked into a repository earlier, you should avoid
|
|||
using the non-cryptographically-secure backends, and will need to use
|
||||
signed git commits. See [[tips/using_signed_git_commits]] for details.
|
||||
|
||||
Retrieval of WORM and URL from many [[special_remotes]] is prohibited
|
||||
for [[security_reasons|security/CVE-2018-10857_and_CVE-2018-10859]].
|
||||
|
||||
Note that the various 512 and 384 length hashes result in long paths,
|
||||
which are known to not work on Windows. If interoperability on Windows is a
|
||||
concern, avoid those.
|
||||
|
|
|
@ -3,3 +3,6 @@ It appears that `git annex importfeed` can not be used in with socks proxies bec
|
|||
For `addurl`, I could configure the system to use a socks proxy by setting `git config annex.web-download-command "curl --silent --preproxy socks4a://localhost:1080 %url -o %file"`; for `importfeed`, I found no option to override the command used to fetch the URL, and `wget` [lacks SOCKS support](https://savannah.gnu.org/bugs/?func=detailitem&item_id=43576).
|
||||
|
||||
Please consider using `web-download-command` for `importfeed` too, introducing a dedicated option `web-get-command` (that would output to stdout rather than %file), or otherwise supporting operation behind a SOCKS proxy.
|
||||
|
||||
> Closing this, since the http library git-annex uses supports socks
|
||||
> proxies via environment settings, as far as I know. [[done]] --[[Joey]]
|
||||
|
|
214
doc/bugs/security_hole_private_data_exposure_via_addurl.mdwn
Normal file
214
doc/bugs/security_hole_private_data_exposure_via_addurl.mdwn
Normal file
|
@ -0,0 +1,214 @@
|
|||
CVE-2018-10857
|
||||
|
||||
This is a security hole that allows exposure of
|
||||
private data in files located outside the git-annex repository.
|
||||
|
||||
The attacker needs to have control over one of the remotes of the git-annex
|
||||
repository. For example, they may provide a public git-annex repository
|
||||
that the victim clones. Or the victim may have paired repositories with
|
||||
them. Or, equivalently, the attacker could have read access to the victim's
|
||||
git-annex repository (eg on a server somewhere), and some channel to get
|
||||
commits into it (eg a pull requests).
|
||||
|
||||
The attacker does `git-annex addurl --relaxed file:///etc/passwd` and commits
|
||||
this to the repository in some out of the way place. Then they wait for the
|
||||
victim to pull the change.
|
||||
|
||||
The easiest exploit is when the victim is running the assistant, or is
|
||||
periodically doing `git annex sync --content`. The victim may also perform
|
||||
the equivalent actions manually, not noticing they're operating on the
|
||||
file.
|
||||
|
||||
In either case, git-annex gets the content of the annexed file, following
|
||||
the file:// url. Then git-annex transfers the content back to the
|
||||
attacker's repository.
|
||||
|
||||
It may also be possible to exploit scp:// sftp:// smb:// etc urls to get at
|
||||
files on other computers that the user has access to as well as localhost.
|
||||
I was not able to get curl to download from scp:// or sftp:// on debian
|
||||
(unstable) but there may be configurations that allow it.
|
||||
|
||||
If the url is attached to a key using a cryptographic hash, then the
|
||||
attacker would need to already know at least the hash of the content
|
||||
to exploit this.
|
||||
|
||||
Sending that content back to them could be considered not a security
|
||||
hole. Except, I can guess what content some file on your system might have,
|
||||
and use this attack as an oracle to determine if I guessed right, and work
|
||||
toward penetrating your system using that information.
|
||||
|
||||
So, best to not treat addurl with a hash any differently than
|
||||
--relaxed and --fast when addressing this hole.
|
||||
|
||||
----
|
||||
|
||||
The fix must involve locking down the set of allowed in url schemes.
|
||||
Better to have a whitelist than a blacklist. http and https seems like the
|
||||
right default.
|
||||
|
||||
Some users do rely on file:// urls, and this is fine in some use cases,
|
||||
eg when you're not merging changes from anyone else.
|
||||
|
||||
So this probably needs to be a git config of allowed url schemes,
|
||||
with an appropriately scary name, like `annex.security.allowed-url-schemes`.
|
||||
|
||||
Redirects from one url scheme to another could be used to bypass such a
|
||||
whitelist. curl's `--proto` also affects redirects. http-conduit
|
||||
is limited to only http and will probably remain that way.
|
||||
|
||||
> done in [[!commit 28720c795ff57a55b48e56d15f9b6bcb977f48d9]] --[[Joey]]
|
||||
|
||||
----
|
||||
|
||||
The same kind of attack can be used to see the content of
|
||||
localhost urls and other non-globally-available urls.
|
||||
|
||||
Redirects and DNS rebinding attacks mean that checking the IP address
|
||||
of the hostname in the url is not good enough. It needs to check the IP
|
||||
address that is actually connected to, for each http connection made,
|
||||
including redirects.
|
||||
|
||||
There will need to be a config to relax checks, like
|
||||
with an appropriately scary name, like
|
||||
`annex.security.allowed-http-addresses`. Users will want to enable
|
||||
support for specific subnets, or perhaps allow all addresses.
|
||||
|
||||
When git-annex is configured to use curl, there seems to be no way
|
||||
to prevent curl from following urls that redirect to localhost, other than
|
||||
disabling redirections. And unless git-annex also pre-resolves the IP
|
||||
address and rewrites it into the url passed to curl, DNS rebinding attacks
|
||||
would still be possible. Also, one of the remaining reasons people enable
|
||||
curl is to use a netrc file with passwords, and the content of
|
||||
urls on those sites could also be exposed by this attack. So, it seems curl
|
||||
should not be enabled by default and should have a big security warning if
|
||||
it's supported at all. Probably ought to only enable it
|
||||
when `annex.security.allowed-http-addresses=all`
|
||||
|
||||
http-client does not have hooks that can be used to find out what IP
|
||||
address it's connecting to in a secure enough way.
|
||||
See <https://github.com/snoyberg/http-client/issues/354>
|
||||
|
||||
Seems that building my own http Manager is the only way to go. By building
|
||||
my own, I can do the IP address checks inside it when it's setting up
|
||||
connections. And, the same manager can be passed to the S3 and WebDav libraries.
|
||||
(The url scheme checks could also be moved into that Manager, to prevent
|
||||
S3 redirect to file: url scenarios..)
|
||||
|
||||
> restricted http manager done and used in
|
||||
> [[!commit b54b2cdc0ef1373fc200c0d28fded3c04fd57212]];
|
||||
> curl also disabled by default
|
||||
|
||||
http proxies are another problem. They could be on the local network,
|
||||
or even on localhost, and http-client does not provide a way to force
|
||||
a http proxy to connect to a particular IP address (is that even possible?)
|
||||
May have to disable use of http proxies unless
|
||||
`annex.security.allowed-http-addresses=all`
|
||||
Or better, find what http proxy is enabled and prevent using it if it's on
|
||||
an IP not allowed there.
|
||||
|
||||
> done in [[!commit cc08135e659d3ca9ea157246433d8aa90de3baf7]]
|
||||
|
||||
----
|
||||
|
||||
The external special remote interface is another way to exploit this.
|
||||
Since it bypasses git-annex's usual url download code, whatever fixes are
|
||||
put in place there won't help external special remotes.
|
||||
|
||||
External special remotes that use GETURLS, typically in conjunction with
|
||||
CLAIMURL and CHECKURL, and then themselves download the content of urls
|
||||
in response to a TRANSFER RETRIEVE will have the same problems as
|
||||
git-annex's url downloading.
|
||||
|
||||
An external special remote might also make a simple http request to a
|
||||
key/value API to download a key, and follow a redirect to file:///
|
||||
or http://localhost/.
|
||||
|
||||
If the key uses a cryptographic hash, git-annex verifies the content.
|
||||
But, the attacker could have committed a key that doesn't
|
||||
use a hash. Also, the attacker could use the hash check as an oracle,
|
||||
to guess at the content of files.
|
||||
|
||||
If the external special remote is encrypted, the http content is passed
|
||||
though gpg. So, whatever location an attacker redirects it to would also
|
||||
have to be encrypted. gpg is not told what encryption key the content is
|
||||
expected to be encrypted with. (Could it be told somehow for hybrid and
|
||||
shared encryption which key to use? pubkey encryption of course uses
|
||||
the regular gpg public key).
|
||||
|
||||
So if the attacker knows a file that the user has encrypted with any of
|
||||
their gpg keys, they can provide that file, and hope it will be decrypted.
|
||||
Note that this does not need a redirect to a local file or web server; the
|
||||
attacker can make their web server serve up a gpg encrypted file.
|
||||
This is a separate vulnerability and was assigned CVE-2018-10859.
|
||||
|
||||
So, content downloaded from encrypted special remotes (both internal and
|
||||
external) must be rejected unless it can be verified with a hash. Then
|
||||
content using WORM and URL keys would not be able to be downloaded from
|
||||
them. Might as well also require a hash check for non-encrypted external
|
||||
special remotes, to block the redirection attack. There could be a config
|
||||
setting to say that the git-annex repository is not being shared with
|
||||
untrusted third parties, and relax that check.
|
||||
|
||||
> done in [[!commit b657242f5d946efae4cc77e8aef95dd2a306cd6b]]
|
||||
|
||||
Could also tighten down the gpg decryption to only allow decrypting with
|
||||
the provided symmetric key, as a further protection against CVE-2018-10859.
|
||||
If this can be done, then only remotes with encryption=pubkey will
|
||||
really need to reject WORM and URL keys, since encryption=shared
|
||||
and encryption=hybrid use a symmetric key that's only used to encrypt data
|
||||
for that remote. Although opening those back up to WORM and URL would
|
||||
allow the remote sending other content stored on it, to get the wrong
|
||||
content decrypted. This seems unlikely to be a useful exploit in most
|
||||
cases, but perhaps not all cases, so probably best to not relax the
|
||||
rejection even when doing this. It's still worth doing as a belt and braces
|
||||
fix.
|
||||
|
||||
> AFAICS, gpg does not have a way to specify to decrypt with only a
|
||||
> symmetric encryption key. It could be done by running gpg in an
|
||||
> environment with an empty keyring, but gpg agent makes that difficult and
|
||||
> it would be added complexity. Decided not to do it.
|
||||
|
||||
----
|
||||
|
||||
Built-in special remotes that use protocols on top of http, eg S3 and WebDAV,
|
||||
don't use Utility.Url, could also be exploited, and will need to be fixed
|
||||
separately.
|
||||
|
||||
> not affected for url schemes, because http-client only supports http,
|
||||
> not file:/
|
||||
|
||||
> done for localhost/lan in [[!commit b54b2cdc0ef1373fc200c0d28fded3c04fd57212]]
|
||||
|
||||
youtube-dl
|
||||
|
||||
> already disabled file:/. Added a scheme check, but it won't block
|
||||
> redirects to other schemes. But youtube-dl goes off and does its own thing with other
|
||||
> protocols anyway, so that's fine.
|
||||
>
|
||||
> The youtube-dl generic extractor will download media files (including
|
||||
> videos and photos) if passed an direct url to them. It does not seem to
|
||||
> extract video etc from tags on html pages.
|
||||
|
||||
> git-annex first checks if a web page
|
||||
> is html before pointing youtube-dl at it, to avoid using it to download
|
||||
> direct urls to media files. But that would not prevent a DNS rebinding
|
||||
> attack which made git-annex see the html page, and youtube-dl then see
|
||||
> a media file on localhost.
|
||||
>
|
||||
> Also, the current code in htmlOnly
|
||||
> runs youtube-dl if the html page download test fails to download
|
||||
> anything.
|
||||
>
|
||||
> Also, in the course of a download, youtube-dl could chain to other urls,
|
||||
> depending on how its extractor works. Those urls could redirect to
|
||||
> a localhost/lan web server.
|
||||
>
|
||||
> So, youtube-dl needs to be disabled unless http address security checks
|
||||
> are turned off.
|
||||
>
|
||||
> > done in [[!commit e62c4543c31a61186ebf2e4e0412df59fc8630c8]]
|
||||
|
||||
|
||||
----
|
||||
|
||||
Both security holes are now fixed. [[done]] --[[Joey]]
|
27
doc/devblog/day_499__security_hole.mdwn
Normal file
27
doc/devblog/day_499__security_hole.mdwn
Normal file
|
@ -0,0 +1,27 @@
|
|||
I'm writing this on a private branch, it won't be posted until a week from
|
||||
now when the security hole is disclosed.
|
||||
|
||||
Security is not compositional. You can have one good feature, and add
|
||||
another good feature, and the result is not two good features, but a new
|
||||
security hole. In this case
|
||||
[[bugs/security_hole_private_data_exposure_via_addurl]] (CVE-2018-10857).
|
||||
And it can be hard to spot this kind of security hole, but then once it's
|
||||
known it seems blindly obvious.
|
||||
|
||||
It came to me last night and by this morning I had decided the potential
|
||||
impact was large enough to do a coordinated disclosure. Spent the first
|
||||
half of the day thinking through ways to fix it that don't involve writing
|
||||
my own http library. Then started getting in touch with all the
|
||||
distributions' security teams. And then coded up a fairly complete fix for
|
||||
the worst part of the security hole, although a secondary part is going to
|
||||
need considerably more work.
|
||||
|
||||
It looks like the external special remotes are going to need at least some
|
||||
security review too, and I'm still thinking that part of the problem over.
|
||||
|
||||
Exhausted.
|
||||
|
||||
Today's work was sponsored by Trenton Cronholm
|
||||
[on Patreon](https://patreon.com/joeyh).
|
||||
|
||||
[[!meta date="Jun 15 2018 7:00 pm"]]
|
26
doc/devblog/day_500__security_hole_part_2.mdwn
Normal file
26
doc/devblog/day_500__security_hole_part_2.mdwn
Normal file
|
@ -0,0 +1,26 @@
|
|||
Most of the day was spent staring at the http-client source code and trying
|
||||
to find a way to add the IP address checks to it that I need to fully close
|
||||
the security hole.
|
||||
|
||||
In the end, I did find a way, with the duplication of a couple dozen lines
|
||||
of code from http-client. It will let the security fix be used with
|
||||
libraries like aws and DAV that build on top of http-client, too.
|
||||
|
||||
While the code is in git-annex for now, it's fully disconnected and
|
||||
would also be useful if a web browser were implemented in Haskell,
|
||||
to implement same-origin restrictions while avoiding DNS rebinding attacks.
|
||||
|
||||
Looks like http proxies and curl will need to be disabled by default,
|
||||
since this fix can't support either of them securely. I wonder how web
|
||||
browsers deal with http proxies, DNS rebinding attacks and same-origin?
|
||||
I can't think of a secure way.
|
||||
|
||||
Next I need a function that checks if an IP address is a link-local address
|
||||
or a private network address. For both ipv4 and ipv6. Could not find
|
||||
anything handy on hackage, so I'm gonna have to stare at some RFCs. Perhaps
|
||||
this evening, for now, it's time to swim in the river.
|
||||
|
||||
Today's work was sponsored by Jake Vosloo
|
||||
[on Patreon](https://patreon.com/joeyh)
|
||||
|
||||
[[!meta date="June 16 2018 4:00 pm"]]
|
13
doc/devblog/day_501__security_hole_part_3.mdwn
Normal file
13
doc/devblog/day_501__security_hole_part_3.mdwn
Normal file
|
@ -0,0 +1,13 @@
|
|||
Got the IP address restrictions for http implemented. (Except for http
|
||||
proxies.)
|
||||
|
||||
Unfortunately as part of this, had to make youtube-dl and curl not be used
|
||||
by default. The annex.security.allowed-http-addresses config has to be
|
||||
opened up by the user in order to use those external commands, since they
|
||||
can follow arbitrary redirects.
|
||||
|
||||
Also thought some more about how external special remotes might be
|
||||
affected, and sent their authors' a heads-up.
|
||||
|
||||
[[!meta date="June 17 2018 4:00 pm"]]
|
||||
|
24
doc/devblog/day_502__security_hole_part_4.mdwn
Normal file
24
doc/devblog/day_502__security_hole_part_4.mdwn
Normal file
|
@ -0,0 +1,24 @@
|
|||
Spent several hours dealing with the problem of http proxies, which
|
||||
bypassed the IP address checks added to prevent the security hole.
|
||||
Eventually got it filtering out http proxies located on private IP
|
||||
addresses.
|
||||
|
||||
Other than the question of what to do about external special remotes
|
||||
that may be vulnerable to related problems, it looks like the security
|
||||
hole is all closed off in git-annex now.
|
||||
|
||||
Added a new page [[security]] with details of this and past security holes
|
||||
in git-annex.
|
||||
|
||||
Several people I reached out to for help with special remotes have gotten
|
||||
back to me, and we're discussing how the security hole may affect them and
|
||||
what to do. Thanks especially to Robie Basak and Daniel Dent for their
|
||||
work on security analysis.
|
||||
|
||||
Also prepared a minimal backport of the security fixes for the git-annex in
|
||||
Debian stable, which will probably be more palatable to their security team
|
||||
than the full 2000+ lines of patches I've developed so far.
|
||||
The minimal fix is secure, but suboptimal; it prevents even safe urls from
|
||||
being downloaded from the web special remote by default.
|
||||
|
||||
[[!meta date="June 18 2018 4:00 pm"]]
|
19
doc/devblog/day_503__security_hole_part_5.mdwn
Normal file
19
doc/devblog/day_503__security_hole_part_5.mdwn
Normal file
|
@ -0,0 +1,19 @@
|
|||
Started testing that the security fix will build everywhere on
|
||||
release day. This is being particularly painful for the android build,
|
||||
which has very old libraries and needed http-client updated, with many
|
||||
follow-on changes, and is not successfully building yet after 5 hours.
|
||||
I really need to finish deprecating the android build.
|
||||
|
||||
Pretty exhausted from all this, and thinking what to do about
|
||||
external special remotes, I elaborated on an idea that Daniel Dent had
|
||||
raised in discussions about vulnerability, and realized that git-annex
|
||||
has a second, worse vulnerability. This new one could be used to trick a
|
||||
git-annex user into decrypting gpg encrypted data that they had
|
||||
never stored in git-annex. The attacker needs to have control of both an
|
||||
encrypted special remote and a git remote, so it's not an easy exploit to
|
||||
pull off, but it's still super bad.
|
||||
|
||||
This week is going to be a lot longer than I thought, and it's already
|
||||
feeling kind of endless..
|
||||
|
||||
[[!meta date="June 19 2018 8:00 pm"]]
|
10
doc/devblog/day_504__security_hole_part_6.mdwn
Normal file
10
doc/devblog/day_504__security_hole_part_6.mdwn
Normal file
|
@ -0,0 +1,10 @@
|
|||
Was getting dangerously close to burnt out, or exhaustion leading to
|
||||
mistakes, so yesterday I took the day off, aside from spending the morning
|
||||
babysitting the android build every half hour. (It did finally succeed.)
|
||||
|
||||
Today, got back into it, and implemented a fix for CVE-2018-10859 and also
|
||||
the one case of CVE-2018-10857 that had not been dealt with before.
|
||||
This fix was really a lot easier than the previous fixes for
|
||||
CVE-2018-10857.
|
||||
Unfortunately this did mean not letting URL and WORM keys be downloaded
|
||||
from many special remotes by default, which is going to be painful for some.
|
|
@ -10,9 +10,12 @@ git annex addurl `[url ...]`
|
|||
|
||||
Downloads each url to its own file, which is added to the annex.
|
||||
|
||||
When `youtube-dl` is installed, it's used to check for a video embedded in
|
||||
a web page at the url, and that is added to the annex instead.
|
||||
|
||||
When `youtube-dl` is installed, it can be used to check for a video
|
||||
embedded in a web page at the url, and that is added to the annex instead.
|
||||
(However, this is disabled by default as it can be a security risk.
|
||||
See the documentation of annex.security.allowed-http-addresses
|
||||
in [[git-annex]](1) for details.)
|
||||
|
||||
Urls to torrent files (including magnet links) will cause the content of
|
||||
the torrent to be downloaded, using `aria2c`.
|
||||
|
||||
|
|
|
@ -13,8 +13,11 @@ content has not already been added to the repository before, so you can
|
|||
delete, rename, etc the resulting files and repeated runs won't duplicate
|
||||
them.
|
||||
|
||||
When `youtube-dl` is installed, it's used to download links in the feed.
|
||||
When `youtube-dl` is installed, it can be used to download links in the feed.
|
||||
This allows importing e.g., YouTube playlists.
|
||||
(However, this is disabled by default as it can be a security risk.
|
||||
See the documentation of annex.security.allowed-http-addresses
|
||||
in [[git-annex]](1) for details.)
|
||||
|
||||
To make the import process add metadata to the imported files from the feed,
|
||||
`git config annex.genmetadata true`
|
||||
|
|
|
@ -1228,7 +1228,7 @@ Here are all the supported configuration settings.
|
|||
|
||||
Note that even when this is set to `false`, git-annex does verification
|
||||
in some edge cases, where it's likely the case than an
|
||||
object was downloaded incorrectly.
|
||||
object was downloaded incorrectly, or when needed for security.
|
||||
|
||||
* `remote.<name>.annex-export-tracking`
|
||||
|
||||
|
@ -1356,12 +1356,16 @@ Here are all the supported configuration settings.
|
|||
|
||||
* `annex.web-options`
|
||||
|
||||
Setting this makes git-annex use curl to download urls
|
||||
Options to pass to curl when git-annex uses it to download urls
|
||||
(rather than the default built-in url downloader).
|
||||
|
||||
For example, to force IPv4 only, set it to "-4".
|
||||
Or to make curl use your ~/.netrc file, set it to "--netrc".
|
||||
|
||||
Setting this option makes git-annex use curl, but only
|
||||
when annex.security.allowed-http-addresses is configured in a
|
||||
specific way. See its documentation.
|
||||
|
||||
* `annex.youtube-dl-options`
|
||||
|
||||
Options to pass to youtube-dl when using it to find the url to download
|
||||
|
@ -1387,12 +1391,76 @@ Here are all the supported configuration settings.
|
|||
If set, the command is run and each line of its output is used as a HTTP
|
||||
header. This overrides annex.http-headers.
|
||||
|
||||
* `annex.web-download-command`
|
||||
* `annex.security.allowed-url-schemes`
|
||||
|
||||
Use to specify a command to run to download a file from the web.
|
||||
List of URL schemes that git-annex is allowed to download content from.
|
||||
The default is "http https ftp".
|
||||
|
||||
In the command line, %url is replaced with the url to download,
|
||||
and %file is replaced with the file that it should be saved to.
|
||||
Think very carefully before changing this; there are security
|
||||
implications. For example, if it's changed to allow "file" URLs, then
|
||||
anyone who can get a commit into your git-annex repository could
|
||||
`git-annex addurl` a pointer to a private file located outside that
|
||||
repository, possibly causing it to be copied into your repository
|
||||
and transferred on to other remotes, exposing its content.
|
||||
|
||||
Some special remotes support their own domain-specific URL
|
||||
schemes; those are not affected by this configuration setting.
|
||||
|
||||
* `annex.security.allowed-http-addresses`
|
||||
|
||||
By default, git-annex only makes HTTP connections to public IP addresses;
|
||||
it will refuse to use HTTP servers on localhost or on a private network.
|
||||
|
||||
This setting can override that behavior, allowing access to particular
|
||||
IP addresses. For example "127.0.0.1 ::1" allows access to localhost
|
||||
(both IPV4 and IPV6). To allow access to all IP addresses, use "all".
|
||||
|
||||
Think very carefully before changing this; there are security
|
||||
implications. Anyone who can get a commit into your git-annex repository
|
||||
could `git annex addurl` an url on a private http server, possibly
|
||||
causing it to be downloaded into your repository and transferred to
|
||||
other remotes, exposing its content.
|
||||
|
||||
Note that, since the interfaces of curl and youtube-dl do not allow
|
||||
these IP address restrictions to be enforced, curl and youtube-dl will
|
||||
never be used unless annex.security.allowed-http-addresses=all.
|
||||
|
||||
* `annex.security.allow-unverified-downloads`,
|
||||
|
||||
For security reasons, git-annex refuses to download content from
|
||||
most special remotes when it cannot check a hash to verify
|
||||
that the correct content was downloaded. This particularly impacts
|
||||
downloading the content of URL or WORM keys, which lack hashes.
|
||||
|
||||
The best way to avoid problems due to this is to migrate files
|
||||
away from such keys, before their content reaches a special remote.
|
||||
See [[git-annex-migrate]](1).
|
||||
|
||||
When the content is only available from a special remote, you can
|
||||
use this configuration to force git-annex to download it.
|
||||
But you do so at your own risk, and it's very important you read and
|
||||
understand the information below first!
|
||||
|
||||
Downloading unverified content from encrypted special remotes is
|
||||
prevented, because the special remote could send some other encrypted
|
||||
content than what you expect, causing git-annex to decrypt data that you
|
||||
never checked into git-annex, and risking exposing the decrypted
|
||||
data to any non-encrypted remotes you send content to.
|
||||
|
||||
Downloading unverified content from (non-encrypted)
|
||||
external special remotes is prevented, because they could follow
|
||||
http redirects to web servers on localhost or on a private network,
|
||||
or in some cases to a file:/// url.
|
||||
|
||||
If you decide to bypass this security check, the best thing to do is
|
||||
to only set it temporarily while running the command that gets the file.
|
||||
The value to set the config to is "ACKTHPPT".
|
||||
For example:
|
||||
|
||||
git -c annex.security.allow-unverified-downloads=ACKTHPPT annex get myfile
|
||||
|
||||
It would be a good idea to check that it downloaded the file you expected,
|
||||
too.
|
||||
|
||||
* `annex.secure-erase-command`
|
||||
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
[[!if test="news/*" then="""
|
||||
This is where announcements of new releases, features, and other news is
|
||||
posted. git-annex users are recommended to subscribe to this page's RSS
|
||||
feed.
|
||||
feed. Also, see [[security]] for security announcements.
|
||||
|
||||
[[!inline pages="./news/* and !./news/*/* and !*/Discussion" rootpage="news" show="30"]]
|
||||
|
||||
|
|
4
doc/security.mdwn
Normal file
4
doc/security.mdwn
Normal file
|
@ -0,0 +1,4 @@
|
|||
Information about all known security holes in git-annex, and their fixes.
|
||||
Subscribe to the RSS feed to be kept up to date.
|
||||
|
||||
[[!inline pages="./security/* and !./security/*/* and !*/Discussion" rootpage="security" show="0"]]
|
10
doc/security/CVE-2014-6274.mdwn
Normal file
10
doc/security/CVE-2014-6274.mdwn
Normal file
|
@ -0,0 +1,10 @@
|
|||
CVE-2014-6274: Security fix for S3 and glacier when using embedcreds=yes with
|
||||
encryption=pubkey or encryption=hybrid.
|
||||
|
||||
The creds embedded in the git repo were *not* encrypted.
|
||||
git-annex enableremote will warn when used on a remote that has
|
||||
this problem. For details, see [[upgrades/insecure_embedded_creds]].
|
||||
|
||||
Fixed in git-annex 5.20140919.
|
||||
|
||||
[[!meta date="Fri, 19 Sep 2014 12:53:42 -0400"]]
|
10
doc/security/CVE-2017-12976.mdwn
Normal file
10
doc/security/CVE-2017-12976.mdwn
Normal file
|
@ -0,0 +1,10 @@
|
|||
CVE-2017-12976: A hostname starting with a dash would get passed to ssh and be treated as
|
||||
an option. This could be used by an attacker who provides a crafted
|
||||
repository url to cause the victim to execute arbitrary code via
|
||||
`-oProxyCommand`.
|
||||
|
||||
Fixed in git-annex 6.20170818
|
||||
|
||||
This is related to a git security hole, [CVE-2017-1000117](https://marc.info/?l=git&m=150238802328673&w=2).
|
||||
|
||||
[[!meta date="Fri, 18 Aug 2017 11:19:06 -0400"]]
|
92
doc/security/CVE-2018-10857_and_CVE-2018-10859.mdwn
Normal file
92
doc/security/CVE-2018-10857_and_CVE-2018-10859.mdwn
Normal file
|
@ -0,0 +1,92 @@
|
|||
CVE-2018-10857: Some uses of git-annex were vulnerable to a private data
|
||||
exposure and exfiltration attack. It could expose the content of files
|
||||
located outside the git-annex repository, or content from a private web
|
||||
server on localhost or the LAN. Joey Hess discovered this attack.
|
||||
|
||||
CVE-2018-10859: A malicious server for a special remote could
|
||||
trick git-annex into decrypting a file that was encrypted to the user's gpg
|
||||
key. This attack could be used to expose encrypted data that was never
|
||||
stored in git-annex. Daniel Dent discovered this attack in collaboration
|
||||
with Joey Hess.
|
||||
|
||||
These security holes were fixed in git-annex 6.20180626. After upgrading
|
||||
git-annex, you should restart any git-annex assistant processes.
|
||||
|
||||
Also, some external special remotes (plugins) were improved, as a second
|
||||
line of defense against CVE-2018-10857:
|
||||
|
||||
* git-annex-remote-pcloud 0.0.2 (thanks to Tocho Tochev)
|
||||
|
||||
## attack descriptions
|
||||
|
||||
To perform these attacks, the attacker needs to have control over one of
|
||||
the remotes of the victim's git-annex repository. For example, they may
|
||||
provide a public git-annex repository that the victim clones. Or,
|
||||
equivalently, the attacker could have read access to the victim's git-annex
|
||||
repository or a repository it pushes to, and some channel to get commits
|
||||
into it (eg pull requests).
|
||||
|
||||
These exploits are most likely to succeed when the victim is running the
|
||||
git-annex assistant, or is periodically running `git annex sync --content`.
|
||||
|
||||
To perform the private data exfiltration attack (CVE-2018-10857), the
|
||||
attacker runs `git-annex addurl --relaxed file:///etc/passwd` and commits
|
||||
this to the repository in some out of the way place. After the victim's
|
||||
git repository receives that change, git-annex follows the attacker-provided
|
||||
url to private data, which it stores in the git-annex repository.
|
||||
From there it transfers the content to the git-annex repository that
|
||||
the attacker has access to.
|
||||
|
||||
(As well as `file:///` urls, the attacker can use urls to private web
|
||||
servers. The url can also be one that the attacker controls, that redirects
|
||||
to such urls.)
|
||||
|
||||
To perform the gpg decryption attack (CVE-2018-10859), the attacker
|
||||
additionally needs to have control of the server hosting an encrypted
|
||||
special remote used by the victim's git-annex repository. The attacker uses
|
||||
`git annex addurl --relaxed` with an innocuous url, and waits for the
|
||||
user's git-annex to download it, and upload an (encrypted) copy to the
|
||||
special remote they also control. At some later point, when the user
|
||||
downloads the content from the special remote, the attacker instead sends
|
||||
them the content of a gpg encrypted file that they wish to have decrypted
|
||||
in its place. Finally, the attacker drops their own copy of the original
|
||||
innocuous url, and waits for git-annex to send them the accidentally
|
||||
decrypted file.
|
||||
|
||||
## git-annex security fixes
|
||||
|
||||
CVE-2018-10857 was fixed by making git-annex refuse to follow `file:///` urls
|
||||
and urls pointing to private/local IP addresses by default. Two new
|
||||
configuration settings, annex.security.allowed-url-schemes and
|
||||
annex.security.allowed-http-addresses, can relax this security policy,
|
||||
and are intended for cases where the git-annex repository is kept
|
||||
private and so the attack does not apply.
|
||||
|
||||
CVE-2018-10859 was fixed by making git-annex refuse to download encrypted
|
||||
content from special remotes, unless it knows the hash of the expected
|
||||
content. When the attacker provides some other gpg encrypted content, it
|
||||
will fail the hash check and be discarded.
|
||||
|
||||
External special remotes (plugins) that use HTTP/HTTPS could also be
|
||||
attacked using the CVE-2018-10857 method, if the attacker additionally has
|
||||
control of the server they connect to. To prevent such attacks,
|
||||
git-annex refuses to download content from external special remotes unless
|
||||
it can verify the hash of that content.
|
||||
|
||||
## impact on external special remotes
|
||||
|
||||
One variant of CVE-2018-10857 can exploit a vulnerable external special
|
||||
remote, and could not be prevented by git-annex. (git-annex's own built-in
|
||||
special remotes are not vulnerable to this attack.)
|
||||
|
||||
In this attack variant, the attacker guesses at the hash of a file stored
|
||||
on the victim's private web server, and adds it to the git-annex
|
||||
repository. The attacker also has control of the server hosting an
|
||||
encrypted special remote used by the victim's git-annex repository. They
|
||||
cause that server to redirect to the victim's web server. This allows the
|
||||
attacker to verify if the victim's web server contains a file that the
|
||||
attacker already knows the content of, assuming they can guess the URL to
|
||||
it.
|
||||
|
||||
Developers of external special remotes are encouraged to prevent this
|
||||
attack by not following such HTTP redirects.
|
|
@ -0,0 +1,12 @@
|
|||
A bug exposed the checksum of annexed files to encrypted
|
||||
special remotes, which are not supposed to have access to the checksum of
|
||||
the un-encrypted file. This only occurred when resuming uploads to the
|
||||
encrypted special remote, so it is considered a low-severity security hole.
|
||||
|
||||
For details, see [[!commit b890f3a53d936b5e40aa9acc5876cb98f18b9657]]
|
||||
|
||||
No CVE was assigned for this issue.
|
||||
|
||||
Fixed in git-annex 6.20160419
|
||||
|
||||
[[!meta date="Thu, 28 Apr 2016 09:31:14 -0400"]]
|
|
@ -123,7 +123,7 @@ while read line; do
|
|||
url="$2"
|
||||
# List contents of torrent.
|
||||
tmp=$(mktemp)
|
||||
if ! runcmd curl -o "$tmp" "$url"; then
|
||||
if ! runcmd curl --proto -all,http,https -o "$tmp" "$url"; then
|
||||
echo CHECKURL-FAILURE
|
||||
else
|
||||
oldIFS="$IFS"
|
||||
|
@ -166,7 +166,7 @@ while read line; do
|
|||
echo TRANSFER-FAILURE RETRIEVE "$key" "no known torrent urls for this key"
|
||||
else
|
||||
tmp=$(mktemp)
|
||||
if ! runcmd curl -o "$tmp" "$url"; then
|
||||
if ! runcmd curl --proto -all,http,https -o "$tmp" "$url"; then
|
||||
echo TRANSFER-FAILURE RETRIEVE "$key" "failed downloading torrent file from $url"
|
||||
else
|
||||
filenum="$(echo "$url" | sed 's/(.*#\(\d*\)/\1/')"
|
||||
|
|
|
@ -6,6 +6,6 @@ See [[tips/using_the_web_as_a_special_remote]] for usage examples.
|
|||
Currently git-annex only supports downloading content from the web;
|
||||
it cannot upload to it or remove content.
|
||||
|
||||
This special remote uses arbitrary urls on the web as the source for content.
|
||||
This special remote uses urls on the web as the source for content.
|
||||
git-annex can also download content from a normal git remote, accessible by
|
||||
http.
|
||||
|
|
|
@ -5,4 +5,7 @@
|
|||
date="2013-08-17T08:59:11Z"
|
||||
content="""
|
||||
When it says \"arbitrary urls\", it means it. The only requirement is that the url be well formed and that wget or whatever command you have it configured to use via annex.web-download-command knows how to download it.
|
||||
|
||||
Update 2018: That used to be the case, but it's now limited by default to
|
||||
http and https urls.
|
||||
"""]]
|
||||
|
|
|
@ -11,12 +11,6 @@ The first step is to install the Firefox plugin
|
|||
[FlashGot](http://flashgot.net/). We will use it to provide the Firefox
|
||||
shortcuts to add things to our annex.
|
||||
|
||||
We also need a normal download manager, if we want to get status updates as
|
||||
the download is done. We'll need to configure git-annex to use it by
|
||||
setting `annex.web-download-command` as Joey describes in his comment on
|
||||
[[todo/wishlist: allow configuration of downloader for addurl]]. See the
|
||||
manpage [[git-annex]] for more information on setting configuration.
|
||||
|
||||
Once we have installed all that, we need a script that has an interface
|
||||
which FlashGot can treat as a downloader, but which calls git-annex to do
|
||||
the actual downloading. Such a script is available from
|
||||
|
|
|
@ -1,8 +0,0 @@
|
|||
[[!comment format=mdwn
|
||||
username="http://joeyh.name/"
|
||||
nickname="joey"
|
||||
subject="comment 1"
|
||||
date="2013-04-11T20:16:02Z"
|
||||
content="""
|
||||
As of my last commit, you don't really need a separate download manager. The webapp will now display urls that `git annex addurl` is downloading in among the other transfers.
|
||||
"""]]
|
|
@ -84,6 +84,10 @@ manually. For a channel url like
|
|||
"https://www.youtube.com/channel/$foo", the
|
||||
feed is "https://www.youtube.com/feeds/videos.xml?channel_id=$foo"
|
||||
|
||||
Use of youtube-dl is disabled by default as it can be a security risk.
|
||||
See the documentation of annex.security.allowed-http-addresses
|
||||
in [[git-annex]] for details.
|
||||
|
||||
## metadata
|
||||
|
||||
As well as storing the urls for items imported from a feed, git-annex can
|
||||
|
|
|
@ -78,6 +78,10 @@ When you have youtube-dl installed, you can just
|
|||
`git annex addurl http://youtube.com/foo` and it will detect that
|
||||
it is a video and download the video content for offline viewing.
|
||||
|
||||
(However, this is disabled by default as it can be a security risk.
|
||||
See the documentation of annex.security.allowed-http-addresses
|
||||
in [[git-annex]] for details.)
|
||||
|
||||
Later, in another clone of the repository, you can run `git annex get` on
|
||||
the file and it will also be downloaded with youtube-dl. This works
|
||||
even if the video host has transcoded or otherwise changed the video
|
||||
|
|
|
@ -22,3 +22,10 @@ what about the other settings, is it okay to hardcode those?
|
|||
|
||||
maybe this would be easier if there would be an options override just
|
||||
like rsync, but separate ones for curl and wget... --[[anarcat]]
|
||||
|
||||
> git-annex now only uses curl, and defaults to a built-in http downloader.
|
||||
> The annex.web-download-command is no longer supported. annex.web-options
|
||||
> can be used to pass options to curl.
|
||||
>
|
||||
> So, I don't think this todo is relevant anymore, closing [[done]].
|
||||
> --[[Joey]]
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
Name: git-annex
|
||||
Version: 6.20180529
|
||||
Version: 6.20180626
|
||||
Cabal-Version: >= 1.8
|
||||
License: GPL-3
|
||||
Maintainer: Joey Hess <id@joeyh.name>
|
||||
|
@ -340,7 +340,9 @@ Executable git-annex
|
|||
bloomfilter,
|
||||
edit-distance,
|
||||
resourcet,
|
||||
http-client,
|
||||
connection (>= 0.2.6),
|
||||
http-client (>= 0.4.31),
|
||||
http-client-tls,
|
||||
http-types (>= 0.7),
|
||||
http-conduit (>= 2.0),
|
||||
conduit,
|
||||
|
@ -1032,9 +1034,11 @@ Executable git-annex
|
|||
Utility.Gpg
|
||||
Utility.Hash
|
||||
Utility.HtmlDetect
|
||||
Utility.HttpManagerRestricted
|
||||
Utility.HumanNumber
|
||||
Utility.HumanTime
|
||||
Utility.InodeCache
|
||||
Utility.IPAddress
|
||||
Utility.LinuxMkLibs
|
||||
Utility.LockFile
|
||||
Utility.LockFile.LockStatus
|
||||
|
|
|
@ -1,4 +1,11 @@
|
|||
constraints: unix installed,
|
||||
blaze-html ==0.8.1.3,
|
||||
blaze-markup ==0.7.0.3,
|
||||
basement ==0.0.7,
|
||||
memory ==0.14.9,
|
||||
connection ==0.2.8,
|
||||
aws ==0.13.0,
|
||||
lifted-base ==0.2.3.6,
|
||||
Crypto ==4.2.5.1,
|
||||
binary ==0.7.6.1,
|
||||
DAV ==1.0.3,
|
||||
|
@ -6,16 +13,16 @@ constraints: unix installed,
|
|||
HUnit ==1.2.5.2,
|
||||
IfElse ==0.85,
|
||||
MissingH ==1.2.1.0,
|
||||
directory ==1.2.2.0,
|
||||
directory ==1.2.2.0,
|
||||
MonadRandom ==0.1.13,
|
||||
QuickCheck ==2.7.6,
|
||||
SafeSemaphore ==0.10.1,
|
||||
aeson ==0.7.0.6,
|
||||
ansi-wl-pprint ==0.6.7.1,
|
||||
appar ==0.1.4,
|
||||
asn1-encoding ==0.8.1.3,
|
||||
asn1-parse ==0.8.1,
|
||||
asn1-types ==0.2.3,
|
||||
asn1-encoding ==0.9.5,
|
||||
asn1-parse ==0.9.4,
|
||||
asn1-types ==0.3.2,
|
||||
async ==2.0.1.5,
|
||||
attoparsec ==0.11.3.4,
|
||||
attoparsec-conduit ==1.1.0,
|
||||
|
@ -36,7 +43,6 @@ constraints: unix installed,
|
|||
comonad ==4.2,
|
||||
conduit ==1.1.6,
|
||||
conduit-extra ==1.1.3,
|
||||
connection ==0.2.3,
|
||||
contravariant ==0.6.1.1,
|
||||
cookie ==0.4.1.2,
|
||||
cprng-aes ==0.5.2,
|
||||
|
@ -44,7 +50,7 @@ constraints: unix installed,
|
|||
crypto-cipher-types ==0.0.9,
|
||||
crypto-numbers ==0.2.3,
|
||||
crypto-pubkey ==0.2.4,
|
||||
crypto-pubkey-types ==0.4.2.2,
|
||||
crypto-pubkey-types ==0.4.2.3,
|
||||
crypto-random ==0.0.7,
|
||||
cryptohash ==0.11.6,
|
||||
cryptohash-conduit ==0.1.1,
|
||||
|
@ -80,19 +86,19 @@ constraints: unix installed,
|
|||
hashable ==1.2.1.0,
|
||||
hinotify ==0.3.5,
|
||||
hjsmin ==0.1.4.7,
|
||||
hslogger ==1.2.1,
|
||||
http-client ==0.4.11.1,
|
||||
http-client-tls ==0.2.2,
|
||||
http-conduit ==2.1.5,
|
||||
http-date ==0.0.2,
|
||||
http-types ==0.8.5,
|
||||
blaze-builder ==0.3.3.2,
|
||||
hslogger ==1.2.10,
|
||||
http-client ==0.4.31.1,
|
||||
http-client-tls ==0.2.4.1,
|
||||
http-conduit ==2.1.9,
|
||||
http-date ==0.0.6.1,
|
||||
http-types ==0.9.1,
|
||||
blaze-builder ==0.3.3.2,
|
||||
hxt ==9.3.1.4,
|
||||
hxt-charproperties ==9.1.1.1,
|
||||
hxt-regex-xmlschema ==9.0.4,
|
||||
hxt-unicode ==9.0.2.2,
|
||||
idna ==0.2,
|
||||
iproute ==1.2.11,
|
||||
iproute ==1.3.1,
|
||||
json ==0.5,
|
||||
keys ==3.10.1,
|
||||
language-javascript ==0.5.13,
|
||||
|
@ -107,7 +113,7 @@ constraints: unix installed,
|
|||
monads-tf ==0.1.0.2,
|
||||
mtl ==2.1.2,
|
||||
nats ==0.1.2,
|
||||
network ==2.4.1.2,
|
||||
network ==2.6.3.1
|
||||
network-conduit ==1.1.0,
|
||||
network-info ==0.2.0.5,
|
||||
network-multicast ==0.0.10,
|
||||
|
@ -141,7 +147,7 @@ constraints: unix installed,
|
|||
silently ==1.2.4.1,
|
||||
simple-sendfile ==0.2.14,
|
||||
skein ==1.0.9,
|
||||
socks ==0.5.4,
|
||||
socks ==0.5.5,
|
||||
split ==0.2.2,
|
||||
stm ==2.4.2,
|
||||
stm-chans ==3.0.0.2,
|
||||
|
@ -157,7 +163,7 @@ constraints: unix installed,
|
|||
text ==1.1.1.0,
|
||||
text-icu ==0.6.3.7,
|
||||
tf-random ==0.5,
|
||||
tls ==1.2.9,
|
||||
tls ==1.3.8,
|
||||
transformers ==0.3.0.0,
|
||||
transformers-base ==0.4.1,
|
||||
transformers-compat ==0.3.3.3,
|
||||
|
@ -174,13 +180,13 @@ constraints: unix installed,
|
|||
wai-app-static ==3.0.0.1,
|
||||
wai-extra ==3.0.1.2,
|
||||
wai-logger ==2.1.1,
|
||||
warp ==3.0.0.5,
|
||||
warp-tls ==3.0.0,
|
||||
warp ==3.0.8,
|
||||
warp-tls ==3.0.4.2,
|
||||
word8 ==0.1.1,
|
||||
x509 ==1.4.11,
|
||||
x509-store ==1.4.4,
|
||||
x509-system ==1.4.5,
|
||||
x509-validation ==1.5.0,
|
||||
x509 ==1.6.5,
|
||||
x509-store ==1.6.3,
|
||||
x509-system ==1.6.6,
|
||||
x509-validation ==1.6.8,
|
||||
xml ==1.3.13,
|
||||
xml-conduit ==1.2.1,
|
||||
xml-hamlet ==0.4.0.9,
|
||||
|
@ -197,6 +203,7 @@ constraints: unix installed,
|
|||
yesod-static ==1.2.4,
|
||||
zlib ==0.5.4.1,
|
||||
bytestring installed,
|
||||
process ==1.2.3.0,
|
||||
scientific ==0.3.3.1,
|
||||
clock ==0.4.6.0
|
||||
cryptonite ==cryptonite-0.16
|
||||
clock ==0.4.6.0,
|
||||
cryptonite ==0.15
|
||||
|
|
|
@ -0,0 +1,41 @@
|
|||
From cc0c373b69f93057cbdcb634a461e10ec019d87a Mon Sep 17 00:00:00 2001
|
||||
From: androidbuilder <androidbuilder@example.com>
|
||||
Date: Wed, 20 Jun 2018 00:29:11 +0100
|
||||
Subject: [PATCH] fix build on android
|
||||
|
||||
---
|
||||
Basement/Terminal.hs | 2 --
|
||||
basement.cabal | 1 -
|
||||
2 files changed, 3 deletions(-)
|
||||
|
||||
diff --git a/Basement/Terminal.hs b/Basement/Terminal.hs
|
||||
index 8136e52..cca9606 100644
|
||||
--- a/Basement/Terminal.hs
|
||||
+++ b/Basement/Terminal.hs
|
||||
@@ -1,11 +1,9 @@
|
||||
{-# LANGUAGE CPP #-}
|
||||
module Basement.Terminal
|
||||
( initialize
|
||||
- , getDimensions
|
||||
) where
|
||||
|
||||
import Basement.Compat.Base
|
||||
-import Basement.Terminal.Size (getDimensions)
|
||||
#ifdef mingw32_HOST_OS
|
||||
import System.IO (hSetEncoding, utf8, hPutStrLn, stderr, stdin, stdout)
|
||||
import System.Win32.Console (setConsoleCP, setConsoleOutputCP, getConsoleCP, getConsoleOutputCP)
|
||||
diff --git a/basement.cabal b/basement.cabal
|
||||
index af50291..0824c94 100644
|
||||
--- a/basement.cabal
|
||||
+++ b/basement.cabal
|
||||
@@ -135,7 +135,6 @@ library
|
||||
Basement.String.Encoding.ASCII7
|
||||
Basement.String.Encoding.ISO_8859_1
|
||||
|
||||
- Basement.Terminal.Size
|
||||
|
||||
|
||||
build-depends: base >= 4.7 && < 5
|
||||
--
|
||||
2.1.4
|
||||
|
|
@ -1,157 +0,0 @@
|
|||
From 7861b133bb269b50fcf709291449cb0473818902 Mon Sep 17 00:00:00 2001
|
||||
From: Joey Hess <joey@kitenet.net>
|
||||
Date: Sun, 29 Dec 2013 21:29:23 +0000
|
||||
Subject: [PATCH] remove Network.BSD symbols not available in bionic
|
||||
|
||||
---
|
||||
Network/BSD.hsc | 98 -------------------------------------------------------
|
||||
1 file changed, 98 deletions(-)
|
||||
|
||||
diff --git a/Network/BSD.hsc b/Network/BSD.hsc
|
||||
index d6dae85..27910f4 100644
|
||||
--- a/Network/BSD.hsc
|
||||
+++ b/Network/BSD.hsc
|
||||
@@ -30,15 +30,6 @@ module Network.BSD
|
||||
, getHostByAddr
|
||||
, hostAddress
|
||||
|
||||
-#if defined(HAVE_GETHOSTENT) && !defined(cygwin32_HOST_OS) && !defined(mingw32_HOST_OS) && !defined(_WIN32)
|
||||
- , getHostEntries
|
||||
-
|
||||
- -- ** Low level functionality
|
||||
- , setHostEntry
|
||||
- , getHostEntry
|
||||
- , endHostEntry
|
||||
-#endif
|
||||
-
|
||||
-- * Service names
|
||||
, ServiceEntry(..)
|
||||
, ServiceName
|
||||
@@ -64,14 +55,6 @@ module Network.BSD
|
||||
, getProtocolNumber
|
||||
, defaultProtocol
|
||||
|
||||
-#if !defined(cygwin32_HOST_OS) && !defined(mingw32_HOST_OS) && !defined(_WIN32)
|
||||
- , getProtocolEntries
|
||||
- -- ** Low level functionality
|
||||
- , setProtocolEntry
|
||||
- , getProtocolEntry
|
||||
- , endProtocolEntry
|
||||
-#endif
|
||||
-
|
||||
-- * Port numbers
|
||||
, PortNumber
|
||||
|
||||
@@ -83,11 +66,7 @@ module Network.BSD
|
||||
#if !defined(cygwin32_HOST_OS) && !defined(mingw32_HOST_OS) && !defined(_WIN32)
|
||||
, getNetworkByName
|
||||
, getNetworkByAddr
|
||||
- , getNetworkEntries
|
||||
-- ** Low level functionality
|
||||
- , setNetworkEntry
|
||||
- , getNetworkEntry
|
||||
- , endNetworkEntry
|
||||
#endif
|
||||
) where
|
||||
|
||||
@@ -303,31 +282,6 @@ getProtocolNumber proto = do
|
||||
(ProtocolEntry _ _ num) <- getProtocolByName proto
|
||||
return num
|
||||
|
||||
-#if !defined(cygwin32_HOST_OS) && !defined(mingw32_HOST_OS) && !defined(_WIN32)
|
||||
-getProtocolEntry :: IO ProtocolEntry -- Next Protocol Entry from DB
|
||||
-getProtocolEntry = withLock $ do
|
||||
- ent <- throwNoSuchThingIfNull "getProtocolEntry" "no such protocol entry"
|
||||
- $ trySysCall c_getprotoent
|
||||
- peek ent
|
||||
-
|
||||
-foreign import ccall unsafe "getprotoent" c_getprotoent :: IO (Ptr ProtocolEntry)
|
||||
-
|
||||
-setProtocolEntry :: Bool -> IO () -- Keep DB Open ?
|
||||
-setProtocolEntry flg = withLock $ trySysCall $ c_setprotoent (fromBool flg)
|
||||
-
|
||||
-foreign import ccall unsafe "setprotoent" c_setprotoent :: CInt -> IO ()
|
||||
-
|
||||
-endProtocolEntry :: IO ()
|
||||
-endProtocolEntry = withLock $ trySysCall $ c_endprotoent
|
||||
-
|
||||
-foreign import ccall unsafe "endprotoent" c_endprotoent :: IO ()
|
||||
-
|
||||
-getProtocolEntries :: Bool -> IO [ProtocolEntry]
|
||||
-getProtocolEntries stayOpen = withLock $ do
|
||||
- setProtocolEntry stayOpen
|
||||
- getEntries (getProtocolEntry) (endProtocolEntry)
|
||||
-#endif
|
||||
-
|
||||
-- ---------------------------------------------------------------------------
|
||||
-- Host lookups
|
||||
|
||||
@@ -402,31 +356,6 @@ getHostByAddr family addr = do
|
||||
foreign import CALLCONV safe "gethostbyaddr"
|
||||
c_gethostbyaddr :: Ptr HostAddress -> CInt -> CInt -> IO (Ptr HostEntry)
|
||||
|
||||
-#if defined(HAVE_GETHOSTENT) && !defined(cygwin32_HOST_OS) && !defined(mingw32_HOST_OS) && !defined(_WIN32)
|
||||
-getHostEntry :: IO HostEntry
|
||||
-getHostEntry = withLock $ do
|
||||
- throwNoSuchThingIfNull "getHostEntry" "unable to retrieve host entry"
|
||||
- $ trySysCall $ c_gethostent
|
||||
- >>= peek
|
||||
-
|
||||
-foreign import ccall unsafe "gethostent" c_gethostent :: IO (Ptr HostEntry)
|
||||
-
|
||||
-setHostEntry :: Bool -> IO ()
|
||||
-setHostEntry flg = withLock $ trySysCall $ c_sethostent (fromBool flg)
|
||||
-
|
||||
-foreign import ccall unsafe "sethostent" c_sethostent :: CInt -> IO ()
|
||||
-
|
||||
-endHostEntry :: IO ()
|
||||
-endHostEntry = withLock $ c_endhostent
|
||||
-
|
||||
-foreign import ccall unsafe "endhostent" c_endhostent :: IO ()
|
||||
-
|
||||
-getHostEntries :: Bool -> IO [HostEntry]
|
||||
-getHostEntries stayOpen = do
|
||||
- setHostEntry stayOpen
|
||||
- getEntries (getHostEntry) (endHostEntry)
|
||||
-#endif
|
||||
-
|
||||
-- ---------------------------------------------------------------------------
|
||||
-- Accessing network information
|
||||
|
||||
@@ -488,33 +417,6 @@ getNetworkByAddr addr family = withLock $ do
|
||||
foreign import ccall unsafe "getnetbyaddr"
|
||||
c_getnetbyaddr :: NetworkAddr -> CInt -> IO (Ptr NetworkEntry)
|
||||
|
||||
-getNetworkEntry :: IO NetworkEntry
|
||||
-getNetworkEntry = withLock $ do
|
||||
- throwNoSuchThingIfNull "getNetworkEntry" "no more network entries"
|
||||
- $ trySysCall $ c_getnetent
|
||||
- >>= peek
|
||||
-
|
||||
-foreign import ccall unsafe "getnetent" c_getnetent :: IO (Ptr NetworkEntry)
|
||||
-
|
||||
--- | Open the network name database. The parameter specifies
|
||||
--- whether a connection is maintained open between various
|
||||
--- networkEntry calls
|
||||
-setNetworkEntry :: Bool -> IO ()
|
||||
-setNetworkEntry flg = withLock $ trySysCall $ c_setnetent (fromBool flg)
|
||||
-
|
||||
-foreign import ccall unsafe "setnetent" c_setnetent :: CInt -> IO ()
|
||||
-
|
||||
--- | Close the connection to the network name database.
|
||||
-endNetworkEntry :: IO ()
|
||||
-endNetworkEntry = withLock $ trySysCall $ c_endnetent
|
||||
-
|
||||
-foreign import ccall unsafe "endnetent" c_endnetent :: IO ()
|
||||
-
|
||||
--- | Get the list of network entries.
|
||||
-getNetworkEntries :: Bool -> IO [NetworkEntry]
|
||||
-getNetworkEntries stayOpen = do
|
||||
- setNetworkEntry stayOpen
|
||||
- getEntries (getNetworkEntry) (endNetworkEntry)
|
||||
#endif
|
||||
|
||||
-- Mutex for name service lockdown
|
||||
--
|
||||
1.7.10.4
|
||||
|
|
@ -1,34 +0,0 @@
|
|||
From 478fc7ae42030c1345e75727e54e1f8f895d3e22 Mon Sep 17 00:00:00 2001
|
||||
From: dummy <dummy@example.com>
|
||||
Date: Wed, 15 Oct 2014 15:16:21 +0000
|
||||
Subject: [PATCH] avoid accept4
|
||||
|
||||
---
|
||||
Network/Socket.hsc | 4 ++--
|
||||
1 file changed, 2 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/Network/Socket.hsc b/Network/Socket.hsc
|
||||
index 2fe62ee..94db7a4 100644
|
||||
--- a/Network/Socket.hsc
|
||||
+++ b/Network/Socket.hsc
|
||||
@@ -511,7 +511,7 @@ accept sock@(MkSocket s family stype protocol status) = do
|
||||
#else
|
||||
with (fromIntegral sz) $ \ ptr_len -> do
|
||||
new_sock <-
|
||||
-# ifdef HAVE_ACCEPT4
|
||||
+#if 0
|
||||
throwSocketErrorIfMinus1RetryMayBlock "accept"
|
||||
(threadWaitRead (fromIntegral s))
|
||||
(c_accept4 s sockaddr ptr_len (#const SOCK_NONBLOCK))
|
||||
@@ -1602,7 +1602,7 @@ foreign import CALLCONV SAFE_ON_WIN "connect"
|
||||
c_connect :: CInt -> Ptr SockAddr -> CInt{-CSockLen???-} -> IO CInt
|
||||
foreign import CALLCONV unsafe "accept"
|
||||
c_accept :: CInt -> Ptr SockAddr -> Ptr CInt{-CSockLen???-} -> IO CInt
|
||||
-#ifdef HAVE_ACCEPT4
|
||||
+#if 0
|
||||
foreign import CALLCONV unsafe "accept4"
|
||||
c_accept4 :: CInt -> Ptr SockAddr -> Ptr CInt{-CSockLen???-} -> CInt -> IO CInt
|
||||
#endif
|
||||
--
|
||||
2.1.1
|
||||
|
|
@ -1,28 +0,0 @@
|
|||
From b1a581007759e2d9e53ef776e4f10d1de87b8377 Mon Sep 17 00:00:00 2001
|
||||
From: Joey Hess <joey@kitenet.net>
|
||||
Date: Tue, 7 May 2013 14:51:09 -0400
|
||||
Subject: [PATCH] getprotobyname hack for tcp and udp
|
||||
|
||||
Otherwise, core network stuff fails to get the numbers for these protocols.
|
||||
---
|
||||
Network/BSD.hsc | 4 ++++
|
||||
1 file changed, 4 insertions(+)
|
||||
|
||||
diff --git a/Network/BSD.hsc b/Network/BSD.hsc
|
||||
index f0c9f5b..a289143 100644
|
||||
--- a/Network/BSD.hsc
|
||||
+++ b/Network/BSD.hsc
|
||||
@@ -259,6 +259,10 @@ instance Storable ProtocolEntry where
|
||||
poke _p = error "Storable.poke(BSD.ProtocolEntry) not implemented"
|
||||
|
||||
getProtocolByName :: ProtocolName -> IO ProtocolEntry
|
||||
+getProtocolByName "tcp" = return $
|
||||
+ ProtocolEntry {protoName = "tcp", protoAliases = ["TCP"], protoNumber = 6}
|
||||
+getProtocolByName "udp" = return $
|
||||
+ ProtocolEntry {protoName = "udp", protoAliases = ["UDP"], protoNumber = 17}
|
||||
getProtocolByName name = withLock $ do
|
||||
withCString name $ \ name_cstr -> do
|
||||
throwNoSuchThingIfNull "getProtocolByName" ("no such protocol name: " ++ name)
|
||||
--
|
||||
1.8.2.rc3
|
||||
|
|
@ -1,25 +0,0 @@
|
|||
From bfecbc7bd09cbbebdef12aa525dc17109326db3f Mon Sep 17 00:00:00 2001
|
||||
From: Joey Hess <joey@kitenet.net>
|
||||
Date: Sun, 29 Dec 2013 21:31:07 +0000
|
||||
Subject: [PATCH] no NODELAY on android
|
||||
|
||||
---
|
||||
Network/Socket.hsc | 2 +-
|
||||
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||
|
||||
diff --git a/Network/Socket.hsc b/Network/Socket.hsc
|
||||
index 6c21209..c360889 100644
|
||||
--- a/Network/Socket.hsc
|
||||
+++ b/Network/Socket.hsc
|
||||
@@ -923,7 +923,7 @@ packSocketOption so =
|
||||
Just MaxSegment -> Just ((#const IPPROTO_TCP), (#const TCP_MAXSEG))
|
||||
#endif
|
||||
#ifdef TCP_NODELAY
|
||||
- Just NoDelay -> Just ((#const IPPROTO_TCP), (#const TCP_NODELAY))
|
||||
+ Just NoDelay -> Nothing -- Just ((#const IPPROTO_TCP), (#const TCP_NODELAY))
|
||||
#endif
|
||||
#ifdef TCP_CORK
|
||||
Just Cork -> Just ((#const IPPROTO_TCP), (#const TCP_CORK))
|
||||
--
|
||||
1.7.10.4
|
||||
|
|
@ -1,161 +0,0 @@
|
|||
From b3cb294077b627892721a2ebf9e0ce81f35f8c4c Mon Sep 17 00:00:00 2001
|
||||
From: dummy <dummy@example.com>
|
||||
Date: Sun, 25 May 2014 09:28:45 +0200
|
||||
Subject: [PATCH] android port fixes
|
||||
|
||||
Build note: Ensure a hsc2hs in PATH is modified to pass -x to the real
|
||||
one, to enable cross-compiling.
|
||||
---
|
||||
Network/Socket.hsc | 22 ++++++----------------
|
||||
Network/Socket/ByteString.hsc | 2 +-
|
||||
Network/Socket/Internal.hsc | 2 +-
|
||||
Network/Socket/Types.hsc | 4 ++--
|
||||
cbits/HsNet.c | 14 ++++++++++++++
|
||||
configure | 1 +
|
||||
6 files changed, 25 insertions(+), 20 deletions(-)
|
||||
|
||||
diff --git a/Network/Socket.hsc b/Network/Socket.hsc
|
||||
index 607b270..04a83e8 100644
|
||||
--- a/Network/Socket.hsc
|
||||
+++ b/Network/Socket.hsc
|
||||
@@ -35,7 +35,7 @@ module Network.Socket
|
||||
, SockAddr(..)
|
||||
, SocketStatus(..)
|
||||
, HostAddress
|
||||
-#if defined(IPV6_SOCKET_SUPPORT)
|
||||
+#if defined(IPV6_SOCKET_SUPPORTNO)
|
||||
, HostAddress6
|
||||
, FlowInfo
|
||||
, ScopeID
|
||||
@@ -52,7 +52,7 @@ module Network.Socket
|
||||
, HostName
|
||||
, ServiceName
|
||||
|
||||
-#if defined(IPV6_SOCKET_SUPPORT)
|
||||
+#if defined(IPV6_SOCKET_SUPPORT) || 1
|
||||
, AddrInfo(..)
|
||||
|
||||
, AddrInfoFlag(..)
|
||||
@@ -134,7 +134,7 @@ module Network.Socket
|
||||
-- * Special constants
|
||||
, aNY_PORT
|
||||
, iNADDR_ANY
|
||||
-#if defined(IPV6_SOCKET_SUPPORT)
|
||||
+#if defined(IPV6_SOCKET_SUPPORTNO)
|
||||
, iN6ADDR_ANY
|
||||
#endif
|
||||
, sOMAXCONN
|
||||
@@ -326,16 +326,6 @@ socket family stype protocol = do
|
||||
setNonBlockIfNeeded fd
|
||||
socket_status <- newMVar NotConnected
|
||||
let sock = MkSocket fd family stype protocol socket_status
|
||||
-#if HAVE_DECL_IPV6_V6ONLY
|
||||
-# if defined(mingw32_HOST_OS)
|
||||
- -- the IPv6Only option is only supported on Windows Vista and later,
|
||||
- -- so trying to change it might throw an error
|
||||
- when (family == AF_INET6) $
|
||||
- E.catch (setSocketOption sock IPv6Only 0) $ (\(_ :: E.IOException) -> return ())
|
||||
-# else
|
||||
- when (family == AF_INET6) $ setSocketOption sock IPv6Only 0
|
||||
-# endif
|
||||
-#endif
|
||||
return sock
|
||||
|
||||
-- | Build a pair of connected socket objects using the given address
|
||||
@@ -1061,9 +1051,9 @@ aNY_PORT = 0
|
||||
iNADDR_ANY :: HostAddress
|
||||
iNADDR_ANY = htonl (#const INADDR_ANY)
|
||||
|
||||
-foreign import CALLCONV unsafe "htonl" htonl :: Word32 -> Word32
|
||||
+foreign import CALLCONV unsafe "my_htonl" htonl :: Word32 -> Word32
|
||||
|
||||
-#if defined(IPV6_SOCKET_SUPPORT)
|
||||
+#if defined(IPV6_SOCKET_SUPPORTNO)
|
||||
-- | The IPv6 wild card address.
|
||||
|
||||
iN6ADDR_ANY :: HostAddress6
|
||||
@@ -1241,7 +1231,7 @@ unpackBits ((k,v):xs) r
|
||||
-----------------------------------------------------------------------------
|
||||
-- Address and service lookups
|
||||
|
||||
-#if defined(IPV6_SOCKET_SUPPORT)
|
||||
+#if defined(IPV6_SOCKET_SUPPORT) || 1
|
||||
|
||||
-- | Flags that control the querying behaviour of 'getAddrInfo'.
|
||||
data AddrInfoFlag
|
||||
diff --git a/Network/Socket/ByteString.hsc b/Network/Socket/ByteString.hsc
|
||||
index e21ad1b..c2dd70a 100644
|
||||
--- a/Network/Socket/ByteString.hsc
|
||||
+++ b/Network/Socket/ByteString.hsc
|
||||
@@ -197,7 +197,7 @@ sendMany sock@(MkSocket fd _ _ _ _) cs = do
|
||||
liftM fromIntegral . withIOVec cs $ \(iovsPtr, iovsLen) ->
|
||||
throwSocketErrorWaitWrite sock "writev" $
|
||||
c_writev (fromIntegral fd) iovsPtr
|
||||
- (fromIntegral (min iovsLen (#const IOV_MAX)))
|
||||
+ (fromIntegral (min iovsLen (0x0026)))
|
||||
#else
|
||||
sendMany sock = sendAll sock . B.concat
|
||||
#endif
|
||||
diff --git a/Network/Socket/Internal.hsc b/Network/Socket/Internal.hsc
|
||||
index 83333f7..0dd6a7d 100644
|
||||
--- a/Network/Socket/Internal.hsc
|
||||
+++ b/Network/Socket/Internal.hsc
|
||||
@@ -24,7 +24,7 @@ module Network.Socket.Internal
|
||||
(
|
||||
-- * Socket addresses
|
||||
HostAddress
|
||||
-#if defined(IPV6_SOCKET_SUPPORT)
|
||||
+#if defined(IPV6_SOCKET_SUPPORTNO)
|
||||
, HostAddress6
|
||||
, FlowInfo
|
||||
, ScopeID
|
||||
diff --git a/Network/Socket/Types.hsc b/Network/Socket/Types.hsc
|
||||
index 48a43bb..1c5994f 100644
|
||||
--- a/Network/Socket/Types.hsc
|
||||
+++ b/Network/Socket/Types.hsc
|
||||
@@ -711,8 +711,8 @@ intToPortNumber v = PortNum (htons (fromIntegral v))
|
||||
portNumberToInt :: PortNumber -> Int
|
||||
portNumberToInt (PortNum po) = fromIntegral (ntohs po)
|
||||
|
||||
-foreign import CALLCONV unsafe "ntohs" ntohs :: Word16 -> Word16
|
||||
-foreign import CALLCONV unsafe "htons" htons :: Word16 -> Word16
|
||||
+foreign import CALLCONV unsafe "my_ntohs" ntohs :: Word16 -> Word16
|
||||
+foreign import CALLCONV unsafe "my_htons" htons :: Word16 -> Word16
|
||||
--foreign import CALLCONV unsafe "ntohl" ntohl :: Word32 -> Word32
|
||||
|
||||
instance Enum PortNumber where
|
||||
diff --git a/cbits/HsNet.c b/cbits/HsNet.c
|
||||
index 86b55dc..5ea1199 100644
|
||||
--- a/cbits/HsNet.c
|
||||
+++ b/cbits/HsNet.c
|
||||
@@ -6,3 +6,17 @@
|
||||
|
||||
#define INLINE
|
||||
#include "HsNet.h"
|
||||
+
|
||||
+#include <sys/endian.h>
|
||||
+uint16_t my_htons(uint16_t v)
|
||||
+{
|
||||
+ htons(v);
|
||||
+}
|
||||
+uint32_t my_htonl(uint32_t v)
|
||||
+{
|
||||
+ htonl(v);
|
||||
+}
|
||||
+uint16_t my_ntohs(uint16_t v)
|
||||
+{
|
||||
+ ntohs(v);
|
||||
+}
|
||||
diff --git a/configure b/configure
|
||||
index db8240d..41674d9 100755
|
||||
--- a/configure
|
||||
+++ b/configure
|
||||
@@ -1,4 +1,5 @@
|
||||
#! /bin/sh
|
||||
+set -- --host=arm-linux-androideabi
|
||||
# Guess values for system-dependent variables and create Makefiles.
|
||||
# Generated by GNU Autoconf 2.69 for Haskell network package 2.3.0.14.
|
||||
#
|
||||
--
|
||||
2.0.0.rc2
|
||||
|
|
@ -0,0 +1,341 @@
|
|||
From 834a0d3bfe56b969a65eff834604442cde8798f7 Mon Sep 17 00:00:00 2001
|
||||
From: dummy <dummy@example.com>
|
||||
Date: Wed, 20 Jun 2018 05:06:41 +0100
|
||||
Subject: [PATCH] android port fixes
|
||||
|
||||
Build note: Ensure a hsc2hs in PATH is modified to pass -x to the real
|
||||
one, to enable cross-compiling.
|
||||
---
|
||||
Network/BSD.hsc | 84 -------------------------------------------
|
||||
Network/Socket.hsc | 16 ++++-----
|
||||
Network/Socket/ByteString.hsc | 2 +-
|
||||
Network/Socket/Internal.hsc | 2 +-
|
||||
Network/Socket/Types.hsc | 14 +++-----
|
||||
cbits/HsNet.c | 18 ++++++++++
|
||||
configure | 1 +
|
||||
include/HsNetworkConfig.h | 4 +--
|
||||
8 files changed, 36 insertions(+), 105 deletions(-)
|
||||
|
||||
diff --git a/Network/BSD.hsc b/Network/BSD.hsc
|
||||
index 67f2fcd..4c86af5 100644
|
||||
--- a/Network/BSD.hsc
|
||||
+++ b/Network/BSD.hsc
|
||||
@@ -28,12 +28,8 @@ module Network.BSD
|
||||
, hostAddress
|
||||
|
||||
#if defined(HAVE_GETHOSTENT) && !defined(mingw32_HOST_OS)
|
||||
- , getHostEntries
|
||||
-
|
||||
-- ** Low level functionality
|
||||
- , setHostEntry
|
||||
, getHostEntry
|
||||
- , endHostEntry
|
||||
#endif
|
||||
|
||||
-- * Service names
|
||||
@@ -61,14 +57,6 @@ module Network.BSD
|
||||
, getProtocolNumber
|
||||
, defaultProtocol
|
||||
|
||||
-#if !defined(mingw32_HOST_OS)
|
||||
- , getProtocolEntries
|
||||
- -- ** Low level functionality
|
||||
- , setProtocolEntry
|
||||
- , getProtocolEntry
|
||||
- , endProtocolEntry
|
||||
-#endif
|
||||
-
|
||||
-- * Port numbers
|
||||
, PortNumber
|
||||
|
||||
@@ -80,11 +68,6 @@ module Network.BSD
|
||||
#if !defined(mingw32_HOST_OS)
|
||||
, getNetworkByName
|
||||
, getNetworkByAddr
|
||||
- , getNetworkEntries
|
||||
- -- ** Low level functionality
|
||||
- , setNetworkEntry
|
||||
- , getNetworkEntry
|
||||
- , endNetworkEntry
|
||||
#endif
|
||||
|
||||
#if defined(HAVE_IF_NAMETOINDEX)
|
||||
@@ -298,30 +281,6 @@ getProtocolNumber proto = do
|
||||
(ProtocolEntry _ _ num) <- getProtocolByName proto
|
||||
return num
|
||||
|
||||
-#if !defined(mingw32_HOST_OS)
|
||||
-getProtocolEntry :: IO ProtocolEntry -- Next Protocol Entry from DB
|
||||
-getProtocolEntry = withLock $ do
|
||||
- ent <- throwNoSuchThingIfNull "getProtocolEntry" "no such protocol entry"
|
||||
- $ c_getprotoent
|
||||
- peek ent
|
||||
-
|
||||
-foreign import ccall unsafe "getprotoent" c_getprotoent :: IO (Ptr ProtocolEntry)
|
||||
-
|
||||
-setProtocolEntry :: Bool -> IO () -- Keep DB Open ?
|
||||
-setProtocolEntry flg = withLock $ c_setprotoent (fromBool flg)
|
||||
-
|
||||
-foreign import ccall unsafe "setprotoent" c_setprotoent :: CInt -> IO ()
|
||||
-
|
||||
-endProtocolEntry :: IO ()
|
||||
-endProtocolEntry = withLock $ c_endprotoent
|
||||
-
|
||||
-foreign import ccall unsafe "endprotoent" c_endprotoent :: IO ()
|
||||
-
|
||||
-getProtocolEntries :: Bool -> IO [ProtocolEntry]
|
||||
-getProtocolEntries stayOpen = withLock $ do
|
||||
- setProtocolEntry stayOpen
|
||||
- getEntries (getProtocolEntry) (endProtocolEntry)
|
||||
-#endif
|
||||
|
||||
-- ---------------------------------------------------------------------------
|
||||
-- Host lookups
|
||||
@@ -405,21 +364,6 @@ getHostEntry = withLock $ do
|
||||
>>= peek
|
||||
|
||||
foreign import ccall unsafe "gethostent" c_gethostent :: IO (Ptr HostEntry)
|
||||
-
|
||||
-setHostEntry :: Bool -> IO ()
|
||||
-setHostEntry flg = withLock $ c_sethostent (fromBool flg)
|
||||
-
|
||||
-foreign import ccall unsafe "sethostent" c_sethostent :: CInt -> IO ()
|
||||
-
|
||||
-endHostEntry :: IO ()
|
||||
-endHostEntry = withLock $ c_endhostent
|
||||
-
|
||||
-foreign import ccall unsafe "endhostent" c_endhostent :: IO ()
|
||||
-
|
||||
-getHostEntries :: Bool -> IO [HostEntry]
|
||||
-getHostEntries stayOpen = do
|
||||
- setHostEntry stayOpen
|
||||
- getEntries (getHostEntry) (endHostEntry)
|
||||
#endif
|
||||
|
||||
-- ---------------------------------------------------------------------------
|
||||
@@ -482,34 +426,6 @@ getNetworkByAddr addr family = withLock $ do
|
||||
|
||||
foreign import ccall unsafe "getnetbyaddr"
|
||||
c_getnetbyaddr :: NetworkAddr -> CInt -> IO (Ptr NetworkEntry)
|
||||
-
|
||||
-getNetworkEntry :: IO NetworkEntry
|
||||
-getNetworkEntry = withLock $ do
|
||||
- throwNoSuchThingIfNull "getNetworkEntry" "no more network entries"
|
||||
- $ c_getnetent
|
||||
- >>= peek
|
||||
-
|
||||
-foreign import ccall unsafe "getnetent" c_getnetent :: IO (Ptr NetworkEntry)
|
||||
-
|
||||
--- | Open the network name database. The parameter specifies
|
||||
--- whether a connection is maintained open between various
|
||||
--- networkEntry calls
|
||||
-setNetworkEntry :: Bool -> IO ()
|
||||
-setNetworkEntry flg = withLock $ c_setnetent (fromBool flg)
|
||||
-
|
||||
-foreign import ccall unsafe "setnetent" c_setnetent :: CInt -> IO ()
|
||||
-
|
||||
--- | Close the connection to the network name database.
|
||||
-endNetworkEntry :: IO ()
|
||||
-endNetworkEntry = withLock $ c_endnetent
|
||||
-
|
||||
-foreign import ccall unsafe "endnetent" c_endnetent :: IO ()
|
||||
-
|
||||
--- | Get the list of network entries.
|
||||
-getNetworkEntries :: Bool -> IO [NetworkEntry]
|
||||
-getNetworkEntries stayOpen = do
|
||||
- setNetworkEntry stayOpen
|
||||
- getEntries (getNetworkEntry) (endNetworkEntry)
|
||||
#endif
|
||||
|
||||
-- ---------------------------------------------------------------------------
|
||||
diff --git a/Network/Socket.hsc b/Network/Socket.hsc
|
||||
index 8b2e6fe..b02b80d 100644
|
||||
--- a/Network/Socket.hsc
|
||||
+++ b/Network/Socket.hsc
|
||||
@@ -59,7 +59,7 @@ module Network.Socket
|
||||
, HostName
|
||||
, ServiceName
|
||||
|
||||
-#if defined(IPV6_SOCKET_SUPPORT)
|
||||
+#if defined(IPV6_SOCKET_SUPPORT) || 1
|
||||
, AddrInfo(..)
|
||||
|
||||
, AddrInfoFlag(..)
|
||||
@@ -143,7 +143,7 @@ module Network.Socket
|
||||
-- * Special constants
|
||||
, aNY_PORT
|
||||
, iNADDR_ANY
|
||||
-#if defined(IPV6_SOCKET_SUPPORT)
|
||||
+#if defined(IPV6_SOCKET_SUPPORTNO)
|
||||
, iN6ADDR_ANY
|
||||
#endif
|
||||
, sOMAXCONN
|
||||
@@ -521,7 +521,7 @@ accept sock@(MkSocket s family stype protocol status) = do
|
||||
return new_sock
|
||||
#else
|
||||
with (fromIntegral sz) $ \ ptr_len -> do
|
||||
-# ifdef HAVE_ACCEPT4
|
||||
+#if 0
|
||||
new_sock <- throwSocketErrorIfMinus1RetryMayBlock "accept"
|
||||
(threadWaitRead (fromIntegral s))
|
||||
(c_accept4 s sockaddr ptr_len (#const SOCK_NONBLOCK))
|
||||
@@ -903,7 +903,7 @@ packSocketOption so =
|
||||
Just MaxSegment -> Just ((#const IPPROTO_TCP), (#const TCP_MAXSEG))
|
||||
#endif
|
||||
#ifdef TCP_NODELAY
|
||||
- Just NoDelay -> Just ((#const IPPROTO_TCP), (#const TCP_NODELAY))
|
||||
+ Just NoDelay -> Nothing -- Just ((#const IPPROTO_TCP), (#const TCP_NODELAY))
|
||||
#endif
|
||||
#ifdef TCP_USER_TIMEOUT
|
||||
Just UserTimeout -> Just ((#const IPPROTO_TCP), (#const TCP_USER_TIMEOUT))
|
||||
@@ -1036,9 +1036,9 @@ iNADDR_ANY :: HostAddress
|
||||
iNADDR_ANY = htonl (#const INADDR_ANY)
|
||||
|
||||
-- | Converts the from host byte order to network byte order.
|
||||
-foreign import CALLCONV unsafe "htonl" htonl :: Word32 -> Word32
|
||||
+foreign import CALLCONV unsafe "my_htonl" htonl :: Word32 -> Word32
|
||||
-- | Converts the from network byte order to host byte order.
|
||||
-foreign import CALLCONV unsafe "ntohl" ntohl :: Word32 -> Word32
|
||||
+foreign import CALLCONV unsafe "my_ntohl" ntohl :: Word32 -> Word32
|
||||
|
||||
#if defined(IPV6_SOCKET_SUPPORT)
|
||||
-- | The IPv6 wild card address.
|
||||
@@ -1206,7 +1206,7 @@ unpackBits ((k,v):xs) r
|
||||
-----------------------------------------------------------------------------
|
||||
-- Address and service lookups
|
||||
|
||||
-#if defined(IPV6_SOCKET_SUPPORT)
|
||||
+#if defined(IPV6_SOCKET_SUPPORT) || 1
|
||||
|
||||
-- | Flags that control the querying behaviour of 'getAddrInfo'.
|
||||
-- For more information, see <https://tools.ietf.org/html/rfc3493#page-25>
|
||||
@@ -1568,7 +1568,7 @@ foreign import CALLCONV unsafe "bind"
|
||||
c_bind :: CInt -> Ptr SockAddr -> CInt{-CSockLen???-} -> IO CInt
|
||||
foreign import CALLCONV SAFE_ON_WIN "connect"
|
||||
c_connect :: CInt -> Ptr SockAddr -> CInt{-CSockLen???-} -> IO CInt
|
||||
-#ifdef HAVE_ACCEPT4
|
||||
+#if 0
|
||||
foreign import CALLCONV unsafe "accept4"
|
||||
c_accept4 :: CInt -> Ptr SockAddr -> Ptr CInt{-CSockLen???-} -> CInt -> IO CInt
|
||||
#else
|
||||
diff --git a/Network/Socket/ByteString.hsc b/Network/Socket/ByteString.hsc
|
||||
index 93e29c9..a736932 100644
|
||||
--- a/Network/Socket/ByteString.hsc
|
||||
+++ b/Network/Socket/ByteString.hsc
|
||||
@@ -177,7 +177,7 @@ sendMany sock@(MkSocket fd _ _ _ _) cs = do
|
||||
liftM fromIntegral . withIOVec cs $ \(iovsPtr, iovsLen) ->
|
||||
throwSocketErrorWaitWrite sock "writev" $
|
||||
c_writev (fromIntegral fd) iovsPtr
|
||||
- (fromIntegral (min iovsLen (#const IOV_MAX)))
|
||||
+ (fromIntegral (min iovsLen (0x0026)))
|
||||
#else
|
||||
sendMany sock = sendAll sock . B.concat
|
||||
#endif
|
||||
diff --git a/Network/Socket/Internal.hsc b/Network/Socket/Internal.hsc
|
||||
index c8bf4f6..2463bd7 100644
|
||||
--- a/Network/Socket/Internal.hsc
|
||||
+++ b/Network/Socket/Internal.hsc
|
||||
@@ -24,7 +24,7 @@ module Network.Socket.Internal
|
||||
(
|
||||
-- * Socket addresses
|
||||
HostAddress
|
||||
-#if defined(IPV6_SOCKET_SUPPORT)
|
||||
+#if defined(IPV6_SOCKET_SUPPORTNO)
|
||||
, HostAddress6
|
||||
, FlowInfo
|
||||
, ScopeID
|
||||
diff --git a/Network/Socket/Types.hsc b/Network/Socket/Types.hsc
|
||||
index b42c98d..e5bb9fe 100644
|
||||
--- a/Network/Socket/Types.hsc
|
||||
+++ b/Network/Socket/Types.hsc
|
||||
@@ -758,10 +758,10 @@ intToPortNumber v = PortNum (htons (fromIntegral v))
|
||||
portNumberToInt :: PortNumber -> Int
|
||||
portNumberToInt (PortNum po) = fromIntegral (ntohs po)
|
||||
|
||||
-foreign import CALLCONV unsafe "ntohs" ntohs :: Word16 -> Word16
|
||||
-foreign import CALLCONV unsafe "htons" htons :: Word16 -> Word16
|
||||
-foreign import CALLCONV unsafe "ntohl" ntohl :: Word32 -> Word32
|
||||
-foreign import CALLCONV unsafe "htonl" htonl :: Word32 -> Word32
|
||||
+foreign import CALLCONV unsafe "my_ntohs" ntohs :: Word16 -> Word16
|
||||
+foreign import CALLCONV unsafe "my_htons" htons :: Word16 -> Word16
|
||||
+foreign import CALLCONV unsafe "my_ntohl" ntohl :: Word32 -> Word32
|
||||
+foreign import CALLCONV unsafe "my_htonl" htonl :: Word32 -> Word32
|
||||
|
||||
instance Enum PortNumber where
|
||||
toEnum = intToPortNumber
|
||||
@@ -1071,13 +1071,9 @@ poke32 p i0 a = do
|
||||
-- | Private newtype proxy for the Storable instance. To avoid orphan instances.
|
||||
newtype In6Addr = In6Addr HostAddress6
|
||||
|
||||
-#if __GLASGOW_HASKELL__ < 800
|
||||
-#let alignment t = "%lu", (unsigned long)offsetof(struct {char x__; t (y__); }, y__)
|
||||
-#endif
|
||||
-
|
||||
instance Storable In6Addr where
|
||||
sizeOf _ = #const sizeof(struct in6_addr)
|
||||
- alignment _ = #alignment struct in6_addr
|
||||
+ alignment _ = 64
|
||||
|
||||
peek p = do
|
||||
a <- peek32 p 0
|
||||
diff --git a/cbits/HsNet.c b/cbits/HsNet.c
|
||||
index 86b55dc..6225c32 100644
|
||||
--- a/cbits/HsNet.c
|
||||
+++ b/cbits/HsNet.c
|
||||
@@ -6,3 +6,21 @@
|
||||
|
||||
#define INLINE
|
||||
#include "HsNet.h"
|
||||
+
|
||||
+#include <sys/endian.h>
|
||||
+uint16_t my_htons(uint16_t v)
|
||||
+{
|
||||
+ htons(v);
|
||||
+}
|
||||
+uint32_t my_htonl(uint32_t v)
|
||||
+{
|
||||
+ htonl(v);
|
||||
+}
|
||||
+uint16_t my_ntohs(uint16_t v)
|
||||
+{
|
||||
+ ntohs(v);
|
||||
+}
|
||||
+uint32_t my_ntohl(uint32_t v)
|
||||
+{
|
||||
+ ntohl(v);
|
||||
+}
|
||||
diff --git a/configure b/configure
|
||||
index 9e82879..24ef3ce 100755
|
||||
--- a/configure
|
||||
+++ b/configure
|
||||
@@ -1,4 +1,5 @@
|
||||
#! /bin/sh
|
||||
+set -- --host=arm-linux-androideabi
|
||||
# Guess values for system-dependent variables and create Makefiles.
|
||||
# Generated by GNU Autoconf 2.69 for Haskell network package 2.3.0.14.
|
||||
#
|
||||
diff --git a/include/HsNetworkConfig.h b/include/HsNetworkConfig.h
|
||||
index 383f6e2..62b8852 100644
|
||||
--- a/include/HsNetworkConfig.h
|
||||
+++ b/include/HsNetworkConfig.h
|
||||
@@ -2,7 +2,7 @@
|
||||
/* include/HsNetworkConfig.h.in. Generated from configure.ac by autoheader. */
|
||||
|
||||
/* Define to 1 if you have the `accept4' function. */
|
||||
-#define HAVE_ACCEPT4 1
|
||||
+/* #undef HAVE_ACCEPT4 */
|
||||
|
||||
/* Define to 1 if you have the <arpa/inet.h> header file. */
|
||||
#define HAVE_ARPA_INET_H 1
|
||||
@@ -73,7 +73,7 @@
|
||||
#define HAVE_LIMITS_H 1
|
||||
|
||||
/* Define to 1 if you have the <linux/can.h> header file. */
|
||||
-#define HAVE_LINUX_CAN_H 1
|
||||
+/* #undef HAVE_LINUX_CAN_H */
|
||||
|
||||
/* Define to 1 if you have a Linux sendfile(2) implementation. */
|
||||
#define HAVE_LINUX_SENDFILE 1
|
||||
--
|
||||
2.1.4
|
||||
|
|
@ -0,0 +1,39 @@
|
|||
From 2f1d2eddde94d339d91d7b018dc90542b7625fd3 Mon Sep 17 00:00:00 2001
|
||||
From: androidbuilder <androidbuilder@example.com>
|
||||
Date: Wed, 20 Jun 2018 14:41:04 +0100
|
||||
Subject: [PATCH] remove ipv6 stuff
|
||||
|
||||
---
|
||||
Network/Wai/Handler/Warp/Run.hs | 9 +--------
|
||||
1 file changed, 1 insertion(+), 8 deletions(-)
|
||||
|
||||
diff --git a/Network/Wai/Handler/Warp/Run.hs b/Network/Wai/Handler/Warp/Run.hs
|
||||
index 116b24e..5c7cbcb 100644
|
||||
--- a/Network/Wai/Handler/Warp/Run.hs
|
||||
+++ b/Network/Wai/Handler/Warp/Run.hs
|
||||
@@ -14,7 +14,7 @@ import Control.Monad (when, unless, void)
|
||||
import Data.ByteString (ByteString)
|
||||
import qualified Data.ByteString as S
|
||||
import Data.Char (chr)
|
||||
-import Data.IP (toHostAddress, toHostAddress6)
|
||||
+import Data.IP (toHostAddress)
|
||||
import Data.IORef (IORef, newIORef, readIORef, writeIORef)
|
||||
import Data.Streaming.Network (bindPortTCP)
|
||||
import Network (sClose, Socket)
|
||||
@@ -305,13 +305,6 @@ serveConnection conn ii origAddr transport settings app = do
|
||||
[a] -> Just (SockAddrInet (readInt clientPort)
|
||||
(toHostAddress a))
|
||||
_ -> Nothing
|
||||
- ["PROXY","TCP6",clientAddr,_,clientPort,_] ->
|
||||
- case [x | (x, t) <- reads (decodeAscii clientAddr), null t] of
|
||||
- [a] -> Just (SockAddrInet6 (readInt clientPort)
|
||||
- 0
|
||||
- (toHostAddress6 a)
|
||||
- 0)
|
||||
- _ -> Nothing
|
||||
("PROXY":"UNKNOWN":_) ->
|
||||
Just origAddr
|
||||
_ ->
|
||||
--
|
||||
2.1.4
|
||||
|
|
@ -0,0 +1,28 @@
|
|||
From 717945172c2f3ff95cce9db2d075122bccfc9a1a Mon Sep 17 00:00:00 2001
|
||||
From: androidbuilder <androidbuilder@example.com>
|
||||
Date: Wed, 20 Jun 2018 02:01:30 +0100
|
||||
Subject: [PATCH] support Android cert store
|
||||
|
||||
Android has only hashsed cert files.
|
||||
See https://github.com/vincenthz/hs-certificate/issues/19
|
||||
---
|
||||
Data/X509/CertificateStore.hs | 2 +-
|
||||
2 files changed, 1 insertion(+), 1 deletion(-)
|
||||
delete mode 100644 Data/X509/.CertificateStore.hs.swp
|
||||
|
||||
diff --git a/Data/X509/CertificateStore.hs b/Data/X509/CertificateStore.hs
|
||||
index 07449a2..74b8bde 100644
|
||||
--- a/Data/X509/CertificateStore.hs
|
||||
+++ b/Data/X509/CertificateStore.hs
|
||||
@@ -106,7 +106,7 @@ listDirectoryCerts path =
|
||||
&& isDigit (s !! 9)
|
||||
&& (s !! 8) == '.'
|
||||
&& all isHexDigit (take 8 s)
|
||||
- isCert x = (not $ isPrefixOf "." x) && (not $ isHashedFile x)
|
||||
+ isCert x = (not $ isPrefixOf "." x)
|
||||
|
||||
getDirContents = E.catch (map (path </>) . filter isCert <$> getDirectoryContents path) emptyPaths
|
||||
where emptyPaths :: E.IOException -> IO [FilePath]
|
||||
--
|
||||
2.1.4
|
||||
|
|
@ -1,27 +0,0 @@
|
|||
From 61d0e47cd038f25157e48385fc080d0d374b214d Mon Sep 17 00:00:00 2001
|
||||
From: dummy <dummy@example.com>
|
||||
Date: Tue, 14 Oct 2014 02:07:57 +0000
|
||||
Subject: [PATCH] support Android cert store
|
||||
|
||||
Android has only hashsed cert files.
|
||||
See https://github.com/vincenthz/hs-certificate/issues/19
|
||||
---
|
||||
System/X509/Unix.hs | 2 +-
|
||||
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||
|
||||
diff --git a/System/X509/Unix.hs b/System/X509/Unix.hs
|
||||
index 9df3331..a30da26 100644
|
||||
--- a/System/X509/Unix.hs
|
||||
+++ b/System/X509/Unix.hs
|
||||
@@ -56,7 +56,7 @@ listDirectoryCerts path = do
|
||||
&& isDigit (s !! 9)
|
||||
&& (s !! 8) == '.'
|
||||
&& all isHexDigit (take 8 s)
|
||||
- isCert x = (not $ isPrefixOf "." x) && (not $ isHashedFile x)
|
||||
+ isCert x = (not $ isPrefixOf "." x)
|
||||
|
||||
getDirContents = E.catch (Just <$> getDirectoryContents path) emptyPaths
|
||||
where emptyPaths :: E.IOException -> IO (Maybe [FilePath])
|
||||
--
|
||||
1.7.10.4
|
||||
|
24
standalone/android/haskell-patches/xss-sanitize_deps.patch
Normal file
24
standalone/android/haskell-patches/xss-sanitize_deps.patch
Normal file
|
@ -0,0 +1,24 @@
|
|||
From 41eb8ab50125eb6ccf260c5510407483f1d78dd4 Mon Sep 17 00:00:00 2001
|
||||
From: dummy <dummy@example.com>
|
||||
Date: Wed, 20 Jun 2018 14:52:52 +0100
|
||||
Subject: [PATCH] deps
|
||||
|
||||
---
|
||||
xss-sanitize.cabal | 1 +
|
||||
1 file changed, 1 insertion(+)
|
||||
|
||||
diff --git a/xss-sanitize.cabal b/xss-sanitize.cabal
|
||||
index 727dc95..2de4270 100644
|
||||
--- a/xss-sanitize.cabal
|
||||
+++ b/xss-sanitize.cabal
|
||||
@@ -19,6 +19,7 @@ library
|
||||
, tagsoup >= 0.12.2 && < 1
|
||||
, utf8-string >= 0.3 && < 1
|
||||
, network >= 2 && < 3
|
||||
+ , network-uri
|
||||
, css-text >= 0.1.1 && < 0.2
|
||||
, text >= 0.11 && < 2
|
||||
, attoparsec >= 0.10.0.3 && < 1
|
||||
--
|
||||
2.1.4
|
||||
|
|
@ -80,7 +80,6 @@ EOF
|
|||
patched unix-time
|
||||
patched lifted-base
|
||||
patched zlib
|
||||
patched MissingH
|
||||
patched distributive
|
||||
patched comonad
|
||||
patched iproute
|
||||
|
@ -93,15 +92,17 @@ EOF
|
|||
patched skein
|
||||
patched lens
|
||||
patched certificate
|
||||
patched x509-system
|
||||
patched x509-store
|
||||
patched persistent-template
|
||||
patched system-filepath
|
||||
patched optparse-applicative
|
||||
patched warp
|
||||
patched wai-app-static
|
||||
patched yesod-routes
|
||||
patched shakespeare
|
||||
patched yesod-core
|
||||
patched yesod-persistent
|
||||
patched xss-sanitize
|
||||
patched yesod-form
|
||||
patched crypto-numbers
|
||||
patched clock
|
||||
|
@ -113,6 +114,7 @@ EOF
|
|||
patched dns
|
||||
patched unbounded-delays
|
||||
patched uuid
|
||||
patched basement
|
||||
|
||||
cd ..
|
||||
|
||||
|
|
Loading…
Reference in a new issue