{- git-annex command
 -
 - Copyright 2013-2024 Joey Hess <id@joeyh.name>
 -
 - Licensed under the GNU AGPL version 3 or higher.
 -}

{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE BangPatterns #-}
{-# LANGUAGE CPP #-}

module Command.ImportFeed where

import Text.Feed.Import
import Text.Feed.Query
import Text.Feed.Types
import qualified Data.Set as S
import qualified Data.Map as M
import Data.Time.Clock
import Data.Time.Clock.POSIX
import Data.Time.Format
import Data.Time.Calendar
import Data.Time.LocalTime
import Control.Concurrent.STM
import qualified Data.Text as T
import qualified Data.Text.Encoding as TE
import qualified System.FilePath.ByteString as P
import qualified Data.ByteString as B

import Command
import qualified Annex
import qualified Annex.Url as Url
import qualified Remote
import qualified Types.Remote as Remote
import Types.UrlContents
import Logs.Web
import Logs.File
import qualified Utility.Format
import Utility.Tmp
import Utility.Metered
import Command.AddUrl (addUrlFile, downloadRemoteFile, parseDownloadOptions, DownloadOptions(..), checkClaimingUrl, checkCanAdd, addWorkTree, checkRaw, useYoutubeDl)
import Annex.UUID
import Backend.URL (fromUrl)
import Annex.Content
import Annex.WorkTree
import Annex.YoutubeDl
import Types.MetaData
import Logs.MetaData
import Annex.MetaData
import Annex.FileMatcher
import Annex.UntrustedFilePath
import qualified Utility.RawFilePath as R
import qualified Database.ImportFeed as Db

cmd :: Command
cmd = notBareRepo $ withAnnexOptions os $
	command "importfeed" SectionCommon "import files from podcast feeds"
		(paramRepeating paramUrl) (seek <$$> optParser)
  where
	os = [jobsOption, jsonOptions, jsonProgressOption, backendOption]

data ImportFeedOptions = ImportFeedOptions
	{ feedUrls :: CmdParams
	, templateOption :: Maybe String
	, scrapeOption :: Bool
	, downloadOptions :: DownloadOptions
	}

optParser :: CmdParamsDesc -> Parser ImportFeedOptions
optParser desc = ImportFeedOptions
	<$> cmdParams desc
	<*> optional (strOption
		( long "template" <> metavar paramFormat
		<> help "template for filenames"
		))
	<*> switch
		( long "scrape"
		<> help "scrape website for content to import"
		)
	<*> parseDownloadOptions False
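
{- Each feed url is downloaded and parsed concurrently with the
 - downloads of the items found in the feeds. The dlst map tracks the
 - state of each feed: Nothing while it is still being downloaded,
 - Just Nothing if it failed, and Just (Just items) once it has been
 - parsed. The checkst map accumulates a TMVar per started item
 - download, so a feed's problem state is only cleared once all of its
 - downloads have succeeded. -}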
seek :: ImportFeedOptions -> CommandSeek
seek o = startConcurrency commandStages $ do
	addunlockedmatcher <- addUnlockedMatcher
	cache <- getCache (templateOption o)
	dlst <- liftIO $ newTMVarIO M.empty
	checkst <- liftIO $ newTVarIO M.empty

	forM_ (feedUrls o) $ \url -> do
		liftIO $ atomically $ do
			m <- takeTMVar dlst
			putTMVar dlst (M.insert url Nothing m)
		commandAction $ getFeed o url dlst
		startpendingdownloads addunlockedmatcher cache dlst checkst False

	startpendingdownloads addunlockedmatcher cache dlst checkst True

	clearfeedproblems checkst
  where
	getpendingdownloads dlst blocking
		| blocking = do
			m <- takeTMVar dlst
			if M.null m
				then do
					putTMVar dlst m
					return m
				else
					let (pending, rest) = M.partition ispending m
					in if M.null pending || not (M.null rest)
						then retry
						else do
							putTMVar dlst rest
							return pending
		| otherwise = do
			m <- takeTMVar dlst
			let (pending, rest) = M.partition ispending m
			putTMVar dlst rest
			return pending
	  where
		ispending Nothing = False
		ispending (Just _) = True

	startpendingdownloads addunlockedmatcher cache dlst checkst blocking = do
		m <- liftIO $ atomically $ getpendingdownloads dlst blocking

		forM_ (M.toList m) $ \(url, v) -> case v of
			Nothing -> noop
			Just Nothing -> noop
			Just (Just is) ->
				forM_ is $ \i -> do
					cv <- liftIO newEmptyTMVarIO
					liftIO $ atomically $ modifyTVar checkst $
						M.insertWith (++) url [cv]
					commandAction $
						startDownload addunlockedmatcher o cache cv i

	clearfeedproblems checkst = do
		m <- liftIO $ atomically $ readTVar checkst
		forM_ (M.toList m) $ \(url, cvl) ->
			whenM (and <$> mapM (liftIO . atomically . takeTMVar) cvl) $
				clearFeedProblem url
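
{- Downloads and parses a feed (or, with --scrape, uses youtube-dl to
 - scrape a playlist), recording the items to download in the TMVar
 - map, or Just Nothing when the feed could not be processed. -}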
getFeed
	:: ImportFeedOptions
	-> URLString
	-> TMVar (M.Map URLString (Maybe (Maybe [ToDownload])))
	-> CommandStart
getFeed o url st =
	starting "importfeed" (ActionItemOther (Just (UnquotedString url))) (SeekInput [url]) $
		go `onException` recordfail
  where
	record v = liftIO $ atomically $ do
		m <- takeTMVar st
		putTMVar st (M.insert url v m)
	recordfail = record (Just Nothing)

	go
		| scrapeOption o = scrape
		| otherwise = get

	get = withTmpFile "feed" $ \tmpf h -> do
		liftIO $ hClose h
		ifM (downloadFeed url tmpf)
			( parse tmpf
			, do
				recordfail
				next $ feedProblem url
					"downloading the feed failed"
			)

	-- Use parseFeedFromFile rather than reading the file
	-- ourselves because it goes out of its way to handle encodings.
	parse tmpf = liftIO (parseFeedFromFile' tmpf) >>= \case
		Nothing -> debugfeedcontent tmpf "parsing the feed failed"
		Just f -> do
			case decodeBS $ fromFeedText $ getFeedTitle f of
				"" -> noop
				t -> showNote (UnquotedString ('"' : t ++ "\""))
			case findDownloads url f of
				[] -> debugfeedcontent tmpf "bad feed content; no enclosures to download"
				l -> do
					record (Just (Just l))
					next $ return True

	debugfeedcontent tmpf msg = do
		feedcontent <- liftIO $ readFile tmpf
		fastDebug "Command.ImportFeed" $ unlines
			[ "start of feed content"
			, feedcontent
			, "end of feed content"
			]
		recordfail
		next $ feedProblem url
			(msg ++ " (use --debug --debugfilter=ImportFeed to see the feed content that was downloaded)")

	scrape = youtubePlaylist url >>= \case
		Left err -> do
			recordfail
			next $ feedProblem url err
		Right playlist -> do
			record (Just (Just (playlistDownloads url playlist)))
			next $ return True
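
-- feed 1.1.0 changed parseFeedFromFile to return a Maybe; with older
-- versions a parse failure is instead surfaced as an exception, which
-- catchMaybeIO converts to Nothing to get the same interface.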
parseFeedFromFile' :: FilePath -> IO (Maybe Feed)
#if MIN_VERSION_feed(1,1,0)
parseFeedFromFile' = parseFeedFromFile
#else
parseFeedFromFile' f = catchMaybeIO (parseFeedFromFile f)
#endif
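
{- An item found in a feed (or in a scraped playlist) that is a
 - candidate for downloading. -}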
data ToDownload = ToDownload
	{ feedurl :: URLString
	, location :: DownloadLocation
	, itemid :: Maybe B.ByteString
	-- Either the parsed or unparsed date.
	, itempubdate :: Maybe (Either String UTCTime)
	-- Fields that are used as metadata and to generate the filename.
	, itemfields :: [(String, String)]
	-- True when youtube-dl found this by scraping, so certainly
	-- supports downloading it.
	, youtubedlscraped :: Bool
	}

data DownloadLocation = Enclosure URLString | MediaLink URLString

type ItemId = String
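
{- State shared by all the downloads: a handle on the database of
 - already-known urls and item ids (Nothing when running with --force,
 - since then everything is downloaded again), and the compiled
 - filename template. -}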
data Cache = Cache
	{ dbhandle :: Maybe Db.ImportFeedDbHandle
	, template :: Utility.Format.Format
	}

getCache :: Maybe String -> Annex Cache
getCache opttemplate = ifM (Annex.getRead Annex.force)
	( ret Nothing
	, do
		j <- jsonOutputEnabled
		unless j $
			showStartMessage (StartMessage "importfeed" (ActionItemOther (Just "gathering known urls")) (SeekInput []))
		h <- Db.openDb
		unless j
			showEndOk
		ret (Just h)
	)
  where
	tmpl = Utility.Format.gen $ fromMaybe defaultTemplate opttemplate
	ret h = return $ Cache h tmpl
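
{- Finds the downloadable items listed in a feed: preferring an item's
 - enclosure, and otherwise falling back to its link, which may point
 - to a media url. -}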
findDownloads :: URLString -> Feed -> [ToDownload]
findDownloads u f = catMaybes $ map mk (feedItems f)
  where
	mk i = case getItemEnclosure i of
		Just (enclosureurl, _, _) ->
			Just $ mk' i
				(Enclosure $ decodeBS $ fromFeedText enclosureurl)
		Nothing -> case getItemLink i of
			Just l -> Just $ mk' i
				(MediaLink $ decodeBS $ fromFeedText l)
			Nothing -> Nothing
	mk' i l = ToDownload
		{ feedurl = u
		, location = l
		, itemid = case getItemId i of
			Just (_, iid) -> Just (fromFeedText iid)
			_ -> Nothing
		, itempubdate = case getItemPublishDate i :: Maybe (Maybe UTCTime) of
			Just (Just d) -> Just (Right d)
			_ -> Left . decodeBS . fromFeedText
				<$> getItemPublishDateString i
		, itemfields = extractFeedItemFields f i u
		, youtubedlscraped = False
		}

{- Feeds change, so a feed download cannot be resumed. -}
downloadFeed :: URLString -> FilePath -> Annex Bool
downloadFeed url f
	| Url.parseURIRelaxed url == Nothing = giveup "invalid feed url"
	| otherwise = Url.withUrlOptions $
		Url.download nullMeterUpdate Nothing url f
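
{- Starts the download of a single item. An enclosure is downloaded
 - directly; a media link is passed to youtube-dl when it supports the
 - url, and otherwise treated like an enclosure. Items whose url or
 - item id is already known are skipped. -}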
startDownload :: AddUnlockedMatcher -> ImportFeedOptions -> Cache -> TMVar Bool -> ToDownload -> CommandStart
startDownload addunlockedmatcher opts cache cv todownload = case location todownload of
	Enclosure url -> startdownloadenclosure url
	MediaLink linkurl -> do
		let mediaurl = setDownloader linkurl YoutubeDownloader
		let mediakey = Backend.URL.fromUrl mediaurl Nothing
		-- Old versions of git-annex that used quvi might have
		-- used the quviurl for this, so check if it's known
		-- to avoid adding it a second time.
		let quviurl = setDownloader linkurl QuviDownloader
		checkknown mediaurl $ checkknown quviurl $
			ifM (Annex.getRead Annex.fast <||> pure (relaxedOption (downloadOptions opts)))
				( addmediafast linkurl mediaurl mediakey
				, downloadmedia linkurl mediaurl mediakey
				)
  where
	{- Avoids downloading any items that are already known to be
	 - associated with a file in the annex. -}
	checkknown url a = case dbhandle cache of
		Just db -> ifM (liftIO $ Db.isKnownUrl db url)
			( nothingtodo
			, case itemid todownload of
				Just iid ->
					ifM (liftIO $ Db.isKnownItemId db iid)
						( nothingtodo
						, a
						)
				Nothing -> a
			)
		Nothing -> a

	nothingtodo = recordsuccess >> stop

	recordsuccess = liftIO $ atomically $ putTMVar cv True

	startdownloadenclosure :: URLString -> CommandStart
	startdownloadenclosure url = checkknown url $ startUrlDownload cv todownload url $
		downloadEnclosure addunlockedmatcher opts cache cv todownload url

	downloadmedia linkurl mediaurl mediakey =
		ifM (useYoutubeDl (downloadOptions opts) <&&> youtubeDlSupported linkurl)
			( startUrlDownload cv todownload linkurl $
				withTmpWorkDir mediakey $ \workdir -> do
					dl <- youtubeDl linkurl (fromRawFilePath workdir) nullMeterUpdate
					case dl of
						Right (Just mediafile) -> do
							let ext = case takeExtension mediafile of
								[] -> ".m"
								s -> s
							runDownload todownload linkurl ext cache cv $ \f ->
								checkCanAdd (downloadOptions opts) f $ \canadd -> do
									addWorkTree canadd addunlockedmatcher webUUID mediaurl f mediakey (Just (toRawFilePath mediafile))
									return (Just [mediakey])
						-- youtube-dl didn't support it, so
						-- download it as if the link were
						-- an enclosure.
						Right Nothing -> contdownloadlink
						Left msg -> do
							warning $ UnquotedString $ linkurl ++ ": " ++ msg
							liftIO $ atomically $ putTMVar cv False
							next $ return False
			, startdownloadlink
			)
	  where
		startdownloadlink = checkRaw (Just linkurl) (downloadOptions opts) nothingtodo $
			startdownloadenclosure linkurl
		contdownloadlink = downloadEnclosure addunlockedmatcher opts cache cv todownload linkurl

	addmediafast linkurl mediaurl mediakey =
		ifM (useYoutubeDl (downloadOptions opts)
			<&&> (pure (youtubedlscraped todownload) <||> youtubeDlSupported linkurl))
			( startUrlDownload cv todownload linkurl $ do
				runDownload todownload linkurl ".m" cache cv $ \f ->
					checkCanAdd (downloadOptions opts) f $ \canadd -> do
						addWorkTree canadd addunlockedmatcher webUUID mediaurl f mediakey Nothing
						return (Just [mediakey])
			, startdownloadenclosure linkurl
			)
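
{- Downloads an enclosure url, either from the web or via a special
 - remote that claims the url. -}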
downloadEnclosure :: AddUnlockedMatcher -> ImportFeedOptions -> Cache -> TMVar Bool -> ToDownload -> URLString -> CommandPerform
downloadEnclosure addunlockedmatcher opts cache cv todownload url =
	runDownload todownload url (takeWhile (/= '?') $ takeExtension url) cache cv $ \f -> do
		let f' = fromRawFilePath f
		r <- checkClaimingUrl (downloadOptions opts) url
		if Remote.uuid r == webUUID || rawOption (downloadOptions opts)
			then checkRaw (Just url) (downloadOptions opts) (pure Nothing) $ do
				let dlopts = (downloadOptions opts)
					-- force using the filename
					-- chosen here
					{ fileOption = Just f'
					-- don't use youtube-dl
					, rawOption = True
					}
				let go urlinfo = Just . maybeToList <$> addUrlFile addunlockedmatcher dlopts url urlinfo f
				if relaxedOption (downloadOptions opts)
					then go Url.assumeUrlExists
					else Url.withUrlOptions (Url.getUrlInfo url) >>= \case
						Right urlinfo -> go urlinfo
						Left err -> do
							warning (UnquotedString err)
							return (Just [])
			else do
				res <- tryNonAsync $ maybe
					(giveup $ "unable to checkUrl of " ++ Remote.name r)
					(flip id url)
					(Remote.checkUrl r)
				case res of
					Left _ -> return (Just [])
					Right (UrlContents sz _) ->
						Just . maybeToList <$>
							downloadRemoteFile addunlockedmatcher r (downloadOptions opts) url f sz
					Right (UrlMulti l) -> do
						kl <- forM l $ \(url', sz, subf) ->
							let dest = f P.</> toRawFilePath (sanitizeFilePath subf)
							in downloadRemoteFile addunlockedmatcher r (downloadOptions opts) url' dest sz
						return $ Just $ if all isJust kl
							then catMaybes kl
							else []
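
{- Fills out the filename template for an item, runs the getter to
 - download it, and adds metadata to the resulting keys. The TMVar is
 - filled with True when the item was added (or needed no work), and
 - False when the download failed. -}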
runDownload
	:: ToDownload
	-> URLString
	-> String
	-> Cache
	-> TMVar Bool
	-> (RawFilePath -> Annex (Maybe [Key]))
	-> CommandPerform
runDownload todownload url extension cache cv getter = do
	dest <- makeunique (1 :: Integer) $
		feedFile (template cache) todownload extension
	case dest of
		Nothing -> do
			recordsuccess
			next $ return True
		Just f -> getter (toRawFilePath f) >>= \case
			Just ks
				-- Download problem.
				| null ks -> do
					broken <- checkFeedBroken (feedurl todownload)
					when broken $
						void $ feedProblem url "download failed"
					liftIO $ atomically $ putTMVar cv broken
					next $ return False
				| otherwise -> do
					forM_ ks $ \key ->
						ifM (annexGenMetaData <$> Annex.getGitConfig)
							( addMetaData key $ extractMetaData todownload
							, addMetaData key $ minimalMetaData todownload
							)
					recordsuccess
					next $ return True
			-- Was not able to add anything, but not
			-- because of a download problem.
			Nothing -> do
				recordsuccess
				next $ return False
  where
	recordsuccess = liftIO $ atomically $ putTMVar cv True

	forced = Annex.getRead Annex.force

	{- Find a unique filename to save the url to.
	 - If the file exists, prefixes it with a number.
	 - When forced, the file may already exist and have the same
	 - url, in which case Nothing is returned as it does not need
	 - to be re-downloaded. -}
	makeunique n file = ifM alreadyexists
		( ifM forced
			( lookupKey (toRawFilePath f) >>= \case
				Just k -> checksameurl k
				Nothing -> tryanother
			, tryanother
			)
		, return $ Just f
		)
	  where
		f = if n < 2
			then file
			else
				let (d, base) = splitFileName file
				in d </> show n ++ "_" ++ base
		tryanother = makeunique (n + 1) file
		alreadyexists = liftIO $ isJust <$> catchMaybeIO (R.getSymbolicLinkStatus (toRawFilePath f))
		checksameurl k = ifM (elem url . map fst . map getDownloader <$> getUrls k)
			( return Nothing
			, tryanother
			)
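
{- Wraps an item download in an "addurl" action item, for display and
 - json purposes, recording a feed problem when it throws an
 - exception. -}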
startUrlDownload :: TMVar Bool -> ToDownload -> URLString -> CommandPerform -> CommandStart
startUrlDownload cv todownload url a = do
	starting "addurl"
		(ActionItemOther (Just (UnquotedString url)))
		(SeekInput [feedurl todownload])
		(go `onException` recordfailure)
  where
	recordfailure = do
		void $ feedProblem url "download failed"
		liftIO $ atomically $ tryPutTMVar cv False
	go = do
		maybeAddJSONField "url" url
		a
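
-- For example, with the default template an item titled "Episode 1"
-- in a feed titled "Some Podcast", with an enclosure url ending in
-- ".mp3", is saved as "Some Podcast/Episode 1.mp3".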
defaultTemplate :: String
defaultTemplate = "${feedtitle}/${itemtitle}${extension}"

{- Generates a filename to use for a feed item by filling out the template.
 - The filename may not be unique. -}
feedFile :: Utility.Format.Format -> ToDownload -> String -> FilePath
feedFile tmpl i extension = sanitizeLeadingFilePathCharacter $
	Utility.Format.format tmpl $
		M.map sanitizeFilePathComponent $ M.fromList $ itemfields i ++
			[ ("extension", extension)
			, extractField "itempubdate" [itempubdatestring]
			, extractField "itempubyear" [itempubyear]
			, extractField "itempubmonth" [itempubmonth]
			, extractField "itempubday" [itempubday]
			, extractField "itempubhour" [itempubhour]
			, extractField "itempubminute" [itempubminute]
			, extractField "itempubsecond" [itempubsecond]
			]
  where
	pubdate = maybe Nothing eitherToMaybe (itempubdate i)

	itempubdatestring = case itempubdate i of
		Just (Right pd) -> Just $ formatTime defaultTimeLocale "%F" pd
		-- if date cannot be parsed, use the raw string
		Just (Left s) -> Just $ replace "/" "-" s
		Nothing -> Nothing

	(itempubyear, itempubmonth, itempubday) = case pubdate of
		Nothing -> (Nothing, Nothing, Nothing)
		Just pd ->
			let (y, m, d) = toGregorian (utctDay pd)
			in (Just (show y), Just (show m), Just (show d))

	(itempubhour, itempubminute, itempubsecond) = case pubdate of
		Nothing -> (Nothing, Nothing, Nothing)
		Just pd ->
			let tod = timeToTimeOfDay (utctDayTime pd)
			in ( Just (show (todHour tod))
			   , Just (show (todMin tod))
			   -- avoid fractional seconds
			   , Just (takeWhile (/= '.') (show (todSec tod)))
			   )

extractMetaData :: ToDownload -> MetaData
extractMetaData i = case itempubdate i of
	Just (Right d) -> unionMetaData meta (dateMetaData d meta)
	_ -> meta
  where
	tometa (k, v) = (mkMetaFieldUnchecked (T.pack k), S.singleton (toMetaValue (encodeBS v)))
	meta = MetaData $ M.fromList $ map tometa $ itemfields i
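
{- When annex.genmetadata is not enabled, only the feed's item id is
 - stored as metadata, so items that were already imported can still
 - be recognized on later runs even when their url changes. -}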
minimalMetaData :: ToDownload -> MetaData
minimalMetaData i = case itemid i of
	Nothing -> emptyMetaData
	Just iid -> MetaData $ M.singleton itemIdField
		(S.singleton $ toMetaValue iid)

noneValue :: String
noneValue = "none"
extractField :: String -> [Maybe String] -> (String, String)
extractField k [] = (k, noneValue)
extractField k (Just v:_)
	| not (null v) = (k, v)
extractField k (_:rest) = extractField k rest

extractFeedItemFields :: Feed -> Item -> URLString -> [(String, String)]
extractFeedItemFields f i u = map (uncurry extractField)
	[ ("feedurl", [Just u])
	, ("feedtitle", [feedtitle])
	, ("itemtitle", [itemtitle])
	, ("feedauthor", [feedauthor])
	, ("itemauthor", [itemauthor])
	, ("itemsummary", [decodeBS . fromFeedText <$> getItemSummary i])
	, ("itemdescription", [decodeBS . fromFeedText <$> getItemDescription i])
	, ("itemrights", [decodeBS . fromFeedText <$> getItemRights i])
	, ("itemid", [decodeBS . fromFeedText . snd <$> getItemId i])
	, ("title", [itemtitle, feedtitle])
	, ("author", [itemauthor, feedauthor])
	]
  where
	feedtitle = Just $ decodeBS $ fromFeedText $ getFeedTitle f
	itemtitle = decodeBS . fromFeedText <$> getItemTitle i
	feedauthor = decodeBS . fromFeedText <$> getFeedAuthor f
	itemauthor = decodeBS . fromFeedText <$> getItemAuthor i

playlistFields :: URLString -> YoutubePlaylistItem -> [(String, String)]
playlistFields u i = map (uncurry extractField)
	[ ("feedurl", [Just u])
	, ("feedtitle", [youtube_playlist_title i])
	, ("itemtitle", [youtube_title i])
	, ("feedauthor", [youtube_playlist_uploader i])
	, ("itemauthor", [youtube_playlist_uploader i])
	-- itemsummary omitted, no equivalent in yt-dlp data
	, ("itemdescription", [youtube_description i])
	, ("itemrights", [youtube_license i])
	, ("itemid", [youtube_url i])
	, ("title", [youtube_title i, youtube_playlist_title i])
	, ("author", [youtube_playlist_uploader i])
	]
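
{- Converts scraped playlist items to ToDownloads. Items that lack a
 - url are skipped. -}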
playlistDownloads :: URLString -> [YoutubePlaylistItem] -> [ToDownload]
playlistDownloads url = mapMaybe go
  where
	go i = do
		iurl <- youtube_url i
		return $ ToDownload
			{ feedurl = url
			, location = MediaLink iurl
			, itemid = Just (encodeBS iurl)
			, itempubdate =
				Right . posixSecondsToUTCTime . fromIntegral
					<$> youtube_timestamp i
			, itemfields = playlistFields url i
			, youtubedlscraped = True
			}

{- Called when there is a problem with a feed.
 -
 - If the feed has been broken for some time,
 - returns False, otherwise only warns. -}
feedProblem :: URLString -> String -> Annex Bool
feedProblem url message = ifM (checkFeedBroken url)
	( do
		warning $ UnquotedString $ message ++ " (having repeated problems with feed: " ++ url ++ ")"
		return False
	, do
		warning $ UnquotedString $ "warning: " ++ message ++ " (feed: " ++ url ++ ")"
		return True
	)

{- A feed is only broken if problems have occurred repeatedly, for at
 - least 23 hours. -}
checkFeedBroken :: URLString -> Annex Bool
checkFeedBroken url = checkFeedBroken' url =<< feedState url
checkFeedBroken' :: URLString -> RawFilePath -> Annex Bool
checkFeedBroken' url f = do
	prev <- maybe Nothing readish
		<$> liftIO (catchMaybeIO $ readFile (fromRawFilePath f))
	now <- liftIO getCurrentTime
	case prev of
		Nothing -> do
			writeLogFile f $ show now
			return False
		Just prevtime -> do
			let broken = diffUTCTime now prevtime > 60 * 60 * 23
			when broken $
				-- Avoid repeatedly complaining about
				-- broken feed.
				clearFeedProblem url
			return broken

clearFeedProblem :: URLString -> Annex ()
clearFeedProblem url =
	void $ liftIO . tryIO . removeFile . fromRawFilePath
		=<< feedState url

feedState :: URLString -> Annex RawFilePath
feedState url = fromRepo $ gitAnnexFeedState $ fromUrl url Nothing

{- The feed library parses the feed to Text, and does not use the
 - filesystem encoding to do it, so when the locale is not unicode
 - capable, a Text value can still include unicode characters.
 -
 - So, it's not safe to use T.unpack to convert that to a String,
 - because later use of that String by eg encodeBS will crash
 - with an encoding error. Use this instead.
 -
 - This should not be used on a Text that is read using the
 - filesystem encoding because it does not reverse that encoding.
 -}
fromFeedText :: T.Text -> B.ByteString
fromFeedText = TE.encodeUtf8