{- git-annex log files
 -
 - Copyright 2018-2022 Joey Hess <id@joeyh.name>
 -
 - Licensed under the GNU AGPL version 3 or higher.
 -}

{-# LANGUAGE BangPatterns #-}

module Logs.File (
	writeLogFile,
	withLogHandle,
	appendLogFile,
	modifyLogFile,
	streamLogFile,
	checkLogFile,
	calcLogFile,
) where

import Annex.Common
import Annex.Perms
import Annex.LockFile
import Annex.ReplaceFile
import Utility.Tmp

import qualified Data.ByteString.Lazy as L
import qualified Data.ByteString.Lazy.Char8 as L8

-- | Writes content to a file, replacing the file atomically, and
-- making the new file have whatever permissions the git repository is
-- configured to use. Creates the parent directory when necessary.
writeLogFile :: RawFilePath -> String -> Annex ()
writeLogFile f c = createDirWhenNeeded f $ viaTmp writelog (fromRawFilePath f) c
  where
	writelog tmp c' = do
		liftIO $ writeFile tmp c'
		setAnnexFilePerm (toRawFilePath tmp)
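
-- A usage sketch (illustration only, not part of the original module):
-- writing a complete log file in one shot. The path here is a
-- hypothetical example; real callers get log locations from
-- Annex.Locations.
--
-- > writeExample :: Annex ()
-- > writeExample = writeLogFile (toRawFilePath "example.log") "one line\n"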

-- | Runs the action with a handle connected to a temp file.
-- The temp file replaces the log file once the action succeeds.
withLogHandle :: RawFilePath -> (Handle -> Annex a) -> Annex a
withLogHandle f a = do
	createAnnexDirectory (parentDir f)
	replaceGitAnnexDirFile (fromRawFilePath f) $ \tmp ->
		bracket (setup tmp) cleanup a
  where
	setup tmp = do
		setAnnexFilePerm (toRawFilePath tmp)
		liftIO $ openFile tmp WriteMode
	cleanup h = liftIO $ hClose h
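
-- A usage sketch (illustration only): streaming many lines to a log via
-- a handle, so the whole content need not be built up in memory first.
-- The path is a hypothetical example.
--
-- > writeManyLines :: [String] -> Annex ()
-- > writeManyLines ls = withLogHandle (toRawFilePath "example.log") $ \h ->
-- > 	liftIO $ mapM_ (hPutStrLn h) ls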

-- | Appends a line to a log file, first locking it to prevent
-- concurrent writers.
appendLogFile :: RawFilePath -> RawFilePath -> L.ByteString -> Annex ()
appendLogFile f lck c =
	createDirWhenNeeded f $
		withExclusiveLock lck $ do
			liftIO $ withFile f' AppendMode $
				\h -> L8.hPutStrLn h c
			setAnnexFilePerm (toRawFilePath f')
  where
	f' = fromRawFilePath f
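
-- A usage sketch (illustration only): recording one line per event.
-- The log and lock paths are hypothetical examples; real callers use
-- the accessors from Annex.Locations.
--
-- > recordEvent :: L.ByteString -> Annex ()
-- > recordEvent l = appendLogFile
-- > 	(toRawFilePath "example.log")
-- > 	(toRawFilePath "example.lck")
-- > 	l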

-- | Modifies a log file.
--
-- If the function does not make any changes, avoids rewriting the file
-- for speed, but that does mean the whole file content has to be buffered
-- in memory.
--
-- The file is locked to prevent concurrent writers, and it is written
-- atomically.
modifyLogFile :: RawFilePath -> RawFilePath -> ([L.ByteString] -> [L.ByteString]) -> Annex ()
modifyLogFile f lck modf = withExclusiveLock lck $ do
	ls <- liftIO $ fromMaybe []
		<$> tryWhenExists (L8.lines <$> L.readFile f')
	let ls' = modf ls
	when (ls' /= ls) $
		createDirWhenNeeded f $
			viaTmp writelog f' (L8.unlines ls')
  where
	f' = fromRawFilePath f
	writelog lf b = do
		liftIO $ L.writeFile lf b
		setAnnexFilePerm (toRawFilePath lf)
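
-- A usage sketch (illustration only): removing all lines matching a
-- predicate. Since the whole line list is passed through the function,
-- filtering, rewriting, and deduplicating are all expressed the same way.
-- The paths are hypothetical examples.
--
-- > dropMatching :: (L.ByteString -> Bool) -> Annex ()
-- > dropMatching p = modifyLogFile
-- > 	(toRawFilePath "example.log")
-- > 	(toRawFilePath "example.lck")
-- > 	(filter (not . p))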

-- | Checks the content of a log file to see if any line matches.
checkLogFile :: RawFilePath -> RawFilePath -> (L.ByteString -> Bool) -> Annex Bool
checkLogFile f lck matchf = withSharedLock lck $ bracket setup cleanup go
  where
	setup = liftIO $ tryWhenExists $ openFile f' ReadMode
	cleanup Nothing = noop
	cleanup (Just h) = liftIO $ hClose h
	go Nothing = return False
	go (Just h) = do
		-- Forced, so the answer is computed from the lazily read
		-- file before cleanup closes the handle.
		!r <- liftIO (any matchf . L8.lines <$> L.hGetContents h)
		return r
	f' = fromRawFilePath f
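
-- A usage sketch (illustration only): testing whether any logged line
-- starts with a given prefix. The paths are hypothetical examples.
--
-- > hasPrefix :: L.ByteString -> Annex Bool
-- > hasPrefix p = checkLogFile
-- > 	(toRawFilePath "example.log")
-- > 	(toRawFilePath "example.lck")
-- > 	(p `L.isPrefixOf`)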

-- | Folds a function over lines of a log file to calculate a value.
calcLogFile :: RawFilePath -> RawFilePath -> t -> (L.ByteString -> t -> t) -> Annex t
calcLogFile f lck start update = withSharedLock lck $ bracket setup cleanup go
  where
	setup = liftIO $ tryWhenExists $ openFile f' ReadMode
	cleanup Nothing = noop
	cleanup (Just h) = liftIO $ hClose h
	go Nothing = return start
	go (Just h) = go' start =<< liftIO (L8.lines <$> L.hGetContents h)
	go' v [] = return v
	go' v (l:ls) = do
		-- Strict, to avoid building up a chain of thunks while
		-- folding over a large log.
		let !v' = update l v
		go' v' ls
	f' = fromRawFilePath f
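
-- A usage sketch (illustration only): tallying the number of lines and
-- their total length in one pass over the log. The paths are
-- hypothetical examples.
--
-- > logStats :: Annex (Integer, Integer)
-- > logStats = calcLogFile
-- > 	(toRawFilePath "example.log")
-- > 	(toRawFilePath "example.lck")
-- > 	(0, 0)
-- > 	(\l (n, sz) -> (n + 1, sz + fromIntegral (L.length l)))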

-- | Streams lines from a log file, passing each line to the processor,
-- and then empties the file at the end.
--
-- If the processor is interrupted or throws an exception, the log file is
-- left unchanged.
--
-- There is also a finalizer, that is run once all lines have been
-- streamed. It is run even if the log file does not exist. If the
-- finalizer throws an exception, the log file is left unchanged.
--
-- Locking is used to prevent writes to the log file while this
-- is running.
streamLogFile :: FilePath -> RawFilePath -> Annex () -> (String -> Annex ()) -> Annex ()
streamLogFile f lck finalizer processor =
	withExclusiveLock lck $ bracketOnError setup cleanup go
  where
	setup = liftIO $ tryWhenExists $ openFile f ReadMode
	cleanup Nothing = noop
	cleanup (Just h) = liftIO $ hClose h
	go Nothing = finalizer
	go (Just h) = do
		mapM_ processor =<< liftIO (lines <$> hGetContents h)
		liftIO $ hClose h
		finalizer
		liftIO $ writeFile f ""
		setAnnexFilePerm (toRawFilePath f)
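
-- A usage sketch (illustration only): draining a work log, handling each
-- line, and running a final step once all lines have been processed.
-- The paths are hypothetical examples.
--
-- > drainWorkLog :: (String -> Annex ()) -> Annex ()
-- > drainWorkLog handler = streamLogFile
-- > 	"example.log"
-- > 	(toRawFilePath "example.lck")
-- > 	(return ())
-- > 	handler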

createDirWhenNeeded :: RawFilePath -> Annex () -> Annex ()
createDirWhenNeeded f a = a `catchNonAsync` \_e -> do
	-- Most of the time, the directory will exist, so this is only
	-- done if writing the file fails.
	createAnnexDirectory (parentDir f)
	a