assistant: Fix race in v6 mode that caused downloaded file content to sometimes not replace pointer files.

The keys database handle needs to be closed after merging, because the
smudge filter, in another process, updates the database. Old cached info
can be read for a while from the open database handle; closing it ensures
that the info written by the smudge filter is available.

This is pretty horribly ad-hoc, and it's especially nasty that the
transferrer closes the database every time.
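
To make the race concrete, here is a minimal, self-contained sketch of the underlying pattern. It is not git-annex code: the names (readThrough, closeHandle), the "keysdb" file standing in for the SQLite keys database, and the MVar playing the role of Database.Keys.Handle are all invented for illustration. A lazily opened handle caches what it read, so a reader keeps returning stale data after another process writes; explicitly closing the handle forces the next access to reopen and re-read.

import Control.Concurrent.MVar

-- Stand-in for the keys database handle: either closed, or open with
-- whatever was read from the "database" cached in memory.
data DbState = DbClosed | DbOpen String

newHandle :: IO (MVar DbState)
newHandle = newMVar DbClosed

-- Read through the handle, opening it on first use and caching the result.
-- While the handle stays DbOpen, writes made by another process are not seen.
readThrough :: MVar DbState -> IO String
readThrough h = modifyMVar h $ \st -> case st of
	DbOpen cached -> return (st, cached)
	DbClosed -> do
		fresh <- readFile "keysdb"
		-- force the whole read so the file handle is closed
		length fresh `seq` return (DbOpen fresh, fresh)

-- The fix, in miniature: drop the cached state at a point where another
-- process may have written, so the next read reopens and sees the new data.
closeHandle :: MVar DbState -> IO ()
closeHandle h = modifyMVar_ h (const (return DbClosed))

main :: IO ()
main = do
	writeFile "keysdb" "old"
	h <- newHandle
	_ <- readThrough h        -- opens the handle, caches "old"
	writeFile "keysdb" "new"  -- another process (the smudge filter) writes
	stale <- readThrough h    -- still "old", served from the open handle
	closeHandle h             -- what this commit adds after merge/transfer
	fresh <- readThrough h    -- reopens, sees "new"
	putStrLn (stale ++ " -> " ++ fresh)
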
Joey Hess 2016-05-16 14:49:12 -04:00
parent fb8ab2469d
commit 5f0b551c0c
5 changed files with 38 additions and 4 deletions


@@ -19,6 +19,7 @@ import qualified Annex.Branch
import qualified Git
import qualified Git.Branch
import qualified Command.Sync
import qualified Database.Keys
import Annex.TaggedPush
import Remote (remoteFromUUID)
@@ -89,6 +90,12 @@ onChange file
currbranch mergeConfig
Git.Branch.AutomaticCommit
changedbranch
-- Merging can cause new associated files
-- to appear and the smudge filter will
-- add them to the database. To ensure that
-- this process sees those changes, close
-- the database if it was open.
liftAnnex $ Database.Keys.closeDb
mergecurrent _ = noop
handleDesynced = case fromTaggedBranch changedbranch of


@@ -16,6 +16,7 @@ import Annex.Transfer
import qualified Remote
import Utility.SimpleProtocol (dupIoHandles)
import Git.Types (RemoteName)
import qualified Database.Keys
data TransferRequest = TransferRequest Direction Remote Key AssociatedFile
@@ -41,8 +42,13 @@ start = do
return ok
| otherwise = notifyTransfer direction file $
download (Remote.uuid remote) key file forwardRetry observer $ \p ->
getViaTmp (RemoteVerify remote) key $ \t ->
Remote.retrieveKeyFile remote key file t p
getViaTmp (RemoteVerify remote) key $ \t -> do
r <- Remote.retrieveKeyFile remote key file t p
-- Make sure we get the current
-- associated files data for the key,
-- not old cached data.
Database.Keys.closeDb
return r
observer False t tinfo = recordFailedTransfer t tinfo
observer True _ _ = noop


@@ -9,6 +9,7 @@
module Database.Keys (
DbHandle,
closeDb,
addAssociatedFile,
getAssociatedFiles,
getAssociatedKey,
@@ -137,6 +138,16 @@ openDb createdb _ = catchPermissionDenied permerr $ withExclusiveLock gitAnnexKe
False -> return DbUnavailable
True -> throwM e
{- Closes the database if it was open. Any writes will be flushed to it.
-
- This does not normally need to be called; the database will auto-close
- when the handle is garbage collected. However, this can be used to
- force a re-read of the database, in case another process has written
- data to it.
-}
closeDb :: Annex ()
closeDb = liftIO . closeDbHandle =<< getDbHandle
addAssociatedFile :: Key -> TopFilePath -> Annex ()
addAssociatedFile k f = runWriterIO $ SQL.addAssociatedFile (toIKey k) f
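
As a usage note, a hedged fragment rather than real patch code: it is not standalone (it assumes git-annex's Annex monad, Key, and TopFilePath are in scope), it assumes getAssociatedFiles from the export list above has a Key -> Annex [TopFilePath] shape, and the first step is a made-up placeholder for whatever lets another process run the smudge filter.

refreshedAssociatedFiles :: Key -> Annex [TopFilePath]
refreshedAssociatedFiles k = do
	stepWhereAnotherProcessMayWrite  -- hypothetical placeholder, e.g. a merge
	-- close first, so the query below reopens the database and sees rows
	-- written by the other process instead of old cached data
	Database.Keys.closeDb
	Database.Keys.getAssociatedFiles k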


@@ -11,6 +11,7 @@ module Database.Keys.Handle (
DbState(..),
withDbState,
flushDbQueue,
closeDbHandle,
) where
import qualified Database.Queue as H
@@ -38,8 +39,7 @@ newDbHandle = DbHandle <$> newMVar DbClosed
withDbState
:: (MonadIO m, MonadCatch m)
=> DbHandle
-> (DbState
-> m (v, DbState))
-> (DbState -> m (v, DbState))
-> m v
withDbState (DbHandle mvar) a = do
st <- liftIO $ takeMVar mvar
@@ -55,3 +55,11 @@ flushDbQueue (DbHandle mvar) = go =<< readMVar mvar
where
go (DbOpen qh) = H.flushDbQueue qh
go _ = return ()
closeDbHandle :: DbHandle -> IO ()
closeDbHandle h = withDbState h go
where
go (DbOpen qh) = do
H.closeDbQueue qh
return ((), DbClosed)
go st = return ((), st)

debian/changelog

@@ -8,6 +8,8 @@ git-annex (6.20160512) UNRELEASED; urgency=medium
original branch.
* assistant: Fix bug that caused v6 pointer files to be annexed by the
assistant.
* assistant: Fix race in v6 mode that caused downloaded file content to
sometimes not replace pointer files.
-- Joey Hess <id@joeyh.name> Wed, 11 May 2016 16:08:38 -0400