split out raw sql interface

Joey Hess 2016-01-11 15:52:11 -04:00
parent 2a27c77170
commit 8111eb21e6
2 changed files with 123 additions and 97 deletions

Database/Keys.hs

@@ -21,10 +21,9 @@ module Database.Keys (
 	addInodeCaches,
 	getInodeCaches,
 	removeInodeCaches,
-	AssociatedId,
-	ContentId,
 ) where
 
+import qualified Database.Keys.SQL as SQL
 import Database.Types
 import Database.Keys.Handle
 import qualified Database.Queue as H
@@ -42,24 +41,7 @@ import Git.Ref
 import Git.FilePath
 import Annex.CatFile
 
-import Database.Persist.TH
 import Database.Esqueleto hiding (Key)
-import Data.Time.Clock
 
-share [mkPersist sqlSettings, mkMigrate "migrateKeysDb"] [persistLowerCase|
-Associated
-  key SKey
-  file FilePath
-  KeyFileIndex key file
-Content
-  key SKey
-  cache SInodeCache
-  KeyCacheIndex key cache
-|]
-
-newtype ReadHandle = ReadHandle H.DbQueue
-
-type Reader v = ReadHandle -> Annex v
-
 {- Runs an action that reads from the database.
  -
@@ -72,7 +54,7 @@ type Reader v = ReadHandle -> Annex v
  -
  - Any queued writes will be flushed before the read.
  -}
-runReader :: Monoid v => Reader v -> Annex v
+runReader :: Monoid v => (SQL.ReadHandle -> Annex v) -> Annex v
 runReader a = do
 	h <- getDbHandle
 	withDbState h go
@@ -80,50 +62,39 @@ runReader a = do
 	go DbEmpty = return (mempty, DbEmpty)
 	go st@(DbOpen qh) = do
 		liftIO $ H.flushDbQueue qh
-		v <- a (ReadHandle qh)
+		v <- a (SQL.ReadHandle qh)
 		return (v, st)
 	go DbClosed = do
 		st' <- openDb False DbClosed
 		v <- case st' of
-			(DbOpen qh) -> a (ReadHandle qh)
+			(DbOpen qh) -> a (SQL.ReadHandle qh)
 			_ -> return mempty
 		return (v, st')
 
-readDb :: SqlPersistM a -> ReadHandle -> Annex a
-readDb a (ReadHandle h) = liftIO $ H.queryDbQueue h a
-
-newtype WriteHandle = WriteHandle H.DbQueue
-
-type Writer = WriteHandle -> Annex ()
+runReaderIO :: Monoid v => (SQL.ReadHandle -> IO v) -> Annex v
+runReaderIO a = runReader (liftIO . a)
 
 {- Runs an action that writes to the database. Typically this is used to
  - queue changes, which will be flushed at a later point.
  -
  - The database is created if it doesn't exist yet. -}
-runWriter :: Writer -> Annex ()
+runWriter :: (SQL.WriteHandle -> Annex ()) -> Annex ()
 runWriter a = do
 	h <- getDbHandle
 	withDbState h go
   where
 	go st@(DbOpen qh) = do
-		v <- a (WriteHandle qh)
+		v <- a (SQL.WriteHandle qh)
 		return (v, st)
 	go st = do
 		st' <- openDb True st
 		v <- case st' of
-			DbOpen qh -> a (WriteHandle qh)
+			DbOpen qh -> a (SQL.WriteHandle qh)
 			_ -> error "internal"
 		return (v, st')
 
-queueDb :: SqlPersistM () -> WriteHandle -> Annex ()
-queueDb a (WriteHandle h) = liftIO $ H.queueDb h checkcommit a
-  where
-	-- commit queue after 1000 changes or 5 minutes, whichever comes first
-	checkcommit sz lastcommittime
-		| sz > 1000 = return True
-		| otherwise = do
-			now <- getCurrentTime
-			return $ diffUTCTime lastcommittime now > 300
+runWriterIO :: (SQL.WriteHandle -> IO ()) -> Annex ()
+runWriterIO a = runWriter (liftIO . a)
 
 {- Gets the handle cached in Annex state; creates a new one if it's not yet
  - available, but doesn't open the database. -}
@@ -157,7 +128,7 @@ openDb createdb _ = withExclusiveLock gitAnnexKeysDbLock $ do
 		liftIO $ do
 			createDirectoryIfMissing True dbdir
 			H.initDb db $ void $
-				runMigrationSilent migrateKeysDb
+				runMigrationSilent SQL.migrateKeysDb
 			setAnnexDirPerm dbdir
 			setAnnexFilePerm db
 		open db
@@ -166,49 +137,20 @@ openDb createdb _ = withExclusiveLock gitAnnexKeysDbLock $ do
 	open db = liftIO $ DbOpen <$> H.openDbQueue db "content"
 
 addAssociatedFile :: Key -> TopFilePath -> Annex ()
-addAssociatedFile k f = runWriter $ addAssociatedFile' k f
-
-addAssociatedFile' :: Key -> TopFilePath -> Writer
-addAssociatedFile' k f = queueDb $ do
-	-- If the same file was associated with a different key before,
-	-- remove that.
-	delete $ from $ \r -> do
-		where_ (r ^. AssociatedFile ==. val (getTopFilePath f) &&. not_ (r ^. AssociatedKey ==. val sk))
-	void $ insertUnique $ Associated sk (getTopFilePath f)
-  where
-	sk = toSKey k
+addAssociatedFile k f = runWriterIO $ SQL.addAssociatedFile (toSKey k) f
 
 {- Note that the files returned were once associated with the key, but
  - some of them may not be any longer. -}
 getAssociatedFiles :: Key -> Annex [TopFilePath]
-getAssociatedFiles = runReader . getAssociatedFiles' . toSKey
-
-getAssociatedFiles' :: SKey -> Reader [TopFilePath]
-getAssociatedFiles' sk = readDb $ do
-	l <- select $ from $ \r -> do
-		where_ (r ^. AssociatedKey ==. val sk)
-		return (r ^. AssociatedFile)
-	return $ map (asTopFilePath . unValue) l
+getAssociatedFiles = runReaderIO . SQL.getAssociatedFiles . toSKey
 
 {- Gets any keys that are on record as having a particular associated file.
  - (Should be one or none but the database doesn't enforce that.) -}
 getAssociatedKey :: TopFilePath -> Annex [Key]
-getAssociatedKey = runReader . getAssociatedKey'
-
-getAssociatedKey' :: TopFilePath -> Reader [Key]
-getAssociatedKey' f = readDb $ do
-	l <- select $ from $ \r -> do
-		where_ (r ^. AssociatedFile ==. val (getTopFilePath f))
-		return (r ^. AssociatedKey)
-	return $ map (fromSKey . unValue) l
+getAssociatedKey = map fromSKey <$$> runReaderIO . SQL.getAssociatedKey
 
 removeAssociatedFile :: Key -> TopFilePath -> Annex ()
-removeAssociatedFile k = runWriter . removeAssociatedFile' (toSKey k)
-
-removeAssociatedFile' :: SKey -> TopFilePath -> Writer
-removeAssociatedFile' sk f = queueDb $
-	delete $ from $ \r -> do
-		where_ (r ^. AssociatedKey ==. val sk &&. r ^. AssociatedFile ==. val (getTopFilePath f))
+removeAssociatedFile k = runWriterIO . SQL.removeAssociatedFile (toSKey k)
 
 {- Find all unlocked associated files. This is expensive, and so normally
  - the associated files are updated incrementally when changes are noticed. -}
@@ -224,12 +166,12 @@ scanAssociatedFiles = whenM (isJust <$> inRepo Git.Branch.current) $
 			=<< catKey (Git.LsTree.sha i)
 	liftIO $ void cleanup
   where
-	dropallassociated = queueDb $
-		delete $ from $ \(_r :: SqlExpr (Entity Associated)) ->
+	dropallassociated h = liftIO $ flip SQL.queueDb h $
+		delete $ from $ \(_r :: SqlExpr (Entity SQL.Associated)) ->
 			return ()
 	isregfile i = Git.Types.toBlobType (Git.LsTree.mode i) == Just Git.Types.FileBlob
-	add h i k = flip queueDb h $
-		void $ insertUnique $ Associated
+	add h i k = liftIO $ flip SQL.queueDb h $
+		void $ insertUnique $ SQL.Associated
 			(toSKey k)
 			(getTopFilePath $ Git.LsTree.file i)
@@ -239,28 +181,12 @@ storeInodeCaches k fs = withTSDelta $ \d ->
 	addInodeCaches k . catMaybes =<< liftIO (mapM (`genInodeCache` d) fs)
 
 addInodeCaches :: Key -> [InodeCache] -> Annex ()
-addInodeCaches k is = runWriter $ addInodeCaches' (toSKey k) is
-
-addInodeCaches' :: SKey -> [InodeCache] -> Writer
-addInodeCaches' sk is = queueDb $
-	forM_ is $ \i -> insertUnique $ Content sk (toSInodeCache i)
+addInodeCaches k is = runWriterIO $ SQL.addInodeCaches (toSKey k) is
 
 {- A key may have multiple InodeCaches; one for the annex object, and one
  - for each pointer file that is a copy of it. -}
 getInodeCaches :: Key -> Annex [InodeCache]
-getInodeCaches = runReader . getInodeCaches' . toSKey
-
-getInodeCaches' :: SKey -> Reader [InodeCache]
-getInodeCaches' sk = readDb $ do
-	l <- select $ from $ \r -> do
-		where_ (r ^. ContentKey ==. val sk)
-		return (r ^. ContentCache)
-	return $ map (fromSInodeCache . unValue) l
+getInodeCaches = runReaderIO . SQL.getInodeCaches . toSKey
 
 removeInodeCaches :: Key -> Annex ()
-removeInodeCaches = runWriter . removeInodeCaches' . toSKey
-
-removeInodeCaches' :: SKey -> Writer
-removeInodeCaches' sk = queueDb $
-	delete $ from $ \r -> do
-		where_ (r ^. ContentKey ==. val sk)
+removeInodeCaches = runWriterIO . SQL.removeInodeCaches . toSKey

Database/Keys/SQL.hs (new file, 100 lines added)

@@ -0,0 +1,100 @@
{- Sqlite database of information about Keys
 -
 - Copyright 2015-2016 Joey Hess <id@joeyh.name>
 -
 - Licensed under the GNU GPL version 3 or higher.
 -}

{-# LANGUAGE QuasiQuotes, TypeFamilies, TemplateHaskell #-}
{-# LANGUAGE OverloadedStrings, GADTs, FlexibleContexts #-}
{-# LANGUAGE MultiParamTypeClasses, GeneralizedNewtypeDeriving #-}
{-# LANGUAGE RankNTypes, ScopedTypeVariables #-}

module Database.Keys.SQL where

import Database.Types
import qualified Database.Queue as H
import Utility.InodeCache
import Git.FilePath

import Database.Persist.TH
import Database.Esqueleto hiding (Key)
import Data.Time.Clock
import Control.Monad

share [mkPersist sqlSettings, mkMigrate "migrateKeysDb"] [persistLowerCase|
Associated
  key SKey
  file FilePath
  KeyFileIndex key file
Content
  key SKey
  cache SInodeCache
  KeyCacheIndex key cache
|]

newtype ReadHandle = ReadHandle H.DbQueue

readDb :: SqlPersistM a -> ReadHandle -> IO a
readDb a (ReadHandle h) = H.queryDbQueue h a

newtype WriteHandle = WriteHandle H.DbQueue

queueDb :: SqlPersistM () -> WriteHandle -> IO ()
queueDb a (WriteHandle h) = H.queueDb h checkcommit a
  where
	-- commit queue after 1000 changes or 5 minutes, whichever comes first
	checkcommit sz lastcommittime
		| sz > 1000 = return True
		| otherwise = do
			now <- getCurrentTime
			return $ diffUTCTime lastcommittime now > 300

addAssociatedFile :: SKey -> TopFilePath -> WriteHandle -> IO ()
addAssociatedFile sk f = queueDb $ do
	-- If the same file was associated with a different key before,
	-- remove that.
	delete $ from $ \r -> do
		where_ (r ^. AssociatedFile ==. val (getTopFilePath f) &&. not_ (r ^. AssociatedKey ==. val sk))
	void $ insertUnique $ Associated sk (getTopFilePath f)

{- Note that the files returned were once associated with the key, but
 - some of them may not be any longer. -}
getAssociatedFiles :: SKey -> ReadHandle -> IO [TopFilePath]
getAssociatedFiles sk = readDb $ do
	l <- select $ from $ \r -> do
		where_ (r ^. AssociatedKey ==. val sk)
		return (r ^. AssociatedFile)
	return $ map (asTopFilePath . unValue) l

{- Gets any keys that are on record as having a particular associated file.
 - (Should be one or none but the database doesn't enforce that.) -}
getAssociatedKey :: TopFilePath -> ReadHandle -> IO [SKey]
getAssociatedKey f = readDb $ do
	l <- select $ from $ \r -> do
		where_ (r ^. AssociatedFile ==. val (getTopFilePath f))
		return (r ^. AssociatedKey)
	return $ map unValue l

removeAssociatedFile :: SKey -> TopFilePath -> WriteHandle -> IO ()
removeAssociatedFile sk f = queueDb $
	delete $ from $ \r -> do
		where_ (r ^. AssociatedKey ==. val sk &&. r ^. AssociatedFile ==. val (getTopFilePath f))

addInodeCaches :: SKey -> [InodeCache] -> WriteHandle -> IO ()
addInodeCaches sk is = queueDb $
	forM_ is $ \i -> insertUnique $ Content sk (toSInodeCache i)

{- A key may have multiple InodeCaches; one for the annex object, and one
 - for each pointer file that is a copy of it. -}
getInodeCaches :: SKey -> ReadHandle -> IO [InodeCache]
getInodeCaches sk = readDb $ do
	l <- select $ from $ \r -> do
		where_ (r ^. ContentKey ==. val sk)
		return (r ^. ContentCache)
	return $ map (fromSInodeCache . unValue) l

removeInodeCaches :: SKey -> WriteHandle -> IO ()
removeInodeCaches sk = queueDb $
	delete $ from $ \r -> do
		where_ (r ^. ContentKey ==. val sk)
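Because the module above works in plain IO against an H.DbQueue, the raw interface can in principle be exercised outside the Annex monad entirely, which is the point of the split. A hedged sketch, assuming the caller already knows the path to the keys database; dumpAssociated is a hypothetical helper and is not part of this commit:

-- Illustrative only. H.openDbQueue, SQL.ReadHandle and SQL.getAssociatedFiles
-- are taken from the code above; the "content" table name matches what
-- Database.Keys passes when it opens its queue.
import qualified Database.Queue as H
import qualified Database.Keys.SQL as SQL
import Database.Types (SKey)
import Git.FilePath (getTopFilePath)

dumpAssociated :: FilePath -> SKey -> IO [FilePath]
dumpAssociated db sk = do
	qh <- H.openDbQueue db "content"
	fs <- SQL.getAssociatedFiles sk (SQL.ReadHandle qh)
	return (map getTopFilePath fs)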