{- Sqlite database of information about Keys
 -
 - Copyright 2015-2016 Joey Hess <id@joeyh.name>
 -
 - Licensed under the GNU GPL version 3 or higher.
 -}

{-# LANGUAGE QuasiQuotes, TypeFamilies, TemplateHaskell #-}
{-# LANGUAGE OverloadedStrings, GADTs, FlexibleContexts #-}
{-# LANGUAGE MultiParamTypeClasses, GeneralizedNewtypeDeriving #-}
{-# LANGUAGE RankNTypes, ScopedTypeVariables #-}

module Database.Keys (
	DbHandle,
	addAssociatedFile,
	getAssociatedFiles,
	getAssociatedKey,
	removeAssociatedFile,
	scanAssociatedFiles,
	storeInodeCaches,
	addInodeCaches,
	getInodeCaches,
	removeInodeCaches,
	AssociatedId,
	ContentId,
) where

import Database.Types
import Database.Keys.Handle
import qualified Database.Queue as H
import Locations
import Common.Annex hiding (delete)
import qualified Annex
import Annex.Perms
import Annex.LockFile
import Utility.InodeCache
import Annex.InodeSentinal
import qualified Git.Types
import qualified Git.LsTree
import qualified Git.Branch
import Git.Ref
import Git.FilePath
import Annex.CatFile

import Database.Persist.TH
import Database.Esqueleto hiding (Key)
import Data.Time.Clock

share [mkPersist sqlSettings, mkMigrate "migrateKeysDb"] [persistLowerCase|
Associated
  key SKey
  file FilePath
  KeyFileIndex key file
Content
  key SKey
  cache SInodeCache
  KeyCacheIndex key cache
|]
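
{- The schema above generates the Associated and Content record types,
 - their AssociatedId and ContentId key types (exported from this module),
 - and field references such as AssociatedKey, AssociatedFile, ContentKey
 - and ContentCache, which the esqueleto queries below use. -}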

newtype ReadHandle = ReadHandle H.DbQueue

type Reader v = ReadHandle -> Annex v

{- Runs an action that reads from the database.
 -
 - If the database doesn't already exist, it's not created; mempty is
 - returned instead. This way, when the keys database is not in use,
 - there's minimal overhead in checking it.
 -
 - If the database is already open, any queued writes are flushed to it
 - before the read, so the read sees a consistent view.
 -}
runReader :: Monoid v => Reader v -> Annex v
runReader a = do
	h <- getDbHandle
	withDbState h go
  where
	go DbEmpty = return (mempty, DbEmpty)
	go st@(DbOpen qh) = do
		liftIO $ H.flushDbQueue qh
		v <- a (ReadHandle qh)
		return (v, st)
	go DbClosed = do
		st' <- openDb False DbClosed
		v <- case st' of
			(DbOpen qh) -> a (ReadHandle qh)
			_ -> return mempty
		return (v, st')

readDb :: SqlPersistM a -> ReadHandle -> Annex a
readDb a (ReadHandle h) = liftIO $ H.queryDbQueue h a
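
{- A read accessor combines a readDb query with runReader; a minimal
 - sketch, where getFoo and getFoo' are hypothetical stand-ins for the
 - real accessors defined below (compare getAssociatedFiles'):
 -
 - > getFoo :: Key -> Annex [FilePath]
 - > getFoo = runReader . getFoo' . toSKey
 - >
 - > getFoo' :: SKey -> Reader [FilePath]
 - > getFoo' sk = readDb $ do
 - > 	l <- select $ from $ \r -> do
 - > 		where_ (r ^. AssociatedKey ==. val sk)
 - > 		return (r ^. AssociatedFile)
 - > 	return $ map unValue l
 -}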

newtype WriteHandle = WriteHandle H.DbQueue

type Writer = WriteHandle -> Annex ()

{- Runs an action that writes to the database. Typically this is used to
 - queue changes, which will be flushed at a later point.
 -
 - The database is created if it doesn't exist yet. -}
runWriter :: Writer -> Annex ()
runWriter a = do
	h <- getDbHandle
	withDbState h go
  where
	go st@(DbOpen qh) = do
		v <- a (WriteHandle qh)
		return (v, st)
	go st = do
		st' <- openDb True st
		v <- case st' of
			DbOpen qh -> a (WriteHandle qh)
			_ -> error "internal"
		return (v, st')

queueDb :: SqlPersistM () -> WriteHandle -> Annex ()
queueDb a (WriteHandle h) = liftIO $ H.queueDb h checkcommit a
  where
	-- commit queue after 1000 changes or 5 minutes, whichever comes first
	checkcommit sz lastcommittime
		| sz > 1000 = return True
		| otherwise = do
			now <- getCurrentTime
			return $ diffUTCTime now lastcommittime > 300
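
{- A write accessor has the same shape, but uses queueDb and runWriter, so
 - the change is queued rather than written immediately; a minimal sketch,
 - where setFoo and setFoo' are hypothetical (compare addAssociatedFile'
 - and removeAssociatedFile' below):
 -
 - > setFoo :: Key -> FilePath -> Annex ()
 - > setFoo k f = runWriter $ setFoo' (toSKey k) f
 - >
 - > setFoo' :: SKey -> FilePath -> Writer
 - > setFoo' sk f = queueDb $
 - > 	void $ insertUnique $ Associated sk f
 -}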

{- Gets the handle cached in Annex state; creates a new one if it's not yet
 - available, but doesn't open the database. -}
getDbHandle :: Annex DbHandle
getDbHandle = go =<< Annex.getState Annex.keysdbhandle
  where
	go (Just h) = pure h
	go Nothing = do
		h <- liftIO newDbHandle
		Annex.changeState $ \s -> s { Annex.keysdbhandle = Just h }
		return h

{- Opens the database, perhaps creating it if it doesn't exist yet.
 -
 - Multiple readers and writers can have the database open at the same
 - time. Database.Handle deals with the concurrency issues.
 - The lock is held while opening the database, so that when
 - the database doesn't exist yet, one caller wins the lock and
 - can create it undisturbed.
 -}
openDb :: Bool -> DbState -> Annex DbState
openDb _ st@(DbOpen _) = return st
openDb False DbEmpty = return DbEmpty
openDb createdb _ = withExclusiveLock gitAnnexKeysDbLock $ do
	dbdir <- fromRepo gitAnnexKeysDb
	let db = dbdir </> "db"
	dbexists <- liftIO $ doesFileExist db
	case (dbexists, createdb) of
		(True, _) -> open db
		(False, True) -> do
			liftIO $ do
				createDirectoryIfMissing True dbdir
				H.initDb db $ void $
					runMigrationSilent migrateKeysDb
			setAnnexDirPerm dbdir
			setAnnexFilePerm db
			open db
		(False, False) -> return DbEmpty
  where
	open db = liftIO $ DbOpen <$> H.openDbQueue db "content"
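
{- A note on how the DbState values flow, based on the code above:
 - runReader passes False to openDb, so a read of a nonexistent database
 - ends up in DbEmpty and creates nothing; runWriter passes True, so a
 - write creates the database if necessary; once in DbOpen, both readers
 - and writers reuse the open queue. -}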

addAssociatedFile :: Key -> FilePath -> Annex ()
addAssociatedFile k f = runWriter $ addAssociatedFile' k f

addAssociatedFile' :: Key -> FilePath -> Writer
addAssociatedFile' k f = queueDb $ do
	-- If the same file was associated with a different key before,
	-- remove that.
	delete $ from $ \r -> do
		where_ (r ^. AssociatedFile ==. val f &&. not_ (r ^. AssociatedKey ==. val sk))
	void $ insertUnique $ Associated sk f
  where
	sk = toSKey k

{- Note that the files returned were once associated with the key, but
 - some of them may not be any longer. -}
getAssociatedFiles :: Key -> Annex [FilePath]
getAssociatedFiles = runReader . getAssociatedFiles' . toSKey

getAssociatedFiles' :: SKey -> Reader [FilePath]
getAssociatedFiles' sk = readDb $ do
	l <- select $ from $ \r -> do
		where_ (r ^. AssociatedKey ==. val sk)
		return (r ^. AssociatedFile)
	return $ map unValue l

{- Gets any keys that are on record as having a particular associated file.
 - (Should be one or none, but the database doesn't enforce that.) -}
getAssociatedKey :: FilePath -> Annex [Key]
getAssociatedKey = runReader . getAssociatedKey'

getAssociatedKey' :: FilePath -> Reader [Key]
getAssociatedKey' f = readDb $ do
	l <- select $ from $ \r -> do
		where_ (r ^. AssociatedFile ==. val f)
		return (r ^. AssociatedKey)
	return $ map (fromSKey . unValue) l

removeAssociatedFile :: Key -> FilePath -> Annex ()
removeAssociatedFile k = runWriter . removeAssociatedFile' (toSKey k)

removeAssociatedFile' :: SKey -> FilePath -> Writer
removeAssociatedFile' sk f = queueDb $
	delete $ from $ \r -> do
		where_ (r ^. AssociatedKey ==. val sk &&. r ^. AssociatedFile ==. val f)
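
{- Illustrative use of the public interface above, with a hypothetical key
 - and file path:
 -
 - > do
 - > 	addAssociatedFile key "foo/bar"
 - > 	fs <- getAssociatedFiles key      -- includes "foo/bar"
 - > 	ks <- getAssociatedKey "foo/bar"  -- includes key
 - > 	removeAssociatedFile key "foo/bar"
 -}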

{- Finds all unlocked associated files. This is expensive, so normally
 - the associated files are updated incrementally when changes are noticed. -}
scanAssociatedFiles :: Annex ()
scanAssociatedFiles = whenM (isJust <$> inRepo Git.Branch.current) $
	runWriter $ \h -> do
		showSideAction "scanning for unlocked files"
		dropallassociated h
		(l, cleanup) <- inRepo $ Git.LsTree.lsTree headRef
		forM_ l $ \i ->
			when (isregfile i) $
				maybe noop (add h i)
					=<< catKey (Git.Types.Ref $ Git.LsTree.sha i)
		liftIO $ void cleanup
  where
	dropallassociated = queueDb $
		delete $ from $ \(_r :: SqlExpr (Entity Associated)) ->
			return ()
	isregfile i = Git.Types.toBlobType (Git.LsTree.mode i) == Just Git.Types.FileBlob
	add h i k = flip queueDb h $
		void $ insertUnique $ Associated
			(toSKey k)
			(getTopFilePath $ Git.LsTree.file i)

{- Stats the files, and stores their InodeCaches. -}
storeInodeCaches :: Key -> [FilePath] -> Annex ()
storeInodeCaches k fs = withTSDelta $ \d ->
	addInodeCaches k . catMaybes =<< liftIO (mapM (`genInodeCache` d) fs)

addInodeCaches :: Key -> [InodeCache] -> Annex ()
addInodeCaches k is = runWriter $ addInodeCaches' (toSKey k) is

addInodeCaches' :: SKey -> [InodeCache] -> Writer
addInodeCaches' sk is = queueDb $
	forM_ is $ \i -> insertUnique $ Content sk (toSInodeCache i)

{- A key may have multiple InodeCaches; one for the annex object, and one
 - for each pointer file that is a copy of it. -}
getInodeCaches :: Key -> Annex [InodeCache]
getInodeCaches = runReader . getInodeCaches' . toSKey

getInodeCaches' :: SKey -> Reader [InodeCache]
getInodeCaches' sk = readDb $ do
	l <- select $ from $ \r -> do
		where_ (r ^. ContentKey ==. val sk)
		return (r ^. ContentCache)
	return $ map (fromSInodeCache . unValue) l

removeInodeCaches :: Key -> Annex ()
removeInodeCaches = runWriter . removeInodeCaches' . toSKey

removeInodeCaches' :: SKey -> Writer
removeInodeCaches' sk = queueDb $
	delete $ from $ \r -> do
		where_ (r ^. ContentKey ==. val sk)
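
{- Illustrative use of the inode cache interface above, with a hypothetical
 - key and paths; storeInodeCaches stats the files itself, while
 - addInodeCaches records caches that were already generated:
 -
 - > do
 - > 	storeInodeCaches key ["foo", "bar"]
 - > 	cs <- getInodeCaches key
 - > 	removeInodeCaches key
 -}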