improve data types for sqlite

This is a non-backwards compatible change, so not suitable for merging
without an annex.version bump and transition code. Not yet tested.

This improves performance of git-annex benchmark --databases
across the board by 10-25%, since e.g. a Key now round-trips as a
ByteString.

(serializeKey' produces a lazy ByteString, so there is still a
copy involved in converting it to a strict ByteString. It may be faster
to switch to using bytestring-strict-builder.)
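For reference, the copy in question is just this strict conversion,
mirroring the new PersistField instance in this diff (keyBlob is a
hypothetical name for illustration):

    import qualified Data.ByteString as S
    import qualified Data.ByteString.Lazy as L

    -- serializeKey' builds a lazy ByteString via a Builder; persistent
    -- needs a strict one, so toStrict copies the chunks into one buffer.
    keyBlob :: Key -> S.ByteString
    keyBlob = L.toStrict . serializeKey'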

FilePath and Key are both stored as blobs. This avoids mojibake in some
situations. It would be possible to use varchar instead, if persistent
could avoid converting that to Text, but it seems there is no good
way to do so. See doc/todo/sqlite_database_improvements.mdwn
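The shape of the change, as made in Database/Types.hs later in this diff:

    instance PersistField Key where
        toPersistValue = toPersistValue . L.toStrict . serializeKey'
        fromPersistValue b = fromPersistValue b >>= parse
          where
            parse = either (Left . T.pack) Right . A.parseOnly keyParser

    -- A key can contain arbitrarily encoded characters, so store it as a
    -- blob to avoid encoding problems.
    instance PersistFieldSql Key where
        sqlType _ = SqlBlob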

Eliminated some ugly artifacts of using Read/Show serialization;
constructors and quoted strings are no longer stored in sqlite.
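For example, with a hypothetical key, the value stored in sqlite changes
like this (the removed SKey wrapper appears in the Database/Types.hs hunk
below):

    -- Old, via derivePersistField and Show: constructor and quoting stored
    --   SKey "SHA256E-s100--0123456789abcdef"
    -- New, via the Key PersistField: just the key's own serialization
    --   SHA256E-s100--0123456789abcdef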

Renamed SRef to SSha to reflect that it is only ever a git sha,
not a ref name. Since it is limited to the characters in a sha,
it is not affected by mojibake, so still uses String.
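For reference, the renamed type as defined in Database/Types.hs below:

    -- A serialized git Sha
    newtype SSha = SSha String
        deriving (Eq, Show)

    toSSha :: Sha -> SSha
    toSSha (Ref s) = SSha s

    fromSSha :: SSha -> Ref
    fromSSha (SSha s) = Ref s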
Joey Hess 2019-10-29 12:28:01 -04:00
parent e1b21a0491
commit c35a9047d3
GPG key ID: DB12DB0FF05F8F38
9 changed files with 135 additions and 189 deletions


@@ -19,7 +19,6 @@ import Git.FilePath
import qualified Git.Ref
import qualified Git.LsTree
import qualified Git.Types
import Database.Types
import qualified Database.Keys
import qualified Database.Keys.SQL
@@ -91,7 +90,7 @@ scanUnlockedFiles replacefiles = whenM (inRepo Git.Ref.headExists) $ do
add i k = do
let tf = Git.LsTree.file i
Database.Keys.runWriter $
liftIO . Database.Keys.SQL.addAssociatedFileFast (toIKey k) tf
liftIO . Database.Keys.SQL.addAssociatedFileFast k tf
whenM (pure replacefiles <&&> inAnnex k) $ do
f <- fromRepo $ fromTopFilePath tf
destmode <- liftIO $ catchMaybeIO $ fileMode <$> getFileStatus f


@@ -16,7 +16,6 @@ import Types.Benchmark
import qualified Database.Keys.SQL as SQL
import qualified Database.Queue as H
import Database.Init
import Database.Types
import Utility.Tmp.Dir
import Git.FilePath
import Types.Key
@@ -55,11 +54,11 @@ benchmarkDbs _ = error "not built with criterion, cannot benchmark"
getAssociatedFilesHitBench :: BenchDb -> Benchmark
getAssociatedFilesHitBench (BenchDb h num) = bench ("getAssociatedFiles from " ++ show num ++ " (hit)") $ nfIO $ do
n <- getStdRandom (randomR (1,num))
SQL.getAssociatedFiles (toIKey (keyN n)) (SQL.ReadHandle h)
SQL.getAssociatedFiles (keyN n) (SQL.ReadHandle h)
getAssociatedFilesMissBench :: BenchDb -> Benchmark
getAssociatedFilesMissBench (BenchDb h num) = bench ("getAssociatedFiles from " ++ show num ++ " (miss)") $ nfIO $
SQL.getAssociatedFiles (toIKey keyMiss) (SQL.ReadHandle h)
SQL.getAssociatedFiles keyMiss (SQL.ReadHandle h)
getAssociatedKeyHitBench :: BenchDb -> Benchmark
getAssociatedKeyHitBench (BenchDb h num) = bench ("getAssociatedKey from " ++ show num ++ " (hit)") $ nfIO $ do
@@ -73,19 +72,19 @@ getAssociatedKeyMissBench (BenchDb h num) = bench ("getAssociatedKey from " ++ s
addAssociatedFileOldBench :: BenchDb -> Benchmark
addAssociatedFileOldBench (BenchDb h num) = bench ("addAssociatedFile to " ++ show num ++ " (old)") $ nfIO $ do
n <- getStdRandom (randomR (1,num))
SQL.addAssociatedFile (toIKey (keyN n)) (fileN n) (SQL.WriteHandle h)
SQL.addAssociatedFile (keyN n) (fileN n) (SQL.WriteHandle h)
H.flushDbQueue h
addAssociatedFileNewBench :: BenchDb -> Benchmark
addAssociatedFileNewBench (BenchDb h num) = bench ("addAssociatedFile to " ++ show num ++ " (new)") $ nfIO $ do
n <- getStdRandom (randomR (1,num))
SQL.addAssociatedFile (toIKey (keyN n)) (fileN (n+1)) (SQL.WriteHandle h)
SQL.addAssociatedFile (keyN n) (fileN (n+1)) (SQL.WriteHandle h)
H.flushDbQueue h
populateAssociatedFiles :: H.DbQueue -> Int -> IO ()
populateAssociatedFiles h num = do
forM_ [1..num] $ \n ->
SQL.addAssociatedFile (toIKey (keyN n)) (fileN n) (SQL.WriteHandle h)
SQL.addAssociatedFile (keyN n) (fileN n) (SQL.WriteHandle h)
H.flushDbQueue h
keyN :: Int -> Key


@@ -52,11 +52,11 @@ share [mkPersist sqlSettings, mkMigrate "migrateContentIdentifier"] [persistLowe
ContentIdentifiers
remote UUID
cid ContentIdentifier
key IKey
key Key
-- The last git-annex branch tree sha that was used to update
-- ContentIdentifiers
AnnexBranch
tree SRef
tree SSha
UniqueTree tree
|]
@@ -92,13 +92,13 @@ flushDbQueue (ContentIdentifierHandle h) = H.flushDbQueue h
-- Be sure to also update the git-annex branch when using this.
recordContentIdentifier :: ContentIdentifierHandle -> RemoteStateHandle -> ContentIdentifier -> Key -> IO ()
recordContentIdentifier h (RemoteStateHandle u) cid k = queueDb h $ do
void $ insert_ $ ContentIdentifiers u cid (toIKey k)
void $ insert_ $ ContentIdentifiers u cid k
getContentIdentifiers :: ContentIdentifierHandle -> RemoteStateHandle -> Key -> IO [ContentIdentifier]
getContentIdentifiers (ContentIdentifierHandle h) (RemoteStateHandle u) k =
H.queryDbQueue h $ do
l <- selectList
[ ContentIdentifiersKey ==. toIKey k
[ ContentIdentifiersKey ==. k
, ContentIdentifiersRemote ==. u
] []
return $ map (contentIdentifiersCid . entityVal) l
@@ -110,18 +110,18 @@ getContentIdentifierKeys (ContentIdentifierHandle h) (RemoteStateHandle u) cid =
[ ContentIdentifiersCid ==. cid
, ContentIdentifiersRemote ==. u
] []
return $ map (fromIKey . contentIdentifiersKey . entityVal) l
return $ map (contentIdentifiersKey . entityVal) l
recordAnnexBranchTree :: ContentIdentifierHandle -> Sha -> IO ()
recordAnnexBranchTree h s = queueDb h $ do
deleteWhere ([] :: [Filter AnnexBranch])
void $ insertUnique $ AnnexBranch $ toSRef s
void $ insertUnique $ AnnexBranch $ toSSha s
getAnnexBranchTree :: ContentIdentifierHandle -> IO Sha
getAnnexBranchTree (ContentIdentifierHandle h) = H.queryDbQueue h $ do
l <- selectList ([] :: [Filter AnnexBranch]) []
case l of
(s:[]) -> return $ fromSRef $ annexBranchTree $ entityVal s
(s:[]) -> return $ fromSSha $ annexBranchTree $ entityVal s
_ -> return emptyTree
{- Check if the git-annex branch has been updated and the database needs


@@ -63,7 +63,7 @@ data ExportHandle = ExportHandle H.DbQueue UUID
share [mkPersist sqlSettings, mkMigrate "migrateExport"] [persistLowerCase|
-- Files that have been exported to the remote and are present on it.
Exported
key IKey
key Key
file SFilePath
ExportedIndex key file
-- Directories that exist on the remote, and the files that are in them.
@@ -74,12 +74,12 @@ ExportedDirectory
-- The content of the tree that has been exported to the remote.
-- Not all of these files are necessarily present on the remote yet.
ExportTree
key IKey
key Key
file SFilePath
ExportTreeIndex key file
-- The tree stored in ExportTree
ExportTreeCurrent
tree SRef
tree SSha
UniqueTree tree
|]
@@ -115,43 +115,40 @@ flushDbQueue (ExportHandle h _) = H.flushDbQueue h
recordExportTreeCurrent :: ExportHandle -> Sha -> IO ()
recordExportTreeCurrent h s = queueDb h $ do
deleteWhere ([] :: [Filter ExportTreeCurrent])
void $ insertUnique $ ExportTreeCurrent $ toSRef s
void $ insertUnique $ ExportTreeCurrent $ toSSha s
getExportTreeCurrent :: ExportHandle -> IO (Maybe Sha)
getExportTreeCurrent (ExportHandle h _) = H.queryDbQueue h $ do
l <- selectList ([] :: [Filter ExportTreeCurrent]) []
case l of
(s:[]) -> return $ Just $ fromSRef $ exportTreeCurrentTree $ entityVal s
(s:[]) -> return $ Just $ fromSSha $
exportTreeCurrentTree $ entityVal s
_ -> return Nothing
addExportedLocation :: ExportHandle -> Key -> ExportLocation -> IO ()
addExportedLocation h k el = queueDb h $ do
void $ insertUnique $ Exported ik ef
void $ insertUnique $ Exported k ef
let edirs = map
(\ed -> ExportedDirectory (toSFilePath (fromExportDirectory ed)) ef)
(exportDirectories el)
putMany edirs
where
ik = toIKey k
ef = toSFilePath (fromExportLocation el)
removeExportedLocation :: ExportHandle -> Key -> ExportLocation -> IO ()
removeExportedLocation h k el = queueDb h $ do
deleteWhere [ExportedKey ==. ik, ExportedFile ==. ef]
deleteWhere [ExportedKey ==. k, ExportedFile ==. ef]
let subdirs = map (toSFilePath . fromExportDirectory)
(exportDirectories el)
deleteWhere [ExportedDirectoryFile ==. ef, ExportedDirectorySubdir <-. subdirs]
where
ik = toIKey k
ef = toSFilePath (fromExportLocation el)
{- Note that this does not see recently queued changes. -}
getExportedLocation :: ExportHandle -> Key -> IO [ExportLocation]
getExportedLocation (ExportHandle h _) k = H.queryDbQueue h $ do
l <- selectList [ExportedKey ==. ik] []
l <- selectList [ExportedKey ==. k] []
return $ map (mkExportLocation . fromSFilePath . exportedFile . entityVal) l
where
ik = toIKey k
{- Note that this does not see recently queued changes. -}
isExportDirectoryEmpty :: ExportHandle -> ExportDirectory -> IO Bool
@@ -164,10 +161,8 @@ isExportDirectoryEmpty (ExportHandle h _) d = H.queryDbQueue h $ do
{- Get locations in the export that might contain a key. -}
getExportTree :: ExportHandle -> Key -> IO [ExportLocation]
getExportTree (ExportHandle h _) k = H.queryDbQueue h $ do
l <- selectList [ExportTreeKey ==. ik] []
l <- selectList [ExportTreeKey ==. k] []
return $ map (mkExportLocation . fromSFilePath . exportTreeFile . entityVal) l
where
ik = toIKey k
{- Get keys that might be currently exported to a location.
-
@@ -178,23 +173,21 @@ getExportTree (ExportHandle h _) k = H.queryDbQueue h $ do
-}
getExportTreeKey :: ExportHandle -> ExportLocation -> IO [Key]
getExportTreeKey (ExportHandle h _) el = H.queryDbQueue h $ do
map (fromIKey . exportTreeKey . entityVal)
map (exportTreeKey . entityVal)
<$> selectList [ExportTreeFile ==. ef] []
where
ef = toSFilePath (fromExportLocation el)
addExportTree :: ExportHandle -> Key -> ExportLocation -> IO ()
addExportTree h k loc = queueDb h $
void $ insertUnique $ ExportTree ik ef
void $ insertUnique $ ExportTree k ef
where
ik = toIKey k
ef = toSFilePath (fromExportLocation loc)
removeExportTree :: ExportHandle -> Key -> ExportLocation -> IO ()
removeExportTree h k loc = queueDb h $
deleteWhere [ExportTreeKey ==. ik, ExportTreeFile ==. ef]
deleteWhere [ExportTreeKey ==. k, ExportTreeFile ==. ef]
where
ik = toIKey k
ef = toSFilePath (fromExportLocation loc)
-- An action that is passed the old and new values that were exported,


@@ -1,6 +1,6 @@
{- Sqlite database used for incremental fsck.
-
- Copyright 2015 Joey Hess <id@joeyh.name>
- Copyright 2015-2019 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU AGPL version 3 or higher.
-}
@@ -39,7 +39,7 @@ data FsckHandle = FsckHandle H.DbQueue UUID
- of the latest incremental fsck pass. -}
share [mkPersist sqlSettings, mkMigrate "migrateFsck"] [persistLowerCase|
Fscked
key SKey
key Key
UniqueKey key
|]
@@ -74,10 +74,8 @@ closeDb (FsckHandle h u) = do
addDb :: FsckHandle -> Key -> IO ()
addDb (FsckHandle h _) k = H.queueDb h checkcommit $
void $ insertUnique $ Fscked sk
void $ insertUnique $ Fscked k
where
sk = toSKey k
-- commit queue after 1000 files or 5 minutes, whichever comes first
checkcommit sz lastcommittime
| sz > 1000 = return True
@@ -87,9 +85,9 @@ addDb (FsckHandle h _) k = H.queueDb h checkcommit $
{- Doesn't know about keys that were just added with addDb. -}
inDb :: FsckHandle -> Key -> IO Bool
inDb (FsckHandle h _) = H.queryDbQueue h . inDb' . toSKey
inDb (FsckHandle h _) = H.queryDbQueue h . inDb'
inDb' :: SKey -> SqlPersistM Bool
inDb' sk = do
r <- selectList [FsckedKey ==. sk] []
inDb' :: Key -> SqlPersistM Bool
inDb' k = do
r <- selectList [FsckedKey ==. k] []
return $ not $ null r


@@ -153,20 +153,20 @@ closeDb = Annex.getState Annex.keysdbhandle >>= \case
Just h -> liftIO (closeDbHandle h)
addAssociatedFile :: Key -> TopFilePath -> Annex ()
addAssociatedFile k f = runWriterIO $ SQL.addAssociatedFile (toIKey k) f
addAssociatedFile k f = runWriterIO $ SQL.addAssociatedFile k f
{- Note that the files returned were once associated with the key, but
- some of them may not be any longer. -}
getAssociatedFiles :: Key -> Annex [TopFilePath]
getAssociatedFiles = runReaderIO . SQL.getAssociatedFiles . toIKey
getAssociatedFiles = runReaderIO . SQL.getAssociatedFiles
{- Gets any keys that are on record as having a particular associated file.
- (Should be one or none but the database doesn't enforce that.) -}
getAssociatedKey :: TopFilePath -> Annex [Key]
getAssociatedKey = map fromIKey <$$> runReaderIO . SQL.getAssociatedKey
getAssociatedKey = runReaderIO . SQL.getAssociatedKey
removeAssociatedFile :: Key -> TopFilePath -> Annex ()
removeAssociatedFile k = runWriterIO . SQL.removeAssociatedFile (toIKey k)
removeAssociatedFile k = runWriterIO . SQL.removeAssociatedFile k
{- Stats the files, and stores their InodeCaches. -}
storeInodeCaches :: Key -> [FilePath] -> Annex ()
@@ -178,15 +178,15 @@ storeInodeCaches' k fs ics = withTSDelta $ \d ->
=<< liftIO (mapM (`genInodeCache` d) fs)
addInodeCaches :: Key -> [InodeCache] -> Annex ()
addInodeCaches k is = runWriterIO $ SQL.addInodeCaches (toIKey k) is
addInodeCaches k is = runWriterIO $ SQL.addInodeCaches k is
{- A key may have multiple InodeCaches; one for the annex object, and one
- for each pointer file that is a copy of it. -}
getInodeCaches :: Key -> Annex [InodeCache]
getInodeCaches = runReaderIO . SQL.getInodeCaches . toIKey
getInodeCaches = runReaderIO . SQL.getInodeCaches
removeInodeCaches :: Key -> Annex ()
removeInodeCaches = runWriterIO . SQL.removeInodeCaches . toIKey
removeInodeCaches = runWriterIO . SQL.removeInodeCaches
isInodeKnown :: InodeCache -> SentinalStatus -> Annex Bool
isInodeKnown i s = or <$> runReaderIO ((:[]) <$$> SQL.isInodeKnown i s)
@@ -288,9 +288,8 @@ reconcileStaged qh = do
-- Note that database writes done in here will not necessarily
-- be visible to database reads also done in here.
reconcile file key = do
let ikey = toIKey key
liftIO $ SQL.addAssociatedFileFast ikey file (SQL.WriteHandle qh)
caches <- liftIO $ SQL.getInodeCaches ikey (SQL.ReadHandle qh)
liftIO $ SQL.addAssociatedFileFast key file (SQL.WriteHandle qh)
caches <- liftIO $ SQL.getInodeCaches key (SQL.ReadHandle qh)
keyloc <- calcRepo (gitAnnexLocation key)
keypopulated <- sameInodeCache keyloc caches
p <- fromRepo $ fromTopFilePath file
@@ -300,6 +299,6 @@ reconcileStaged qh = do
populatePointerFile (Restage True) key keyloc p >>= \case
Nothing -> return ()
Just ic -> liftIO $
SQL.addInodeCaches ikey [ic] (SQL.WriteHandle qh)
SQL.addInodeCaches key [ic] (SQL.WriteHandle qh)
(False, True) -> depopulatePointerFile key p
_ -> return ()


@@ -19,7 +19,7 @@ import qualified Database.Queue as H
import Utility.InodeCache
import Git.FilePath
import Database.Persist.Sql
import Database.Persist.Sql hiding (Key)
import Database.Persist.TH
import Data.Time.Clock
import Control.Monad
@@ -29,13 +29,13 @@ import qualified Data.Conduit.List as CL
share [mkPersist sqlSettings, mkMigrate "migrateKeysDb"] [persistLowerCase|
Associated
key IKey
key Key
file SFilePath
KeyFileIndex key file
FileKeyIndex file key
Content
key IKey
cache SInodeCache
key Key
cache InodeCache
KeyCacheIndex key cache
|]
@@ -62,20 +62,20 @@ queueDb a (WriteHandle h) = H.queueDb h checkcommit a
now <- getCurrentTime
return $ diffUTCTime now lastcommittime > 300
addAssociatedFile :: IKey -> TopFilePath -> WriteHandle -> IO ()
addAssociatedFile ik f = queueDb $ do
addAssociatedFile :: Key -> TopFilePath -> WriteHandle -> IO ()
addAssociatedFile k f = queueDb $ do
-- If the same file was associated with a different key before,
-- remove that.
deleteWhere [AssociatedFile ==. af, AssociatedKey !=. ik]
void $ insertUnique $ Associated ik af
deleteWhere [AssociatedFile ==. af, AssociatedKey !=. k]
void $ insertUnique $ Associated k af
where
af = toSFilePath (getTopFilePath f)
-- Does not remove any old association for a file, but less expensive
-- than addAssociatedFile. Calling dropAllAssociatedFiles first and then
-- this is an efficient way to update all associated files.
addAssociatedFileFast :: IKey -> TopFilePath -> WriteHandle -> IO ()
addAssociatedFileFast ik f = queueDb $ void $ insertUnique $ Associated ik af
addAssociatedFileFast :: Key -> TopFilePath -> WriteHandle -> IO ()
addAssociatedFileFast k f = queueDb $ void $ insertUnique $ Associated k af
where
af = toSFilePath (getTopFilePath f)
@@ -85,40 +85,40 @@ dropAllAssociatedFiles = queueDb $
{- Note that the files returned were once associated with the key, but
- some of them may not be any longer. -}
getAssociatedFiles :: IKey -> ReadHandle -> IO [TopFilePath]
getAssociatedFiles ik = readDb $ do
l <- selectList [AssociatedKey ==. ik] []
getAssociatedFiles :: Key -> ReadHandle -> IO [TopFilePath]
getAssociatedFiles k = readDb $ do
l <- selectList [AssociatedKey ==. k] []
return $ map (asTopFilePath . fromSFilePath . associatedFile . entityVal) l
{- Gets any keys that are on record as having a particular associated file.
- (Should be one or none but the database doesn't enforce that.) -}
getAssociatedKey :: TopFilePath -> ReadHandle -> IO [IKey]
getAssociatedKey :: TopFilePath -> ReadHandle -> IO [Key]
getAssociatedKey f = readDb $ do
l <- selectList [AssociatedFile ==. af] []
return $ map (associatedKey . entityVal) l
where
af = toSFilePath (getTopFilePath f)
removeAssociatedFile :: IKey -> TopFilePath -> WriteHandle -> IO ()
removeAssociatedFile ik f = queueDb $
deleteWhere [AssociatedKey ==. ik, AssociatedFile ==. af]
removeAssociatedFile :: Key -> TopFilePath -> WriteHandle -> IO ()
removeAssociatedFile k f = queueDb $
deleteWhere [AssociatedKey ==. k, AssociatedFile ==. af]
where
af = toSFilePath (getTopFilePath f)
addInodeCaches :: IKey -> [InodeCache] -> WriteHandle -> IO ()
addInodeCaches ik is = queueDb $
forM_ is $ \i -> insertUnique $ Content ik (toSInodeCache i)
addInodeCaches :: Key -> [InodeCache] -> WriteHandle -> IO ()
addInodeCaches k is = queueDb $
forM_ is $ \i -> insertUnique $ Content k i
{- A key may have multiple InodeCaches; one for the annex object, and one
- for each pointer file that is a copy of it. -}
getInodeCaches :: IKey -> ReadHandle -> IO [InodeCache]
getInodeCaches ik = readDb $ do
l <- selectList [ContentKey ==. ik] []
return $ map (fromSInodeCache. contentCache . entityVal) l
getInodeCaches :: Key -> ReadHandle -> IO [InodeCache]
getInodeCaches k = readDb $ do
l <- selectList [ContentKey ==. k] []
return $ map (contentCache . entityVal) l
removeInodeCaches :: IKey -> WriteHandle -> IO ()
removeInodeCaches ik = queueDb $
deleteWhere [ContentKey ==. ik]
removeInodeCaches :: Key -> WriteHandle -> IO ()
removeInodeCaches k = queueDb $
deleteWhere [ContentKey ==. k]
{- Check if the inode is known to be used for an annexed file.
-
@@ -131,9 +131,7 @@ isInodeKnown i s = readDb query
| sentinalInodesChanged s =
withRawQuery likesql [] $ isJust <$> CL.head
| otherwise =
isJust <$> selectFirst [ContentCache ==. si] []
si = toSInodeCache i
isJust <$> selectFirst [ContentCache ==. i] []
likesql = T.concat
[ "SELECT key FROM content WHERE "
@@ -143,7 +141,7 @@ isInodeKnown i s = readDb query
mklike p = T.concat
[ "cache LIKE "
, "'I \"" -- SInodeCache serializes as I "..."
, "'"
, T.pack p
, "\"'"
, "'"
]


@@ -5,124 +5,50 @@
- Licensed under the GNU AGPL version 3 or higher.
-}
{-# LANGUAGE TemplateHaskell #-}
{-# OPTIONS_GHC -fno-warn-orphans #-}
module Database.Types where
module Database.Types (
module Database.Types,
Key,
) where
import Database.Persist.TH
import Database.Persist.Class hiding (Key)
import Database.Persist.Sql hiding (Key)
import Data.Maybe
import Data.Char
import qualified Data.ByteString as S
import qualified Data.ByteString.Lazy as L
import qualified Data.Text as T
import Control.DeepSeq
import qualified Data.Attoparsec.ByteString as A
import Utility.PartialPrelude
import Key
import Utility.InodeCache
import Git.Types (Ref(..))
import Utility.FileSystemEncoding
import Git.Types
import Types.UUID
import Types.Import
-- A serialized Key
newtype SKey = SKey String
deriving (Show, Read)
instance PersistField Key where
toPersistValue = toPersistValue . L.toStrict . serializeKey'
fromPersistValue b = fromPersistValue b >>= parse
where
parse = either (Left . T.pack) Right . A.parseOnly keyParser
toSKey :: Key -> SKey
toSKey = SKey . serializeKey
-- A key can contain arbitrarily encoded characters, so store in sqlite as a
-- blob to avoid encoding problems.
instance PersistFieldSql Key where
sqlType _ = SqlBlob
fromSKey :: SKey -> Key
fromSKey (SKey s) = fromMaybe (error $ "bad serialized Key " ++ s) (deserializeKey s)
instance PersistField InodeCache where
toPersistValue = toPersistValue . showInodeCache
fromPersistValue b = fromPersistValue b >>= parse
where
parse s = maybe
(Left $ T.pack $ "bad serialized InodeCache "++ s)
Right
(readInodeCache s)
derivePersistField "SKey"
-- A Key index. More efficient than SKey, but its Read instance does not
-- work when it's used in any kind of complex data structure.
newtype IKey = IKey String
instance NFData IKey where
rnf (IKey s) = rnf s
instance Read IKey where
readsPrec _ s = [(IKey s, "")]
instance Show IKey where
show (IKey s) = s
toIKey :: Key -> IKey
toIKey = IKey . serializeKey
fromIKey :: IKey -> Key
fromIKey (IKey s) = fromMaybe (error $ "bad serialized Key " ++ s) (deserializeKey s)
derivePersistField "IKey"
-- A serialized InodeCache
newtype SInodeCache = I String
deriving (Show, Read)
toSInodeCache :: InodeCache -> SInodeCache
toSInodeCache = I . showInodeCache
fromSInodeCache :: SInodeCache -> InodeCache
fromSInodeCache (I s) = fromMaybe (error $ "bad serialized InodeCache " ++ s) (readInodeCache s)
derivePersistField "SInodeCache"
-- A serialized FilePath.
--
-- Not all unicode characters round-trip through sqlite. In particular,
-- surrogate code points do not. So, escape the FilePath. But, only when
-- it contains such characters.
newtype SFilePath = SFilePath String
-- Note that Read instance does not work when used in any kind of complex
-- data structure.
instance Read SFilePath where
readsPrec _ s = [(SFilePath s, "")]
instance Show SFilePath where
show (SFilePath s) = s
toSFilePath :: FilePath -> SFilePath
toSFilePath s@('"':_) = SFilePath (show s)
toSFilePath s
| any needsescape s = SFilePath (show s)
| otherwise = SFilePath s
where
needsescape c = case generalCategory c of
Surrogate -> True
PrivateUse -> True
NotAssigned -> True
_ -> False
fromSFilePath :: SFilePath -> FilePath
fromSFilePath (SFilePath s@('"':_)) =
fromMaybe (error "bad serialized SFilePath " ++ s) (readish s)
fromSFilePath (SFilePath s) = s
derivePersistField "SFilePath"
-- A serialized Ref
newtype SRef = SRef Ref
-- Note that Read instance does not work when used in any kind of complex
-- data structure.
instance Read SRef where
readsPrec _ s = [(SRef (Ref s), "")]
instance Show SRef where
show (SRef (Ref s)) = s
derivePersistField "SRef"
toSRef :: Ref -> SRef
toSRef = SRef
fromSRef :: SRef -> Ref
fromSRef (SRef r) = r
instance PersistFieldSql InodeCache where
sqlType _ = SqlString
instance PersistField UUID where
toPersistValue u = toPersistValue b
@@ -146,3 +72,37 @@ instance PersistField ContentIdentifier where
instance PersistFieldSql ContentIdentifier where
sqlType _ = SqlBlob
-- A serialized FilePath. Stored as a ByteString to avoid encoding problems.
newtype SFilePath = SFilePath S.ByteString
deriving (Eq, Show)
toSFilePath :: FilePath -> SFilePath
toSFilePath = SFilePath . encodeBS
fromSFilePath :: SFilePath -> FilePath
fromSFilePath (SFilePath b) = decodeBS b
instance PersistField SFilePath where
toPersistValue (SFilePath b) = toPersistValue b
fromPersistValue v = SFilePath <$> fromPersistValue v
instance PersistFieldSql SFilePath where
sqlType _ = SqlBlob
-- A serialized git Sha
newtype SSha = SSha String
deriving (Eq, Show)
toSSha :: Sha -> SSha
toSSha (Ref s) = SSha s
fromSSha :: SSha -> Ref
fromSSha (SSha s) = Ref s
instance PersistField SSha where
toPersistValue (SSha b) = toPersistValue b
fromPersistValue v = SSha <$> fromPersistValue v
instance PersistFieldSql SSha where
sqlType _ = SqlString

Key.hs

@@ -140,7 +140,7 @@ deserializeKey :: String -> Maybe Key
deserializeKey = deserializeKey' . encodeBS'
deserializeKey' :: S.ByteString -> Maybe Key
deserializeKey' b = eitherToMaybe $ A.parseOnly keyParser b
deserializeKey' = eitherToMaybe . A.parseOnly keyParser
{- This splits any extension out of the keyName, returning the
- keyName minus extension, and the extension (including leading dot).