move cleanOldKey into ingest
parent cfaac52b88
commit d8a8c77a8f

2 changed files with 32 additions and 29 deletions
Annex/Ingest.hs

@@ -13,6 +13,7 @@ module Annex.Ingest (
 	ingest,
 	finishIngestDirect,
 	finishIngestUnlocked,
+	cleanOldKeys,
 	addLink,
 	makeLink,
 	restoreFile,
@@ -27,6 +28,7 @@ import Annex.Content.Direct
 import Annex.Perms
 import Annex.Link
 import Annex.MetaData
+import Logs.Location
 import qualified Annex
 import qualified Annex.Queue
 import qualified Database.Keys
@@ -143,6 +145,7 @@ ingest (Just (LockedDown lockingfile source)) = withTSDelta $ \delta -> do
 		-- linkAnnex falls back to copying if a file
 		-- already has a hard link.
 		cleanCruft source
+		cleanOldKeys (keyFilename source) key
 		r <- linkAnnex key (keyFilename source) (Just cache)
 		case r of
 			LinkAnnexFailed -> failure "failed to link to annex"
@@ -193,6 +196,34 @@ cleanCruft :: KeySource -> Annex ()
 cleanCruft source = when (contentLocation source /= keyFilename source) $
 	liftIO $ nukeFile $ contentLocation source
 
+-- If a worktree file was hard linked to an annex object before,
+-- modifying the file would have caused the object to have the wrong
+-- content. Clean up from that.
+cleanOldKeys :: FilePath -> Key -> Annex ()
+cleanOldKeys file newkey = do
+	oldkeys <- filter (/= newkey)
+		<$> Database.Keys.getAssociatedKey file
+	mapM_ go oldkeys
+  where
+	go key = do
+		obj <- calcRepo (gitAnnexLocation key)
+		caches <- Database.Keys.getInodeCaches key
+		unlessM (sameInodeCache obj caches) $ do
+			unlinkAnnex key
+			fs <- filter (/= file)
+				<$> Database.Keys.getAssociatedFiles key
+			fs' <- filterM (`sameInodeCache` caches) fs
+			case fs' of
+				-- If linkAnnex fails, the associated
+				-- file with the content is still present,
+				-- so no need for any recovery.
+				(f:_) -> do
+					ic <- withTSDelta (liftIO . genInodeCache f)
+					void $ linkAnnex key f ic
+				_ -> lostcontent
+	  where
+		lostcontent = logStatus key InfoMissing
+
 {- On error, put the file back so it doesn't seem to have vanished.
  - This can be called before or after the symlink is in place. -}
 restoreFile :: FilePath -> Key -> SomeException -> Annex a
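The recovery logic in cleanOldKeys hinges on the inode-cache comparison: an old key's annex object is considered good only if its current inode, size, and mtime still match a cache recorded for that key; otherwise the object was presumably modified through a hard link, so it is unlinked and, if another associated file still matches, re-linked from that copy. The following is a minimal standalone sketch of that check, not git-annex code: it uses the unix package directly, and the Cache type is a simplified stand-in for git-annex's InodeCache.

import System.Posix.Files (getFileStatus, fileID, fileSize, modificationTime)
import System.Posix.Types (FileID, FileOffset, EpochTime)

-- Simplified stand-in for git-annex's InodeCache: inode, size, mtime.
data Cache = Cache FileID FileOffset EpochTime
    deriving (Eq, Show)

-- Record the current cache for a file.
cacheFile :: FilePath -> IO Cache
cacheFile f = do
    s <- getFileStatus f
    return (Cache (fileID s) (fileSize s) (modificationTime s))

-- True when the file still matches a previously recorded cache,
-- i.e. it has not been modified in place (through a hard link or otherwise).
sameCache :: Cache -> FilePath -> IO Bool
sameCache old f = (== old) <$> cacheFile f

main :: IO ()
main = do
    writeFile "demo.txt" "original content\n"
    c <- cacheFile "demo.txt"
    writeFile "demo.txt" "different content!\n"
    matches <- sameCache c "demo.txt"
    putStrLn $ if matches
        then "file still matches its recorded cache"
        else "file was modified; cleanOldKeys would unlink the object and try to recover it"

In this demo the size change alone is enough to make the caches differ; in git-annex the same comparison decides whether go falls through to lostcontent.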
Command/Smudge.hs

@@ -74,39 +74,11 @@ clean file = do
 	if isJust (parseLinkOrPointer b)
 		then liftIO $ B.hPut stdout b
 		else ifM (shouldAnnex file)
-			( do
-				k <- ingest file
-				oldkeys <- filter (/= k)
-					<$> Database.Keys.getAssociatedKey file
-				mapM_ (cleanOldKey file) oldkeys
-				Database.Keys.addAssociatedFile k file
-				liftIO $ emitPointer k
+			( liftIO . emitPointer =<< ingest file
 			, liftIO $ B.hPut stdout b
 			)
 	stop
 
--- If the file being cleaned was hard linked to the old key's annex object,
--- modifying the file will have caused the object to have the wrong content.
--- Clean up from that.
-cleanOldKey :: FilePath -> Key -> Annex ()
-cleanOldKey modifiedfile key = do
-	obj <- calcRepo (gitAnnexLocation key)
-	caches <- Database.Keys.getInodeCaches key
-	unlessM (sameInodeCache obj caches) $ do
-		unlinkAnnex key
-		fs <- filter (/= modifiedfile)
-			<$> Database.Keys.getAssociatedFiles key
-		fs' <- filterM (`sameInodeCache` caches) fs
-		case fs' of
-			-- If linkAnnex fails, the file with the content
-			-- is still present, so no need for any recovery.
-			(f:_) -> do
-				ic <- withTSDelta (liftIO . genInodeCache f)
-				void $ linkAnnex key f ic
-			_ -> lostcontent
-  where
-	lostcontent = logStatus key InfoMissing
-
 shouldAnnex :: FilePath -> Annex Bool
 shouldAnnex file = do
 	matcher <- largeFilesMatcher