{- git-annex Key data type
 -
 - Most things should not need this, using Types instead
 -
 - Copyright 2011-2014 Joey Hess <id@joeyh.name>
 -
 - Licensed under the GNU GPL version 3 or higher.
 -}

module Types.Key (
	Key(..),
	AssociatedFile,
	stubKey,
	key2file,
	file2key,
	nonChunkKey,
	chunkKeyOffset,
	isChunkKey,
	isKeyPrefix,

	prop_idempotent_key_encode,
	prop_idempotent_key_decode
) where

import System.Posix.Types

import Common
import Utility.QuickCheck
import Utility.Bloom

{- A Key has a unique name, which is derived from a particular backend,
 - and may contain other optional metadata. -}
data Key = Key
	{ keyName :: String
	, keyBackendName :: String
	, keySize :: Maybe Integer
	, keyMtime :: Maybe EpochTime
	, keyChunkSize :: Maybe Integer
	, keyChunkNum :: Maybe Integer
	} deriving (Eq, Ord, Read, Show)
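
-- An illustrative value (hypothetical name, not a real checksum):
-- a 100 byte object stored with a "SHA256" backend could be
--
-- > Key "somename" "SHA256" (Just 100) Nothing Nothing Nothing
--
-- with no mtime or chunk metadata recorded.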

{- A filename may be associated with a Key. -}
type AssociatedFile = Maybe FilePath

stubKey :: Key
stubKey = Key
	{ keyName = ""
	, keyBackendName = ""
	, keySize = Nothing
	, keyMtime = Nothing
	, keyChunkSize = Nothing
	, keyChunkNum = Nothing
	}

-- Gets the parent of a chunk key.
nonChunkKey :: Key -> Key
nonChunkKey k = k
	{ keyChunkSize = Nothing
	, keyChunkNum = Nothing
	}
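
-- A sketch with illustrative values: stripping the chunk fields
-- recovers the key of the full object.
--
-- > let ck = stubKey { keyName = "n", keyBackendName = "SHA256", keyChunkSize = Just 1048576, keyChunkNum = Just 3 }
-- > nonChunkKey ck == stubKey { keyName = "n", keyBackendName = "SHA256" }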

-- Where a chunk key is offset within its parent.
chunkKeyOffset :: Key -> Maybe Integer
chunkKeyOffset k = (*)
	<$> keyChunkSize k
	<*> (pred <$> keyChunkNum k)
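
-- An illustrative calculation: chunk numbers are 1-based, so for a key
-- with keyChunkSize = Just 1048576 and keyChunkNum = Just 3,
-- chunkKeyOffset yields Just (2 * 1048576), two full chunks into the
-- object. A key without chunk metadata yields Nothing.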

isChunkKey :: Key -> Bool
isChunkKey k = isJust (keyChunkSize k) && isJust (keyChunkNum k)

-- Checks if a string looks like at least the start of a key.
isKeyPrefix :: String -> Bool
isKeyPrefix s = [fieldSep, fieldSep] `isInfixOf` s
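
-- For example (hypothetical truncations), "SHA256-s100--" passes,
-- since the doubled field separator has been reached, while
-- "SHA256-s1" does not.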

fieldSep :: Char
fieldSep = '-'

{- Converts a key to a string that is suitable for use as a filename.
 - The name field is always shown last, separated by doubled fieldSeps,
 - and is the only field allowed to contain the fieldSep. -}
key2file :: Key -> FilePath
key2file Key { keyBackendName = b, keySize = s, keyMtime = m, keyChunkSize = cs, keyChunkNum = cn, keyName = n } =
	b +++ ('s' ?: s) +++ ('m' ?: m) +++ ('S' ?: cs) +++ ('C' ?: cn) +++ (fieldSep : n)
  where
	"" +++ y = y
	x +++ "" = x
	x +++ y = x ++ fieldSep:y
	f ?: (Just v) = f : show v
	_ ?: _ = ""
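
-- A sketch of the resulting format (hypothetical key name):
--
-- > key2file (stubKey { keyName = "abc", keyBackendName = "SHA256", keySize = Just 100 })
-- >         == "SHA256-s100--abc"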

file2key :: FilePath -> Maybe Key
file2key s
	| key == Just stubKey || (keyName <$> key) == Just "" || (keyBackendName <$> key) == Just "" = Nothing
	| otherwise = key
  where
	key = startbackend stubKey s

	startbackend k v = sepfield k v addbackend

	sepfield k v a = case span (/= fieldSep) v of
		(v', _:r) -> findfields r $ a k v'
		_ -> Nothing

	findfields (c:v) (Just k)
		| c == fieldSep = Just $ k { keyName = v }
		| otherwise = sepfield k v $ addfield c
	findfields _ v = v

	addbackend k v = Just k { keyBackendName = v }
	addfield 's' k v = do
		sz <- readish v
		return $ k { keySize = Just sz }
	addfield 'm' k v = do
		mtime <- readish v
		return $ k { keyMtime = Just mtime }
	addfield 'S' k v = do
		chunksize <- readish v
		return $ k { keyChunkSize = Just chunksize }
	addfield 'C' k v = case readish v of
		Just chunknum | chunknum > 0 ->
			return $ k { keyChunkNum = Just chunknum }
		_ -> return k
	addfield _ _ _ = Nothing
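
-- Decoding inverts the encoding example above (hypothetical key name):
--
-- > file2key "SHA256-s100--abc"
-- >         == Just (stubKey { keyName = "abc", keyBackendName = "SHA256", keySize = Just 100 })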

instance Arbitrary Key where
	arbitrary = Key
		<$> (listOf1 $ elements $ ['A'..'Z'] ++ ['a'..'z'] ++ ['0'..'9'] ++ "-_\r\n \t")
		<*> (listOf1 $ elements ['A'..'Z']) -- BACKEND
		<*> ((abs <$>) <$> arbitrary) -- size cannot be negative
		<*> arbitrary
		<*> ((abs <$>) <$> arbitrary) -- chunksize cannot be negative
		<*> ((succ . abs <$>) <$> arbitrary) -- chunknum cannot be 0 or negative

instance Hashable Key where
	hashIO32 = hashIO32 . key2file
	hashIO64 = hashIO64 . key2file

prop_idempotent_key_encode :: Key -> Bool
prop_idempotent_key_encode k = Just k == (file2key . key2file) k

prop_idempotent_key_decode :: FilePath -> Bool
prop_idempotent_key_decode f
	| normalfieldorder = maybe True (\k -> key2file k == f) (file2key f)
	| otherwise = True
  where
	-- file2key will accept the fields in any order, so don't
	-- try the test unless the fields are in the normal order
	normalfieldorder = fields `isPrefixOf` "smSC"
	fields = map (f !!) $ filter (< length f) $ map succ $
		elemIndices fieldSep f
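
-- These properties can be exercised from ghci, assuming QuickCheck is
-- available (a usage sketch, not part of this module's API):
--
-- > ghci> quickCheck prop_idempotent_key_encode
-- > +++ OK, passed 100 tests.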