{- path manipulation
 -
 - Copyright 2010-2014 Joey Hess <id@joeyh.name>
 -
 - License: BSD-2-clause
 -}

{-# LANGUAGE PackageImports, CPP #-}
{-# OPTIONS_GHC -fno-warn-tabs #-}

module Utility.Path where

import Data.String.Utils
import System.FilePath
import System.Directory
import Data.List
import Data.Maybe
import Data.Char
import Control.Applicative
import Prelude

#ifdef mingw32_HOST_OS
import qualified System.FilePath.Posix as Posix
#else
import System.Posix.Files
import Utility.Exception
#endif

import qualified "MissingH" System.Path as MissingH
import Utility.Monad
import Utility.UserInfo

{- Simplifies a path, removing any ".." or ".", and removing the trailing
 - path separator.
 -
 - On Windows, preserves whichever style of path separator might be used in
 - the input FilePaths. This is done because some programs in Windows
 - demand a particular path separator -- and which one actually varies!
 -
 - This does not guarantee that two paths that refer to the same location,
 - and are both relative to the same location (or both absolute) will
 - yield the same result. Run both through normalise from System.FilePath
 - to ensure that.
 -}
simplifyPath :: FilePath -> FilePath
simplifyPath path = dropTrailingPathSeparator $
	joinDrive drive $ joinPath $ norm [] $ splitPath path'
  where
	(drive, path') = splitDrive path

	norm c [] = reverse c
	norm c (p:ps)
		| p' == ".." = norm (drop 1 c) ps
		| p' == "." = norm c ps
		| otherwise = norm (p:c) ps
	  where
		p' = dropTrailingPathSeparator p
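
{- Illustrative usage sketch, not part of the original module: expected
 - results of simplifyPath on unix-style paths. The name
 - examples_simplifyPath is a hypothetical helper added only for
 - illustration. -}
examples_simplifyPath :: Bool
examples_simplifyPath = and
	[ simplifyPath "/tmp/foo/./bar/../baz/" == "/tmp/foo/baz"
	, simplifyPath "foo/bar/.." == "foo"
	]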

{- Makes a path absolute.
 -
 - The first parameter is a base directory (ie, the cwd) to use if the path
 - is not already absolute.
 -
 - Does not attempt to deal with edge cases or ensure security with
 - untrusted inputs.
 -}
absPathFrom :: FilePath -> FilePath -> FilePath
absPathFrom dir path = simplifyPath (combine dir path)
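
{- Illustrative usage sketch, not part of the original module: when the
 - second argument is already absolute, the base directory is ignored.
 - The name examples_absPathFrom is a hypothetical helper added only for
 - illustration, assuming unix-style paths. -}
examples_absPathFrom :: Bool
examples_absPathFrom = and
	[ absPathFrom "/tmp/foo" "bar" == "/tmp/foo/bar"
	, absPathFrom "/tmp/foo" "/usr" == "/usr"
	]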

{- On Windows, this converts the paths to unix-style, in order to run
 - MissingH's absNormPath on them. -}
absNormPathUnix :: FilePath -> FilePath -> Maybe FilePath
#ifndef mingw32_HOST_OS
absNormPathUnix dir path = MissingH.absNormPath dir path
#else
absNormPathUnix dir path = todos <$> MissingH.absNormPath (fromdos dir) (fromdos path)
  where
	fromdos = replace "\\" "/"
	todos = replace "/" "\\"
#endif

{- takeDirectory "foo/bar/" is "foo/bar". This instead yields "foo" -}
parentDir :: FilePath -> FilePath
parentDir = takeDirectory . dropTrailingPathSeparator
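
{- Illustrative usage sketch, not part of the original module. The name
 - examples_parentDir is a hypothetical helper added only for
 - illustration, assuming unix-style paths. -}
examples_parentDir :: Bool
examples_parentDir = and
	[ parentDir "foo/bar/" == "foo"
	, parentDir "/tmp/foo" == "/tmp"
	]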

{- Just the parent directory of a path, or Nothing if the path has no
 - parent (ie for "/" or ".") -}
upFrom :: FilePath -> Maybe FilePath
upFrom dir
	| length dirs < 2 = Nothing
	| otherwise = Just $ joinDrive drive (join s $ init dirs)
  where
	-- on Unix, the drive will be "/" when the dir is absolute, otherwise ""
	(drive, path) = splitDrive dir
	dirs = filter (not . null) $ split s path
	s = [pathSeparator]
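
{- Illustrative usage sketch, not part of the original module. The name
 - examples_upFrom is a hypothetical helper added only for illustration,
 - assuming unix-style paths. -}
examples_upFrom :: Bool
examples_upFrom = and
	[ upFrom "/tmp/foo" == Just "/tmp"
	, upFrom "/tmp" == Nothing
	, upFrom "." == Nothing
	]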

prop_upFrom_basics :: FilePath -> Bool
prop_upFrom_basics dir
	| null dir = True
	| dir == "/" = p == Nothing
	| otherwise = p /= Just dir
  where
	p = upFrom dir

{- Checks if the first FilePath is, or could be said to contain the second.
 - For example, "foo/" contains "foo/bar". Also, "foo", "./foo", "foo/" etc
 - are all equivalent.
 -}
dirContains :: FilePath -> FilePath -> Bool
dirContains a b = a == b || a' == b' || (addTrailingPathSeparator a') `isPrefixOf` b'
  where
	a' = norm a
	b' = norm b
	norm = normalise . simplifyPath
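
{- Illustrative usage sketch, not part of the original module. The name
 - examples_dirContains is a hypothetical helper added only for
 - illustration, assuming unix-style paths. -}
examples_dirContains :: Bool
examples_dirContains = and
	[ dirContains "foo/" "foo/bar"
	, dirContains "foo" "./foo"
	, not (dirContains "foo" "foobar")
	]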

{- Converts a filename into an absolute path.
 -
 - Unlike Directory.canonicalizePath, this does not require the path
 - already exists. -}
absPath :: FilePath -> IO FilePath
absPath file = do
	cwd <- getCurrentDirectory
	return $ absPathFrom cwd file

{- Constructs a relative path from the CWD to a file.
 -
 - For example, assuming CWD is /tmp/foo/bar:
 - relPathCwdToFile "/tmp/foo" == ".."
 - relPathCwdToFile "/tmp/foo/bar" == ""
 -}
relPathCwdToFile :: FilePath -> IO FilePath
relPathCwdToFile f = do
	c <- getCurrentDirectory
	relPathDirToFile c f

{- Constructs a relative path from a directory to a file. -}
relPathDirToFile :: FilePath -> FilePath -> IO FilePath
relPathDirToFile from to = relPathDirToFileAbs <$> absPath from <*> absPath to

{- This requires the first path to be absolute, and the
 - second path cannot contain ../ or ./
 -
 - On Windows, if the paths are on different drives,
 - a relative path is not possible and the path is simply
 - returned as-is.
 -}
relPathDirToFileAbs :: FilePath -> FilePath -> FilePath
relPathDirToFileAbs from to
	| takeDrive from /= takeDrive to = to
	| otherwise = join s $ dotdots ++ uncommon
  where
	s = [pathSeparator]
	pfrom = split s from
	pto = split s to
	common = map fst $ takeWhile same $ zip pfrom pto
	same (c,d) = c == d
	uncommon = drop numcommon pto
	dotdots = replicate (length pfrom - numcommon) ".."
	numcommon = length common
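
{- Illustrative usage sketch, not part of the original module. The name
 - examples_relPathDirToFileAbs is a hypothetical helper added only for
 - illustration, assuming unix-style paths. -}
examples_relPathDirToFileAbs :: Bool
examples_relPathDirToFileAbs = and
	[ relPathDirToFileAbs "/tmp/foo/bar" "/tmp/foo" == ".."
	, relPathDirToFileAbs "/tmp/foo" "/tmp/foo/bar/baz" == "bar/baz"
	]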

prop_relPathDirToFile_basics :: FilePath -> FilePath -> Bool
prop_relPathDirToFile_basics from to
	| null from || null to = True
	| from == to = null r
	| otherwise = not (null r)
  where
	r = relPathDirToFileAbs from to

prop_relPathDirToFile_regressionTest :: Bool
prop_relPathDirToFile_regressionTest = same_dir_shortcurcuits_at_difference
  where
	{- Two paths have the same directory component at the same
	 - location, but it's not really the same directory.
	 - Code used to get this wrong. -}
	same_dir_shortcurcuits_at_difference =
		relPathDirToFileAbs (joinPath [pathSeparator : "tmp", "r", "lll", "xxx", "yyy", "18"])
			(joinPath [pathSeparator : "tmp", "r", ".git", "annex", "objects", "18", "gk", "SHA256-foo", "SHA256-foo"])
				== joinPath ["..", "..", "..", "..", ".git", "annex", "objects", "18", "gk", "SHA256-foo", "SHA256-foo"]

{- Given an original list of paths, and an expanded list derived from it,
 - which may be arbitrarily reordered, generates a list of lists, where
 - each sublist corresponds to one of the original paths.
 -
 - When the original path is a directory, any items in the expanded list
 - that are contained in that directory will appear in its segment.
 -
 - The order of the original list of paths is attempted to be preserved in
 - the order of the returned segments. However, doing so has an O(N*M)
 - growth factor. So, if the original list has more than 100 paths on it,
 - we stop preserving ordering at that point. Presumably a user passing
 - that many paths in doesn't care too much about order of the later ones.
 -}
segmentPaths :: [FilePath] -> [FilePath] -> [[FilePath]]
segmentPaths [] new = [new]
segmentPaths [_] new = [new] -- optimisation
segmentPaths (l:ls) new = found : segmentPaths ls rest
  where
	(found, rest) = if length ls < 100
		then partition (l `dirContains`) new
		else break (\p -> not (l `dirContains` p)) new
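
{- Illustrative usage sketch, not part of the original module: each
 - segment collects the expanded paths contained in the corresponding
 - original path. The name examples_segmentPaths is a hypothetical helper
 - added only for illustration, assuming unix-style paths. -}
examples_segmentPaths :: Bool
examples_segmentPaths =
	segmentPaths ["foo", "bar"] ["foo/a", "foo/b", "bar/c"]
		== [["foo/a", "foo/b"], ["bar/c"]]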

{- This assumes that it's cheaper to call segmentPaths on the result,
 - than it would be to run the action separately with each path. In
 - the case of git file list commands, that assumption tends to hold.
 -}
runSegmentPaths :: ([FilePath] -> IO [FilePath]) -> [FilePath] -> IO [[FilePath]]
runSegmentPaths a paths = segmentPaths paths <$> a paths

{- Converts paths in the home directory to use ~/ -}
relHome :: FilePath -> IO String
relHome path = do
	home <- myHomeDir
	return $ if dirContains home path
		then "~/" ++ relPathDirToFileAbs home path
		else path

{- Checks if a command is available in PATH.
 -
 - The command may be fully-qualified, in which case, this succeeds as
 - long as it exists. -}
inPath :: String -> IO Bool
inPath command = isJust <$> searchPath command

{- Finds a command in PATH and returns the full path to it.
 -
 - The command may be fully qualified already, in which case it will
 - be returned if it exists.
 -}
searchPath :: String -> IO (Maybe FilePath)
searchPath command
	| isAbsolute command = check command
	| otherwise = getSearchPath >>= getM indir
  where
	indir d = check $ d </> command
	check f = firstM doesFileExist
#ifdef mingw32_HOST_OS
		[f, f ++ ".exe"]
#else
		[f]
#endif
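
{- Illustrative usage sketch, not part of the original module. The name
 - example_searchPath is a hypothetical helper added only for
 - illustration; "git" is an arbitrary command name. -}
example_searchPath :: IO ()
example_searchPath = do
	found <- searchPath "git"
	putStrLn $ maybe "git was not found in PATH" ("git found at " ++) found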

{- Checks if a filename is a unix dotfile. All files inside dotdirs
 - count as dotfiles. -}
dotfile :: FilePath -> Bool
dotfile file
	| f == "." = False
	| f == ".." = False
	| f == "" = False
	| otherwise = "." `isPrefixOf` f || dotfile (takeDirectory file)
  where
	f = takeFileName file
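
{- Illustrative usage sketch, not part of the original module. The name
 - examples_dotfile is a hypothetical helper added only for illustration. -}
examples_dotfile :: Bool
examples_dotfile = and
	[ dotfile ".foo"
	, dotfile ".git/config"
	, not (dotfile "foo/bar")
	]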

{- Converts a DOS style path to a Cygwin style path. Only on Windows.
 - Any trailing '\' is preserved as a trailing '/' -}
toCygPath :: FilePath -> FilePath
#ifndef mingw32_HOST_OS
toCygPath = id
#else
toCygPath p
	| null drive = recombine parts
	| otherwise = recombine $ "/cygdrive" : driveletter drive : parts
  where
	(drive, p') = splitDrive p
	parts = splitDirectories p'
	driveletter = map toLower . takeWhile (/= ':')
	recombine = fixtrailing . Posix.joinPath
	fixtrailing s
		| hasTrailingPathSeparator p = Posix.addTrailingPathSeparator s
		| otherwise = s
#endif

{- Maximum size to use for a file in a specified directory.
 -
 - Many systems have a 255 byte limit to the name of a file,
 - so that's taken as the max if the system has a larger limit, or has no
 - limit.
 -}
fileNameLengthLimit :: FilePath -> IO Int
#ifdef mingw32_HOST_OS
|
Fix a few bugs involving filenames that are at or near the filesystem's maximum filename length limit.
Started with a problem when running addurl on a really long url,
because the whole url is munged into the filename. Ended up doing
a fairly extensive review for places where filenames could get too large,
although it's hard to say I'm not missed any..
Backend.Url had a 128 character limit, which is fine when the limit is 255,
but not if it's a lot shorter on some systems. So check the pathconf()
limit. Note that this could result in fromUrl creating different keys
for the same url, if run on systems with different limits. I don't see
this is likely to cause any problems. That can already happen when using
addurl --fast, or if the content of an url changes.
Both Command.AddUrl and Backend.Url assumed that urls don't contain a
lot of multi-byte unicode, and would fail to truncate an url that did
properly.
A few places use a filename as the template to make a temp file.
While that's nice in that the temp file name can be easily related back to
the original filename, it could lead to `git annex add` failing to add a
filename that was at or close to the maximum length.
Note that in Command.Add.lockdown, the template is still derived from the
filename, just with enough space left to turn it into a temp file.
This is an important optimisation, because the assistant may lock down
a bunch of files all at once, and using the same template for all of them
would cause openTempFile to iterate through the same set of names,
looking for an unused temp file. I'm not very happy with the relatedTemplate
hack, but it avoids that slowdown.
Backend.WORM does not limit the filename stored in the key.
I have not tried to change that; so git annex add will fail on really long
filenames when using the WORM backend. It seems better to preserve the
invariant that a WORM key always contains the complete filename, since
the filename is the only unique material in the key, other than mtime and
size. Since nobody has complained about add failing (I think I saw it
once?) on WORM, probably it's ok, or nobody but me uses it.
There may be compatability problems if using git annex addurl --fast
or the WORM backend on a system with the 255 limit and then trying to use
that repo in a system with a smaller limit. I have not tried to deal with
those.
This commit was sponsored by Alexander Brem. Thanks!
2013-07-30 21:49:11 +00:00
|
|
|
fileNameLengthLimit _ = return 255
#else
fileNameLengthLimit dir = do
	-- getPathVar can fail due to statfs(2) overflow
	l <- catchDefaultIO 0 $
		fromIntegral <$> getPathVar dir FileNameLimit
	if l <= 0
		then return 255
		else return $ minimum [l, 255]
#endif
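
{- Illustrative usage sketch, not part of the original module: the limit
 - reported depends on the filesystem being queried. The name
 - example_fileNameLengthLimit is a hypothetical helper added only for
 - illustration. -}
example_fileNameLengthLimit :: IO ()
example_fileNameLengthLimit = do
	limit <- fileNameLengthLimit "."
	putStrLn $ "maximum filename length here: " ++ show limit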

{- Given a string that we'd like to use as the basis for a FilePath, but that
 - was provided by a third party and is not to be trusted, returns the closest
 - sane FilePath.
 -
 - All spaces and punctuation and other wacky stuff are replaced
 - with '_', except for '.'
 - "../" will thus turn into ".._", which is safe.
 -}
sanitizeFilePath :: String -> FilePath
sanitizeFilePath = map sanitize
  where
	sanitize c
		| c == '.' = c
		| isSpace c || isPunctuation c || isSymbol c || isControl c || c == '/' = '_'
		| otherwise = c
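
{- Illustrative usage sketch, not part of the original module. The name
 - examples_sanitizeFilePath is a hypothetical helper added only for
 - illustration. -}
examples_sanitizeFilePath :: Bool
examples_sanitizeFilePath =
	sanitizeFilePath "foo bar/../baz" == "foo_bar_.._baz"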

{- Similar to splitExtensions, but knows that some things in FilePaths
 - after a dot are too long to be extensions. -}
splitShortExtensions :: FilePath -> (FilePath, [String])
splitShortExtensions = splitShortExtensions' 5 -- enough for ".jpeg"
splitShortExtensions' :: Int -> FilePath -> (FilePath, [String])
splitShortExtensions' maxextension = go []
  where
	go c f
		| len > 0 && len <= maxextension && not (null base) =
			go (ext:c) base
		| otherwise = (f, c)
	  where
		(base, ext) = splitExtension f
		len = length ext
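
{- Illustrative usage sketch, not part of the original module. The name
 - examples_splitShortExtensions is a hypothetical helper added only for
 - illustration. -}
examples_splitShortExtensions :: Bool
examples_splitShortExtensions = and
	[ splitShortExtensions "foo.tar.gz" == ("foo", [".tar", ".gz"])
	, splitShortExtensions "foo.webpage" == ("foo.webpage", [])
	]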