
more compatible sparse file testing, fixes #1310

removed the pointless platform check.

just test the input file first with the same checks we expect to succeed
on the extracted file. skip sparse archiving / extraction testing if the input
file checks fail - in that case we likely have a problem with the OS or the filesystem.
Thomas Waldmann 2016-07-11 20:20:10 +02:00
parent 8c4802312d
commit 0fb6cb8417


@@ -393,11 +393,30 @@ def _set_repository_id(self, path, id):
         return repository.id

     def test_sparse_file(self):
-        # no sparse file support on Mac OS X
-        sparse_support = sys.platform != 'darwin'
+        def is_sparse(fn, total_size, hole_size):
+            st = os.stat(fn)
+            assert st.st_size == total_size
+            sparse = True
+            if sparse and hasattr(st, 'st_blocks') and st.st_blocks * 512 >= st.st_size:
+                sparse = False
+            if sparse and hasattr(os, 'SEEK_HOLE') and hasattr(os, 'SEEK_DATA'):
+                with open(fn, 'rb') as fd:
+                    # only check if the first hole is as expected, because the 2nd hole check
+                    # is problematic on xfs due to its "dynamic speculative EOF preallocation
+                    try:
+                        if fd.seek(0, os.SEEK_HOLE) != 0:
+                            sparse = False
+                        if fd.seek(0, os.SEEK_DATA) != hole_size:
+                            sparse = False
+                    except OSError:
+                        # OS/FS does not really support SEEK_HOLE/SEEK_DATA
+                        sparse = False
+            return sparse
+
         filename = os.path.join(self.input_path, 'sparse')
         content = b'foobar'
         hole_size = 5 * (1 << CHUNK_MAX_EXP)  # 5 full chunker buffers
+        total_size = hole_size + len(content) + hole_size
         with open(filename, 'wb') as fd:
             # create a file that has a hole at the beginning and end (if the
             # OS and filesystem supports sparse files)
@@ -406,14 +425,14 @@ def test_sparse_file(self):
             fd.seek(hole_size, 1)
             pos = fd.tell()
             fd.truncate(pos)
-        total_len = hole_size + len(content) + hole_size
-        st = os.stat(filename)
-        self.assert_equal(st.st_size, total_len)
-        if sparse_support and hasattr(st, 'st_blocks'):
-            self.assert_true(st.st_blocks * 512 < total_len)  # is input sparse?
+        # we first check if we could create a sparse input file:
+        sparse_support = is_sparse(filename, total_size, hole_size)
+        if sparse_support:
+            # we could create a sparse input file, so creating a backup of it and
+            # extracting it again (as sparse) should also work:
             self.cmd('init', self.repository_location)
             self.cmd('create', self.repository_location + '::test', 'input')
-            with changedir('output'):
+            with changedir(self.output_path):
                 self.cmd('extract', '--sparse', self.repository_location + '::test')
             self.assert_dirs_equal('input', 'output/input')
             filename = os.path.join(self.output_path, 'input', 'sparse')
@@ -422,23 +441,7 @@ def test_sparse_file(self):
                 self.assert_equal(fd.read(hole_size), b'\0' * hole_size)
                 self.assert_equal(fd.read(len(content)), content)
                 self.assert_equal(fd.read(hole_size), b'\0' * hole_size)
-        st = os.stat(filename)
-        self.assert_equal(st.st_size, total_len)
-        if sparse_support:
-            if hasattr(st, 'st_blocks'):
-                # do only check if it is less, do NOT check if it is much less
-                # as that causes troubles on xfs, zfs, ntfs:
-                self.assert_true(st.st_blocks * 512 < total_len)
-            if hasattr(os, 'SEEK_HOLE') and hasattr(os, 'SEEK_DATA'):
-                with open(filename, 'rb') as fd:
-                    # only check if the first hole is as expected, because the 2nd hole check
-                    # is problematic on xfs due to its "dynamic speculative EOF preallocation
-                    try:
-                        self.assert_equal(fd.seek(0, os.SEEK_HOLE), 0)
-                        self.assert_equal(fd.seek(0, os.SEEK_DATA), hole_size)
-                    except OSError:
-                        # does not really support SEEK_HOLE/SEEK_DATA
-                        pass
+            self.assert_true(is_sparse(filename, total_size, hole_size))

     def test_unusual_filenames(self):
         filenames = ['normal', 'with some blanks', '(with_parens)', ]
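
To try the same checks outside the test suite, here is a minimal standalone sketch that mirrors the is_sparse() helper added in the diff above. It is only an illustration: the temporary file, the 1 MiB hole size and the __main__ driver are demo assumptions and not part of borg (the test itself uses 5 * (1 << CHUNK_MAX_EXP), i.e. five full chunker buffers, as the hole size).

# sketch_sparse_check.py - demo only, mirrors the checks from test_sparse_file
import os
import tempfile


def is_sparse(fn, total_size, hole_size):
    """Best-effort check that fn is stored sparsely and starts with a hole."""
    st = os.stat(fn)
    assert st.st_size == total_size
    # heuristic 1: a sparse file allocates fewer 512-byte blocks than its apparent size
    if hasattr(st, 'st_blocks') and st.st_blocks * 512 >= st.st_size:
        return False
    # heuristic 2: SEEK_HOLE should find a hole at offset 0 and SEEK_DATA should find
    # the first data byte right after the leading hole (the trailing hole is not
    # checked, see the xfs note in the test above)
    if hasattr(os, 'SEEK_HOLE') and hasattr(os, 'SEEK_DATA'):
        with open(fn, 'rb') as fd:
            try:
                if fd.seek(0, os.SEEK_HOLE) != 0:
                    return False
                if fd.seek(0, os.SEEK_DATA) != hole_size:
                    return False
            except OSError:
                # OS/FS does not really support SEEK_HOLE/SEEK_DATA
                return False
    return True


if __name__ == '__main__':
    hole_size = 1024 * 1024  # 1 MiB holes, demo value only
    content = b'foobar'
    total_size = hole_size + len(content) + hole_size
    with tempfile.NamedTemporaryFile(delete=False) as fd:
        filename = fd.name
        fd.seek(hole_size, 1)    # leading hole: seek forward without writing
        fd.write(content)
        fd.seek(hole_size, 1)    # trailing hole
        fd.truncate(fd.tell())   # extend the file to its full apparent size
    print('sparse file support detected:', is_sparse(filename, total_size, hole_size))
    os.unlink(filename)

Note that st_blocks is counted in 512-byte units here, matching the assumption in the test, and that a filesystem may legitimately fail both heuristics (for example one that preallocates blocks or lacks SEEK_HOLE support), which is exactly why the test now skips the archive/extract round trip instead of failing.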