2015-04-06 21:07:10 +00:00
|
|
|
from binascii import hexlify
|
|
|
|
from configparser import RawConfigParser
|
2010-10-16 09:45:36 +00:00
|
|
|
import os
|
2013-06-24 20:41:05 +00:00
|
|
|
from io import StringIO
|
2012-12-09 22:06:33 +00:00
|
|
|
import stat
|
2013-07-25 20:54:19 +00:00
|
|
|
import subprocess
|
2010-10-30 11:44:25 +00:00
|
|
|
import sys
|
2010-10-16 09:45:36 +00:00
|
|
|
import shutil
|
|
|
|
import tempfile
|
2013-07-25 20:54:19 +00:00
|
|
|
import time
|
|
|
|
import unittest
|
2013-08-12 11:39:46 +00:00
|
|
|
from hashlib import sha256
|
2013-07-08 21:38:27 +00:00
|
|
|
from attic import xattr
|
simple sparse file support, made chunk buffer size flexible
Implemented sparse file support to remove this blocker for people backing up lots of
huge sparse files (like VM images). Attic could not support this use case yet as it would
have restored all files to their fully expanded size, possibly running out of disk space if
the total expanded size would be bigger than the available space.
Please note that this is a very simple implementation of sparse file support - at backup time,
it does not do anything special (it just reads all these zero bytes, chunks, compresses and
encrypts them as usual). At restore time, it detects chunks that are completely filled with zeros
and does a seek on the output file rather than a normal data write, so it creates a hole in
a sparse file. The chunk size for these all-zero chunks is currently 10MiB, so it'll create holes
of multiples of that size (depends also a bit on fs block size, alignment, previously written data).
Special cases like sparse files starting and/or ending with a hole are supported.
Please note that it will currently always create sparse files at restore time if it detects all-zero
chunks.
Also improved:
I needed a constant for the max. chunk size, so I introduced CHUNK_MAX (see also
existing CHUNK_MIN) for the maximum chunk size (which is the same as the chunk
buffer size).
Attic still always uses 10MiB chunk buffer size now, but it could be changed now more easily.
2015-04-15 14:29:18 +00:00
|
|
|
from attic.archive import Archive, ChunkBuffer, CHUNK_MAX
|
2013-07-08 21:38:27 +00:00
|
|
|
from attic.archiver import Archiver
|
2015-04-06 21:07:10 +00:00
|
|
|
from attic.cache import Cache
|
2014-02-24 21:43:17 +00:00
|
|
|
from attic.crypto import bytes_to_long, num_aes_blocks
|
2014-02-19 21:46:15 +00:00
|
|
|
from attic.helpers import Manifest
|
2014-03-24 20:28:59 +00:00
|
|
|
from attic.remote import RemoteRepository, PathNotAllowed
|
2013-07-08 21:38:27 +00:00
|
|
|
from attic.repository import Repository
|
2015-05-09 18:47:50 +00:00
|
|
|
from attic.testsuite import BaseTestCase
|
2014-02-24 21:43:17 +00:00
|
|
|
from attic.testsuite.mock import patch
|
2010-10-16 09:45:36 +00:00
|
|
|
|
2013-07-25 20:54:19 +00:00
|
|
|
# Optional FUSE support: llfuse is only needed for the mount-related tests,
# so its absence must not break importing the test suite.
try:
    import llfuse
    has_llfuse = True
except ImportError:
    has_llfuse = False

# BSD-style file flags are only available on some platforms (os.lchflags
# exists e.g. on FreeBSD / OS X); tests guard on this flag before using it.
has_lchflags = hasattr(os, 'lchflags')

# Absolute path of the project source tree (parent of this test package);
# used as a non-trivial input directory for archive-creation tests.
src_dir = os.path.join(os.getcwd(), os.path.dirname(__file__), '..')
|
2010-10-16 09:45:36 +00:00
|
|
|
|
2013-07-25 20:54:19 +00:00
|
|
|
|
2013-06-30 20:32:27 +00:00
|
|
|
class changedir:
    """Context manager: chdir into a target directory on entry and
    return to the previous working directory on exit.
    """

    def __init__(self, dir):
        self._target = dir
        self._previous = None

    def __enter__(self):
        # remember where we came from before moving away
        self._previous = os.getcwd()
        os.chdir(self._target)

    def __exit__(self, *args, **kw):
        os.chdir(self._previous)
|
|
|
|
|
2013-06-24 20:41:05 +00:00
|
|
|
|
2015-04-06 21:07:10 +00:00
|
|
|
class environment_variable:
    """Context manager that temporarily sets environment variables.

    Usage::

        with environment_variable(FOO='1'):
            ...

    On exit every variable is restored to its previous value.  Variables
    that did not exist before entering are removed again (the previous
    implementation only restored non-None old values and therefore leaked
    newly-set variables into the test environment).
    """

    def __init__(self, **values):
        self.values = values
        self.old_values = {}

    def __enter__(self):
        for k, v in self.values.items():
            # None marks "was not set before"
            self.old_values[k] = os.environ.get(k)
            os.environ[k] = v

    def __exit__(self, *args, **kw):
        for k, v in self.old_values.items():
            if v is None:
                # variable did not exist before entering: remove it again
                os.environ.pop(k, None)
            else:
                os.environ[k] = v
|
|
|
|
|
|
|
|
|
2015-05-09 18:47:50 +00:00
|
|
|
class ArchiverTestCaseBase(BaseTestCase):
    """Shared fixture for archiver integration tests.

    Builds a fresh temporary directory tree per test (repository, input,
    output, keys, cache, exclude file) and provides :meth:`cmd` to invoke
    the archiver either in-process or in a separate interpreter.
    """

    # Repository location prefix prepended to the local repository path.
    # NOTE(review): presumably overridden by subclasses to exercise remote
    # repository URLs -- confirm against the remote test cases.
    prefix = ''

    def setUp(self):
        os.environ['BORG_CHECK_I_KNOW_WHAT_I_AM_DOING'] = '1'
        self.archiver = Archiver()
        self.tmpdir = tempfile.mkdtemp()
        self.repository_path = os.path.join(self.tmpdir, 'repository')
        self.repository_location = self.prefix + self.repository_path
        self.input_path = os.path.join(self.tmpdir, 'input')
        self.output_path = os.path.join(self.tmpdir, 'output')
        self.keys_path = os.path.join(self.tmpdir, 'keys')
        self.cache_path = os.path.join(self.tmpdir, 'cache')
        self.exclude_file_path = os.path.join(self.tmpdir, 'excludes')
        # confine key and cache files to the per-test tmpdir so tests do
        # not touch (or depend on) the user's real configuration
        os.environ['BORG_KEYS_DIR'] = self.keys_path
        os.environ['BORG_CACHE_DIR'] = self.cache_path
        os.mkdir(self.input_path)
        os.mkdir(self.output_path)
        os.mkdir(self.keys_path)
        os.mkdir(self.cache_path)
        with open(self.exclude_file_path, 'wb') as fd:
            fd.write(b'input/file2\n# A commment line, then a blank line\n\n')
        self._old_wd = os.getcwd()
        os.chdir(self.tmpdir)

    def tearDown(self):
        # Leave the tmpdir *before* deleting it: removing the current
        # working directory fails on some platforms and would strand the
        # process in a deleted directory.  (The previous implementation
        # did the rmtree first and chdir'ed back afterwards.)
        os.chdir(self._old_wd)
        shutil.rmtree(self.tmpdir)

    def cmd(self, *args, **kw):
        """Run the archiver with *args* and return its combined output.

        Keyword arguments:
            exit_code: expected return code (default 0); asserted.
            fork: if True, run ``python -m attic.archiver`` in a child
                process instead of calling ``Archiver.run()`` in-process.
        """
        exit_code = kw.get('exit_code', 0)
        fork = kw.get('fork', False)
        if fork:
            try:
                output = subprocess.check_output((sys.executable, '-m', 'attic.archiver') + args)
                ret = 0
            except subprocess.CalledProcessError as e:
                output = e.output
                ret = e.returncode
            output = os.fsdecode(output)
            if ret != exit_code:
                # dump the child's output to help diagnose the failure
                print(output)
            self.assert_equal(exit_code, ret)
            return output
        args = list(args)
        stdin, stdout, stderr = sys.stdin, sys.stdout, sys.stderr
        try:
            # give the archiver an empty stdin and capture everything it
            # prints on stdout/stderr into one buffer
            sys.stdin = StringIO()
            output = StringIO()
            sys.stdout = sys.stderr = output
            ret = self.archiver.run(args)
            sys.stdin, sys.stdout, sys.stderr = stdin, stdout, stderr
            if ret != exit_code:
                print(output.getvalue())
            self.assert_equal(exit_code, ret)
            return output.getvalue()
        finally:
            # restore the real streams even if the archiver raised
            sys.stdin, sys.stdout, sys.stderr = stdin, stdout, stderr

    def create_src_archive(self, name):
        """Create archive *name* in the test repository from the project
        source tree (a conveniently non-trivial input)."""
        self.cmd('create', self.repository_location + '::' + name, src_dir)
|
2010-10-16 09:45:36 +00:00
|
|
|
|
2014-02-16 21:21:18 +00:00
|
|
|
|
|
|
|
class ArchiverTestCase(ArchiverTestCaseBase):
|
|
|
|
|
2014-04-30 21:27:04 +00:00
|
|
|
def create_regular_file(self, name, size=0, contents=None):
    """Create a regular file below ``self.input_path``.

    If *contents* is None the file is filled with *size* ``b'X'`` bytes;
    otherwise *contents* is written verbatim.  Missing parent directories
    are created on demand.
    """
    path = os.path.join(self.input_path, name)
    parent = os.path.dirname(path)
    if not os.path.exists(parent):
        os.makedirs(parent)
    data = b'X' * size if contents is None else contents
    with open(path, 'wb') as fd:
        fd.write(data)
|
2010-10-31 19:12:32 +00:00
|
|
|
|
2013-07-25 20:54:19 +00:00
|
|
|
def create_test_files(self):
    """Create a minimal test case including all supported file types
    (regular file, directory, device nodes, hard/symlink, FIFO, xattr,
    BSD flags where available).
    """
    # File
    self.create_regular_file('empty', size=0)
    # next code line raises OverflowError on 32bit cpu (raspberry pi 2):
    # 2600-01-01 > 2**64 ns
    #os.utime('input/empty', (19880895600, 19880895600))
    # thus, we better test with something not that far in future:
    # 2038-01-19 (1970 + 2^31 - 1 seconds) is the 32bit "deadline":
    os.utime('input/empty', (2**31 - 1, 2**31 - 1))
    self.create_regular_file('file1', size=1024 * 80)
    self.create_regular_file('flagfile', size=1024)
    # Directory -- file2 must be created *before* dir2 is made read-only below
    self.create_regular_file('dir2/file2', size=1024 * 80)
    # File owner
    # NOTE(review): chown/mknod to arbitrary ids presumably requires root
    # or fakeroot -- confirm how the suite is normally run.
    os.chown('input/file1', 100, 200)
    # File mode
    os.chmod('input/file1', 0o7755)
    os.chmod('input/dir2', 0o555)
    # Block device
    os.mknod('input/bdev', 0o600 | stat.S_IFBLK, os.makedev(10, 20))
    # Char device
    os.mknod('input/cdev', 0o600 | stat.S_IFCHR, os.makedev(30, 40))
    # Hard link
    os.link(os.path.join(self.input_path, 'file1'),
            os.path.join(self.input_path, 'hardlink'))
    # Symlink (dangling on purpose; only the link itself is archived)
    os.symlink('somewhere', os.path.join(self.input_path, 'link1'))
    if xattr.is_enabled(self.input_path):
        xattr.setxattr(os.path.join(self.input_path, 'file1'), 'user.foo', b'bar')
        # XXX this always fails for me
        # ubuntu 14.04, on a TMP dir filesystem with user_xattr, using fakeroot
        # same for newer ubuntu and centos.
        # if this is supported just on specific platform, platform should be checked first,
        # so that the test setup for all tests using it does not fail here always for others.
        #xattr.setxattr(os.path.join(self.input_path, 'link1'), 'user.foo_symlink', b'bar_symlink', follow_symlinks=False)
    # FIFO node
    os.mkfifo(os.path.join(self.input_path, 'fifo1'))
    if has_lchflags:
        os.lchflags(os.path.join(self.input_path, 'flagfile'), stat.UF_NODUMP)
|
2013-07-25 20:54:19 +00:00
|
|
|
|
|
|
|
def test_basic_functionality(self):
    """End-to-end smoke test: init, create, extract, list and info."""
    self.create_test_files()
    repo = self.repository_location
    archive = repo + '::test'
    self.cmd('init', repo)
    self.cmd('create', archive, 'input')
    self.cmd('create', repo + '::test.2', 'input')
    with changedir('output'):
        self.cmd('extract', archive)
    # two archives in the repository, eleven items in the first one
    repo_listing = self.cmd('list', repo)
    self.assert_equal(len(repo_listing.splitlines()), 2)
    archive_listing = self.cmd('list', archive)
    self.assert_equal(len(archive_listing.splitlines()), 11)
    self.assert_dirs_equal('input', 'output/input')
    info_output = self.cmd('info', archive)
    self.assert_in('Number of files: 4', info_output)
    # wipe the local cache and query again with the safety override set
    shutil.rmtree(self.cache_path)
    with environment_variable(BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK='1'):
        info_output2 = self.cmd('info', archive)
    # info_output2 starts with some "initializing cache" text but should
    # end the same way as info_output
    assert info_output2.endswith(info_output)
|
2010-10-16 09:45:36 +00:00
|
|
|
|
2015-04-06 21:07:10 +00:00
|
|
|
def _extract_repository_id(self, path):
    """Return the binary id of the repository at *path*.

    The previous implementation ignored *path* and always opened
    ``self.repository_path``; using the argument makes the helper also
    work for suffixed/renamed repository directories.
    """
    return Repository(path).id
|
|
|
|
|
|
|
|
def _set_repository_id(self, path, id):
    """Overwrite the repository id in the config of the repo at *path*
    and return the id as read back from that repository.

    The previous implementation wrote the config at *path* but read the
    id back from ``self.repository_path``; use *path* consistently.
    """
    config = RawConfigParser()
    config.read(os.path.join(path, 'config'))
    # ids are stored hex-encoded in the config file
    config.set('repository', 'id', hexlify(id).decode('ascii'))
    with open(os.path.join(path, 'config'), 'w') as fd:
        config.write(fd)
    return Repository(path).id
|
|
|
|
|
simple sparse file support, made chunk buffer size flexible
Implemented sparse file support to remove this blocker for people backing up lots of
huge sparse files (like VM images). Attic could not support this use case yet as it would
have restored all files to their fully expanded size, possibly running out of disk space if
the total expanded size would be bigger than the available space.
Please note that this is a very simple implementation of sparse file support - at backup time,
it does not do anything special (it just reads all these zero bytes, chunks, compresses and
encrypts them as usual). At restore time, it detects chunks that are completely filled with zeros
and does a seek on the output file rather than a normal data write, so it creates a hole in
a sparse file. The chunk size for these all-zero chunks is currently 10MiB, so it'll create holes
of multiples of that size (depends also a bit on fs block size, alignment, previously written data).
Special cases like sparse files starting and/or ending with a hole are supported.
Please note that it will currently always create sparse files at restore time if it detects all-zero
chunks.
Also improved:
I needed a constant for the max. chunk size, so I introduced CHUNK_MAX (see also
existing CHUNK_MIN) for the maximum chunk size (which is the same as the chunk
buffer size).
Attic still always uses 10MiB chunk buffer size now, but it could be changed now more easily.
2015-04-15 14:29:18 +00:00
|
|
|
def test_sparse_file(self):
    """Archive a sparse file and verify that ``extract --sparse``
    recreates the holes (where the platform supports sparse files)."""
    # no sparse file support on Mac OS X
    sparse_support = sys.platform != 'darwin'
    filename = os.path.join(self.input_path, 'sparse')
    content = b'foobar'
    hole_size = 5 * CHUNK_MAX  # 5 full chunker buffers
    with open(filename, 'wb') as fd:
        # create a file that has a hole at the beginning and end (if the
        # OS and filesystem supports sparse files)
        fd.seek(hole_size, 1)
        fd.write(content)
        fd.seek(hole_size, 1)
        pos = fd.tell()
        # extend the file to its full size without writing the trailing
        # hole: seeking past EOF alone does not change the file size
        fd.truncate(pos)
    total_len = hole_size + len(content) + hole_size
    st = os.stat(filename)
    self.assert_equal(st.st_size, total_len)
    if sparse_support and hasattr(st, 'st_blocks'):
        self.assert_true(st.st_blocks * 512 < total_len / 10)  # is input sparse?
    self.cmd('init', self.repository_location)
    self.cmd('create', self.repository_location + '::test', 'input')
    with changedir('output'):
        self.cmd('extract', '--sparse', self.repository_location + '::test')
    self.assert_dirs_equal('input', 'output/input')
    filename = os.path.join(self.output_path, 'input', 'sparse')
    with open(filename, 'rb') as fd:
        # check if file contents are as expected
        self.assert_equal(fd.read(hole_size), b'\0' * hole_size)
        self.assert_equal(fd.read(len(content)), content)
        self.assert_equal(fd.read(hole_size), b'\0' * hole_size)
    st = os.stat(filename)
    self.assert_equal(st.st_size, total_len)
    if sparse_support and hasattr(st, 'st_blocks'):
        self.assert_true(st.st_blocks * 512 < total_len / 10)  # is output sparse?
|
|
|
|
|
2015-04-06 21:07:10 +00:00
|
|
|
def test_repository_swap_detection(self):
    """An unencrypted repository impersonating an encrypted one (by
    carrying its id) must be rejected with EncryptionMethodMismatch."""
    self.create_test_files()
    os.environ['BORG_PASSPHRASE'] = 'passphrase'
    repo = self.repository_location
    self.cmd('init', '--encryption=passphrase', repo)
    repository_id = self._extract_repository_id(self.repository_path)
    self.cmd('create', repo + '::test', 'input')
    # replace the encrypted repository by an unencrypted one that
    # carries the stolen id of the original repository
    shutil.rmtree(self.repository_path)
    self.cmd('init', '--encryption=none', repo)
    self._set_repository_id(self.repository_path, repository_id)
    self.assert_equal(repository_id, self._extract_repository_id(self.repository_path))

    def create_again():
        self.cmd('create', repo + '::test.2', 'input')

    self.assert_raises(Cache.EncryptionMethodMismatch, create_again)
|
2015-04-06 21:07:10 +00:00
|
|
|
|
2015-04-13 20:35:09 +00:00
|
|
|
def test_repository_swap_detection2(self):
    """Swapping a different unencrypted repository into the place of an
    encrypted one must abort with RepositoryAccessAborted."""
    self.create_test_files()
    unencrypted = self.repository_location + '_unencrypted'
    encrypted = self.repository_location + '_encrypted'
    self.cmd('init', '--encryption=none', unencrypted)
    os.environ['BORG_PASSPHRASE'] = 'passphrase'
    self.cmd('init', '--encryption=passphrase', encrypted)
    self.cmd('create', encrypted + '::test', 'input')
    # substitute the unencrypted repository for the encrypted one
    shutil.rmtree(self.repository_path + '_encrypted')
    os.rename(self.repository_path + '_unencrypted', self.repository_path + '_encrypted')

    def create_again():
        self.cmd('create', encrypted + '::test.2', 'input')

    self.assert_raises(Cache.RepositoryAccessAborted, create_again)
|
2015-04-13 20:35:09 +00:00
|
|
|
|
2014-08-02 20:15:21 +00:00
|
|
|
def test_strip_components(self):
    """``extract --strip-components N`` drops N leading path elements."""
    archive = self.repository_location + '::test'
    self.cmd('init', self.repository_location)
    self.create_regular_file('dir/file')
    self.cmd('create', archive, 'input')
    with changedir('output'):
        # stripping more components than the path has extracts nothing
        self.cmd('extract', archive, '--strip-components', '3')
        self.assert_true(not os.path.exists('file'))
        # each smaller strip count keeps one more leading path element
        with self.assert_creates_file('file'):
            self.cmd('extract', archive, '--strip-components', '2')
        with self.assert_creates_file('dir/file'):
            self.cmd('extract', archive, '--strip-components', '1')
        with self.assert_creates_file('input/dir/file'):
            self.cmd('extract', archive, '--strip-components', '0')
|
2014-08-02 20:15:21 +00:00
|
|
|
|
2013-06-22 11:33:21 +00:00
|
|
|
def test_extract_include_exclude(self):
    """Include paths, ``--exclude`` and ``--exclude-from`` on extract."""
    archive = self.repository_location + '::test'
    self.cmd('init', self.repository_location)
    for name in ('file1', 'file2', 'file3', 'file4'):
        self.create_regular_file(name, size=1024 * 80)
    # file4 is already excluded at archive creation time
    self.cmd('create', '--exclude=input/file4', archive, 'input')
    # extracting a single named path restores only that file
    with changedir('output'):
        self.cmd('extract', archive, 'input/file1', )
    self.assert_equal(sorted(os.listdir('output/input')), ['file1'])
    # --exclude on extract skips file2
    with changedir('output'):
        self.cmd('extract', '--exclude=input/file2', archive)
    self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file3'])
    # the exclude file written in setUp also filters out file2
    with changedir('output'):
        self.cmd('extract', '--exclude-from=' + self.exclude_file_path, archive)
    self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file3'])
|
2013-06-22 11:33:21 +00:00
|
|
|
|
2014-04-30 21:27:04 +00:00
|
|
|
def test_exclude_caches(self):
    """``--exclude-caches`` skips directories tagged with a valid
    CACHEDIR.TAG, but keeps directories with an invalid tag."""
    archive = self.repository_location + '::test'
    self.cmd('init', self.repository_location)
    self.create_regular_file('file1', size=1024 * 80)
    # cache1 carries a valid signature, cache2 an invalid one
    self.create_regular_file('cache1/CACHEDIR.TAG', contents=b'Signature: 8a477f597d28d172789f06886806bc55 extra stuff')
    self.create_regular_file('cache2/CACHEDIR.TAG', contents=b'invalid signature')
    self.cmd('create', '--exclude-caches', archive, 'input')
    with changedir('output'):
        self.cmd('extract', archive)
    # only the correctly tagged cache directory was excluded
    self.assert_equal(sorted(os.listdir('output/input')), ['cache2', 'file1'])
    self.assert_equal(sorted(os.listdir('output/input/cache2')), ['CACHEDIR.TAG'])
|
|
|
|
|
2013-08-03 11:34:14 +00:00
|
|
|
def test_path_normalization(self):
    """Paths given with redundant '..'/'.' segments are stored in
    canonical form inside the archive."""
    archive = self.repository_location + '::test'
    self.cmd('init', self.repository_location)
    self.create_regular_file('dir1/dir2/file', size=1024 * 80)
    with changedir('input/dir1/dir2'):
        # a deliberately convoluted relative path to the input directory
        self.cmd('create', archive, '../../../input/dir1/../dir1/dir2/..')
    listing = self.cmd('list', archive)
    # no '..' may survive normalization; the real path must be listed
    self.assert_not_in('..', listing)
    self.assert_in(' input/dir1/dir2/file', listing)
|
|
|
|
|
2014-12-14 18:15:54 +00:00
|
|
|
def test_exclude_normalization(self):
    """Exclude patterns match regardless of how the path was spelled
    ('file1', './file1', 'input/./file1')."""
    repo = self.repository_location
    self.cmd('init', repo)
    self.create_regular_file('file1', size=1024 * 80)
    self.create_regular_file('file2', size=1024 * 80)
    # plain pattern against a '.' input path
    with changedir('input'):
        self.cmd('create', '--exclude=file1', repo + '::test1', '.')
    with changedir('output'):
        self.cmd('extract', repo + '::test1')
    self.assert_equal(sorted(os.listdir('output')), ['file2'])
    # pattern with a leading './' must normalize the same way
    with changedir('input'):
        self.cmd('create', '--exclude=./file1', repo + '::test2', '.')
    with changedir('output'):
        self.cmd('extract', repo + '::test2')
    self.assert_equal(sorted(os.listdir('output')), ['file2'])
    # pattern with an embedded './' component
    self.cmd('create', '--exclude=input/./file1', repo + '::test3', 'input')
    with changedir('output'):
        self.cmd('extract', repo + '::test3')
    self.assert_equal(sorted(os.listdir('output/input')), ['file2'])
|
|
|
|
|
2014-02-16 21:37:12 +00:00
|
|
|
def test_repeated_files(self):
    """Passing the same source path twice to "create" must not break the backup."""
    self.cmd('init', self.repository_location)
    self.create_regular_file('file1', size=1024 * 80)
    # 'input' is given twice on purpose
    self.cmd('create', self.repository_location + '::test', 'input', 'input')
2012-12-06 22:04:01 +00:00
|
|
|
def test_overwrite(self):
    """Extract may overwrite files and empty directories, but not non-empty dirs."""
    archive = self.repository_location + '::test'
    self.create_regular_file('file1', size=1024 * 80)
    self.create_regular_file('dir2/file2', size=1024 * 80)
    self.cmd('init', self.repository_location)
    self.cmd('create', archive, 'input')
    # Overwriting regular files and directories should be supported
    for path in ('output/input', 'output/input/file1', 'output/input/dir2'):
        os.mkdir(path)
    with changedir('output'):
        self.cmd('extract', archive)
    self.assert_dirs_equal('input', 'output/input')
    # But non-empty dirs should fail
    os.unlink('output/input/file1')
    os.mkdir('output/input/file1')
    os.mkdir('output/input/file1/dir')
    with changedir('output'):
        self.cmd('extract', archive, exit_code=1)
|
2015-03-24 06:11:00 +00:00
|
|
|
def test_rename(self):
    """Rename must update the manifest while keeping both archives extractable."""
    repo = self.repository_location
    self.create_regular_file('file1', size=1024 * 80)
    self.create_regular_file('dir2/file2', size=1024 * 80)
    self.cmd('init', repo)
    self.cmd('create', repo + '::test', 'input')
    self.cmd('create', repo + '::test.2', 'input')
    self.cmd('extract', '--dry-run', repo + '::test')
    self.cmd('extract', '--dry-run', repo + '::test.2')
    self.cmd('rename', repo + '::test', 'test.3')
    self.cmd('extract', '--dry-run', repo + '::test.2')
    self.cmd('rename', repo + '::test.2', 'test.4')
    self.cmd('extract', '--dry-run', repo + '::test.3')
    self.cmd('extract', '--dry-run', repo + '::test.4')
    # Make sure both archives have been renamed
    repository = Repository(self.repository_path)
    manifest, key = Manifest.load(repository)
    self.assert_equal(len(manifest.archives), 2)
    self.assert_in('test.3', manifest.archives)
    self.assert_in('test.4', manifest.archives)
|
2012-11-30 20:47:35 +00:00
|
|
|
def test_delete(self):
    """Deleting both archives must leave only the manifest in the repository."""
    repo = self.repository_location
    self.create_regular_file('file1', size=1024 * 80)
    self.create_regular_file('dir2/file2', size=1024 * 80)
    self.cmd('init', repo)
    self.cmd('create', repo + '::test', 'input')
    self.cmd('create', repo + '::test.2', 'input')
    self.cmd('extract', '--dry-run', repo + '::test')
    self.cmd('extract', '--dry-run', repo + '::test.2')
    self.cmd('delete', repo + '::test')
    # the surviving archive must still extract cleanly
    self.cmd('extract', '--dry-run', repo + '::test.2')
    self.cmd('delete', repo + '::test.2')
    # Make sure all data except the manifest has been deleted
    repository = Repository(self.repository_path)
    self.assert_equal(len(repository), 1)
2013-06-20 10:44:58 +00:00
|
|
|
def test_corrupted_repository(self):
    """Flipping bytes inside a repository segment file must make "check" fail.

    The segment file contains binary data, so it is opened in binary mode:
    text-mode ``seek()`` to an arbitrary integer offset is undefined behavior
    per the io docs, and decoding arbitrary segment bytes as text can raise.
    """
    self.cmd('init', self.repository_location)
    self.create_src_archive('test')
    # sanity: the fresh repository extracts and checks cleanly
    self.cmd('extract', '--dry-run', self.repository_location + '::test')
    self.cmd('check', self.repository_location)
    # pick the newest segment file in data/0 and damage it in place
    name = sorted(os.listdir(os.path.join(self.tmpdir, 'repository', 'data', '0')), reverse=True)[0]
    with open(os.path.join(self.tmpdir, 'repository', 'data', '0', name), 'r+b') as fd:
        fd.seek(100)
        fd.write(b'XXXX')
    self.cmd('check', self.repository_location, exit_code=1)
2013-08-11 20:18:56 +00:00
|
|
|
def test_readonly_repository(self):
    """Extraction must work from a repository that is not writable.

    Permissions are changed with an argv list (no shell), so repository
    paths containing spaces or shell metacharacters cannot break the test.
    """
    self.cmd('init', self.repository_location)
    self.create_src_archive('test')
    subprocess.call(['chmod', '-R', 'ugo-w', self.repository_path])
    try:
        self.cmd('extract', '--dry-run', self.repository_location + '::test')
    finally:
        # Restore permissions so shutil.rmtree is able to delete it
        subprocess.call(['chmod', '-R', 'u+w', self.repository_path])
2014-02-19 21:46:15 +00:00
|
|
|
def test_cmdline_compatibility(self):
    """Deprecated command/option spellings still work but print a warning."""
    repo = self.repository_location
    self.create_regular_file('file1', size=1024 * 80)
    self.cmd('init', repo)
    self.cmd('create', repo + '::test', 'input')
    # "verify" is a deprecated alias
    output = self.cmd('verify', '-v', repo + '::test')
    self.assert_in('"borg verify" has been deprecated', output)
    # "--hourly" is a deprecated alias for "--keep-hourly"
    output = self.cmd('prune', repo, '--hourly=1')
    self.assert_in('"--hourly" has been deprecated. Use "--keep-hourly" instead', output)
2013-06-20 10:44:58 +00:00
|
|
|
def test_prune_repository(self):
    """prune --dry-run only reports; a real prune removes the older archive."""
    repo = self.repository_location
    self.cmd('init', repo)
    for name in ('test1', 'test2'):
        self.cmd('create', repo + '::' + name, src_dir)
    # dry run: report what would happen, change nothing
    output = self.cmd('prune', '-v', '--dry-run', repo, '--keep-daily=2')
    self.assert_in('Keeping archive: test2', output)
    self.assert_in('Would prune: test1', output)
    listing = self.cmd('list', repo)
    self.assert_in('test1', listing)
    self.assert_in('test2', listing)
    # real prune: the older archive disappears
    self.cmd('prune', repo, '--keep-daily=2')
    listing = self.cmd('list', repo)
    self.assert_not_in('test1', listing)
    self.assert_in('test2', listing)
2013-06-26 19:20:31 +00:00
|
|
|
def test_usage(self):
    """Running without arguments or with -h must exit via SystemExit."""
    for bad_args in ((), ('-h',)):
        # bind as a default to avoid the late-binding closure pitfall
        self.assert_raises(SystemExit, lambda args=bad_args: self.cmd(*args))
2013-07-25 20:54:19 +00:00
|
|
|
@unittest.skipUnless(has_llfuse, 'llfuse not installed')
def test_fuse_mount_repository(self):
    """Mounting a whole repository exposes every archive as a directory."""
    mountpoint = os.path.join(self.tmpdir, 'mountpoint')
    os.mkdir(mountpoint)
    self.cmd('init', self.repository_location)
    self.create_test_files()
    self.cmd('create', self.repository_location + '::archive', 'input')
    self.cmd('create', self.repository_location + '::archive2', 'input')
    try:
        self.cmd('mount', self.repository_location, mountpoint, fork=True)
        self.wait_for_mount(mountpoint)
        for name in ('archive', 'archive2'):
            self.assert_dirs_equal(self.input_path, os.path.join(mountpoint, name, 'input'))
    finally:
        # unmount command differs per platform
        umount = 'fusermount -u ' if sys.platform.startswith('linux') else 'umount '
        os.system(umount + mountpoint)
        os.rmdir(mountpoint)
        # Give the daemon some time to exit
        time.sleep(.2)
|
|
|
|
@unittest.skipUnless(has_llfuse, 'llfuse not installed')
def test_fuse_mount_archive(self):
    """Mounting a single archive exposes its contents at the mountpoint root."""
    mountpoint = os.path.join(self.tmpdir, 'mountpoint')
    os.mkdir(mountpoint)
    self.cmd('init', self.repository_location)
    self.create_test_files()
    self.cmd('create', self.repository_location + '::archive', 'input')
    try:
        self.cmd('mount', self.repository_location + '::archive', mountpoint, fork=True)
        self.wait_for_mount(mountpoint)
        self.assert_dirs_equal(self.input_path, os.path.join(mountpoint, 'input'))
    finally:
        # unmount command differs per platform
        umount = 'fusermount -u ' if sys.platform.startswith('linux') else 'umount '
        os.system(umount + mountpoint)
        os.rmdir(mountpoint)
        # Give the daemon some time to exit
        time.sleep(.2)
|
2013-08-12 11:39:46 +00:00
|
|
|
def verify_aes_counter_uniqueness(self, method):
    """Check that no AES-CTR counter value is ever reused across repository
    operations (init/create/delete) for the given encryption *method*.

    Counter reuse in CTR mode would be a catastrophic crypto failure, so this
    walks every chunk in the repository after each operation and records the
    counter range each chunk consumed.
    """
    seen = set()  # Chunks already seen
    used = set()  # counter values already used

    def verify_uniqueness():
        # Re-open the repository and scan all chunks; only chunks not seen
        # in a previous pass are inspected, so each pass is incremental.
        repository = Repository(self.repository_path)
        for key, _ in repository.open_index(repository.get_transaction_id()).iteritems():
            data = repository.get(key)
            hash = sha256(data).digest()
            if hash not in seen:
                seen.add(hash)
                # NOTE(review): offsets assume the encrypted chunk layout has a
                # 41-byte header with the 8-byte big-endian nonce at bytes
                # 33..40 and ciphertext following -- confirm against key.py.
                num_blocks = num_aes_blocks(len(data) - 41)
                nonce = bytes_to_long(data[33:41])
                for counter in range(nonce, nonce + num_blocks):
                    # every AES block consumes one counter value; none may repeat
                    self.assert_not_in(counter, used)
                    used.add(counter)

    self.create_test_files()
    os.environ['BORG_PASSPHRASE'] = 'passphrase'
    self.cmd('init', '--encryption=' + method, self.repository_location)
    verify_uniqueness()
    self.cmd('create', self.repository_location + '::test', 'input')
    verify_uniqueness()
    self.cmd('create', self.repository_location + '::test.2', 'input')
    verify_uniqueness()
    self.cmd('delete', self.repository_location + '::test.2')
    verify_uniqueness()
    # counters must form a gap-free sequence starting at 0
    self.assert_equal(used, set(range(len(used))))
|
|
|
|
def test_aes_counter_uniqueness_keyfile(self):
    """AES-CTR counters must never repeat with keyfile encryption."""
    self.verify_aes_counter_uniqueness('keyfile')
|
|
|
|
def test_aes_counter_uniqueness_passphrase(self):
    """AES-CTR counters must never repeat with passphrase encryption."""
    self.verify_aes_counter_uniqueness('passphrase')
|
|
|
2014-02-16 21:21:18 +00:00
|
|
|
class ArchiverCheckTestCase(ArchiverTestCaseBase):
    """Exercise "check" / "check --repair" against deliberately damaged repositories."""

    def setUp(self):
        super().setUp()
        # Shrink the chunk buffer so each archive's item metadata spans many
        # chunks; the chunk-deletion tests below rely on that.
        with patch.object(ChunkBuffer, 'BUFFER_SIZE', 10):
            self.cmd('init', self.repository_location)
            self.create_src_archive('archive1')
            self.create_src_archive('archive2')

    def open_archive(self, name):
        """Open the repository directly and return (archive, repository) for *name*."""
        repository = Repository(self.repository_path)
        manifest, key = Manifest.load(repository)
        archive = Archive(repository, key, manifest, name)
        return archive, repository

    def test_check_usage(self):
        """--repository-only / --archives-only select which check phases run."""
        output = self.cmd('check', self.repository_location, exit_code=0)
        self.assert_in('Starting repository check', output)
        self.assert_in('Starting archive consistency check', output)
        output = self.cmd('check', '--repository-only', self.repository_location, exit_code=0)
        self.assert_in('Starting repository check', output)
        self.assert_not_in('Starting archive consistency check', output)
        output = self.cmd('check', '--archives-only', self.repository_location, exit_code=0)
        self.assert_not_in('Starting repository check', output)
        self.assert_in('Starting archive consistency check', output)

    def test_missing_file_chunk(self):
        """A deleted file-content chunk fails check; --repair restores consistency."""
        archive, repository = self.open_archive('archive1')
        # drop the last content chunk of one known file from the source archive
        for item in archive.iter_items():
            if item[b'path'].endswith('testsuite/archiver.py'):
                repository.delete(item[b'chunks'][-1][0])
                break
        repository.commit()
        self.cmd('check', self.repository_location, exit_code=1)
        self.cmd('check', '--repair', self.repository_location, exit_code=0)
        self.cmd('check', self.repository_location, exit_code=0)

    def test_missing_archive_item_chunk(self):
        """A deleted archive-metadata chunk fails check; --repair recovers."""
        archive, repository = self.open_archive('archive1')
        repository.delete(archive.metadata[b'items'][-5])
        repository.commit()
        self.cmd('check', self.repository_location, exit_code=1)
        self.cmd('check', '--repair', self.repository_location, exit_code=0)
        self.cmd('check', self.repository_location, exit_code=0)

    def test_missing_archive_metadata(self):
        """A deleted archive metadata object fails check; --repair recovers."""
        archive, repository = self.open_archive('archive1')
        repository.delete(archive.id)
        repository.commit()
        self.cmd('check', self.repository_location, exit_code=1)
        self.cmd('check', '--repair', self.repository_location, exit_code=0)
        self.cmd('check', self.repository_location, exit_code=0)

    def test_missing_manifest(self):
        """A deleted manifest is rebuilt by --repair, recovering both archives."""
        archive, repository = self.open_archive('archive1')
        repository.delete(Manifest.MANIFEST_ID)
        repository.commit()
        self.cmd('check', self.repository_location, exit_code=1)
        output = self.cmd('check', '--repair', self.repository_location, exit_code=0)
        self.assert_in('archive1', output)
        self.assert_in('archive2', output)
        self.cmd('check', self.repository_location, exit_code=0)

    def test_extra_chunks(self):
        """An orphaned chunk makes check fail until --repair removes it."""
        self.cmd('check', self.repository_location, exit_code=0)
        # open by filesystem path, consistent with open_archive() above
        # (was Repository(self.repository_location))
        repository = Repository(self.repository_path)
        repository.put(b'01234567890123456789012345678901', b'xxxx')
        repository.commit()
        repository.close()
        # orphan is reported on every run until repaired
        self.cmd('check', self.repository_location, exit_code=1)
        self.cmd('check', self.repository_location, exit_code=1)
        self.cmd('check', '--repair', self.repository_location, exit_code=0)
        self.cmd('check', self.repository_location, exit_code=0)
        self.cmd('extract', '--dry-run', self.repository_location + '::archive1', exit_code=0)
|
2011-10-29 15:01:07 +00:00
|
|
|
|
2013-06-24 20:41:05 +00:00
|
|
|
class RemoteArchiverTestCase(ArchiverTestCase):
|
2013-07-03 20:30:04 +00:00
|
|
|
prefix = '__testsuite__:'
|
2014-03-24 20:28:59 +00:00
|
|
|
|
|
|
|
def test_remote_repo_restrict_to_path(self):
    """The server-side --restrict-to-path option must gate repository creation."""
    self.cmd('init', self.repository_location)
    parent_dir = os.path.dirname(self.repository_path)
    # an unrelated restriction path forbids creating a repository
    with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', '/foo']):
        self.assert_raises(PathNotAllowed, lambda: self.cmd('init', self.repository_location + '_1'))
    # restricting to the repository's parent directory allows it
    with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', parent_dir]):
        self.cmd('init', self.repository_location + '_2')
    # multiple restriction paths are combined; any match is enough
    with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', '/foo', '--restrict-to-path', parent_dir]):
        self.cmd('init', self.repository_location + '_3')
|