Mirror of https://github.com/borgbackup/borg.git
Improved handling of already existing destination files
commit 8bb522f1f7
parent e48d7aa88c
3 changed files with 64 additions and 32 deletions
darc/archive.py

@@ -1,7 +1,7 @@
 from __future__ import with_statement
 from datetime import datetime, timedelta
 from getpass import getuser
-from itertools import izip_longest
+from itertools import izip_longest, islice
 import msgpack
 import os
 import socket
@@ -23,6 +23,7 @@ have_lchmod = hasattr(os, 'lchmod')
 linux = sys.platform == 'linux2'
 
 
+
 class ItemIter(object):
 
     def __init__(self, unpacker, filter):
@@ -133,11 +134,18 @@ class Archive(object):
 
     def iter_items(self, filter=None):
         unpacker = msgpack.Unpacker()
-        for id in self.metadata['items']:
-            unpacker.feed(self.key.decrypt(id, self.store.get(id)))
-        iter = ItemIter(unpacker, filter)
-        for item in iter:
-            yield item, iter.peek
+        i = 0
+        n = 20
+        while True:
+            items = self.metadata['items'][i:i + n]
+            i += n
+            if not items:
+                break
+            for id, chunk in [(id, chunk) for id, chunk in izip_longest(items, self.store.get_many(items))]:
+                unpacker.feed(self.key.decrypt(id, chunk))
+            iter = ItemIter(unpacker, filter)
+            for item in iter:
+                yield item, iter.peek
 
     def add_item(self, item):
         self.items.write(msgpack.packb(item))
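Note: the rewritten iter_items fetches item metadata in fixed slices of 20 ids and hands each slice to store.get_many(), trading one store request per id for one per batch. A minimal standalone sketch of the batching pattern, with fetch_many as a hypothetical stand-in for store.get_many:

    from itertools import izip_longest

    def iter_batched(ids, fetch_many, batch_size=20):
        # Yield (id, payload) pairs, requesting batch_size ids per round trip.
        i = 0
        while True:
            batch = ids[i:i + batch_size]
            i += batch_size
            if not batch:
                break
            # izip_longest pairs each requested id with the payload returned for it
            for id, payload in izip_longest(batch, fetch_many(batch)):
                yield id, payload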
@@ -215,10 +223,19 @@ class Archive(object):
         cache.rollback()
         return stats
 
-    def extract_item(self, item, dest=None, start_cb=None, restore_attrs=True, peek=None):
+    def extract_item(self, item, dest=None, restore_attrs=True, peek=None):
         dest = dest or self.cwd
         assert item['path'][0] not in ('/', '\\', ':')
         path = os.path.join(dest, encode_filename(item['path']))
+        # Attempt to remove existing files, ignore errors on failure
+        try:
+            st = os.lstat(path)
+            if stat.S_ISDIR(st.st_mode):
+                os.rmdir(path)
+            else:
+                os.unlink(path)
+        except OSError:
+            pass
         mode = item['mode']
         if stat.S_ISDIR(mode):
             if not os.path.exists(path):
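Note: this lstat/rmdir/unlink block is what gives the commit its name. Before recreating an entry, extract_item now clears whatever already occupies the destination path, using rmdir for directories and unlink for everything else, and deliberately ignores failures so that the create which follows reports the real error. The same pattern in isolation, as a sketch:

    import os
    import stat

    def remove_existing(path):
        # Best effort: missing paths and non-empty directories are left alone;
        # the create/open that follows will surface any remaining conflict.
        try:
            st = os.lstat(path)
            if stat.S_ISDIR(st.st_mode):
                os.rmdir(path)  # only succeeds for empty directories
            else:
                os.unlink(path)
        except OSError:
            pass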
@@ -235,19 +252,11 @@ class Archive(object):
             os.unlink(path)
             os.link(source, path)
         else:
-            n = len(item['chunks'])
-            ## 0 chunks indicates an empty (0 bytes) file
-            if n == 0:
-                open(path, 'wb').close()
-                start_cb(item)
-            else:
-                fd = open(path, 'wb')
-                start_cb(item)
+            with open(path, 'wbx') as fd:
                 ids = [id for id, size, csize in item['chunks']]
                 for id, chunk in izip_longest(ids, self.store.get_many(ids, peek)):
                     data = self.key.decrypt(id, chunk)
                     fd.write(data)
-                fd.close()
             self.restore_attrs(path, item)
         elif stat.S_ISFIFO(mode):
             if not os.path.exists(os.path.dirname(path)):
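Note: the 'wbx' mode is worth a comment. Python 2's open() passes the mode string through to fopen(3), where 'x' is a glibc extension (later adopted by C11) requesting exclusive creation, i.e. O_EXCL, so the open fails if the path still exists, for instance a non-empty directory that the removal step above could not delete. Since 'x' is not portable across all C libraries, a sketch of an equivalent using os.open directly:

    import os

    def open_exclusive(path):
        # O_EXCL makes creation fail with EEXIST if path already exists,
        # mirroring what the 'x' in fopen's "wbx" mode requests on glibc.
        fd = os.open(path, os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0666)
        return os.fdopen(fd, 'wb')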
@@ -312,8 +321,8 @@ class Archive(object):
 
     def delete(self, cache):
         unpacker = msgpack.Unpacker()
-        for id, chunk in izip_longest(self.metadata['items'], self.store.get_many(self.metadata['items'])):
-            unpacker.feed(self.key.decrypt(id, chunk))
+        for id in self.metadata['items']:
+            unpacker.feed(self.key.decrypt(id, self.store.get(id)))
         for item in unpacker:
             try:
                 for chunk_id, size, csize in item['chunks']:
darc/archiver.py
@@ -31,7 +31,7 @@ class Archiver(object):
     def print_error(self, msg, *args):
         msg = args and msg % args or msg
         self.exit_code = 1
-        print >> sys.stderr, msg
+        print >> sys.stderr, 'darc: ' + msg
 
     def print_verbose(self, msg, *args, **kw):
         if self.verbose:
@@ -149,22 +149,24 @@ class Archiver(object):
             self.print_error('Unknown file type: %s', path)
 
     def do_extract(self, args):
-        def start_cb(item):
-            self.print_verbose(item['path'])
-
         store = self.open_store(args.archive)
         manifest, key = Manifest.load(store)
         archive = Archive(store, key, manifest, args.archive.archive,
                           numeric_owner=args.numeric_owner)
         dirs = []
         for item, peek in archive.iter_items(lambda item: not exclude_path(item['path'], args.patterns)):
-            if stat.S_ISDIR(item['mode']):
-                dirs.append(item)
-                archive.extract_item(item, args.dest, start_cb, restore_attrs=False)
-            else:
-                archive.extract_item(item, args.dest, start_cb, peek=peek)
-            if dirs and not item['path'].startswith(dirs[-1]['path']):
+            while dirs and not item['path'].startswith(dirs[-1]['path']):
                 archive.extract_item(dirs.pop(-1), args.dest)
+            self.print_verbose(item['path'])
+            try:
+                if stat.S_ISDIR(item['mode']):
+                    dirs.append(item)
+                    archive.extract_item(item, args.dest, restore_attrs=False)
+                else:
+                    archive.extract_item(item, args.dest, peek=peek)
+            except IOError, e:
+                self.print_error('%s: %s', item['path'], e)
+
         while dirs:
             archive.extract_item(dirs.pop(-1), args.dest)
         return self.exit_code
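Note: do_extract keeps extracted directories on a stack and restores a directory's attributes only after the walk has left it, because restoring a directory's mtime or a restrictive mode before its contents are written would be undone or blocked by those writes. Turning the popping "if" into a "while" finalizes several nested directories in one step when the walk leaves a deep subtree, and the new try/except records per-item IOErrors through print_error (which sets exit_code) instead of aborting the run. A sketch of the deferral logic, assuming items arrive in depth-first path order as they do here:

    def finalize_left_dirs(dirs, current_path, finalize):
        # Pop every stacked directory that is no longer an ancestor of
        # current_path; its contents are complete, so restore its attributes.
        while dirs and not current_path.startswith(dirs[-1]):
            finalize(dirs.pop(-1))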
@@ -409,7 +411,10 @@ class Archiver(object):
 
 def main():
     archiver = Archiver()
-    sys.exit(archiver.run())
+    exit_code = archiver.run()
+    if exit_code:
+        archiver.print_error('Exiting with failure status due to previous errors')
+    sys.exit(exit_code)
 
 if __name__ == '__main__':
     main()
darc/test.py
@@ -41,7 +41,6 @@ class Test(unittest.TestCase):
         shutil.rmtree(self.tmpdir)
 
     def darc(self, *args, **kwargs):
-        os.environ['DARC_PASSPHRASE'] = ''
         exit_code = kwargs.get('exit_code', 0)
         args = list(args)
         try:
@@ -58,7 +57,7 @@ class Test(unittest.TestCase):
         sys.stdout, sys.stderr = stdout, stderr
 
     def create_src_archive(self, name):
-        src_dir = os.path.join(os.getcwd(), os.path.dirname(__file__))
+        src_dir = os.path.join(os.getcwd(), os.path.dirname(__file__), '..')
         self.darc('init', self.store_location)
         self.darc('create', self.store_location + '::' + name, src_dir)
 
@@ -66,7 +65,7 @@ class Test(unittest.TestCase):
         filename = os.path.join(self.input_path, name)
         if not os.path.exists(os.path.dirname(filename)):
             os.makedirs(os.path.dirname(filename))
-        with open(filename, 'wb') as fd:
+        with open(filename, 'wbx') as fd:
             fd.write('X' * size)
 
     def get_xattrs(self, path):
@@ -98,6 +97,8 @@ class Test(unittest.TestCase):
     def test_basic_functionality(self):
         self.create_regual_file('file1', size=1024 * 80)
         self.create_regual_file('dir2/file2', size=1024 * 80)
+        os.chmod('input/file1', 0600)
+        os.chmod('input/dir2', 0700)
         x = xattr(os.path.join(self.input_path, 'file1'))
         x.set('user.foo', 'bar')
         os.link(os.path.join(self.input_path, 'file1'),
@@ -116,6 +117,23 @@ class Test(unittest.TestCase):
         # end the same way as info_output
         assert info_output2.endswith(info_output)
 
+    def test_overwrite(self):
+        self.create_regual_file('file1', size=1024 * 80)
+        self.create_regual_file('dir2/file2', size=1024 * 80)
+        self.darc('init', self.store_location)
+        self.darc('create', self.store_location + '::test', 'input')
+        # Overwriting regular files and directories should be supported
+        os.mkdir('output/input')
+        os.mkdir('output/input/file1')
+        os.mkdir('output/input/dir2')
+        self.darc('extract', self.store_location + '::test', 'output')
+        self.diff_dirs('input', 'output/input')
+        # But non-empty dirs should fail
+        os.unlink('output/input/file1')
+        os.mkdir('output/input/file1')
+        os.mkdir('output/input/file1/dir')
+        self.darc('extract', self.store_location + '::test', 'output', exit_code=1)
+
     def test_delete(self):
         self.create_regual_file('file1', size=1024 * 80)
         self.create_regual_file('dir2/file2', size=1024 * 80)
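Note: test_overwrite pins down both sides of the new behaviour. Empty directories squatting on a file's destination are replaced silently, while a non-empty one makes the extract exit with code 1, because os.rmdir refuses non-empty directories and the exclusive 'wbx' open then fails with EEXIST. A short illustration of that failure mode:

    import os

    os.makedirs('dst/dir')      # 'dst' is now a non-empty directory
    try:
        os.rmdir('dst')         # refused: directory not empty
    except OSError, e:
        print 'cannot replace:', e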