mirror of https://github.com/borgbackup/borg.git
mention failed operation in per-file warnings
On the one hand, one can call this ugly global state; on the other, it's totally handy. It just has to be kept in mind for multithreading (MT), but that's rather obvious.
commit 34e19ccb6a
parent ccf882a885
@@ -114,29 +114,40 @@ class BackupOSError(Exception):
 
     Any unwrapped IO error is critical and aborts execution (for example repository IO failure).
     """
-    def __init__(self, os_error):
+    def __init__(self, op, os_error):
+        self.op = op
         self.os_error = os_error
         self.errno = os_error.errno
         self.strerror = os_error.strerror
         self.filename = os_error.filename
 
     def __str__(self):
-        return str(self.os_error)
+        if self.op:
+            return '%s: %s' % (self.op, self.os_error)
+        else:
+            return str(self.os_error)
 
 
 class BackupIO:
+    op = ''
+
+    def __call__(self, op=''):
+        self.op = op
+        return self
+
     def __enter__(self):
         pass
 
     def __exit__(self, exc_type, exc_val, exc_tb):
         if exc_type and issubclass(exc_type, OSError):
-            raise BackupOSError(exc_val) from exc_val
+            raise BackupOSError(self.op, exc_val) from exc_val
 
 
 backup_io = BackupIO()
 
 
 def backup_io_iter(iterator):
+    backup_io.op = 'read'
     while True:
         try:
             with backup_io:
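The wrapper and the context manager in this hunk are small enough to demonstrate on their own. A runnable sketch of the pattern, condensed from the code above (the path in the demo is made up for illustration):

    class BackupOSError(Exception):
        def __init__(self, op, os_error):
            self.op = op
            self.os_error = os_error

        def __str__(self):
            if self.op:
                return '%s: %s' % (self.op, self.os_error)
            else:
                return str(self.os_error)

    class BackupIO:
        op = ''

        def __call__(self, op=''):
            self.op = op
            return self

        def __enter__(self):
            pass

        def __exit__(self, exc_type, exc_val, exc_tb):
            if exc_type and issubclass(exc_type, OSError):
                raise BackupOSError(self.op, exc_val) from exc_val

    backup_io = BackupIO()

    try:
        with backup_io('open'):
            open('/nonexistent/file', 'rb')   # raises FileNotFoundError (an OSError)
    except BackupOSError as e:
        print(e)  # -> open: [Errno 2] No such file or directory: '/nonexistent/file'

Calling backup_io('open') stores the operation name on the shared instance and returns it, so the same object works both as `with backup_io('op'):` and as a bare `with backup_io:` that reuses whatever op was set last.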
@@ -477,13 +488,13 @@ Number of files: {0.stats.nfiles}'''.format(
             pass
         mode = item.mode
         if stat.S_ISREG(mode):
-            with backup_io:
+            with backup_io('makedirs'):
                 if not os.path.exists(os.path.dirname(path)):
                     os.makedirs(os.path.dirname(path))
             # Hard link?
             if 'source' in item:
                 source = os.path.join(dest, *item.source.split(os.sep)[stripped_components:])
-                with backup_io:
+                with backup_io('link'):
                     if os.path.exists(path):
                         os.unlink(path)
                     if item.source not in hardlink_masters:
@@ -496,20 +507,20 @@ Number of files: {0.stats.nfiles}'''.format(
                     os.link(link_target, path)
                 return
             # Extract chunks, since the item which had the chunks was not extracted
-            with backup_io:
+            with backup_io('open'):
                 fd = open(path, 'wb')
             with fd:
                 ids = [c.id for c in item.chunks]
                 for _, data in self.pipeline.fetch_many(ids, is_preloaded=True):
                     if pi:
                         pi.show(increase=len(data), info=[remove_surrogates(item.path)])
-                    with backup_io:
+                    with backup_io('write'):
                         if sparse and self.zeros.startswith(data):
                             # all-zero chunk: create a hole in a sparse file
                             fd.seek(len(data), 1)
                         else:
                             fd.write(data)
-                with backup_io:
+                with backup_io('truncate'):
                     pos = fd.tell()
                     fd.truncate(pos)
                     fd.flush()
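Aside: the write path in this hunk also shows how sparse extraction works. An all-zero chunk is skipped with a relative seek instead of being written, which leaves a hole, and the final truncate fixes the size in case the file ends with a hole. A standalone sketch of the same technique (file name and chunk size are made up):

    # Sparse-write sketch: seek over zeros instead of writing them.
    zeros = b'\0' * 65536
    with open('sparse.bin', 'wb') as fd:
        fd.seek(len(zeros), 1)    # all-zero chunk: advance, allocate nothing
        fd.write(b'payload')      # regular chunk: write normally
        fd.truncate(fd.tell())    # materialize the size if the file ends in a hole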
@@ -556,6 +567,7 @@ Number of files: {0.stats.nfiles}'''.format(
 
         Does not access the repository.
         """
+        backup_io.op = 'attrs'
        uid = gid = None
        if not self.numeric_owner:
            uid = user2uid(item.user)
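This hunk uses the second way of labeling: assigning to the shared instance's attribute rather than calling it, so every bare `with backup_io:` block that follows reports as 'attrs' until the attribute is set again. This is the "ugly but handy" global state the commit message refers to (and what would need care under multithreading). A hypothetical illustration, reusing the sketch classes above (not code from this diff):

    import os

    backup_io.op = 'attrs'
    try:
        with backup_io:
            os.chown('/nonexistent/file', 0, 0)   # hypothetical failing call
    except BackupOSError as e:
        print(e)  # -> attrs: [Errno 2] No such file or directory: '/nonexistent/file'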
@@ -707,7 +719,7 @@ Number of files: {0.stats.nfiles}'''.format(
 
     def stat_ext_attrs(self, st, path):
         attrs = {}
-        with backup_io:
+        with backup_io('extended stat'):
             xattrs = xattr.get_all(path, follow_symlinks=False)
             bsdflags = get_flags(path, st)
             acl_get(path, attrs, st, self.numeric_owner)
@@ -744,7 +756,7 @@ Number of files: {0.stats.nfiles}'''.format(
             return 'b'  # block device
 
     def process_symlink(self, path, st):
-        with backup_io:
+        with backup_io('readlink'):
             source = os.readlink(path)
         item = Item(path=make_path_safe(path), source=source)
         item.update(self.stat_attrs(st, path))
@@ -856,7 +868,7 @@ Number of files: {0.stats.nfiles}'''.format(
         else:
             compress = self.compression_decider1.decide(path)
         self.file_compression_logger.debug('%s -> compression %s', path, compress['name'])
-        with backup_io:
+        with backup_io('open'):
             fh = Archive._open_rb(path)
         with os.fdopen(fh, 'rb') as fd:
             self.chunk_file(item, cache, self.stats, backup_io_iter(self.chunker.chunkify(fd, fh)), compress=compress)
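The call site above feeds the chunker's iterator through backup_io_iter, whose opening lines appear in the first hunk but whose body is cut off by the diff context. A hedged sketch of how such a read-wrapping generator can be completed (an assumption, not the verbatim borg code):

    def backup_io_iter(iterator):
        # Every next() runs under backup_io, so an OSError raised while
        # reading the input file surfaces as BackupOSError('read', ...).
        backup_io.op = 'read'
        while True:
            try:
                with backup_io:
                    item = next(iterator)
            except StopIteration:
                return
            yield item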
@@ -365,7 +365,7 @@ class Archiver:
         try:
             st = os.lstat(path)
         except OSError as e:
-            self.print_warning('%s: %s', path, e)
+            self.print_warning('%s: stat: %s', path, e)
             return
         if (st.st_ino, st.st_dev) in skip_inodes:
             return
@@ -380,7 +380,7 @@ class Archiver:
                 self.print_file_status('x', path)
                 return
         except OSError as e:
-            self.print_warning('%s: %s', path, e)
+            self.print_warning('%s: flags: %s', path, e)
             return
         if stat.S_ISREG(st.st_mode):
             if not dry_run:
@@ -407,7 +407,7 @@ class Archiver:
                 entries = helpers.scandir_inorder(path)
             except OSError as e:
                 status = 'E'
-                self.print_warning('%s: %s', path, e)
+                self.print_warning('%s: scandir: %s', path, e)
             else:
                 for dirent in entries:
                     normpath = os.path.normpath(dirent.path)
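The effect of the three Archiver changes can be read off the format strings. Assuming an I/O error during lstat of a hypothetical path, the per-file warning now names the failed operation:

    # Before: '%s: %s'       -> /mnt/data: [Errno 5] Input/output error: '/mnt/data'
    # After:  '%s: stat: %s' -> /mnt/data: stat: [Errno 5] Input/output error: '/mnt/data'
    e = OSError(5, 'Input/output error', '/mnt/data')   # hypothetical path
    print('%s: stat: %s' % ('/mnt/data', e))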