
Fix --progress option. (#3557)

Fix --progress option, fixes #3431
Rémi Oudin 2018-03-10 15:11:08 +01:00 committed by TW
parent 1473ea708c
commit cb7887836a
2 changed files with 16 additions and 11 deletions
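
Taken together, the change threads progress reporting through the refactored create path: add_item() now accepts an explicit Statistics object (falling back to the archive's own self.stats as before), process_file_chunks() gets a show_progress flag and calls stats.show_progress() per chunk, and FilesystemObjectProcessors receives the --progress setting and passes its own self.stats when adding items. Below is a minimal, runnable sketch of that call pattern; the classes are simplified stand-ins for illustration, not the real borg classes.

# Hypothetical, much-simplified stand-ins that only illustrate the call pattern
# introduced by this commit; they are not the real borg classes.
class Statistics:
    """Collects per-run counters and (here) just prints a progress line."""
    def __init__(self, name):
        self.name = name
        self.nfiles = 0

    def show_progress(self, item=None, dt=None):
        print(f'[{self.name}] {getattr(item, "path", "-")}')


class Item:
    def __init__(self, path):
        self.path = path
        self.chunks = []


class Archive:
    def __init__(self, show_progress):
        self.show_progress = show_progress        # set from the --progress flag
        self.stats = Statistics('archive')

    # after the fix: the caller may hand in the Statistics object it updates
    def add_item(self, item, show_progress=True, stats=None):
        if show_progress and self.show_progress:
            if stats is None:
                stats = self.stats                # old behaviour kept as fallback
            stats.show_progress(item=item, dt=0.2)

    # after the fix: show_progress is threaded through to per-chunk processing
    def process_file_chunks(self, item, stats, show_progress, chunk_iter):
        for data in chunk_iter:
            item.chunks.append(data)
            if show_progress:
                stats.show_progress(item=item, dt=0.2)


# The file-processing side keeps its own Statistics; passing it explicitly is
# what makes --progress reflect the work actually being done.
archive = Archive(show_progress=True)
fso_stats = Statistics('fso')
item = Item('home/user/file.txt')
archive.process_file_chunks(item, fso_stats, show_progress=True,
                            chunk_iter=[b'chunk1', b'chunk2'])
fso_stats.nfiles += 1
archive.add_item(item, stats=fso_stats)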


@@ -441,9 +441,11 @@ def iter_items(self, filter=None, preload=False):
                                               filter=lambda item: self.item_filter(item, filter)):
             yield item
 
-    def add_item(self, item, show_progress=True):
+    def add_item(self, item, show_progress=True, stats=None):
         if show_progress and self.show_progress:
-            self.stats.show_progress(item=item, dt=0.2)
+            if stats is None:
+                stats = self.stats
+            stats.show_progress(item=item, dt=0.2)
         self.items_buffer.add(item)
 
     def write_checkpoint(self):
@@ -1008,7 +1010,7 @@ def write_part_file(self, item, from_chunk, number):
         self.write_checkpoint()
         return length, number
 
-    def process_file_chunks(self, item, cache, stats, chunk_iter, chunk_processor=None):
+    def process_file_chunks(self, item, cache, stats, show_progress, chunk_iter, chunk_processor=None):
         if not chunk_processor:
             def chunk_processor(data):
                 chunk_entry = cache.add_chunk(self.key.id_hash(data), data, stats, wait=False)
@@ -1024,6 +1026,8 @@ def chunk_processor(data):
         part_number = 1
         for data in chunk_iter:
             item.chunks.append(chunk_processor(data))
+            if show_progress:
+                stats.show_progress(item=item, dt=0.2)
             if self.checkpoint_interval and time.monotonic() - self.last_checkpoint > self.checkpoint_interval:
                 from_chunk, part_number = self.write_part_file(item, from_chunk, part_number)
                 self.last_checkpoint = time.monotonic()
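
The surrounding loop also shows borg's time-based checkpointing while chunking a file: when more than checkpoint_interval seconds (measured with time.monotonic()) have passed since the last checkpoint, the chunks gathered so far are written out as a part file. A tiny sketch of just that timer logic, with made-up values and a stubbed write_part_file:

# Sketch of the interval-based checkpoint check from the hunk above; the
# interval value and the write_part_file stub are made up for illustration.
import time

checkpoint_interval = 1800              # seconds between checkpoints (hypothetical)
last_checkpoint = time.monotonic()

def write_part_file(chunks_so_far, from_chunk, part_number):
    # real borg writes a partial item plus a repository checkpoint here
    return len(chunks_so_far), part_number + 1

def process(chunk_iter):
    global last_checkpoint
    chunks, from_chunk, part_number = [], 0, 1
    for data in chunk_iter:
        chunks.append(data)
        if checkpoint_interval and time.monotonic() - last_checkpoint > checkpoint_interval:
            from_chunk, part_number = write_part_file(chunks, from_chunk, part_number)
            last_checkpoint = time.monotonic()

process([b'chunk1', b'chunk2'])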
@@ -1050,12 +1054,13 @@ class FilesystemObjectProcessors:
     def __init__(self, *, metadata_collector, cache, key,
                  add_item, process_file_chunks,
-                 chunker_params):
+                 chunker_params, show_progress):
         self.metadata_collector = metadata_collector
         self.cache = cache
         self.key = key
         self.add_item = add_item
         self.process_file_chunks = process_file_chunks
+        self.show_progress = show_progress
         self.hard_links = {}
         self.stats = Statistics()  # threading: done by cache (including progress)
@@ -1077,7 +1082,7 @@ def create_helper(self, path, st, status=None, hardlinkable=True):
                 hardlink_master = True
         yield item, status, hardlinked, hardlink_master
         # if we get here, "with"-block worked ok without error/exception, the item was processed ok...
-        self.add_item(item)
+        self.add_item(item, stats=self.stats)
         # ... and added to the archive, so we can remember it to refer to it later in the archive:
         if hardlink_master:
             self.hard_links[(st.st_ino, st.st_dev)] = safe_path
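
create_helper is a context manager: the statements after the yield only run when the caller's with-block finished without raising, which is why add_item() happens there (the diff's own comment says as much). A small, generic illustration of that pattern, with made-up names rather than borg code:

# Generic illustration of the "code after yield runs only on success" pattern
# used by create_helper; everything here is a hypothetical stand-in.
from contextlib import contextmanager

added = []

@contextmanager
def create_helper(path):
    item = {'path': path}
    yield item                    # the caller's with-block runs here
    # reached only if the with-block finished without raising:
    added.append(item)

with create_helper('some/file') as item:
    item['size'] = 123            # pretend processing succeeded

print(added)                      # [{'path': 'some/file', 'size': 123}]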
@@ -1120,10 +1125,10 @@ def process_stdin(self, path, cache):
                     mtime=t, atime=t, ctime=t,
                     )
         fd = sys.stdin.buffer  # binary
-        self.process_file_chunks(item, cache, self.stats, backup_io_iter(self.chunker.chunkify(fd)))
+        self.process_file_chunks(item, cache, self.stats, self.show_progress, backup_io_iter(self.chunker.chunkify(fd)))
         item.get_size(memorize=True)
         self.stats.nfiles += 1
-        self.add_item(item)
+        self.add_item(item, stats=self.stats)
         return 'i'  # stdin
 
     def process_file(self, path, st, cache, ignore_inode=False, files_cache_mode=DEFAULT_FILES_CACHE_MODE):
@@ -1163,7 +1168,7 @@ def process_file(self, path, st, cache, ignore_inode=False, files_cache_mode=DEFAULT_FILES_CACHE_MODE):
                 with backup_io('open'):
                     fh = Archive._open_rb(path)
                 with os.fdopen(fh, 'rb') as fd:
-                    self.process_file_chunks(item, cache, self.stats, backup_io_iter(self.chunker.chunkify(fd, fh)))
+                    self.process_file_chunks(item, cache, self.stats, self.show_progress, backup_io_iter(self.chunker.chunkify(fd, fh)))
                 if not is_special_file:
                     # we must not memorize special files, because the contents of e.g. a
                     # block or char device will change without its mtime/size/inode changing.
@@ -1813,7 +1818,7 @@ def process_item(self, archive, target, item):
         if 'chunks' in item:
             self.process_chunks(archive, target, item)
             target.stats.nfiles += 1
-        target.add_item(item)
+        target.add_item(item, stats=self.stats)
         self.print_file_status(file_status(item.mode), item.path)
 
     def process_chunks(self, archive, target, item):
@@ -1823,7 +1828,7 @@ def process_chunks(self, archive, target, item):
             return item.chunks
         chunk_iterator = self.iter_chunks(archive, target, list(item.chunks))
         chunk_processor = partial(self.chunk_processor, target)
-        target.process_file_chunks(item, self.cache, target.stats, chunk_iterator, chunk_processor)
+        target.process_file_chunks(item, self.cache, target.stats, self.progress, chunk_iterator, chunk_processor)
 
     def chunk_processor(self, target, data):
         chunk_id = self.key.id_hash(data)
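
The recreate path (the last two hunks of this file) re-chunks items through the same process_file_chunks() call, binding the target archive into the chunk processor with functools.partial before handing it over. A tiny illustration of that binding, with stand-in names that only mirror the pattern above:

# Stand-in illustration of how functools.partial pre-binds the target argument;
# the class and names are made up, only the pattern matches the hunks above.
from functools import partial

class Recreater:
    def chunk_processor(self, target, data):
        return f'{target}:{len(data)} bytes'

    def process_chunks(self, target, chunks):
        processor = partial(self.chunk_processor, target)   # binds target
        return [processor(data) for data in chunks]

r = Recreater()
print(r.process_chunks('new-archive', [b'abc', b'defgh']))
# ['new-archive:3 bytes', 'new-archive:5 bytes']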


@@ -524,7 +524,7 @@ def create_inner(archive, cache, fso):
                                      checkpoint_interval=args.checkpoint_interval, rechunkify=False)
                 fso = FilesystemObjectProcessors(metadata_collector=metadata_collector, cache=cache, key=key,
                                                  process_file_chunks=cp.process_file_chunks, add_item=archive.add_item,
-                                                 chunker_params=args.chunker_params)
+                                                 chunker_params=args.chunker_params, show_progress=args.progress)
             create_inner(archive, cache, fso)
         else:
             create_inner(None, None, None)
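
The second changed file only forwards the already-parsed --progress flag into the new show_progress constructor argument. A heavily simplified, assumption-based sketch of how such a flag is produced with argparse (borg's real CLI setup in archiver.py is far more elaborate):

# Hypothetical, minimal stand-in for the CLI wiring; borg defines --progress
# on its subcommands through its own argparse setup in archiver.py.
import argparse

parser = argparse.ArgumentParser(prog='borg-create-sketch')
parser.add_argument('--progress', action='store_true',
                    help='show progress information while archiving')
args = parser.parse_args(['--progress'])

# the commit forwards this flag when building the file-object processors:
#   fso = FilesystemObjectProcessors(..., show_progress=args.progress)
print(args.progress)      # -> True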