2022-04-02 18:11:05 +00:00
|
|
|
import base64
|
2016-11-12 12:32:57 +00:00
|
|
|
import json
|
2016-05-30 23:18:03 +00:00
|
|
|
import os
|
|
|
|
import socket
|
|
|
|
import stat
|
|
|
|
import sys
|
|
|
|
import time
|
2017-07-19 12:29:14 +00:00
|
|
|
from collections import OrderedDict
|
2016-06-27 18:56:41 +00:00
|
|
|
from contextlib import contextmanager
|
2016-12-17 12:26:28 +00:00
|
|
|
from datetime import datetime, timezone, timedelta
|
2016-11-19 18:09:47 +00:00
|
|
|
from functools import partial
|
2010-10-24 20:07:54 +00:00
|
|
|
from getpass import getuser
|
2016-05-30 23:18:03 +00:00
|
|
|
from io import BytesIO
|
2017-07-19 12:29:14 +00:00
|
|
|
from itertools import groupby, zip_longest
|
2016-05-30 23:18:03 +00:00
|
|
|
from shutil import get_terminal_size
|
|
|
|
|
2018-11-10 22:34:43 +00:00
|
|
|
from .platformflags import is_win32, is_linux, is_freebsd, is_darwin
|
2015-10-06 16:33:55 +00:00
|
|
|
from .logger import create_logger
|
2017-05-02 16:52:36 +00:00
|
|
|
|
2015-10-06 16:33:55 +00:00
|
|
|
logger = create_logger()
|
|
|
|
|
2015-05-22 17:21:41 +00:00
|
|
|
from . import xattr
|
2021-01-14 19:41:57 +00:00
|
|
|
from .chunker import get_chunker, Chunk
|
2017-05-02 17:05:27 +00:00
|
|
|
from .cache import ChunkListEntry
|
|
|
|
from .crypto.key import key_factory
|
2017-03-31 10:02:30 +00:00
|
|
|
from .compress import Compressor, CompressionSpec
|
2016-04-17 01:15:19 +00:00
|
|
|
from .constants import * # NOQA
|
2016-10-21 23:50:35 +00:00
|
|
|
from .crypto.low_level import IntegrityError as IntegrityErrorBase
|
2017-06-13 12:15:37 +00:00
|
|
|
from .hashindex import ChunkIndex, ChunkIndexEntry, CacheSynchronizer
|
2016-05-30 22:33:13 +00:00
|
|
|
from .helpers import Manifest
|
2017-04-02 00:46:44 +00:00
|
|
|
from .helpers import hardlinkable
|
2017-04-03 20:05:53 +00:00
|
|
|
from .helpers import ChunkIteratorFileWrapper, open_item
|
2017-03-08 16:13:42 +00:00
|
|
|
from .helpers import Error, IntegrityError, set_ec
|
2018-11-10 20:43:45 +00:00
|
|
|
from .platform import uid2user, user2uid, gid2group, group2gid
|
2016-05-30 22:33:13 +00:00
|
|
|
from .helpers import parse_timestamp, to_localtime
|
2017-08-16 15:57:08 +00:00
|
|
|
from .helpers import OutputTimestamp, format_timedelta, format_file_size, file_status, FileSize
|
2016-12-03 16:50:50 +00:00
|
|
|
from .helpers import safe_encode, safe_decode, make_path_safe, remove_surrogates
|
|
|
|
from .helpers import StableDict
|
|
|
|
from .helpers import bin_to_hex
|
2017-03-15 17:54:34 +00:00
|
|
|
from .helpers import safe_ns
|
2016-11-13 21:34:15 +00:00
|
|
|
from .helpers import ellipsis_truncate, ProgressIndicatorPercent, log_multi
|
2020-11-15 14:31:01 +00:00
|
|
|
from .helpers import os_open, flags_normal, flags_dir
|
2021-10-14 15:46:10 +00:00
|
|
|
from .helpers import os_stat
|
2018-07-01 00:34:48 +00:00
|
|
|
from .helpers import msgpack
|
2019-06-22 21:19:37 +00:00
|
|
|
from .helpers import sig_int
|
2021-01-14 19:41:57 +00:00
|
|
|
from .lrucache import LRUCache
|
2017-05-01 14:58:29 +00:00
|
|
|
from .patterns import PathPrefixPattern, FnmatchPattern, IECommand
|
2017-07-19 12:29:14 +00:00
|
|
|
from .item import Item, ArchiveItem, ItemDiff
|
2018-08-04 15:40:04 +00:00
|
|
|
from .platform import acl_get, acl_set, set_flags, get_flags, swidth, hostname
|
2016-05-30 23:18:03 +00:00
|
|
|
from .remote import cache_if_remote
|
2017-02-17 04:00:37 +00:00
|
|
|
from .repository import Repository, LIST_SCAN_LIMIT
|
2010-10-20 17:59:15 +00:00
|
|
|
|
2020-01-17 19:56:23 +00:00
|
|
|
# True if this platform's os module provides link() (hard-link support);
# consumers can check this flag instead of probing os themselves.
has_link = hasattr(os, 'link')
|
2010-10-27 17:30:21 +00:00
|
|
|
|
2010-10-20 17:59:15 +00:00
|
|
|
|
2016-05-18 21:59:47 +00:00
|
|
|
class Statistics:
    """
    Accumulate archive statistics: original size (osize), compressed size (csize),
    deduplicated size (usize) and number of files (nfiles), with a parallel set of
    counters for 'part' items.
    """

    # one-line summary template used by __str__ (column-aligned sizes)
    summary = "{label:15} {stats.osize_fmt:>20s} {stats.csize_fmt:>20s} {stats.usize_fmt:>20s}"

    def __init__(self, output_json=False, iec=False):
        # output_json: emit progress as JSON lines instead of a terminal status line
        # iec: format sizes with IEC binary units (KiB, MiB, ...)
        self.output_json = output_json
        self.iec = iec
        self.osize = self.csize = self.usize = self.nfiles = 0
        self.osize_parts = self.csize_parts = self.usize_parts = self.nfiles_parts = 0
        self.last_progress = 0  # timestamp when last progress was shown

    def update(self, size, csize, unique, part=False):
        """Account one chunk: *size* original bytes, *csize* compressed bytes.

        *unique* chunks additionally count towards the deduplicated size;
        *part* routes the update into the part-files counters instead.
        """
        if part:
            self.osize_parts += size
            self.csize_parts += csize
            if unique:
                self.usize_parts += csize
        else:
            self.osize += size
            self.csize += csize
            if unique:
                self.usize += csize

    def __add__(self, other):
        """Return a new Statistics holding the element-wise sum of both operands."""
        if not isinstance(other, Statistics):
            raise TypeError('can only add Statistics objects')
        total = Statistics(self.output_json, self.iec)
        for attr in ('osize', 'csize', 'usize', 'nfiles',
                     'osize_parts', 'csize_parts', 'usize_parts', 'nfiles_parts'):
            setattr(total, attr, getattr(self, attr) + getattr(other, attr))
        return total

    def __str__(self):
        return self.summary.format(stats=self, label='This archive:')

    def __repr__(self):
        return "<{cls} object at {hash:#x} ({self.osize}, {self.csize}, {self.usize})>".format(
            cls=type(self).__name__, hash=id(self), self=self)

    def as_dict(self):
        """Dict for display purposes - sizes are wrapped as FileSize for formatting."""
        return {
            'original_size': FileSize(self.osize, iec=self.iec),
            'compressed_size': FileSize(self.csize, iec=self.iec),
            'deduplicated_size': FileSize(self.usize, iec=self.iec),
            'nfiles': self.nfiles,
        }

    def as_raw_dict(self):
        """Dict of plain integers, suitable for serialization (note: usize not included)."""
        return {
            'size': self.osize,
            'csize': self.csize,
            'nfiles': self.nfiles,
            'size_parts': self.osize_parts,
            'csize_parts': self.csize_parts,
            'nfiles_parts': self.nfiles_parts,
        }

    @classmethod
    def from_raw_dict(cls, **kw):
        """Rebuild a Statistics from as_raw_dict() output (usize counters stay 0)."""
        stats = cls()
        stats.osize = kw['size']
        stats.csize = kw['csize']
        stats.nfiles = kw['nfiles']
        stats.osize_parts = kw['size_parts']
        stats.csize_parts = kw['csize_parts']
        stats.nfiles_parts = kw['nfiles_parts']
        return stats

    @property
    def osize_fmt(self):
        return format_file_size(self.osize, iec=self.iec)

    @property
    def usize_fmt(self):
        return format_file_size(self.usize, iec=self.iec)

    @property
    def csize_fmt(self):
        return format_file_size(self.csize, iec=self.iec)

    def show_progress(self, item=None, final=False, stream=None, dt=None):
        """Emit a progress update (JSON line or single terminal status line).

        When *dt* is given, updates are rate-limited to one per *dt* seconds.
        """
        now = time.monotonic()
        if dt is None or now - self.last_progress > dt:
            self.last_progress = now
            if self.output_json:
                data = self.as_dict()
                data.update({
                    'time': time.time(),
                    'type': 'archive_progress',
                    'path': remove_surrogates(item.path if item else ''),
                })
                msg = json.dumps(data)
                end = '\n'
            else:
                columns, lines = get_terminal_size()
                if final:
                    # wipe the status line on the final call
                    msg = ' ' * columns
                else:
                    msg = '{0.osize_fmt} O {0.csize_fmt} C {0.usize_fmt} D {0.nfiles} N '.format(self)
                    path = remove_surrogates(item.path) if item else ''
                    space = columns - swidth(msg)
                    if space < 12:
                        # not enough room for a useful path - drop the stats part
                        msg = ''
                        space = columns - swidth(msg)
                    if space >= 8:
                        msg += ellipsis_truncate(path, space)
                end = '\r'
            print(msg, end=end, file=stream or sys.stderr, flush=True)
|
2016-05-18 21:59:47 +00:00
|
|
|
|
|
|
|
|
2016-07-02 19:04:51 +00:00
|
|
|
def is_special(mode):
    """Return whether *mode* denotes a block/char device or FIFO.

    These file types get special treatment in --read-special mode.
    """
    return stat.S_IFMT(mode) in (stat.S_IFBLK, stat.S_IFCHR, stat.S_IFIFO)
|
|
|
|
|
|
|
|
|
2018-03-24 23:21:06 +00:00
|
|
|
class BackupError(Exception):
    """
    Raised for non-OSError problems while accessing backup input files
    (e.g. race conditions detected by stat_update_check).
    """
|
|
|
|
|
|
|
|
|
2016-06-30 22:13:53 +00:00
|
|
|
class BackupOSError(Exception):
    """
    Wrapper for OSError raised while accessing backup files.

    Borg does different kinds of IO, and IO failures have different consequences.
    This wrapper represents failures of input file or extraction IO.
    These are non-critical and are only reported (exit code = 1, warning).

    Any unwrapped IO error is critical and aborts execution (for example repository IO failure).
    """

    def __init__(self, op, os_error):
        self.op = op
        self.os_error = os_error
        # mirror the interesting OSError attributes for convenient access
        self.errno = os_error.errno
        self.strerror = os_error.strerror
        self.filename = os_error.filename

    def __str__(self):
        return f'{self.op}: {self.os_error}' if self.op else str(self.os_error)
|
2016-06-27 18:56:41 +00:00
|
|
|
|
|
|
|
|
2016-12-02 23:12:48 +00:00
|
|
|
class BackupIO:
    """
    Reusable context manager that turns OSError into BackupOSError.

    Usage: ``with backup_io('read'): ...`` - any OSError raised inside the
    block is re-raised as a (non-critical) BackupOSError tagged with the
    operation name set via __call__.
    """

    op = ''  # label of the current operation, set by __call__

    def __call__(self, op=''):
        self.op = op
        return self

    def __enter__(self):
        pass

    def __exit__(self, exc_type, exc_val, exc_tb):
        # only translate OSError (and subclasses); everything else propagates unchanged
        if exc_type is not None and issubclass(exc_type, OSError):
            raise BackupOSError(self.op, exc_val) from exc_val


# module-level singleton used throughout this file
backup_io = BackupIO()
|
2016-06-27 18:56:41 +00:00
|
|
|
|
|
|
|
|
2016-07-03 21:57:55 +00:00
|
|
|
def backup_io_iter(iterator):
    """
    Yield items from *iterator*, wrapping OSError from each next() call as
    BackupOSError('read', ...).

    The yield itself happens outside the backup_io context, so exceptions
    raised by the consumer are not wrapped.
    """
    backup_io.op = 'read'
    while True:
        with backup_io:
            try:
                element = next(iterator)
            except StopIteration:
                return
        yield element
|
|
|
|
|
|
|
|
|
2019-02-17 05:45:24 +00:00
|
|
|
def stat_update_check(st_old, st_curr):
    """
    this checks for some race conditions between the first filename-based stat()
    we did before dispatching to the (hopefully correct) file type backup handler
    and the (hopefully) fd-based fstat() we did in the handler.

    if there is a problematic difference (e.g. file type changed), we rather
    skip the file than being tricked into a security problem.

    such races should only happen if:
    - we are backing up a live filesystem (no snapshot, not inactive)
    - if files change due to normal fs activity at an unfortunate time
    - if somebody is doing an attack against us

    Returns *st_curr* unchanged if the checks pass; raises BackupError otherwise.
    """
    # assuming that a file type change implicates a different inode change AND that inode numbers
    # are not duplicate in a short timeframe, this check is redundant and solved by the ino check:
    if stat.S_IFMT(st_old.st_mode) != stat.S_IFMT(st_curr.st_mode):
        # in this case, we dispatched to wrong handler - abort
        raise BackupError('file type changed (race condition), skipping file')
    if st_old.st_ino != st_curr.st_ino:
        # in this case, the hardlinks-related code in create_helper has the wrong inode - abort!
        raise BackupError('file inode changed (race condition), skipping file')
    # looks ok, we are still dealing with the same thing - return current stat:
    return st_curr
|
|
|
|
|
|
|
|
|
2018-08-12 15:39:30 +00:00
|
|
|
@contextmanager
def OsOpen(*, flags, path=None, parent_fd=None, name=None, noatime=False, op='open'):
    """
    Context manager around os_open(): yields the opened fd and closes it on exit.

    The open call runs inside backup_io(op), so an OSError during open is
    reported as a non-critical BackupOSError tagged with *op*.
    """
    with backup_io(op):
        fd = os_open(path=path, parent_fd=parent_fd, name=name, flags=flags, noatime=noatime)
    try:
        yield fd
    finally:
        # On windows fd is None for directories.
        if fd is not None:
            os.close(fd)
|
2018-08-12 15:39:30 +00:00
|
|
|
|
|
|
|
|
2014-01-22 19:58:48 +00:00
|
|
|
class DownloadPipeline:
    """Fetch, decrypt and unpack item metadata streams from the repository."""

    def __init__(self, repository, key):
        self.repository = repository
        self.key = key

    def unpack_many(self, ids, filter=None, partial_extract=False, preload=False, hardlink_masters=None):
        """
        Return iterator of items.

        *ids* is a chunk ID list of an item stream. *filter* is a callable
        to decide whether an item will be yielded. *preload* preloads the data chunks of every yielded item.

        Warning: if *preload* is True then all data chunks of every yielded item have to be retrieved,
        otherwise preloaded chunks will accumulate in RemoteRepository and create a memory leak.
        """
        def _preload(chunks):
            # queue the data chunk ids for background retrieval
            self.repository.preload([c.id for c in chunks])

        masters_preloaded = set()  # paths of hardlink masters whose chunks were already preloaded
        unpacker = msgpack.Unpacker(use_list=False)
        for data in self.fetch_many(ids):
            unpacker.feed(data)
            items = [Item(internal_dict=item) for item in unpacker]
            for item in items:
                if 'chunks' in item:
                    # normalize raw msgpack tuples into ChunkListEntry namedtuples
                    item.chunks = [ChunkListEntry(*e) for e in item.chunks]

            if filter:
                items = [item for item in items if filter(item)]

            if preload:
                if filter and partial_extract:
                    # if we do only a partial extraction, it gets a bit
                    # complicated with computing the preload items: if a hardlink master item is not
                    # selected (== not extracted), we will still need to preload its chunks if a
                    # corresponding hardlink slave is selected (== is extracted).
                    # due to a side effect of the filter() call, we now have hardlink_masters dict populated.
                    for item in items:
                        if hardlinkable(item.mode):
                            source = item.get('source')
                            if source is None:  # maybe a hardlink master
                                if 'chunks' in item:
                                    _preload(item.chunks)
                                # if this is a hl master, remember that we already preloaded all chunks of it (if any):
                                if item.get('hardlink_master', True):
                                    masters_preloaded.add(item.path)
                            else:  # hardlink slave
                                if source not in masters_preloaded:
                                    # we only need to preload *once* (for the 1st selected slave)
                                    chunks, _ = hardlink_masters[source]
                                    if chunks is not None:
                                        _preload(chunks)
                                    masters_preloaded.add(source)
                else:
                    # easy: we do not have a filter, thus all items are selected, thus we need to preload all chunks.
                    for item in items:
                        if 'chunks' in item:
                            _preload(item.chunks)

            for item in items:
                yield item

    def fetch_many(self, ids, is_preloaded=False):
        """Yield decrypted chunk payloads for *ids* (in order)."""
        for id_, data in zip(ids, self.repository.get_many(ids, is_preloaded=is_preloaded)):
            yield self.key.decrypt(id_, data)
|
|
|
|
|
|
|
|
|
|
|
|
class ChunkBuffer:
    """
    Buffer that packs items (msgpack) and cuts the resulting metadata stream
    into chunks, handing each finished chunk to write_chunk() (abstract here).
    """

    BUFFER_SIZE = 8 * 1024 * 1024  # flush threshold for add()

    def __init__(self, key, chunker_params=ITEMS_CHUNKER_PARAMS):
        self.buffer = BytesIO()
        self.packer = msgpack.Packer()
        self.chunks = []  # ids returned by write_chunk(), in stream order
        self.key = key
        self.chunker = get_chunker(*chunker_params, seed=self.key.chunk_seed)

    def add(self, item):
        """Serialize *item* into the buffer; flush once the buffer grows too large."""
        self.buffer.write(self.packer.pack(item.as_dict()))
        if self.is_full():
            self.flush()

    def write_chunk(self, chunk):
        # subclasses decide how/where chunks are stored
        raise NotImplementedError

    def flush(self, flush=False):
        """Chunk the buffered bytes and write the chunks out.

        Unless *flush* is True, the trailing partial chunk is kept buffered
        so it can still grow before being cut.
        """
        if self.buffer.tell() == 0:
            return
        self.buffer.seek(0)
        # The chunker returns a memoryview to its internal buffer,
        # thus a copy is needed before resuming the chunker iterator.
        # note: this is the items metadata stream chunker, we only will get CH_DATA allocation here (because there are,
        # no all-zero chunks in a metadata stream), thus chunk.data will always be bytes/memoryview and allocation
        # is always CH_DATA and never CH_ALLOC/CH_HOLE).
        pieces = [bytes(piece.data) for piece in self.chunker.chunkify(self.buffer)]
        self.buffer.seek(0)
        self.buffer.truncate(0)
        # Leave the last partial chunk in the buffer unless flush is True
        keep_last = not flush and len(pieces) != 1
        for piece in (pieces[:-1] if keep_last else pieces):
            self.chunks.append(self.write_chunk(piece))
        if keep_last:
            self.buffer.write(pieces[-1])

    def is_full(self):
        return self.buffer.tell() > self.BUFFER_SIZE
|
2012-11-27 23:03:35 +00:00
|
|
|
|
2014-01-22 19:58:48 +00:00
|
|
|
|
2014-02-16 21:21:18 +00:00
|
|
|
class CacheChunkBuffer(ChunkBuffer):
    """ChunkBuffer that stores chunks through the cache (and its repository)."""

    def __init__(self, cache, key, stats, chunker_params=ITEMS_CHUNKER_PARAMS):
        super().__init__(key, chunker_params)
        self.cache = cache
        self.stats = stats

    def write_chunk(self, chunk):
        # store the chunk via the cache; wait=False keeps the repository write asynchronous
        id_, _, _ = self.cache.add_chunk(self.key.id_hash(chunk), chunk, self.stats, wait=False)
        # collect already-finished async repository responses without blocking
        self.cache.repository.async_response(wait=False)
        return id_
|
|
|
|
|
|
|
|
|
2021-03-06 23:27:07 +00:00
|
|
|
def get_item_uid_gid(item, *, numeric, uid_forced=None, gid_forced=None, uid_default=0, gid_default=0):
    """Resolve the (uid, gid) to use for *item*.

    Precedence per id: forced value > numeric id stored in the item
    (or name translated via user2uid/group2gid when not *numeric*) >
    default (used when the resolved id is negative).
    """
    if uid_forced is not None:
        uid = uid_forced
    elif numeric:
        uid = item.uid
    else:
        # translate the stored name; fall back to the stored numeric id if unknown
        translated = user2uid(item.user)
        uid = item.uid if translated is None else translated
    if uid < 0:
        uid = uid_default

    if gid_forced is not None:
        gid = gid_forced
    elif numeric:
        gid = item.gid
    else:
        translated = group2gid(item.group)
        gid = item.gid if translated is None else translated
    if gid < 0:
        gid = gid_default

    return uid, gid
|
|
|
|
|
|
|
|
|
2014-01-22 19:58:48 +00:00
|
|
|
class Archive:
|
2010-10-20 17:59:15 +00:00
|
|
|
|
2013-12-15 19:35:29 +00:00
|
|
|
class DoesNotExist(Error):
    """Archive {} does not exist"""
    # NOTE(review): the docstring appears to be the user-facing message template
    # ({} = archive name), following the Error base class convention - do not reword.
|
2010-10-30 11:44:25 +00:00
|
|
|
|
2013-12-15 19:35:29 +00:00
|
|
|
class AlreadyExists(Error):
    """Archive {} already exists"""
    # NOTE(review): docstring doubles as the user-facing message template - do not reword.
|
2011-09-10 15:19:02 +00:00
|
|
|
|
2015-04-21 20:29:10 +00:00
|
|
|
class IncompatibleFilesystemEncodingError(Error):
    """Failed to encode filename "{}" into file system encoding "{}". Consider configuring the LANG environment variable."""
    # NOTE(review): docstring doubles as the user-facing message template - do not reword.
|
|
|
|
|
2013-06-20 10:44:58 +00:00
|
|
|
def __init__(self, repository, key, manifest, name, cache=None, create=False,
             checkpoint_interval=1800, numeric_ids=False, noatime=False, noctime=False,
             noflags=False, noacls=False, noxattrs=False,
             progress=False, chunker_params=CHUNKER_PARAMS, start=None, start_monotonic=None, end=None,
             consider_part_files=False, log_json=False, iec=False):
    """Open an existing archive *name* (create=False) or prepare a new one (create=True).

    With create=True the name must not exist yet and a free checkpoint name is
    reserved; otherwise the archive metadata is looked up in the manifest and loaded.
    """
    self.cwd = os.getcwd()
    self.key = key
    self.repository = repository
    self.cache = cache
    self.manifest = manifest
    self.hard_links = {}
    self.stats = Statistics(output_json=log_json, iec=iec)
    self.iec = iec
    self.show_progress = progress
    self.name = name  # overwritten later with name from archive metadata
    self.name_in_manifest = name  # can differ from .name later (if borg check fixed duplicate archive names)
    self.comment = None
    self.checkpoint_interval = checkpoint_interval
    self.numeric_ids = numeric_ids
    self.noatime = noatime
    self.noctime = noctime
    self.noflags = noflags
    self.noacls = noacls
    self.noxattrs = noxattrs
    assert (start is None) == (start_monotonic is None), 'Logic error: if start is given, start_monotonic must be given as well and vice versa.'
    if start is None:
        start = datetime.utcnow()
        start_monotonic = time.monotonic()
    self.chunker_params = chunker_params
    self.start = start
    self.start_monotonic = start_monotonic
    if end is None:
        end = datetime.utcnow()
    self.end = end
    self.consider_part_files = consider_part_files
    self.pipeline = DownloadPipeline(self.repository, self.key)
    self.create = create
    if self.create:
        self.items_buffer = CacheChunkBuffer(self.cache, self.key, self.stats)
        if name in manifest.archives:
            raise self.AlreadyExists(name)
        # find a checkpoint name that is not taken yet:
        # <name>.checkpoint, then <name>.checkpoint.1, .2, ...
        i = 0
        while True:
            self.checkpoint_name = '{}.checkpoint{}'.format(name, i and ('.%d' % i) or '')
            if self.checkpoint_name not in manifest.archives:
                break
            i += 1
    else:
        info = self.manifest.archives.get(name)
        if info is None:
            raise self.DoesNotExist(name)
        self.load(info.id)
|
2011-08-15 20:32:26 +00:00
|
|
|
|
2015-03-24 06:11:00 +00:00
|
|
|
def _load_meta(self, id):
    """Fetch, decrypt and unpack the archive metadata chunk with the given *id*."""
    data = self.key.decrypt(id, self.repository.get(id))
    metadata = ArchiveItem(internal_dict=msgpack.unpackb(data))
    if metadata.version != 1:
        raise Exception('Unknown archive metadata version')
    return metadata
|
|
|
|
|
2010-10-21 19:21:43 +00:00
|
|
|
def load(self, id):
    """Load the archive metadata for *id* and initialize name/comment from it."""
    self.id = id
    self.metadata = self._load_meta(self.id)
    # cmdline entries may contain surrogate escapes - normalize to str
    self.metadata.cmdline = [safe_decode(arg) for arg in self.metadata.cmdline]
    self.name = self.metadata.name
    self.comment = self.metadata.get('comment', '')
|
2010-10-25 17:51:47 +00:00
|
|
|
|
2011-06-16 19:55:54 +00:00
|
|
|
@property
def ts(self):
    """Timestamp of archive creation (start) in UTC"""
    return parse_timestamp(self.metadata.time)
|
2011-06-16 19:55:54 +00:00
|
|
|
|
2016-02-05 01:02:04 +00:00
|
|
|
@property
def ts_end(self):
    """Timestamp of archive creation (end) in UTC"""
    # older archives have no time_end in their metadata - fall back to time
    return parse_timestamp(self.metadata.get('time_end') or self.metadata.time)
|
2016-02-05 01:02:04 +00:00
|
|
|
|
2015-10-02 19:56:21 +00:00
|
|
|
@property
def fpr(self):
    """Archive fingerprint: hex string of the archive metadata chunk id."""
    return bin_to_hex(self.id)
|
2015-10-02 19:56:21 +00:00
|
|
|
|
|
|
|
@property
def duration(self):
    """Formatted duration (end - start) based on the in-memory timestamps."""
    return format_timedelta(self.end - self.start)
|
2015-10-02 19:56:21 +00:00
|
|
|
|
2016-04-23 23:29:17 +00:00
|
|
|
@property
def duration_from_meta(self):
    """Formatted duration computed from the stored metadata timestamps."""
    return format_timedelta(self.ts_end - self.ts)
|
|
|
|
|
2017-02-23 10:54:57 +00:00
|
|
|
def info(self):
    """Return a dict describing this archive (name, id, timing, stats, limits).

    While creating (self.create), the live stats and in-memory timestamps are
    used; otherwise stats are computed from the cache and timestamps come from
    the stored metadata.
    """
    if self.create:
        stats = self.stats
        start = self.start.replace(tzinfo=timezone.utc)
        end = self.end.replace(tzinfo=timezone.utc)
    else:
        stats = self.calc_stats(self.cache)
        start = self.ts
        end = self.ts_end
    info = {
        'name': self.name,
        'id': self.fpr,
        'start': OutputTimestamp(start),
        'end': OutputTimestamp(end),
        'duration': (end - start).total_seconds(),
        'stats': stats.as_dict(),
        'limits': {
            # fraction of the maximum metadata chunk size already used
            'max_archive_size': self.cache.chunks[self.id].csize / MAX_DATA_SIZE,
        },
    }
    if self.create:
        info['command_line'] = sys.argv
    else:
        info.update({
            'command_line': self.metadata.cmdline,
            'hostname': self.metadata.hostname,
            'username': self.metadata.username,
            'comment': self.metadata.get('comment', ''),
            'chunker_params': self.metadata.get('chunker_params', ''),
        })
    return info
|
2017-02-23 10:54:57 +00:00
|
|
|
|
2015-10-02 19:56:21 +00:00
|
|
|
def __str__(self):
    """Multi-line human-readable summary (used e.g. after archive creation)."""
    return '''\
Repository:                       {location}
Archive name:                     {0.name}
Archive fingerprint:              {0.fpr}
Time (start):                     {start}
Time (end):                       {end}
Duration:                         {0.duration}
Number of files:                  {0.stats.nfiles}
Utilization of max. archive size: {csize_max:.0%}
'''.format(
        self,
        start=OutputTimestamp(self.start.replace(tzinfo=timezone.utc)),
        end=OutputTimestamp(self.end.replace(tzinfo=timezone.utc)),
        csize_max=self.cache.chunks[self.id].csize / MAX_DATA_SIZE,
        location=self.repository._location.canonical_path()
    )
|
2015-10-02 19:56:21 +00:00
|
|
|
|
2011-08-11 19:18:13 +00:00
|
|
|
def __repr__(self):
    # short debugging representation; the detailed view is __str__ / info()
    return 'Archive(%r)' % self.name
|
|
|
|
|
2016-06-26 22:25:05 +00:00
|
|
|
def item_filter(self, item, filter=None):
|
2016-07-21 22:19:56 +00:00
|
|
|
if not self.consider_part_files and 'part' in item:
|
|
|
|
# this is a part(ial) file, we usually don't want to consider it.
|
2016-07-21 20:24:48 +00:00
|
|
|
return False
|
|
|
|
return filter(item) if filter else True
|
2016-06-26 22:25:05 +00:00
|
|
|
|
2019-05-02 19:02:26 +00:00
|
|
|
def iter_items(self, filter=None, partial_extract=False, preload=False, hardlink_masters=None):
    """Yield this archive's items via the download pipeline (see unpack_many)."""
    # note: when calling this with preload=True, later fetch_many() must be called with
    # is_preloaded=True or the RemoteRepository code will leak memory!
    assert not (filter and partial_extract and preload) or hardlink_masters is not None
    for item in self.pipeline.unpack_many(self.metadata.items, partial_extract=partial_extract,
                                          preload=preload, hardlink_masters=hardlink_masters,
                                          filter=lambda item: self.item_filter(item, filter)):
        yield item
|
2010-11-29 20:08:37 +00:00
|
|
|
|
2018-03-10 14:11:08 +00:00
|
|
|
def add_item(self, item, show_progress=True, stats=None):
|
2016-06-26 16:07:01 +00:00
|
|
|
if show_progress and self.show_progress:
|
2018-03-10 14:11:08 +00:00
|
|
|
if stats is None:
|
|
|
|
stats = self.stats
|
|
|
|
stats.show_progress(item=item, dt=0.2)
|
2014-01-22 19:58:48 +00:00
|
|
|
self.items_buffer.add(item)
|
2010-12-04 20:03:02 +00:00
|
|
|
|
2011-09-10 15:19:02 +00:00
|
|
|
def write_checkpoint(self):
    """Persist the current state as a checkpoint archive, then drop its references.

    The checkpoint is saved under self.checkpoint_name, immediately removed from
    the manifest again and its metadata chunk is decref'd - the data chunks it
    references stay protected until the final archive is saved.
    """
    self.save(self.checkpoint_name)
    del self.manifest.archives[self.checkpoint_name]
    self.cache.chunk_decref(self.id, self.stats)
|
2011-09-10 15:19:02 +00:00
|
|
|
|
2019-02-23 08:44:33 +00:00
|
|
|
def save(self, name=None, comment=None, timestamp=None, stats=None, additional_metadata=None):
    """Finalize and persist this archive under *name*.

    Flushes buffered items, builds the archive metadata block, stores it as a
    chunk (its id becomes the archive id), registers the archive in the manifest
    and commits repository and cache.

    :param name: archive name, defaults to self.name
    :param comment: archive comment string
    :param timestamp: start time; if None, now minus the measured duration is used
    :param stats: Statistics to embed (sizes / file counts), optional
    :param additional_metadata: extra key/value pairs merged into the metadata
    :raises Archive.AlreadyExists: if *name* is already present in the manifest
    """
    name = name or self.name
    if name in self.manifest.archives:
        raise self.AlreadyExists(name)
    self.items_buffer.flush(flush=True)
    duration = timedelta(seconds=time.monotonic() - self.start_monotonic)
    if timestamp is None:
        # derive start from the current time and the measured run duration
        end = datetime.utcnow()
        start = end - duration
    else:
        # caller pinned the start time; compute end from the measured duration
        end = timestamp + duration
        start = timestamp
    self.start = start
    self.end = end
    metadata = {
        'version': 1,
        'name': name,
        'comment': comment or '',
        'items': self.items_buffer.chunks,
        'cmdline': sys.argv,
        'hostname': hostname,
        'username': getuser(),
        'time': start.strftime(ISO_FORMAT),
        'time_end': end.strftime(ISO_FORMAT),
        'chunker_params': self.chunker_params,
    }
    if stats is not None:
        metadata.update({
            'size': stats.osize,
            'csize': stats.csize,
            'nfiles': stats.nfiles,
            'size_parts': stats.osize_parts,
            'csize_parts': stats.csize_parts,
            'nfiles_parts': stats.nfiles_parts})
    metadata.update(additional_metadata or {})
    metadata = ArchiveItem(metadata)
    data = self.key.pack_and_authenticate_metadata(metadata.as_dict(), context=b'archive')
    self.id = self.key.id_hash(data)
    try:
        self.cache.add_chunk(self.id, data, self.stats)
    except IntegrityError as err:
        err_msg = str(err)
        # hack to avoid changing the RPC protocol by introducing new (more specific) exception class
        if 'More than allowed put data' in err_msg:
            raise Error('%s - archive too big (issue #1473)!' % err_msg)
        else:
            raise
    # drain pending async repository results before touching the manifest
    while self.repository.async_response(wait=True) is not None:
        pass
    self.manifest.archives[name] = (self.id, metadata.time)
    self.manifest.write()
    self.repository.commit(compact=False)
    self.cache.commit()
|
2010-10-20 17:59:15 +00:00
|
|
|
|
2019-02-23 13:56:53 +00:00
|
|
|
def calc_stats(self, cache, want_unique=True):
    """Return Statistics for this archive, memoized via cache.pre12_meta.

    _calc_stats is rather slow for archives made with borg < 1.2 (they carry no
    precomputed totals), so the computed result is stored in cache.pre12_meta.
    """
    have_borg12_meta = self.metadata.get('nfiles') is not None
    try:
        raw = cache.pre12_meta[self.fpr]
    except KeyError:  # not in pre12_meta cache
        stats = self._calc_stats(cache, want_unique=want_unique)
        if not have_borg12_meta:
            cache.pre12_meta[self.fpr] = stats.as_raw_dict()
    else:
        stats = Statistics.from_raw_dict(**raw)
    return stats
|
|
|
|
|
|
|
|
def _calc_stats(self, cache, want_unique=True):
    """Compute Statistics (osize/csize/usize/nfiles) for this archive.

    For borg >= 1.2 archives the totals come from the archive metadata; otherwise
    (and whenever unique size is wanted) the item stream is scanned and a
    per-archive chunk index is built to measure unique compressed size.
    """
    have_borg12_meta = self.metadata.get('nfiles') is not None

    if have_borg12_meta and not want_unique:
        # totals are available from metadata and unique size was not requested
        unique_csize = 0
    else:
        def add(id):
            entry = cache.chunks[id]
            archive_index.add(id, 1, entry.size, entry.csize)

        archive_index = ChunkIndex()
        sync = CacheSynchronizer(archive_index)
        add(self.id)
        # we must escape any % char in the archive name, because we use it in a format string, see #6500
        arch_name_escd = self.name.replace('%', '%%')
        pi = ProgressIndicatorPercent(total=len(self.metadata.items),
                                      msg='Calculating statistics for archive %s ... %%3.0f%%%%' % arch_name_escd,
                                      msgid='archive.calc_stats')
        for id, chunk in zip(self.metadata.items, self.repository.get_many(self.metadata.items)):
            pi.show(increase=1)
            add(id)
            data = self.key.decrypt(id, chunk)
            sync.feed(data)
        unique_csize = archive_index.stats_against(cache.chunks)[3]
        pi.finish()

    stats = Statistics(iec=self.iec)
    stats.usize = unique_csize  # the part files use same chunks as the full file
    if not have_borg12_meta:
        # totals gathered by scanning the item stream above
        if self.consider_part_files:
            stats.nfiles = sync.num_files_totals
            stats.osize = sync.size_totals
            stats.csize = sync.csize_totals
        else:
            stats.nfiles = sync.num_files_totals - sync.num_files_parts
            stats.osize = sync.size_totals - sync.size_parts
            stats.csize = sync.csize_totals - sync.csize_parts
    else:
        # totals stored in borg >= 1.2 archive metadata
        if self.consider_part_files:
            stats.nfiles = self.metadata.nfiles_parts + self.metadata.nfiles
            stats.osize = self.metadata.size_parts + self.metadata.size
            stats.csize = self.metadata.csize_parts + self.metadata.csize
        else:
            stats.nfiles = self.metadata.nfiles
            stats.osize = self.metadata.size
            stats.csize = self.metadata.csize
    return stats
|
2010-10-20 17:59:15 +00:00
|
|
|
|
2017-04-01 23:38:58 +00:00
|
|
|
@contextmanager
def extract_helper(self, dest, item, path, stripped_components, original_path, hardlink_masters):
    """Context manager handling hardlink bookkeeping around extracting one item.

    Yields True if a hard link to an already-extracted master was created (the
    caller must then skip extraction), False otherwise. After the body ran, the
    hardlink master map is updated so later links to this item are hardlinked
    instead of extracted again (on platforms with os.link support).
    """
    hardlink_set = False
    # Hard link?
    if 'source' in item:
        source = os.path.join(dest, *item.source.split(os.sep)[stripped_components:])
        chunks, link_target = hardlink_masters.get(item.source, (None, source))
        if link_target and has_link:
            # Hard link was extracted previously, just link
            with backup_io('link'):
                os.link(link_target, path)
                hardlink_set = True
        elif chunks is not None:
            # assign chunks to this item, since the item which had the chunks was not extracted
            item.chunks = chunks
    yield hardlink_set
    if not hardlink_set and hardlink_masters:
        if has_link:
            # Update master entry with extracted item path, so that following hardlinks don't extract twice.
            # We have hardlinking support, so we will hardlink not extract.
            hardlink_masters[item.get('source') or original_path] = (None, path)
        else:
            # Broken platform with no hardlinking support.
            # In this case, we *want* to extract twice, because there is no other way.
            pass
|
2017-04-01 23:38:58 +00:00
|
|
|
|
2016-03-17 21:39:57 +00:00
|
|
|
def extract_item(self, item, restore_attrs=True, dry_run=False, stdout=False, sparse=False,
                 hardlink_masters=None, stripped_components=0, original_path=None, pi=None):
    """
    Extract archive item.

    :param item: the item to extract
    :param restore_attrs: restore file attributes
    :param dry_run: do not write any data
    :param stdout: write extracted data to stdout
    :param sparse: write sparse files (chunk-granularity, independent of the original being sparse)
    :param hardlink_masters: maps paths to (chunks, link_target) for extracting subtrees with hardlinks correctly
    :param stripped_components: stripped leading path components to correct hard link extraction
    :param original_path: 'path' key as stored in archive
    :param pi: ProgressIndicatorPercent (or similar) for file extraction progress (in bytes)
    """
    hardlink_masters = hardlink_masters or {}
    has_damaged_chunks = 'chunks_healthy' in item
    if dry_run or stdout:
        # fetch and (for stdout) emit the data without touching the filesystem
        if 'chunks' in item:
            item_chunks_size = 0
            for data in self.pipeline.fetch_many([c.id for c in item.chunks], is_preloaded=True):
                if pi:
                    pi.show(increase=len(data), info=[remove_surrogates(item.path)])
                if stdout:
                    sys.stdout.buffer.write(data)
                item_chunks_size += len(data)
            if stdout:
                sys.stdout.buffer.flush()
            if 'size' in item:
                item_size = item.size
                if item_size != item_chunks_size:
                    raise BackupError('Size inconsistency detected: size {}, chunks size {}'.format(
                        item_size, item_chunks_size))
            if has_damaged_chunks:
                raise BackupError('File has damaged (all-zero) chunks. Try running borg check --repair.')
        return

    original_path = original_path or item.path
    dest = self.cwd
    if item.path.startswith(('/', '../')):
        raise Exception('Path should be relative and local')
    path = os.path.join(dest, item.path)
    # Attempt to remove existing files, ignore errors on failure
    try:
        st = os.stat(path, follow_symlinks=False)
        if stat.S_ISDIR(st.st_mode):
            os.rmdir(path)
        else:
            os.unlink(path)
    except UnicodeEncodeError:
        raise self.IncompatibleFilesystemEncodingError(path, sys.getfilesystemencoding()) from None
    except OSError:
        pass

    def make_parent(path):
        # create missing parent directories of *path*
        parent_dir = os.path.dirname(path)
        if not os.path.exists(parent_dir):
            os.makedirs(parent_dir)

    mode = item.mode
    if stat.S_ISREG(mode):
        # regular file: may be a hardlink target/source, may be sparse
        with backup_io('makedirs'):
            make_parent(path)
        with self.extract_helper(dest, item, path, stripped_components, original_path,
                                 hardlink_masters) as hardlink_set:
            if hardlink_set:
                return
            with backup_io('open'):
                fd = open(path, 'wb')
            with fd:
                ids = [c.id for c in item.chunks]
                for data in self.pipeline.fetch_many(ids, is_preloaded=True):
                    if pi:
                        pi.show(increase=len(data), info=[remove_surrogates(item.path)])
                    with backup_io('write'):
                        if sparse and zeros.startswith(data):
                            # all-zero chunk: create a hole in a sparse file
                            fd.seek(len(data), 1)
                        else:
                            fd.write(data)
                with backup_io('truncate_and_attrs'):
                    pos = item_chunks_size = fd.tell()
                    fd.truncate(pos)
                    fd.flush()
                    self.restore_attrs(path, item, fd=fd.fileno())
            if 'size' in item:
                item_size = item.size
                if item_size != item_chunks_size:
                    raise BackupError('Size inconsistency detected: size {}, chunks size {}'.format(
                        item_size, item_chunks_size))
            if has_damaged_chunks:
                raise BackupError('File has damaged (all-zero) chunks. Try running borg check --repair.')
        return
    with backup_io:
        # No repository access beyond this point.
        if stat.S_ISDIR(mode):
            make_parent(path)
            if not os.path.exists(path):
                os.mkdir(path)
            if restore_attrs:
                self.restore_attrs(path, item)
        elif stat.S_ISLNK(mode):
            make_parent(path)
            source = item.source
            try:
                os.symlink(source, path)
            except UnicodeEncodeError:
                raise self.IncompatibleFilesystemEncodingError(source, sys.getfilesystemencoding()) from None
            self.restore_attrs(path, item, symlink=True)
        elif stat.S_ISFIFO(mode):
            make_parent(path)
            with self.extract_helper(dest, item, path, stripped_components, original_path,
                                     hardlink_masters) as hardlink_set:
                if hardlink_set:
                    return
                os.mkfifo(path)
                self.restore_attrs(path, item)
        elif stat.S_ISCHR(mode) or stat.S_ISBLK(mode):
            make_parent(path)
            with self.extract_helper(dest, item, path, stripped_components, original_path,
                                     hardlink_masters) as hardlink_set:
                if hardlink_set:
                    return
                os.mknod(path, item.mode, item.rdev)
                self.restore_attrs(path, item)
        else:
            raise Exception('Unknown archive item type %r' % item.mode)
|
2010-10-20 20:53:58 +00:00
|
|
|
|
2013-06-03 11:45:48 +00:00
|
|
|
def restore_attrs(self, path, item, symlink=False, fd=None):
    """
    Restore filesystem attributes on *path* (*fd*) from *item*.

    Does not access the repository.
    """
    backup_io.op = 'attrs'
    uid, gid = get_item_uid_gid(item, numeric=self.numeric_ids)
    # This code is a bit of a mess due to os specific differences
    if not is_win32:
        # ownership first (failures are ignored, e.g. when not running as root)
        try:
            if fd:
                os.fchown(fd, uid, gid)
            else:
                os.chown(path, uid, gid, follow_symlinks=False)
        except OSError:
            pass
        if fd:
            os.fchmod(fd, item.mode)
        else:
            # To check whether a particular function in the os module accepts False for its
            # follow_symlinks parameter, the in operator on supports_follow_symlinks should be
            # used. However, os.chmod is special as some platforms without a working lchmod() do
            # have fchmodat(), which has a flag that makes it behave like lchmod(). fchmodat()
            # is ignored when deciding whether or not os.chmod should be set in
            # os.supports_follow_symlinks. Work around this by using try/except.
            try:
                os.chmod(path, item.mode, follow_symlinks=False)
            except NotImplementedError:
                if not symlink:
                    os.chmod(path, item.mode)
        mtime = item.mtime
        if 'atime' in item:
            atime = item.atime
        else:
            # old archives only had mtime in item metadata
            atime = mtime
        if 'birthtime' in item:
            birthtime = item.birthtime
            try:
                # This should work on FreeBSD, NetBSD, and Darwin and be harmless on other platforms.
                # See utimes(2) on either of the BSDs for details.
                if fd:
                    os.utime(fd, None, ns=(atime, birthtime))
                else:
                    os.utime(path, None, ns=(atime, birthtime), follow_symlinks=False)
            except OSError:
                # some systems don't support calling utime on a symlink
                pass
        try:
            if fd:
                os.utime(fd, None, ns=(atime, mtime))
            else:
                os.utime(path, None, ns=(atime, mtime), follow_symlinks=False)
        except OSError:
            # some systems don't support calling utime on a symlink
            pass
        if not self.noacls:
            acl_set(path, item, self.numeric_ids, fd=fd)
        if not self.noxattrs:
            # chown removes Linux capabilities, so set the extended attributes at the end, after chown, since they include
            # the Linux capabilities in the "security.capability" attribute.
            warning = xattr.set_all(fd or path, item.get('xattrs', {}), follow_symlinks=False)
            if warning:
                set_ec(EXIT_WARNING)
        # bsdflags include the immutable flag and need to be set last:
        if not self.noflags and 'bsdflags' in item:
            try:
                set_flags(path, item.bsdflags, fd=fd)
            except OSError:
                pass
|
2010-10-20 17:59:15 +00:00
|
|
|
|
2016-04-08 05:07:14 +00:00
|
|
|
def set_meta(self, key, value):
    """Update one key of the archive metadata and re-write the metadata chunk.

    A new archive id is derived from the updated metadata; the manifest entry is
    repointed and the old metadata chunk is decref'd.
    """
    updated = self._load_meta(self.id)
    setattr(updated, key, value)
    data = msgpack.packb(updated.as_dict())
    new_id = self.key.id_hash(data)
    self.cache.add_chunk(new_id, data, self.stats)
    self.manifest.archives[self.name] = (new_id, updated.time)
    self.cache.chunk_decref(self.id, self.stats)
    self.id = new_id
|
2016-04-08 05:07:14 +00:00
|
|
|
|
|
|
|
def rename(self, name):
    """Rename this archive to *name*.

    :raises Archive.AlreadyExists: if an archive named *name* already exists
    """
    if name in self.manifest.archives:
        raise self.AlreadyExists(name)
    oldname, self.name = self.name, name
    self.set_meta('name', name)
    del self.manifest.archives[oldname]
|
2015-03-24 06:11:00 +00:00
|
|
|
|
2016-07-01 02:27:06 +00:00
|
|
|
def delete(self, stats, progress=False, forced=False):
    """Delete this archive: decref all item and data chunks, remove the manifest entry.

    :param stats: Statistics object updated by the decrefs
    :param progress: show a percentage progress indicator
    :param forced: if truthy, keep going past corrupted/missing data and delete as much
        as possible (a warning is logged when corruption was encountered)
    """
    class ChunksIndexError(Error):
        """Chunk ID {} missing from chunks index, corrupted chunks index - aborting transaction."""

    exception_ignored = object()

    def fetch_async_response(wait=True):
        # collect one pending async repository result; in forced mode, swallow
        # ObjectNotFound (we wanted the object gone anyway) and flag the error.
        try:
            return self.repository.async_response(wait=wait)
        except Repository.ObjectNotFound:
            nonlocal error
            # object not in repo - strange, but we wanted to delete it anyway.
            if forced == 0:
                raise
            error = True
            return exception_ignored  # must not return None here

    def chunk_decref(id, stats, part=False):
        try:
            self.cache.chunk_decref(id, stats, wait=False, part=part)
        except KeyError:
            cid = bin_to_hex(id)
            raise ChunksIndexError(cid)
        else:
            fetch_async_response(wait=False)

    error = False
    try:
        unpacker = msgpack.Unpacker(use_list=False)
        items_ids = self.metadata.items
        pi = ProgressIndicatorPercent(total=len(items_ids), msg="Decrementing references %3.0f%%", msgid='archive.delete')
        for (i, (items_id, data)) in enumerate(zip(items_ids, self.repository.get_many(items_ids))):
            if progress:
                pi.show(i)
            data = self.key.decrypt(items_id, data)
            unpacker.feed(data)
            chunk_decref(items_id, stats)
            try:
                for item in unpacker:
                    item = Item(internal_dict=item)
                    if 'chunks' in item:
                        part = not self.consider_part_files and 'part' in item
                        for chunk_id, size, csize in item.chunks:
                            chunk_decref(chunk_id, stats, part=part)
            except (TypeError, ValueError):
                # if items metadata spans multiple chunks and one chunk got dropped somehow,
                # it could be that unpacker yields bad types
                if forced == 0:
                    raise
                error = True
        if progress:
            pi.finish()
    except (msgpack.UnpackException, Repository.ObjectNotFound):
        # items metadata corrupted
        if forced == 0:
            raise
        error = True
    # in forced delete mode, we try hard to delete at least the manifest entry,
    # if possible also the archive superblock, even if processing the items raises
    # some harmless exception.
    chunk_decref(self.id, stats)
    del self.manifest.archives[self.name]
    while fetch_async_response(wait=True) is not None:
        # we did async deletes, process outstanding results (== exceptions),
        # so there is nothing pending when we return and our caller wants to commit.
        pass
    if error:
        logger.warning('forced deletion succeeded, but the deleted archive was corrupted.')
        logger.warning('borg check --repair is required to free all space.')
|
2010-10-20 17:59:15 +00:00
|
|
|
|
2017-07-19 12:29:14 +00:00
|
|
|
@staticmethod
def compare_archives_iter(archive1, archive2, matcher=None, can_compare_chunk_ids=False):
    """
    Yields tuples with a path and an ItemDiff instance describing changes/indicating equality.

    Items present in both archives are compared pairwise; items only present in one
    archive are matched against orphans of the other or reported against a synthetic
    "deleted" item. Pairs involving a hardlink whose master has not been seen yet are
    deferred until the master's chunks are known.

    :param matcher: PatternMatcher class to restrict results to only matching paths.
    :param can_compare_chunk_ids: Whether --chunker-params are the same for both archives.
    """

    def hardlink_master_seen(item):
        # True unless item is a hardlink slave whose master was not yet encountered
        return 'source' not in item or not hardlinkable(item.mode) or item.source in hardlink_masters

    def is_hardlink_master(item):
        return item.get('hardlink_master', True) and 'source' not in item and hardlinkable(item.mode)

    def update_hardlink_masters(item1, item2):
        if is_hardlink_master(item1) or is_hardlink_master(item2):
            hardlink_masters[item1.path] = (item1, item2)

    def has_hardlink_master(item, hardlink_masters):
        return hardlinkable(item.mode) and item.get('source') in hardlink_masters

    def compare_items(item1, item2):
        # hardlink slaves carry no chunks; substitute their master for content comparison
        if has_hardlink_master(item1, hardlink_masters):
            item1 = hardlink_masters[item1.source][0]
        if has_hardlink_master(item2, hardlink_masters):
            item2 = hardlink_masters[item2.source][1]
        return ItemDiff(item1, item2,
                        archive1.pipeline.fetch_many([c.id for c in item1.get('chunks', [])]),
                        archive2.pipeline.fetch_many([c.id for c in item2.get('chunks', [])]),
                        can_compare_chunk_ids=can_compare_chunk_ids)

    def defer_if_necessary(item1, item2):
        """Adds item tuple to deferred if necessary and returns True, if items were deferred"""
        update_hardlink_masters(item1, item2)
        defer = not hardlink_master_seen(item1) or not hardlink_master_seen(item2)
        if defer:
            deferred.append((item1, item2))
        return defer

    orphans_archive1 = OrderedDict()
    orphans_archive2 = OrderedDict()
    deferred = []
    hardlink_masters = {}

    for item1, item2 in zip_longest(
            archive1.iter_items(lambda item: matcher.match(item.path)),
            archive2.iter_items(lambda item: matcher.match(item.path)),
    ):
        if item1 and item2 and item1.path == item2.path:
            if not defer_if_necessary(item1, item2):
                yield (item1.path, compare_items(item1, item2))
            continue
        if item1:
            matching_orphan = orphans_archive2.pop(item1.path, None)
            if matching_orphan:
                if not defer_if_necessary(item1, matching_orphan):
                    yield (item1.path, compare_items(item1, matching_orphan))
            else:
                orphans_archive1[item1.path] = item1
        if item2:
            matching_orphan = orphans_archive1.pop(item2.path, None)
            if matching_orphan:
                if not defer_if_necessary(matching_orphan, item2):
                    yield (matching_orphan.path, compare_items(matching_orphan, item2))
            else:
                orphans_archive2[item2.path] = item2
    # At this point orphans_* contain items that had no matching partner in the other archive
    for added in orphans_archive2.values():
        path = added.path
        deleted_item = Item.create_deleted(path)
        update_hardlink_masters(deleted_item, added)
        yield (path, compare_items(deleted_item, added))
    for deleted in orphans_archive1.values():
        path = deleted.path
        deleted_item = Item.create_deleted(path)
        update_hardlink_masters(deleted, deleted_item)
        yield (path, compare_items(deleted, deleted_item))
    for item1, item2 in deferred:
        assert hardlink_master_seen(item1)
        assert hardlink_master_seen(item2)
        # bug fix: this used to yield the stale `path` variable left over from the
        # orphan loops above, mislabeling every deferred diff; a deferred pair was
        # matched by path, so item1.path is the correct path to report.
        yield (item1.path, compare_items(item1, item2))
|
2017-07-19 10:56:05 +00:00
|
|
|
|
2017-07-29 14:11:33 +00:00
|
|
|
|
|
|
|
class MetadataCollector:
    """Collect item metadata (stat fields, xattrs, ACLs, flags) for files being archived.

    The no* flags suppress the corresponding metadata class; numeric_ids skips
    uid/gid -> name resolution.
    """

    def __init__(self, *, noatime, noctime, nobirthtime, numeric_ids, noflags, noacls, noxattrs):
        self.noatime = noatime
        self.noctime = noctime
        self.nobirthtime = nobirthtime
        self.numeric_ids = numeric_ids
        self.noflags = noflags
        self.noacls = noacls
        self.noxattrs = noxattrs

    def stat_simple_attrs(self, st):
        """Return a dict of cheap stat-based attributes (mode, ids, timestamps)."""
        attrs = dict(
            mode=st.st_mode,
            uid=st.st_uid,
            gid=st.st_gid,
            mtime=safe_ns(st.st_mtime_ns),
        )
        # borg can work with archives only having mtime (older attic archives do not have
        # atime/ctime). it can be useful to omit atime/ctime, if they change without the
        # file content changing - e.g. to get better metadata deduplication.
        if not self.noatime:
            attrs['atime'] = safe_ns(st.st_atime_ns)
        if not self.noctime:
            attrs['ctime'] = safe_ns(st.st_ctime_ns)
        if not self.nobirthtime and hasattr(st, 'st_birthtime'):
            # sadly, there's no stat_result.st_birthtime_ns
            attrs['birthtime'] = safe_ns(int(st.st_birthtime * 10**9))
        if self.numeric_ids:
            attrs['user'] = attrs['group'] = None
        else:
            attrs['user'] = uid2user(st.st_uid)
            attrs['group'] = gid2group(st.st_gid)
        return attrs

    def stat_ext_attrs(self, st, path, fd=None):
        """Return a dict of extended attributes (xattrs, ACLs, bsdflags), honoring the no* flags."""
        attrs = {}
        with backup_io('extended stat'):
            flags = 0 if self.noflags else get_flags(path, st, fd=fd)
            xattrs = {} if self.noxattrs else xattr.get_all(fd or path, follow_symlinks=False)
            if not self.noacls:
                acl_get(path, attrs, st, self.numeric_ids, fd=fd)
        if xattrs:
            attrs['xattrs'] = StableDict(xattrs)
        if flags:
            attrs['bsdflags'] = flags
        return attrs

    def stat_attrs(self, st, path, fd=None):
        """Return the combined simple + extended attribute dict for one filesystem object."""
        attrs = self.stat_simple_attrs(st)
        attrs.update(self.stat_ext_attrs(st, path, fd=fd))
        return attrs
|
|
|
|
|
2017-03-26 11:51:04 +00:00
|
|
|
|
2021-01-14 19:41:57 +00:00
|
|
|
# Small LRU cache of chunk ids for all-zero chunks, keyed by
# (hash_func, chunk_length) -> chunk_hash.
# The hash function is part of the key to stay correct even if different
# hash_funcs show up within the same borg run.
zero_chunk_ids = LRUCache(10, dispose=lambda _: None)
|
|
|
|
|
|
|
|
|
|
|
|
def cached_hash(chunk, id_hash):
    """Return (chunk_id, data) for *chunk*, memoizing ids of all-zero chunks.

    CH_DATA chunks are always hashed. CH_HOLE/CH_ALLOC chunks map to a zero-filled
    memoryview of the right size; their id is looked up in (or inserted into)
    zero_chunk_ids so identical zero chunks are not hashed repeatedly.

    :raises ValueError: for an unknown allocation type
    """
    alloc = chunk.meta['allocation']
    if alloc == CH_DATA:
        data = chunk.data
        chunk_id = id_hash(data)
    elif alloc in (CH_HOLE, CH_ALLOC):
        size = chunk.meta['size']
        assert size <= len(zeros)
        data = memoryview(zeros)[:size]
        cache_key = (id_hash, size)
        try:
            chunk_id = zero_chunk_ids[cache_key]
        except KeyError:
            chunk_id = id_hash(data)
            zero_chunk_ids[cache_key] = chunk_id
    else:
        raise ValueError('unexpected allocation type')
    return chunk_id, data
|
|
|
|
|
|
|
|
|
2017-07-29 14:11:33 +00:00
|
|
|
class ChunksProcessor:
|
|
|
|
# Processes an iterator of chunks for an Item
|
2012-03-03 13:02:22 +00:00
|
|
|
|
2017-07-29 14:11:33 +00:00
|
|
|
def __init__(self, *, key, cache,
             add_item, write_checkpoint,
             checkpoint_interval, rechunkify):
    """Set up chunk processing state.

    :param key: key object providing id_hash
    :param cache: chunk cache used to store chunks
    :param add_item: callback adding a finished (part) item to the archive
    :param write_checkpoint: callback writing a checkpoint archive
    :param checkpoint_interval: seconds between automatic checkpoints (0/None disables)
    :param rechunkify: whether chunks are being re-chunked (borg recreate)
    """
    self.key = key
    self.cache = cache
    self.add_item = add_item
    self.write_checkpoint = write_checkpoint
    self.checkpoint_interval = checkpoint_interval
    self.rechunkify = rechunkify
    # start the checkpoint clock now
    self.last_checkpoint = time.monotonic()
|
2010-10-30 11:44:25 +00:00
|
|
|
|
2016-11-19 18:09:47 +00:00
|
|
|
def write_part_file(self, item, from_chunk, number):
|
|
|
|
item = Item(internal_dict=item.as_dict())
|
|
|
|
length = len(item.chunks)
|
|
|
|
# the item should only have the *additional* chunks we processed after the last partial item:
|
|
|
|
item.chunks = item.chunks[from_chunk:]
|
2017-10-14 02:24:26 +00:00
|
|
|
# for borg recreate, we already have a size member in the source item (giving the total file size),
|
|
|
|
# but we consider only a part of the file here, thus we must recompute the size from the chunks:
|
|
|
|
item.get_size(memorize=True, from_chunks=True)
|
2016-11-19 18:09:47 +00:00
|
|
|
item.path += '.borg_part_%d' % number
|
|
|
|
item.part = number
|
|
|
|
number += 1
|
|
|
|
self.add_item(item, show_progress=False)
|
|
|
|
self.write_checkpoint()
|
|
|
|
return length, number
|
|
|
|
|
2019-06-22 21:19:37 +00:00
|
|
|
def maybe_checkpoint(self, item, from_chunk, part_number, forced=False):
|
|
|
|
sig_int_triggered = sig_int and sig_int.action_triggered()
|
|
|
|
if forced or sig_int_triggered or \
|
|
|
|
self.checkpoint_interval and time.monotonic() - self.last_checkpoint > self.checkpoint_interval:
|
|
|
|
if sig_int_triggered:
|
|
|
|
logger.info('checkpoint requested: starting checkpoint creation...')
|
|
|
|
from_chunk, part_number = self.write_part_file(item, from_chunk, part_number)
|
|
|
|
self.last_checkpoint = time.monotonic()
|
|
|
|
if sig_int_triggered:
|
|
|
|
sig_int.action_completed()
|
|
|
|
logger.info('checkpoint requested: finished checkpoint creation!')
|
|
|
|
return from_chunk, part_number
|
|
|
|
|
2018-03-10 14:11:08 +00:00
|
|
|
def process_file_chunks(self, item, cache, stats, show_progress, chunk_iter, chunk_processor=None):
|
2016-11-19 18:09:47 +00:00
|
|
|
if not chunk_processor:
|
2020-12-15 01:37:26 +00:00
|
|
|
def chunk_processor(chunk):
|
2021-01-08 18:16:47 +00:00
|
|
|
chunk_id, data = cached_hash(chunk, self.key.id_hash)
|
2020-12-15 01:37:26 +00:00
|
|
|
chunk_entry = cache.add_chunk(chunk_id, data, stats, wait=False)
|
2017-03-05 04:19:32 +00:00
|
|
|
self.cache.repository.async_response(wait=False)
|
|
|
|
return chunk_entry
|
2016-07-21 21:56:58 +00:00
|
|
|
|
2016-06-26 16:07:01 +00:00
|
|
|
item.chunks = []
|
2017-10-29 09:53:12 +00:00
|
|
|
# if we rechunkify, we'll get a fundamentally different chunks list, thus we need
|
|
|
|
# to get rid of .chunks_healthy, as it might not correspond to .chunks any more.
|
|
|
|
if self.rechunkify and 'chunks_healthy' in item:
|
|
|
|
del item.chunks_healthy
|
2016-07-21 21:56:58 +00:00
|
|
|
from_chunk = 0
|
|
|
|
part_number = 1
|
2020-12-15 01:37:26 +00:00
|
|
|
for chunk in chunk_iter:
|
|
|
|
item.chunks.append(chunk_processor(chunk))
|
2018-03-10 14:11:08 +00:00
|
|
|
if show_progress:
|
|
|
|
stats.show_progress(item=item, dt=0.2)
|
2019-06-22 21:19:37 +00:00
|
|
|
from_chunk, part_number = self.maybe_checkpoint(item, from_chunk, part_number, forced=False)
|
2016-07-21 21:56:58 +00:00
|
|
|
else:
|
2016-07-28 15:55:40 +00:00
|
|
|
if part_number > 1:
|
|
|
|
if item.chunks[from_chunk:]:
|
|
|
|
# if we already have created a part item inside this file, we want to put the final
|
|
|
|
# chunks (if any) into a part item also (so all parts can be concatenated to get
|
|
|
|
# the complete file):
|
2019-06-22 21:19:37 +00:00
|
|
|
from_chunk, part_number = self.maybe_checkpoint(item, from_chunk, part_number, forced=True)
|
2016-07-28 15:55:40 +00:00
|
|
|
|
|
|
|
# if we created part files, we have referenced all chunks from the part files,
|
|
|
|
# but we also will reference the same chunks also from the final, complete file:
|
|
|
|
for chunk in item.chunks:
|
2019-02-23 08:44:33 +00:00
|
|
|
cache.chunk_incref(chunk.id, stats, size=chunk.size, part=True)
|
|
|
|
stats.nfiles_parts += part_number - 1
|
2016-06-26 16:07:01 +00:00
|
|
|
|
2017-07-29 14:11:33 +00:00
|
|
|
|
|
|
|
class FilesystemObjectProcessors:
    # When ported to threading, then this doesn't need chunker, cache, key any more.
    # write_checkpoint should then be in the item buffer,
    # and process_file becomes a callback passed to __init__.

    def __init__(self, *, metadata_collector, cache, key,
                 add_item, process_file_chunks,
                 chunker_params, show_progress, sparse,
                 log_json, iec, file_status_printer=None):
        """
        :param metadata_collector: provides stat_attrs / stat_simple_attrs / stat_ext_attrs
        :param file_status_printer: optional callable(status, path); defaults to a no-op
        """
        self.metadata_collector = metadata_collector
        self.cache = cache
        self.key = key
        self.add_item = add_item
        self.process_file_chunks = process_file_chunks
        self.show_progress = show_progress
        self.print_file_status = file_status_printer or (lambda *args: None)

        # maps (st_ino, st_dev) of already archived hardlink masters to their safe path:
        self.hard_links = {}
        self.stats = Statistics(output_json=log_json, iec=iec)  # threading: done by cache (including progress)
        # cwd is remembered so file paths can be made absolute for the files cache key:
        self.cwd = os.getcwd()
        self.chunker = get_chunker(*chunker_params, seed=key.chunk_seed, sparse=sparse)

    @contextmanager
    def create_helper(self, path, st, status=None, hardlinkable=True):
        """Context manager shared by all process_* methods.

        Yields (item, status, hardlinked, hardlink_master); if the with-block
        completes without exception, the item is added to the archive and
        (for hardlink masters) remembered for later hardlink slaves.
        """
        safe_path = make_path_safe(path)
        item = Item(path=safe_path)
        hardlink_master = False
        hardlinked = hardlinkable and st.st_nlink > 1
        if hardlinked:
            source = self.hard_links.get((st.st_ino, st.st_dev))
            if source is not None:
                item.source = source
                status = 'h'  # hardlink (to already seen inodes)
            else:
                hardlink_master = True
        yield item, status, hardlinked, hardlink_master
        # if we get here, "with"-block worked ok without error/exception, the item was processed ok...
        self.add_item(item, stats=self.stats)
        # ... and added to the archive, so we can remember it to refer to it later in the archive:
        if hardlink_master:
            self.hard_links[(st.st_ino, st.st_dev)] = safe_path

    def process_dir_with_fd(self, *, path, fd, st):
        """Archive a directory for which an open fd is already available."""
        with self.create_helper(path, st, 'd', hardlinkable=False) as (item, status, hardlinked, hardlink_master):
            item.update(self.metadata_collector.stat_attrs(st, path, fd=fd))
            return status

    def process_dir(self, *, path, parent_fd, name, st):
        """Archive a directory, opening it to re-check st against the opened fd."""
        with self.create_helper(path, st, 'd', hardlinkable=False) as (item, status, hardlinked, hardlink_master):
            with OsOpen(path=path, parent_fd=parent_fd, name=name, flags=flags_dir,
                        noatime=True, op='dir_open') as fd:
                # fd is None for directories on windows, in that case a race condition check is not possible.
                if fd is not None:
                    with backup_io('fstat'):
                        st = stat_update_check(st, os.fstat(fd))
                item.update(self.metadata_collector.stat_attrs(st, path, fd=fd))
                return status

    def process_fifo(self, *, path, parent_fd, name, st):
        """Archive a FIFO (named pipe) node - metadata only, no content."""
        with self.create_helper(path, st, 'f') as (item, status, hardlinked, hardlink_master):  # fifo
            with OsOpen(path=path, parent_fd=parent_fd, name=name, flags=flags_normal, noatime=True) as fd:
                with backup_io('fstat'):
                    st = stat_update_check(st, os.fstat(fd))
                item.update(self.metadata_collector.stat_attrs(st, path, fd=fd))
                return status

    def process_dev(self, *, path, parent_fd, name, st, dev_type):
        """Archive a block or character device node (dev_type is the status char)."""
        with self.create_helper(path, st, dev_type) as (item, status, hardlinked, hardlink_master):  # char/block device
            # looks like we can not work fd-based here without causing issues when trying to open/close the device
            with backup_io('stat'):
                st = stat_update_check(st, os_stat(path=path, parent_fd=parent_fd, name=name, follow_symlinks=False))
            item.rdev = st.st_rdev
            item.update(self.metadata_collector.stat_attrs(st, path))
            return status

    def process_symlink(self, *, path, parent_fd, name, st):
        """Archive a symlink, storing its target in item.source."""
        # note: using hardlinkable=False because we can not support hardlinked symlinks,
        # due to the dual-use of item.source, see issue #2343:
        # hardlinked symlinks will be archived [and extracted] as non-hardlinked symlinks.
        with self.create_helper(path, st, 's', hardlinkable=False) as (item, status, hardlinked, hardlink_master):
            fname = name if name is not None and parent_fd is not None else path
            with backup_io('readlink'):
                source = os.readlink(fname, dir_fd=parent_fd)
            item.source = source
            item.update(self.metadata_collector.stat_attrs(st, path))  # can't use FD here?
            return status

    def process_pipe(self, *, path, cache, fd, mode, user, group):
        """Archive data read from a pipe/stdin as a regular file item.

        :raises Error: if *user* or *group* can not be resolved to uid/gid.
        """
        status = 'i'  # stdin (or other pipe)
        self.print_file_status(status, path)
        status = None  # we already printed the status
        uid = user2uid(user)
        if uid is None:
            raise Error("no such user: %s" % user)
        gid = group2gid(group)
        if gid is None:
            raise Error("no such group: %s" % group)
        # no stat() is possible for a pipe, so use "now" (in ns) for all timestamps:
        t = int(time.time()) * 1000000000
        item = Item(
            path=path,
            mode=mode & 0o107777 | 0o100000,  # forcing regular file mode
            uid=uid, user=user,
            gid=gid, group=group,
            mtime=t, atime=t, ctime=t,
        )
        self.process_file_chunks(item, cache, self.stats, self.show_progress, backup_io_iter(self.chunker.chunkify(fd)))
        item.get_size(memorize=True)
        self.stats.nfiles += 1
        self.add_item(item, stats=self.stats)
        return status

    def process_file(self, *, path, parent_fd, name, st, cache, flags=flags_normal):
        """Archive a regular file, using the files cache to skip unchanged files.

        Returns a one-letter status ('A' added, 'M' modified, 'U' unchanged,
        'C' changed during backup, 'h' hardlink) or None if already printed.
        """
        with self.create_helper(path, st, None) as (item, status, hardlinked, hardlink_master):  # no status yet
            with OsOpen(path=path, parent_fd=parent_fd, name=name, flags=flags, noatime=True) as fd:
                with backup_io('fstat'):
                    st = stat_update_check(st, os.fstat(fd))
                item.update(self.metadata_collector.stat_simple_attrs(st))
                is_special_file = is_special(st.st_mode)
                if is_special_file:
                    # we process a special file like a regular file. reflect that in mode,
                    # so it can be extracted / accessed in FUSE mount like a regular file.
                    # this needs to be done early, so that part files also get the patched mode.
                    item.mode = stat.S_IFREG | stat.S_IMODE(item.mode)
                if not hardlinked or hardlink_master:
                    if not is_special_file:
                        hashed_path = safe_encode(os.path.join(self.cwd, path))
                        path_hash = self.key.id_hash(hashed_path)
                        known, ids = cache.file_known_and_unchanged(hashed_path, path_hash, st)
                    else:
                        # in --read-special mode, we may be called for special files.
                        # there should be no information in the cache about special files processed in
                        # read-special mode, but we better play safe as this was wrong in the past:
                        hashed_path = path_hash = None
                        known, ids = False, None
                    chunks = None
                    if ids is not None:
                        # Make sure all ids are available
                        for id_ in ids:
                            if not cache.seen_chunk(id_):
                                status = 'M'  # cache said it is unmodified, but we lost a chunk: process file like modified
                                break
                        else:
                            chunks = [cache.chunk_incref(id_, self.stats) for id_ in ids]
                            status = 'U'  # regular file, unchanged
                    else:
                        status = 'M' if known else 'A'  # regular file, modified or added
                    self.print_file_status(status, path)
                    status = None  # we already printed the status
                    item.hardlink_master = hardlinked
                    # Only chunkify the file if needed
                    if chunks is not None:
                        item.chunks = chunks
                    else:
                        with backup_io('read'):
                            self.process_file_chunks(item, cache, self.stats, self.show_progress, backup_io_iter(self.chunker.chunkify(None, fd)))
                        if is_win32:
                            changed_while_backup = False  # TODO
                        else:
                            with backup_io('fstat2'):
                                st2 = os.fstat(fd)
                            # special files:
                            # - fifos change naturally, because they are fed from the other side. no problem.
                            # - blk/chr devices don't change ctime anyway.
                            changed_while_backup = not is_special_file and st.st_ctime_ns != st2.st_ctime_ns
                        if changed_while_backup:
                            status = 'C'  # regular file changed while we backed it up, might be inconsistent/corrupt!
                        if not is_special_file and not changed_while_backup:
                            # we must not memorize special files, because the contents of e.g. a
                            # block or char device will change without its mtime/size/inode changing.
                            # also, we must not memorize a potentially inconsistent/corrupt file that
                            # changed while we backed it up.
                            cache.memorize_file(hashed_path, path_hash, st, [c.id for c in item.chunks])
                    self.stats.nfiles += 1
                item.update(self.metadata_collector.stat_ext_attrs(st, path, fd=fd))
                item.get_size(memorize=True)
                return status
|
2010-10-20 17:59:15 +00:00
|
|
|
|
2014-02-16 21:21:18 +00:00
|
|
|
|
2021-06-09 23:41:11 +00:00
|
|
|
class TarfileObjectProcessors:
    # Builds archive items from tarfile members (borg import-tar style input).

    def __init__(self, *, cache, key,
                 add_item, process_file_chunks,
                 chunker_params, show_progress,
                 log_json, iec, file_status_printer=None):
        """
        :param file_status_printer: optional callable(status, name); defaults to a no-op
        """
        self.cache = cache
        self.key = key
        self.add_item = add_item
        self.process_file_chunks = process_file_chunks
        self.show_progress = show_progress
        self.print_file_status = file_status_printer or (lambda *args: None)

        self.stats = Statistics(output_json=log_json, iec=iec)  # threading: done by cache (including progress)
        # tar streams carry no hole information, so sparse detection is off:
        self.chunker = get_chunker(*chunker_params, seed=key.chunk_seed, sparse=False)

    @contextmanager
    def create_helper(self, tarinfo, status=None, type=None):
        """Build an Item from *tarinfo*, yield (item, status), then add the item.

        If the tar member carries BORG.item.* pax headers (written by borg
        export-tar), the full item metadata is restored from them; otherwise
        the item is built from the plain tarinfo fields (plus pax a/c/mtime
        if present).
        """
        ph = tarinfo.pax_headers
        if ph and 'BORG.item.version' in ph:
            assert ph['BORG.item.version'] == '1'
            meta_bin = base64.b64decode(ph['BORG.item.meta'])
            meta_dict = msgpack.unpackb(meta_bin, object_hook=StableDict)
            item = Item(internal_dict=meta_dict)
        else:
            def s_to_ns(s):
                # tar timestamps are (fractional) seconds; items use clamped ns
                return safe_ns(int(float(s) * 1e9))

            item = Item(path=make_path_safe(tarinfo.name), mode=tarinfo.mode | type,
                        uid=tarinfo.uid, gid=tarinfo.gid, user=tarinfo.uname or None, group=tarinfo.gname or None,
                        mtime=s_to_ns(tarinfo.mtime))
            if ph:
                # note: for mtime this is a bit redundant as it is already done by tarfile module,
                # but we just do it in our way to be consistent for sure.
                for name in 'atime', 'ctime', 'mtime':
                    if name in ph:
                        ns = s_to_ns(ph[name])
                        setattr(item, name, ns)
        yield item, status
        # if we get here, "with"-block worked ok without error/exception, the item was processed ok...
        self.add_item(item, stats=self.stats)

    def process_dir(self, *, tarinfo, status, type):
        """Archive a directory member - metadata only."""
        with self.create_helper(tarinfo, status, type) as (item, status):
            return status

    def process_fifo(self, *, tarinfo, status, type):
        """Archive a FIFO member - metadata only."""
        with self.create_helper(tarinfo, status, type) as (item, status):
            return status

    def process_dev(self, *, tarinfo, status, type):
        """Archive a block/char device member, reconstructing st_rdev from major/minor."""
        with self.create_helper(tarinfo, status, type) as (item, status):
            item.rdev = os.makedev(tarinfo.devmajor, tarinfo.devminor)
            return status

    def process_link(self, *, tarinfo, status, type):
        """Archive a symlink/hardlink member, storing the link target in item.source."""
        with self.create_helper(tarinfo, status, type) as (item, status):
            item.source = tarinfo.linkname
            return status

    def process_file(self, *, tarinfo, status, type, tar):
        """Archive a regular file member, chunking its content from the tar stream."""
        with self.create_helper(tarinfo, status, type) as (item, status):
            self.print_file_status(status, tarinfo.name)
            status = None  # we already printed the status
            fd = tar.extractfile(tarinfo)
            self.process_file_chunks(item, self.cache, self.stats, self.show_progress,
                                     backup_io_iter(self.chunker.chunkify(fd)))
            item.get_size(memorize=True)
            self.stats.nfiles += 1
            return status
|
|
|
|
|
|
|
|
|
2016-06-12 21:36:56 +00:00
|
|
|
def valid_msgpacked_dict(d, keys_serialized):
    """check if the data <d> looks like a msgpacked dict

    The check is heuristic: the first byte must be a map marker (fixmap or
    map16), the first key must be a msgpack string, and the remaining bytes
    must start with one of the expected serialized key names.
    """
    if not d:
        return False
    marker = d[0]
    if marker & 0xf0 == 0x80:       # fixmap (up to 15 elements)
        offs = 1
    elif marker == 0xde:            # map16 (up to 2^16-1 elements)
        offs = 3
    else:
        # object is not a map (dict)
        # note: we must not have dicts with > 2^16-1 elements
        return False
    if len(d) <= offs:
        return False
    # is the first dict key a bytestring?
    key_marker = d[offs]
    is_fixstr = key_marker & 0xe0 == 0xa0           # small bytestring (up to 31 chars)
    is_longstr = key_marker in (0xd9, 0xda, 0xdb)   # str8, str16 or str32
    if not (is_fixstr or is_longstr):
        # key is not a bytestring
        return False
    # is the bytestring any of the expected key names?
    tail = d[offs:]
    return any(tail.startswith(pattern) for pattern in keys_serialized)
|
|
|
|
|
|
|
|
|
2015-07-15 09:30:25 +00:00
|
|
|
class RobustUnpacker:
    """A restartable/robust version of the streaming msgpack unpacker

    After resync() is called, incoming data is buffered instead of unpacked;
    __next__ then scans the buffer byte-by-byte for the start of a valid
    item dict (validated by the supplied validator) before resuming normal
    streaming unpacking.
    """
    def __init__(self, validator, item_keys):
        # :param validator: callable(item_dict) -> bool, decides if an unpacked
        #                   object is an acceptable resync point
        # :param item_keys: iterable of key names expected in item dicts;
        #                   pre-serialized here for the fast resync prefix check
        super().__init__()
        self.item_keys = [msgpack.packb(name.encode()) for name in item_keys]
        self.validator = validator
        self._buffered_data = []
        self._resync = False
        self._unpacker = msgpack.Unpacker(object_hook=StableDict)

    def resync(self):
        """Enter resync mode: drop the buffer and scan for the next valid item."""
        self._buffered_data = []
        self._resync = True

    def feed(self, data):
        # while resyncing, keep raw bytes so __next__ can scan them from the start
        if self._resync:
            self._buffered_data.append(data)
        else:
            self._unpacker.feed(data)

    def __iter__(self):
        return self

    def __next__(self):
        if self._resync:
            data = b''.join(self._buffered_data)
            while self._resync:
                if not data:
                    raise StopIteration
                # Abort early if the data does not look like a serialized item dict
                if not valid_msgpacked_dict(data, self.item_keys):
                    data = data[1:]
                    continue
                # fresh unpacker per attempt, so a failed parse leaves no stale state:
                self._unpacker = msgpack.Unpacker(object_hook=StableDict)
                self._unpacker.feed(data)
                try:
                    item = next(self._unpacker)
                except (msgpack.UnpackException, StopIteration):
                    # as long as we are resyncing, we also ignore StopIteration
                    pass
                else:
                    if self.validator(item):
                        self._resync = False
                        return item
                # not a valid item at this offset - advance one byte and retry:
                data = data[1:]
        else:
            return next(self._unpacker)
|
2014-02-24 21:43:17 +00:00
|
|
|
|
|
|
|
|
2014-02-16 21:21:18 +00:00
|
|
|
class ArchiveChecker:
|
|
|
|
|
|
|
|
def __init__(self):
    """Set up initial checker state: no error seen yet, nothing superseded."""
    self.possibly_superseded = set()
    self.error_found = False
|
|
|
|
|
2017-06-11 10:13:42 +00:00
|
|
|
def check(self, repository, repair=False, archive=None, first=0, last=0, sort_by='', glob=None,
          verify_data=False, save_space=False):
    """Perform a set of checks on 'repository'

    :param repair: enable repair mode, write updated or corrected data into repository
    :param archive: only check this archive
    :param first/last/sort_by: only check this number of first/last archives ordered by sort_by
    :param glob: only check archives matching this glob
    :param verify_data: integrity verification of data referenced by archives
    :param save_space: Repository.commit(save_space)
    :returns: True if no problems were found (or repair mode is on), else False
    """
    logger.info('Starting archive consistency check...')
    # check_all: no archive subset was selected, so the whole repo is covered
    self.check_all = archive is None and not any((first, last, glob))
    self.repair = repair
    self.repository = repository
    self.init_chunks()
    if not self.chunks:
        logger.error('Repository contains no apparent data at all, cannot continue check/repair.')
        return False
    self.key = self.identify_key(repository)
    if verify_data:
        self.verify_data()
    if Manifest.MANIFEST_ID not in self.chunks:
        logger.error("Repository manifest not found!")
        self.error_found = True
        self.manifest = self.rebuild_manifest()
    else:
        try:
            self.manifest, _ = Manifest.load(repository, (Manifest.Operation.CHECK,), key=self.key)
        except IntegrityErrorBase as exc:
            # manifest exists but is corrupt: drop it and rebuild from archive metadata
            logger.error('Repository manifest is corrupted: %s', exc)
            self.error_found = True
            del self.chunks[Manifest.MANIFEST_ID]
            self.manifest = self.rebuild_manifest()
    self.rebuild_refcounts(archive=archive, first=first, last=last, sort_by=sort_by, glob=glob)
    self.orphan_chunks_check()
    self.finish(save_space=save_space)
    if self.error_found:
        logger.error('Archive consistency check complete, problems found.')
    else:
        logger.info('Archive consistency check complete, no problems found.')
    # in repair mode the check "succeeds" even if problems were found (they were repaired):
    return self.repair or not self.error_found
|
|
|
|
|
2014-02-16 21:21:18 +00:00
|
|
|
def init_chunks(self):
    """Fetch a list of all object keys from repository

    Fills self.chunks (a ChunkIndex) with one zeroed entry per repository
    object; refcounts/sizes are rebuilt later.
    """
    # Explicitly set the initial usable hash table capacity to avoid performance issues
    # due to hash table "resonance".
    # Since reconstruction of archive items can add some new chunks, add 10 % headroom.
    self.chunks = ChunkIndex(usable=len(self.repository) * 1.1)
    marker = None
    while True:
        # paginate through the repository key list, LIST_SCAN_LIMIT at a time:
        result = self.repository.list(limit=LIST_SCAN_LIMIT, marker=marker)
        if not result:
            break
        marker = result[-1]
        init_entry = ChunkIndexEntry(refcount=0, size=0, csize=0)
        for id_ in result:
            self.chunks[id_] = init_entry
|
2014-02-16 21:21:18 +00:00
|
|
|
|
|
|
|
def identify_key(self, repository):
    """Detect the repository's key type from an arbitrary stored chunk.

    Returns None when the repository holds no chunks at all.
    """
    for some_chunkid, _ in self.chunks.iteritems():
        # any chunk will do - its envelope identifies the key type:
        cdata = repository.get(some_chunkid)
        return key_factory(repository, cdata)
    # repo is completely empty, no chunks
    return None
|
|
|
|
|
2016-05-13 20:50:34 +00:00
|
|
|
def verify_data(self):
    """Decrypt/verify every repository chunk; in repair mode, delete chunks that fail twice.

    Also cross-checks the object count from the repo index against the
    object count seen while scanning the segment files.
    """
    logger.info('Starting cryptographic data integrity verification...')
    chunks_count_index = len(self.chunks)
    chunks_count_segments = 0
    errors = 0
    defect_chunks = []
    pi = ProgressIndicatorPercent(total=chunks_count_index, msg="Verifying data %6.2f%%", step=0.01,
                                  msgid='check.verify_data')
    marker = None
    while True:
        chunk_ids = self.repository.scan(limit=100, marker=marker)
        if not chunk_ids:
            break
        chunks_count_segments += len(chunk_ids)
        marker = chunk_ids[-1]
        chunk_data_iter = self.repository.get_many(chunk_ids)
        chunk_ids_revd = list(reversed(chunk_ids))
        while chunk_ids_revd:
            pi.show()
            chunk_id = chunk_ids_revd.pop(-1)  # better efficiency
            try:
                encrypted_data = next(chunk_data_iter)
            except (Repository.ObjectNotFound, IntegrityErrorBase) as err:
                self.error_found = True
                errors += 1
                logger.error('chunk %s: %s', bin_to_hex(chunk_id), err)
                # only integrity errors mark the chunk as defect (a missing object
                # can not be "deleted" again):
                if isinstance(err, IntegrityErrorBase):
                    defect_chunks.append(chunk_id)
                # as the exception killed our generator, make a new one for remaining chunks:
                if chunk_ids_revd:
                    chunk_ids = list(reversed(chunk_ids_revd))
                    chunk_data_iter = self.repository.get_many(chunk_ids)
            else:
                # the manifest chunk is keyed specially (id None) for decryption:
                _chunk_id = None if chunk_id == Manifest.MANIFEST_ID else chunk_id
                try:
                    self.key.decrypt(_chunk_id, encrypted_data)
                except IntegrityErrorBase as integrity_error:
                    self.error_found = True
                    errors += 1
                    logger.error('chunk %s, integrity error: %s', bin_to_hex(chunk_id), integrity_error)
                    defect_chunks.append(chunk_id)
    pi.finish()
    if chunks_count_index != chunks_count_segments:
        logger.error('Repo/Chunks index object count vs. segment files object count mismatch.')
        logger.error('Repo/Chunks index: %d objects != segment files: %d objects',
                     chunks_count_index, chunks_count_segments)
    if defect_chunks:
        if self.repair:
            # if we kill the defect chunk here, subsequent actions within this "borg check"
            # run will find missing chunks and replace them with all-zero replacement
            # chunks and flag the files as "repaired".
            # if another backup is done later and the missing chunks get backupped again,
            # a "borg check" afterwards can heal all files where this chunk was missing.
            logger.warning('Found defect chunks. They will be deleted now, so affected files can '
                           'get repaired now and maybe healed later.')
            for defect_chunk in defect_chunks:
                # remote repo (ssh): retry might help for strange network / NIC / RAM errors
                # as the chunk will be retransmitted from remote server.
                # local repo (fs): as chunks.iteritems loop usually pumps a lot of data through,
                # a defect chunk is likely not in the fs cache any more and really gets re-read
                # from the underlying media.
                try:
                    encrypted_data = self.repository.get(defect_chunk)
                    _chunk_id = None if defect_chunk == Manifest.MANIFEST_ID else defect_chunk
                    self.key.decrypt(_chunk_id, encrypted_data)
                except IntegrityErrorBase:
                    # failed twice -> get rid of this chunk
                    del self.chunks[defect_chunk]
                    self.repository.delete(defect_chunk)
                    logger.debug('chunk %s deleted.', bin_to_hex(defect_chunk))
                else:
                    logger.warning('chunk %s not deleted, did not consistently fail.', bin_to_hex(defect_chunk))
        else:
            logger.warning('Found defect chunks. With --repair, they would get deleted, so affected '
                           'files could get repaired then and maybe healed later.')
            for defect_chunk in defect_chunks:
                logger.debug('chunk %s is defect.', bin_to_hex(defect_chunk))
    log = logger.error if errors else logger.info
    log('Finished cryptographic data integrity verification, verified %d chunks with %d integrity errors.',
        chunks_count_segments, errors)
|
2016-05-13 20:50:34 +00:00
|
|
|
|
2014-02-16 21:21:18 +00:00
|
|
|
    def rebuild_manifest(self):
        """Rebuild the manifest object if it is missing.

        Iterates through all objects in the repository looking for archive
        metadata blocks and registers each one found in a fresh Manifest.
        Returns the rebuilt Manifest (the caller is responsible for writing it).
        """
        required_archive_keys = frozenset(key.encode() for key in REQUIRED_ARCHIVE_KEYS)

        def valid_archive(obj):
            # An archive metadata object must be a dict containing at least
            # all of the required archive keys.
            if not isinstance(obj, dict):
                return False
            keys = set(obj)
            return required_archive_keys.issubset(keys)

        logger.info('Rebuilding missing manifest, this might take some time...')
        # as we have lost the manifest, we do not know any more what valid item keys we had.
        # collecting any key we encounter in a damaged repo seems unwise, thus we just use
        # the hardcoded list from the source code. thus, it is not recommended to rebuild a
        # lost manifest on a older borg version than the most recent one that was ever used
        # within this repository (assuming that newer borg versions support more item keys).
        manifest = Manifest(self.key, self.repository)
        archive_keys_serialized = [msgpack.packb(name.encode()) for name in ARCHIVE_KEYS]
        pi = ProgressIndicatorPercent(total=len(self.chunks), msg="Rebuilding manifest %6.2f%%", step=0.01,
                                      msgid='check.rebuild_manifest')
        for chunk_id, _ in self.chunks.iteritems():
            pi.show()
            cdata = self.repository.get(chunk_id)
            try:
                data = self.key.decrypt(chunk_id, cdata)
            except IntegrityErrorBase as exc:
                # Chunk failed authentication/decryption - note it and move on.
                logger.error('Skipping corrupted chunk: %s', exc)
                self.error_found = True
                continue
            # Cheap pre-filter: reject chunks that cannot be a msgpacked dict
            # with archive keys before attempting a full unpack.
            if not valid_msgpacked_dict(data, archive_keys_serialized):
                continue
            # b'\xa7version\x01' is the msgpack encoding of the 'version': 1
            # key/value pair every archive metadata block carries.
            if b'cmdline' not in data or b'\xa7version\x01' not in data:
                continue
            try:
                archive = msgpack.unpackb(data)
            # Ignore exceptions that might be raised when feeding msgpack with invalid data
            except msgpack.UnpackException:
                continue
            if valid_archive(archive):
                archive = ArchiveItem(internal_dict=archive)
                name = archive.name
                logger.info('Found archive %s', name)
                if name in manifest.archives:
                    # Name collision between recovered archives: append '.1',
                    # '.2', ... until the name is unique in the manifest.
                    i = 1
                    while True:
                        new_name = '%s.%d' % (name, i)
                        if new_name not in manifest.archives:
                            break
                        i += 1
                    logger.warning('Duplicate archive name %s, storing as %s', name, new_name)
                    name = new_name
                manifest.archives[name] = (chunk_id, archive.time)
        pi.finish()
        logger.info('Manifest rebuild complete.')
        return manifest
|
|
|
|
|
2017-06-11 10:13:42 +00:00
|
|
|
    def rebuild_refcounts(self, archive=None, first=0, last=0, sort_by='', glob=None):
        """Rebuild object reference counts by walking the metadata.

        Missing and/or incorrect data is repaired when detected (only persisted
        to the repository when self.repair is set).

        :param archive: check only this archive name; None means select archives
                        via first/last/sort_by/glob.
        """
        # Exclude the manifest from chunks (manifest entry might be already deleted from self.chunks)
        self.chunks.pop(Manifest.MANIFEST_ID, None)

        def mark_as_possibly_superseded(id_):
            # Only chunks whose refcount dropped to 0 are candidates for deletion.
            if self.chunks.get(id_, ChunkIndexEntry(0, 0, 0)).refcount == 0:
                self.possibly_superseded.add(id_)

        def add_callback(chunk):
            # Used as items_buffer.write_chunk: store a freshly built metadata
            # chunk and account for it; returns its id for the item list.
            id_ = self.key.id_hash(chunk)
            cdata = self.key.encrypt(id_, chunk)
            add_reference(id_, len(chunk), len(cdata), cdata)
            return id_

        def add_reference(id_, size, csize, cdata=None):
            # Bump the refcount of a known chunk, or register (and, when
            # repairing, upload) a new one. cdata is required for new chunks.
            try:
                self.chunks.incref(id_)
            except KeyError:
                assert cdata is not None
                self.chunks[id_] = ChunkIndexEntry(refcount=1, size=size, csize=csize)
                if self.repair:
                    self.repository.put(id_, cdata)

        def verify_file_chunks(archive_name, item):
            """Verifies that all file chunks are present.

            Missing file chunks will be replaced with new chunks of the same length containing all zeros.
            If a previously missing file chunk re-appears, the replacement chunk is replaced by the correct one.
            """
            def replacement_chunk(size):
                # Build an all-zero chunk of the given size (sparse allocation).
                chunk = Chunk(None, allocation=CH_ALLOC, size=size)
                chunk_id, data = cached_hash(chunk, self.key.id_hash)
                cdata = self.key.encrypt(chunk_id, data)
                csize = len(cdata)
                return chunk_id, size, csize, cdata

            offset = 0
            chunk_list = []
            chunks_replaced = False
            # chunks_healthy remembers the original (correct) chunk list of a
            # previously repaired file; without it, the current list is "healthy".
            has_chunks_healthy = 'chunks_healthy' in item
            chunks_current = item.chunks
            chunks_healthy = item.chunks_healthy if has_chunks_healthy else chunks_current
            if has_chunks_healthy and len(chunks_current) != len(chunks_healthy):
                # should never happen, but there was issue #3218.
                logger.warning(f'{archive_name}: {item.path}: Invalid chunks_healthy metadata removed!')
                del item.chunks_healthy
                has_chunks_healthy = False
                chunks_healthy = chunks_current
            for chunk_current, chunk_healthy in zip(chunks_current, chunks_healthy):
                chunk_id, size, csize = chunk_healthy
                if chunk_id not in self.chunks:
                    # a chunk of the healthy list is missing
                    if chunk_current == chunk_healthy:
                        logger.error('{}: {}: New missing file chunk detected (Byte {}-{}, Chunk {}). '
                                     'Replacing with all-zero chunk.'.format(
                                     archive_name, item.path, offset, offset + size, bin_to_hex(chunk_id)))
                        self.error_found = chunks_replaced = True
                        chunk_id, size, csize, cdata = replacement_chunk(size)
                        add_reference(chunk_id, size, csize, cdata)
                    else:
                        logger.info('{}: {}: Previously missing file chunk is still missing (Byte {}-{}, Chunk {}). '
                                    'It has an all-zero replacement chunk already.'.format(
                                    archive_name, item.path, offset, offset + size, bin_to_hex(chunk_id)))
                        chunk_id, size, csize = chunk_current
                        if chunk_id in self.chunks:
                            add_reference(chunk_id, size, csize)
                        else:
                            logger.warning('{}: {}: Missing all-zero replacement chunk detected (Byte {}-{}, Chunk {}). '
                                           'Generating new replacement chunk.'.format(
                                           archive_name, item.path, offset, offset + size, bin_to_hex(chunk_id)))
                            self.error_found = chunks_replaced = True
                            chunk_id, size, csize, cdata = replacement_chunk(size)
                            add_reference(chunk_id, size, csize, cdata)
                else:
                    if chunk_current == chunk_healthy:
                        # normal case, all fine.
                        add_reference(chunk_id, size, csize)
                    else:
                        # Healthy chunk reappeared in the repo: switch back to it.
                        logger.info('{}: {}: Healed previously missing file chunk! (Byte {}-{}, Chunk {}).'.format(
                            archive_name, item.path, offset, offset + size, bin_to_hex(chunk_id)))
                        add_reference(chunk_id, size, csize)
                        mark_as_possibly_superseded(chunk_current[0])  # maybe orphaned the all-zero replacement chunk
                chunk_list.append([chunk_id, size, csize])  # list-typed element as chunks_healthy is list-of-lists
                offset += size
            if chunks_replaced and not has_chunks_healthy:
                # if this is first repair, remember the correct chunk IDs, so we can maybe heal the file later
                item.chunks_healthy = item.chunks
            if has_chunks_healthy and chunk_list == chunks_healthy:
                logger.info(f'{archive_name}: {item.path}: Completely healed previously damaged file!')
                del item.chunks_healthy
            item.chunks = chunk_list
            if 'size' in item:
                item_size = item.size
                item_chunks_size = item.get_size(compressed=False, from_chunks=True)
                if item_size != item_chunks_size:
                    # just warn, but keep the inconsistency, so that borg extract can warn about it.
                    logger.warning('{}: {}: size inconsistency detected: size {}, chunks size {}'.format(
                        archive_name, item.path, item_size, item_chunks_size))

        def robust_iterator(archive):
            """Iterates through all archive items

            Missing item chunks will be skipped and the msgpack stream will be restarted
            """
            # NOTE: this closure reads `repository`, which is bound later by the
            # `with cache_if_remote(...) as repository:` block below - the
            # generator only runs inside that block.
            item_keys = frozenset(key.encode() for key in self.manifest.item_keys)
            required_item_keys = frozenset(key.encode() for key in REQUIRED_ITEM_KEYS)
            unpacker = RobustUnpacker(lambda item: isinstance(item, StableDict) and b'path' in item,
                                      self.manifest.item_keys)
            _state = 0

            def missing_chunk_detector(chunk_id):
                # groupby key: even _state = run of present chunks, odd = run of
                # missing chunks; bump the counter whenever presence flips.
                nonlocal _state
                if _state % 2 != int(chunk_id not in self.chunks):
                    _state += 1
                return _state

            def report(msg, chunk_id, chunk_no):
                cid = bin_to_hex(chunk_id)
                msg += ' [chunk: %06d_%s]' % (chunk_no, cid)  # see "debug dump-archive-items"
                self.error_found = True
                logger.error(msg)

            def list_keys_safe(keys):
                return ', '.join(k.decode(errors='replace') if isinstance(k, bytes) else str(k) for k in keys)

            def valid_item(obj):
                # Returns (is_valid, reason-for-rejection).
                if not isinstance(obj, StableDict):
                    return False, 'not a dictionary'
                # A bug in Attic up to and including release 0.13 added a (meaningless) b'acl' key to every item.
                # We ignore it here, should it exist. See test_attic013_acl_bug for details.
                obj.pop(b'acl', None)
                keys = set(obj)
                if not required_item_keys.issubset(keys):
                    return False, 'missing required keys: ' + list_keys_safe(required_item_keys - keys)
                if not keys.issubset(item_keys):
                    return False, 'invalid keys: ' + list_keys_safe(keys - item_keys)
                return True, ''

            i = 0
            for state, items in groupby(archive.items, missing_chunk_detector):
                items = list(items)
                if state % 2:
                    # Odd state: these metadata chunks are missing - report and skip.
                    for chunk_id in items:
                        report('item metadata chunk missing', chunk_id, i)
                        i += 1
                    continue
                if state > 0:
                    # We just skipped missing chunks; resync the msgpack stream.
                    unpacker.resync()
                for chunk_id, cdata in zip(items, repository.get_many(items)):
                    try:
                        data = self.key.decrypt(chunk_id, cdata)
                        unpacker.feed(data)
                        for item in unpacker:
                            valid, reason = valid_item(item)
                            if valid:
                                yield Item(internal_dict=item)
                            else:
                                report('Did not get expected metadata dict when unpacking item metadata (%s)' % reason, chunk_id, i)
                    except IntegrityError as integrity_error:
                        # key.decrypt() detected integrity issues.
                        # maybe the repo gave us a valid cdata, but not for the chunk_id we wanted.
                        # or the authentication of cdata failed, meaning the encrypted data was corrupted.
                        report(str(integrity_error), chunk_id, i)
                    except msgpack.UnpackException:
                        report('Unpacker crashed while unpacking item metadata, trying to resync...', chunk_id, i)
                        unpacker.resync()
                    except Exception:
                        report('Exception while decrypting or unpacking item metadata', chunk_id, i)
                        raise
                    i += 1

        # Select the archives to check.
        if archive is None:
            sort_by = sort_by.split(',')
            if any((first, last, glob)):
                archive_infos = self.manifest.archives.list(sort_by=sort_by, glob=glob, first=first, last=last)
                if glob and not archive_infos:
                    logger.warning('--glob-archives %s does not match any archives', glob)
                if first and len(archive_infos) < first:
                    logger.warning('--first %d archives: only found %d archives', first, len(archive_infos))
                if last and len(archive_infos) < last:
                    logger.warning('--last %d archives: only found %d archives', last, len(archive_infos))
            else:
                archive_infos = self.manifest.archives.list(sort_by=sort_by)
        else:
            # we only want one specific archive
            try:
                archive_infos = [self.manifest.archives[archive]]
            except KeyError:
                logger.error("Archive '%s' not found.", archive)
                self.error_found = True
                return
        num_archives = len(archive_infos)

        pi = ProgressIndicatorPercent(total=num_archives, msg='Checking archives %3.1f%%', step=0.1,
                                      msgid='check.rebuild_refcounts')
        with cache_if_remote(self.repository) as repository:
            for i, info in enumerate(archive_infos):
                pi.show(i)
                logger.info(f'Analyzing archive {info.name} ({i + 1}/{num_archives})')
                archive_id = info.id
                if archive_id not in self.chunks:
                    logger.error('Archive metadata block %s is missing!', bin_to_hex(archive_id))
                    self.error_found = True
                    del self.manifest.archives[info.name]
                    continue
                mark_as_possibly_superseded(archive_id)
                cdata = self.repository.get(archive_id)
                try:
                    data = self.key.decrypt(archive_id, cdata)
                except IntegrityError as integrity_error:
                    logger.error('Archive metadata block %s is corrupted: %s', bin_to_hex(archive_id), integrity_error)
                    self.error_found = True
                    del self.manifest.archives[info.name]
                    continue
                archive = ArchiveItem(internal_dict=msgpack.unpackb(data))
                if archive.version != 1:
                    raise Exception('Unknown archive metadata version')
                archive.cmdline = [safe_decode(arg) for arg in archive.cmdline]
                # Re-serialize all (verified/repaired) items into fresh metadata
                # chunks; add_callback stores and refcounts each one.
                items_buffer = ChunkBuffer(self.key)
                items_buffer.write_chunk = add_callback
                for item in robust_iterator(archive):
                    if 'chunks' in item:
                        verify_file_chunks(info.name, item)
                    items_buffer.add(item)
                items_buffer.flush(flush=True)
                for previous_item_id in archive.items:
                    mark_as_possibly_superseded(previous_item_id)
                archive.items = items_buffer.chunks
                # Write the rebuilt archive metadata block and point the
                # manifest at it (keeping the original timestamp).
                data = msgpack.packb(archive.as_dict())
                new_archive_id = self.key.id_hash(data)
                cdata = self.key.encrypt(new_archive_id, data)
                add_reference(new_archive_id, len(data), len(cdata), cdata)
                self.manifest.archives[info.name] = (new_archive_id, info.ts)
        pi.finish()
|
2014-02-24 22:37:21 +00:00
|
|
|
|
2015-08-09 10:43:57 +00:00
|
|
|
def orphan_chunks_check(self):
|
|
|
|
if self.check_all:
|
2016-04-16 15:48:47 +00:00
|
|
|
unused = {id_ for id_, entry in self.chunks.iteritems() if entry.refcount == 0}
|
2015-08-09 10:43:57 +00:00
|
|
|
orphaned = unused - self.possibly_superseded
|
|
|
|
if orphaned:
|
2022-02-27 18:31:33 +00:00
|
|
|
logger.error(f'{len(orphaned)} orphaned objects found!')
|
2015-12-07 23:21:46 +00:00
|
|
|
self.error_found = True
|
2018-04-28 19:03:08 +00:00
|
|
|
if self.repair and unused:
|
|
|
|
logger.info('Deleting %d orphaned and %d superseded objects...' % (
|
|
|
|
len(orphaned), len(self.possibly_superseded)))
|
2015-08-09 10:43:57 +00:00
|
|
|
for id_ in unused:
|
|
|
|
self.repository.delete(id_)
|
2018-04-28 19:03:08 +00:00
|
|
|
logger.info('Finished deleting orphaned/superseded objects.')
|
2015-08-09 10:43:57 +00:00
|
|
|
else:
|
2016-04-03 15:58:15 +00:00
|
|
|
logger.info('Orphaned objects check skipped (needs all archives checked).')
|
2015-08-09 10:43:57 +00:00
|
|
|
|
2015-11-18 01:27:25 +00:00
|
|
|
def finish(self, save_space=False):
|
2014-02-24 22:37:21 +00:00
|
|
|
if self.repair:
|
2018-04-27 21:57:47 +00:00
|
|
|
logger.info('Writing Manifest.')
|
2014-02-24 22:37:21 +00:00
|
|
|
self.manifest.write()
|
2018-06-24 17:08:49 +00:00
|
|
|
logger.info('Committing repo.')
|
|
|
|
self.repository.commit(compact=False, save_space=save_space)
|
2016-04-07 09:29:52 +00:00
|
|
|
|
|
|
|
|
|
|
|
class ArchiveRecreater:
|
|
|
|
    class Interrupted(Exception):
        """Raised when archive recreation is interrupted; carries progress metadata."""
        def __init__(self, metadata=None):
            # Normalize to a dict so callers can always read .metadata safely.
            self.metadata = metadata or {}
|
|
|
|
|
|
|
|
@staticmethod
|
|
|
|
def is_temporary_archive(archive_name):
|
|
|
|
return archive_name.endswith('.recreate')
|
|
|
|
|
|
|
|
    def __init__(self, repository, manifest, key, cache, matcher,
                 exclude_caches=False, exclude_if_present=None, keep_exclude_tags=False,
                 chunker_params=None, compression=None, recompress=False, always_recompress=False,
                 dry_run=False, stats=False, progress=False, file_status_printer=None,
                 timestamp=None, checkpoint_interval=1800):
        """Set up an archive recreation run.

        :param matcher: pattern matcher selecting which items to keep.
        :param chunker_params: if not None, items are rechunked with these params.
        :param compression: target CompressionSpec; defaults to 'none'.
        :param recompress: recompress chunks whose stored compression differs.
        :param always_recompress: recompress even when compression already matches.
        :param file_status_printer: callable(status, path) for progress output;
            defaults to a no-op.
        :param timestamp: override the target archive timestamp (None = keep original).
        """
        self.repository = repository
        self.key = key
        self.manifest = manifest
        self.cache = cache

        self.matcher = matcher
        self.exclude_caches = exclude_caches
        self.exclude_if_present = exclude_if_present or []
        self.keep_exclude_tags = keep_exclude_tags

        # Rechunking happens iff explicit chunker params were requested.
        self.rechunkify = chunker_params is not None
        if self.rechunkify:
            logger.debug('Rechunking archives to %s', chunker_params)
        self.chunker_params = chunker_params or CHUNKER_PARAMS
        self.recompress = recompress
        self.always_recompress = always_recompress
        self.compression = compression or CompressionSpec('none')
        # Chunk ids already processed in this run (dedup within the recreation).
        self.seen_chunks = set()

        self.timestamp = timestamp
        self.dry_run = dry_run
        self.stats = stats
        self.progress = progress
        self.print_file_status = file_status_printer or (lambda *args: None)
        # No checkpointing on dry runs - nothing is written anyway.
        self.checkpoint_interval = None if dry_run else checkpoint_interval
|
2016-04-07 09:29:52 +00:00
|
|
|
|
2016-08-02 13:53:29 +00:00
|
|
|
def recreate(self, archive_name, comment=None, target_name=None):
|
2016-04-07 09:29:52 +00:00
|
|
|
assert not self.is_temporary_archive(archive_name)
|
|
|
|
archive = self.open_archive(archive_name)
|
2016-11-19 15:49:20 +00:00
|
|
|
target = self.create_target(archive, target_name)
|
2016-04-07 09:29:52 +00:00
|
|
|
if self.exclude_if_present or self.exclude_caches:
|
|
|
|
self.matcher_add_tagged_dirs(archive)
|
2016-04-10 12:09:05 +00:00
|
|
|
if self.matcher.empty() and not self.recompress and not target.recreate_rechunkify and comment is None:
|
2017-06-03 13:47:01 +00:00
|
|
|
return False
|
2016-11-19 15:49:20 +00:00
|
|
|
self.process_items(archive, target)
|
2016-08-02 13:53:29 +00:00
|
|
|
replace_original = target_name is None
|
2016-12-02 10:09:52 +00:00
|
|
|
self.save(archive, target, comment, replace_original=replace_original)
|
2017-06-03 13:47:01 +00:00
|
|
|
return True
|
2016-04-07 09:29:52 +00:00
|
|
|
|
2016-11-19 15:49:20 +00:00
|
|
|
    def process_items(self, archive, target):
        """Walk all items of *archive*, copying the matching ones into *target*.

        When only a subset of items is kept, hard links whose "master" item is
        excluded need special handling: the first kept link inherits the
        master's chunks and becomes the new master; later links are repointed
        to it.
        """
        matcher = self.matcher
        target_is_subset = not matcher.empty()
        # path -> (chunks, chunks_healthy, new_source) for excluded hardlink masters.
        hardlink_masters = {} if target_is_subset else None

        def item_is_hardlink_master(item):
            return (target_is_subset and
                    hardlinkable(item.mode) and
                    item.get('hardlink_master', True) and
                    'source' not in item)

        for item in archive.iter_items():
            if not matcher.match(item.path):
                self.print_file_status('x', item.path)
                if item_is_hardlink_master(item):
                    # Remember the excluded master's chunks so a kept link can adopt them.
                    hardlink_masters[item.path] = (item.get('chunks'), item.get('chunks_healthy'), None)
                continue
            if target_is_subset and hardlinkable(item.mode) and item.get('source') in hardlink_masters:
                # master of this hard link is outside the target subset
                chunks, chunks_healthy, new_source = hardlink_masters[item.source]
                if new_source is None:
                    # First item to use this master, move the chunks
                    item.chunks = chunks
                    if chunks_healthy is not None:
                        item.chunks_healthy = chunks_healthy
                    hardlink_masters[item.source] = (None, None, item.path)
                    del item.source
                else:
                    # Master was already moved, only update this item's source
                    item.source = new_source
            if self.dry_run:
                self.print_file_status('-', item.path)
            else:
                self.process_item(archive, target, item)
        if self.progress:
            target.stats.show_progress(final=True)
|
|
|
|
|
|
|
|
    def process_item(self, archive, target, item):
        """Copy a single item into the target archive and print its file status."""
        status = file_status(item.mode)
        if 'chunks' in item:
            # Regular file: print the status before the (potentially slow)
            # chunk processing, then clear it so the final print below does
            # not repeat it.
            self.print_file_status(status, item.path)
            status = None
            self.process_chunks(archive, target, item)
            target.stats.nfiles += 1
        target.add_item(item, stats=target.stats)
        self.print_file_status(status, item.path)
|
2016-04-07 09:29:52 +00:00
|
|
|
|
|
|
|
def process_chunks(self, archive, target, item):
|
|
|
|
if not self.recompress and not target.recreate_rechunkify:
|
2016-05-31 23:45:45 +00:00
|
|
|
for chunk_id, size, csize in item.chunks:
|
2016-04-07 09:29:52 +00:00
|
|
|
self.cache.chunk_incref(chunk_id, target.stats)
|
2016-05-31 23:45:45 +00:00
|
|
|
return item.chunks
|
2016-12-02 11:54:27 +00:00
|
|
|
chunk_iterator = self.iter_chunks(archive, target, list(item.chunks))
|
2017-04-03 19:48:06 +00:00
|
|
|
chunk_processor = partial(self.chunk_processor, target)
|
2018-03-10 14:11:08 +00:00
|
|
|
target.process_file_chunks(item, self.cache, target.stats, self.progress, chunk_iterator, chunk_processor)
|
2016-11-19 18:09:47 +00:00
|
|
|
|
2020-12-15 01:37:26 +00:00
|
|
|
    def chunk_processor(self, target, chunk):
        """Store one data chunk for the target archive, returning its cache entry.

        Deduplicates against chunks already handled in this run, and - when
        recompressing - skips chunks that are already stored with the desired
        compression (unless always_recompress).
        """
        chunk_id, data = cached_hash(chunk, self.key.id_hash)
        if chunk_id in self.seen_chunks:
            # Already processed in this recreation run - just bump refcount.
            return self.cache.chunk_incref(chunk_id, target.stats)
        overwrite = self.recompress
        if self.recompress and not self.always_recompress and chunk_id in self.cache.chunks:
            # Check if this chunk is already compressed the way we want it
            old_chunk = self.key.decrypt(None, self.repository.get(chunk_id), decompress=False)
            if Compressor.detect(old_chunk).name == self.key.compressor.decide(data).name:
                # Stored chunk has the same compression we wanted
                overwrite = False
        chunk_entry = self.cache.add_chunk(chunk_id, data, target.stats, overwrite=overwrite, wait=False)
        # Drain async repository responses without blocking.
        self.cache.repository.async_response(wait=False)
        self.seen_chunks.add(chunk_entry.id)
        return chunk_entry
|
2016-11-19 18:09:47 +00:00
|
|
|
|
2016-12-02 11:54:27 +00:00
|
|
|
def iter_chunks(self, archive, target, chunks):
|
2016-11-19 18:09:47 +00:00
|
|
|
chunk_iterator = archive.pipeline.fetch_many([chunk_id for chunk_id, _, _ in chunks])
|
2016-04-07 09:29:52 +00:00
|
|
|
if target.recreate_rechunkify:
|
|
|
|
# The target.chunker will read the file contents through ChunkIteratorFileWrapper chunk-by-chunk
|
|
|
|
# (does not load the entire file into memory)
|
|
|
|
file = ChunkIteratorFileWrapper(chunk_iterator)
|
2016-12-02 10:20:26 +00:00
|
|
|
yield from target.chunker.chunkify(file)
|
2016-11-19 18:09:47 +00:00
|
|
|
else:
|
|
|
|
for chunk in chunk_iterator:
|
2020-12-15 01:37:26 +00:00
|
|
|
yield Chunk(chunk, size=len(chunk), allocation=CH_DATA)
|
2016-03-18 02:16:12 +00:00
|
|
|
|
2016-11-19 18:09:47 +00:00
|
|
|
    def save(self, archive, target, comment=None, replace_original=True):
        """Finalize the target archive: save it and (optionally) replace the original.

        No-op on dry runs. When self.timestamp is None the original archive's
        time/time_end are preserved; otherwise the explicit timestamp is used.
        """
        if self.dry_run:
            return
        if comment is None:
            # Carry the original archive's comment over by default.
            comment = archive.metadata.get('comment', '')

        # Keep for the statistics if necessary
        if self.stats:
            _start = target.start

        if self.timestamp is None:
            additional_metadata = {
                'time': archive.metadata.time,
                'time_end': archive.metadata.get('time_end') or archive.metadata.time,
                'cmdline': archive.metadata.cmdline,
                # but also remember recreate metadata:
                'recreate_cmdline': sys.argv,
            }
        else:
            additional_metadata = {
                'cmdline': archive.metadata.cmdline,
                # but also remember recreate metadata:
                'recreate_cmdline': sys.argv,
            }

        target.save(comment=comment, timestamp=self.timestamp,
                    stats=target.stats, additional_metadata=additional_metadata)
        if replace_original:
            # Delete the original archive and take over its name.
            archive.delete(Statistics(), progress=self.progress)
            target.rename(archive.name)
        if self.stats:
            # Restore the run's start time (target.save may have changed it)
            # so the printed statistics cover the whole recreation.
            target.start = _start
            target.end = datetime.utcnow()
            log_multi(DASHES,
                      str(target),
                      DASHES,
                      str(target.stats),
                      str(self.cache),
                      DASHES)
|
2016-04-07 09:29:52 +00:00
|
|
|
|
|
|
|
def matcher_add_tagged_dirs(self, archive):
    """Add excludes to the matcher created by exclude_cache and exclude_if_present."""
    def exclude(dir, tag_item):
        # Record the exclusion for directory *dir*, tagged by *tag_item*.
        # Appends to the enclosing function's tag_files/tagged_dirs lists.
        if self.keep_exclude_tags:
            # keep the tag file itself, but exclude everything else below its dir
            tag_files.append(PathPrefixPattern(tag_item.path, recurse_dir=False))
            tagged_dirs.append(FnmatchPattern(dir + '/', recurse_dir=False))
        else:
            # exclude the whole tagged directory, tag file included
            tagged_dirs.append(PathPrefixPattern(dir, recurse_dir=False))

    matcher = self.matcher
    tag_files = []
    tagged_dirs = []

    # to support reading hard-linked CACHEDIR.TAGs (aka CACHE_TAG_NAME), similar to hardlink_masters:
    cachedir_masters = {}

    if self.exclude_caches:
        # sadly, due to how CACHEDIR.TAG works (filename AND file [header] contents) and
        # how borg deals with hardlinks (slave hardlinks referring back to master hardlinks),
        # we need to pass over the archive collecting hardlink master paths.
        # as seen in issue #4911, the master paths can have an arbitrary filenames,
        # not just CACHEDIR.TAG.
        for item in archive.iter_items(filter=lambda item: os.path.basename(item.path) == CACHE_TAG_NAME):
            if stat.S_ISREG(item.mode) and 'chunks' not in item and 'source' in item:
                # this is a hardlink slave, referring back to its hardlink master (via item.source)
                cachedir_masters[item.source] = None  # we know the key (path), but not the value (item) yet

    # second pass: inspect matching items (and all CACHEDIR.TAG-named files)
    # and register exclusions for tagged directories.
    for item in archive.iter_items(
            filter=lambda item: os.path.basename(item.path) == CACHE_TAG_NAME or matcher.match(item.path)):
        if self.exclude_caches and item.path in cachedir_masters:
            # fill in the master item collected (as key only) in the first pass
            cachedir_masters[item.path] = item
        dir, tag_file = os.path.split(item.path)
        if tag_file in self.exclude_if_present:
            exclude(dir, item)
        elif self.exclude_caches and tag_file == CACHE_TAG_NAME and stat.S_ISREG(item.mode):
            # hardlink slaves have no 'chunks'; read contents via their master item
            content_item = item if 'chunks' in item else cachedir_masters[item.source]
            file = open_item(archive, content_item)
            # only a real CACHEDIR.TAG (header contents match) triggers exclusion
            if file.read(len(CACHE_TAG_CONTENTS)) == CACHE_TAG_CONTENTS:
                exclude(dir, item)
    matcher.add(tag_files, IECommand.Include)
    matcher.add(tagged_dirs, IECommand.ExcludeNoRecurse)
|
2016-04-07 09:29:52 +00:00
|
|
|
|
2016-11-19 15:49:20 +00:00
|
|
|
def create_target(self, archive, target_name=None):
    """Create target archive.

    Defaults the name to '<source>.recreate', decides whether chunks must
    be re-cut by comparing the source archive's chunker params with the
    target's, and attaches a chunk processor and chunker to the target.
    """
    target = self.create_target_archive(target_name or archive.name + '.recreate')
    # If the archives use the same chunker params, then don't rechunkify
    src_params = tuple(archive.metadata.get('chunker_params', []))
    if len(src_params) == 4 and isinstance(src_params[0], int):
        # this is a borg < 1.2 chunker_params tuple, no chunker algo specified, but we only had buzhash:
        src_params = (CH_BUZHASH, ) + src_params
    rechunkify = self.rechunkify and src_params != target.chunker_params
    target.recreate_rechunkify = rechunkify
    if rechunkify:
        logger.debug('Rechunking archive from %s to %s', src_params or '(unknown)', target.chunker_params)
    processor = ChunksProcessor(
        cache=self.cache, key=self.key,
        add_item=target.add_item, write_checkpoint=target.write_checkpoint,
        checkpoint_interval=self.checkpoint_interval, rechunkify=rechunkify)
    target.process_file_chunks = processor.process_file_chunks
    target.chunker = get_chunker(*target.chunker_params, seed=self.key.chunk_seed)
    return target
|
2016-04-07 09:29:52 +00:00
|
|
|
|
|
|
|
def create_target_archive(self, name):
    """Return a freshly created Archive named *name* (create=True),
    wired to this recreater's repository, key, manifest and cache."""
    return Archive(self.repository, self.key, self.manifest, name, create=True,
                   progress=self.progress, chunker_params=self.chunker_params, cache=self.cache,
                   checkpoint_interval=self.checkpoint_interval)
|
|
|
|
|
|
|
|
def open_archive(self, name, **kwargs):
    """Open the existing archive *name*; extra keyword args go to Archive."""
    repo, key, manifest = self.repository, self.key, self.manifest
    return Archive(repo, key, manifest, name, cache=self.cache, **kwargs)
|