import base64
import json
import os
import stat
import sys
import time
from collections import OrderedDict, defaultdict
from contextlib import contextmanager
from datetime import datetime, timedelta
from functools import partial
from getpass import getuser
from io import BytesIO
from itertools import groupby, zip_longest
from shutil import get_terminal_size

from .platformflags import is_win32
from .logger import create_logger

logger = create_logger()

from . import xattr
from .chunker import get_chunker, Chunk
from .cache import ChunkListEntry
from .crypto.key import key_factory, UnsupportedPayloadError, AEADKeyBase
from .compress import Compressor, CompressionSpec
from .constants import *  # NOQA
from .crypto.low_level import IntegrityError as IntegrityErrorBase
from .hashindex import ChunkIndex, ChunkIndexEntry, CacheSynchronizer
from .helpers import HardLinkManager
from .helpers import ChunkIteratorFileWrapper, open_item
from .helpers import Error, IntegrityError, set_ec
from .platform import uid2user, user2uid, gid2group, group2gid
from .helpers import parse_timestamp, archive_ts_now
from .helpers import OutputTimestamp, format_timedelta, format_file_size, file_status, FileSize
from .helpers import safe_encode, make_path_safe, remove_surrogates, text_to_json
from .helpers import StableDict
from .helpers import bin_to_hex
from .helpers import safe_ns
from .helpers import ellipsis_truncate, ProgressIndicatorPercent, log_multi
from .helpers import os_open, flags_normal, flags_dir
from .helpers import os_stat
from .helpers import msgpack
from .helpers import sig_int
from .helpers.lrucache import LRUCache
from .manifest import Manifest
from .patterns import PathPrefixPattern, FnmatchPattern, IECommand
from .item import Item, ArchiveItem, ItemDiff
from .platform import acl_get, acl_set, set_flags, get_flags, swidth, hostname
from .remote import cache_if_remote
from .repository import Repository, LIST_SCAN_LIMIT
from .repoobj import RepoObj

has_link = hasattr(os, "link")


class Statistics:
    def __init__(self, output_json=False, iec=False):
        self.output_json = output_json
        self.iec = iec
        self.osize = self.usize = self.nfiles = 0
        self.osize_parts = self.usize_parts = self.nfiles_parts = 0
        self.last_progress = 0  # timestamp when last progress was shown
        self.files_stats = defaultdict(int)
        self.chunking_time = 0.0
        self.hashing_time = 0.0
        self.rx_bytes = 0
        self.tx_bytes = 0

    def update(self, size, unique, part=False):
        if not part:
            self.osize += size
            if unique:
                self.usize += size
        else:
            self.osize_parts += size
            if unique:
                self.usize_parts += size

    def __add__(self, other):
        if not isinstance(other, Statistics):
            raise TypeError("can only add Statistics objects")
        stats = Statistics(self.output_json, self.iec)
        stats.osize = self.osize + other.osize
        stats.usize = self.usize + other.usize
        stats.nfiles = self.nfiles + other.nfiles
        stats.osize_parts = self.osize_parts + other.osize_parts
        stats.usize_parts = self.usize_parts + other.usize_parts
        stats.nfiles_parts = self.nfiles_parts + other.nfiles_parts
        stats.chunking_time = self.chunking_time + other.chunking_time
        stats.hashing_time = self.hashing_time + other.hashing_time
        for key in other.files_stats:
            stats.files_stats[key] = self.files_stats[key] + other.files_stats[key]

        return stats

    def __str__(self):
        hashing_time = format_timedelta(timedelta(seconds=self.hashing_time))
        chunking_time = format_timedelta(timedelta(seconds=self.chunking_time))
        return """\
Number of files: {stats.nfiles}
Original size: {stats.osize_fmt}
Deduplicated size: {stats.usize_fmt}
Time spent in hashing: {hashing_time}
Time spent in chunking: {chunking_time}
Added files: {added_files}
Unchanged files: {unchanged_files}
Modified files: {modified_files}
Error files: {error_files}
Bytes read from remote: {stats.rx_bytes}
Bytes sent to remote: {stats.tx_bytes}
""".format(
            stats=self,
            hashing_time=hashing_time,
            chunking_time=chunking_time,
            added_files=self.files_stats["A"],
            unchanged_files=self.files_stats["U"],
            modified_files=self.files_stats["M"],
            error_files=self.files_stats["E"],
        )

    def __repr__(self):
        return "<{cls} object at {hash:#x} ({self.osize}, {self.usize})>".format(
            cls=type(self).__name__, hash=id(self), self=self
        )

    def as_dict(self):
        return {
            "original_size": FileSize(self.osize, iec=self.iec),
            "deduplicated_size": FileSize(self.usize, iec=self.iec),
            "nfiles": self.nfiles,
            "hashing_time": self.hashing_time,
            "chunking_time": self.chunking_time,
            "files_stats": self.files_stats,
        }

    def as_raw_dict(self):
        return {
            "size": self.osize,
            "nfiles": self.nfiles,
            "size_parts": self.osize_parts,
            "nfiles_parts": self.nfiles_parts,
        }

    @classmethod
    def from_raw_dict(cls, **kw):
        self = cls()
        self.osize = kw["size"]
        self.nfiles = kw["nfiles"]
        self.osize_parts = kw["size_parts"]
        self.nfiles_parts = kw["nfiles_parts"]
        return self

    @property
    def osize_fmt(self):
        return format_file_size(self.osize, iec=self.iec)

    @property
    def usize_fmt(self):
        return format_file_size(self.usize, iec=self.iec)

    def show_progress(self, item=None, final=False, stream=None, dt=None):
        now = time.monotonic()
        if dt is None or now - self.last_progress > dt:
            self.last_progress = now
            if self.output_json:
                if not final:
                    data = self.as_dict()
                    if item:
                        data.update(text_to_json("path", item.path))
                else:
                    data = {}
                data.update({"time": time.time(), "type": "archive_progress", "finished": final})
                msg = json.dumps(data)
                end = "\n"
            else:
                columns, lines = get_terminal_size()
                if not final:
                    msg = "{0.osize_fmt} O {0.usize_fmt} U {0.nfiles} N ".format(self)
                    path = remove_surrogates(item.path) if item else ""
                    space = columns - swidth(msg)
                    if space < 12:
                        msg = ""
                        space = columns - swidth(msg)
                    if space >= 8:
                        msg += ellipsis_truncate(path, space)
                else:
                    msg = " " * columns
                end = "\r"
            print(msg, end=end, file=stream or sys.stderr, flush=True)


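# Illustrative sketch (not part of borg): how the Statistics accounting above is typically
# driven. All values here are made up; update() adds to the original size and, for unique
# (newly stored) chunks, also to the deduplicated size; nfiles is incremented by the caller.
#
#     stats = Statistics(output_json=False, iec=False)
#     stats.update(size=4096, unique=True)     # new chunk: counts towards osize and usize
#     stats.update(size=4096, unique=False)    # duplicate chunk: counts towards osize only
#     stats.nfiles += 1
#     print(stats.osize_fmt, stats.usize_fmt)  # formatted via format_file_size()

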
def is_special(mode):
    # file types that get special treatment in --read-special mode
    return stat.S_ISBLK(mode) or stat.S_ISCHR(mode) or stat.S_ISFIFO(mode)


class BackupError(Exception):
    """
    Exception raised for non-OSError-based exceptions while accessing backup files.
    """


class BackupOSError(Exception):
    """
    Wrapper for OSError raised while accessing backup files.

    Borg does different kinds of IO, and IO failures have different consequences.
    This wrapper represents failures of input file or extraction IO.
    These are non-critical and are only reported (exit code = 1, warning).

    Any unwrapped IO error is critical and aborts execution (for example repository IO failure).
    """

    def __init__(self, op, os_error):
        self.op = op
        self.os_error = os_error
        self.errno = os_error.errno
        self.strerror = os_error.strerror
        self.filename = os_error.filename

    def __str__(self):
        if self.op:
            return f"{self.op}: {self.os_error}"
        else:
            return str(self.os_error)


class BackupIO:
    op = ""

    def __call__(self, op=""):
        self.op = op
        return self

    def __enter__(self):
        pass

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type and issubclass(exc_type, OSError):
            raise BackupOSError(self.op, exc_val) from exc_val


backup_io = BackupIO()


def backup_io_iter(iterator):
    backup_io.op = "read"
    while True:
        with backup_io:
            try:
                item = next(iterator)
            except StopIteration:
                return
        yield item


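# Illustrative sketch (not part of borg): backup_io is used as a context manager (and, via
# backup_io_iter / its `op` attribute, around iterators) so that any OSError raised while
# accessing input or output files is converted into a non-critical BackupOSError.
# The path below is a made-up placeholder.
#
#     with backup_io("open"):
#         fd = open("/some/file", "rb")   # an OSError here surfaces as BackupOSError("open", ...)

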
def stat_update_check(st_old, st_curr):
    """
    this checks for some race conditions between the first filename-based stat()
    we did before dispatching to the (hopefully correct) file type backup handler
    and the (hopefully) fd-based fstat() we did in the handler.

    if there is a problematic difference (e.g. the file type changed), we rather
    skip the file than be tricked into a security problem.

    such races should only happen if:
    - we are backing up a live filesystem (no snapshot, not inactive)
    - files change due to normal fs activity at an unfortunate time
    - somebody is doing an attack against us
    """
    # assuming that a file type change implies an inode change AND that inode numbers
    # are not reused within a short timeframe, this check is redundant and covered by the inode check:
    if stat.S_IFMT(st_old.st_mode) != stat.S_IFMT(st_curr.st_mode):
        # in this case, we dispatched to the wrong handler - abort
        raise BackupError("file type changed (race condition), skipping file")
    if st_old.st_ino != st_curr.st_ino:
        # in this case, the hardlinks-related code in create_helper has the wrong inode - abort!
        raise BackupError("file inode changed (race condition), skipping file")
    # looks ok, we are still dealing with the same thing - return current stat:
    return st_curr


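# Illustrative sketch (not part of borg): the intended call pattern for stat_update_check() -
# first a name-based stat() (used for dispatching), then an fd-based fstat() inside the
# type-specific handler, then the comparison of both results. `some_path` is a placeholder.
#
#     st_old = os.stat(some_path, follow_symlinks=False)   # dispatch decision is based on this
#     fd = os.open(some_path, os.O_RDONLY)
#     st_curr = os.fstat(fd)                                # what we actually opened
#     st = stat_update_check(st_old, st_curr)               # raises BackupError on a detected race

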
@contextmanager
def OsOpen(*, flags, path=None, parent_fd=None, name=None, noatime=False, op="open"):
    with backup_io(op):
        fd = os_open(path=path, parent_fd=parent_fd, name=name, flags=flags, noatime=noatime)
    try:
        yield fd
    finally:
        # On windows fd is None for directories.
        if fd is not None:
            os.close(fd)


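# Illustrative sketch (not part of borg): OsOpen wraps os_open() in backup_io and guarantees
# the fd gets closed again; flags_normal / flags_dir come from .helpers (imported above).
# `some_path` is a placeholder.
#
#     with OsOpen(path=some_path, flags=flags_normal, noatime=True, op="open") as fd:
#         st = os.fstat(fd)

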
class DownloadPipeline:
    def __init__(self, repository, repo_objs):
        self.repository = repository
        self.repo_objs = repo_objs

    def unpack_many(self, ids, *, filter=None, preload=False):
        """
        Return iterator of items.

        *ids* is a chunk ID list of an item stream. *filter* is a callable
        to decide whether an item will be yielded. *preload* preloads the data chunks of every yielded item.

        Warning: if *preload* is True then all data chunks of every yielded item have to be retrieved,
        otherwise preloaded chunks will accumulate in RemoteRepository and create a memory leak.
        """
        hlids_preloaded = set()
        unpacker = msgpack.Unpacker(use_list=False)
        for data in self.fetch_many(ids):
            unpacker.feed(data)
            items = [Item(internal_dict=item) for item in unpacker]
            for item in items:
                if "chunks" in item:
                    item.chunks = [ChunkListEntry(*e) for e in item.chunks]

            if filter:
                items = [item for item in items if filter(item)]

            if preload:
                for item in items:
                    if "chunks" in item:
                        hlid = item.get("hlid", None)
                        if hlid is None:
                            preload_chunks = True
                        else:
                            if hlid in hlids_preloaded:
                                preload_chunks = False
                            else:
                                # the hardlink's chunks were not preloaded yet for another hardlink to the same inode
                                preload_chunks = True
                                hlids_preloaded.add(hlid)
                        if preload_chunks:
                            self.repository.preload([c.id for c in item.chunks])

            for item in items:
                yield item

    def fetch_many(self, ids, is_preloaded=False):
        for id_, cdata in zip(ids, self.repository.get_many(ids, is_preloaded=is_preloaded)):
            _, data = self.repo_objs.parse(id_, cdata)
            yield data


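# Illustrative sketch (not part of borg): how an item stream is read back through the pipeline
# above. `repository`, `repo_objs` and `item_stream_chunk_ids` are placeholders standing for
# already opened instances / the chunk id list taken from the archive metadata, as used by
# Archive further below.
#
#     pipeline = DownloadPipeline(repository, repo_objs)
#     for item in pipeline.unpack_many(item_stream_chunk_ids, preload=False):
#         print(item.path)

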
class ChunkBuffer:
    BUFFER_SIZE = 8 * 1024 * 1024

    def __init__(self, key, chunker_params=ITEMS_CHUNKER_PARAMS):
        self.buffer = BytesIO()
        self.packer = msgpack.Packer()
        self.chunks = []
        self.key = key
        self.chunker = get_chunker(*chunker_params, seed=self.key.chunk_seed, sparse=False)

    def add(self, item):
        self.buffer.write(self.packer.pack(item.as_dict()))
        if self.is_full():
            self.flush()

    def write_chunk(self, chunk):
        raise NotImplementedError

    def flush(self, flush=False):
        if self.buffer.tell() == 0:
            return
        self.buffer.seek(0)
        # The chunker returns a memoryview to its internal buffer,
        # thus a copy is needed before resuming the chunker iterator.
        # the metadata stream may produce all-zero chunks, so deal
        # with CH_ALLOC (and CH_HOLE, for completeness) here.
        chunks = []
        for chunk in self.chunker.chunkify(self.buffer):
            alloc = chunk.meta["allocation"]
            if alloc == CH_DATA:
                data = bytes(chunk.data)
            elif alloc in (CH_ALLOC, CH_HOLE):
                data = zeros[: chunk.meta["size"]]
            else:
                raise ValueError("chunk allocation has unsupported value of %r" % alloc)
            chunks.append(data)
        self.buffer.seek(0)
        self.buffer.truncate(0)
        # Leave the last partial chunk in the buffer unless flush is True
        end = None if flush or len(chunks) == 1 else -1
        for chunk in chunks[:end]:
            self.chunks.append(self.write_chunk(chunk))
        if end == -1:
            self.buffer.write(chunks[-1])

    def is_full(self):
        return self.buffer.tell() > self.BUFFER_SIZE


class CacheChunkBuffer(ChunkBuffer):
    def __init__(self, cache, key, stats, chunker_params=ITEMS_CHUNKER_PARAMS):
        super().__init__(key, chunker_params)
        self.cache = cache
        self.stats = stats

    def write_chunk(self, chunk):
        id_, _ = self.cache.add_chunk(self.key.id_hash(chunk), {}, chunk, stats=self.stats, wait=False)
        self.cache.repository.async_response(wait=False)
        return id_


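# Illustrative sketch (not part of borg): ChunkBuffer collects msgpacked items and cuts them
# into chunks; a subclass only has to provide write_chunk(). CacheChunkBuffer above stores the
# chunks via the Cache; Archive.add_item()/save() further below drive it roughly like this
# (cache/key/stats stand for the objects set up in Archive.__init__):
#
#     items_buffer = CacheChunkBuffer(cache, key, stats)
#     items_buffer.add(item)                  # may trigger an intermediate flush()
#     items_buffer.flush(flush=True)          # at the end, force out the last partial chunk
#     metadata_stream_ids = items_buffer.chunks

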
def get_item_uid_gid(item, *, numeric, uid_forced=None, gid_forced=None, uid_default=0, gid_default=0):
    if uid_forced is not None:
        uid = uid_forced
    else:
        uid = None if numeric else user2uid(item.get("user"))
        uid = item.get("uid") if uid is None else uid
        if uid is None or uid < 0:
            uid = uid_default
    if gid_forced is not None:
        gid = gid_forced
    else:
        gid = None if numeric else group2gid(item.get("group"))
        gid = item.get("gid") if gid is None else gid
        if gid is None or gid < 0:
            gid = gid_default
    return uid, gid


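# Illustrative sketch (not part of borg): the resolution order implemented above. With
# numeric=False the archived user/group names win (if they can be mapped on this system),
# then the archived numeric ids are used, then the defaults.
#
#     uid, gid = get_item_uid_gid(item, numeric=False)   # prefer item.user/item.group, fall back to uid/gid
#     uid, gid = get_item_uid_gid(item, numeric=True)    # use item.uid/item.gid as stored
#     uid, gid = get_item_uid_gid(item, numeric=True, uid_forced=0, gid_forced=0)  # force root:root

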
def archive_get_items(metadata, *, repo_objs, repository):
    if "item_ptrs" in metadata:  # looks like a v2+ archive
        assert "items" not in metadata
        items = []
        for id, cdata in zip(metadata.item_ptrs, repository.get_many(metadata.item_ptrs)):
            _, data = repo_objs.parse(id, cdata)
            ids = msgpack.unpackb(data)
            items.extend(ids)
        return items

    if "items" in metadata:  # legacy, v1 archive
        assert "item_ptrs" not in metadata
        return metadata.items


def archive_put_items(chunk_ids, *, repo_objs, cache=None, stats=None, add_reference=None):
    """Take a (potentially large) list of archive metadata stream chunk ids and write them to repo objects."""
    item_ptrs = []
    for i in range(0, len(chunk_ids), IDS_PER_CHUNK):
        data = msgpack.packb(chunk_ids[i : i + IDS_PER_CHUNK])
        id = repo_objs.id_hash(data)
        if cache is not None and stats is not None:
            cache.add_chunk(id, {}, data, stats=stats)
        elif add_reference is not None:
            cdata = repo_objs.format(id, {}, data)
            add_reference(id, len(data), cdata)
        else:
            raise NotImplementedError
        item_ptrs.append(id)
    return item_ptrs


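# Illustrative sketch (not part of borg): the item_ptrs indirection used by archive_get_items()
# and archive_put_items(). The archive object does not reference the (potentially huge) list of
# metadata stream chunk ids directly; the ids are msgpacked in groups of IDS_PER_CHUNK and only
# the ids of these small pointer objects are stored in the archive metadata.
# `metadata_stream_ids`, `repo_objs`, `cache` and `stats` are placeholders.
#
#     item_ptrs = archive_put_items(metadata_stream_ids, repo_objs=repo_objs, cache=cache, stats=stats)
#     # later, archive_get_items(metadata, ...) follows metadata.item_ptrs and
#     # returns the original metadata_stream_ids again.

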
class Archive:
    class DoesNotExist(Error):
        """Archive {} does not exist"""

    class AlreadyExists(Error):
        """Archive {} already exists"""

    class IncompatibleFilesystemEncodingError(Error):
        """Failed to encode filename "{}" into file system encoding "{}". Consider configuring the LANG environment variable."""

    def __init__(
        self,
        manifest,
        name,
        cache=None,
        create=False,
        checkpoint_interval=1800,
        numeric_ids=False,
        noatime=False,
        noctime=False,
        noflags=False,
        noacls=False,
        noxattrs=False,
        progress=False,
        chunker_params=CHUNKER_PARAMS,
        start=None,
        start_monotonic=None,
        end=None,
        consider_part_files=False,
        log_json=False,
        iec=False,
    ):
        self.cwd = os.getcwd()
        assert isinstance(manifest, Manifest)
        self.manifest = manifest
        self.key = manifest.repo_objs.key
        self.repo_objs = manifest.repo_objs
        self.repository = manifest.repository
        self.cache = cache
        self.stats = Statistics(output_json=log_json, iec=iec)
        self.iec = iec
        self.show_progress = progress
        self.name = name  # overwritten later with name from archive metadata
        self.name_in_manifest = name  # can differ from .name later (if borg check fixed duplicate archive names)
        self.comment = None
        self.checkpoint_interval = checkpoint_interval
        self.numeric_ids = numeric_ids
        self.noatime = noatime
        self.noctime = noctime
        self.noflags = noflags
        self.noacls = noacls
        self.noxattrs = noxattrs
        assert (start is None) == (
            start_monotonic is None
        ), "Logic error: if start is given, start_monotonic must be given as well and vice versa."
        if start is None:
            start = archive_ts_now()
            start_monotonic = time.monotonic()
        self.chunker_params = chunker_params
        self.start = start
        self.start_monotonic = start_monotonic
        if end is None:
            end = archive_ts_now()
        self.end = end
        self.consider_part_files = consider_part_files
        self.pipeline = DownloadPipeline(self.repository, self.repo_objs)
        self.create = create
        if self.create:
            self.items_buffer = CacheChunkBuffer(self.cache, self.key, self.stats)
            if name in manifest.archives:
                raise self.AlreadyExists(name)
            i = 0
            while True:
                self.checkpoint_name = "{}.checkpoint{}".format(name, i and (".%d" % i) or "")
                if self.checkpoint_name not in manifest.archives:
                    break
                i += 1
        else:
            info = self.manifest.archives.get(name)
            if info is None:
                raise self.DoesNotExist(name)
            self.load(info.id)

    def _load_meta(self, id):
        cdata = self.repository.get(id)
        _, data = self.repo_objs.parse(id, cdata)
        metadata = ArchiveItem(internal_dict=msgpack.unpackb(data))
        if metadata.version not in (1, 2):  # legacy: still need to read v1 archives
            raise Exception("Unknown archive metadata version")
        # note: metadata.items must not get written to disk!
        metadata.items = archive_get_items(metadata, repo_objs=self.repo_objs, repository=self.repository)
        return metadata

    def load(self, id):
        self.id = id
        self.metadata = self._load_meta(self.id)
        self.name = self.metadata.name
        self.comment = self.metadata.get("comment", "")

    @property
    def ts(self):
        """Timestamp of archive creation (start) in UTC"""
        ts = self.metadata.time
        return parse_timestamp(ts)

    @property
    def ts_end(self):
        """Timestamp of archive creation (end) in UTC"""
        # fall back to time if there is no time_end present in metadata
        ts = self.metadata.get("time_end") or self.metadata.time
        return parse_timestamp(ts)

    @property
    def fpr(self):
        return bin_to_hex(self.id)

    @property
    def duration(self):
        return format_timedelta(self.end - self.start)

    @property
    def duration_from_meta(self):
        return format_timedelta(self.ts_end - self.ts)

    def info(self):
        if self.create:
            stats = self.stats
            start = self.start
            end = self.end
        else:
            stats = self.calc_stats(self.cache)
            start = self.ts
            end = self.ts_end
        info = {
            "name": self.name,
            "id": self.fpr,
            "start": OutputTimestamp(start),
            "end": OutputTimestamp(end),
            "duration": (end - start).total_seconds(),
            "stats": stats.as_dict(),
        }
        if self.create:
            info["command_line"] = sys.argv
        else:
            info.update(
                {
                    "command_line": self.metadata.cmdline,
                    "hostname": self.metadata.hostname,
                    "username": self.metadata.username,
                    "comment": self.metadata.get("comment", ""),
                    "chunker_params": self.metadata.get("chunker_params", ""),
                }
            )
        return info

    def __str__(self):
        return """\
Repository: {location}
Archive name: {0.name}
Archive fingerprint: {0.fpr}
Time (start): {start}
Time (end): {end}
Duration: {0.duration}
""".format(
            self,
            start=OutputTimestamp(self.start),
            end=OutputTimestamp(self.end),
            location=self.repository._location.canonical_path(),
        )

    def __repr__(self):
        return "Archive(%r)" % self.name

    def item_filter(self, item, filter=None):
        if not self.consider_part_files and "part" in item:
            # this is a part(ial) file, we usually don't want to consider it.
            return False
        return filter(item) if filter else True

    def iter_items(self, filter=None, preload=False):
        # note: when calling this with preload=True, later fetch_many() must be called with
        # is_preloaded=True or the RemoteRepository code will leak memory!
        for item in self.pipeline.unpack_many(
            self.metadata.items, preload=preload, filter=lambda item: self.item_filter(item, filter)
        ):
            yield item

    def add_item(self, item, show_progress=True, stats=None):
        if show_progress and self.show_progress:
            if stats is None:
                stats = self.stats
            stats.show_progress(item=item, dt=0.2)
        self.items_buffer.add(item)

    def write_checkpoint(self):
        self.save(self.checkpoint_name)
        del self.manifest.archives[self.checkpoint_name]
        self.cache.chunk_decref(self.id, self.stats)

    def save(self, name=None, comment=None, timestamp=None, stats=None, additional_metadata=None):
        name = name or self.name
        if name in self.manifest.archives:
            raise self.AlreadyExists(name)
        self.items_buffer.flush(flush=True)
        item_ptrs = archive_put_items(
            self.items_buffer.chunks, repo_objs=self.repo_objs, cache=self.cache, stats=self.stats
        )
        duration = timedelta(seconds=time.monotonic() - self.start_monotonic)
        if timestamp is None:
            end = archive_ts_now()
            start = end - duration
        else:
            start = timestamp
            end = start + duration
        self.start = start
        self.end = end
        metadata = {
            "version": 2,
            "name": name,
            "comment": comment or "",
            "item_ptrs": item_ptrs,  # see #1473
            "cmdline": sys.argv,
            "hostname": hostname,
            "username": getuser(),
            "time": start.isoformat(timespec="microseconds"),
            "time_end": end.isoformat(timespec="microseconds"),
            "chunker_params": self.chunker_params,
        }
        # we always want to create archives with the additional metadata (nfiles, etc.),
        # because borg info relies on them. so, either use the given stats (from args)
        # or fall back to self.stats if it was not given.
        stats = stats or self.stats
        metadata.update(
            {
                "size": stats.osize,
                "nfiles": stats.nfiles,
                "size_parts": stats.osize_parts,
                "nfiles_parts": stats.nfiles_parts,
            }
        )
        metadata.update(additional_metadata or {})
        metadata = ArchiveItem(metadata)
        data = self.key.pack_and_authenticate_metadata(metadata.as_dict(), context=b"archive")
        self.id = self.repo_objs.id_hash(data)
        try:
            self.cache.add_chunk(self.id, {}, data, stats=self.stats)
        except IntegrityError as err:
            err_msg = str(err)
            # hack to avoid changing the RPC protocol by introducing new (more specific) exception class
            if "More than allowed put data" in err_msg:
                raise Error("%s - archive too big (issue #1473)!" % err_msg)
            else:
                raise
        while self.repository.async_response(wait=True) is not None:
            pass
        self.manifest.archives[name] = (self.id, metadata.time)
        self.manifest.write()
        self.repository.commit(compact=False)
        self.cache.commit()

    def calc_stats(self, cache, want_unique=True):
        if not want_unique:
            unique_size = 0
        else:

            def add(id):
                entry = cache.chunks[id]
                archive_index.add(id, 1, entry.size)

            archive_index = ChunkIndex()
            sync = CacheSynchronizer(archive_index)
            add(self.id)
            # we must escape any % char in the archive name, because we use it in a format string, see #6500
            arch_name_escd = self.name.replace("%", "%%")
            pi = ProgressIndicatorPercent(
                total=len(self.metadata.items),
                msg="Calculating statistics for archive %s ... %%3.0f%%%%" % arch_name_escd,
                msgid="archive.calc_stats",
            )
            for id, chunk in zip(self.metadata.items, self.repository.get_many(self.metadata.items)):
                pi.show(increase=1)
                add(id)
                _, data = self.repo_objs.parse(id, chunk)
                sync.feed(data)
            unique_size = archive_index.stats_against(cache.chunks)[1]
            pi.finish()

        stats = Statistics(iec=self.iec)
        stats.usize = unique_size  # the part files use same chunks as the full file
        stats.nfiles = self.metadata.nfiles
        stats.osize = self.metadata.size
        if self.consider_part_files:
            stats.nfiles += self.metadata.nfiles_parts
            stats.osize += self.metadata.size_parts
        return stats

    @contextmanager
    def extract_helper(self, item, path, hlm, *, dry_run=False):
        hardlink_set = False
        # Hard link?
        if "hlid" in item:
            link_target = hlm.retrieve(id=item.hlid)
            if link_target is not None and has_link:
                if not dry_run:
                    # another hardlink to same inode (same hlid) was extracted previously, just link to it
                    with backup_io("link"):
                        os.link(link_target, path, follow_symlinks=False)
                hardlink_set = True
        yield hardlink_set
        if not hardlink_set:
            if "hlid" in item and has_link:
                # Update entry with extracted item path, so that following hardlinks don't extract twice.
                # We have hardlinking support, so we will hardlink not extract.
                hlm.remember(id=item.hlid, info=path)
            else:
                # Broken platform with no hardlinking support.
                # In this case, we *want* to extract twice, because there is no other way.
                pass

    def extract_item(
        self,
        item,
        restore_attrs=True,
        dry_run=False,
        stdout=False,
        sparse=False,
        hlm=None,
        stripped_components=0,
        original_path=None,
        pi=None,
    ):
        """
        Extract archive item.

        :param item: the item to extract
        :param restore_attrs: restore file attributes
        :param dry_run: do not write any data
        :param stdout: write extracted data to stdout
        :param sparse: write sparse files (chunk-granularity, independent of the original being sparse)
        :param hlm: maps hlid to link_target for extracting subtrees with hardlinks correctly
        :param stripped_components: stripped leading path components to correct hard link extraction
        :param original_path: 'path' key as stored in archive
        :param pi: ProgressIndicatorPercent (or similar) for file extraction progress (in bytes)
        """
        has_damaged_chunks = "chunks_healthy" in item
        if dry_run or stdout:
            with self.extract_helper(item, "", hlm, dry_run=dry_run or stdout) as hardlink_set:
                if not hardlink_set:
                    # it does not really set hardlinks due to dry_run, but we need to behave same
                    # as non-dry_run concerning fetching preloaded chunks from the pipeline or
                    # it would get stuck.
                    if "chunks" in item:
                        item_chunks_size = 0
                        for data in self.pipeline.fetch_many([c.id for c in item.chunks], is_preloaded=True):
                            if pi:
                                pi.show(increase=len(data), info=[remove_surrogates(item.path)])
                            if stdout:
                                sys.stdout.buffer.write(data)
                            item_chunks_size += len(data)
                        if stdout:
                            sys.stdout.buffer.flush()
                        if "size" in item:
                            item_size = item.size
                            if item_size != item_chunks_size:
                                raise BackupError(
                                    "Size inconsistency detected: size {}, chunks size {}".format(
                                        item_size, item_chunks_size
                                    )
                                )
                        if has_damaged_chunks:
                            raise BackupError("File has damaged (all-zero) chunks. Try running borg check --repair.")
            return

        original_path = original_path or item.path
        dest = self.cwd
        if item.path.startswith(("/", "../")):
            raise Exception("Path should be relative and local")
        path = os.path.join(dest, item.path)
        # Attempt to remove existing files, ignore errors on failure
        try:
            st = os.stat(path, follow_symlinks=False)
            if stat.S_ISDIR(st.st_mode):
                os.rmdir(path)
            else:
                os.unlink(path)
        except UnicodeEncodeError:
            raise self.IncompatibleFilesystemEncodingError(path, sys.getfilesystemencoding()) from None
        except OSError:
            pass

        def make_parent(path):
            parent_dir = os.path.dirname(path)
            if not os.path.exists(parent_dir):
                os.makedirs(parent_dir)

        mode = item.mode
        if stat.S_ISREG(mode):
            with backup_io("makedirs"):
                make_parent(path)
            with self.extract_helper(item, path, hlm) as hardlink_set:
                if hardlink_set:
                    return
                with backup_io("open"):
                    fd = open(path, "wb")
                with fd:
                    ids = [c.id for c in item.chunks]
                    for data in self.pipeline.fetch_many(ids, is_preloaded=True):
                        if pi:
                            pi.show(increase=len(data), info=[remove_surrogates(item.path)])
                        with backup_io("write"):
                            if sparse and zeros.startswith(data):
                                # all-zero chunk: create a hole in a sparse file
                                fd.seek(len(data), 1)
                            else:
                                fd.write(data)
                with backup_io("truncate_and_attrs"):
                    pos = item_chunks_size = fd.tell()
                    fd.truncate(pos)
                    fd.flush()
                    self.restore_attrs(path, item, fd=fd.fileno())
                if "size" in item:
                    item_size = item.size
                    if item_size != item_chunks_size:
                        raise BackupError(
                            "Size inconsistency detected: size {}, chunks size {}".format(item_size, item_chunks_size)
                        )
                if has_damaged_chunks:
                    raise BackupError("File has damaged (all-zero) chunks. Try running borg check --repair.")
            return
        with backup_io:
            # No repository access beyond this point.
            if stat.S_ISDIR(mode):
                make_parent(path)
                if not os.path.exists(path):
                    os.mkdir(path)
                if restore_attrs:
                    self.restore_attrs(path, item)
            elif stat.S_ISLNK(mode):
                make_parent(path)
                with self.extract_helper(item, path, hlm) as hardlink_set:
                    if hardlink_set:
                        # unusual, but possible: this is a hardlinked symlink.
                        return
                    source = item.source
                    try:
                        os.symlink(source, path)
                    except UnicodeEncodeError:
                        raise self.IncompatibleFilesystemEncodingError(source, sys.getfilesystemencoding()) from None
                    self.restore_attrs(path, item, symlink=True)
            elif stat.S_ISFIFO(mode):
                make_parent(path)
                with self.extract_helper(item, path, hlm) as hardlink_set:
                    if hardlink_set:
                        return
                    os.mkfifo(path)
                    self.restore_attrs(path, item)
            elif stat.S_ISCHR(mode) or stat.S_ISBLK(mode):
                make_parent(path)
                with self.extract_helper(item, path, hlm) as hardlink_set:
                    if hardlink_set:
                        return
                    os.mknod(path, item.mode, item.rdev)
                    self.restore_attrs(path, item)
            else:
                raise Exception("Unknown archive item type %r" % item.mode)

    def restore_attrs(self, path, item, symlink=False, fd=None):
        """
        Restore filesystem attributes on *path* (*fd*) from *item*.

        Does not access the repository.
        """
        backup_io.op = "attrs"
        uid, gid = get_item_uid_gid(item, numeric=self.numeric_ids)
        # This code is a bit of a mess due to os specific differences
        if not is_win32:
            try:
                if fd:
                    os.fchown(fd, uid, gid)
                else:
                    os.chown(path, uid, gid, follow_symlinks=False)
            except OSError:
                pass
            if fd:
                os.fchmod(fd, item.mode)
            else:
                # To check whether a particular function in the os module accepts False for its
                # follow_symlinks parameter, the in operator on supports_follow_symlinks should be
                # used. However, os.chmod is special as some platforms without a working lchmod() do
                # have fchmodat(), which has a flag that makes it behave like lchmod(). fchmodat()
                # is ignored when deciding whether or not os.chmod should be set in
                # os.supports_follow_symlinks. Work around this by using try/except.
                try:
                    os.chmod(path, item.mode, follow_symlinks=False)
                except NotImplementedError:
                    if not symlink:
                        os.chmod(path, item.mode)
            if not self.noacls:
                acl_set(path, item, self.numeric_ids, fd=fd)
            if not self.noxattrs and "xattrs" in item:
                # chown removes Linux capabilities, so set the extended attributes at the end, after chown,
                # since they include the Linux capabilities in the "security.capability" attribute.
                warning = xattr.set_all(fd or path, item.xattrs, follow_symlinks=False)
                if warning:
                    set_ec(EXIT_WARNING)
            # set timestamps rather late
            mtime = item.mtime
            atime = item.atime if "atime" in item else mtime
            if "birthtime" in item:
                birthtime = item.birthtime
                try:
                    # This should work on FreeBSD, NetBSD, and Darwin and be harmless on other platforms.
                    # See utimes(2) on either of the BSDs for details.
                    if fd:
                        os.utime(fd, None, ns=(atime, birthtime))
                    else:
                        os.utime(path, None, ns=(atime, birthtime), follow_symlinks=False)
                except OSError:
                    # some systems don't support calling utime on a symlink
                    pass
            try:
                if fd:
                    os.utime(fd, None, ns=(atime, mtime))
                else:
                    os.utime(path, None, ns=(atime, mtime), follow_symlinks=False)
            except OSError:
                # some systems don't support calling utime on a symlink
                pass
            # bsdflags include the immutable flag and need to be set last:
            if not self.noflags and "bsdflags" in item:
                try:
                    set_flags(path, item.bsdflags, fd=fd)
                except OSError:
                    pass

    def set_meta(self, key, value):
        metadata = self._load_meta(self.id)
        setattr(metadata, key, value)
        if "items" in metadata:
            del metadata.items
        data = msgpack.packb(metadata.as_dict())
        new_id = self.key.id_hash(data)
        self.cache.add_chunk(new_id, {}, data, stats=self.stats)
        self.manifest.archives[self.name] = (new_id, metadata.time)
        self.cache.chunk_decref(self.id, self.stats)
        self.id = new_id

    def rename(self, name):
        if name in self.manifest.archives:
            raise self.AlreadyExists(name)
        oldname = self.name
        self.name = name
        self.set_meta("name", name)
        del self.manifest.archives[oldname]

    def delete(self, stats, progress=False, forced=False):
        class ChunksIndexError(Error):
            """Chunk ID {} missing from chunks index, corrupted chunks index - aborting transaction."""

        exception_ignored = object()

        def fetch_async_response(wait=True):
            try:
                return self.repository.async_response(wait=wait)
            except Repository.ObjectNotFound:
                nonlocal error
                # object not in repo - strange, but we wanted to delete it anyway.
                if forced == 0:
                    raise
                error = True
                return exception_ignored  # must not return None here

        def chunk_decref(id, stats, part=False):
            try:
                self.cache.chunk_decref(id, stats, wait=False, part=part)
            except KeyError:
                cid = bin_to_hex(id)
                raise ChunksIndexError(cid)
            else:
                fetch_async_response(wait=False)

        error = False
        try:
            unpacker = msgpack.Unpacker(use_list=False)
            items_ids = self.metadata.items
            pi = ProgressIndicatorPercent(
                total=len(items_ids), msg="Decrementing references %3.0f%%", msgid="archive.delete"
            )
            for (i, (items_id, data)) in enumerate(zip(items_ids, self.repository.get_many(items_ids))):
                if progress:
                    pi.show(i)
                _, data = self.repo_objs.parse(items_id, data)
                unpacker.feed(data)
                chunk_decref(items_id, stats)
                try:
                    for item in unpacker:
                        item = Item(internal_dict=item)
                        if "chunks" in item:
                            part = not self.consider_part_files and "part" in item
                            for chunk_id, size in item.chunks:
                                chunk_decref(chunk_id, stats, part=part)
                except (TypeError, ValueError):
                    # if items metadata spans multiple chunks and one chunk got dropped somehow,
                    # it could be that unpacker yields bad types
                    if forced == 0:
                        raise
                    error = True
            if progress:
                pi.finish()
        except (msgpack.UnpackException, Repository.ObjectNotFound):
            # items metadata corrupted
            if forced == 0:
                raise
            error = True

        # delete the blocks that store all the references that end up being loaded into metadata.items:
        for id in self.metadata.item_ptrs:
            chunk_decref(id, stats)

        # in forced delete mode, we try hard to delete at least the manifest entry,
        # if possible also the archive superblock, even if processing the items raises
        # some harmless exception.
        chunk_decref(self.id, stats)
        del self.manifest.archives[self.name]
        while fetch_async_response(wait=True) is not None:
            # we did async deletes, process outstanding results (== exceptions),
            # so there is nothing pending when we return and our caller wants to commit.
            pass
        if error:
            logger.warning("forced deletion succeeded, but the deleted archive was corrupted.")
            logger.warning("borg check --repair is required to free all space.")

@staticmethod
|
|
|
|
def compare_archives_iter(archive1, archive2, matcher=None, can_compare_chunk_ids=False):
|
|
|
|
"""
|
|
|
|
Yields tuples with a path and an ItemDiff instance describing changes/indicating equality.
|
2017-07-19 10:56:05 +00:00
|
|
|
|
2017-07-19 12:29:14 +00:00
|
|
|
:param matcher: PatternMatcher class to restrict results to only matching paths.
|
|
|
|
:param can_compare_chunk_ids: Whether --chunker-params are the same for both archives.
|
|
|
|
"""
|
2017-07-19 10:56:05 +00:00
|
|
|
|
2017-07-19 12:29:14 +00:00
|
|
|
def compare_items(item1, item2):
|
|
|
|
return ItemDiff(
|
|
|
|
item1,
|
|
|
|
item2,
|
|
|
|
archive1.pipeline.fetch_many([c.id for c in item1.get("chunks", [])]),
|
|
|
|
archive2.pipeline.fetch_many([c.id for c in item2.get("chunks", [])]),
|
|
|
|
can_compare_chunk_ids=can_compare_chunk_ids,
|
|
|
|
)
|
2017-07-19 10:56:05 +00:00
|
|
|
|
2017-07-19 12:29:14 +00:00
|
|
|
orphans_archive1 = OrderedDict()
|
|
|
|
orphans_archive2 = OrderedDict()
|
2017-07-19 10:56:05 +00:00
|
|
|
|
|
|
|
for item1, item2 in zip_longest(
|
|
|
|
archive1.iter_items(lambda item: matcher.match(item.path)),
|
|
|
|
archive2.iter_items(lambda item: matcher.match(item.path)),
|
|
|
|
):
|
|
|
|
if item1 and item2 and item1.path == item2.path:
|
2022-05-08 12:14:47 +00:00
|
|
|
yield (item1.path, compare_items(item1, item2))
|
2017-07-19 10:56:05 +00:00
|
|
|
continue
|
|
|
|
if item1:
|
|
|
|
matching_orphan = orphans_archive2.pop(item1.path, None)
|
|
|
|
if matching_orphan:
|
2022-05-08 12:14:47 +00:00
|
|
|
yield (item1.path, compare_items(item1, matching_orphan))
|
2017-07-19 10:56:05 +00:00
|
|
|
else:
|
|
|
|
orphans_archive1[item1.path] = item1
|
|
|
|
if item2:
|
|
|
|
matching_orphan = orphans_archive1.pop(item2.path, None)
|
|
|
|
if matching_orphan:
|
2022-05-08 12:14:47 +00:00
|
|
|
yield (matching_orphan.path, compare_items(matching_orphan, item2))
|
2017-07-19 10:56:05 +00:00
|
|
|
else:
|
|
|
|
orphans_archive2[item2.path] = item2
|
|
|
|
# At this point orphans_* contain items that had no matching partner in the other archive
|
|
|
|
for added in orphans_archive2.values():
|
|
|
|
path = added.path
|
2017-07-19 12:29:14 +00:00
|
|
|
deleted_item = Item.create_deleted(path)
|
|
|
|
yield (path, compare_items(deleted_item, added))
|
2017-07-19 10:56:05 +00:00
|
|
|
for deleted in orphans_archive1.values():
|
|
|
|
path = deleted.path
|
2017-07-19 12:29:14 +00:00
|
|
|
deleted_item = Item.create_deleted(path)
|
|
|
|
yield (path, compare_items(deleted, deleted_item))
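    # Illustrative usage sketch (added note, not part of the original source; assumes two opened
    # Archive instances and a PatternMatcher from borg.patterns that matches every path):
    #
    #   matcher = PatternMatcher(fallback=True)
    #   for path, diff in Archive.compare_archives_iter(archive1, archive2, matcher=matcher):
    #       ...  # diff is an ItemDiff; both equal and changed paths are yielded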
|
2017-07-19 10:56:05 +00:00
|
|
|
|
2017-07-29 14:11:33 +00:00
|
|
|
|
|
|
|
class MetadataCollector:
    def __init__(self, *, noatime, noctime, nobirthtime, numeric_ids, noflags, noacls, noxattrs):
        self.noatime = noatime
        self.noctime = noctime
        self.numeric_ids = numeric_ids
        self.noflags = noflags
        self.noacls = noacls
        self.noxattrs = noxattrs
        self.nobirthtime = nobirthtime

    def stat_simple_attrs(self, st):
        attrs = {}
        attrs["mode"] = st.st_mode
        # borg can work with archives only having mtime (very old borg archives do not have
        # atime/ctime). it can be useful to omit atime/ctime, if they change without the
        # file content changing - e.g. to get better metadata deduplication.
        attrs["mtime"] = safe_ns(st.st_mtime_ns)
        if not self.noatime:
            attrs["atime"] = safe_ns(st.st_atime_ns)
        if not self.noctime:
            attrs["ctime"] = safe_ns(st.st_ctime_ns)
        if not self.nobirthtime and hasattr(st, "st_birthtime"):
            # sadly, there's no stat_result.st_birthtime_ns
            attrs["birthtime"] = safe_ns(int(st.st_birthtime * 10**9))
        attrs["uid"] = st.st_uid
        attrs["gid"] = st.st_gid
        if not self.numeric_ids:
            user = uid2user(st.st_uid)
            if user is not None:
                attrs["user"] = user
            group = gid2group(st.st_gid)
            if group is not None:
                attrs["group"] = group
        return attrs

    def stat_ext_attrs(self, st, path, fd=None):
        attrs = {}
        if not self.noflags:
            with backup_io("extended stat (flags)"):
                flags = get_flags(path, st, fd=fd)
            attrs["bsdflags"] = flags
        if not self.noxattrs:
            with backup_io("extended stat (xattrs)"):
                xattrs = xattr.get_all(fd or path, follow_symlinks=False)
            attrs["xattrs"] = StableDict(xattrs)
        if not self.noacls:
            with backup_io("extended stat (ACLs)"):
                acl_get(path, attrs, st, self.numeric_ids, fd=fd)
        return attrs

    def stat_attrs(self, st, path, fd=None):
        attrs = self.stat_simple_attrs(st)
        attrs.update(self.stat_ext_attrs(st, path, fd=fd))
        return attrs
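    # Added note (not part of the original source): splitting simple vs. extended attrs lets
    # callers that only need cheap metadata first call stat_simple_attrs() and defer the
    # potentially slow xattr/ACL/flags syscalls in stat_ext_attrs() until after the file
    # content has been processed (process_file does exactly that).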
|
|
|
|
|
2017-03-26 11:51:04 +00:00
|
|
|
|
2021-01-14 19:41:57 +00:00
|
|
|
# remember a few recently used all-zero chunk hashes in this mapping.
# (hash_func, chunk_length) -> chunk_hash
# we play safe and have the hash_func in the mapping key, in case we
# have different hash_funcs within the same borg run.
zero_chunk_ids = LRUCache(10, dispose=lambda _: None)


def cached_hash(chunk, id_hash):
    allocation = chunk.meta["allocation"]
    if allocation == CH_DATA:
        data = chunk.data
        chunk_id = id_hash(data)
    elif allocation in (CH_HOLE, CH_ALLOC):
        size = chunk.meta["size"]
        assert size <= len(zeros)
        data = memoryview(zeros)[:size]
        try:
            chunk_id = zero_chunk_ids[(id_hash, size)]
        except KeyError:
            chunk_id = id_hash(data)
            zero_chunk_ids[(id_hash, size)] = chunk_id
    else:
        raise ValueError("unexpected allocation type")
    return chunk_id, data
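# Illustrative usage sketch (added note, not part of the original source; "key" stands for any
# key object providing an id_hash function): hashing two all-zero chunks of the same length
# computes the hash only once, the second call is answered from the zero_chunk_ids cache.
#
#   chunk = Chunk(None, allocation=CH_ALLOC, size=4096)
#   id1, data1 = cached_hash(chunk, key.id_hash)  # computes and caches the hash of 4096 zero bytes
#   id2, data2 = cached_hash(chunk, key.id_hash)  # cache hit: id2 == id1, no re-hashing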
|
|
|
|
|
|
|
|
|
2017-07-29 14:11:33 +00:00
|
|
|
class ChunksProcessor:
|
|
|
|
# Processes an iterator of chunks for an Item
|
2012-03-03 13:02:22 +00:00
|
|
|
|
2017-07-29 14:11:33 +00:00
|
|
|
def __init__(self, *, key, cache, add_item, write_checkpoint, checkpoint_interval, rechunkify):
|
|
|
|
self.key = key
|
|
|
|
self.cache = cache
|
|
|
|
self.add_item = add_item
|
|
|
|
self.write_checkpoint = write_checkpoint
|
|
|
|
self.checkpoint_interval = checkpoint_interval
|
|
|
|
self.last_checkpoint = time.monotonic()
|
2017-10-29 09:53:12 +00:00
|
|
|
self.rechunkify = rechunkify
|
2010-10-30 11:44:25 +00:00
|
|
|
|
2016-11-19 18:09:47 +00:00
|
|
|
def write_part_file(self, item, from_chunk, number):
|
|
|
|
item = Item(internal_dict=item.as_dict())
|
|
|
|
length = len(item.chunks)
|
|
|
|
# the item should only have the *additional* chunks we processed after the last partial item:
|
|
|
|
item.chunks = item.chunks[from_chunk:]
|
2017-10-14 02:24:26 +00:00
|
|
|
# for borg recreate, we already have a size member in the source item (giving the total file size),
|
|
|
|
# but we consider only a part of the file here, thus we must recompute the size from the chunks:
|
|
|
|
item.get_size(memorize=True, from_chunks=True)
|
2016-11-19 18:09:47 +00:00
|
|
|
item.path += ".borg_part_%d" % number
|
|
|
|
item.part = number
|
|
|
|
number += 1
|
|
|
|
self.add_item(item, show_progress=False)
|
|
|
|
self.write_checkpoint()
|
|
|
|
return length, number
|
|
|
|
|
2019-06-22 21:19:37 +00:00
|
|
|
def maybe_checkpoint(self, item, from_chunk, part_number, forced=False):
|
|
|
|
sig_int_triggered = sig_int and sig_int.action_triggered()
|
|
|
|
if (
|
|
|
|
forced
|
|
|
|
or sig_int_triggered
|
|
|
|
or self.checkpoint_interval
|
|
|
|
and time.monotonic() - self.last_checkpoint > self.checkpoint_interval
|
|
|
|
):
|
|
|
|
if sig_int_triggered:
|
|
|
|
logger.info("checkpoint requested: starting checkpoint creation...")
|
|
|
|
from_chunk, part_number = self.write_part_file(item, from_chunk, part_number)
|
|
|
|
self.last_checkpoint = time.monotonic()
|
|
|
|
if sig_int_triggered:
|
|
|
|
sig_int.action_completed()
|
|
|
|
logger.info("checkpoint requested: finished checkpoint creation!")
|
|
|
|
return from_chunk, part_number
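        # Added note (not part of the original source): the big "if" above relies on "and" binding
        # more tightly than "or", i.e. it is equivalent to:
        #   forced or sig_int_triggered or (self.checkpoint_interval and elapsed > self.checkpoint_interval)
        # so a checkpoint_interval of 0 disables the time-based trigger but keeps the other two.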
|
|
|
|
|
2018-03-10 14:11:08 +00:00
|
|
|
def process_file_chunks(self, item, cache, stats, show_progress, chunk_iter, chunk_processor=None):
|
2016-11-19 18:09:47 +00:00
|
|
|
if not chunk_processor:
|
2022-07-06 13:37:27 +00:00
|
|
|
|
2020-12-15 01:37:26 +00:00
|
|
|
def chunk_processor(chunk):
|
2022-10-19 19:40:02 +00:00
|
|
|
started_hashing = time.monotonic()
|
2021-01-08 18:16:47 +00:00
|
|
|
chunk_id, data = cached_hash(chunk, self.key.id_hash)
|
2022-10-19 19:40:02 +00:00
|
|
|
stats.hashing_time += time.monotonic() - started_hashing
|
2022-09-05 00:53:28 +00:00
|
|
|
chunk_entry = cache.add_chunk(chunk_id, {}, data, stats=stats, wait=False)
|
2017-03-05 04:19:32 +00:00
|
|
|
self.cache.repository.async_response(wait=False)
|
|
|
|
return chunk_entry
|
2016-07-21 21:56:58 +00:00
|
|
|
|
2016-06-26 16:07:01 +00:00
|
|
|
item.chunks = []
|
2017-10-29 09:53:12 +00:00
|
|
|
# if we rechunkify, we'll get a fundamentally different chunks list, thus we need
|
|
|
|
# to get rid of .chunks_healthy, as it might not correspond to .chunks any more.
|
|
|
|
if self.rechunkify and "chunks_healthy" in item:
|
|
|
|
del item.chunks_healthy
|
2016-07-21 21:56:58 +00:00
|
|
|
from_chunk = 0
|
|
|
|
part_number = 1
|
2020-12-15 01:37:26 +00:00
|
|
|
for chunk in chunk_iter:
|
|
|
|
item.chunks.append(chunk_processor(chunk))
|
2018-03-10 14:11:08 +00:00
|
|
|
if show_progress:
|
|
|
|
stats.show_progress(item=item, dt=0.2)
|
2019-06-22 21:19:37 +00:00
|
|
|
from_chunk, part_number = self.maybe_checkpoint(item, from_chunk, part_number, forced=False)
|
2016-07-21 21:56:58 +00:00
|
|
|
else:
|
2016-07-28 15:55:40 +00:00
|
|
|
if part_number > 1:
|
|
|
|
if item.chunks[from_chunk:]:
|
|
|
|
# if we already have created a part item inside this file, we want to put the final
|
|
|
|
# chunks (if any) into a part item also (so all parts can be concatenated to get
|
|
|
|
# the complete file):
|
2019-06-22 21:19:37 +00:00
|
|
|
from_chunk, part_number = self.maybe_checkpoint(item, from_chunk, part_number, forced=True)
|
2016-07-28 15:55:40 +00:00
|
|
|
|
|
|
|
# if we created part files, we have referenced all chunks from the part files,
|
|
|
|
# but we also will reference the same chunks also from the final, complete file:
|
|
|
|
for chunk in item.chunks:
|
2019-02-23 08:44:33 +00:00
|
|
|
cache.chunk_incref(chunk.id, stats, size=chunk.size, part=True)
|
|
|
|
stats.nfiles_parts += part_number - 1
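        # Added note (not part of the original source): the "else:" above belongs to the
        # "for chunk in chunk_iter:" loop; it only runs when the loop finished without break,
        # i.e. after the whole file was chunked. If part files were written (part_number > 1),
        # it turns any trailing chunks into a final .borg_part_N item and re-increfs all chunks
        # for the complete file item.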
|
2016-06-26 16:07:01 +00:00
|
|
|
|
2017-07-29 14:11:33 +00:00
|
|
|
|
|
|
|
class FilesystemObjectProcessors:
|
|
|
|
    # When ported to threading, this will no longer need chunker, cache, or key.
|
|
|
|
# write_checkpoint should then be in the item buffer,
|
|
|
|
# and process_file becomes a callback passed to __init__.
|
|
|
|
|
|
|
|
def __init__(
|
|
|
|
self,
|
|
|
|
*,
|
|
|
|
metadata_collector,
|
|
|
|
cache,
|
|
|
|
key,
|
|
|
|
add_item,
|
|
|
|
process_file_chunks,
|
2021-02-07 02:42:46 +00:00
|
|
|
chunker_params,
|
|
|
|
show_progress,
|
|
|
|
sparse,
|
2021-04-26 15:50:21 +00:00
|
|
|
log_json,
|
|
|
|
iec,
|
|
|
|
file_status_printer=None,
|
|
|
|
):
|
2017-07-29 14:11:33 +00:00
|
|
|
self.metadata_collector = metadata_collector
|
|
|
|
self.cache = cache
|
|
|
|
self.key = key
|
|
|
|
self.add_item = add_item
|
|
|
|
self.process_file_chunks = process_file_chunks
|
2018-03-10 14:11:08 +00:00
|
|
|
self.show_progress = show_progress
|
2021-04-26 15:50:21 +00:00
|
|
|
self.print_file_status = file_status_printer or (lambda *args: None)
|
2017-07-29 14:11:33 +00:00
|
|
|
|
2022-05-17 17:46:52 +00:00
|
|
|
self.hlm = HardLinkManager(id_type=tuple, info_type=(list, type(None))) # (dev, ino) -> chunks or None
|
2021-03-20 23:33:31 +00:00
|
|
|
self.stats = Statistics(output_json=log_json, iec=iec) # threading: done by cache (including progress)
|
2017-07-29 14:11:33 +00:00
|
|
|
self.cwd = os.getcwd()
|
2020-12-10 23:34:11 +00:00
|
|
|
self.chunker = get_chunker(*chunker_params, seed=key.chunk_seed, sparse=sparse)
|
2017-07-29 14:11:33 +00:00
|
|
|
|
|
|
|
@contextmanager
|
|
|
|
def create_helper(self, path, st, status=None, hardlinkable=True):
|
|
|
|
safe_path = make_path_safe(path)
|
|
|
|
item = Item(path=safe_path)
|
|
|
|
hardlinked = hardlinkable and st.st_nlink > 1
|
2022-05-08 12:14:47 +00:00
|
|
|
update_map = False
|
2017-07-29 14:11:33 +00:00
|
|
|
if hardlinked:
|
2022-05-08 12:14:47 +00:00
|
|
|
status = "h" # hardlink
|
2022-05-17 17:46:52 +00:00
|
|
|
nothing = object()
|
|
|
|
chunks = self.hlm.retrieve(id=(st.st_ino, st.st_dev), default=nothing)
|
|
|
|
if chunks is nothing:
|
2022-05-08 12:14:47 +00:00
|
|
|
update_map = True
|
2022-05-17 17:46:52 +00:00
|
|
|
elif chunks is not None:
|
2022-05-08 12:14:47 +00:00
|
|
|
item.chunks = chunks
|
2022-05-17 17:46:52 +00:00
|
|
|
item.hlid = self.hlm.hardlink_id_from_inode(ino=st.st_ino, dev=st.st_dev)
|
2022-05-08 12:14:47 +00:00
|
|
|
yield item, status, hardlinked
|
2018-03-10 14:11:08 +00:00
|
|
|
self.add_item(item, stats=self.stats)
|
2022-05-08 12:14:47 +00:00
|
|
|
if update_map:
|
|
|
|
# remember the hlid of this fs object and if the item has chunks,
|
|
|
|
# also remember them, so we do not have to re-chunk a hardlink.
|
|
|
|
chunks = item.chunks if "chunks" in item else None
|
2022-05-17 17:46:52 +00:00
|
|
|
self.hlm.remember(id=(st.st_ino, st.st_dev), info=chunks)
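        # Added note (not part of the original source): code after the "yield" only executes if the
        # caller's "with create_helper(...)" block completed without raising, so the item is added to
        # the archive (and the hardlink map updated) only for fully processed filesystem objects.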
|
2017-07-29 14:11:33 +00:00
|
|
|
|
2020-11-15 14:31:01 +00:00
|
|
|
def process_dir_with_fd(self, *, path, fd, st):
|
2022-05-08 12:14:47 +00:00
|
|
|
with self.create_helper(path, st, "d", hardlinkable=False) as (item, status, hardlinked):
|
2018-08-12 23:18:00 +00:00
|
|
|
item.update(self.metadata_collector.stat_attrs(st, path, fd=fd))
|
2017-07-29 14:11:33 +00:00
|
|
|
return status
|
|
|
|
|
2020-11-15 14:31:01 +00:00
|
|
|
def process_dir(self, *, path, parent_fd, name, st):
|
2022-05-08 12:14:47 +00:00
|
|
|
with self.create_helper(path, st, "d", hardlinkable=False) as (item, status, hardlinked):
|
2020-11-15 14:31:01 +00:00
|
|
|
with OsOpen(path=path, parent_fd=parent_fd, name=name, flags=flags_dir, noatime=True, op="dir_open") as fd:
|
|
|
|
# fd is None for directories on windows, in that case a race condition check is not possible.
|
|
|
|
if fd is not None:
|
|
|
|
with backup_io("fstat"):
|
|
|
|
st = stat_update_check(st, os.fstat(fd))
|
|
|
|
item.update(self.metadata_collector.stat_attrs(st, path, fd=fd))
|
|
|
|
return status
|
|
|
|
|
2018-08-12 23:18:00 +00:00
|
|
|
def process_fifo(self, *, path, parent_fd, name, st):
|
2022-05-08 12:14:47 +00:00
|
|
|
with self.create_helper(path, st, "f") as (item, status, hardlinked): # fifo
|
2018-08-12 23:18:00 +00:00
|
|
|
with OsOpen(path=path, parent_fd=parent_fd, name=name, flags=flags_normal, noatime=True) as fd:
|
|
|
|
with backup_io("fstat"):
|
2019-02-17 05:45:24 +00:00
|
|
|
st = stat_update_check(st, os.fstat(fd))
|
2018-08-12 23:18:00 +00:00
|
|
|
item.update(self.metadata_collector.stat_attrs(st, path, fd=fd))
|
|
|
|
return status
|
2017-07-29 14:11:33 +00:00
|
|
|
|
2018-08-12 23:18:00 +00:00
|
|
|
def process_dev(self, *, path, parent_fd, name, st, dev_type):
|
2022-05-08 12:14:47 +00:00
|
|
|
with self.create_helper(path, st, dev_type) as (item, status, hardlinked): # char/block device
|
2018-12-24 00:30:51 +00:00
|
|
|
# looks like we can not work fd-based here without causing issues when trying to open/close the device
|
|
|
|
with backup_io("stat"):
|
2021-10-14 15:46:10 +00:00
|
|
|
st = stat_update_check(st, os_stat(path=path, parent_fd=parent_fd, name=name, follow_symlinks=False))
|
2018-12-24 00:30:51 +00:00
|
|
|
item.rdev = st.st_rdev
|
|
|
|
item.update(self.metadata_collector.stat_attrs(st, path))
|
|
|
|
return status
|
2017-07-29 14:11:33 +00:00
|
|
|
|
2018-08-12 23:18:00 +00:00
|
|
|
def process_symlink(self, *, path, parent_fd, name, st):
|
2022-05-08 12:14:47 +00:00
|
|
|
with self.create_helper(path, st, "s", hardlinkable=True) as (item, status, hardlinked):
|
2018-08-12 23:18:00 +00:00
|
|
|
fname = name if name is not None and parent_fd is not None else path
|
2017-07-29 14:11:33 +00:00
|
|
|
with backup_io("readlink"):
|
2018-08-12 23:18:00 +00:00
|
|
|
source = os.readlink(fname, dir_fd=parent_fd)
|
2017-07-29 14:11:33 +00:00
|
|
|
item.source = source
|
2018-08-12 23:18:00 +00:00
|
|
|
item.update(self.metadata_collector.stat_attrs(st, path)) # can't use FD here?
|
2017-07-29 14:11:33 +00:00
|
|
|
return status
|
|
|
|
|
2020-11-01 17:45:56 +00:00
|
|
|
def process_pipe(self, *, path, cache, fd, mode, user, group):
|
2021-04-26 15:50:21 +00:00
|
|
|
status = "i" # stdin (or other pipe)
|
|
|
|
self.print_file_status(status, path)
|
|
|
|
status = None # we already printed the status
|
2020-11-01 17:45:56 +00:00
|
|
|
uid = user2uid(user)
|
|
|
|
if uid is None:
|
|
|
|
raise Error("no such user: %s" % user)
|
|
|
|
gid = group2gid(group)
|
|
|
|
if gid is None:
|
|
|
|
raise Error("no such group: %s" % group)
|
2016-05-31 23:45:45 +00:00
|
|
|
t = int(time.time()) * 1000000000
|
|
|
|
item = Item(
|
|
|
|
path=path,
|
2020-11-01 17:45:56 +00:00
|
|
|
mode=mode & 0o107777 | 0o100000, # forcing regular file mode
|
|
|
|
uid=uid,
|
|
|
|
user=user,
|
|
|
|
gid=gid,
|
|
|
|
group=group,
|
2016-05-31 23:45:45 +00:00
|
|
|
mtime=t,
|
|
|
|
atime=t,
|
|
|
|
ctime=t,
|
|
|
|
)
|
2018-03-10 14:11:08 +00:00
|
|
|
self.process_file_chunks(item, cache, self.stats, self.show_progress, backup_io_iter(self.chunker.chunkify(fd)))
|
2017-02-18 06:02:11 +00:00
|
|
|
item.get_size(memorize=True)
|
2016-06-26 15:14:13 +00:00
|
|
|
self.stats.nfiles += 1
|
2018-03-10 14:11:08 +00:00
|
|
|
self.add_item(item, stats=self.stats)
|
2021-04-26 15:50:21 +00:00
|
|
|
return status
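    # Added note (not part of the original source): in the Item above, "mode & 0o107777 | 0o100000"
    # keeps the permission/setuid/setgid/sticky bits and forces the file type to regular file
    # (0o100000 == stat.S_IFREG), so data streamed from stdin or a pipe is stored like a normal file.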
|
2015-03-01 03:29:44 +00:00
|
|
|
|
2019-02-20 09:13:09 +00:00
|
|
|
def process_file(self, *, path, parent_fd, name, st, cache, flags=flags_normal):
|
2022-05-08 12:14:47 +00:00
|
|
|
with self.create_helper(path, st, None) as (item, status, hardlinked): # no status yet
|
2019-02-20 09:13:09 +00:00
|
|
|
with OsOpen(path=path, parent_fd=parent_fd, name=name, flags=flags, noatime=True) as fd:
|
2018-08-12 02:50:27 +00:00
|
|
|
with backup_io("fstat"):
|
2019-02-17 05:45:24 +00:00
|
|
|
st = stat_update_check(st, os.fstat(fd))
|
2019-04-06 21:52:16 +00:00
|
|
|
item.update(self.metadata_collector.stat_simple_attrs(st))
|
2018-08-12 02:50:27 +00:00
|
|
|
is_special_file = is_special(st.st_mode)
|
2020-06-14 13:36:22 +00:00
|
|
|
if is_special_file:
|
|
|
|
# we process a special file like a regular file. reflect that in mode,
|
|
|
|
# so it can be extracted / accessed in FUSE mount like a regular file.
|
|
|
|
# this needs to be done early, so that part files also get the patched mode.
|
|
|
|
item.mode = stat.S_IFREG | stat.S_IMODE(item.mode)
|
2022-05-08 12:14:47 +00:00
|
|
|
if "chunks" in item: # create_helper might have put chunks from a previous hardlink there
|
2022-06-10 18:36:58 +00:00
|
|
|
[cache.chunk_incref(id_, self.stats) for id_, _ in item.chunks]
|
2022-05-08 12:14:47 +00:00
|
|
|
else: # normal case, no "2nd+" hardlink
|
2018-08-12 02:50:27 +00:00
|
|
|
if not is_special_file:
|
2021-02-23 21:56:38 +00:00
|
|
|
hashed_path = safe_encode(os.path.join(self.cwd, path))
|
2022-10-19 19:40:02 +00:00
|
|
|
started_hashing = time.monotonic()
|
2021-02-23 21:56:38 +00:00
|
|
|
path_hash = self.key.id_hash(hashed_path)
|
2022-10-19 19:40:02 +00:00
|
|
|
self.stats.hashing_time += time.monotonic() - started_hashing
|
2021-02-23 21:56:38 +00:00
|
|
|
known, ids = cache.file_known_and_unchanged(hashed_path, path_hash, st)
|
2017-03-26 11:51:04 +00:00
|
|
|
else:
|
2018-08-12 02:50:27 +00:00
|
|
|
# in --read-special mode, we may be called for special files.
|
|
|
|
# there should be no information in the cache about special files processed in
|
|
|
|
# read-special mode, but we better play safe as this was wrong in the past:
|
2021-02-23 21:56:38 +00:00
|
|
|
hashed_path = path_hash = None
|
2018-08-12 02:50:27 +00:00
|
|
|
known, ids = False, None
|
|
|
|
chunks = None
|
|
|
|
if ids is not None:
|
|
|
|
# Make sure all ids are available
|
|
|
|
for id_ in ids:
|
|
|
|
if not cache.seen_chunk(id_):
|
|
|
|
status = (
|
|
|
|
"M" # cache said it is unmodified, but we lost a chunk: process file like modified
|
2022-07-06 13:37:27 +00:00
|
|
|
)
|
2018-08-12 02:50:27 +00:00
|
|
|
break
|
|
|
|
else:
|
|
|
|
chunks = [cache.chunk_incref(id_, self.stats) for id_ in ids]
|
|
|
|
status = "U" # regular file, unchanged
|
|
|
|
else:
|
|
|
|
status = "M" if known else "A" # regular file, modified or added
|
2021-04-26 15:50:21 +00:00
|
|
|
self.print_file_status(status, path)
|
2022-10-19 19:40:02 +00:00
|
|
|
self.stats.files_stats[status] += 1
|
2021-04-26 15:50:21 +00:00
|
|
|
status = None # we already printed the status
|
2018-08-12 02:50:27 +00:00
|
|
|
# Only chunkify the file if needed
|
|
|
|
if chunks is not None:
|
|
|
|
item.chunks = chunks
|
|
|
|
else:
|
|
|
|
with backup_io("read"):
|
|
|
|
self.process_file_chunks(
|
|
|
|
item,
|
|
|
|
cache,
|
|
|
|
self.stats,
|
|
|
|
self.show_progress,
|
|
|
|
backup_io_iter(self.chunker.chunkify(None, fd)),
|
|
|
|
)
|
2022-10-19 19:40:02 +00:00
|
|
|
self.stats.chunking_time = self.chunker.chunking_time
|
2019-03-11 19:41:23 +00:00
|
|
|
if is_win32:
|
|
|
|
changed_while_backup = False # TODO
|
|
|
|
else:
|
|
|
|
with backup_io("fstat2"):
|
|
|
|
st2 = os.fstat(fd)
|
|
|
|
# special files:
|
|
|
|
# - fifos change naturally, because they are fed from the other side. no problem.
|
|
|
|
# - blk/chr devices don't change ctime anyway.
|
|
|
|
changed_while_backup = not is_special_file and st.st_ctime_ns != st2.st_ctime_ns
|
|
|
|
if changed_while_backup:
|
|
|
|
status = "C" # regular file changed while we backed it up, might be inconsistent/corrupt!
|
|
|
|
if not is_special_file and not changed_while_backup:
|
|
|
|
# we must not memorize special files, because the contents of e.g. a
|
|
|
|
# block or char device will change without its mtime/size/inode changing.
|
2019-03-11 19:41:23 +00:00
|
|
|
# also, we must not memorize a potentially inconsistent/corrupt file that
|
|
|
|
# changed while we backed it up.
|
2021-02-23 21:56:38 +00:00
|
|
|
cache.memorize_file(hashed_path, path_hash, st, [c.id for c in item.chunks])
|
2022-05-08 12:14:47 +00:00
|
|
|
self.stats.nfiles += 1
|
2019-04-06 21:52:16 +00:00
|
|
|
item.update(self.metadata_collector.stat_ext_attrs(st, path, fd=fd))
|
2018-08-12 02:50:27 +00:00
|
|
|
item.get_size(memorize=True)
|
2018-08-12 23:18:00 +00:00
|
|
|
return status
|
2010-10-20 17:59:15 +00:00
|
|
|
|
2014-02-16 21:21:18 +00:00
|
|
|
|
2021-06-09 23:41:11 +00:00
|
|
|
class TarfileObjectProcessors:
|
|
|
|
def __init__(
|
|
|
|
self,
|
|
|
|
*,
|
|
|
|
cache,
|
|
|
|
key,
|
|
|
|
add_item,
|
|
|
|
process_file_chunks,
|
|
|
|
chunker_params,
|
|
|
|
show_progress,
|
|
|
|
log_json,
|
|
|
|
iec,
|
|
|
|
file_status_printer=None,
|
|
|
|
):
|
|
|
|
self.cache = cache
|
|
|
|
self.key = key
|
|
|
|
self.add_item = add_item
|
|
|
|
self.process_file_chunks = process_file_chunks
|
|
|
|
self.show_progress = show_progress
|
|
|
|
self.print_file_status = file_status_printer or (lambda *args: None)
|
|
|
|
|
|
|
|
self.stats = Statistics(output_json=log_json, iec=iec) # threading: done by cache (including progress)
|
|
|
|
self.chunker = get_chunker(*chunker_params, seed=key.chunk_seed, sparse=False)
|
2022-05-08 12:14:47 +00:00
|
|
|
self.hlm = HardLinkManager(id_type=str, info_type=list) # path -> chunks
|
2021-06-09 23:41:11 +00:00
|
|
|
|
|
|
|
@contextmanager
|
|
|
|
def create_helper(self, tarinfo, status=None, type=None):
|
2022-04-02 18:11:05 +00:00
|
|
|
ph = tarinfo.pax_headers
|
|
|
|
if ph and "BORG.item.version" in ph:
|
|
|
|
assert ph["BORG.item.version"] == "1"
|
|
|
|
meta_bin = base64.b64decode(ph["BORG.item.meta"])
|
|
|
|
meta_dict = msgpack.unpackb(meta_bin, object_hook=StableDict)
|
|
|
|
item = Item(internal_dict=meta_dict)
|
|
|
|
else:
|
2022-07-06 13:37:27 +00:00
|
|
|
|
2022-04-02 18:11:05 +00:00
|
|
|
def s_to_ns(s):
|
|
|
|
return safe_ns(int(float(s) * 1e9))
|
|
|
|
|
|
|
|
item = Item(
|
|
|
|
path=make_path_safe(tarinfo.name),
|
|
|
|
mode=tarinfo.mode | type,
|
2022-05-29 22:05:07 +00:00
|
|
|
uid=tarinfo.uid,
|
|
|
|
gid=tarinfo.gid,
|
|
|
|
mtime=s_to_ns(tarinfo.mtime),
|
|
|
|
)
|
|
|
|
if tarinfo.uname:
|
|
|
|
item.user = tarinfo.uname
|
|
|
|
if tarinfo.gname:
|
|
|
|
item.group = tarinfo.gname
|
2022-04-02 18:11:05 +00:00
|
|
|
if ph:
|
|
|
|
# note: for mtime this is a bit redundant as it is already done by tarfile module,
|
|
|
|
# but we just do it in our way to be consistent for sure.
|
|
|
|
for name in "atime", "ctime", "mtime":
|
|
|
|
if name in ph:
|
|
|
|
ns = s_to_ns(ph[name])
|
|
|
|
setattr(item, name, ns)
|
2021-06-09 23:41:11 +00:00
|
|
|
yield item, status
|
|
|
|
# if we get here, "with"-block worked ok without error/exception, the item was processed ok...
|
|
|
|
self.add_item(item, stats=self.stats)
|
|
|
|
|
|
|
|
def process_dir(self, *, tarinfo, status, type):
|
|
|
|
with self.create_helper(tarinfo, status, type) as (item, status):
|
|
|
|
return status
|
|
|
|
|
|
|
|
def process_fifo(self, *, tarinfo, status, type):
|
2021-06-10 10:19:20 +00:00
|
|
|
with self.create_helper(tarinfo, status, type) as (item, status):
|
2021-06-09 23:41:11 +00:00
|
|
|
return status
|
|
|
|
|
|
|
|
def process_dev(self, *, tarinfo, status, type):
|
2021-06-10 10:19:20 +00:00
|
|
|
with self.create_helper(tarinfo, status, type) as (item, status):
|
2021-06-09 23:41:11 +00:00
|
|
|
item.rdev = os.makedev(tarinfo.devmajor, tarinfo.devminor)
|
|
|
|
return status
|
|
|
|
|
2022-05-08 12:14:47 +00:00
|
|
|
def process_symlink(self, *, tarinfo, status, type):
|
2021-06-09 23:41:11 +00:00
|
|
|
with self.create_helper(tarinfo, status, type) as (item, status):
|
|
|
|
item.source = tarinfo.linkname
|
|
|
|
return status
|
|
|
|
|
2022-05-08 12:14:47 +00:00
|
|
|
def process_hardlink(self, *, tarinfo, status, type):
|
|
|
|
with self.create_helper(tarinfo, status, type) as (item, status):
|
|
|
|
            # create a non-hardlinked borg item, reusing the chunks, see HardLinkManager.__doc__
|
|
|
|
chunks = self.hlm.retrieve(tarinfo.linkname)
|
|
|
|
if chunks is not None:
|
|
|
|
item.chunks = chunks
|
|
|
|
item.get_size(memorize=True, from_chunks=True)
|
|
|
|
self.stats.nfiles += 1
|
|
|
|
return status
|
|
|
|
|
2021-06-09 23:41:11 +00:00
|
|
|
def process_file(self, *, tarinfo, status, type, tar):
|
|
|
|
with self.create_helper(tarinfo, status, type) as (item, status):
|
|
|
|
self.print_file_status(status, tarinfo.name)
|
|
|
|
status = None # we already printed the status
|
|
|
|
fd = tar.extractfile(tarinfo)
|
|
|
|
self.process_file_chunks(
|
|
|
|
item, self.cache, self.stats, self.show_progress, backup_io_iter(self.chunker.chunkify(fd))
|
|
|
|
)
|
2022-05-08 12:14:47 +00:00
|
|
|
item.get_size(memorize=True, from_chunks=True)
|
2021-06-09 23:41:11 +00:00
|
|
|
self.stats.nfiles += 1
|
2022-05-08 12:14:47 +00:00
|
|
|
# we need to remember ALL files, see HardLinkManager.__doc__
|
|
|
|
self.hlm.remember(id=tarinfo.name, info=item.chunks)
|
2021-06-09 23:41:11 +00:00
|
|
|
return status
|
|
|
|
|
|
|
|
|
2016-06-12 21:36:56 +00:00
|
|
|
def valid_msgpacked_dict(d, keys_serialized):
    """check if the data <d> looks like a msgpacked dict"""
    d_len = len(d)
    if d_len == 0:
        return False
    if d[0] & 0xF0 == 0x80:  # object is a fixmap (up to 15 elements)
        offs = 1
    elif d[0] == 0xDE:  # object is a map16 (up to 2^16-1 elements)
        offs = 3
    else:
        # object is not a map (dict)
        # note: we must not have dicts with > 2^16-1 elements
        return False
    if d_len <= offs:
        return False
    # is the first dict key a bytestring?
    if d[offs] & 0xE0 == 0xA0:  # key is a small bytestring (up to 31 chars)
        pass
    elif d[offs] in (0xD9, 0xDA, 0xDB):  # key is a str8, str16 or str32
        pass
    else:
        # key is not a bytestring
        return False
    # is the bytestring any of the expected key names?
    key_serialized = d[offs:]
    return any(key_serialized.startswith(pattern) for pattern in keys_serialized)
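# Added note (not part of the original source): for a typical packed item dict, e.g.
# msgpack.packb({"path": ..., ...}), the first byte is a fixmap marker (0x80 | number_of_keys)
# and the following bytes are the first key as a fixstr (0xa0 | length, then the key bytes),
# e.g. b"\x84\xa4path...". Those are exactly the byte patterns the checks above look for when
# resyncing on a damaged metadata stream.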
|
|
|
|
|
|
|
|
|
2015-07-15 09:30:25 +00:00
|
|
|
class RobustUnpacker:
    """A restartable/robust version of the streaming msgpack unpacker"""

    def __init__(self, validator, item_keys):
        super().__init__()
        self.item_keys = [msgpack.packb(name) for name in item_keys]
        self.validator = validator
        self._buffered_data = []
        self._resync = False
        self._unpacker = msgpack.Unpacker(object_hook=StableDict)

    def resync(self):
        self._buffered_data = []
        self._resync = True

    def feed(self, data):
        if self._resync:
            self._buffered_data.append(data)
        else:
            self._unpacker.feed(data)

    def __iter__(self):
        return self

    def __next__(self):
        if self._resync:
            data = b"".join(self._buffered_data)
            while self._resync:
                if not data:
                    raise StopIteration
                # Abort early if the data does not look like a serialized item dict
                if not valid_msgpacked_dict(data, self.item_keys):
                    data = data[1:]
                    continue
                self._unpacker = msgpack.Unpacker(object_hook=StableDict)
                self._unpacker.feed(data)
                try:
                    item = next(self._unpacker)
                except (msgpack.UnpackException, StopIteration):
                    # as long as we are resyncing, we also ignore StopIteration
                    pass
                else:
                    if self.validator(item):
                        self._resync = False
                        return item
                data = data[1:]
        else:
            return next(self._unpacker)
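# Illustrative usage sketch (added note, not part of the original source; ITEM_KEYS is assumed
# to be the item key list from borg.constants):
#
#   unpacker = RobustUnpacker(lambda item: isinstance(item, StableDict) and "path" in item, ITEM_KEYS)
#   unpacker.feed(metadata_chunk_data)
#   for item_dict in unpacker:
#       ...                         # normal streaming unpacking
#   unpacker.resync()               # a metadata chunk was missing or corrupted
#   unpacker.feed(next_chunk_data)  # __next__ now skips bytes until a valid item dict is found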
|
2014-02-24 21:43:17 +00:00
|
|
|
|
|
|
|
|
2014-02-16 21:21:18 +00:00
|
|
|
class ArchiveChecker:
|
|
|
|
def __init__(self):
|
|
|
|
self.error_found = False
|
|
|
|
self.possibly_superseded = set()
|
|
|
|
|
2022-12-17 15:48:54 +00:00
|
|
|
def check(self, repository, repair=False, first=0, last=0, sort_by="", match=None, verify_data=False):
|
2016-05-13 20:50:34 +00:00
|
|
|
"""Perform a set of checks on 'repository'
|
|
|
|
|
|
|
|
:param repair: enable repair mode, write updated or corrected data into repository
|
2016-10-18 01:57:42 +00:00
|
|
|
:param first/last/sort_by: only check this number of first/last archives ordered by sort_by
|
2022-09-15 22:37:18 +00:00
|
|
|
:param match: only check archives matching this pattern
|
2016-05-13 20:50:34 +00:00
|
|
|
:param verify_data: integrity verification of data referenced by archives
|
|
|
|
"""
|
2015-12-07 23:21:46 +00:00
|
|
|
logger.info("Starting archive consistency check...")
|
2022-09-15 22:37:18 +00:00
|
|
|
self.check_all = not any((first, last, match))
|
2014-02-24 22:37:21 +00:00
|
|
|
self.repair = repair
|
|
|
|
self.repository = repository
|
|
|
|
self.init_chunks()
|
2016-11-13 14:58:42 +00:00
|
|
|
if not self.chunks:
|
|
|
|
logger.error("Repository contains no apparent data at all, cannot continue check/repair.")
|
|
|
|
return False
|
2022-07-29 07:08:03 +00:00
|
|
|
self.key = self.make_key(repository)
|
2022-08-23 01:25:06 +00:00
|
|
|
self.repo_objs = RepoObj(self.key)
|
2016-07-28 16:41:08 +00:00
|
|
|
if verify_data:
|
|
|
|
self.verify_data()
|
2015-03-17 22:47:21 +00:00
|
|
|
if Manifest.MANIFEST_ID not in self.chunks:
|
2015-12-07 23:21:46 +00:00
|
|
|
logger.error("Repository manifest not found!")
|
|
|
|
self.error_found = True
|
2014-02-24 22:37:21 +00:00
|
|
|
self.manifest = self.rebuild_manifest()
|
|
|
|
else:
|
2016-11-30 04:38:04 +00:00
|
|
|
try:
|
2022-08-23 01:25:06 +00:00
|
|
|
self.manifest = Manifest.load(repository, (Manifest.Operation.CHECK,), key=self.key)
|
2016-10-21 23:50:35 +00:00
|
|
|
except IntegrityErrorBase as exc:
|
2016-11-30 04:38:04 +00:00
|
|
|
logger.error("Repository manifest is corrupted: %s", exc)
|
|
|
|
self.error_found = True
|
|
|
|
del self.chunks[Manifest.MANIFEST_ID]
|
|
|
|
self.manifest = self.rebuild_manifest()
|
2022-09-15 22:37:18 +00:00
|
|
|
self.rebuild_refcounts(match=match, first=first, last=last, sort_by=sort_by)
|
2015-08-09 10:43:57 +00:00
|
|
|
self.orphan_chunks_check()
|
2022-12-17 15:48:54 +00:00
|
|
|
self.finish()
|
2015-12-07 23:21:46 +00:00
|
|
|
if self.error_found:
|
|
|
|
logger.error("Archive consistency check complete, problems found.")
|
|
|
|
else:
|
2015-10-02 14:58:08 +00:00
|
|
|
logger.info("Archive consistency check complete, no problems found.")
|
2014-02-24 22:37:21 +00:00
|
|
|
return self.repair or not self.error_found
|
|
|
|
|
2014-02-16 21:21:18 +00:00
|
|
|
def init_chunks(self):
|
2014-02-24 22:37:21 +00:00
|
|
|
"""Fetch a list of all object keys from repository"""
|
2018-06-12 20:12:02 +00:00
|
|
|
# Explicitly set the initial usable hash table capacity to avoid performance issues
|
2016-09-14 00:53:41 +00:00
|
|
|
# due to hash table "resonance".
|
2018-06-12 20:12:02 +00:00
|
|
|
# Since reconstruction of archive items can add some new chunks, add 10 % headroom.
|
|
|
|
self.chunks = ChunkIndex(usable=len(self.repository) * 1.1)
|
2014-02-16 21:21:18 +00:00
|
|
|
marker = None
|
|
|
|
while True:
|
2017-02-17 04:00:37 +00:00
|
|
|
result = self.repository.list(limit=LIST_SCAN_LIMIT, marker=marker)
|
2014-02-16 21:21:18 +00:00
|
|
|
if not result:
|
|
|
|
break
|
|
|
|
marker = result[-1]
|
2022-06-10 20:23:27 +00:00
|
|
|
init_entry = ChunkIndexEntry(refcount=0, size=0)
|
2014-02-16 21:21:18 +00:00
|
|
|
for id_ in result:
|
2016-04-16 15:48:47 +00:00
|
|
|
self.chunks[id_] = init_entry
|
2014-02-16 21:21:18 +00:00
|
|
|
|
2022-07-29 07:08:03 +00:00
|
|
|
    def make_key(self, repository):
        attempt = 0
        for chunkid, _ in self.chunks.iteritems():
            attempt += 1
            if attempt > 999:
                # we did a lot of attempts, but could not create the key via key_factory, give up.
                break
            cdata = repository.get(chunkid)
            try:
                return key_factory(repository, cdata)
            except UnsupportedPayloadError:
                # we get here, if the cdata we got has a corrupted key type byte
                pass  # ignore it, just try the next chunk
        if attempt == 0:
            msg = "make_key: repository has no chunks at all!"
        else:
            msg = "make_key: failed to create the key (tried %d chunks)" % attempt
        raise IntegrityError(msg)
|
2014-02-16 21:21:18 +00:00
|
|
|
|
2016-05-13 20:50:34 +00:00
|
|
|
def verify_data(self):
|
|
|
|
logger.info("Starting cryptographic data integrity verification...")
|
2016-10-04 20:05:26 +00:00
|
|
|
chunks_count_index = len(self.chunks)
|
|
|
|
chunks_count_segments = 0
|
2016-07-28 16:40:20 +00:00
|
|
|
errors = 0
|
2022-07-20 12:53:50 +00:00
|
|
|
# for the new crypto, derived from AEADKeyBase, we know that it checks authenticity on
|
|
|
|
# the crypto.low_level level - invalid chunks will fail to AEAD authenticate.
|
|
|
|
# for these key types, we know that there is no need to decompress the data afterwards.
|
|
|
|
# for all other modes, we assume that we must decompress, so we can verify authenticity
|
|
|
|
# based on the plaintext MAC (via calling ._assert_id(id, plaintext)).
|
|
|
|
decompress = not isinstance(self.key, AEADKeyBase)
|
2016-09-08 23:27:27 +00:00
|
|
|
defect_chunks = []
|
2017-02-27 19:38:02 +00:00
|
|
|
pi = ProgressIndicatorPercent(
|
|
|
|
total=chunks_count_index, msg="Verifying data %6.2f%%", step=0.01, msgid="check.verify_data"
|
|
|
|
)
|
2022-09-19 19:14:25 +00:00
|
|
|
state = None
|
2016-10-04 02:55:10 +00:00
|
|
|
while True:
|
2022-09-19 19:14:25 +00:00
|
|
|
chunk_ids, state = self.repository.scan(limit=100, state=state)
|
2016-10-04 02:55:10 +00:00
|
|
|
if not chunk_ids:
|
|
|
|
break
|
2016-10-04 20:05:26 +00:00
|
|
|
chunks_count_segments += len(chunk_ids)
|
2016-09-16 00:49:54 +00:00
|
|
|
chunk_data_iter = self.repository.get_many(chunk_ids)
|
|
|
|
chunk_ids_revd = list(reversed(chunk_ids))
|
|
|
|
while chunk_ids_revd:
|
|
|
|
pi.show()
|
|
|
|
chunk_id = chunk_ids_revd.pop(-1) # better efficiency
|
|
|
|
try:
|
|
|
|
encrypted_data = next(chunk_data_iter)
|
2016-10-21 23:50:35 +00:00
|
|
|
except (Repository.ObjectNotFound, IntegrityErrorBase) as err:
|
2016-09-16 00:49:54 +00:00
|
|
|
self.error_found = True
|
|
|
|
errors += 1
|
|
|
|
logger.error("chunk %s: %s", bin_to_hex(chunk_id), err)
|
2016-10-21 23:50:35 +00:00
|
|
|
if isinstance(err, IntegrityErrorBase):
|
2016-09-16 00:49:54 +00:00
|
|
|
defect_chunks.append(chunk_id)
|
|
|
|
# as the exception killed our generator, make a new one for remaining chunks:
|
|
|
|
if chunk_ids_revd:
|
|
|
|
chunk_ids = list(reversed(chunk_ids_revd))
|
|
|
|
chunk_data_iter = self.repository.get_many(chunk_ids)
|
|
|
|
else:
|
|
|
|
try:
|
2022-08-23 01:25:06 +00:00
|
|
|
self.repo_objs.parse(chunk_id, encrypted_data, decompress=decompress)
|
2016-10-21 23:50:35 +00:00
|
|
|
except IntegrityErrorBase as integrity_error:
|
2016-09-16 00:49:54 +00:00
|
|
|
self.error_found = True
|
|
|
|
errors += 1
|
|
|
|
logger.error("chunk %s, integrity error: %s", bin_to_hex(chunk_id), integrity_error)
|
|
|
|
defect_chunks.append(chunk_id)
|
2016-05-13 20:50:34 +00:00
|
|
|
pi.finish()
|
2016-10-04 20:05:26 +00:00
|
|
|
if chunks_count_index != chunks_count_segments:
|
|
|
|
logger.error("Repo/Chunks index object count vs. segment files object count mismatch.")
|
|
|
|
logger.error(
|
|
|
|
"Repo/Chunks index: %d objects != segment files: %d objects", chunks_count_index, chunks_count_segments
|
|
|
|
)
|
2016-09-08 23:27:27 +00:00
|
|
|
if defect_chunks:
|
|
|
|
if self.repair:
|
|
|
|
# if we kill the defect chunk here, subsequent actions within this "borg check"
|
|
|
|
# run will find missing chunks and replace them with all-zero replacement
|
|
|
|
# chunks and flag the files as "repaired".
|
2022-12-29 00:01:48 +00:00
|
|
|
# if another backup is done later and the missing chunks get backed up again,
|
2016-09-08 23:27:27 +00:00
|
|
|
# a "borg check" afterwards can heal all files where this chunk was missing.
|
|
|
|
logger.warning(
|
|
|
|
"Found defect chunks. They will be deleted now, so affected files can "
|
|
|
|
"get repaired now and maybe healed later."
|
|
|
|
)
|
|
|
|
for defect_chunk in defect_chunks:
|
|
|
|
# remote repo (ssh): retry might help for strange network / NIC / RAM errors
|
|
|
|
# as the chunk will be retransmitted from remote server.
|
|
|
|
# local repo (fs): as chunks.iteritems loop usually pumps a lot of data through,
|
|
|
|
# a defect chunk is likely not in the fs cache any more and really gets re-read
|
|
|
|
# from the underlying media.
|
|
|
|
try:
|
2017-04-25 13:48:16 +00:00
|
|
|
encrypted_data = self.repository.get(defect_chunk)
|
2022-08-23 01:25:06 +00:00
|
|
|
self.repo_objs.parse(defect_chunk, encrypted_data, decompress=decompress)
|
2016-10-21 23:50:35 +00:00
|
|
|
except IntegrityErrorBase:
|
2016-09-08 23:27:27 +00:00
|
|
|
# failed twice -> get rid of this chunk
|
|
|
|
del self.chunks[defect_chunk]
|
|
|
|
self.repository.delete(defect_chunk)
|
|
|
|
logger.debug("chunk %s deleted.", bin_to_hex(defect_chunk))
|
|
|
|
else:
|
2021-06-05 12:59:01 +00:00
|
|
|
logger.warning("chunk %s not deleted, did not consistently fail.", bin_to_hex(defect_chunk))
|
2016-09-08 23:27:27 +00:00
|
|
|
else:
|
|
|
|
logger.warning(
|
|
|
|
"Found defect chunks. With --repair, they would get deleted, so affected "
|
|
|
|
"files could get repaired then and maybe healed later."
|
|
|
|
)
|
|
|
|
for defect_chunk in defect_chunks:
|
|
|
|
logger.debug("chunk %s is defect.", bin_to_hex(defect_chunk))
|
2016-05-13 20:50:34 +00:00
|
|
|
log = logger.error if errors else logger.info
|
2016-10-04 20:05:26 +00:00
|
|
|
log(
|
|
|
|
"Finished cryptographic data integrity verification, verified %d chunks with %d integrity errors.",
|
|
|
|
chunks_count_segments,
|
|
|
|
errors,
|
|
|
|
)
|
2016-05-13 20:50:34 +00:00
|
|
|
|
2014-02-16 21:21:18 +00:00
|
|
|
def rebuild_manifest(self):
|
2014-02-24 22:37:21 +00:00
|
|
|
"""Rebuild the manifest object if it is missing
|
|
|
|
|
|
|
|
Iterates through all objects in the repository looking for archive metadata blocks.
|
|
|
|
"""
|
2022-07-06 13:37:27 +00:00
|
|
|
|
2016-06-12 21:36:56 +00:00
|
|
|
def valid_archive(obj):
|
|
|
|
if not isinstance(obj, dict):
|
|
|
|
return False
|
2022-05-06 01:59:10 +00:00
|
|
|
return REQUIRED_ARCHIVE_KEYS.issubset(obj)
|
2016-06-12 21:36:56 +00:00
|
|
|
|
2015-12-07 23:21:46 +00:00
|
|
|
logger.info("Rebuilding missing manifest, this might take some time...")
|
2016-06-12 21:36:56 +00:00
|
|
|
# as we have lost the manifest, we do not know any more what valid item keys we had.
|
|
|
|
# collecting any key we encounter in a damaged repo seems unwise, thus we just use
|
|
|
|
# the hardcoded list from the source code. thus, it is not recommended to rebuild a
|
|
|
|
        # lost manifest on an older borg version than the most recent one that was ever used
|
|
|
|
# within this repository (assuming that newer borg versions support more item keys).
|
2014-02-16 21:21:18 +00:00
|
|
|
manifest = Manifest(self.key, self.repository)
|
2022-05-05 17:36:02 +00:00
|
|
|
archive_keys_serialized = [msgpack.packb(name) for name in ARCHIVE_KEYS]
|
2018-04-28 17:12:01 +00:00
|
|
|
pi = ProgressIndicatorPercent(
|
|
|
|
total=len(self.chunks), msg="Rebuilding manifest %6.2f%%", step=0.01, msgid="check.rebuild_manifest"
|
|
|
|
)
|
2014-02-16 21:21:18 +00:00
|
|
|
for chunk_id, _ in self.chunks.iteritems():
|
2018-04-28 17:12:01 +00:00
|
|
|
pi.show()
|
2014-02-16 21:21:18 +00:00
|
|
|
cdata = self.repository.get(chunk_id)
|
2016-11-30 04:38:04 +00:00
|
|
|
try:
|
2022-08-23 01:25:06 +00:00
|
|
|
_, data = self.repo_objs.parse(chunk_id, cdata)
|
2016-10-21 23:50:35 +00:00
|
|
|
except IntegrityErrorBase as exc:
|
2016-11-30 04:38:04 +00:00
|
|
|
logger.error("Skipping corrupted chunk: %s", exc)
|
|
|
|
self.error_found = True
|
|
|
|
continue
|
2016-06-12 21:36:56 +00:00
|
|
|
if not valid_msgpacked_dict(data, archive_keys_serialized):
|
2014-03-01 14:00:21 +00:00
|
|
|
continue
|
2022-05-17 00:00:00 +00:00
|
|
|
if b"cmdline" not in data or b"\xa7version\x02" not in data:
|
2014-03-01 14:00:21 +00:00
|
|
|
continue
|
2014-02-16 21:21:18 +00:00
|
|
|
try:
|
|
|
|
archive = msgpack.unpackb(data)
|
2018-07-01 00:34:48 +00:00
|
|
|
# Ignore exceptions that might be raised when feeding msgpack with invalid data
|
|
|
|
except msgpack.UnpackException:
|
2014-02-16 21:21:18 +00:00
|
|
|
continue
|
2016-06-12 21:36:56 +00:00
|
|
|
if valid_archive(archive):
|
2016-08-14 23:11:33 +00:00
|
|
|
archive = ArchiveItem(internal_dict=archive)
|
2016-12-17 00:48:33 +00:00
|
|
|
name = archive.name
|
|
|
|
logger.info("Found archive %s", name)
|
|
|
|
if name in manifest.archives:
|
|
|
|
i = 1
|
|
|
|
while True:
|
|
|
|
new_name = "%s.%d" % (name, i)
|
|
|
|
if new_name not in manifest.archives:
|
|
|
|
break
|
|
|
|
i += 1
|
|
|
|
logger.warning("Duplicate archive name %s, storing as %s", name, new_name)
|
|
|
|
name = new_name
|
|
|
|
manifest.archives[name] = (chunk_id, archive.time)
|
2018-04-28 17:12:01 +00:00
|
|
|
pi.finish()
|
2015-12-07 23:21:46 +00:00
|
|
|
logger.info("Manifest rebuild complete.")
|
2014-02-16 21:21:18 +00:00
|
|
|
return manifest
|
|
|
|
|
2022-09-15 22:37:18 +00:00
|
|
|
def rebuild_refcounts(self, first=0, last=0, sort_by="", match=None):
|
2014-02-24 22:37:21 +00:00
|
|
|
"""Rebuild object reference counts by walking the metadata
|
2014-02-16 21:21:18 +00:00
|
|
|
|
2014-02-24 22:37:21 +00:00
|
|
|
Missing and/or incorrect data is repaired when detected
|
|
|
|
"""
|
2017-06-17 18:17:08 +00:00
|
|
|
# Exclude the manifest from chunks (manifest entry might be already deleted from self.chunks)
|
|
|
|
self.chunks.pop(Manifest.MANIFEST_ID, None)
|
2014-02-18 20:16:36 +00:00
|
|
|
|
2014-02-24 22:37:21 +00:00
|
|
|
        def mark_as_possibly_superseded(id_):
            if self.chunks.get(id_, ChunkIndexEntry(0, 0)).refcount == 0:
                self.possibly_superseded.add(id_)

        def add_callback(chunk):
            id_ = self.key.id_hash(chunk)
            cdata = self.repo_objs.format(id_, {}, chunk)
            add_reference(id_, len(chunk), cdata)
            return id_

        def add_reference(id_, size, cdata=None):
            try:
                self.chunks.incref(id_)
            except KeyError:
                assert cdata is not None
                self.chunks[id_] = ChunkIndexEntry(refcount=1, size=size)
                if self.repair:
                    self.repository.put(id_, cdata)
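        # Added note (not part of the original source): add_reference() increments the refcount for
        # chunks already present in the rebuilt index; for a chunk that is missing it recreates the
        # index entry and, only in --repair mode, also writes the given cdata back to the repository.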
|
|
|
|
|
2019-02-01 22:30:45 +00:00
|
|
|
def verify_file_chunks(archive_name, item):
|
2016-07-09 14:38:07 +00:00
|
|
|
"""Verifies that all file chunks are present.
|
2014-02-24 22:37:21 +00:00
|
|
|
|
2016-07-09 14:38:07 +00:00
|
|
|
Missing file chunks will be replaced with new chunks of the same length containing all zeros.
|
|
|
|
If a previously missing file chunk re-appears, the replacement chunk is replaced by the correct one.
|
2014-02-24 22:37:21 +00:00
|
|
|
"""
|
2022-07-06 13:37:27 +00:00
|
|
|
|
2017-02-18 23:49:36 +00:00
|
|
|
def replacement_chunk(size):
|
2021-01-08 18:29:29 +00:00
|
|
|
chunk = Chunk(None, allocation=CH_ALLOC, size=size)
|
|
|
|
chunk_id, data = cached_hash(chunk, self.key.id_hash)
|
2022-08-23 01:25:06 +00:00
|
|
|
cdata = self.repo_objs.format(chunk_id, {}, data)
|
2022-06-10 13:59:29 +00:00
|
|
|
return chunk_id, size, cdata
|
2017-02-18 23:49:36 +00:00
|
|
|
|
2014-02-16 21:21:18 +00:00
|
|
|
offset = 0
|
|
|
|
chunk_list = []
|
2016-07-06 21:10:04 +00:00
|
|
|
chunks_replaced = False
|
2016-07-10 23:23:27 +00:00
|
|
|
has_chunks_healthy = "chunks_healthy" in item
|
|
|
|
chunks_current = item.chunks
|
|
|
|
chunks_healthy = item.chunks_healthy if has_chunks_healthy else chunks_current
|
2017-10-29 10:25:11 +00:00
|
|
|
if has_chunks_healthy and len(chunks_current) != len(chunks_healthy):
|
|
|
|
# should never happen, but there was issue #3218.
|
2022-02-27 18:31:33 +00:00
|
|
|
logger.warning(f"{archive_name}: {item.path}: Invalid chunks_healthy metadata removed!")
|
2017-10-29 10:25:11 +00:00
|
|
|
del item.chunks_healthy
|
|
|
|
has_chunks_healthy = False
|
|
|
|
chunks_healthy = chunks_current
|
2016-07-09 14:38:07 +00:00
|
|
|
for chunk_current, chunk_healthy in zip(chunks_current, chunks_healthy):
|
2022-06-10 18:36:58 +00:00
|
|
|
chunk_id, size = chunk_healthy
|
2015-03-17 22:47:21 +00:00
|
|
|
if chunk_id not in self.chunks:
|
2016-07-09 14:38:07 +00:00
|
|
|
# a chunk of the healthy list is missing
|
|
|
|
if chunk_current == chunk_healthy:
|
2021-04-19 21:46:21 +00:00
|
|
|
logger.error(
|
|
|
|
"{}: {}: New missing file chunk detected (Byte {}-{}, Chunk {}). "
|
2019-02-01 22:30:45 +00:00
|
|
|
"Replacing with all-zero chunk.".format(
|
2021-04-19 21:46:21 +00:00
|
|
|
archive_name, item.path, offset, offset + size, bin_to_hex(chunk_id)
|
2022-07-06 13:37:27 +00:00
|
|
|
)
|
|
|
|
)
|
2016-07-09 14:38:07 +00:00
|
|
|
self.error_found = chunks_replaced = True
|
2022-06-10 13:59:29 +00:00
|
|
|
chunk_id, size, cdata = replacement_chunk(size)
|
|
|
|
add_reference(chunk_id, size, cdata)
|
2016-07-09 14:38:07 +00:00
|
|
|
else:
|
2021-04-19 21:46:21 +00:00
|
|
|
logger.info(
|
|
|
|
"{}: {}: Previously missing file chunk is still missing (Byte {}-{}, Chunk {}). "
|
|
|
|
"It has an all-zero replacement chunk already.".format(
|
|
|
|
archive_name, item.path, offset, offset + size, bin_to_hex(chunk_id)
|
2022-07-06 13:37:27 +00:00
|
|
|
)
|
|
|
|
)
|
2022-06-10 18:36:58 +00:00
|
|
|
chunk_id, size = chunk_current
|
2017-02-18 23:49:36 +00:00
|
|
|
if chunk_id in self.chunks:
|
2022-06-10 13:59:29 +00:00
|
|
|
add_reference(chunk_id, size)
|
2017-02-18 23:49:36 +00:00
|
|
|
else:
|
2021-04-19 21:46:21 +00:00
|
|
|
logger.warning(
|
|
|
|
"{}: {}: Missing all-zero replacement chunk detected (Byte {}-{}, Chunk {}). "
|
2019-02-01 22:30:45 +00:00
|
|
|
"Generating new replacement chunk.".format(
|
2021-04-19 21:46:21 +00:00
|
|
|
archive_name, item.path, offset, offset + size, bin_to_hex(chunk_id)
|
2022-07-06 13:37:27 +00:00
|
|
|
)
|
|
|
|
)
|
2017-02-18 23:49:36 +00:00
|
|
|
self.error_found = chunks_replaced = True
|
2022-06-10 13:59:29 +00:00
|
|
|
chunk_id, size, cdata = replacement_chunk(size)
|
|
|
|
add_reference(chunk_id, size, cdata)
|
2014-02-16 21:21:18 +00:00
|
|
|
else:
|
2016-07-09 14:38:07 +00:00
|
|
|
if chunk_current == chunk_healthy:
|
|
|
|
# normal case, all fine.
|
2022-06-10 13:59:29 +00:00
|
|
|
add_reference(chunk_id, size)
|
2016-07-09 14:38:07 +00:00
|
|
|
else:
|
2021-04-19 21:46:21 +00:00
|
|
|
logger.info(
|
|
|
|
"{}: {}: Healed previously missing file chunk! (Byte {}-{}, Chunk {}).".format(
|
|
|
|
archive_name, item.path, offset, offset + size, bin_to_hex(chunk_id)
|
2022-07-06 13:37:27 +00:00
|
|
|
)
|
|
|
|
)
|
2022-06-10 13:59:29 +00:00
|
|
|
add_reference(chunk_id, size)
|
2016-07-09 14:38:07 +00:00
|
|
|
mark_as_possibly_superseded(chunk_current[0]) # maybe orphaned the all-zero replacement chunk
|
2022-06-10 18:36:58 +00:00
|
|
|
chunk_list.append([chunk_id, size]) # list-typed element as chunks_healthy is list-of-lists
|
2014-02-16 21:21:18 +00:00
|
|
|
offset += size
|
2016-07-09 14:38:07 +00:00
|
|
|
if chunks_replaced and not has_chunks_healthy:
|
2016-07-06 20:42:18 +00:00
|
|
|
# if this is first repair, remember the correct chunk IDs, so we can maybe heal the file later
|
2016-07-08 10:13:52 +00:00
|
|
|
item.chunks_healthy = item.chunks
|
2016-07-09 14:38:07 +00:00
|
|
|
if has_chunks_healthy and chunk_list == chunks_healthy:
|
2022-02-27 18:31:33 +00:00
|
|
|
logger.info(f"{archive_name}: {item.path}: Completely healed previously damaged file!")
|
2016-07-10 23:23:27 +00:00
|
|
|
del item.chunks_healthy
|
2016-05-31 23:45:45 +00:00
|
|
|
item.chunks = chunk_list
|
2017-02-18 22:09:40 +00:00
|
|
|
if "size" in item:
|
|
|
|
item_size = item.size
|
2022-06-10 18:54:57 +00:00
|
|
|
item_chunks_size = item.get_size(from_chunks=True)
|
2017-02-18 22:09:40 +00:00
|
|
|
if item_size != item_chunks_size:
|
|
|
|
# just warn, but keep the inconsistency, so that borg extract can warn about it.
|
2019-02-01 22:30:45 +00:00
|
|
|
logger.warning(
|
|
|
|
"{}: {}: size inconsistency detected: size {}, chunks size {}".format(
|
|
|
|
archive_name, item.path, item_size, item_chunks_size
|
|
|
|
)
|
2022-07-06 13:37:27 +00:00
|
|
|
)
|
2014-02-16 21:21:18 +00:00
|
|
|
|
|
|
|
def robust_iterator(archive):
|
2014-02-24 22:37:21 +00:00
|
|
|
"""Iterates through all archive items
|
|
|
|
|
|
|
|
Missing item chunks will be skipped and the msgpack stream will be restarted
|
|
|
|
"""
|
2022-05-05 17:36:02 +00:00
|
|
|
item_keys = self.manifest.item_keys
|
|
|
|
required_item_keys = REQUIRED_ITEM_KEYS
|
|
|
|
unpacker = RobustUnpacker(
|
|
|
|
lambda item: isinstance(item, StableDict) and "path" in item, self.manifest.item_keys
|
2016-06-12 21:36:56 +00:00
|
|
|
)
|
2014-02-24 21:43:17 +00:00
|
|
|
_state = 0
|
2015-03-17 22:47:21 +00:00
|
|
|
|
2014-02-16 21:21:18 +00:00
|
|
|
            def missing_chunk_detector(chunk_id):
                nonlocal _state
                if _state % 2 != int(chunk_id not in self.chunks):
                    _state += 1
                return _state

            def report(msg, chunk_id, chunk_no):
                cid = bin_to_hex(chunk_id)
                msg += " [chunk: %06d_%s]" % (chunk_no, cid)  # see "debug dump-archive-items"
                self.error_found = True
                logger.error(msg)

            def list_keys_safe(keys):
                return ", ".join(k.decode(errors="replace") if isinstance(k, bytes) else str(k) for k in keys)

            def valid_item(obj):
                if not isinstance(obj, StableDict):
                    return False, "not a dictionary"
                keys = set(obj)
                if not required_item_keys.issubset(keys):
                    return False, "missing required keys: " + list_keys_safe(required_item_keys - keys)
                if not keys.issubset(item_keys):
                    return False, "invalid keys: " + list_keys_safe(keys - item_keys)
                return True, ""
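            # Added note (not part of the original source): missing_chunk_detector() is the groupby()
            # key used below; it increments a counter each time the stream flips between "chunk is in
            # self.chunks" and "chunk is missing", so itertools.groupby() yields alternating runs of
            # present and missing metadata chunk ids - runs with an odd state are the missing ones.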
|
2016-06-12 21:36:56 +00:00
|
|
|
|
2015-11-03 22:45:49 +00:00
|
|
|
i = 0
|
2022-08-23 01:25:06 +00:00
|
|
|
archive_items = archive_get_items(archive, repo_objs=self.repo_objs, repository=repository)
|
2022-08-05 20:06:08 +00:00
|
|
|
for state, items in groupby(archive_items, missing_chunk_detector):
|
2014-02-24 21:43:17 +00:00
|
|
|
items = list(items)
|
2014-02-16 21:21:18 +00:00
|
|
|
if state % 2:
|
2015-11-03 22:45:49 +00:00
|
|
|
for chunk_id in items:
|
|
|
|
report("item metadata chunk missing", chunk_id, i)
|
|
|
|
i += 1
|
2014-02-24 21:43:17 +00:00
|
|
|
continue
|
|
|
|
if state > 0:
|
|
|
|
unpacker.resync()
|
2014-03-13 21:29:47 +00:00
|
|
|
for chunk_id, cdata in zip(items, repository.get_many(items)):
|
2015-11-03 22:45:49 +00:00
|
|
|
try:
|
2022-08-23 01:25:06 +00:00
|
|
|
_, data = self.repo_objs.parse(chunk_id, cdata)
|
2022-04-06 23:22:34 +00:00
|
|
|
unpacker.feed(data)
|
2015-11-03 22:45:49 +00:00
|
|
|
for item in unpacker:
|
2016-11-13 14:58:42 +00:00
|
|
|
valid, reason = valid_item(item)
|
|
|
|
if valid:
|
2016-05-31 23:45:45 +00:00
|
|
|
yield Item(internal_dict=item)
|
2015-11-03 22:45:49 +00:00
|
|
|
else:
|
2016-11-13 14:58:42 +00:00
|
|
|
report(
|
|
|
|
"Did not get expected metadata dict when unpacking item metadata (%s)" % reason,
|
|
|
|
chunk_id,
|
|
|
|
i,
|
|
|
|
)
|
2022-04-06 23:22:34 +00:00
|
|
|
except IntegrityError as integrity_error:
|
2022-08-23 01:25:06 +00:00
|
|
|
# repo_objs.parse() detected integrity issues.
|
2022-04-06 23:22:34 +00:00
|
|
|
# maybe the repo gave us a valid cdata, but not for the chunk_id we wanted.
|
|
|
|
# or the authentication of cdata failed, meaning the encrypted data was corrupted.
|
|
|
|
report(str(integrity_error), chunk_id, i)
|
2018-10-29 10:54:24 +00:00
|
|
|
except msgpack.UnpackException:
|
2016-07-28 20:10:29 +00:00
|
|
|
report("Unpacker crashed while unpacking item metadata, trying to resync...", chunk_id, i)
|
|
|
|
unpacker.resync()
|
2015-11-03 22:45:49 +00:00
|
|
|
except Exception:
|
2022-04-06 23:22:34 +00:00
|
|
|
report("Exception while decrypting or unpacking item metadata", chunk_id, i)
|
2015-11-03 22:45:49 +00:00
|
|
|
raise
|
|
|
|
i += 1
|
2014-02-16 21:21:18 +00:00
|
|
|
|
2022-06-25 20:17:29 +00:00
|
|
|
sort_by = sort_by.split(",")
|
2022-09-15 22:37:18 +00:00
|
|
|
if any((first, last, match)):
|
|
|
|
archive_infos = self.manifest.archives.list(sort_by=sort_by, match=match, first=first, last=last)
|
|
|
|
if match and not archive_infos:
|
|
|
|
logger.warning("--match-archives %s does not match any archives", match)
|
2022-06-25 20:17:29 +00:00
|
|
|
if first and len(archive_infos) < first:
|
|
|
|
logger.warning("--first %d archives: only found %d archives", first, len(archive_infos))
|
|
|
|
if last and len(archive_infos) < last:
|
|
|
|
logger.warning("--last %d archives: only found %d archives", last, len(archive_infos))
|
2015-08-08 20:11:40 +00:00
|
|
|
else:
|
2022-06-25 20:17:29 +00:00
|
|
|
archive_infos = self.manifest.archives.list(sort_by=sort_by)
|
2016-10-18 01:57:42 +00:00
|
|
|
num_archives = len(archive_infos)
|
2016-01-16 22:42:54 +00:00
|
|
|
|
2021-05-14 16:17:04 +00:00
|
|
|
pi = ProgressIndicatorPercent(
|
|
|
|
total=num_archives, msg="Checking archives %3.1f%%", step=0.1, msgid="check.rebuild_refcounts"
|
|
|
|
)
|
2016-01-16 22:42:54 +00:00
|
|
|
with cache_if_remote(self.repository) as repository:
|
2016-10-18 01:57:42 +00:00
|
|
|
for i, info in enumerate(archive_infos):
|
2021-05-14 16:17:04 +00:00
|
|
|
pi.show(i)
|
2022-02-27 18:31:33 +00:00
|
|
|
logger.info(f"Analyzing archive {info.name} ({i + 1}/{num_archives})")
|
2016-08-15 02:17:41 +00:00
|
|
|
archive_id = info.id
|
2016-01-16 22:42:54 +00:00
|
|
|
if archive_id not in self.chunks:
|
2022-04-06 23:35:35 +00:00
|
|
|
logger.error("Archive metadata block %s is missing!", bin_to_hex(archive_id))
|
2016-01-16 22:42:54 +00:00
|
|
|
self.error_found = True
|
2016-08-15 02:17:41 +00:00
|
|
|
del self.manifest.archives[info.name]
|
2016-01-16 22:42:54 +00:00
|
|
|
continue
|
|
|
|
mark_as_possibly_superseded(archive_id)
|
|
|
|
cdata = self.repository.get(archive_id)
|
2022-04-06 23:35:35 +00:00
|
|
|
try:
|
2022-08-23 01:25:06 +00:00
|
|
|
_, data = self.repo_objs.parse(archive_id, cdata)
|
2022-04-06 23:35:35 +00:00
|
|
|
except IntegrityError as integrity_error:
|
|
|
|
logger.error("Archive metadata block %s is corrupted: %s", bin_to_hex(archive_id), integrity_error)
|
|
|
|
self.error_found = True
|
|
|
|
del self.manifest.archives[info.name]
|
|
|
|
continue
|
2016-08-14 23:11:33 +00:00
|
|
|
archive = ArchiveItem(internal_dict=msgpack.unpackb(data))
|
2022-05-17 00:00:00 +00:00
|
|
|
if archive.version != 2:
|
2016-01-16 22:42:54 +00:00
|
|
|
raise Exception("Unknown archive metadata version")
|
|
|
|
items_buffer = ChunkBuffer(self.key)
|
|
|
|
items_buffer.write_chunk = add_callback
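# route rebuilt item metadata chunks through add_callback (defined earlier) so they get stored and referenced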
|
|
|
|
for item in robust_iterator(archive):
|
2016-05-31 23:45:45 +00:00
|
|
|
if "chunks" in item:
|
2019-02-01 22:30:45 +00:00
|
|
|
verify_file_chunks(info.name, item)
|
2016-01-16 22:42:54 +00:00
|
|
|
items_buffer.add(item)
|
|
|
|
items_buffer.flush(flush=True)
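# all items were re-added above, so the archive's old item list and item_ptrs chunks can be marked as superseded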
|
2022-08-23 01:25:06 +00:00
|
|
|
for previous_item_id in archive_get_items(
|
|
|
|
archive, repo_objs=self.repo_objs, repository=self.repository
|
|
|
|
):
|
2016-01-16 22:42:54 +00:00
|
|
|
mark_as_possibly_superseded(previous_item_id)
|
2022-08-05 20:06:08 +00:00
|
|
|
for previous_item_ptr in archive.item_ptrs:
|
|
|
|
mark_as_possibly_superseded(previous_item_ptr)
|
2022-08-23 01:25:06 +00:00
|
|
|
archive.item_ptrs = archive_put_items(
|
|
|
|
items_buffer.chunks, repo_objs=self.repo_objs, add_reference=add_reference
|
|
|
|
)
|
2018-07-01 00:34:48 +00:00
|
|
|
data = msgpack.packb(archive.as_dict())
|
2016-01-16 22:42:54 +00:00
|
|
|
new_archive_id = self.key.id_hash(data)
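# the rebuilt metadata usually hashes to a new id; store it and point the manifest entry at it below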
|
2022-08-23 01:25:06 +00:00
|
|
|
cdata = self.repo_objs.format(new_archive_id, {}, data)
|
2022-06-10 13:59:29 +00:00
|
|
|
add_reference(new_archive_id, len(data), cdata)
|
2016-08-15 02:17:41 +00:00
|
|
|
self.manifest.archives[info.name] = (new_archive_id, info.ts)
|
2021-05-14 16:17:04 +00:00
|
|
|
pi.finish()
|
2014-02-24 22:37:21 +00:00
|
|
|
|
2015-08-09 10:43:57 +00:00
|
|
|
def orphan_chunks_check(self):
|
|
|
|
if self.check_all:
|
2016-04-16 15:48:47 +00:00
|
|
|
unused = {id_ for id_, entry in self.chunks.iteritems() if entry.refcount == 0}
|
2015-08-09 10:43:57 +00:00
|
|
|
orphaned = unused - self.possibly_superseded
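# unused chunks that were not superseded during the archive rebuild are genuinely orphaned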
|
|
|
|
if orphaned:
|
2022-02-27 18:31:33 +00:00
|
|
|
logger.error(f"{len(orphaned)} orphaned objects found!")
|
2015-12-07 23:21:46 +00:00
|
|
|
self.error_found = True
|
2018-04-28 19:03:08 +00:00
|
|
|
if self.repair and unused:
|
|
|
|
logger.info(
|
|
|
|
"Deleting %d orphaned and %d superseded objects..." % (len(orphaned), len(self.possibly_superseded))
|
|
|
|
)
|
2015-08-09 10:43:57 +00:00
|
|
|
for id_ in unused:
|
|
|
|
self.repository.delete(id_)
|
2018-04-28 19:03:08 +00:00
|
|
|
logger.info("Finished deleting orphaned/superseded objects.")
|
2015-08-09 10:43:57 +00:00
|
|
|
else:
|
2016-04-03 15:58:15 +00:00
|
|
|
logger.info("Orphaned objects check skipped (needs all archives checked).")
|
2015-08-09 10:43:57 +00:00
|
|
|
|
2022-12-17 15:48:54 +00:00
|
|
|
def finish(self):
|
2014-02-24 22:37:21 +00:00
|
|
|
if self.repair:
|
2018-04-27 21:57:47 +00:00
|
|
|
logger.info("Writing Manifest.")
|
2014-02-24 22:37:21 +00:00
|
|
|
self.manifest.write()
|
2018-06-24 17:08:49 +00:00
|
|
|
logger.info("Committing repo.")
|
2022-12-17 15:48:54 +00:00
|
|
|
self.repository.commit(compact=False)
|
2016-04-07 09:29:52 +00:00
|
|
|
|
|
|
|
|
|
|
|
class ArchiveRecreater:
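"""Recreate existing archives: re-filter their items and optionally rechunk / recompress their data."""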
|
|
|
|
class Interrupted(Exception):
|
|
|
|
def __init__(self, metadata=None):
|
|
|
|
self.metadata = metadata or {}
|
|
|
|
|
|
|
|
@staticmethod
|
|
|
|
def is_temporary_archive(archive_name):
|
|
|
|
return archive_name.endswith(".recreate")
|
2022-07-06 13:37:27 +00:00
|
|
|
|
2016-04-07 09:29:52 +00:00
|
|
|
def __init__(
|
|
|
|
self,
|
|
|
|
manifest,
|
|
|
|
cache,
|
|
|
|
matcher,
|
2017-01-29 17:13:51 +00:00
|
|
|
exclude_caches=False,
|
|
|
|
exclude_if_present=None,
|
|
|
|
keep_exclude_tags=False,
|
2017-04-04 13:11:15 +00:00
|
|
|
chunker_params=None,
|
|
|
|
compression=None,
|
|
|
|
recompress=False,
|
|
|
|
always_recompress=False,
|
2016-11-19 18:09:47 +00:00
|
|
|
dry_run=False,
|
|
|
|
stats=False,
|
|
|
|
progress=False,
|
|
|
|
file_status_printer=None,
|
2019-11-16 10:03:34 +00:00
|
|
|
timestamp=None,
|
|
|
|
checkpoint_interval=1800,
|
|
|
|
):
|
2016-04-07 09:29:52 +00:00
|
|
|
self.manifest = manifest
|
2022-08-23 01:25:06 +00:00
|
|
|
self.repository = manifest.repository
|
|
|
|
self.key = manifest.key
|
|
|
|
self.repo_objs = manifest.repo_objs
|
2016-04-07 09:29:52 +00:00
|
|
|
self.cache = cache
|
|
|
|
|
|
|
|
self.matcher = matcher
|
|
|
|
self.exclude_caches = exclude_caches
|
|
|
|
self.exclude_if_present = exclude_if_present or []
|
2017-01-29 17:13:51 +00:00
|
|
|
self.keep_exclude_tags = keep_exclude_tags
|
2016-04-07 09:29:52 +00:00
|
|
|
|
2016-12-02 19:19:59 +00:00
|
|
|
self.rechunkify = chunker_params is not None
|
|
|
|
if self.rechunkify:
|
|
|
|
logger.debug("Rechunking archives to %s", chunker_params)
|
2016-04-07 09:29:52 +00:00
|
|
|
self.chunker_params = chunker_params or CHUNKER_PARAMS
|
2017-04-04 13:11:15 +00:00
|
|
|
self.recompress = recompress
|
2016-07-31 21:09:57 +00:00
|
|
|
self.always_recompress = always_recompress
|
2016-04-18 23:13:10 +00:00
|
|
|
self.compression = compression or CompressionSpec("none")
|
|
|
|
self.seen_chunks = set()
|
2016-04-07 09:29:52 +00:00
|
|
|
|
2019-11-16 10:03:34 +00:00
|
|
|
self.timestamp = timestamp
|
2016-04-07 09:29:52 +00:00
|
|
|
self.dry_run = dry_run
|
|
|
|
self.stats = stats
|
|
|
|
self.progress = progress
|
|
|
|
self.print_file_status = file_status_printer or (lambda *args: None)
|
2016-12-02 17:15:11 +00:00
|
|
|
self.checkpoint_interval = None if dry_run else checkpoint_interval
|
2016-04-07 09:29:52 +00:00
|
|
|
|
2016-08-02 13:53:29 +00:00
|
|
|
def recreate(self, archive_name, comment=None, target_name=None):
|
2016-04-07 09:29:52 +00:00
|
|
|
assert not self.is_temporary_archive(archive_name)
|
|
|
|
archive = self.open_archive(archive_name)
|
2016-11-19 15:49:20 +00:00
|
|
|
target = self.create_target(archive, target_name)
|
2016-04-07 09:29:52 +00:00
|
|
|
if self.exclude_if_present or self.exclude_caches:
|
|
|
|
self.matcher_add_tagged_dirs(archive)
|
2023-01-14 18:05:50 +00:00
|
|
|
if (
|
|
|
|
self.matcher.empty()
|
|
|
|
and not self.recompress
|
|
|
|
and not target.recreate_rechunkify
|
|
|
|
and comment is None
|
|
|
|
and target_name is None
|
|
|
|
):
|
|
|
|
# nothing to do
|
2017-06-03 13:47:01 +00:00
|
|
|
return False
|
2016-11-19 15:49:20 +00:00
|
|
|
self.process_items(archive, target)
|
2016-08-02 13:53:29 +00:00
|
|
|
replace_original = target_name is None
|
2016-12-02 10:09:52 +00:00
|
|
|
self.save(archive, target, comment, replace_original=replace_original)
|
2017-06-03 13:47:01 +00:00
|
|
|
return True
|
2016-04-07 09:29:52 +00:00
|
|
|
|
2016-11-19 15:49:20 +00:00
|
|
|
def process_items(self, archive, target):
|
2016-04-07 09:29:52 +00:00
|
|
|
matcher = self.matcher
|
|
|
|
|
|
|
|
for item in archive.iter_items():
|
2016-05-31 23:45:45 +00:00
|
|
|
if not matcher.match(item.path):
|
|
|
|
self.print_file_status("x", item.path)
|
2016-04-07 09:29:52 +00:00
|
|
|
continue
|
|
|
|
if self.dry_run:
|
2016-05-31 23:45:45 +00:00
|
|
|
self.print_file_status("-", item.path)
|
2016-04-07 09:29:52 +00:00
|
|
|
else:
|
2016-11-19 15:49:20 +00:00
|
|
|
self.process_item(archive, target, item)
|
2016-04-07 09:29:52 +00:00
|
|
|
if self.progress:
|
|
|
|
target.stats.show_progress(final=True)
|
|
|
|
|
|
|
|
def process_item(self, archive, target, item):
|
2021-04-26 15:50:21 +00:00
|
|
|
status = file_status(item.mode)
|
2016-05-31 23:45:45 +00:00
|
|
|
if "chunks" in item:
|
2021-04-26 15:50:21 +00:00
|
|
|
self.print_file_status(status, item.path)
|
|
|
|
status = None
|
2016-11-19 18:09:47 +00:00
|
|
|
self.process_chunks(archive, target, item)
|
2016-04-07 09:29:52 +00:00
|
|
|
target.stats.nfiles += 1
|
2018-03-10 14:41:01 +00:00
|
|
|
target.add_item(item, stats=target.stats)
|
2021-04-26 15:50:21 +00:00
|
|
|
self.print_file_status(status, item.path)
|
2016-04-07 09:29:52 +00:00
|
|
|
|
|
|
|
def process_chunks(self, archive, target, item):
|
|
|
|
if not self.recompress and not target.recreate_rechunkify:
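# fast path: no recompression and no rechunking requested, so reuse the existing chunks and just bump their refcounts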
|
2022-06-10 18:36:58 +00:00
|
|
|
for chunk_id, size in item.chunks:
|
2016-04-07 09:29:52 +00:00
|
|
|
self.cache.chunk_incref(chunk_id, target.stats)
|
2016-05-31 23:45:45 +00:00
|
|
|
return item.chunks
|
2016-12-02 11:54:27 +00:00
|
|
|
chunk_iterator = self.iter_chunks(archive, target, list(item.chunks))
|
2017-04-03 19:48:06 +00:00
|
|
|
chunk_processor = partial(self.chunk_processor, target)
|
2018-03-10 14:11:08 +00:00
|
|
|
target.process_file_chunks(item, self.cache, target.stats, self.progress, chunk_iterator, chunk_processor)
|
2016-11-19 18:09:47 +00:00
|
|
|
|
2020-12-15 01:37:26 +00:00
|
|
|
def chunk_processor(self, target, chunk):
|
2021-01-08 18:16:47 +00:00
|
|
|
chunk_id, data = cached_hash(chunk, self.key.id_hash)
|
2016-11-19 18:09:47 +00:00
|
|
|
if chunk_id in self.seen_chunks:
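# chunk was already written during this recreate run; only increment its refcount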
|
|
|
|
return self.cache.chunk_incref(chunk_id, target.stats)
|
|
|
|
overwrite = self.recompress
|
|
|
|
if self.recompress and not self.always_recompress and chunk_id in self.cache.chunks:
|
|
|
|
# Check if this chunk is already compressed the way we want it
|
2022-09-08 17:56:37 +00:00
|
|
|
old_meta = self.repo_objs.parse_meta(chunk_id, self.repository.get(chunk_id, read_data=False))
|
2022-09-04 20:34:58 +00:00
|
|
|
compr_hdr = bytes((old_meta["ctype"], old_meta["clevel"]))
|
|
|
|
compressor_cls, level = Compressor.detect(compr_hdr)
|
2022-08-23 01:25:06 +00:00
|
|
|
if (
|
2022-09-07 12:28:54 +00:00
|
|
|
compressor_cls.name == self.repo_objs.compressor.decide({}, data).name
|
2022-08-23 01:25:06 +00:00
|
|
|
and level == self.repo_objs.compressor.level
|
|
|
|
):
|
2022-07-05 00:38:09 +00:00
|
|
|
# Stored chunk has the same compression method and level as we wanted
|
2016-11-19 18:09:47 +00:00
|
|
|
overwrite = False
|
2022-09-05 00:53:28 +00:00
|
|
|
chunk_entry = self.cache.add_chunk(chunk_id, {}, data, stats=target.stats, overwrite=overwrite, wait=False)
|
2017-03-05 04:19:32 +00:00
|
|
|
self.cache.repository.async_response(wait=False)
|
2016-12-02 10:39:10 +00:00
|
|
|
self.seen_chunks.add(chunk_entry.id)
|
|
|
|
return chunk_entry
|
2016-11-19 18:09:47 +00:00
|
|
|
|
2016-12-02 11:54:27 +00:00
|
|
|
def iter_chunks(self, archive, target, chunks):
|
2022-06-10 18:36:58 +00:00
|
|
|
chunk_iterator = archive.pipeline.fetch_many([chunk_id for chunk_id, _ in chunks])
|
2016-04-07 09:29:52 +00:00
|
|
|
if target.recreate_rechunkify:
|
|
|
|
# The target.chunker will read the file contents through ChunkIteratorFileWrapper chunk-by-chunk
|
|
|
|
# (does not load the entire file into memory)
|
|
|
|
file = ChunkIteratorFileWrapper(chunk_iterator)
|
2016-12-02 10:20:26 +00:00
|
|
|
yield from target.chunker.chunkify(file)
|
2016-11-19 18:09:47 +00:00
|
|
|
else:
|
|
|
|
for chunk in chunk_iterator:
|
2020-12-15 01:37:26 +00:00
|
|
|
yield Chunk(chunk, size=len(chunk), allocation=CH_DATA)
|
2016-03-18 02:16:12 +00:00
|
|
|
|
2016-11-19 18:09:47 +00:00
|
|
|
def save(self, archive, target, comment=None, replace_original=True):
|
2016-04-07 09:29:52 +00:00
|
|
|
if self.dry_run:
|
2016-11-19 18:09:47 +00:00
|
|
|
return
|
|
|
|
if comment is None:
|
|
|
|
comment = archive.metadata.get("comment", "")
|
2019-11-16 10:03:34 +00:00
|
|
|
|
|
|
|
# Remember the original start time so the statistics printed below can refer to it
|
|
|
|
if self.stats:
|
|
|
|
_start = target.start
|
|
|
|
|
|
|
|
if self.timestamp is None:
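# no explicit timestamp given: carry the original archive's time / time_end over into the recreated archive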
|
|
|
|
additional_metadata = {
|
|
|
|
"time": archive.metadata.time,
|
|
|
|
"time_end": archive.metadata.get("time_end") or archive.metadata.time,
|
|
|
|
"cmdline": archive.metadata.cmdline,
|
|
|
|
# but also remember recreate metadata:
|
|
|
|
"recreate_cmdline": sys.argv,
|
|
|
|
}
|
|
|
|
else:
|
|
|
|
additional_metadata = {
|
|
|
|
"cmdline": archive.metadata.cmdline,
|
|
|
|
# but also remember recreate metadata:
|
|
|
|
"recreate_cmdline": sys.argv,
|
|
|
|
}
|
|
|
|
|
2022-10-03 20:27:42 +00:00
|
|
|
target.save(comment=comment, timestamp=self.timestamp, additional_metadata=additional_metadata)
|
2016-11-19 18:09:47 +00:00
|
|
|
if replace_original:
|
|
|
|
archive.delete(Statistics(), progress=self.progress)
|
|
|
|
target.rename(archive.name)
|
|
|
|
if self.stats:
|
2019-11-16 10:03:34 +00:00
|
|
|
target.start = _start
|
2022-12-04 09:51:02 +00:00
|
|
|
target.end = archive_ts_now()
|
2022-06-23 12:13:19 +00:00
|
|
|
log_multi(str(target), str(target.stats))
|
2016-04-07 09:29:52 +00:00
|
|
|
|
|
|
|
def matcher_add_tagged_dirs(self, archive):
|
|
|
|
"""Add excludes to the matcher created by exclude_cache and exclude_if_present."""
|
2022-07-06 13:37:27 +00:00
|
|
|
|
2016-04-07 09:29:52 +00:00
|
|
|
def exclude(dir, tag_item):
|
2017-01-29 17:13:51 +00:00
|
|
|
if self.keep_exclude_tags:
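# keep the tag file itself in the archive, but exclude everything else under the tagged directory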
|
2017-03-20 22:04:45 +00:00
|
|
|
tag_files.append(PathPrefixPattern(tag_item.path, recurse_dir=False))
|
|
|
|
tagged_dirs.append(FnmatchPattern(dir + "/", recurse_dir=False))
|
2016-04-07 09:29:52 +00:00
|
|
|
else:
|
2017-03-20 22:04:45 +00:00
|
|
|
tagged_dirs.append(PathPrefixPattern(dir, recurse_dir=False))
|
2016-04-07 09:29:52 +00:00
|
|
|
|
|
|
|
matcher = self.matcher
|
|
|
|
tag_files = []
|
|
|
|
tagged_dirs = []
|
2020-06-14 19:43:28 +00:00
|
|
|
|
2016-04-07 09:29:52 +00:00
|
|
|
for item in archive.iter_items(
|
2020-06-14 19:43:28 +00:00
|
|
|
filter=lambda item: os.path.basename(item.path) == CACHE_TAG_NAME or matcher.match(item.path)
|
|
|
|
):
|
2017-01-29 17:13:51 +00:00
|
|
|
dir, tag_file = os.path.split(item.path)
|
|
|
|
if tag_file in self.exclude_if_present:
|
|
|
|
exclude(dir, item)
|
2020-06-14 19:43:28 +00:00
|
|
|
elif self.exclude_caches and tag_file == CACHE_TAG_NAME and stat.S_ISREG(item.mode):
|
2022-05-11 21:52:04 +00:00
|
|
|
file = open_item(archive, item)
|
2020-06-14 19:43:28 +00:00
|
|
|
if file.read(len(CACHE_TAG_CONTENTS)) == CACHE_TAG_CONTENTS:
|
|
|
|
exclude(dir, item)
|
2017-03-20 22:04:45 +00:00
|
|
|
matcher.add(tag_files, IECommand.Include)
|
|
|
|
matcher.add(tagged_dirs, IECommand.ExcludeNoRecurse)
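# tag files stay included, while the tagged directories are excluded without recursing into them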
|
2016-04-07 09:29:52 +00:00
|
|
|
|
2016-11-19 15:49:20 +00:00
|
|
|
def create_target(self, archive, target_name=None):
|
|
|
|
"""Create target archive."""
|
2016-08-02 13:53:29 +00:00
|
|
|
target_name = target_name or archive.name + ".recreate"
|
2016-11-19 15:49:20 +00:00
|
|
|
target = self.create_target_archive(target_name)
|
2016-04-07 09:29:52 +00:00
|
|
|
# If the archives use the same chunker params, then don't rechunkify
|
2016-12-02 19:19:59 +00:00
|
|
|
source_chunker_params = tuple(archive.metadata.get("chunker_params", []))
|
2019-01-05 03:38:06 +00:00
|
|
|
if len(source_chunker_params) == 4 and isinstance(source_chunker_params[0], int):
|
|
|
|
# this is a borg < 1.2 chunker_params tuple, no chunker algo specified, but we only had buzhash:
|
2019-02-13 03:36:09 +00:00
|
|
|
source_chunker_params = (CH_BUZHASH,) + source_chunker_params
|
2016-12-02 19:19:59 +00:00
|
|
|
target.recreate_rechunkify = self.rechunkify and source_chunker_params != target.chunker_params
|
|
|
|
if target.recreate_rechunkify:
|
|
|
|
logger.debug(
|
|
|
|
"Rechunking archive from %s to %s", source_chunker_params or "(unknown)", target.chunker_params
|
|
|
|
)
|
2017-07-29 14:11:33 +00:00
|
|
|
target.process_file_chunks = ChunksProcessor(
|
|
|
|
cache=self.cache,
|
|
|
|
key=self.key,
|
|
|
|
add_item=target.add_item,
|
|
|
|
write_checkpoint=target.write_checkpoint,
|
2017-10-29 09:53:12 +00:00
|
|
|
checkpoint_interval=self.checkpoint_interval,
|
|
|
|
rechunkify=target.recreate_rechunkify,
|
|
|
|
).process_file_chunks
|
2022-10-02 12:09:19 +00:00
|
|
|
target.chunker = get_chunker(*target.chunker_params, seed=self.key.chunk_seed, sparse=False)
|
2016-11-19 15:49:20 +00:00
|
|
|
return target
|
2016-04-07 09:29:52 +00:00
|
|
|
|
|
|
|
def create_target_archive(self, name):
|
|
|
|
target = Archive(
|
|
|
|
self.manifest,
|
|
|
|
name,
|
|
|
|
create=True,
|
|
|
|
progress=self.progress,
|
|
|
|
chunker_params=self.chunker_params,
|
|
|
|
cache=self.cache,
|
2017-04-03 19:48:06 +00:00
|
|
|
checkpoint_interval=self.checkpoint_interval,
|
|
|
|
)
|
2016-04-07 09:29:52 +00:00
|
|
|
return target
|
|
|
|
|
|
|
|
def open_archive(self, name, **kwargs):
|
2022-08-23 01:25:06 +00:00
|
|
|
return Archive(self.manifest, name, cache=self.cache, **kwargs)
|