mirror of https://github.com/borgbackup/borg.git synced 2024-12-29 11:16:43 +00:00

blacken the code

Thomas Waldmann 2024-08-14 16:29:17 +02:00
parent d59306f48b
commit 1231c961fb
No known key found for this signature in database
GPG key ID: 243ACFA951F78E01
12 changed files with 73 additions and 40 deletions
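All hunks in this commit are mechanical rewrapping produced by the black code formatter; no behavior changes. As a hedged illustration (not part of the commit), black's public API can reproduce the kind of rewrap seen in the first hunk below; the 120-character line length is an assumption about this repository's black configuration:

import black

# Hypothetical before-image: a signature longer than the assumed line length.
LONG_DEF = (
    "def get_repository(location, *, create, exclusive, lock_wait, lock, append_only, "
    "make_parent_dirs, storage_quota, args, v1_or_v2):\n"
    "    pass\n"
)

# black moves the parameters onto their own indented line once the signature no
# longer fits, which is exactly the shape the first hunk below ends up with.
print(black.format_str(LONG_DEF, mode=black.Mode(line_length=120)))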

View file

@@ -31,7 +31,9 @@
logger = create_logger(__name__)
def get_repository(location, *, create, exclusive, lock_wait, lock, append_only, make_parent_dirs, storage_quota, args, v1_or_v2):
def get_repository(
location, *, create, exclusive, lock_wait, lock, append_only, make_parent_dirs, storage_quota, args, v1_or_v2
):
if location.proto in ("ssh", "socket"):
RemoteRepoCls = RemoteRepository if v1_or_v2 else RemoteRepository3
repository = RemoteRepoCls(
@@ -209,7 +211,8 @@ def wrapper(self, args, **kwargs):
acceptable_versions = (1, 2) if v1_or_v2 else (3,)
if repository.version not in acceptable_versions:
raise Error(
f"This borg version only accepts version {' or '.join(acceptable_versions)} repos for --other-repo.")
f"This borg version only accepts version {' or '.join(acceptable_versions)} repos for --other-repo."
)
kwargs["other_repository"] = repository
if manifest or cache:
manifest_ = Manifest.load(

View file

@@ -11,6 +11,7 @@
from ..repository3 import Repository3
from ..logger import create_logger
logger = create_logger()
@@ -32,7 +33,13 @@ def garbage_collect(self):
logger.info("Getting object IDs present in the repository...")
self.repository_chunks = self.get_repository_chunks()
logger.info("Computing object IDs used by archives...")
self.used_chunks, self.wanted_chunks, self.total_files, self.total_size, self.archives_count = self.analyze_archives()
(
self.used_chunks,
self.wanted_chunks,
self.total_files,
self.total_size,
self.archives_count,
) = self.analyze_archives()
self.report_and_delete()
logger.info("Finished compaction / garbage collection...")
@@ -109,8 +116,7 @@ def report_and_delete(self):
if unused:
logger.info(f"Deleting {len(unused)} unused objects...")
pi = ProgressIndicatorPercent(
total=len(unused), msg="Deleting unused objects %3.1f%%", step=0.1,
msgid="compact.report_and_delete"
total=len(unused), msg="Deleting unused objects %3.1f%%", step=0.1, msgid="compact.report_and_delete"
)
for i, id in enumerate(unused):
pi.show(i)

View file

@@ -51,7 +51,6 @@ def do_delete(self, args, repository):
self.print_warning("Aborted.", wc=None)
return
def build_parser_delete(self, subparsers, common_parser, mid_common_parser):
from ._common import process_epilog, define_archive_filters_group

View file

@@ -832,6 +832,7 @@ class AdHocCache(ChunksMixin):
Chunks that were not added during the current AdHocCache lifetime won't have correct size set
(0 bytes) and will have an infinite reference count (MAX_VALUE).
"""
def __init__(self, manifest, warn_if_unencrypted=True, lock_wait=None, iec=False):
assert isinstance(manifest, Manifest)
self.manifest = manifest
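The docstring above describes AdHocCache's behavior for chunks it did not itself add during this run: their size is unknown (recorded as 0 bytes) and their reference count is pinned to a sentinel maximum so it never drops to zero. A purely illustrative sketch of that rule (the entry layout and the sentinel value are hypothetical; only the behavior follows the docstring):

MAX_VALUE = 0xFFFFFFFF  # hypothetical sentinel standing in for the real constant

def seen_chunk_entry(added_this_run, size):
    # Chunks added during this AdHocCache lifetime get a real refcount and size;
    # pre-existing chunks get the sentinel refcount and size 0, per the docstring.
    if added_this_run:
        return {"refcount": 1, "size": size}
    return {"refcount": MAX_VALUE, "size": 0}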

View file

@@ -1192,7 +1192,7 @@ def default(self, o):
return {"id": bin_to_hex(o.id), "location": o._location.canonical_path()}
if isinstance(o, Archive):
return o.info()
if isinstance(o, (AdHocWithFilesCache, )):
if isinstance(o, (AdHocWithFilesCache,)):
return {"path": o.path}
if isinstance(o, AdHocCache):
return {}

View file

@@ -65,7 +65,7 @@ class Lock:
matter how (e.g. if an exception occurred).
"""
def __init__(self, store, exclusive=False, sleep=None, timeout=1.0, stale=30*60, id=None):
def __init__(self, store, exclusive=False, sleep=None, timeout=1.0, stale=30 * 60, id=None):
self.store = store
self.is_exclusive = exclusive
self.sleep = sleep
@@ -75,7 +75,7 @@ def __init__(self, store, exclusive=False, sleep=None, timeout=1.0, stale=30*60,
self.retry_delay_min = 1.0
self.retry_delay_max = 5.0
self.stale_td = datetime.timedelta(seconds=stale) # ignore/delete it if older
self.refresh_td = datetime.timedelta(seconds=stale//2) # don't refresh it if younger
self.refresh_td = datetime.timedelta(seconds=stale // 2) # don't refresh it if younger
self.last_refresh_dt = None
self.id = id or platform.get_process_id()
assert len(self.id) == 3
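The two timedeltas above encode the staleness policy: with the default stale=30*60 seconds, a lock older than 30 minutes may be ignored or deleted, while a lock younger than half of that (15 minutes) is not refreshed yet. A tiny worked example of those thresholds, using only the defaults visible in the diff:

import datetime

stale = 30 * 60                                        # seconds, as in the constructor default
stale_td = datetime.timedelta(seconds=stale)           # ignore/delete locks older than this
refresh_td = datetime.timedelta(seconds=stale // 2)    # don't refresh locks younger than this

age = datetime.timedelta(minutes=20)
print(age > refresh_td)  # True  -> old enough that the owner would refresh it
print(age > stale_td)    # False -> not yet stale, other clients must still honor it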
@@ -134,7 +134,9 @@ def _find_locks(self, *, only_exclusive=False, only_mine=False):
found_locks = []
for key in locks:
lock = locks[key]
if (not only_exclusive or lock["exclusive"]) and (not only_mine or (lock["hostid"], lock["processid"], lock["threadid"]) == self.id):
if (not only_exclusive or lock["exclusive"]) and (
not only_mine or (lock["hostid"], lock["processid"], lock["threadid"]) == self.id
):
found_locks.append(lock)
return found_locks
@@ -150,7 +152,9 @@ def acquire(self):
key = self._create_lock(exclusive=self.is_exclusive)
# obviously we have a race condition here: other client(s) might have created exclusive
# lock(s) at the same time in parallel. thus we have to check again.
time.sleep(self.race_recheck_delay) # give other clients time to notice our exclusive lock, stop creating theirs
time.sleep(
self.race_recheck_delay
) # give other clients time to notice our exclusive lock, stop creating theirs
exclusive_locks = self._find_locks(only_exclusive=True)
if self.is_exclusive:
if len(exclusive_locks) == 1 and exclusive_locks[0]["key"] == key:
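The acquire() hunk implements an optimistic locking protocol over a shared key/value store: write your own lock entry, sleep race_recheck_delay so clients racing with you can observe it, then re-list exclusive locks and keep yours only if it is the single exclusive entry. A minimal sketch of the exclusive case (only _create_lock/_find_locks come from the diff; the retry, backoff, and cleanup the real method performs are reduced to a comment):

import time

def try_acquire_exclusive_once(lock, race_recheck_delay=0.2):
    key = lock._create_lock(exclusive=True)            # announce our exclusive lock
    time.sleep(race_recheck_delay)                     # give racing clients time to notice it
    exclusive_locks = lock._find_locks(only_exclusive=True)
    if len(exclusive_locks) == 1 and exclusive_locks[0]["key"] == key:
        return True                                    # ours is the only exclusive lock: acquired
    # another client raced us: the real code removes its own entry, sleeps a
    # randomized retry delay, and tries again until the timeout expires
    return False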

View file

@@ -8,6 +8,7 @@
from borgstore.store import ObjectNotFound, ItemInfo
from .logger import create_logger
logger = create_logger()
from .constants import * # NOQA
@@ -263,6 +264,7 @@ def load(cls, repository, operations, key=None, *, ro_cls=RepoObj):
if isinstance(repository, (Repository3, RemoteRepository3)):
from .helpers import msgpack
archives = {}
try:
infos = list(repository.store_list("archives"))
@@ -357,10 +359,7 @@ def write(self):
manifest_archives = StableDict(self.archives.get_raw_dict())
manifest = ManifestItem(
version=2,
archives=manifest_archives,
timestamp=self.timestamp,
config=StableDict(self.config),
version=2, archives=manifest_archives, timestamp=self.timestamp, config=StableDict(self.config)
)
data = self.key.pack_metadata(manifest.as_dict())
self.id = self.repo_objs.id_hash(data)

View file

@@ -945,7 +945,15 @@ def handle_error(unpacked):
v1_or_v2={"since": parse_version("2.0.0b8"), "previously": True}, # TODO fix version
)
def open(
self, path, create=False, lock_wait=None, lock=True, exclusive=False, append_only=False, make_parent_dirs=False, v1_or_v2=False
self,
path,
create=False,
lock_wait=None,
lock=True,
exclusive=False,
append_only=False,
make_parent_dirs=False,
v1_or_v2=False,
):
"""actual remoting is done via self.call in the @api decorator"""

View file

@@ -373,7 +373,15 @@ def _resolve_path(self, path):
return os.path.realpath(path)
def open(
self, path, create=False, lock_wait=None, lock=True, exclusive=None, append_only=False, make_parent_dirs=False, v1_or_v2=False
self,
path,
create=False,
lock_wait=None,
lock=True,
exclusive=None,
append_only=False,
make_parent_dirs=False,
v1_or_v2=False,
):
self.RepoCls = Repository if v1_or_v2 else Repository3
self.rpc_methods = self._rpc_methods if v1_or_v2 else self._rpc_methods3
@@ -975,7 +983,15 @@ def handle_error(unpacked):
v1_or_v2={"since": parse_version("2.0.0b8"), "previously": True}, # TODO fix version
)
def open(
self, path, create=False, lock_wait=None, lock=True, exclusive=False, append_only=False, make_parent_dirs=False, v1_or_v2=False
self,
path,
create=False,
lock_wait=None,
lock=True,
exclusive=False,
append_only=False,
make_parent_dirs=False,
v1_or_v2=False,
):
"""actual remoting is done via self.call in the @api decorator"""

View file

@@ -22,7 +22,7 @@ def extract_crypted_data(cls, data: bytes) -> bytes:
# used for crypto type detection
hdr_size = cls.obj_header.size
hdr = cls.ObjHeader(*cls.obj_header.unpack(data[:hdr_size]))
return data[hdr_size + hdr.meta_size:]
return data[hdr_size + hdr.meta_size :]
def __init__(self, key):
self.key = key
@@ -80,7 +80,7 @@ def parse_meta(self, id: bytes, cdata: bytes, ro_type: str) -> dict:
hdr_size = self.obj_header.size
hdr = self.ObjHeader(*self.obj_header.unpack(obj[:hdr_size]))
assert hdr_size + hdr.meta_size <= len(obj)
meta_encrypted = obj[hdr_size:hdr_size + hdr.meta_size]
meta_encrypted = obj[hdr_size : hdr_size + hdr.meta_size]
meta_packed = self.key.decrypt(id, meta_encrypted)
meta = msgpack.unpackb(meta_packed)
if ro_type != ROBJ_DONTCARE and meta["type"] != ro_type:
@@ -114,7 +114,7 @@ def parse(
if ro_type != ROBJ_DONTCARE and meta_compressed["type"] != ro_type:
raise IntegrityError(f"ro_type expected: {ro_type} got: {meta_compressed['type']}")
assert hdr_size + hdr.meta_size + hdr.data_size <= len(obj)
data_encrypted = obj[hdr_size + hdr.meta_size:hdr_size + hdr.meta_size + hdr.data_size]
data_encrypted = obj[hdr_size + hdr.meta_size : hdr_size + hdr.meta_size + hdr.data_size]
data_compressed = self.key.decrypt(id, data_encrypted) # does not include the type/level bytes
if decompress:
ctype = meta_compressed["ctype"]
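The slice changes above are purely cosmetic (PEP 8 spacing around ':' in slices), but the layout they operate on is worth restating: a stored object is a fixed-size header followed by hdr.meta_size bytes of encrypted metadata and then hdr.data_size bytes of encrypted data; the same layout is re-parsed by the check() hunk further below. A small worked example of the slicing, with a hypothetical two-field header standing in for RepoObj.obj_header (the real ObjHeader also carries xxh64 hashes of meta and data):

import struct

obj_header = struct.Struct("<II")   # hypothetical: (meta_size, data_size) as 32-bit ints
hdr_size = obj_header.size

meta_encrypted = b"m" * 10
data_encrypted = b"d" * 20
obj = obj_header.pack(len(meta_encrypted), len(data_encrypted)) + meta_encrypted + data_encrypted

meta_size, data_size = obj_header.unpack(obj[:hdr_size])
assert obj[hdr_size : hdr_size + meta_size] == meta_encrypted
assert obj[hdr_size + meta_size : hdr_size + meta_size + data_size] == data_encrypted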

View file

@@ -104,7 +104,7 @@ def __init__(
self._send_log = send_log_cb or (lambda: None)
self.do_create = create
self.created = False
self.acceptable_repo_versions = (3, )
self.acceptable_repo_versions = (3,)
self.opened = False
self.append_only = append_only # XXX not implemented / not implementable
self.storage_quota = storage_quota # XXX not implemented
@@ -196,7 +196,13 @@ def close(self):
def info(self):
"""return some infos about the repo (must be opened first)"""
info = dict(id=self.id, version=self.version, storage_quota_use=self.storage_quota_use, storage_quota=self.storage_quota, append_only=self.append_only)
info = dict(
id=self.id,
version=self.version,
storage_quota_use=self.storage_quota_use,
storage_quota=self.storage_quota,
append_only=self.append_only,
)
return info
def commit(self, compact=True, threshold=0.1):
@@ -204,6 +210,7 @@ def commit(self, compact=True, threshold=0.1):
def check(self, repair=False, max_duration=0):
"""Check repository consistency"""
def log_error(msg):
nonlocal obj_corrupted
obj_corrupted = True
@@ -228,12 +235,12 @@ def log_error(msg):
obj_size = len(obj)
if obj_size >= hdr_size:
hdr = RepoObj.ObjHeader(*RepoObj.obj_header.unpack(obj[:hdr_size]))
meta = obj[hdr_size:hdr_size+hdr.meta_size]
meta = obj[hdr_size : hdr_size + hdr.meta_size]
if hdr.meta_size != len(meta):
log_error("metadata size incorrect.")
elif hdr.meta_hash != xxh64(meta):
log_error("metadata does not match checksum.")
data = obj[hdr_size+hdr.meta_size:hdr_size+hdr.meta_size+hdr.data_size]
data = obj[hdr_size + hdr.meta_size : hdr_size + hdr.meta_size + hdr.data_size]
if hdr.data_size != len(data):
log_error("data size incorrect.")
elif hdr.data_hash != xxh64(data):
@@ -276,12 +283,11 @@ def list(self, limit=None, marker=None, mask=0, value=0):
ids = []
if marker is not None:
idx = ids.index(marker)
ids = ids[idx + 1:]
ids = ids[idx + 1 :]
if limit is not None:
return ids[:limit]
return ids
def scan(self, limit=None, state=None):
"""
list (the next) <limit> chunk IDs from the repository.
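The list() and scan() code in the hunk above pages through chunk IDs with a marker/limit scheme: a marker resumes the listing just after the given ID, and a limit caps how many IDs come back. A tiny sketch of that scheme over a plain list (the helper name is illustrative):

def paginate(ids, marker=None, limit=None):
    # Resume just after the marker, mirroring ids.index(marker) + 1 in the diff.
    if marker is not None:
        ids = ids[ids.index(marker) + 1 :]
    # Cap the result at `limit` entries, if requested.
    return ids if limit is None else ids[:limit]

print(paginate(["a", "b", "c", "d", "e"], marker="b", limit=2))  # ['c', 'd']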
@@ -312,24 +318,19 @@ def get(self, id, read_data=True):
obj = self.store.load(key, size=hdr_size + extra_size)
hdr = obj[0:hdr_size]
if len(hdr) != hdr_size:
raise IntegrityError(
f"Object too small [id {id_hex}]: expected {hdr_size}, got {len(hdr)} bytes"
)
raise IntegrityError(f"Object too small [id {id_hex}]: expected {hdr_size}, got {len(hdr)} bytes")
meta_size = RepoObj.obj_header.unpack(hdr)[0]
if meta_size > extra_size:
# we did not get enough, need to load more, but not all.
# this should be rare, as chunk metadata is rather small usually.
obj = self.store.load(key, size=hdr_size + meta_size)
meta = obj[hdr_size:hdr_size + meta_size]
meta = obj[hdr_size : hdr_size + meta_size]
if len(meta) != meta_size:
raise IntegrityError(
f"Object too small [id {id_hex}]: expected {meta_size}, got {len(meta)} bytes"
)
raise IntegrityError(f"Object too small [id {id_hex}]: expected {meta_size}, got {len(meta)} bytes")
return hdr + meta
except StoreObjectNotFound:
raise self.ObjectNotFound(id, self.path) from None
def get_many(self, ids, read_data=True, is_preloaded=False):
for id_ in ids:
yield self.get(id_, read_data=read_data)
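get() above avoids fetching whole objects when only the metadata is needed: it first loads the header plus a small extra_size of slack, reads meta_size out of the header, and only issues a second, exactly-sized load in the rare case that the metadata did not fit into the slack. A sketch of that two-step fetch; load(key, size=...) stands in for the store.load call from the diff, and the one-field header is hypothetical:

import struct

HDR = struct.Struct("<I")  # hypothetical header holding only meta_size

def fetch_header_and_meta(load, key, extra_size=64):
    hdr_size = HDR.size
    obj = load(key, size=hdr_size + extra_size)        # header plus a little slack
    (meta_size,) = HDR.unpack(obj[:hdr_size])
    if meta_size > extra_size:
        # metadata was larger than the slack: one more load with the exact size
        obj = load(key, size=hdr_size + meta_size)
    return obj[:hdr_size] + obj[hdr_size : hdr_size + meta_size]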

View file

@@ -4,11 +4,7 @@
from borgstore.store import Store
from ..locking3 import (
Lock,
LockFailed,
NotLocked,
)
from ..locking3 import Lock, LockFailed, NotLocked
ID1 = "foo", 1, 1
ID2 = "bar", 2, 2