Mirror of https://github.com/borgbackup/borg.git

blacken the code

Thomas Waldmann 2024-08-14 16:29:17 +02:00
parent d59306f48b
commit 1231c961fb
GPG key ID: 243ACFA951F78E01
12 changed files with 73 additions and 40 deletions
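For context: "blackening" means running the Black autoformatter over the tree, which is why every hunk below only rewraps or rejoins lines without changing behavior. A minimal sketch of the same transformation via Black's Python API follows; the 120-character line length is an assumption inferred from where these diffs wrap, the authoritative value lives in the project's Black configuration:

import black

src = (
    "def get_repository(location, *, create, exclusive, lock_wait, lock,"
    " append_only, make_parent_dirs, storage_quota, args, v1_or_v2):\n"
    "    ...\n"
)
# format_str applies the same rewrite as the black CLI; definitions longer
# than mode.line_length get their parameter lists exploded, exactly as in
# the first hunk below.
print(black.format_str(src, mode=black.Mode(line_length=120)))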

View file

@@ -31,7 +31,9 @@
logger = create_logger(__name__)
def get_repository(location, *, create, exclusive, lock_wait, lock, append_only, make_parent_dirs, storage_quota, args, v1_or_v2):
def get_repository(
location, *, create, exclusive, lock_wait, lock, append_only, make_parent_dirs, storage_quota, args, v1_or_v2
):
if location.proto in ("ssh", "socket"):
RemoteRepoCls = RemoteRepository if v1_or_v2 else RemoteRepository3
repository = RemoteRepoCls(
@@ -209,7 +211,8 @@ def wrapper(self, args, **kwargs):
acceptable_versions = (1, 2) if v1_or_v2 else (3,)
if repository.version not in acceptable_versions:
raise Error(
f"This borg version only accepts version {' or '.join(acceptable_versions)} repos for --other-repo.")
f"This borg version only accepts version {' or '.join(acceptable_versions)} repos for --other-repo."
)
kwargs["other_repository"] = repository
if manifest or cache:
manifest_ = Manifest.load(

View file

@@ -11,6 +11,7 @@
from ..repository3 import Repository3
from ..logger import create_logger
logger = create_logger()
@@ -32,7 +33,13 @@ def garbage_collect(self):
logger.info("Getting object IDs present in the repository...")
self.repository_chunks = self.get_repository_chunks()
logger.info("Computing object IDs used by archives...")
self.used_chunks, self.wanted_chunks, self.total_files, self.total_size, self.archives_count = self.analyze_archives()
(
self.used_chunks,
self.wanted_chunks,
self.total_files,
self.total_size,
self.archives_count,
) = self.analyze_archives()
self.report_and_delete()
logger.info("Finished compaction / garbage collection...")
@@ -109,8 +116,7 @@ def report_and_delete(self):
if unused:
logger.info(f"Deleting {len(unused)} unused objects...")
pi = ProgressIndicatorPercent(
total=len(unused), msg="Deleting unused objects %3.1f%%", step=0.1,
msgid="compact.report_and_delete"
total=len(unused), msg="Deleting unused objects %3.1f%%", step=0.1, msgid="compact.report_and_delete"
)
for i, id in enumerate(unused):
pi.show(i)

View file

@@ -51,7 +51,6 @@ def do_delete(self, args, repository):
self.print_warning("Aborted.", wc=None)
return
def build_parser_delete(self, subparsers, common_parser, mid_common_parser):
from ._common import process_epilog, define_archive_filters_group

View file

@@ -832,6 +832,7 @@ class AdHocCache(ChunksMixin):
Chunks that were not added during the current AdHocCache lifetime won't have correct size set
(0 bytes) and will have an infinite reference count (MAX_VALUE).
"""
def __init__(self, manifest, warn_if_unencrypted=True, lock_wait=None, iec=False):
assert isinstance(manifest, Manifest)
self.manifest = manifest
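To make the docstring's bookkeeping concrete, here is a toy illustration (not borg's implementation; the MAX_VALUE constant and the tuple layout are assumptions) of how an ad hoc index can distinguish chunks it merely knows exist from chunks added during its own lifetime:

MAX_VALUE = 2**32 - 1  # assumed "infinite refcount" sentinel

class ToyAdHocIndex:
    def __init__(self):
        self.chunks = {}  # chunk id -> (refcount, size)

    def know_existing(self, chunk_id):
        # learned from the repository: size unknown (0 bytes), refcount pinned
        self.chunks.setdefault(chunk_id, (MAX_VALUE, 0))

    def add(self, chunk_id, size):
        refcount, _ = self.chunks.get(chunk_id, (0, 0))
        if refcount == MAX_VALUE:
            # size becomes known, but the reference count stays "infinite"
            self.chunks[chunk_id] = (MAX_VALUE, size)
        else:
            self.chunks[chunk_id] = (refcount + 1, size)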

View file

@@ -134,7 +134,9 @@ def _find_locks(self, *, only_exclusive=False, only_mine=False):
found_locks = []
for key in locks:
lock = locks[key]
if (not only_exclusive or lock["exclusive"]) and (not only_mine or (lock["hostid"], lock["processid"], lock["threadid"]) == self.id):
if (not only_exclusive or lock["exclusive"]) and (
not only_mine or (lock["hostid"], lock["processid"], lock["threadid"]) == self.id
):
found_locks.append(lock)
return found_locks
@@ -150,7 +152,9 @@ def acquire(self):
key = self._create_lock(exclusive=self.is_exclusive)
# obviously we have a race condition here: other client(s) might have created exclusive
# lock(s) at the same time in parallel. thus we have to check again.
time.sleep(self.race_recheck_delay) # give other clients time to notice our exclusive lock, stop creating theirs
time.sleep(
self.race_recheck_delay
) # give other clients time to notice our exclusive lock, stop creating theirs
exclusive_locks = self._find_locks(only_exclusive=True)
if self.is_exclusive:
if len(exclusive_locks) == 1 and exclusive_locks[0]["key"] == key:
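A condensed sketch of the create-then-recheck pattern that comment describes, for a store with no atomic test-and-set (the store helpers here are hypothetical, not borg's API): optimistically write our exclusive lock, wait long enough for concurrent writers to become visible, then keep the lock only if ours is the sole exclusive entry.

import time

class LockFailed(Exception):
    pass

def acquire_exclusive(store, race_recheck_delay=0.2):
    key = store.create_lock(exclusive=True)        # hypothetical helper
    time.sleep(race_recheck_delay)                 # let racing clients surface
    exclusive = store.list_locks(only_exclusive=True)
    if len(exclusive) == 1 and exclusive[0]["key"] == key:
        return key                                 # ours is the only exclusive lock
    store.delete_lock(key)                         # lost the race: back off
    raise LockFailed("another exclusive lock appeared concurrently")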

View file

@@ -8,6 +8,7 @@
from borgstore.store import ObjectNotFound, ItemInfo
from .logger import create_logger
logger = create_logger()
from .constants import * # NOQA
@@ -263,6 +264,7 @@ def load(cls, repository, operations, key=None, *, ro_cls=RepoObj):
if isinstance(repository, (Repository3, RemoteRepository3)):
from .helpers import msgpack
archives = {}
try:
infos = list(repository.store_list("archives"))
@@ -357,10 +359,7 @@ def write(self):
manifest_archives = StableDict(self.archives.get_raw_dict())
manifest = ManifestItem(
version=2,
archives=manifest_archives,
timestamp=self.timestamp,
config=StableDict(self.config),
version=2, archives=manifest_archives, timestamp=self.timestamp, config=StableDict(self.config)
)
data = self.key.pack_metadata(manifest.as_dict())
self.id = self.repo_objs.id_hash(data)

View file

@@ -945,7 +945,15 @@ def handle_error(unpacked):
v1_or_v2={"since": parse_version("2.0.0b8"), "previously": True}, # TODO fix version
)
def open(
self, path, create=False, lock_wait=None, lock=True, exclusive=False, append_only=False, make_parent_dirs=False, v1_or_v2=False
self,
path,
create=False,
lock_wait=None,
lock=True,
exclusive=False,
append_only=False,
make_parent_dirs=False,
v1_or_v2=False,
):
"""actual remoting is done via self.call in the @api decorator"""

View file

@@ -373,7 +373,15 @@ def _resolve_path(self, path):
return os.path.realpath(path)
def open(
self, path, create=False, lock_wait=None, lock=True, exclusive=None, append_only=False, make_parent_dirs=False, v1_or_v2=False
self,
path,
create=False,
lock_wait=None,
lock=True,
exclusive=None,
append_only=False,
make_parent_dirs=False,
v1_or_v2=False,
):
self.RepoCls = Repository if v1_or_v2 else Repository3
self.rpc_methods = self._rpc_methods if v1_or_v2 else self._rpc_methods3
@@ -975,7 +983,15 @@ def handle_error(unpacked):
v1_or_v2={"since": parse_version("2.0.0b8"), "previously": True}, # TODO fix version
)
def open(
self, path, create=False, lock_wait=None, lock=True, exclusive=False, append_only=False, make_parent_dirs=False, v1_or_v2=False
self,
path,
create=False,
lock_wait=None,
lock=True,
exclusive=False,
append_only=False,
make_parent_dirs=False,
v1_or_v2=False,
):
"""actual remoting is done via self.call in the @api decorator"""

View file

@@ -196,7 +196,13 @@ def close(self):
def info(self):
"""return some infos about the repo (must be opened first)"""
info = dict(id=self.id, version=self.version, storage_quota_use=self.storage_quota_use, storage_quota=self.storage_quota, append_only=self.append_only)
info = dict(
id=self.id,
version=self.version,
storage_quota_use=self.storage_quota_use,
storage_quota=self.storage_quota,
append_only=self.append_only,
)
return info
def commit(self, compact=True, threshold=0.1):
@@ -204,6 +210,7 @@ def commit(self, compact=True, threshold=0.1):
def check(self, repair=False, max_duration=0):
"""Check repository consistency"""
def log_error(msg):
nonlocal obj_corrupted
obj_corrupted = True
@@ -281,7 +288,6 @@ def list(self, limit=None, marker=None, mask=0, value=0):
return ids[:limit]
return ids
def scan(self, limit=None, state=None):
"""
list (the next) <limit> chunk IDs from the repository.
@@ -312,9 +318,7 @@ def get(self, id, read_data=True):
obj = self.store.load(key, size=hdr_size + extra_size)
hdr = obj[0:hdr_size]
if len(hdr) != hdr_size:
raise IntegrityError(
f"Object too small [id {id_hex}]: expected {hdr_size}, got {len(hdr)} bytes"
)
raise IntegrityError(f"Object too small [id {id_hex}]: expected {hdr_size}, got {len(hdr)} bytes")
meta_size = RepoObj.obj_header.unpack(hdr)[0]
if meta_size > extra_size:
# we did not get enough, need to load more, but not all.
@@ -322,14 +326,11 @@
obj = self.store.load(key, size=hdr_size + meta_size)
meta = obj[hdr_size : hdr_size + meta_size]
if len(meta) != meta_size:
raise IntegrityError(
f"Object too small [id {id_hex}]: expected {meta_size}, got {len(meta)} bytes"
)
raise IntegrityError(f"Object too small [id {id_hex}]: expected {meta_size}, got {len(meta)} bytes")
return hdr + meta
except StoreObjectNotFound:
raise self.ObjectNotFound(id, self.path) from None
def get_many(self, ids, read_data=True, is_preloaded=False):
for id_ in ids:
yield self.get(id_, read_data=read_data)

View file

@@ -4,11 +4,7 @@
from borgstore.store import Store
from ..locking3 import (
Lock,
LockFailed,
NotLocked,
)
from ..locking3 import Lock, LockFailed, NotLocked
ID1 = "foo", 1, 1
ID2 = "bar", 2, 2