mirror of https://github.com/borgbackup/borg.git
hashindex: remove .compact

Our on-disk formats only store used keys/values, so they are always compact on disk.
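As a side note, here is a minimal sketch (hypothetical names and format, not borg's real serializer) of the property the commit message relies on: a writer that emits only used key/value pairs produces the same dense file whether or not the in-memory table was vacuumed first, so a compact() call before writing is dead code.

import io
import struct

def write_index(entries: dict[bytes, int], f) -> None:
    # emit only used key/value pairs; empty or deleted buckets in whatever
    # in-memory table backs `entries` are simply never visited
    f.write(struct.pack("<I", len(entries)))        # count of used entries
    for key, value in entries.items():
        f.write(struct.pack("<B", len(key)))
        f.write(key)
        f.write(struct.pack("<Q", value))

buf = io.BytesIO()
write_index({b"id-1": 2, b"id-2": 1}, buf)
assert len(buf.getvalue()) == 4 + 2 * (1 + 4 + 8)   # header + two dense records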
parent 3c794fa980
commit 68143d6f99

4 changed files with 7 additions and 16 deletions
@@ -65,7 +65,7 @@ def save_chunk_index(self):
             # as we put the wrong size in there, we need to clean up the size:
             self.chunks[id] = ChunkIndexEntry(refcount=ChunkIndex.MAX_VALUE, size=0)
         # now self.chunks is an uptodate ChunkIndex, usable for general borg usage!
-        write_chunkindex_to_repo_cache(self.repository, self.chunks, compact=True, clear=True, force_write=True)
+        write_chunkindex_to_repo_cache(self.repository, self.chunks, clear=True, force_write=True)
         self.chunks = None  # nothing there (cleared!)

     def analyze_archives(self) -> Tuple[Set, Set, int, int, int]:
@@ -630,11 +630,8 @@ def load_chunks_hash(repository) -> bytes:
     return hash


-def write_chunkindex_to_repo_cache(repository, chunks, *, compact=False, clear=False, force_write=False):
+def write_chunkindex_to_repo_cache(repository, chunks, *, clear=False, force_write=False):
     cached_hash = load_chunks_hash(repository)
-    if compact:
-        # if we don't need the in-memory chunks index anymore:
-        chunks.compact()  # vacuum the hash table
     with io.BytesIO() as f:
         chunks.write(f)
         data = f.getvalue()
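The hunk above shows only the head of this function. A hedged, self-contained sketch of what the simplified writer plausibly does with `data` (the _CACHE dict, hash_data() and the key names below are toy stand-ins, not borg's real store API): write to the repo cache only when the content hash differs from the cached one, or when force_write is set.

import hashlib
import io

_CACHE: dict[str, bytes] = {}  # toy stand-in for the repository's cache namespace

def hash_data(data: bytes) -> bytes:
    return hashlib.sha256(data).digest()  # stand-in content hash

def load_chunks_hash(repository) -> bytes:
    return _CACHE.get("chunks.hash", b"")

def write_chunkindex_to_repo_cache(repository, chunks, *, clear=False, force_write=False):
    cached_hash = load_chunks_hash(repository)
    with io.BytesIO() as f:
        chunks.write(f)           # the on-disk format stores only used keys/values
        data = f.getvalue()
    if clear:
        chunks.clear()            # caller will not touch the in-memory index again
    new_hash = hash_data(data)
    if force_write or new_hash != cached_hash:
        _CACHE["chunks"] = data   # skip the write when nothing changed
        _CACHE["chunks.hash"] = new_hash
    return new_hash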
@@ -698,7 +695,7 @@ def build_chunkindex_from_repo(repository, *, disable_caches=False, cache_immedi
     logger.debug(f"queried {num_chunks} chunk IDs in {duration} s, ~{speed}/s")
     if cache_immediately:
         # immediately update cache/chunks, so we only rarely have to do it the slow way:
-        write_chunkindex_to_repo_cache(repository, chunks, compact=False, clear=False, force_write=True)
+        write_chunkindex_to_repo_cache(repository, chunks, clear=False, force_write=True)
     return chunks

@@ -770,8 +767,8 @@ def add_chunk(
         return ChunkListEntry(id, size)

     def _write_chunks_cache(self, chunks):
-        # this is called from .close, so we can clear/compact here:
-        write_chunkindex_to_repo_cache(self.repository, self._chunks, compact=True, clear=True)
+        # this is called from .close, so we can clear here:
+        write_chunkindex_to_repo_cache(self.repository, self._chunks, clear=True)
         self._chunks = None  # nothing there (cleared!)

     def refresh_lock(self, now):
@@ -54,9 +54,6 @@ class ChunkIndex:
         refcount = min(self.MAX_VALUE, v.refcount + refs)
         self[key] = v._replace(refcount=refcount, size=size)

-    def compact(self):
-        return 0
-
     def clear(self):
         pass

@@ -155,9 +152,6 @@ class NSIndex1:
         else:
             do_yield = key == marker

-    def compact(self):
-        return 0
-
     def clear(self):
         pass

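In these two pure-Python classes, compact() was already a stub returning 0 ("nothing freed"): a dict-backed table is always dense, and the method existed only for interface parity with index variants where vacuuming once made sense. A reduced, hypothetical illustration of the stub pattern being deleted:

class DictBackedIndex:
    # hypothetical reduced example, not borg's class: a dict is always
    # dense, so there are no tombstones to vacuum and compact() can only
    # ever be a no-op stub
    def __init__(self) -> None:
        self._d: dict[bytes, int] = {}

    def compact(self) -> int:
        return 0  # kept for interface parity only: zero bytes freed

    def clear(self) -> None:
        self._d.clear()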
@@ -193,7 +193,7 @@ def create(self):
             # to build the ChunkIndex the slow way by listing all the directories.
             from borg.cache import write_chunkindex_to_repo_cache

-            write_chunkindex_to_repo_cache(self, ChunkIndex(), compact=True, clear=True, force_write=True)
+            write_chunkindex_to_repo_cache(self, ChunkIndex(), clear=True, force_write=True)
         finally:
             self.store.close()

@@ -385,7 +385,7 @@ def check_object(obj):
             # if we did a full pass in one go, we built a complete, uptodate ChunkIndex, cache it!
             from .cache import write_chunkindex_to_repo_cache

-            write_chunkindex_to_repo_cache(self, chunks, compact=True, clear=True, force_write=True)
+            write_chunkindex_to_repo_cache(self, chunks, clear=True, force_write=True)
         except StoreObjectNotFound:
             # it can be that there is no "data/" at all, then it crashes when iterating infos.
             pass