1
0
Fork 0
mirror of https://github.com/borgbackup/borg.git synced 2024-12-26 01:37:20 +00:00

Rename Chunk.meta[compress] => Chunk.meta[compressor]

This commit is contained in:
Marian Beermann 2017-03-31 13:43:48 +02:00
parent 88647595ac
commit 0c7410104c
3 changed files with 16 additions and 12 deletions

View file

@@ -970,12 +970,12 @@ def process_file(self, path, st, cache, ignore_inode=False):
if chunks is not None:
item.chunks = chunks
else:
-compress = self.compression_decider1.decide(path)
-self.file_compression_logger.debug('%s -> compression %s', path, compress.name)
+compressor = self.compression_decider1.decide(path)
+self.file_compression_logger.debug('%s -> compression %s', path, compressor.name)
with backup_io('open'):
fh = Archive._open_rb(path)
with os.fdopen(fh, 'rb') as fd:
-self.chunk_file(item, cache, self.stats, backup_io_iter(self.chunker.chunkify(fd, fh)), compress=compress)
+self.chunk_file(item, cache, self.stats, backup_io_iter(self.chunker.chunkify(fd, fh)), compressor=compressor)
if not is_special_file:
# we must not memorize special files, because the contents of e.g. a
# block or char device will change without its mtime/size/inode changing.
@@ -1652,20 +1652,20 @@ def process_chunks(self, archive, target, item):
self.cache.chunk_incref(chunk_id, target.stats)
return item.chunks
chunk_iterator = self.iter_chunks(archive, target, list(item.chunks))
-compress = self.compression_decider1.decide(item.path)
-chunk_processor = partial(self.chunk_processor, target, compress)
+compressor = self.compression_decider1.decide(item.path)
+chunk_processor = partial(self.chunk_processor, target, compressor)
target.chunk_file(item, self.cache, target.stats, chunk_iterator, chunk_processor)
-def chunk_processor(self, target, compress, data):
+def chunk_processor(self, target, compressor, data):
chunk_id = self.key.id_hash(data)
if chunk_id in self.seen_chunks:
return self.cache.chunk_incref(chunk_id, target.stats)
-chunk = Chunk(data, compress=compress)
+chunk = Chunk(data, compressor=compressor)
overwrite = self.recompress
if self.recompress and not self.always_recompress and chunk_id in self.cache.chunks:
# Check if this chunk is already compressed the way we want it
old_chunk = self.key.decrypt(None, self.repository.get(chunk_id), decompress=False)
-if Compressor.detect(old_chunk.data).name == compress.name:
+if Compressor.detect(old_chunk.data).name == compressor.name:
# Stored chunk has the same compression we wanted
overwrite = False
chunk_entry = self.cache.add_chunk(chunk_id, chunk, target.stats, overwrite=overwrite, wait=False)

View file

@@ -12,15 +12,19 @@ which sets the .compressor attribute on the Key.
For chunks that emanate from files CompressionDecider1 may set a specific
Compressor based on patterns (this is the --compression-from option). This is stored
-as a Compressor instance in the "compress" key in the Chunk's meta dictionary.
+as a Compressor instance in the "compressor" key in the Chunk's meta dictionary.
-When compressing either the Compressor specified in the Chunk's meta dictionary
-is used, or the default Compressor of the key.
+When compressing (KeyBase.compress) either the Compressor specified in the Chunk's
+meta dictionary is used, or the default Compressor of the key.
The "auto" mode (e.g. --compression auto,lzma,4) is implemented as a meta Compressor,
meaning that Auto acts like a Compressor, but defers actual work to others (namely
LZ4 as a heuristic whether compression is worth it, and the specified Compressor
for the actual compression).
Decompression is normally handled through Compressor.decompress which will detect
which compressor has been used to compress the data and dispatch to the correct
decompressor.
"""
import zlib

View file

@@ -152,7 +152,7 @@ def id_hash(self, data):
def compress(self, chunk):
meta, data = chunk
-return meta.get('compress', self.compressor).compress(data)
+return meta.get('compressor', self.compressor).compress(data)
def encrypt(self, chunk):
pass