1
0
Fork 0
mirror of https://github.com/borgbackup/borg.git synced 2025-02-23 22:51:35 +00:00

Merge pull request #1401 from PlasmaPower/fuse-cache-partially-read

borg mount: cache partially read data chunks
This commit is contained in:
TW 2016-07-28 22:34:03 +02:00 committed by GitHub
commit e06b89abb3
2 changed files with 20 additions and 2 deletions

View file

@@ -1275,6 +1275,11 @@ def build_parser(self, args=None, prog=None):
option is given the command will run in the background until the filesystem
is ``umounted``.
The BORG_MOUNT_DATA_CACHE_ENTRIES environment variable is meant for advanced users
to tweak the performance. It sets the number of cached data chunks; additional
memory usage can be up to ~8 MiB times this number. The default is the number
of CPU cores.
For mount options, see the fuse(8) manual page. Additional mount options
supported by borg:

View file

@@ -13,6 +13,7 @@
from .archive import Archive
from .helpers import daemonize, bigint_to_int
from .logger import create_logger
from .lrucache import LRUCache
logger = create_logger()
@@ -62,6 +63,9 @@ def __init__(self, key, repository, manifest, archive, cached_repo):
self.pending_archives = {}
self.accounted_chunks = {}
self.cache = ItemCache()
data_cache_capacity = int(os.environ.get('BORG_MOUNT_DATA_CACHE_ENTRIES', os.cpu_count() or 1))
logger.debug('mount data cache capacity: %d chunks', data_cache_capacity)
self.data_cache = LRUCache(capacity=data_cache_capacity, dispose=lambda _: None)
if archive:
    self.process_archive(archive)
else:
@@ -282,8 +286,17 @@ def read(self, fh, offset, size):
offset -= s
continue
n = min(size, s - offset)
Removed:
chunk = self.key.decrypt(id, self.repository.get(id))
parts.append(chunk[offset:offset + n])
Added:
if id in self.data_cache:
    data = self.data_cache[id]
    if offset + n == len(data):
        # evict fully read chunk from cache
        del self.data_cache[id]
else:
    data = self.key.decrypt(id, self.repository.get(id))
    if offset + n < len(data):
        # chunk was only partially read, cache it
        self.data_cache[id] = data
parts.append(data[offset:offset + n])
offset = 0
size -= n
if not size: