Merge pull request #965 from enkore/fix/mount-perf

borg mount: cache partially read data chunks
enkore 2016-04-23 18:07:01 +02:00
commit 65f442dc8d
2 changed files with 22 additions and 1 deletion

borg/archiver.py

@@ -1537,6 +1537,11 @@ class Archiver:
         To allow a regular user to use fstab entries, add the ``user`` option:
         ``/path/to/repo /mnt/point fuse.borgfs defaults,noauto,user 0 0``
+
+        The BORG_MOUNT_DATA_CACHE_ENTRIES environment variable is meant for advanced users
+        to tweak the performance. It sets the number of cached data chunks; additional
+        memory usage can be up to ~8 MiB times this number. The default is the number
+        of CPU cores.
         """)
         subparser = subparsers.add_parser('mount', parents=[common_parser], add_help=False,
                                           description=self.do_mount.__doc__,
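
The ~8 MiB per entry matches borg's default maximum chunk size (2**23 bytes), so the worst case is roughly capacity times 8 MiB. A minimal sketch of that bound, assuming the default chunker settings (illustration, not borg code):

    # Rough upper bound on extra memory used by the mount data cache.
    # Assumes borg's default chunker upper bound of 2**23 bytes (8 MiB);
    # real chunks are usually smaller, so actual usage is lower.
    import os

    MAX_CHUNK_SIZE = 2 ** 23  # 8 MiB

    capacity = int(os.environ.get('BORG_MOUNT_DATA_CACHE_ENTRIES', os.cpu_count() or 1))
    print('worst-case cache memory: ~%d MiB' % (capacity * MAX_CHUNK_SIZE // 2 ** 20))

For instance, setting BORG_MOUNT_DATA_CACHE_ENTRIES=64 before running borg mount allows up to ~512 MiB of cached chunk data.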

borg/fuse.py

@@ -8,9 +8,13 @@ import tempfile
 import time
 from .archive import Archive
 from .helpers import daemonize, bigint_to_int
+from .logger import create_logger
+from .lrucache import LRUCache
 from distutils.version import LooseVersion

 import msgpack

+logger = create_logger()
+
 # Does this version of llfuse support ns precision?
 have_fuse_xtime_ns = hasattr(llfuse.EntryAttributes, 'st_mtime_ns')
@@ -54,6 +58,9 @@ class FuseOperations(llfuse.Operations):
         self.pending_archives = {}
         self.accounted_chunks = {}
         self.cache = ItemCache()
+        data_cache_capacity = int(os.environ.get('BORG_MOUNT_DATA_CACHE_ENTRIES', os.cpu_count() or 1))
+        logger.debug('mount data cache capacity: %d chunks', data_cache_capacity)
+        self.data_cache = LRUCache(capacity=data_cache_capacity, dispose=lambda _: None)
         if archive:
             self.process_archive(archive)
         else:
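
LRUCache is borg's own helper from borg/lrucache.py, used here with a no-op dispose since plain bytes need no cleanup on eviction. A simplified sketch of the interface this code relies on (not the real implementation):

    from collections import OrderedDict

    class LRUCacheSketch:
        """Simplified stand-in for borg.lrucache.LRUCache (illustration only)."""

        def __init__(self, capacity, dispose):
            self._cache = OrderedDict()  # insertion order doubles as LRU order
            self._capacity = capacity
            self._dispose = dispose      # called with every value that leaves the cache

        def __setitem__(self, key, value):
            while len(self._cache) >= self._capacity:
                _, evicted = self._cache.popitem(last=False)  # drop least recently used
                self._dispose(evicted)
            self._cache[key] = value

        def __getitem__(self, key):
            self._cache.move_to_end(key)  # a hit marks the entry as recently used
            return self._cache[key]

        def __contains__(self, key):
            return key in self._cache

        def __delitem__(self, key):
            self._dispose(self._cache.pop(key))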
@@ -229,7 +236,16 @@ class FuseOperations(llfuse.Operations):
                 offset -= s
                 continue
             n = min(size, s - offset)
-            _, data = self.key.decrypt(id, self.repository.get(id))
+            if id in self.data_cache:
+                data = self.data_cache[id]
+                if offset + n == len(data):
+                    # evict fully read chunk from cache
+                    del self.data_cache[id]
+            else:
+                _, data = self.key.decrypt(id, self.repository.get(id))
+                if offset + n < len(data):
+                    # chunk was only partially read, cache it
+                    self.data_cache[id] = data
             parts.append(data[offset:offset + n])
             offset = 0
             size -= n
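
The policy fits FUSE's typical sequential read pattern: the first read of a chunk decrypts and caches it, later reads hit the cache, and the read that consumes the chunk's tail evicts it, so each chunk is decrypted once rather than once per read() call. A standalone toy model of the same branch logic (hypothetical names, not borg code):

    # Toy model of the read-path caching policy above (names are illustrative).
    chunk_store = {b'id0': b'x' * 10}  # stands in for repository.get + key.decrypt
    data_cache = {}                    # stands in for the LRUCache

    def read_range(chunk_id, offset, n):
        if chunk_id in data_cache:
            data = data_cache[chunk_id]
            if offset + n == len(data):
                del data_cache[chunk_id]     # final read of the chunk: evict
        else:
            data = chunk_store[chunk_id]     # cache miss: "decrypt" the chunk
            if offset + n < len(data):
                data_cache[chunk_id] = data  # partial read: keep for later
        return data[offset:offset + n]

    assert read_range(b'id0', 0, 6) == b'x' * 6 and b'id0' in data_cache      # cached
    assert read_range(b'id0', 6, 4) == b'x' * 4 and b'id0' not in data_cache  # evicted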