
mount: Use RepositoryCache

This makes a full repository mount a bit faster, but it's still
too slow and memory hungry.
Jonas Borgström 2014-03-26 22:42:20 +01:00
parent 8e078b5f91
commit af262482de
4 changed files with 47 additions and 41 deletions
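
A note on the approach: RepositoryCache is a read-through cache around a
RemoteRepository, so chunks fetched over the network once can be re-read
locally while the mount walks the archive metadata. The sketch below is a
simplified illustration of that idea (an in-memory dict instead of the real
cache's storage, showing only the get/get_many interface the diffs rely on);
it is not attic's actual implementation:

    class IllustrativeRepositoryCache:
        """Hypothetical stand-in for attic.remote.RepositoryCache."""

        def __init__(self, repository):
            self.repository = repository  # the RemoteRepository being wrapped
            self._cache = {}              # chunk id -> raw chunk data

        def get(self, id):
            # Fetch from the remote repository only on the first access.
            if id not in self._cache:
                self._cache[id] = self.repository.get(id)
            return self._cache[id]

        def get_many(self, ids):
            # Same read-through behaviour for batched lookups.
            for id in ids:
                yield self.get(id)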

View file

@@ -5,7 +5,7 @@ import errno
import shutil
import tempfile
from attic.key import key_factory
from attic.remote import RemoteRepository, RepositoryCache
from attic.remote import cache_if_remote
import msgpack
import os
import socket
@@ -609,11 +609,7 @@ class ArchiveChecker:
for item in unpacker:
yield item
if isinstance(self.repository, RemoteRepository):
repository = RepositoryCache(self.repository)
else:
repository = self.repository
repository = cache_if_remote(self.repository)
num_archives = len(self.manifest.archives)
for i, (name, info) in enumerate(list(self.manifest.archives.items()), 1):
self.report_progress('Analyzing archive {} ({}/{})'.format(name, i, num_archives))

View file

@@ -1,5 +1,5 @@
from configparser import RawConfigParser
from attic.remote import RemoteRepository, RepositoryCache
from attic.remote import cache_if_remote
import msgpack
import os
from binascii import hexlify
@@ -146,10 +146,7 @@ class Cache(object):
print('Initializing cache...')
self.chunks.clear()
unpacker = msgpack.Unpacker()
if isinstance(self.repository, RemoteRepository):
repository = RepositoryCache(self.repository)
else:
repository = self.repository
repository = cache_if_remote(self.repository)
for name, info in self.manifest.archives.items():
archive_id = info[b'id']
cdata = repository.get(archive_id)

View file

@@ -1,12 +1,14 @@
from collections import defaultdict
import errno
import llfuse
import msgpack
import os
import stat
import time
from attic.archive import Archive
from attic.helpers import daemonize
from attic.remote import cache_if_remote
# Does this version of llfuse support ns precision?
have_fuse_mtime_ns = hasattr(llfuse.EntryAttributes, 'st_mtime_ns')
@@ -18,7 +20,7 @@ class AtticOperations(llfuse.Operations):
super(AtticOperations, self).__init__()
self._inode_count = 0
self.key = key
self.repository = repository
self.repository = cache_if_remote(repository)
self.items = {}
self.parent = {}
self.contents = defaultdict(dict)
@@ -33,36 +35,41 @@ class AtticOperations(llfuse.Operations):
def process_archive(self, archive, prefix=[]):
"""Build fuse inode hierarcy from archive metadata
"""
for item in archive.iter_items():
segments = prefix + os.fsencode(os.path.normpath(item[b'path'])).split(b'/')
num_segments = len(segments)
parent = 1
for i, segment in enumerate(segments, 1):
# Insert a default root inode if needed
if self._inode_count == 0 and segment:
archive_inode = self.allocate_inode()
self.items[archive_inode] = self.default_dir
self.parent[archive_inode] = parent
# Leaf segment?
if i == num_segments:
if b'source' in item and stat.S_ISREG(item[b'mode']):
inode = self._find_inode(item[b'source'], prefix)
self.items[inode][b'nlink'] = self.items[inode].get(b'nlink', 1) + 1
unpacker = msgpack.Unpacker()
for key, chunk in zip(archive.metadata[b'items'], self.repository.get_many(archive.metadata[b'items'])):
data = self.key.decrypt(key, chunk)
unpacker.feed(data)
for item in unpacker:
segments = prefix + os.fsencode(os.path.normpath(item[b'path'])).split(b'/')
del item[b'path']
num_segments = len(segments)
parent = 1
for i, segment in enumerate(segments, 1):
# Insert a default root inode if needed
if self._inode_count == 0 and segment:
archive_inode = self.allocate_inode()
self.items[archive_inode] = self.default_dir
self.parent[archive_inode] = parent
# Leaf segment?
if i == num_segments:
if b'source' in item and stat.S_ISREG(item[b'mode']):
inode = self._find_inode(item[b'source'], prefix)
self.items[inode][b'nlink'] = self.items[inode].get(b'nlink', 1) + 1
else:
inode = self.allocate_inode()
self.items[inode] = item
self.parent[inode] = parent
if segment:
self.contents[parent][segment] = inode
elif segment in self.contents[parent]:
parent = self.contents[parent][segment]
else:
inode = self.allocate_inode()
self.items[inode] = item
self.parent[inode] = parent
if segment:
self.contents[parent][segment] = inode
elif segment in self.contents[parent]:
parent = self.contents[parent][segment]
else:
inode = self.allocate_inode()
self.items[inode] = self.default_dir
self.parent[inode] = parent
if segment:
self.contents[parent][segment] = inode
parent = inode
self.items[inode] = self.default_dir
self.parent[inode] = parent
if segment:
self.contents[parent][segment] = inode
parent = inode
def allocate_inode(self):
self._inode_count += 1

View file

@@ -298,3 +298,9 @@ class RepositoryCache:
# Consume any pending requests
for _ in repository_iterator:
pass
def cache_if_remote(repository):
if isinstance(repository, RemoteRepository):
return RepositoryCache(repository)
return repository
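
With this helper in place, the call sites shown above collapse to a single
line. A usage sketch (the function and variable names here are illustrative
placeholders, not part of the diff):

    from attic.remote import cache_if_remote

    def read_archive_chunks(repository, key, ids):
        # Local repositories pass through unchanged; remote ones get wrapped
        # in a RepositoryCache so repeated reads avoid extra round trips.
        repository = cache_if_remote(repository)
        for id, chunk in zip(ids, repository.get_many(ids)):
            yield key.decrypt(id, chunk)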