commit af262482de
parent 8e078b5f91

    mount: Use RepositoryCache

    This makes a full repository mount a bit faster but it's still too slow
    and memory hungry.

 4 files changed, 47 insertions(+), 41 deletions(-)
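In short, the commit replaces per-call-site isinstance() checks with a single helper, cache_if_remote(), and routes the FUSE item loop through the (possibly cached) repository. A minimal before/after sketch of that pattern, using only names that appear in the diff below:

    # Before: every caller special-cases remote repositories.
    if isinstance(self.repository, RemoteRepository):
        repository = RepositoryCache(self.repository)
    else:
        repository = self.repository

    # After: the decision lives in one helper in attic/remote.py.
    repository = cache_if_remote(self.repository)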
--- a/attic/archive.py
+++ b/attic/archive.py
@@ -5,7 +5,7 @@ import errno
 import shutil
 import tempfile
 from attic.key import key_factory
-from attic.remote import RemoteRepository, RepositoryCache
+from attic.remote import cache_if_remote
 import msgpack
 import os
 import socket
@@ -609,11 +609,7 @@ class ArchiveChecker:
                 for item in unpacker:
                     yield item
 
-        if isinstance(self.repository, RemoteRepository):
-            repository = RepositoryCache(self.repository)
-        else:
-            repository = self.repository
-
+        repository = cache_if_remote(self.repository)
         num_archives = len(self.manifest.archives)
         for i, (name, info) in enumerate(list(self.manifest.archives.items()), 1):
             self.report_progress('Analyzing archive {} ({}/{})'.format(name, i, num_archives))
--- a/attic/cache.py
+++ b/attic/cache.py
@@ -1,5 +1,5 @@
 from configparser import RawConfigParser
-from attic.remote import RemoteRepository, RepositoryCache
+from attic.remote import cache_if_remote
 import msgpack
 import os
 from binascii import hexlify
@@ -146,10 +146,7 @@ class Cache(object):
         print('Initializing cache...')
         self.chunks.clear()
         unpacker = msgpack.Unpacker()
-        if isinstance(self.repository, RemoteRepository):
-            repository = RepositoryCache(self.repository)
-        else:
-            repository = self.repository
+        repository = cache_if_remote(self.repository)
         for name, info in self.manifest.archives.items():
             archive_id = info[b'id']
             cdata = repository.get(archive_id)
--- a/attic/fuse.py
+++ b/attic/fuse.py
@@ -1,12 +1,14 @@
 from collections import defaultdict
 import errno
 import llfuse
+import msgpack
 import os
 import stat
 import time
 from attic.archive import Archive
 from attic.helpers import daemonize
+from attic.remote import cache_if_remote
 
 # Does this version of llfuse support ns precision?
 have_fuse_mtime_ns = hasattr(llfuse.EntryAttributes, 'st_mtime_ns')
 
@@ -18,7 +20,7 @@ class AtticOperations(llfuse.Operations):
         super(AtticOperations, self).__init__()
         self._inode_count = 0
         self.key = key
-        self.repository = repository
+        self.repository = cache_if_remote(repository)
         self.items = {}
         self.parent = {}
         self.contents = defaultdict(dict)
@@ -33,36 +35,41 @@ class AtticOperations(llfuse.Operations):
     def process_archive(self, archive, prefix=[]):
         """Build fuse inode hierarcy from archive metadata
         """
-        for item in archive.iter_items():
-            segments = prefix + os.fsencode(os.path.normpath(item[b'path'])).split(b'/')
-            num_segments = len(segments)
-            parent = 1
-            for i, segment in enumerate(segments, 1):
-                # Insert a default root inode if needed
-                if self._inode_count == 0 and segment:
-                    archive_inode = self.allocate_inode()
-                    self.items[archive_inode] = self.default_dir
-                    self.parent[archive_inode] = parent
-                # Leaf segment?
-                if i == num_segments:
-                    if b'source' in item and stat.S_ISREG(item[b'mode']):
-                        inode = self._find_inode(item[b'source'], prefix)
-                        self.items[inode][b'nlink'] = self.items[inode].get(b'nlink', 1) + 1
-                    else:
-                        inode = self.allocate_inode()
-                        self.items[inode] = item
-                        self.parent[inode] = parent
-                    if segment:
-                        self.contents[parent][segment] = inode
-                elif segment in self.contents[parent]:
-                    parent = self.contents[parent][segment]
-                else:
-                    inode = self.allocate_inode()
-                    self.items[inode] = self.default_dir
-                    self.parent[inode] = parent
-                    if segment:
-                        self.contents[parent][segment] = inode
-                    parent = inode
+        unpacker = msgpack.Unpacker()
+        for key, chunk in zip(archive.metadata[b'items'], self.repository.get_many(archive.metadata[b'items'])):
+            data = self.key.decrypt(key, chunk)
+            unpacker.feed(data)
+            for item in unpacker:
+                segments = prefix + os.fsencode(os.path.normpath(item[b'path'])).split(b'/')
+                del item[b'path']
+                num_segments = len(segments)
+                parent = 1
+                for i, segment in enumerate(segments, 1):
+                    # Insert a default root inode if needed
+                    if self._inode_count == 0 and segment:
+                        archive_inode = self.allocate_inode()
+                        self.items[archive_inode] = self.default_dir
+                        self.parent[archive_inode] = parent
+                    # Leaf segment?
+                    if i == num_segments:
+                        if b'source' in item and stat.S_ISREG(item[b'mode']):
+                            inode = self._find_inode(item[b'source'], prefix)
+                            self.items[inode][b'nlink'] = self.items[inode].get(b'nlink', 1) + 1
+                        else:
+                            inode = self.allocate_inode()
+                            self.items[inode] = item
+                            self.parent[inode] = parent
+                        if segment:
+                            self.contents[parent][segment] = inode
+                    elif segment in self.contents[parent]:
+                        parent = self.contents[parent][segment]
+                    else:
+                        inode = self.allocate_inode()
+                        self.items[inode] = self.default_dir
+                        self.parent[inode] = parent
+                        if segment:
+                            self.contents[parent][segment] = inode
+                        parent = inode
 
     def allocate_inode(self):
         self._inode_count += 1
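The rewritten process_archive() no longer pulls items through Archive.iter_items(); it batches all item-metadata chunk reads into a single get_many() call (which, for remote repositories, is served through the RepositoryCache), decrypts each chunk, and streams decoded items out of a msgpack Unpacker. A simplified, self-contained sketch of that read path; iter_archive_items is a hypothetical helper, and the attic APIs are assumed to behave as shown in the hunk above:

    import msgpack

    def iter_archive_items(archive, repository, key):
        # Stream decoded item dicts out of the archive's metadata chunks.
        unpacker = msgpack.Unpacker()
        chunk_ids = archive.metadata[b'items']
        # get_many() batches the reads; with cache_if_remote() the chunks of a
        # remote repository are cached locally across accesses.
        for id_, chunk in zip(chunk_ids, repository.get_many(chunk_ids)):
            unpacker.feed(key.decrypt(id_, chunk))
            for item in unpacker:
                yield item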
--- a/attic/remote.py
+++ b/attic/remote.py
@@ -298,3 +298,9 @@ class RepositoryCache:
         # Consume any pending requests
         for _ in repository_iterator:
             pass
+
+
+def cache_if_remote(repository):
+    if isinstance(repository, RemoteRepository):
+        return RepositoryCache(repository)
+    return repository
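The new helper makes the caching transparent to callers: local repositories pass through untouched, remote ones are wrapped. A usage sketch, assuming RepositoryCache exposes the same get()/get_many() interface as the repository it wraps (which the call sites in this commit rely on):

    repository = cache_if_remote(self.repository)
    # Same code path for local and remote repositories; when remote, repeated
    # reads of a chunk hit the local cache instead of the network.
    cdata = repository.get(archive_id)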