mirror of https://github.com/borgbackup/borg.git (synced 2024-12-28 02:38:43 +00:00)
borg mount: support exclusion group options and paths, fixes #2138
borg mount [options] repo_or_archive mountpoint [path ...]

paths: you can give some "root paths" (as with borg extract) to only
partially populate the FUSE filesystem.

The same applies to these exclusion group options:
--exclude
--exclude-from
--pattern
--patterns-from
--strip-components
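
For example (an illustrative invocation; repository, mountpoint, paths and pattern are made up):

    borg mount --exclude '*.tmp' --strip-components 1 repo::archive /mnt/borg home/user

This populates the mounted filesystem only with items below home/user, skips anything matching *.tmp, and removes the leading path component ("home") before the tree is presented under /mnt/borg.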
(cherry picked from commit 77df1cfe8c)
parent babdef574c
commit 52410b6976
2 changed files with 18 additions and 3 deletions
@@ -2517,6 +2517,9 @@ def define_archive_filters_group(subparser, *, sort_by=True, first_last=True):
         subparser.add_argument('-o', dest='options', type=str,
                                help='Extra mount options')
         define_archive_filters_group(subparser)
+        subparser.add_argument('paths', metavar='PATH', nargs='*', type=str,
+                               help='paths to extract; patterns are supported')
+        define_exclusion_group(subparser, strip_components=True)
         if parser.prog == 'borgfs':
             return parser
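
A minimal, runnable sketch of what the parser change above amounts to. This is a simplified stand-in, not borg's actual parser setup: the real code reuses define_exclusion_group() and defines many more options, and the argument names here are only the ones listed in the commit message.

    import argparse

    # Simplified stand-in for the extended ``borg mount`` command line
    # (hypothetical, for illustration only).
    parser = argparse.ArgumentParser(prog='borg mount')
    parser.add_argument('location', metavar='REPOSITORY_OR_ARCHIVE')
    parser.add_argument('mountpoint', metavar='MOUNTPOINT')
    parser.add_argument('paths', metavar='PATH', nargs='*', type=str,
                        help='paths to extract; patterns are supported')
    parser.add_argument('--exclude', metavar='PATTERN', action='append', default=[])
    parser.add_argument('--strip-components', type=int, default=0)

    args = parser.parse_args(['repo::archive', '/mnt/borg', 'home/user',
                              '--exclude', '*.tmp', '--strip-components', '1'])
    print(args.paths, args.exclude, args.strip_components)
    # ['home/user'] ['*.tmp'] 1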
@@ -17,6 +17,7 @@
 logger = create_logger()
 
 from .crypto.low_level import blake2b_128
+from .archiver import Archiver
 from .archive import Archive
 from .hashindex import FuseVersionsIndex
 from .helpers import daemonize, hardlinkable, signal_handler, format_file_size
@@ -118,7 +119,7 @@ def get(self, inode):
         else:
             raise ValueError('Invalid entry type in self.meta')
 
-    def iter_archive_items(self, archive_item_ids):
+    def iter_archive_items(self, archive_item_ids, filter=None):
         unpacker = msgpack.Unpacker()
 
         # Current offset in the metadata stream, which consists of all metadata chunks glued together
@@ -161,6 +162,11 @@ def write_bytes(append_msgpacked_bytes):
                     # Need more data, feed the next chunk
                     break
 
+                item = Item(internal_dict=item)
+                if filter and not filter(item):
+                    msgpacked_bytes = b''
+                    continue
+
                 current_item = msgpacked_bytes
                 current_item_length = len(current_item)
                 current_spans_chunks = stream_offset - current_item_length < chunk_begin
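
The filter hook added to iter_archive_items() follows a common pattern: reject items as early as possible, before any per-item bookkeeping. A small self-contained illustration of that pattern (a toy sketch, not borg's code; the chunk offset and inode bookkeeping are omitted):

    import msgpack

    def iter_items(chunks, filter=None):
        # Decode a stream of msgpack-encoded items from byte chunks and drop
        # items rejected by the optional filter callback before yielding them.
        unpacker = msgpack.Unpacker(raw=False)
        for chunk in chunks:
            unpacker.feed(chunk)
            for item in unpacker:
                if filter and not filter(item):
                    continue  # rejected early, no further processing
                yield item

    chunks = [msgpack.packb({'path': p}) for p in ('home/user/a', 'tmp/cache', 'home/user/b')]
    keep = lambda item: item['path'].startswith('home/')
    print([item['path'] for item in iter_items(chunks, filter=keep)])
    # ['home/user/a', 'home/user/b']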
@@ -197,7 +203,7 @@ def write_bytes(append_msgpacked_bytes):
             inode = write_offset + self.offset
             write_offset += 9
 
-            yield inode, Item(internal_dict=item)
+            yield inode, item
 
         self.write_offset = write_offset
@@ -325,7 +331,13 @@ def process_archive(self, archive_name, prefix=[]):
         t0 = time.perf_counter()
         archive = Archive(self.repository_uncached, self.key, self.manifest, archive_name,
                           consider_part_files=self.args.consider_part_files)
-        for item_inode, item in self.cache.iter_archive_items(archive.metadata.items):
+        strip_components = self.args.strip_components
+        matcher = Archiver.build_matcher(self.args.patterns, self.args.paths)
+        dummy = lambda x, y: None  # TODO: add hardlink_master support code, see Archiver
+        filter = Archiver.build_filter(matcher, dummy, strip_components)
+        for item_inode, item in self.cache.iter_archive_items(archive.metadata.items, filter=filter):
+            if strip_components:
+                item.path = os.sep.join(item.path.split(os.sep)[strip_components:])
             path = os.fsencode(item.path)
             is_dir = stat.S_ISDIR(item.mode)
             if is_dir:
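
The strip_components handling in process_archive() applies the same path rewrite as the corresponding borg extract option: the first N components are dropped from each item path before it is inserted into the FUSE tree. The transformation in isolation (standalone sketch; the printed results assume POSIX path separators):

    import os

    def strip_components(path, n):
        # Drop the first n components of a relative path, like the
        # item.path rewrite in the hunk above.
        return os.sep.join(path.split(os.sep)[n:])

    print(strip_components('home/user/docs/notes.txt', 1))  # user/docs/notes.txt
    print(strip_components('home/user/docs/notes.txt', 2))  # docs/notes.txt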