import errno
import io
import os
import stat
import tempfile
import time
from collections import defaultdict
from distutils.version import LooseVersion
from signal import SIGINT
from zlib import adler32

import llfuse
import msgpack

from .logger import create_logger

logger = create_logger()

from .archive import Archive
from .helpers import daemonize
from .item import Item
from .lrucache import LRUCache
|
# Does this version of llfuse support ns precision?
have_fuse_xtime_ns = hasattr(llfuse.EntryAttributes, 'st_mtime_ns')

# llfuse 0.42 renamed/changed the main-loop API; pick the right call at import time.
fuse_version = LooseVersion(getattr(llfuse, '__version__', '0.1'))
if fuse_version >= '0.42':
    def fuse_main():
        """Run the fuse main loop; return the signal that terminated it (or None)."""
        return llfuse.main(workers=1)
else:
    def fuse_main():
        """Run the fuse main loop; older llfuse does not report the terminating signal."""
        llfuse.main(single=True)
        return None
|
class ItemCache:
    """Spill archive items to a temporary file and address them by fake inode numbers.

    The inode number handed out by add() is the byte offset of the packed item
    in the temporary file plus a large fixed offset, so these numbers do not
    collide with the directory inodes allocated by FuseOperations (which count
    up from 1).
    """

    def __init__(self):
        # unnamed temp file, deleted automatically when closed / process exits
        self.fd = tempfile.TemporaryFile(prefix='borg-tmp')
        # base offset keeping cache inodes clear of "real" inode numbers
        self.offset = 1000000

    def add(self, item):
        """Append *item* (an Item) to the cache file, return its inode number."""
        pos = self.fd.seek(0, io.SEEK_END)
        self.fd.write(msgpack.packb(item.as_dict()))
        return pos + self.offset

    def get(self, inode):
        """Load and return the Item previously stored under *inode*.

        Raises ValueError if *inode* is below the cache's offset range.
        """
        offset = inode - self.offset
        if offset < 0:
            raise ValueError('ItemCache.get() called with an invalid inode number')
        self.fd.seek(offset, io.SEEK_SET)
        item = next(msgpack.Unpacker(self.fd, read_size=1024))
        return Item(internal_dict=item)
|
2015-05-09 16:40:55 +00:00
|
|
|
class FuseOperations(llfuse.Operations):
    """Export archive as a fuse filesystem
    """
    # mount options, toggled by mount() when found in the -o option string
    allow_damaged_files = False
    versions = False
|
def __init__(self, key, repository, manifest, args, cached_repo):
|
2015-07-11 16:31:49 +00:00
|
|
|
super().__init__()
|
2016-06-04 16:26:55 +00:00
|
|
|
self.repository_uncached = repository
|
2016-01-16 23:28:54 +00:00
|
|
|
self.repository = cached_repo
|
2016-10-13 01:08:57 +00:00
|
|
|
self.args = args
|
2016-06-04 16:26:55 +00:00
|
|
|
self.manifest = manifest
|
|
|
|
self.key = key
|
|
|
|
self._inode_count = 0
|
2013-07-21 22:41:06 +00:00
|
|
|
self.items = {}
|
2013-07-23 11:40:54 +00:00
|
|
|
self.parent = {}
|
|
|
|
self.contents = defaultdict(dict)
|
2016-05-31 23:45:45 +00:00
|
|
|
self.default_dir = Item(mode=0o40755, mtime=int(time.time() * 1e9), uid=os.getuid(), gid=os.getgid())
|
2014-03-27 21:43:06 +00:00
|
|
|
self.pending_archives = {}
|
2014-03-28 21:51:09 +00:00
|
|
|
self.cache = ItemCache()
|
2016-04-23 16:03:05 +00:00
|
|
|
data_cache_capacity = int(os.environ.get('BORG_MOUNT_DATA_CACHE_ENTRIES', os.cpu_count() or 1))
|
|
|
|
logger.debug('mount data cache capacity: %d chunks', data_cache_capacity)
|
|
|
|
self.data_cache = LRUCache(capacity=data_cache_capacity, dispose=lambda _: None)
|
2016-06-04 16:26:55 +00:00
|
|
|
|
|
|
|
def _create_filesystem(self):
|
2016-07-28 22:14:06 +00:00
|
|
|
self._create_dir(parent=1) # first call, create root dir (inode == 1)
|
2016-10-13 01:08:57 +00:00
|
|
|
if self.args.location.archive:
|
|
|
|
archive = Archive(self.repository_uncached, self.key, self.manifest, self.args.location.archive,
|
|
|
|
consider_part_files=self.args.consider_part_files)
|
|
|
|
self.process_archive(archive)
|
2014-03-26 20:47:01 +00:00
|
|
|
else:
|
2016-10-13 03:21:52 +00:00
|
|
|
archive_names = (x.name for x in self.manifest.archives.list_considering(self.args))
|
2016-10-13 01:08:57 +00:00
|
|
|
for name in archive_names:
|
|
|
|
archive = Archive(self.repository_uncached, self.key, self.manifest, name,
|
|
|
|
consider_part_files=self.args.consider_part_files)
|
2016-06-04 16:26:55 +00:00
|
|
|
if self.versions:
|
|
|
|
# process archives immediately
|
|
|
|
self.process_archive(archive)
|
|
|
|
else:
|
|
|
|
# lazy load archives, create archive placeholder inode
|
|
|
|
archive_inode = self._create_dir(parent=1)
|
|
|
|
self.contents[1][os.fsencode(name)] = archive_inode
|
|
|
|
self.pending_archives[archive_inode] = archive
|
2014-03-26 20:47:01 +00:00
|
|
|
|
2016-07-09 22:20:11 +00:00
|
|
|
def mount(self, mountpoint, mount_options, foreground=False):
|
|
|
|
"""Mount filesystem on *mountpoint* with *mount_options*."""
|
|
|
|
options = ['fsname=borgfs', 'ro']
|
|
|
|
if mount_options:
|
|
|
|
options.extend(mount_options.split(','))
|
|
|
|
try:
|
|
|
|
options.remove('allow_damaged_files')
|
|
|
|
self.allow_damaged_files = True
|
|
|
|
except ValueError:
|
|
|
|
pass
|
2016-06-04 16:26:55 +00:00
|
|
|
try:
|
|
|
|
options.remove('versions')
|
|
|
|
self.versions = True
|
|
|
|
except ValueError:
|
|
|
|
pass
|
|
|
|
self._create_filesystem()
|
2016-07-09 22:20:11 +00:00
|
|
|
llfuse.init(self, mountpoint, options)
|
|
|
|
if not foreground:
|
|
|
|
daemonize()
|
|
|
|
|
|
|
|
# If the file system crashes, we do not want to umount because in that
|
|
|
|
# case the mountpoint suddenly appears to become empty. This can have
|
|
|
|
# nasty consequences, imagine the user has e.g. an active rsync mirror
|
|
|
|
# job - seeing the mountpoint empty, rsync would delete everything in the
|
|
|
|
# mirror.
|
|
|
|
umount = False
|
|
|
|
try:
|
|
|
|
signal = fuse_main()
|
2017-01-24 13:18:25 +00:00
|
|
|
# no crash and no signal (or it's ^C and we're in the foreground) -> umount request
|
|
|
|
umount = (signal is None or (signal == SIGINT and foreground))
|
2016-07-09 22:20:11 +00:00
|
|
|
finally:
|
|
|
|
llfuse.close(umount)
|
|
|
|
|
2016-07-28 22:14:06 +00:00
|
|
|
def _create_dir(self, parent):
|
|
|
|
"""Create directory
|
|
|
|
"""
|
|
|
|
ino = self.allocate_inode()
|
|
|
|
self.items[ino] = self.default_dir
|
|
|
|
self.parent[ino] = parent
|
|
|
|
return ino
|
|
|
|
|
2014-03-26 20:47:01 +00:00
|
|
|
def process_archive(self, archive, prefix=[]):
|
2014-09-07 12:54:18 +00:00
|
|
|
"""Build fuse inode hierarchy from archive metadata
|
2014-03-26 20:47:01 +00:00
|
|
|
"""
|
2016-09-15 18:31:57 +00:00
|
|
|
self.file_versions = {} # for versions mode: original path -> version
|
2014-03-26 21:42:20 +00:00
|
|
|
unpacker = msgpack.Unpacker()
|
2016-08-14 23:11:33 +00:00
|
|
|
for key, chunk in zip(archive.metadata.items, self.repository.get_many(archive.metadata.items)):
|
2016-03-18 02:16:12 +00:00
|
|
|
_, data = self.key.decrypt(key, chunk)
|
2014-03-26 21:42:20 +00:00
|
|
|
unpacker.feed(data)
|
|
|
|
for item in unpacker:
|
2016-05-31 23:45:45 +00:00
|
|
|
item = Item(internal_dict=item)
|
2016-09-15 19:11:23 +00:00
|
|
|
path = os.fsencode(os.path.normpath(item.path))
|
2016-06-04 16:26:55 +00:00
|
|
|
is_dir = stat.S_ISDIR(item.mode)
|
2016-09-01 02:30:55 +00:00
|
|
|
if is_dir:
|
|
|
|
try:
|
|
|
|
# This can happen if an archive was created with a command line like
|
|
|
|
# $ borg create ... dir1/file dir1
|
|
|
|
# In this case the code below will have created a default_dir inode for dir1 already.
|
2016-09-15 19:11:23 +00:00
|
|
|
inode = self._find_inode(path, prefix)
|
2016-09-01 02:30:55 +00:00
|
|
|
except KeyError:
|
|
|
|
pass
|
|
|
|
else:
|
|
|
|
self.items[inode] = item
|
|
|
|
continue
|
2016-09-15 19:11:23 +00:00
|
|
|
segments = prefix + path.split(b'/')
|
2014-03-26 21:42:20 +00:00
|
|
|
parent = 1
|
2016-09-01 03:18:10 +00:00
|
|
|
for segment in segments[:-1]:
|
|
|
|
parent = self.process_inner(segment, parent)
|
|
|
|
self.process_leaf(segments[-1], item, parent, prefix, is_dir)
|
2016-06-04 16:26:55 +00:00
|
|
|
|
|
|
|
def process_leaf(self, name, item, parent, prefix, is_dir):
|
2016-09-15 13:41:15 +00:00
|
|
|
def file_version(item):
|
2016-06-04 16:26:55 +00:00
|
|
|
if 'chunks' in item:
|
|
|
|
ident = 0
|
|
|
|
for chunkid, _, _ in item.chunks:
|
|
|
|
ident = adler32(chunkid, ident)
|
2016-09-15 13:41:15 +00:00
|
|
|
return ident
|
2016-06-04 16:26:55 +00:00
|
|
|
|
2016-09-15 18:31:57 +00:00
|
|
|
def make_versioned_name(name, version, add_dir=False):
|
|
|
|
if add_dir:
|
|
|
|
# add intermediate directory with same name as filename
|
|
|
|
path_fname = name.rsplit(b'/', 1)
|
|
|
|
name += b'/' + path_fname[-1]
|
|
|
|
return name + os.fsencode('.%08x' % version)
|
|
|
|
|
2016-06-04 16:26:55 +00:00
|
|
|
if self.versions and not is_dir:
|
|
|
|
parent = self.process_inner(name, parent)
|
2016-09-15 13:41:15 +00:00
|
|
|
version = file_version(item)
|
|
|
|
if version is not None:
|
2016-09-15 18:31:57 +00:00
|
|
|
# regular file, with contents - maybe a hardlink master
|
|
|
|
name = make_versioned_name(name, version)
|
2016-09-15 19:11:23 +00:00
|
|
|
path = os.fsencode(os.path.normpath(item.path))
|
|
|
|
self.file_versions[path] = version
|
2016-06-04 16:26:55 +00:00
|
|
|
|
2017-01-29 04:49:53 +00:00
|
|
|
path = item.path
|
2016-09-15 18:31:57 +00:00
|
|
|
del item.path # safe some space
|
2016-06-04 16:26:55 +00:00
|
|
|
if 'source' in item and stat.S_ISREG(item.mode):
|
2016-09-15 18:31:57 +00:00
|
|
|
# a hardlink, no contents, <source> is the hardlink master
|
2016-09-15 19:11:23 +00:00
|
|
|
source = os.fsencode(os.path.normpath(item.source))
|
2016-09-15 18:31:57 +00:00
|
|
|
if self.versions:
|
|
|
|
# adjust source name with version
|
|
|
|
version = self.file_versions[source]
|
2016-09-15 19:11:23 +00:00
|
|
|
source = make_versioned_name(source, version, add_dir=True)
|
2016-09-15 18:31:57 +00:00
|
|
|
name = make_versioned_name(name, version)
|
2017-01-29 04:49:53 +00:00
|
|
|
try:
|
|
|
|
inode = self._find_inode(source, prefix)
|
|
|
|
except KeyError:
|
|
|
|
logger.warning('Skipping broken hard link: %s -> %s', path, item.source)
|
|
|
|
return
|
2016-06-04 16:26:55 +00:00
|
|
|
item = self.cache.get(inode)
|
|
|
|
item.nlink = item.get('nlink', 1) + 1
|
|
|
|
self.items[inode] = item
|
|
|
|
else:
|
|
|
|
inode = self.cache.add(item)
|
|
|
|
self.parent[inode] = parent
|
|
|
|
if name:
|
|
|
|
self.contents[parent][name] = inode
|
|
|
|
|
2016-09-01 03:44:38 +00:00
|
|
|
def process_inner(self, name, parent_inode):
|
|
|
|
dir = self.contents[parent_inode]
|
|
|
|
if name in dir:
|
|
|
|
inode = dir[name]
|
2016-06-04 16:26:55 +00:00
|
|
|
else:
|
2016-09-01 03:44:38 +00:00
|
|
|
inode = self._create_dir(parent_inode)
|
2016-06-04 16:26:55 +00:00
|
|
|
if name:
|
2016-09-01 03:44:38 +00:00
|
|
|
dir[name] = inode
|
|
|
|
return inode
|
2013-07-23 11:40:54 +00:00
|
|
|
|
|
|
|
def allocate_inode(self):
|
|
|
|
self._inode_count += 1
|
|
|
|
return self._inode_count
|
2013-07-21 22:41:06 +00:00
|
|
|
|
2016-02-16 18:28:16 +00:00
|
|
|
def statfs(self, ctx=None):
|
2013-07-27 12:31:28 +00:00
|
|
|
stat_ = llfuse.StatvfsData()
|
|
|
|
stat_.f_bsize = 512
|
|
|
|
stat_.f_frsize = 512
|
|
|
|
stat_.f_blocks = 0
|
|
|
|
stat_.f_bfree = 0
|
|
|
|
stat_.f_bavail = 0
|
|
|
|
stat_.f_files = 0
|
|
|
|
stat_.f_ffree = 0
|
|
|
|
stat_.f_favail = 0
|
|
|
|
return stat_
|
|
|
|
|
2014-03-28 21:51:09 +00:00
|
|
|
def get_item(self, inode):
|
|
|
|
try:
|
|
|
|
return self.items[inode]
|
|
|
|
except KeyError:
|
|
|
|
return self.cache.get(inode)
|
|
|
|
|
2014-03-26 20:47:01 +00:00
|
|
|
def _find_inode(self, path, prefix=[]):
|
2016-09-15 19:11:23 +00:00
|
|
|
segments = prefix + path.split(b'/')
|
2013-07-23 11:07:48 +00:00
|
|
|
inode = 1
|
|
|
|
for segment in segments:
|
2013-07-23 11:40:54 +00:00
|
|
|
inode = self.contents[inode][segment]
|
|
|
|
return inode
|
2013-07-21 22:41:06 +00:00
|
|
|
|
2016-02-16 18:28:16 +00:00
|
|
|
def getattr(self, inode, ctx=None):
|
2014-03-28 21:51:09 +00:00
|
|
|
item = self.get_item(inode)
|
2013-07-21 22:41:06 +00:00
|
|
|
entry = llfuse.EntryAttributes()
|
2013-07-23 11:40:54 +00:00
|
|
|
entry.st_ino = inode
|
2013-07-21 22:41:06 +00:00
|
|
|
entry.generation = 0
|
|
|
|
entry.entry_timeout = 300
|
|
|
|
entry.attr_timeout = 300
|
2016-05-31 23:45:45 +00:00
|
|
|
entry.st_mode = item.mode
|
|
|
|
entry.st_nlink = item.get('nlink', 1)
|
|
|
|
entry.st_uid = item.uid
|
|
|
|
entry.st_gid = item.gid
|
|
|
|
entry.st_rdev = item.get('rdev', 0)
|
2017-02-14 20:08:38 +00:00
|
|
|
entry.st_size = item.file_size()
|
2013-07-21 22:41:06 +00:00
|
|
|
entry.st_blksize = 512
|
2017-02-14 20:08:38 +00:00
|
|
|
entry.st_blocks = (entry.st_size + entry.st_blksize - 1) // entry.st_blksize
|
2015-10-26 01:07:55 +00:00
|
|
|
# note: older archives only have mtime (not atime nor ctime)
|
2016-06-04 15:34:03 +00:00
|
|
|
mtime_ns = item.mtime
|
2015-10-26 01:07:55 +00:00
|
|
|
if have_fuse_xtime_ns:
|
2016-06-04 15:34:03 +00:00
|
|
|
entry.st_mtime_ns = mtime_ns
|
|
|
|
entry.st_atime_ns = item.get('atime', mtime_ns)
|
|
|
|
entry.st_ctime_ns = item.get('ctime', mtime_ns)
|
2013-07-30 12:52:02 +00:00
|
|
|
else:
|
2016-06-04 15:34:03 +00:00
|
|
|
entry.st_mtime = mtime_ns / 1e9
|
|
|
|
entry.st_atime = item.get('atime', mtime_ns) / 1e9
|
|
|
|
entry.st_ctime = item.get('ctime', mtime_ns) / 1e9
|
2013-07-21 22:41:06 +00:00
|
|
|
return entry
|
|
|
|
|
2016-02-16 18:28:16 +00:00
|
|
|
def listxattr(self, inode, ctx=None):
|
2014-03-28 21:51:09 +00:00
|
|
|
item = self.get_item(inode)
|
2016-05-31 23:45:45 +00:00
|
|
|
return item.get('xattrs', {}).keys()
|
2013-07-23 08:44:29 +00:00
|
|
|
|
2016-02-16 18:28:16 +00:00
|
|
|
def getxattr(self, inode, name, ctx=None):
|
2014-03-28 21:51:09 +00:00
|
|
|
item = self.get_item(inode)
|
2013-07-23 08:44:29 +00:00
|
|
|
try:
|
2016-05-31 23:45:45 +00:00
|
|
|
return item.get('xattrs', {})[name]
|
2013-07-23 08:44:29 +00:00
|
|
|
except KeyError:
|
2016-07-07 18:33:44 +00:00
|
|
|
raise llfuse.FUSEError(llfuse.ENOATTR) from None
|
2013-07-23 08:44:29 +00:00
|
|
|
|
2014-03-27 21:43:06 +00:00
|
|
|
def _load_pending_archive(self, inode):
|
|
|
|
# Check if this is an archive we need to load
|
|
|
|
archive = self.pending_archives.pop(inode, None)
|
|
|
|
if archive:
|
|
|
|
self.process_archive(archive, [os.fsencode(archive.name)])
|
|
|
|
|
2016-02-16 18:28:16 +00:00
|
|
|
def lookup(self, parent_inode, name, ctx=None):
|
2014-03-27 21:43:06 +00:00
|
|
|
self._load_pending_archive(parent_inode)
|
2013-07-21 22:41:06 +00:00
|
|
|
if name == b'.':
|
|
|
|
inode = parent_inode
|
|
|
|
elif name == b'..':
|
2013-07-23 11:40:54 +00:00
|
|
|
inode = self.parent[parent_inode]
|
2013-07-21 22:41:06 +00:00
|
|
|
else:
|
2013-07-23 11:40:54 +00:00
|
|
|
inode = self.contents[parent_inode].get(name)
|
2013-07-21 22:41:06 +00:00
|
|
|
if not inode:
|
|
|
|
raise llfuse.FUSEError(errno.ENOENT)
|
|
|
|
return self.getattr(inode)
|
|
|
|
|
2016-02-16 18:28:16 +00:00
|
|
|
def open(self, inode, flags, ctx=None):
|
2016-07-10 00:19:27 +00:00
|
|
|
if not self.allow_damaged_files:
|
|
|
|
item = self.get_item(inode)
|
2016-07-10 23:23:27 +00:00
|
|
|
if 'chunks_healthy' in item:
|
2016-07-10 00:19:27 +00:00
|
|
|
# Processed archive items don't carry the path anymore; for converting the inode
|
|
|
|
# to the path we'd either have to store the inverse of the current structure,
|
|
|
|
# or search the entire archive. So we just don't print it. It's easy to correlate anyway.
|
|
|
|
logger.warning('File has damaged (all-zero) chunks. Try running borg check --repair. '
|
|
|
|
'Mount with allow_damaged_files to read damaged files.')
|
|
|
|
raise llfuse.FUSEError(errno.EIO)
|
2013-07-21 22:41:06 +00:00
|
|
|
return inode
|
|
|
|
|
2016-02-16 18:28:16 +00:00
|
|
|
def opendir(self, inode, ctx=None):
|
2014-03-27 21:43:06 +00:00
|
|
|
self._load_pending_archive(inode)
|
2013-07-21 22:41:06 +00:00
|
|
|
return inode
|
|
|
|
|
|
|
|
def read(self, fh, offset, size):
|
|
|
|
parts = []
|
2014-03-28 21:51:09 +00:00
|
|
|
item = self.get_item(fh)
|
2016-05-31 23:45:45 +00:00
|
|
|
for id, s, csize in item.chunks:
|
2013-07-21 22:41:06 +00:00
|
|
|
if s < offset:
|
|
|
|
offset -= s
|
|
|
|
continue
|
|
|
|
n = min(size, s - offset)
|
2016-04-23 16:03:05 +00:00
|
|
|
if id in self.data_cache:
|
|
|
|
data = self.data_cache[id]
|
|
|
|
if offset + n == len(data):
|
|
|
|
# evict fully read chunk from cache
|
|
|
|
del self.data_cache[id]
|
|
|
|
else:
|
|
|
|
_, data = self.key.decrypt(id, self.repository.get(id))
|
|
|
|
if offset + n < len(data):
|
|
|
|
# chunk was only partially read, cache it
|
|
|
|
self.data_cache[id] = data
|
2016-03-18 02:16:12 +00:00
|
|
|
parts.append(data[offset:offset + n])
|
2013-07-21 22:41:06 +00:00
|
|
|
offset = 0
|
|
|
|
size -= n
|
|
|
|
if not size:
|
|
|
|
break
|
|
|
|
return b''.join(parts)
|
|
|
|
|
|
|
|
def readdir(self, fh, off):
|
2013-07-23 11:40:54 +00:00
|
|
|
entries = [(b'.', fh), (b'..', self.parent[fh])]
|
|
|
|
entries.extend(self.contents[fh].items())
|
2013-07-21 22:41:06 +00:00
|
|
|
for i, (name, inode) in enumerate(entries[off:], off):
|
|
|
|
yield name, self.getattr(inode), i + 1
|
|
|
|
|
2016-02-16 18:28:16 +00:00
|
|
|
def readlink(self, inode, ctx=None):
|
2014-03-28 21:51:09 +00:00
|
|
|
item = self.get_item(inode)
|
2016-05-31 23:45:45 +00:00
|
|
|
return os.fsencode(item.source)
|