2014-06-13 18:07:01 +00:00
|
|
|
import errno
|
2010-11-15 21:18:47 +00:00
|
|
|
import fcntl
|
2015-12-12 17:25:38 +00:00
|
|
|
import logging
|
2010-11-15 21:18:47 +00:00
|
|
|
import os
|
|
|
|
import select
|
2015-10-05 22:54:00 +00:00
|
|
|
import shlex
|
2012-10-17 09:40:23 +00:00
|
|
|
from subprocess import Popen, PIPE
|
2010-11-15 21:18:47 +00:00
|
|
|
import sys
|
2014-03-13 21:29:47 +00:00
|
|
|
import tempfile
|
2015-03-21 01:17:19 +00:00
|
|
|
|
2015-05-22 17:21:41 +00:00
|
|
|
from . import __version__
|
2010-11-15 21:18:47 +00:00
|
|
|
|
2015-12-12 21:45:29 +00:00
|
|
|
from .helpers import Error, IntegrityError, sysinfo
|
2016-07-28 07:30:46 +00:00
|
|
|
from .helpers import replace_placeholders
|
2013-06-24 20:41:05 +00:00
|
|
|
from .repository import Repository
|
2010-11-15 21:18:47 +00:00
|
|
|
|
2015-11-13 15:38:50 +00:00
|
|
|
import msgpack
|
2015-10-08 21:03:35 +00:00
|
|
|
|
2015-12-03 16:50:37 +00:00
|
|
|
# Version of the client/server RPC protocol; negotiated via the 'negotiate' RPC call.
RPC_PROTOCOL_VERSION = 2

# Maximum number of bytes read from a pipe per os.read() call.
BUFSIZE = 10 * 1024 * 1024

# Maximum number of RPC requests that may be outstanding ("in flight") at once.
MAX_INFLIGHT = 100
|
|
|
|
|
2011-07-05 19:29:15 +00:00
|
|
|
|
2013-12-15 19:35:29 +00:00
|
|
|
# Raised when the connection to the remote side closes unexpectedly (EOF on a pipe).
# NOTE: the docstring doubles as the user-visible error message (Error convention).
class ConnectionClosed(Error):
    """Connection closed by remote host"""
|
2013-06-28 11:31:57 +00:00
|
|
|
|
|
|
|
|
2015-10-31 21:41:08 +00:00
|
|
|
# Like ConnectionClosed, but with an additional hint formatted into the {} placeholder
# of the message (e.g. 'Is borg working on the server?').
class ConnectionClosedWithHint(ConnectionClosed):
    """Connection closed by remote host. {}"""
|
|
|
|
|
|
|
|
|
2014-03-24 20:28:59 +00:00
|
|
|
# Raised by RepositoryServer.open() when the requested repository path lies outside
# the paths given via restrict_to_paths.
class PathNotAllowed(Error):
    """Repository path not allowed"""
|
|
|
|
|
2015-07-11 16:31:49 +00:00
|
|
|
|
2015-01-11 13:06:59 +00:00
|
|
|
# Raised (server side) when a client requests a method name that is not listed in
# RepositoryServer.rpc_methods; reconstructed client side in call_many()'s handle_error.
class InvalidRPCMethod(Error):
    """RPC method {} is not valid"""
|
2014-03-24 20:28:59 +00:00
|
|
|
|
|
|
|
|
2016-08-22 17:50:53 +00:00
|
|
|
# Server side: received data that does not unpack to the expected 4-tuple RPC message;
# the {} placeholder is filled with the server's borg version.
class UnexpectedRPCDataFormatFromClient(Error):
    """Borg {}: Got unexpected RPC data format from client."""
|
|
|
|
|
|
|
|
|
|
|
|
# Client side: a server response did not unpack to the expected 4-tuple RPC message.
class UnexpectedRPCDataFormatFromServer(Error):
    """Got unexpected RPC data format from server."""
|
|
|
|
|
|
|
|
|
2015-08-12 02:28:31 +00:00
|
|
|
class RepositoryServer: # pragma: no cover
    """Serve a local Repository over the stdin/stdout pipes of a 'borg serve' process.

    The client (RemoteRepository) sends msgpack-encoded 4-tuples
    (type, msgid, method, args); serve() dispatches each call either to a method
    defined here (e.g. 'negotiate', 'open') or to the underlying Repository
    object, and writes back (1, msgid, error, result) tuples on stdout.
    """

    # whitelist of RPC method names a client may invoke; anything else raises
    # InvalidRPCMethod instead of being looked up via getattr (security).
    rpc_methods = (
        '__len__',
        'check',
        'commit',
        'delete',
        'destroy',
        'get',
        'list',
        'negotiate',
        'open',
        'put',
        'rollback',
        'save_key',
        'load_key',
        'break_lock',
    )

    def __init__(self, restrict_to_paths, append_only):
        """
        :param restrict_to_paths: iterable of path prefixes the client may open
                                  repositories under (empty/None = no restriction)
        :param append_only: if True, force append-only mode regardless of what
                            the client requests in open()
        """
        self.repository = None  # set by open(), used by serve() for dispatch/cleanup
        self.restrict_to_paths = restrict_to_paths
        self.append_only = append_only

    def serve(self):
        """Main server loop: read RPC requests from stdin, write replies to stdout.

        Returns when the client closes the connection (EOF on stdin) or on an
        exceptional fd condition; raises UnexpectedRPCDataFormatFromClient on
        malformed requests.
        """
        stdin_fd = sys.stdin.fileno()
        stdout_fd = sys.stdout.fileno()
        # BUG FIX: was sys.stdout.fileno() — stderr manipulation must not touch the
        # stdout fd that carries the msgpack RPC stream.
        stderr_fd = sys.stderr.fileno()
        # Make stdin non-blocking
        fl = fcntl.fcntl(stdin_fd, fcntl.F_GETFL)
        fcntl.fcntl(stdin_fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
        # Make stdout blocking
        fl = fcntl.fcntl(stdout_fd, fcntl.F_GETFL)
        fcntl.fcntl(stdout_fd, fcntl.F_SETFL, fl & ~os.O_NONBLOCK)
        # Make stderr blocking
        fl = fcntl.fcntl(stderr_fd, fcntl.F_GETFL)
        fcntl.fcntl(stderr_fd, fcntl.F_SETFL, fl & ~os.O_NONBLOCK)
        # use_list=False: decode msgpack arrays as tuples (hashable, cheaper)
        unpacker = msgpack.Unpacker(use_list=False)
        while True:
            r, w, es = select.select([stdin_fd], [], [], 10)
            if r:
                data = os.read(stdin_fd, BUFSIZE)
                if not data:
                    # EOF: client went away; close the repo if we ever opened one
                    if self.repository is not None:
                        self.repository.close()
                    else:
                        os.write(stderr_fd, "Borg {}: Got connection close before repository was opened.\n"
                                 .format(__version__).encode())
                    return
                unpacker.feed(data)
                for unpacked in unpacker:
                    if not (isinstance(unpacked, tuple) and len(unpacked) == 4):
                        if self.repository is not None:
                            self.repository.close()
                        raise UnexpectedRPCDataFormatFromClient(__version__)
                    type, msgid, method, args = unpacked
                    method = method.decode('ascii')
                    try:
                        if method not in self.rpc_methods:
                            raise InvalidRPCMethod(method)
                        # prefer a server-side override (negotiate/open), fall back
                        # to the underlying Repository method
                        try:
                            f = getattr(self, method)
                        except AttributeError:
                            f = getattr(self.repository, method)
                        res = f(*args)
                    except BaseException as e:
                        # These exceptions are reconstructed on the client end in RemoteRepository.call_many(),
                        # and will be handled just like locally raised exceptions. Suppress the remote traceback
                        # for these, except ErrorWithTraceback, which should always display a traceback.
                        if not isinstance(e, (Repository.DoesNotExist, Repository.AlreadyExists, PathNotAllowed)):
                            logging.exception('Borg %s: exception in RPC call:', __version__)
                            logging.error(sysinfo())
                        exc = "Remote Exception (see remote log for the traceback)"
                        os.write(stdout_fd, msgpack.packb((1, msgid, e.__class__.__name__, exc)))
                    else:
                        os.write(stdout_fd, msgpack.packb((1, msgid, None, res)))
            if es:
                # exceptional condition on stdin: shut down cleanly.
                # robustness fix: guard against es firing before open() was called.
                if self.repository is not None:
                    self.repository.close()
                return

    def negotiate(self, versions):
        """Return the protocol version this server speaks (client checks it matches)."""
        return RPC_PROTOCOL_VERSION

    def open(self, path, create=False, lock_wait=None, lock=True, exclusive=None, append_only=False):
        """Open (or create) the repository at *path* and return its id.

        Resolves the client-supplied path (supporting the /~ and /./ prefixes),
        enforces restrict_to_paths, and keeps the Repository open on self for
        subsequent dispatched RPC calls. Raises PathNotAllowed when the resolved
        path is outside all allowed prefixes.
        """
        path = os.fsdecode(path)
        if path.startswith('/~'):  # /~/x = path x relative to home dir, /~username/x = relative to "user" home dir
            path = path[1:]
        elif path.startswith('/./'):  # /./x = path x relative to cwd
            path = path[3:]
        path = os.path.realpath(os.path.expanduser(path))
        if self.restrict_to_paths:
            # if --restrict-to-path P is given, we make sure that we only operate in/below path P.
            # for the prefix check, it is important that the compared pathes both have trailing slashes,
            # so that a path /foobar will NOT be accepted with --restrict-to-path /foo option.
            path_with_sep = os.path.join(path, '')  # make sure there is a trailing slash (os.sep)
            for restrict_to_path in self.restrict_to_paths:
                restrict_to_path_with_sep = os.path.join(os.path.realpath(restrict_to_path), '')  # trailing slash
                if path_with_sep.startswith(restrict_to_path_with_sep):
                    break
            else:
                raise PathNotAllowed(path)
        # server-side --append-only wins over whatever the client asked for
        self.repository = Repository(path, create, lock_wait=lock_wait, lock=lock,
                                     append_only=self.append_only or append_only,
                                     exclusive=exclusive)
        self.repository.__enter__()  # clean exit handled by serve() method
        return self.repository.id
|
2010-11-17 21:40:39 +00:00
|
|
|
|
2010-11-15 21:18:47 +00:00
|
|
|
|
2015-03-17 22:03:36 +00:00
|
|
|
class RemoteRepository:
    """Client-side proxy for a Repository served by a remote 'borg serve' process.

    Spawns the server (via ssh, or directly for the test suite), talks a
    msgpack-based RPC protocol over the child's stdin/stdout pipes, and relays
    the child's stderr ($LOG lines go to logging, everything else is echoed
    with a "Remote: " prefix).
    """
    # extra CLI arguments appended to the 'borg serve' command when testing
    extra_test_args = []

    class RPCError(Exception):
        # Carries a server-side exception that has no dedicated local class.
        # name: the (redacted) remote message; remote_type: remote exception class name.
        def __init__(self, name, remote_type):
            self.name = name
            self.remote_type = remote_type

    class NoAppendOnlyOnServer(Error):
        """Server does not support --append-only."""

    def __init__(self, location, create=False, exclusive=False, lock_wait=None, lock=True, append_only=False, args=None):
        """
        :param location: repository location; assumes an object exposing
                         .host, .port, .user, .path, .orig, .canonical_path()
                         (presumably helpers.Location — confirm against caller)
        :param args: parsed CLI args used to build the remote command line (or None)
        """
        self.location = self._location = location
        self.preload_ids = []           # ids queued by preload() for pipelined 'get's
        self.msgid = 0                  # monotonically increasing RPC message id
        self.to_send = b''              # partially written outgoing request bytes
        self.cache = {}                 # args tuple -> list of msgids of preloaded requests
        self.ignore_responses = set()   # msgids whose responses were abandoned by a caller
        self.responses = {}             # msgid -> (error, result) not yet consumed
        self.unpacker = msgpack.Unpacker(use_list=False)
        self.p = None                   # the 'borg serve' subprocess (None once closed)
        testing = location.host == '__testsuite__'
        borg_cmd = self.borg_cmd(args, testing)
        env = dict(os.environ)
        if not testing:
            borg_cmd = self.ssh_cmd(location) + borg_cmd
            # pyinstaller binary adds LD_LIBRARY_PATH=/tmp/_ME... but we do not want
            # that the system's ssh binary picks up (non-matching) libraries from there
            env.pop('LD_LIBRARY_PATH', None)
        env.pop('BORG_PASSPHRASE', None)  # security: do not give secrets to subprocess
        env['BORG_VERSION'] = __version__
        self.p = Popen(borg_cmd, bufsize=0, stdin=PIPE, stdout=PIPE, stderr=PIPE, env=env)
        self.stdin_fd = self.p.stdin.fileno()
        self.stdout_fd = self.p.stdout.fileno()
        self.stderr_fd = self.p.stderr.fileno()
        # all three pipes are driven non-blocking from the select loop in call_many()
        fcntl.fcntl(self.stdin_fd, fcntl.F_SETFL, fcntl.fcntl(self.stdin_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
        fcntl.fcntl(self.stdout_fd, fcntl.F_SETFL, fcntl.fcntl(self.stdout_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
        fcntl.fcntl(self.stderr_fd, fcntl.F_SETFL, fcntl.fcntl(self.stderr_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
        self.r_fds = [self.stdout_fd, self.stderr_fd]
        self.x_fds = [self.stdin_fd, self.stdout_fd, self.stderr_fd]

        try:
            try:
                version = self.call('negotiate', RPC_PROTOCOL_VERSION)
            except ConnectionClosed:
                raise ConnectionClosedWithHint('Is borg working on the server?') from None
            if version != RPC_PROTOCOL_VERSION:
                raise Exception('Server insisted on using unsupported protocol version %d' % version)
            try:
                # try the modern open() signature first (with exclusive/append_only)
                self.id = self.call('open', self.location.path, create, lock_wait, lock, exclusive, append_only)
            except self.RPCError as err:
                # an old server (< 1.0.7) raises TypeError on the extra arguments
                if err.remote_type != 'TypeError':
                    raise
                msg = """\
Please note:
If you see a TypeError complaining about the number of positional arguments
given to open(), you can ignore it if it comes from a borg version < 1.0.7.
This TypeError is a cosmetic side effect of the compatibility code borg
clients >= 1.0.7 have to support older borg servers.
This problem will go away as soon as the server has been upgraded to 1.0.7+.
"""
                # emit this msg in the same way as the "Remote: ..." lines that show the remote TypeError
                sys.stderr.write(msg)
                if append_only:
                    # old servers cannot honor append-only; refuse rather than silently ignore
                    raise self.NoAppendOnlyOnServer()
                # retry with the legacy 4-argument open()
                self.id = self.call('open', self.location.path, create, lock_wait, lock)
        except Exception:
            self.close()
            raise

    def __del__(self):
        # by this point close() should already have run; flag leftover state
        if len(self.responses):
            logging.debug("still %d cached responses left in RemoteRepository" % (len(self.responses),))
        if self.p:
            self.close()
            assert False, "cleanup happened in Repository.__del__"

    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__, self.location.canonical_path())

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        try:
            if exc_type is not None:
                self.rollback()
        finally:
            # in any case, we want to cleanly close the repo, even if the
            # rollback can not succeed (e.g. because the connection was
            # already closed) and raised another exception:
            self.close()

    def borg_cmd(self, args, testing):
        """return a borg serve command line"""
        # give some args/options to "borg serve" process as they were given to us
        opts = []
        if args is not None:
            opts.append('--umask=%03o' % args.umask)
            root_logger = logging.getLogger()
            # pass our effective log level on to the remote side
            if root_logger.isEnabledFor(logging.DEBUG):
                opts.append('--debug')
            elif root_logger.isEnabledFor(logging.INFO):
                opts.append('--info')
            elif root_logger.isEnabledFor(logging.WARNING):
                pass  # warning is default
            elif root_logger.isEnabledFor(logging.ERROR):
                opts.append('--error')
            elif root_logger.isEnabledFor(logging.CRITICAL):
                opts.append('--critical')
            else:
                raise ValueError('log level missing, fix this code')
        if testing:
            return [sys.executable, '-m', 'borg.archiver', 'serve'] + opts + self.extra_test_args
        else:  # pragma: no cover
            remote_path = args.remote_path or os.environ.get('BORG_REMOTE_PATH', 'borg')
            remote_path = replace_placeholders(remote_path)
            return [remote_path, 'serve'] + opts

    def ssh_cmd(self, location):
        """return a ssh command line that can be prefixed to a borg command line"""
        args = shlex.split(os.environ.get('BORG_RSH', 'ssh'))
        if location.port:
            args += ['-p', str(location.port)]
        if location.user:
            args.append('%s@%s' % (location.user, location.host))
        else:
            args.append('%s' % location.host)
        return args

    def call(self, cmd, *args, **kw):
        """Make a single RPC call and return its (first) result."""
        for resp in self.call_many(cmd, [args], **kw):
            return resp

    def call_many(self, cmd, calls, wait=True, is_preloaded=False):
        """Issue *cmd* once per args-tuple in *calls*, yielding results in order.

        Pipelines up to MAX_INFLIGHT requests over the subprocess pipes; 'get'
        calls may be satisfied from responses already requested via preload().
        With wait=False, returns without draining outstanding responses (their
        msgids go to ignore_responses).
        """
        if not calls:
            return

        def fetch_from_cache(args):
            # pop the oldest preloaded msgid for these args
            msgid = self.cache[args].pop(0)
            if not self.cache[args]:
                del self.cache[args]
            return msgid

        def handle_error(error, res):
            # reconstruct server-side exceptions as their local counterparts
            if error == b'DoesNotExist':
                raise Repository.DoesNotExist(self.location.orig)
            elif error == b'AlreadyExists':
                raise Repository.AlreadyExists(self.location.orig)
            elif error == b'CheckNeeded':
                raise Repository.CheckNeeded(self.location.orig)
            elif error == b'IntegrityError':
                raise IntegrityError(res)
            elif error == b'PathNotAllowed':
                raise PathNotAllowed(*res)
            elif error == b'ObjectNotFound':
                raise Repository.ObjectNotFound(res[0], self.location.orig)
            elif error == b'InvalidRPCMethod':
                raise InvalidRPCMethod(*res)
            else:
                raise self.RPCError(res.decode('utf-8'), error.decode('utf-8'))

        calls = list(calls)
        waiting_for = []  # msgids whose responses we still owe the caller, in order
        while wait or calls:
            # first, yield any responses that already arrived, preserving order
            while waiting_for:
                try:
                    error, res = self.responses.pop(waiting_for[0])
                    waiting_for.pop(0)
                    if error:
                        handle_error(error, res)
                    else:
                        yield res
                        if not waiting_for and not calls:
                            return
                except KeyError:
                    # response for the next-in-order msgid not here yet
                    break
            # only ask select for writability if we have (or can build) output
            if self.to_send or ((calls or self.preload_ids) and len(waiting_for) < MAX_INFLIGHT):
                w_fds = [self.stdin_fd]
            else:
                w_fds = []
            r, w, x = select.select(self.r_fds, w_fds, self.x_fds, 1)
            if x:
                raise Exception('FD exception occurred')
            for fd in r:
                if fd is self.stdout_fd:
                    data = os.read(fd, BUFSIZE)
                    if not data:
                        raise ConnectionClosed()
                    self.unpacker.feed(data)
                    for unpacked in self.unpacker:
                        if not (isinstance(unpacked, tuple) and len(unpacked) == 4):
                            raise UnexpectedRPCDataFormatFromServer()
                        type, msgid, error, res = unpacked
                        if msgid in self.ignore_responses:
                            # abandoned response: discard result, but still surface errors
                            self.ignore_responses.remove(msgid)
                            if error:
                                handle_error(error, res)
                        else:
                            self.responses[msgid] = error, res
                elif fd is self.stderr_fd:
                    data = os.read(fd, 32768)
                    if not data:
                        raise ConnectionClosed()
                    data = data.decode('utf-8')
                    for line in data.splitlines(keepends=True):
                        if line.startswith('$LOG '):
                            # structured log forwarding: "$LOG <LEVEL> <message>"
                            _, level, msg = line.split(' ', 2)
                            level = getattr(logging, level, logging.CRITICAL)  # str -> int
                            logging.log(level, msg.rstrip())
                        else:
                            sys.stderr.write("Remote: " + line)
            if w:
                # build at most one request at a time; send partial writes incrementally
                while not self.to_send and (calls or self.preload_ids) and len(waiting_for) < MAX_INFLIGHT:
                    if calls:
                        if is_preloaded:
                            if calls[0] in self.cache:
                                waiting_for.append(fetch_from_cache(calls.pop(0)))
                        else:
                            args = calls.pop(0)
                            if cmd == 'get' and args in self.cache:
                                waiting_for.append(fetch_from_cache(args))
                            else:
                                self.msgid += 1
                                waiting_for.append(self.msgid)
                                self.to_send = msgpack.packb((1, self.msgid, cmd, args))
                    if not self.to_send and self.preload_ids:
                        # opportunistically fire off preload requests while idle
                        args = (self.preload_ids.pop(0),)
                        self.msgid += 1
                        self.cache.setdefault(args, []).append(self.msgid)
                        self.to_send = msgpack.packb((1, self.msgid, cmd, args))

                if self.to_send:
                    try:
                        self.to_send = self.to_send[os.write(self.stdin_fd, self.to_send):]
                    except OSError as e:
                        # io.write might raise EAGAIN even though select indicates
                        # that the fd should be writable
                        if e.errno != errno.EAGAIN:
                            raise
        # wait=False path: abandon responses we will never collect
        self.ignore_responses |= set(waiting_for)

    def check(self, repair=False, save_space=False):
        return self.call('check', repair, save_space)

    def commit(self, save_space=False):
        return self.call('commit', save_space)

    def rollback(self, *args):
        return self.call('rollback')

    def destroy(self):
        return self.call('destroy')

    def __len__(self):
        return self.call('__len__')

    def list(self, limit=None, marker=None):
        return self.call('list', limit, marker)

    def get(self, id_):
        for resp in self.get_many([id_]):
            return resp

    def get_many(self, ids, is_preloaded=False):
        for resp in self.call_many('get', [(id_,) for id_ in ids], is_preloaded=is_preloaded):
            yield resp

    def put(self, id_, data, wait=True):
        return self.call('put', id_, data, wait=wait)

    def delete(self, id_, wait=True):
        return self.call('delete', id_, wait=wait)

    def save_key(self, keydata):
        return self.call('save_key', keydata)

    def load_key(self):
        return self.call('load_key')

    def break_lock(self):
        return self.call('break_lock')

    def close(self):
        """Close the pipes and reap the 'borg serve' subprocess (idempotent)."""
        if self.p:
            self.p.stdin.close()
            self.p.stdout.close()
            self.p.wait()
            self.p = None

    def preload(self, ids):
        """Queue *ids* so their 'get' requests can be pipelined ahead of use."""
        self.preload_ids += ids
|
2014-03-13 21:29:47 +00:00
|
|
|
|
|
|
|
|
2016-01-16 22:42:54 +00:00
|
|
|
class RepositoryNoCache:
    """Non-caching wrapper that simply delegates to the wrapped repository.

    Provides the same API (including context-manager support) as
    RepositoryCache, so callers need not care which one they got.
    """

    def __init__(self, repository):
        self.repository = repository

    def close(self):
        # nothing to clean up in the pass-through variant
        pass

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()

    def get(self, key):
        results = self.get_many([key])
        return next(results)

    def get_many(self, keys):
        yield from self.repository.get_many(keys)
|
|
|
|
|
|
|
|
|
|
|
|
class RepositoryCache(RepositoryNoCache):
    """A caching Repository wrapper

    Caches Repository GET operations using a local temporary Repository.
    """
    # maximum object size that will be cached, 64 kiB.
    THRESHOLD = 2**16

    def __init__(self, repository):
        super().__init__(repository)
        # back the cache with a throwaway local Repository in a temp dir
        tmppath = tempfile.mkdtemp(prefix='borg-tmp')
        self.caching_repo = Repository(tmppath, create=True, exclusive=True)
        self.caching_repo.__enter__()  # handled by context manager in base class

    def close(self):
        # destroy (not just close) the temp cache repository; idempotent
        if self.caching_repo is not None:
            self.caching_repo.destroy()
            self.caching_repo = None

    def get_many(self, keys):
        """Yield data for *keys* in order, serving repeats from the local cache.

        Keys missing from the cache are fetched from the real repository in one
        pipelined get_many() pass shared by the whole iteration.
        """
        # one backing-store request covers all keys not yet cached;
        # the zip iterator is consumed incrementally as misses occur below
        unknown_keys = [key for key in keys if key not in self.caching_repo]
        repository_iterator = zip(unknown_keys, self.repository.get_many(unknown_keys))
        for key in keys:
            try:
                yield self.caching_repo.get(key)
            except Repository.ObjectNotFound:
                # cache miss: advance the shared iterator until we reach this key
                for key_, data in repository_iterator:
                    if key_ == key:
                        # only cache reasonably small objects (<= THRESHOLD bytes)
                        if len(data) <= self.THRESHOLD:
                            self.caching_repo.put(key, data)
                        yield data
                        break
        # Consume any pending requests
        for _ in repository_iterator:
            pass
|
2014-03-26 21:42:20 +00:00
|
|
|
|
|
|
|
|
|
|
|
def cache_if_remote(repository):
    """Wrap *repository* in a RepositoryCache when it is remote, else in a
    RepositoryNoCache, so callers get a uniform (context-manager) API."""
    wrapper_cls = RepositoryCache if isinstance(repository, RemoteRepository) else RepositoryNoCache
    return wrapper_cls(repository)
|