2014-06-13 18:07:01 +00:00
|
|
|
import errno
|
2010-11-15 21:18:47 +00:00
|
|
|
import fcntl
|
|
|
|
import msgpack
|
|
|
|
import os
|
|
|
|
import select
|
2014-03-13 21:29:47 +00:00
|
|
|
import shutil
|
2012-10-17 09:40:23 +00:00
|
|
|
from subprocess import Popen, PIPE
|
2010-11-15 21:18:47 +00:00
|
|
|
import sys
|
2014-03-13 21:29:47 +00:00
|
|
|
import tempfile
|
2010-11-15 21:18:47 +00:00
|
|
|
|
2014-03-13 21:29:47 +00:00
|
|
|
from .hashindex import NSIndex
|
2014-02-09 14:52:36 +00:00
|
|
|
from .helpers import Error, IntegrityError
|
2013-06-24 20:41:05 +00:00
|
|
|
from .repository import Repository
|
2010-11-15 21:18:47 +00:00
|
|
|
|
2012-10-17 09:40:23 +00:00
|
|
|
# Chunk size for pipe/stdin reads in the RPC loops (10 MiB).
BUFSIZE = 10 * 1024 * 1024
|
2011-07-05 19:29:15 +00:00
|
|
|
|
|
|
|
|
2013-12-15 19:35:29 +00:00
|
|
|
class ConnectionClosed(Error):
    # Raised when the remote side closes the pipe unexpectedly.
    # NOTE(review): the docstring appears to double as the user-visible
    # error message (Error subclass convention) — do not edit casually.
    """Connection closed by remote host"""
|
2013-06-28 11:31:57 +00:00
|
|
|
|
|
|
|
|
2014-03-24 20:28:59 +00:00
|
|
|
class PathNotAllowed(Error):
    # Raised server-side when a client asks to open a repository path
    # outside of the configured ``restrict_to_paths`` whitelist.
    # NOTE(review): the docstring appears to double as the user-visible
    # error message (Error subclass convention) — do not edit casually.
    """Repository path not allowed"""
|
|
|
|
|
|
|
|
|
2013-06-20 10:44:58 +00:00
|
|
|
class RepositoryServer(object):
    """Serve repository RPC requests over stdin/stdout.

    Runs on the remote end of an ssh connection (``attic serve``): reads
    msgpack-encoded 4-tuples ``(1, msgid, method, args)`` from stdin,
    dispatches each to a method on this object or on the opened
    :class:`Repository`, and writes msgpack-encoded replies
    ``(1, msgid, error_name, result)`` to stdout.
    """

    def __init__(self, restrict_to_paths):
        """
        :param restrict_to_paths: iterable of path prefixes the client is
            allowed to open, or a falsy value for no restriction.
        """
        # Set by open(); RPC methods unknown to this class are looked up
        # on the repository instead.
        self.repository = None
        self.restrict_to_paths = restrict_to_paths

    def serve(self):
        """Run the request/dispatch loop until stdin reaches EOF."""
        # Make stdin non-blocking so os.read() returns whatever is available.
        fl = fcntl.fcntl(sys.stdin.fileno(), fcntl.F_GETFL)
        fcntl.fcntl(sys.stdin.fileno(), fcntl.F_SETFL, fl | os.O_NONBLOCK)
        # Make stdout blocking: replies must be written out completely.
        fl = fcntl.fcntl(sys.stdout.fileno(), fcntl.F_GETFL)
        fcntl.fcntl(sys.stdout.fileno(), fcntl.F_SETFL, fl & ~os.O_NONBLOCK)
        unpacker = msgpack.Unpacker(use_list=False)
        while True:
            r, w, es = select.select([sys.stdin], [], [], 10)
            if r:
                data = os.read(sys.stdin.fileno(), BUFSIZE)
                if not data:
                    # EOF: client hung up, shut down cleanly.
                    return
                unpacker.feed(data)
                # "type_" (wire message type, always 1) was previously named
                # "type", shadowing the builtin — renamed, value is unused.
                for type_, msgid, method, args in unpacker:
                    method = method.decode('ascii')
                    try:
                        try:
                            # Prefer methods defined on the server itself
                            # (negotiate, open), fall back to the repository.
                            f = getattr(self, method)
                        except AttributeError:
                            f = getattr(self.repository, method)
                        res = f(*args)
                    except Exception as e:
                        # Broad catch is deliberate: this is the RPC boundary;
                        # the exception class name is sent back to the client,
                        # which re-raises an equivalent error.
                        sys.stdout.buffer.write(msgpack.packb((1, msgid, e.__class__.__name__, e.args)))
                    else:
                        sys.stdout.buffer.write(msgpack.packb((1, msgid, None, res)))
                    sys.stdout.flush()
            if es:
                return

    def negotiate(self, versions):
        """Return the protocol version this server speaks (currently 1)."""
        return 1

    def open(self, path, create=False):
        """Open (or create) the repository at *path*; return its id.

        :raises PathNotAllowed: if *path* is outside ``restrict_to_paths``.
        """
        path = os.fsdecode(path)
        # "/~..." is the wire encoding for a home-relative path.
        if path.startswith('/~'):
            path = path[1:]
        path = os.path.realpath(os.path.expanduser(path))
        if self.restrict_to_paths:
            for restrict_to_path in self.restrict_to_paths:
                # NOTE(review): plain prefix match — "/data" would also admit
                # "/database". Consider comparing path components instead;
                # left unchanged here to preserve existing behavior.
                if path.startswith(os.path.realpath(restrict_to_path)):
                    break
            else:
                raise PathNotAllowed(path)
        self.repository = Repository(path, create)
        return self.repository.id
|
2010-11-17 21:40:39 +00:00
|
|
|
|
2010-11-15 21:18:47 +00:00
|
|
|
|
2013-06-20 10:44:58 +00:00
|
|
|
class RemoteRepository(object):
    """Client-side proxy for a Repository served over ssh stdin/stdout.

    Spawns ``ssh ... attic serve`` (or a local subprocess for the test
    suite), speaks the msgpack RPC protocol with it, and mirrors the
    Repository API (get/put/delete/list/commit/...).  ``call_many`` keeps
    up to 100 requests in flight and supports read-ahead via ``preload``.
    """

    # Extra argv entries appended when spawning the '__testsuite__' server;
    # the test suite overrides this on the class.
    extra_test_args = []

    class RPCError(Exception):
        # Raised for server-side errors that have no dedicated local
        # exception class; ``name`` is the remote exception class name
        # (bytes, as received off the wire).

        def __init__(self, name):
            self.name = name

    def __init__(self, location, create=False):
        """
        :param location: parsed repository location (host/port/user/path).
        :param create: forwarded to the server's ``open`` call.
        """
        self.location = location
        # Ids queued by preload() and picked up by call_many() below.
        self.preload_ids = []
        self.msgid = 0
        # Outgoing bytes not yet written to the server's stdin.
        self.to_send = b''
        # args-tuple -> list of msgids of in-flight/completed preloads.
        self.cache = {}
        # Msgids whose responses should be dropped (abandoned requests).
        self.ignore_responses = set()
        # msgid -> (error, result) for responses not yet consumed.
        self.responses = {}
        self.unpacker = msgpack.Unpacker(use_list=False)
        # Set to None first so __del__/close() are safe if Popen fails.
        self.p = None
        if location.host == '__testsuite__':
            # Run the server in-process-ish via the current interpreter.
            args = [sys.executable, '-m', 'attic.archiver', 'serve'] + self.extra_test_args
        else:
            args = ['ssh']
            if location.port:
                args += ['-p', str(location.port)]
            if location.user:
                args.append('%s@%s' % (location.user, location.host))
            else:
                args.append('%s' % location.host)
            args += ['attic', 'serve']
        self.p = Popen(args, bufsize=0, stdin=PIPE, stdout=PIPE)
        self.stdin_fd = self.p.stdin.fileno()
        self.stdout_fd = self.p.stdout.fileno()
        # Both pipe ends non-blocking; call_many() multiplexes via select().
        fcntl.fcntl(self.stdin_fd, fcntl.F_SETFL, fcntl.fcntl(self.stdin_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
        fcntl.fcntl(self.stdout_fd, fcntl.F_SETFL, fcntl.fcntl(self.stdout_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
        self.r_fds = [self.stdout_fd]
        self.x_fds = [self.stdin_fd, self.stdout_fd]

        version = self.call('negotiate', 1)
        if version != 1:
            raise Exception('Server insisted on using unsupported protocol version %d' % version)
        self.id = self.call('open', location.path, create)

    def __del__(self):
        self.close()

    def call(self, cmd, *args, **kw):
        """Issue a single RPC call and return its (first) result."""
        for resp in self.call_many(cmd, [args], **kw):
            return resp

    def call_many(self, cmd, calls, wait=True, is_preloaded=False):
        """Pipeline *cmd* over each args-tuple in *calls*, yielding results in order.

        :param wait: if false, return as soon as everything is sent without
            waiting for (or yielding) the responses.
        :param is_preloaded: entries in *calls* are args-tuples already
            requested via preload(); satisfy them from the preload cache.

        Known server errors are translated back into their local exception
        classes; anything else raises :class:`RPCError`.
        """
        if not calls:
            return
        def fetch_from_cache(args):
            # Pop the oldest preloaded msgid for this args-tuple.
            msgid = self.cache[args].pop(0)
            if not self.cache[args]:
                del self.cache[args]
            return msgid

        calls = list(calls)
        # Msgids whose responses we still owe the caller, oldest first.
        waiting_for = []
        w_fds = [self.stdin_fd]
        while wait or calls:
            # Drain responses that arrived for the head of the queue,
            # preserving call order.
            while waiting_for:
                try:
                    error, res = self.responses.pop(waiting_for[0])
                    waiting_for.pop(0)
                    if error:
                        # Map well-known remote error names (bytes off the
                        # wire) back to local exception types.
                        if error == b'DoesNotExist':
                            raise Repository.DoesNotExist(self.location.orig)
                        elif error == b'AlreadyExists':
                            raise Repository.AlreadyExists(self.location.orig)
                        elif error == b'CheckNeeded':
                            raise Repository.CheckNeeded(self.location.orig)
                        elif error == b'IntegrityError':
                            raise IntegrityError(res)
                        elif error == b'PathNotAllowed':
                            raise PathNotAllowed(*res)
                        if error == b'ObjectNotFound':
                            raise Repository.ObjectNotFound(res[0], self.location.orig)
                        raise self.RPCError(error)
                    else:
                        yield res
                        if not waiting_for and not calls:
                            return
                except KeyError:
                    # Head response not in yet; go back to select().
                    break
            r, w, x = select.select(self.r_fds, w_fds, self.x_fds, 1)
            if x:
                raise Exception('FD exception occured')
            if r:
                data = os.read(self.stdout_fd, BUFSIZE)
                if not data:
                    raise ConnectionClosed()
                self.unpacker.feed(data)
                for type, msgid, error, res in self.unpacker:
                    if msgid in self.ignore_responses:
                        # Response to an abandoned request; discard it.
                        self.ignore_responses.remove(msgid)
                    else:
                        self.responses[msgid] = error, res
            if w:
                # Encode at most one new request at a time, keeping no more
                # than 100 requests outstanding.
                while not self.to_send and (calls or self.preload_ids) and len(waiting_for) < 100:
                    if calls:
                        if is_preloaded:
                            if calls[0] in self.cache:
                                waiting_for.append(fetch_from_cache(calls.pop(0)))
                        else:
                            args = calls.pop(0)
                            if cmd == 'get' and args in self.cache:
                                # Already requested by preload(); reuse it.
                                waiting_for.append(fetch_from_cache(args))
                            else:
                                self.msgid += 1
                                waiting_for.append(self.msgid)
                                self.to_send = msgpack.packb((1, self.msgid, cmd, args))
                    if not self.to_send and self.preload_ids:
                        # Opportunistically issue queued preload requests.
                        args = (self.preload_ids.pop(0),)
                        self.msgid += 1
                        self.cache.setdefault(args, []).append(self.msgid)
                        self.to_send = msgpack.packb((1, self.msgid, cmd, args))

                if self.to_send:
                    try:
                        # Partial writes are expected on a non-blocking fd;
                        # keep whatever os.write() did not accept.
                        self.to_send = self.to_send[os.write(self.stdin_fd, self.to_send):]
                    except OSError as e:
                        # io.write might raise EAGAIN even though select indicates
                        # that the fd should be writable
                        if e.errno != errno.EAGAIN:
                            raise
                if not self.to_send and not (calls or self.preload_ids):
                    # Nothing left to send: stop select()ing for writability.
                    w_fds = []
        # wait=False path: responses for requests we sent but will never
        # consume are flagged to be dropped on arrival.
        self.ignore_responses |= set(waiting_for)

    def check(self, repair=False):
        return self.call('check', repair)

    def commit(self, *args):
        return self.call('commit')

    def rollback(self, *args):
        return self.call('rollback')

    def __len__(self):
        return self.call('__len__')

    def list(self, limit=None, marker=None):
        return self.call('list', limit, marker)

    def get(self, id_):
        for resp in self.get_many([id_]):
            return resp

    def get_many(self, ids, is_preloaded=False):
        for resp in self.call_many('get', [(id_,) for id_ in ids], is_preloaded=is_preloaded):
            yield resp

    def put(self, id_, data, wait=True):
        return self.call('put', id_, data, wait=wait)

    def delete(self, id_, wait=True):
        return self.call('delete', id_, wait=wait)

    def close(self):
        """Close the pipes and reap the server subprocess (idempotent)."""
        if self.p:
            self.p.stdin.close()
            self.p.stdout.close()
            self.p.wait()
            self.p = None

    def preload(self, ids):
        """Queue *ids* for read-ahead by subsequent get()/get_many() calls."""
        self.preload_ids += ids
|
2014-03-13 21:29:47 +00:00
|
|
|
|
|
|
|
|
|
|
|
class RepositoryCache:
    """A caching Repository wrapper

    Caches Repository GET operations using a temporary file
    """

    def __init__(self, repository):
        """
        :param repository: the repository (usually remote) whose get()
            results should be cached locally.
        """
        # Initialize to safe defaults first so cleanup()/__del__ work even
        # if initialize() fails partway through.
        self.tmppath = None
        self.index = None
        self.data_fd = None
        self.repository = repository
        self.entries = {}
        self.initialize()

    def __del__(self):
        self.cleanup()

    def initialize(self):
        """Create the temporary directory, index and append-only data file."""
        self.tmppath = tempfile.mkdtemp()
        self.index = NSIndex()
        # a+b: appends for store_object(), seek+read for load_object().
        self.data_fd = open(os.path.join(self.tmppath, 'data'), 'a+b')

    def cleanup(self):
        """Release the index and remove the temporary files."""
        del self.index
        if self.data_fd:
            self.data_fd.close()
        if self.tmppath:
            shutil.rmtree(self.tmppath)

    def load_object(self, offset, size):
        """Read back *size* cached bytes stored at *offset* in the data file."""
        self.data_fd.seek(offset)
        data = self.data_fd.read(size)
        assert len(data) == size
        return data

    def store_object(self, key, data):
        """Append *data* to the data file and index it under *key*."""
        self.data_fd.seek(0, os.SEEK_END)
        self.data_fd.write(data)
        offset = self.data_fd.tell()
        # Index entry is (start offset, length).
        self.index[key] = offset - len(data), len(data)

    def get(self, key):
        return next(self.get_many([key]))

    def get_many(self, keys):
        """Yield the data for each key in *keys*, fetching cache misses.

        Misses are requested from the underlying repository in one
        pipelined get_many() call and stored in the cache as they arrive.
        """
        # "key not in" instead of the original "not key in" (idiom fix).
        unknown_keys = [key for key in keys if key not in self.index]
        repository_iterator = zip(unknown_keys, self.repository.get_many(unknown_keys))
        for key in keys:
            try:
                yield self.load_object(*self.index[key])
            except KeyError:
                # Cache miss: pull from the repository stream until this
                # key's data arrives, caching everything we receive.
                for key_, data in repository_iterator:
                    if key_ == key:
                        self.store_object(key, data)
                        yield data
                        break
        # Consume any pending requests
        for _ in repository_iterator:
            pass
|
2014-03-26 21:42:20 +00:00
|
|
|
|
|
|
|
|
|
|
|
def cache_if_remote(repository):
    """Wrap *repository* in a RepositoryCache if (and only if) it is remote.

    Local repositories are returned unchanged.
    """
    if not isinstance(repository, RemoteRepository):
        return repository
    return RepositoryCache(repository)
|