borg/attic/repository.py

596 lines
23 KiB
Python
Raw Normal View History

2013-06-20 10:48:54 +00:00
from configparser import RawConfigParser
from binascii import hexlify
2014-02-10 20:51:25 +00:00
from itertools import islice
import errno
2013-06-20 10:48:54 +00:00
import os
import shutil
2013-06-20 10:48:54 +00:00
import struct
import sys
2013-06-20 10:48:54 +00:00
from zlib import crc32
from .hashindex import NSIndex
from .helpers import Error, IntegrityError, read_msgpack, write_msgpack, unhexlify, UpgradableLock
2013-06-20 10:48:54 +00:00
from .lrucache import LRUCache
MAX_OBJECT_SIZE = 20 * 1024 * 1024
2015-05-09 18:12:23 +00:00
MAGIC = b'BORG_SEG'
2013-06-20 10:48:54 +00:00
TAG_PUT = 0
TAG_DELETE = 1
TAG_COMMIT = 2
2015-03-17 22:03:36 +00:00
class Repository:
    """Filesystem based transactional key value store.

    On disk layout:
      dir/README
      dir/config
      dir/data/<X // SEGMENTS_PER_DIR>/<X>
      dir/index.X
      dir/hints.X
    """
    DEFAULT_MAX_SEGMENT_SIZE = 5 * 1024 * 1024
    DEFAULT_SEGMENTS_PER_DIR = 10000

    # The docstrings below double as user-visible message templates.

    class DoesNotExist(Error):
        """Repository {} does not exist."""

    class AlreadyExists(Error):
        """Repository {} already exists."""

    class InvalidRepository(Error):
        """{} is not a valid repository."""

    class CheckNeeded(Error):
        """Inconsistency detected. Please run "borg check {}"."""

    class ObjectNotFound(Error):
        """Object with key {} not found in repository {}."""
def __init__(self, path, create=False, exclusive=False):
self.path = path
2013-06-20 10:48:54 +00:00
self.io = None
self.lock = None
2014-02-08 12:31:51 +00:00
self.index = None
2014-02-08 23:17:32 +00:00
self._active_txn = False
2013-06-20 10:48:54 +00:00
if create:
self.create(path)
self.open(path, exclusive)
2013-06-20 10:48:54 +00:00
2013-06-24 20:41:05 +00:00
def __del__(self):
self.close()
2013-06-20 10:48:54 +00:00
def create(self, path):
"""Create a new empty repository at `path`
"""
if os.path.exists(path) and (not os.path.isdir(path) or os.listdir(path)):
raise self.AlreadyExists(path)
if not os.path.exists(path):
os.mkdir(path)
with open(os.path.join(path, 'README'), 'w') as fd:
fd.write('This is a Borg repository\n')
2013-06-20 10:48:54 +00:00
os.mkdir(os.path.join(path, 'data'))
config = RawConfigParser()
config.add_section('repository')
config.set('repository', 'version', '1')
config.set('repository', 'segments_per_dir', self.DEFAULT_SEGMENTS_PER_DIR)
config.set('repository', 'max_segment_size', self.DEFAULT_MAX_SEGMENT_SIZE)
config.set('repository', 'id', hexlify(os.urandom(32)).decode('ascii'))
with open(os.path.join(path, 'config'), 'w') as fd:
config.write(fd)
def destroy(self):
"""Destroy the repository at `self.path`
"""
self.close()
os.remove(os.path.join(self.path, 'config')) # kill config first
shutil.rmtree(self.path)
2014-02-08 12:31:51 +00:00
def get_index_transaction_id(self):
2015-02-28 02:43:08 +00:00
indices = sorted((int(name[6:]) for name in os.listdir(self.path) if name.startswith('index.') and name[6:].isdigit()))
if indices:
return indices[-1]
2014-02-08 12:31:51 +00:00
else:
return None
2014-02-08 23:17:32 +00:00
def get_transaction_id(self):
index_transaction_id = self.get_index_transaction_id()
2014-02-18 20:16:36 +00:00
segments_transaction_id = self.io.get_segments_transaction_id()
if index_transaction_id is not None and segments_transaction_id is None:
raise self.CheckNeeded(self.path)
# Attempt to automatically rebuild index if we crashed between commit
# tag write and index save
2014-02-08 23:17:32 +00:00
if index_transaction_id != segments_transaction_id:
if index_transaction_id is not None and index_transaction_id > segments_transaction_id:
replay_from = None
else:
replay_from = index_transaction_id
self.replay_segments(replay_from, segments_transaction_id)
return self.get_index_transaction_id()
2014-02-08 23:17:32 +00:00
def open(self, path, exclusive):
2013-06-20 10:48:54 +00:00
self.path = path
if not os.path.isdir(path):
raise self.DoesNotExist(path)
self.config = RawConfigParser()
self.config.read(os.path.join(self.path, 'config'))
2015-03-17 22:47:21 +00:00
if 'repository' not in self.config.sections() or self.config.getint('repository', 'version') != 1:
raise self.InvalidRepository(path)
self.lock = UpgradableLock(os.path.join(path, 'config'), exclusive)
2013-06-20 10:48:54 +00:00
self.max_segment_size = self.config.getint('repository', 'max_segment_size')
self.segments_per_dir = self.config.getint('repository', 'segments_per_dir')
self.id = unhexlify(self.config.get('repository', 'id').strip())
2014-02-08 23:17:32 +00:00
self.io = LoggedIO(self.path, self.max_segment_size, self.segments_per_dir)
2013-06-20 10:48:54 +00:00
def close(self):
if self.lock:
if self.io:
self.io.close()
self.io = None
self.lock.release()
self.lock = None
2013-06-20 10:48:54 +00:00
2014-02-08 12:31:51 +00:00
def commit(self):
2013-06-20 10:48:54 +00:00
"""Commit transaction
"""
self.io.write_commit()
self.compact_segments()
self.write_index()
self.rollback()
def open_index(self, transaction_id):
2014-02-08 23:17:32 +00:00
if transaction_id is None:
return NSIndex()
return NSIndex.read((os.path.join(self.path, 'index.%d') % transaction_id).encode('utf-8'))
2014-02-08 23:17:32 +00:00
def prepare_txn(self, transaction_id, do_cleanup=True):
self._active_txn = True
2014-02-08 23:17:32 +00:00
self.lock.upgrade()
if not self.index:
self.index = self.open_index(transaction_id)
2014-02-08 23:17:32 +00:00
if transaction_id is None:
2013-06-20 10:48:54 +00:00
self.segments = {}
self.compact = set()
else:
if do_cleanup:
self.io.cleanup(transaction_id)
2014-02-08 23:17:32 +00:00
hints = read_msgpack(os.path.join(self.path, 'hints.%d' % transaction_id))
2013-06-20 10:48:54 +00:00
if hints[b'version'] != 1:
raise ValueError('Unknown hints file version: %d' % hints['version'])
self.segments = hints[b'segments']
self.compact = set(hints[b'compact'])
def write_index(self):
hints = {b'version': 1,
b'segments': self.segments,
b'compact': list(self.compact)}
2014-02-08 23:17:32 +00:00
transaction_id = self.io.get_segments_transaction_id()
write_msgpack(os.path.join(self.path, 'hints.%d' % transaction_id), hints)
self.index.write(os.path.join(self.path, 'index.tmp'))
2013-06-20 10:48:54 +00:00
os.rename(os.path.join(self.path, 'index.tmp'),
2014-02-08 23:17:32 +00:00
os.path.join(self.path, 'index.%d' % transaction_id))
2013-06-20 10:48:54 +00:00
# Remove old indices
2014-02-08 23:17:32 +00:00
current = '.%d' % transaction_id
2013-06-20 10:48:54 +00:00
for name in os.listdir(self.path):
if not name.startswith('index.') and not name.startswith('hints.'):
continue
if name.endswith(current):
continue
os.unlink(os.path.join(self.path, name))
self.index = None
2013-06-20 10:48:54 +00:00
def compact_segments(self):
"""Compact sparse segments by copying data into new segments
"""
if not self.compact:
return
index_transaction_id = self.get_index_transaction_id()
2013-06-20 10:48:54 +00:00
segments = self.segments
for segment in sorted(self.compact):
if self.io.segment_exists(segment):
for tag, key, offset, data in self.io.iter_objects(segment, include_data=True):
if tag == TAG_PUT and self.index.get(key, (-1, -1)) == (segment, offset):
new_segment, offset = self.io.write_put(key, data)
self.index[key] = new_segment, offset
segments.setdefault(new_segment, 0)
segments[new_segment] += 1
segments[segment] -= 1
elif tag == TAG_DELETE:
if index_transaction_id is None or segment > index_transaction_id:
self.io.write_delete(key)
2013-06-20 10:48:54 +00:00
assert segments[segment] == 0
2013-06-20 10:48:54 +00:00
self.io.write_commit()
for segment in sorted(self.compact):
2013-06-20 10:48:54 +00:00
assert self.segments.pop(segment) == 0
self.io.delete_segment(segment)
self.compact = set()
def replay_segments(self, index_transaction_id, segments_transaction_id):
self.prepare_txn(index_transaction_id, do_cleanup=False)
for segment, filename in self.io.segment_iterator():
if index_transaction_id is not None and segment <= index_transaction_id:
continue
if segment > segments_transaction_id:
break
self.segments[segment] = 0
for tag, key, offset in self.io.iter_objects(segment):
if tag == TAG_PUT:
try:
s, _ = self.index[key]
self.compact.add(s)
self.segments[s] -= 1
except KeyError:
pass
self.index[key] = segment, offset
self.segments[segment] += 1
elif tag == TAG_DELETE:
try:
s, _ = self.index.pop(key)
self.segments[s] -= 1
self.compact.add(s)
except KeyError:
pass
self.compact.add(segment)
elif tag == TAG_COMMIT:
continue
else:
raise self.CheckNeeded(self.path)
if self.segments[segment] == 0:
self.compact.add(segment)
self.write_index()
self.rollback()
def check(self, repair=False):
"""Check repository consistency
This method verifies all segment checksums and makes sure
the index is consistent with the data stored in the segments.
"""
2014-02-09 14:52:36 +00:00
error_found = False
2015-03-17 22:47:21 +00:00
def report_error(msg):
2014-02-09 14:52:36 +00:00
nonlocal error_found
error_found = True
print(msg, file=sys.stderr)
2014-02-09 14:52:36 +00:00
2014-02-08 23:17:32 +00:00
assert not self._active_txn
try:
transaction_id = self.get_transaction_id()
current_index = self.open_index(transaction_id)
except Exception:
transaction_id = self.io.get_segments_transaction_id()
2014-02-08 23:17:32 +00:00
current_index = None
if transaction_id is None:
transaction_id = self.get_index_transaction_id()
if transaction_id is None:
transaction_id = self.io.get_latest_segment()
if repair:
self.io.cleanup(transaction_id)
segments_transaction_id = self.io.get_segments_transaction_id()
self.prepare_txn(None)
2014-02-08 12:31:51 +00:00
for segment, filename in self.io.segment_iterator():
2014-02-08 23:17:32 +00:00
if segment > transaction_id:
continue
try:
objects = list(self.io.iter_objects(segment))
except (IntegrityError, struct.error):
report_error('Error reading segment {}'.format(segment))
objects = []
2014-02-08 23:17:32 +00:00
if repair:
self.io.recover_segment(segment, filename)
objects = list(self.io.iter_objects(segment))
self.segments[segment] = 0
for tag, key, offset in objects:
if tag == TAG_PUT:
2014-02-08 23:17:32 +00:00
try:
s, _ = self.index[key]
self.compact.add(s)
self.segments[s] -= 1
except KeyError:
pass
self.index[key] = segment, offset
self.segments[segment] += 1
elif tag == TAG_DELETE:
try:
s, _ = self.index.pop(key)
self.segments[s] -= 1
self.compact.add(s)
except KeyError:
pass
self.compact.add(segment)
elif tag == TAG_COMMIT:
continue
else:
report_error('Unexpected tag {} in segment {}'.format(tag, segment))
# We might need to add a commit tag if no committed segment is found
if repair and segments_transaction_id is None:
report_error('Adding commit tag to segment {}'.format(transaction_id))
self.io.segment = transaction_id + 1
self.io.write_commit()
self.io.close_segment()
if current_index and not repair:
if len(current_index) != len(self.index):
report_error('Index object count mismatch. {} != {}'.format(len(current_index), len(self.index)))
elif current_index:
for key, value in self.index.iteritems():
if current_index.get(key, (-1, -1)) != value:
report_error('Index mismatch for key {}. {} != {}'.format(key, value, current_index.get(key, (-1, -1))))
2014-02-08 23:17:32 +00:00
if repair:
self.compact_segments()
2014-02-08 23:17:32 +00:00
self.write_index()
2014-02-09 14:52:36 +00:00
self.rollback()
return not error_found or repair
2013-06-20 10:48:54 +00:00
def rollback(self):
"""
"""
2014-02-08 12:31:51 +00:00
self.index = None
self._active_txn = False
2013-06-20 10:48:54 +00:00
2014-02-08 23:17:32 +00:00
def __len__(self):
2014-02-08 12:31:51 +00:00
if not self.index:
self.index = self.open_index(self.get_transaction_id())
2013-06-20 10:48:54 +00:00
return len(self.index)
2014-02-10 20:51:25 +00:00
def list(self, limit=None, marker=None):
if not self.index:
self.index = self.open_index(self.get_transaction_id())
2014-02-10 20:51:25 +00:00
return [id_ for id_, _ in islice(self.index.iteritems(marker=marker), limit)]
2014-02-08 12:31:51 +00:00
def get(self, id_):
if not self.index:
self.index = self.open_index(self.get_transaction_id())
2013-06-20 10:48:54 +00:00
try:
2014-02-08 12:31:51 +00:00
segment, offset = self.index[id_]
return self.io.read(segment, offset, id_)
2013-06-20 10:48:54 +00:00
except KeyError:
raise self.ObjectNotFound(id_, self.path)
2013-06-20 10:48:54 +00:00
def get_many(self, ids, is_preloaded=False):
for id_ in ids:
yield self.get(id_)
2013-06-20 10:48:54 +00:00
def put(self, id, data, wait=True):
if not self._active_txn:
self.prepare_txn(self.get_transaction_id())
2013-06-20 10:48:54 +00:00
try:
segment, _ = self.index[id]
self.segments[segment] -= 1
self.compact.add(segment)
segment = self.io.write_delete(id)
self.segments.setdefault(segment, 0)
self.compact.add(segment)
except KeyError:
pass
segment, offset = self.io.write_put(id, data)
self.segments.setdefault(segment, 0)
self.segments[segment] += 1
self.index[id] = segment, offset
def delete(self, id, wait=True):
if not self._active_txn:
self.prepare_txn(self.get_transaction_id())
2013-06-20 10:48:54 +00:00
try:
segment, offset = self.index.pop(id)
except KeyError:
raise self.ObjectNotFound(id, self.path)
self.segments[segment] -= 1
self.compact.add(segment)
segment = self.io.write_delete(id)
self.compact.add(segment)
self.segments.setdefault(segment, 0)
2013-06-20 10:48:54 +00:00
def preload(self, ids):
"""Preload objects (only applies to remote repositories
"""
2013-06-20 10:48:54 +00:00
2015-03-17 22:03:36 +00:00
class LoggedIO:
    """Append-only segment file I/O.

    Each entry on disk is: crc32 (4 bytes) | size+tag header | payload.
    """
    header_fmt = struct.Struct('<IIB')
    assert header_fmt.size == 9
    put_header_fmt = struct.Struct('<IIB32s')
    assert put_header_fmt.size == 41
    header_no_crc_fmt = struct.Struct('<IB')
    assert header_no_crc_fmt.size == 5
    crc_fmt = struct.Struct('<I')
    assert crc_fmt.size == 4

    # Pre-built COMMIT entry (crc + header, no payload).
    _commit = header_no_crc_fmt.pack(9, TAG_COMMIT)
    COMMIT = crc_fmt.pack(crc32(_commit)) + _commit
def __init__(self, path, limit, segments_per_dir, capacity=90):
2013-06-20 10:48:54 +00:00
self.path = path
self.fds = LRUCache(capacity)
2014-02-08 23:17:32 +00:00
self.segment = 0
2013-06-20 10:48:54 +00:00
self.limit = limit
self.segments_per_dir = segments_per_dir
self.offset = 0
self._write_fd = None
def close(self):
for segment in list(self.fds.keys()):
self.fds.pop(segment).close()
self.close_segment()
self.fds = None # Just to make sure we're disabled
2014-02-08 12:31:51 +00:00
def segment_iterator(self, reverse=False):
data_path = os.path.join(self.path, 'data')
dirs = sorted((dir for dir in os.listdir(data_path) if dir.isdigit()), key=int, reverse=reverse)
for dir in dirs:
filenames = os.listdir(os.path.join(data_path, dir))
sorted_filenames = sorted((filename for filename in filenames
if filename.isdigit()), key=int, reverse=reverse)
for filename in sorted_filenames:
yield int(filename), os.path.join(data_path, dir, filename)
def get_latest_segment(self):
for segment, filename in self.segment_iterator(reverse=True):
return segment
return None
2014-02-18 20:16:36 +00:00
def get_segments_transaction_id(self):
2014-02-08 12:31:51 +00:00
"""Verify that the transaction id is consistent with the index transaction id
2013-06-20 10:48:54 +00:00
"""
2014-02-08 12:31:51 +00:00
for segment, filename in self.segment_iterator(reverse=True):
if self.is_committed_segment(filename):
2014-02-08 23:17:32 +00:00
return segment
return None
2014-02-08 23:17:32 +00:00
def cleanup(self, transaction_id):
2014-02-08 12:31:51 +00:00
"""Delete segment files left by aborted transactions
"""
2014-02-08 23:17:32 +00:00
self.segment = transaction_id + 1
2014-02-08 12:31:51 +00:00
for segment, filename in self.segment_iterator(reverse=True):
2014-02-08 23:17:32 +00:00
if segment > transaction_id:
2014-02-08 12:31:51 +00:00
os.unlink(filename)
else:
break
2013-06-20 10:48:54 +00:00
2014-02-08 12:31:51 +00:00
def is_committed_segment(self, filename):
"""Check if segment ends with a COMMIT_TAG tag
"""
2013-06-20 10:48:54 +00:00
with open(filename, 'rb') as fd:
try:
fd.seek(-self.header_fmt.size, os.SEEK_END)
except OSError as e:
# return False if segment file is empty or too small
if e.errno == errno.EINVAL:
return False
raise e
2013-06-20 10:48:54 +00:00
return fd.read(self.header_fmt.size) == self.COMMIT
def segment_filename(self, segment):
return os.path.join(self.path, 'data', str(segment // self.segments_per_dir), str(segment))
def get_write_fd(self, no_new=False):
if not no_new and self.offset and self.offset > self.limit:
self.close_segment()
if not self._write_fd:
if self.segment % self.segments_per_dir == 0:
dirname = os.path.join(self.path, 'data', str(self.segment // self.segments_per_dir))
if not os.path.exists(dirname):
os.mkdir(dirname)
self._write_fd = open(self.segment_filename(self.segment), 'ab')
2013-07-08 21:38:27 +00:00
self._write_fd.write(MAGIC)
2013-06-20 10:48:54 +00:00
self.offset = 8
return self._write_fd
def get_fd(self, segment):
try:
return self.fds[segment]
except KeyError:
fd = open(self.segment_filename(segment), 'rb')
self.fds[segment] = fd
return fd
def delete_segment(self, segment):
try:
os.unlink(self.segment_filename(segment))
except OSError:
pass
def segment_exists(self, segment):
return os.path.exists(self.segment_filename(segment))
def iter_objects(self, segment, include_data=False):
2013-06-20 10:48:54 +00:00
fd = self.get_fd(segment)
fd.seek(0)
2013-07-08 21:38:27 +00:00
if fd.read(8) != MAGIC:
2013-06-20 10:48:54 +00:00
raise IntegrityError('Invalid segment header')
offset = 8
header = fd.read(self.header_fmt.size)
while header:
crc, size, tag = self.header_fmt.unpack(header)
if size > MAX_OBJECT_SIZE:
raise IntegrityError('Invalid segment object size')
rest = fd.read(size - self.header_fmt.size)
if crc32(rest, crc32(memoryview(header)[4:])) & 0xffffffff != crc:
raise IntegrityError('Segment checksum mismatch')
if tag not in (TAG_PUT, TAG_DELETE, TAG_COMMIT):
raise IntegrityError('Invalid segment entry header')
key = None
if tag in (TAG_PUT, TAG_DELETE):
key = rest[:32]
if include_data:
yield tag, key, offset, rest[32:]
else:
yield tag, key, offset
2013-06-20 10:48:54 +00:00
offset += size
header = fd.read(self.header_fmt.size)
2014-02-08 23:17:32 +00:00
def recover_segment(self, segment, filename):
self.fds.pop(segment).close()
# FIXME: save a copy of the original file
with open(filename, 'rb') as fd:
data = memoryview(fd.read())
os.rename(filename, filename + '.beforerecover')
print('attempting to recover ' + filename, file=sys.stderr)
with open(filename, 'wb') as fd:
fd.write(MAGIC)
while len(data) >= self.header_fmt.size:
crc, size, tag = self.header_fmt.unpack(data[:self.header_fmt.size])
if size < self.header_fmt.size or size > len(data):
2014-02-08 23:17:32 +00:00
data = data[1:]
continue
if crc32(data[4:size]) & 0xffffffff != crc:
data = data[1:]
continue
fd.write(data[:size])
data = data[size:]
2013-06-20 10:48:54 +00:00
def read(self, segment, offset, id):
2014-02-08 23:17:32 +00:00
if segment == self.segment and self._write_fd:
2013-06-20 10:48:54 +00:00
self._write_fd.flush()
fd = self.get_fd(segment)
fd.seek(offset)
header = fd.read(self.put_header_fmt.size)
crc, size, tag, key = self.put_header_fmt.unpack(header)
if size > MAX_OBJECT_SIZE:
raise IntegrityError('Invalid segment object size')
data = fd.read(size - self.put_header_fmt.size)
if crc32(data, crc32(memoryview(header)[4:])) & 0xffffffff != crc:
raise IntegrityError('Segment checksum mismatch')
if tag != TAG_PUT or id != key:
raise IntegrityError('Invalid segment entry header')
return data
def write_put(self, id, data):
size = len(data) + self.put_header_fmt.size
fd = self.get_write_fd()
offset = self.offset
header = self.header_no_crc_fmt.pack(size, TAG_PUT)
crc = self.crc_fmt.pack(crc32(data, crc32(id, crc32(header))) & 0xffffffff)
fd.write(b''.join((crc, header, id, data)))
self.offset += size
return self.segment, offset
def write_delete(self, id):
fd = self.get_write_fd()
header = self.header_no_crc_fmt.pack(self.put_header_fmt.size, TAG_DELETE)
crc = self.crc_fmt.pack(crc32(id, crc32(header)) & 0xffffffff)
fd.write(b''.join((crc, header, id)))
self.offset += self.put_header_fmt.size
return self.segment
def write_commit(self):
fd = self.get_write_fd(no_new=True)
header = self.header_no_crc_fmt.pack(self.header_fmt.size, TAG_COMMIT)
crc = self.crc_fmt.pack(crc32(header) & 0xffffffff)
fd.write(b''.join((crc, header)))
self.close_segment()
def close_segment(self):
if self._write_fd:
self.segment += 1
self.offset = 0
self._write_fd.flush()
os.fsync(self._write_fd.fileno())
if hasattr(os, 'posix_fadvise'): # python >= 3.3, only on UNIX
# tell the OS that it does not need to cache what we just wrote,
# avoids spoiling the cache for the OS and other processes.
os.posix_fadvise(self._write_fd.fileno(), 0, 0, os.POSIX_FADV_DONTNEED)
2013-06-20 10:48:54 +00:00
self._write_fd.close()
self._write_fd = None