mirror of
https://github.com/borgbackup/borg.git
synced 2024-12-24 16:55:36 +00:00
repo: do not put objects that we won't get, fixes #1451
We will not get() objects that have a segment entry larger than MAX_OBJECT_SIZE; thus we should never produce such entries. Also: introduce repository.MAX_DATA_SIZE, which gives the maximum payload size.
This commit is contained in:
parent
6c1c87f7ae
commit
a360307938
3 changed files with 24 additions and 2 deletions
|
@ -731,8 +731,12 @@ def _read(self, fd, fmt, header, segment, offset, acceptable_tags):
|
|||
return size, tag, key, data
|
||||
|
||||
def write_put(self, id, data, raise_full=False):
|
||||
data_size = len(data)
|
||||
if data_size > MAX_DATA_SIZE:
|
||||
# this would push the segment entry size beyond MAX_OBJECT_SIZE.
|
||||
raise IntegrityError('More than allowed put data [{} > {}]'.format(data_size, MAX_DATA_SIZE))
|
||||
fd = self.get_write_fd(raise_full=raise_full)
|
||||
size = len(data) + self.put_header_fmt.size
|
||||
size = data_size + self.put_header_fmt.size
|
||||
offset = self.offset
|
||||
header = self.header_no_crc_fmt.pack(size, TAG_PUT)
|
||||
crc = self.crc_fmt.pack(crc32(data, crc32(id, crc32(header))) & 0xffffffff)
|
||||
|
@ -771,3 +775,6 @@ def close_segment(self):
|
|||
self._write_fd.close()
|
||||
sync_dir(os.path.dirname(self._write_fd.name))
|
||||
self._write_fd = None
|
||||
|
||||
|
||||
MAX_DATA_SIZE = MAX_OBJECT_SIZE - LoggedIO.put_header_fmt.size
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
from ..helpers import Location, IntegrityError
|
||||
from ..locking import Lock, LockFailed
|
||||
from ..remote import RemoteRepository, InvalidRPCMethod
|
||||
from ..repository import Repository, LoggedIO, TAG_COMMIT
|
||||
from ..repository import Repository, LoggedIO, TAG_COMMIT, MAX_DATA_SIZE
|
||||
from . import BaseTestCase
|
||||
|
||||
|
||||
|
@ -128,6 +128,13 @@ def test_list(self):
|
|||
self.assert_equal(second_half, all[50:])
|
||||
self.assert_equal(len(self.repository.list(limit=50)), 50)
|
||||
|
||||
def test_max_data_size(self):
|
||||
max_data = b'x' * MAX_DATA_SIZE
|
||||
self.repository.put(b'00000000000000000000000000000000', max_data)
|
||||
self.assert_equal(self.repository.get(b'00000000000000000000000000000000'), max_data)
|
||||
self.assert_raises(IntegrityError,
|
||||
lambda: self.repository.put(b'00000000000000000000000000000001', max_data + b'x'))
|
||||
|
||||
|
||||
class RepositoryCommitTestCase(RepositoryTestCaseBase):
|
||||
|
||||
|
|
|
@ -57,6 +57,14 @@ Security fixes:
|
|||
|
||||
- fix security issue with remote repository access, #1428
|
||||
|
||||
Bug fixes:
|
||||
|
||||
- do not write objects to repository that are bigger than the allowed size,
|
||||
borg will reject reading them, #1451.
|
||||
IMPORTANT: if you created archives with many millions of files or
|
||||
directories, please verify if you can open them successfully,
|
||||
e.g. try a "borg list REPO::ARCHIVE".
|
||||
|
||||
|
||||
Version 1.0.7rc1 (2016-08-05)
|
||||
-----------------------------
|
||||
|
|
Loading…
Reference in a new issue