pack: Hide more implementation details

Michael Eischer 2022-03-28 22:12:16 +02:00
parent a773cb6527
commit fefe9f5c0e
3 changed files with 27 additions and 27 deletions

View File

@@ -47,7 +47,7 @@ func (p *Packer) Add(t restic.BlobType, id restic.ID, data []byte) (int, error)
 	return n, errors.Wrap(err, "Write")
 }
 
-var EntrySize = uint(binary.Size(restic.BlobType(0)) + headerLengthSize + len(restic.ID{}))
+var entrySize = uint(binary.Size(restic.BlobType(0)) + headerLengthSize + len(restic.ID{}))
 
 // headerEntry describes the format of header entries. It serves only as
 // documentation.
@@ -101,7 +101,7 @@ func (p *Packer) Finalize() (uint, error) {
 
 // makeHeader constructs the header for p.
 func (p *Packer) makeHeader() ([]byte, error) {
-	buf := make([]byte, 0, len(p.blobs)*int(EntrySize))
+	buf := make([]byte, 0, len(p.blobs)*int(entrySize))
 
 	for _, b := range p.blobs {
 		switch b.Type {
@@ -152,7 +152,7 @@ func (p *Packer) String() string {
 
 var (
 	// we require at least one entry in the header, and one blob for a pack file
-	minFileSize = EntrySize + crypto.Extension + uint(headerLengthSize)
+	minFileSize = entrySize + crypto.Extension + uint(headerLengthSize)
 )
 
 const (
@@ -173,7 +173,7 @@ const (
 // the appropriate size.
 func readRecords(rd io.ReaderAt, size int64, max int) ([]byte, int, error) {
 	var bufsize int
-	bufsize += max * int(EntrySize)
+	bufsize += max * int(entrySize)
 	bufsize += crypto.Extension
 	bufsize += headerLengthSize
@@ -197,7 +197,7 @@ func readRecords(rd io.ReaderAt, size int64, max int) ([]byte, int, error) {
 		err = InvalidFileError{Message: "header length is zero"}
 	case hlen < crypto.Extension:
 		err = InvalidFileError{Message: "header length is too small"}
-	case (hlen-crypto.Extension)%uint32(EntrySize) != 0:
+	case (hlen-crypto.Extension)%uint32(entrySize) != 0:
 		err = InvalidFileError{Message: "header length is invalid"}
 	case int64(hlen) > size-int64(headerLengthSize):
 		err = InvalidFileError{Message: "header is larger than file"}
@@ -208,7 +208,7 @@ func readRecords(rd io.ReaderAt, size int64, max int) ([]byte, int, error) {
 		return nil, 0, errors.Wrap(err, "readHeader")
 	}
 
-	total := (int(hlen) - crypto.Extension) / int(EntrySize)
+	total := (int(hlen) - crypto.Extension) / int(entrySize)
 	if total < max {
 		// truncate to the beginning of the pack header
 		b = b[len(b)-int(hlen):]
@@ -274,7 +274,7 @@ func List(k *crypto.Key, rd io.ReaderAt, size int64) (entries []restic.Blob, hdr
 		return nil, 0, err
 	}
 
-	entries = make([]restic.Blob, 0, uint(len(buf))/EntrySize)
+	entries = make([]restic.Blob, 0, uint(len(buf))/entrySize)
 
 	pos := uint(0)
 	for len(buf) > 0 {
@@ -286,18 +286,18 @@ func List(k *crypto.Key, rd io.ReaderAt, size int64) (entries []restic.Blob, hdr
 		entries = append(entries, entry)
 		pos += entry.Length
-		buf = buf[EntrySize:]
+		buf = buf[entrySize:]
 	}
 
 	return entries, hdrSize, nil
 }
 
 func parseHeaderEntry(p []byte) (b restic.Blob, err error) {
-	if uint(len(p)) < EntrySize {
+	if uint(len(p)) < entrySize {
 		err = errors.Errorf("parseHeaderEntry: buffer of size %d too short", len(p))
 		return b, err
 	}
 
-	p = p[:EntrySize]
+	p = p[:entrySize]
 
 	switch p[0] {
 	case 0:
@@ -315,7 +315,7 @@ func parseHeaderEntry(p []byte) (b restic.Blob, err error) {
 }
 
 func CalculateHeaderSize(blobs []restic.Blob) int {
-	return headerSize + len(blobs)*int(EntrySize)
+	return headerSize + len(blobs)*int(entrySize)
 }
 
 // Size returns the size of all packs computed by index information.
@@ -333,7 +333,7 @@ func Size(ctx context.Context, mi restic.MasterIndex, onlyHdr bool) map[restic.I
 		if !onlyHdr {
 			size += int64(blob.Length)
 		}
-		packSize[blob.PackID] = size + int64(EntrySize)
+		packSize[blob.PackID] = size + int64(entrySize)
 	}
 
 	return packSize
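
For orientation (not part of the commit): the renamed entrySize is the fixed size of one pack header entry. A minimal sketch of the same calculation, assuming restic's BlobType is backed by a single byte, the length field is a uint32, and blob IDs are SHA-256 hashes:

    package main

    import (
        "crypto/sha256"
        "encoding/binary"
        "fmt"
    )

    func main() {
        // Mirrors the expression in pack.go: one byte for the blob type,
        // four bytes for the uint32 length field, 32 bytes for the blob ID.
        blobTypeSize := 1                         // assumption: BlobType is a uint8
        lengthFieldSize := binary.Size(uint32(0)) // 4
        idSize := sha256.Size                     // 32
        fmt.Println(blobTypeSize + lengthFieldSize + idSize) // 37 bytes per entry
    }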

View File

@@ -41,7 +41,7 @@ func TestParseHeaderEntry(t *testing.T) {
 	buf.Reset()
 	_ = binary.Write(buf, binary.LittleEndian, &h)
 
-	b, err = parseHeaderEntry(buf.Bytes()[:EntrySize-1])
+	b, err = parseHeaderEntry(buf.Bytes()[:entrySize-1])
 	rtest.Assert(t, err != nil, "no error for short input")
 }
@@ -58,7 +58,7 @@ func (rd *countingReaderAt) ReadAt(p []byte, off int64) (n int, err error) {
 func TestReadHeaderEagerLoad(t *testing.T) {
 	testReadHeader := func(dataSize, entryCount, expectedReadInvocationCount int) {
-		expectedHeader := rtest.Random(0, entryCount*int(EntrySize)+crypto.Extension)
+		expectedHeader := rtest.Random(0, entryCount*int(entrySize)+crypto.Extension)
 
 		buf := &bytes.Buffer{}
 		buf.Write(rtest.Random(0, dataSize)) // pack blobs data
@@ -83,8 +83,8 @@ func TestReadHeaderEagerLoad(t *testing.T) {
 	testReadHeader(100, eagerEntries+1, 2)
 
 	// file size == eager header load size
-	eagerLoadSize := int((eagerEntries * EntrySize) + crypto.Extension)
-	headerSize := int(1*EntrySize) + crypto.Extension
+	eagerLoadSize := int((eagerEntries * entrySize) + crypto.Extension)
+	headerSize := int(1*entrySize) + crypto.Extension
 	dataSize := eagerLoadSize - headerSize - binary.Size(uint32(0))
 	testReadHeader(dataSize-1, 1, 1)
 	testReadHeader(dataSize, 1, 1)
@@ -96,8 +96,8 @@ func TestReadHeaderEagerLoad(t *testing.T) {
 func TestReadRecords(t *testing.T) {
 	testReadRecords := func(dataSize, entryCount, totalRecords int) {
-		totalHeader := rtest.Random(0, totalRecords*int(EntrySize)+crypto.Extension)
-		off := len(totalHeader) - (entryCount*int(EntrySize) + crypto.Extension)
+		totalHeader := rtest.Random(0, totalRecords*int(entrySize)+crypto.Extension)
+		off := len(totalHeader) - (entryCount*int(entrySize) + crypto.Extension)
 		if off < 0 {
 			off = 0
 		}
@@ -127,8 +127,8 @@ func TestReadRecords(t *testing.T) {
 	testReadRecords(100, eagerEntries, eagerEntries+1)
 
 	// file size == eager header load size
-	eagerLoadSize := int((eagerEntries * EntrySize) + crypto.Extension)
-	headerSize := int(1*EntrySize) + crypto.Extension
+	eagerLoadSize := int((eagerEntries * entrySize) + crypto.Extension)
+	headerSize := int(1*entrySize) + crypto.Extension
 	dataSize := eagerLoadSize - headerSize - binary.Size(uint32(0))
 	testReadRecords(dataSize-1, 1, 1)
 	testReadRecords(dataSize, 1, 1)

View File

@@ -5,7 +5,6 @@ import (
 	"context"
 	"crypto/rand"
 	"crypto/sha256"
-	"encoding/binary"
 	"encoding/json"
 	"io"
 	"testing"
@@ -54,17 +53,18 @@ func verifyBlobs(t testing.TB, bufs []Buf, k *crypto.Key, rd io.ReaderAt, packSi
 	for _, buf := range bufs {
 		written += len(buf.data)
 	}
 
-	// header length + header + header crypto
-	headerSize := binary.Size(uint32(0)) + restic.CiphertextLength(len(bufs)*int(pack.EntrySize))
-	written += headerSize
-	// check length
-	rtest.Equals(t, uint(written), packSize)
-
 	// read and parse it again
 	entries, hdrSize, err := pack.List(k, rd, int64(packSize))
 	rtest.OK(t, err)
 	rtest.Equals(t, len(entries), len(bufs))
+
+	// check the head size calculation for consistency
+	headerSize := pack.CalculateHeaderSize(entries)
+	written += headerSize
+
+	// check length
+	rtest.Equals(t, uint(written), packSize)
 	rtest.Equals(t, headerSize, int(hdrSize))
 
 	var buf []byte
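
The pack_test.go hunk above also shows the intended pattern for code outside the pack package now that the entry size is unexported: derive header sizes via pack.CalculateHeaderSize instead of doing the arithmetic by hand. A minimal, hypothetical sketch of such a caller, assuming it sits inside the restic module (the internal/... packages cannot be imported from elsewhere):

    package pack_test

    import (
        "testing"

        "github.com/restic/restic/internal/pack"
        "github.com/restic/restic/internal/restic"
    )

    // TestCalculateHeaderSizeSketch is a hypothetical example, not part of this
    // commit; it only shows sizing a header from blob metadata via the exported
    // helper rather than the now-unexported entry size.
    func TestCalculateHeaderSizeSketch(t *testing.T) {
        blobs := make([]restic.Blob, 3) // three zero-value blobs suffice for sizing
        if got := pack.CalculateHeaderSize(blobs); got <= 0 {
            t.Fatalf("unexpected header size %d", got)
        }
    }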