package test

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"math/rand"
	"os"
	"reflect"
	"sort"
	"testing"
	"time"

	"github.com/minio/sha256-simd"

	"github.com/restic/restic/internal/backend"
	"github.com/restic/restic/internal/errors"
	"github.com/restic/restic/internal/restic"
	"github.com/restic/restic/internal/test"
)
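
// seedRand initializes the global math/rand source with the current time and
// logs the seed so that a failing run can be reproduced.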
func seedRand(t testing.TB) {
	seed := time.Now().UnixNano()
	rand.Seed(seed)
	t.Logf("rand initialized with seed %d", seed)
}
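
// beTest reports whether the file referenced by h exists in the backend.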
func beTest(ctx context.Context, be backend.Backend, h backend.Handle) (bool, error) {
	_, err := be.Stat(ctx, h)
	if err != nil && be.IsNotExist(err) {
		return false, nil
	}

	return err == nil, err
}
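
// LoadAll reads and returns the complete content of the file referenced by h.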
func LoadAll(ctx context.Context, be backend.Backend, h backend.Handle) ([]byte, error) {
	var buf []byte
	err := be.Load(ctx, h, 0, 0, func(rd io.Reader) error {
		var err error
		buf, err = io.ReadAll(rd)
		return err
	})
	if err != nil {
		return nil, err
	}
	return buf, nil
}

// TestStripPasswordCall tests that the StripPassword method of a factory can be
// called without crashing. It does not verify whether passwords are removed
// correctly.
func (s *Suite[C]) TestStripPasswordCall(_ *testing.T) {
	s.Factory.StripPassword("some random string")
}

// TestCreateWithConfig tests that creating a backend in a location which already
// has a config file fails.
func (s *Suite[C]) TestCreateWithConfig(t *testing.T) {
	b := s.open(t)
	defer s.close(t, b)

	// remove a config if present
	cfgHandle := backend.Handle{Type: backend.ConfigFile}
	cfgPresent, err := beTest(context.TODO(), b, cfgHandle)
	if err != nil {
		t.Fatalf("unable to test for config: %+v", err)
	}

	if cfgPresent {
		remove(t, b, cfgHandle)
	}

	// save a config
	store(t, b, backend.ConfigFile, []byte("test config"))

	// now create the backend again, this must fail
	_, err = s.createOrError()
	if err == nil {
		t.Fatalf("expected error not found for creating a backend with an existing config file")
	}

	// remove config
	err = b.Remove(context.TODO(), backend.Handle{Type: backend.ConfigFile, Name: ""})
	if err != nil {
		t.Fatalf("unexpected error removing config: %+v", err)
	}
}

// TestConfig saves and loads a config from the backend.
func (s *Suite[C]) TestConfig(t *testing.T) {
	b := s.open(t)
	defer s.close(t, b)

	var testString = "Config"

	// create config and read it back
	_, err := LoadAll(context.TODO(), b, backend.Handle{Type: backend.ConfigFile})
	if err == nil {
		t.Fatalf("did not get expected error for non-existing config")
	}
	test.Assert(t, b.IsNotExist(err), "IsNotExist() did not recognize error from LoadAll(): %v", err)
	test.Assert(t, b.IsPermanentError(err), "IsPermanentError() did not recognize error from LoadAll(): %v", err)

	err = b.Save(context.TODO(), backend.Handle{Type: backend.ConfigFile}, backend.NewByteReader([]byte(testString), b.Hasher()))
	if err != nil {
		t.Fatalf("Save() error: %+v", err)
	}

	// try accessing the config with different names, should all return the
	// same config
	for _, name := range []string{"", "foo", "bar", "0000000000000000000000000000000000000000000000000000000000000000"} {
		h := backend.Handle{Type: backend.ConfigFile, Name: name}
		buf, err := LoadAll(context.TODO(), b, h)
		if err != nil {
			t.Fatalf("unable to read config with name %q: %+v", name, err)
		}

		if string(buf) != testString {
			t.Fatalf("wrong data returned, want %q, got %q", testString, string(buf))
		}
	}

	// remove the config
	remove(t, b, backend.Handle{Type: backend.ConfigFile})
}

// TestLoad tests the backend's Load function.
func (s *Suite[C]) TestLoad(t *testing.T) {
	seedRand(t)

	b := s.open(t)
	defer s.close(t, b)

	err := testLoad(b, backend.Handle{Type: backend.PackFile, Name: "foobar"})
	if err == nil {
		t.Fatalf("Load() did not return an error for non-existing blob")
	}
	test.Assert(t, b.IsNotExist(err), "IsNotExist() did not recognize non-existing blob: %v", err)
	test.Assert(t, b.IsPermanentError(err), "IsPermanentError() did not recognize non-existing blob: %v", err)

	length := rand.Intn(1<<24) + 2000

	data := test.Random(23, length)
	id := restic.Hash(data)

	handle := backend.Handle{Type: backend.PackFile, Name: id.String()}
	err = b.Save(context.TODO(), handle, backend.NewByteReader(data, b.Hasher()))
	if err != nil {
		t.Fatalf("Save() error: %+v", err)
	}

	t.Logf("saved %d bytes as %v", length, handle)

	err = b.Load(context.TODO(), handle, 0, 0, func(rd io.Reader) error {
		_, err := io.Copy(io.Discard, rd)
		if err != nil {
			t.Fatal(err)
		}
		return errors.Errorf("deliberate error")
	})
	if err == nil {
		t.Fatalf("Load() did not propagate consumer error!")
	}
	if err.Error() != "deliberate error" {
		t.Fatalf("Load() did not correctly propagate consumer error!")
	}

	loadTests := 50
	if s.MinimalData {
		loadTests = 10
	}

	for i := 0; i < loadTests; i++ {
		l := rand.Intn(length + 2000)
		o := rand.Intn(length + 2000)

		d := data
		if o < len(d) {
			d = d[o:]
		} else {
			t.Logf("offset == length, skipping test")
			continue
		}

		getlen := l
		if l >= len(d) {
			if rand.Float32() >= 0.5 {
				getlen = 0
			} else {
				getlen = len(d)
			}
		}

		if l > 0 && l < len(d) {
			d = d[:l]
		}

		var buf []byte
		err := b.Load(context.TODO(), handle, getlen, int64(o), func(rd io.Reader) (ierr error) {
			buf, ierr = io.ReadAll(rd)
			return ierr
		})
		if err != nil {
			t.Logf("Load, l %v, o %v, len(d) %v, getlen %v", l, o, len(d), getlen)
			t.Errorf("Load(%d, %d) returned unexpected error: %+v", l, o, err)
			continue
		}

		if l == 0 && len(buf) != len(d) {
			t.Logf("Load, l %v, o %v, len(d) %v, getlen %v", l, o, len(d), getlen)
			t.Errorf("Load(%d, %d) wrong number of bytes read: want %d, got %d", l, o, len(d), len(buf))
			continue
		}

		if l > 0 && l <= len(d) && len(buf) != l {
			t.Logf("Load, l %v, o %v, len(d) %v, getlen %v", l, o, len(d), getlen)
			t.Errorf("Load(%d, %d) wrong number of bytes read: want %d, got %d", l, o, l, len(buf))
			continue
		}

		if l > len(d) && len(buf) != len(d) {
			t.Logf("Load, l %v, o %v, len(d) %v, getlen %v", l, o, len(d), getlen)
			t.Errorf("Load(%d, %d) wrong number of bytes read for overlong read: want %d, got %d", l, o, l, len(buf))
			continue
		}

		if !bytes.Equal(buf, d) {
			t.Logf("Load, l %v, o %v, len(d) %v, getlen %v", l, o, len(d), getlen)
			t.Errorf("Load(%d, %d) returned wrong bytes", l, o)
			continue
		}
	}

	// test error checking for partial and fully out of bounds read
	// only test for length > 0 as we currently do not need strict out of bounds handling for length==0
	for _, offset := range []int{length - 99, length - 50, length, length + 100} {
		err = b.Load(context.TODO(), handle, 100, int64(offset), func(rd io.Reader) (ierr error) {
			_, ierr = io.ReadAll(rd)
			return ierr
		})
		test.Assert(t, err != nil, "Load() did not return error on out of bounds read! o %v, l %v, filelength %v", offset, 100, length)
		test.Assert(t, b.IsPermanentError(err), "IsPermanentError() did not recognize out of range read: %v", err)
		test.Assert(t, !b.IsNotExist(err), "IsNotExist() must not recognize out of range read: %v", err)
	}

	test.OK(t, b.Remove(context.TODO(), handle))
}
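
// setter is implemented by backends that allow limiting the number of items
// returned per List() request; TestList uses it to exercise pagination.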
type setter interface {
	SetListMaxItems(int)
}

// TestList makes sure that the backend implements List() pagination correctly.
func (s *Suite[C]) TestList(t *testing.T) {
	seedRand(t)

	numTestFiles := rand.Intn(20) + 20

	b := s.open(t)
	defer s.close(t, b)

	// Check that the backend is empty to start with
	var found []string
	err := b.List(context.TODO(), backend.PackFile, func(fi backend.FileInfo) error {
		found = append(found, fi.Name)
		return nil
	})
	if err != nil {
		t.Fatalf("List returned error %v", err)
	}
	if found != nil {
		t.Fatalf("backend not empty at start of test - contains: %v", found)
	}

	list1 := make(map[restic.ID]int64)

	for i := 0; i < numTestFiles; i++ {
		data := test.Random(rand.Int(), rand.Intn(100)+55)
		id := restic.Hash(data)
		h := backend.Handle{Type: backend.PackFile, Name: id.String()}
		err := b.Save(context.TODO(), h, backend.NewByteReader(data, b.Hasher()))
		if err != nil {
			t.Fatal(err)
		}
		list1[id] = int64(len(data))
	}

	t.Logf("wrote %v files", len(list1))

	var tests = []struct {
		maxItems int
	}{
		{11}, {23}, {numTestFiles}, {numTestFiles + 10}, {numTestFiles + 1123},
	}

	for _, test := range tests {
		t.Run(fmt.Sprintf("max-%v", test.maxItems), func(t *testing.T) {
			list2 := make(map[restic.ID]int64)

			if s, ok := b.(setter); ok {
				t.Logf("setting max list items to %d", test.maxItems)
				s.SetListMaxItems(test.maxItems)
			}

			err := b.List(context.TODO(), backend.PackFile, func(fi backend.FileInfo) error {
				id, err := restic.ParseID(fi.Name)
				if err != nil {
					t.Fatal(err)
				}
				list2[id] = fi.Size
				return nil
			})

			if err != nil {
				t.Fatalf("List returned error %v", err)
			}

			t.Logf("loaded %v IDs from backend", len(list2))

			for id, size := range list1 {
				size2, ok := list2[id]
				if !ok {
					t.Errorf("id %v not returned by List()", id.Str())
				}

				if size != size2 {
					t.Errorf("wrong size for id %v returned: want %v, got %v", id.Str(), size, size2)
				}
			}

			for id := range list2 {
				_, ok := list1[id]
				if !ok {
					t.Errorf("extra id %v returned by List()", id.Str())
				}
			}
		})
	}

	t.Logf("remove %d files", numTestFiles)
	handles := make([]backend.Handle, 0, len(list1))
	for id := range list1 {
		handles = append(handles, backend.Handle{Type: backend.PackFile, Name: id.String()})
	}

	err = s.delayedRemove(t, b, handles...)
	if err != nil {
		t.Fatal(err)
	}
}

// TestListCancel tests that the context is respected and the error is returned by List.
func (s *Suite[C]) TestListCancel(t *testing.T) {
	seedRand(t)

	numTestFiles := 5

	b := s.open(t)
	defer s.close(t, b)

	testFiles := make([]backend.Handle, 0, numTestFiles)

	for i := 0; i < numTestFiles; i++ {
		data := []byte(fmt.Sprintf("random test blob %v", i))
		id := restic.Hash(data)
		h := backend.Handle{Type: backend.PackFile, Name: id.String()}
		err := b.Save(context.TODO(), h, backend.NewByteReader(data, b.Hasher()))
		if err != nil {
			t.Fatal(err)
		}
		testFiles = append(testFiles, h)
	}

	t.Run("Cancelled", func(t *testing.T) {
		ctx, cancel := context.WithCancel(context.TODO())
		cancel()

		// pass in a cancelled context
		err := b.List(ctx, backend.PackFile, func(fi backend.FileInfo) error {
			t.Errorf("got FileInfo %v for cancelled context", fi)
			return nil
		})

		if !errors.Is(err, context.Canceled) {
			t.Fatalf("expected error not found, want %v, got %v", context.Canceled, err)
		}
	})

	t.Run("First", func(t *testing.T) {
		ctx, cancel := context.WithCancel(context.TODO())
		defer cancel()

		i := 0
		err := b.List(ctx, backend.PackFile, func(fi backend.FileInfo) error {
			i++
			// cancel the context on the first file
			if i == 1 {
				cancel()
			}
			return nil
		})

		if !errors.Is(err, context.Canceled) {
			t.Fatalf("expected error not found, want %v, got %v", context.Canceled, err)
		}

		if i != 1 {
			t.Fatalf("wrong number of files returned by List, want %v, got %v", 1, i)
		}
	})

	t.Run("Last", func(t *testing.T) {
		ctx, cancel := context.WithCancel(context.TODO())
		defer cancel()

		i := 0
		err := b.List(ctx, backend.PackFile, func(fi backend.FileInfo) error {
			// cancel the context at the last file
			i++
			if i == numTestFiles {
				cancel()
			}
			return nil
		})

		if !errors.Is(err, context.Canceled) {
			t.Fatalf("expected error not found, want %v, got %v", context.Canceled, err)
		}

		if i != numTestFiles {
			t.Fatalf("wrong number of files returned by List, want %v, got %v", numTestFiles, i)
		}
	})

	t.Run("Timeout", func(t *testing.T) {
		// rather large timeout, let's try to get at least one item
		timeout := time.Second

		ctxTimeout, cancel := context.WithTimeout(context.TODO(), timeout)
		defer cancel()

		i := 0
		// pass in a context with a timeout
		err := b.List(ctxTimeout, backend.PackFile, func(fi backend.FileInfo) error {
			i++

			// wait until the context is cancelled
			<-ctxTimeout.Done()
			// The cancellation of a context first closes the done channel of the context and
			// _afterwards_ propagates the cancellation to child contexts. If the List
			// implementation uses a child context, then it may take a moment until that context
			// is also cancelled. Thus give the context cancellation a moment to propagate.
			time.Sleep(time.Millisecond)
			return nil
		})

		if !errors.Is(err, context.DeadlineExceeded) {
			t.Fatalf("expected error not found, want %#v, got %#v", context.DeadlineExceeded, err)
		}

		if i > 2 {
			t.Fatalf("wrong number of files returned by List, want <= 2, got %v", i)
		}
	})

	err := s.delayedRemove(t, b, testFiles...)
	if err != nil {
		t.Fatal(err)
	}
}
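
// errorCloser wraps a ReadSeeker and fails the test if its Close method is
// called; it is used to detect backends that close the reader passed to Save().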
type errorCloser struct {
	io.ReadSeeker
	l int64
	t testing.TB
	h []byte
}

func (ec errorCloser) Close() error {
	ec.t.Error("forbidden method close was called")
	return errors.New("forbidden method close was called")
}

func (ec errorCloser) Length() int64 {
	return ec.l
}

func (ec errorCloser) Hash() []byte {
	return ec.h
}

func (ec errorCloser) Rewind() error {
	_, err := ec.ReadSeeker.Seek(0, io.SeekStart)
	return err
}

// TestSave tests saving data in the backend.
func (s *Suite[C]) TestSave(t *testing.T) {
	seedRand(t)

	b := s.open(t)
	defer s.close(t, b)
	var id restic.ID

	saveTests := 10
	if s.MinimalData {
		saveTests = 2
	}

	for i := 0; i < saveTests; i++ {
		length := rand.Intn(1<<23) + 200000
		data := test.Random(23, length)
		id = sha256.Sum256(data)

		h := backend.Handle{
			Type: backend.PackFile,
			Name: id.String(),
		}
		err := b.Save(context.TODO(), h, backend.NewByteReader(data, b.Hasher()))
		test.OK(t, err)

		buf, err := LoadAll(context.TODO(), b, h)
		test.OK(t, err)
		if len(buf) != len(data) {
			t.Fatalf("number of bytes does not match, want %v, got %v", len(data), len(buf))
		}

		if !bytes.Equal(buf, data) {
			t.Fatalf("data not equal")
		}

		fi, err := b.Stat(context.TODO(), h)
		test.OK(t, err)

		if fi.Name != h.Name {
			t.Errorf("Stat() returned wrong name, want %q, got %q", h.Name, fi.Name)
		}

		if fi.Size != int64(len(data)) {
			t.Errorf("Stat() returned different size, want %d, got %d", len(data), fi.Size)
		}

		err = b.Remove(context.TODO(), h)
		if err != nil {
			t.Fatalf("error removing item: %+v", err)
		}
	}

	// test saving from a tempfile
	tmpfile, err := os.CreateTemp("", "restic-backend-save-test-")
	if err != nil {
		t.Fatal(err)
	}

	length := rand.Intn(1<<23) + 200000
	data := test.Random(23, length)
	id = sha256.Sum256(data)

	if _, err = tmpfile.Write(data); err != nil {
		t.Fatal(err)
	}

	if _, err = tmpfile.Seek(0, io.SeekStart); err != nil {
		t.Fatal(err)
	}

	h := backend.Handle{Type: backend.PackFile, Name: id.String()}

	// wrap the tempfile in an errorCloser, so we can detect if the backend
	// closes the reader
	var beHash []byte
	if b.Hasher() != nil {
		beHasher := b.Hasher()
		// must never fail according to interface
		_, err := beHasher.Write(data)
		if err != nil {
			panic(err)
		}
		beHash = beHasher.Sum(nil)
	}
	err = b.Save(context.TODO(), h, errorCloser{
		t:          t,
		l:          int64(length),
		ReadSeeker: tmpfile,
		h:          beHash,
	})
	if err != nil {
		t.Fatal(err)
	}

	err = s.delayedRemove(t, b, h)
	if err != nil {
		t.Fatalf("error removing item: %+v", err)
	}

	if err = tmpfile.Close(); err != nil {
		t.Fatal(err)
	}

	if err = os.Remove(tmpfile.Name()); err != nil {
		t.Fatal(err)
	}
}
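
// incompleteByteReader advertises a length that is larger than the wrapped
// data, so that uploads using it are necessarily incomplete.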
type incompleteByteReader struct {
	backend.ByteReader
}

func (r *incompleteByteReader) Length() int64 {
	return r.ByteReader.Length() + 42
}

// TestSaveError tests that saving data fails when the reader delivers less
// data than its declared length.
func (s *Suite[C]) TestSaveError(t *testing.T) {
	seedRand(t)

	b := s.open(t)
	defer func() {
		// rclone will report an error when closing the backend. We have to ignore it,
		// otherwise this test will always fail.
		_ = b.Close()
	}()

	length := rand.Intn(1<<23) + 200000
	data := test.Random(24, length)
	var id restic.ID
	copy(id[:], data)

	// test that incomplete uploads fail
	h := backend.Handle{Type: backend.PackFile, Name: id.String()}
	err := b.Save(context.TODO(), h, &incompleteByteReader{ByteReader: *backend.NewByteReader(data, b.Hasher())})
	// try to delete possible leftovers
	_ = s.delayedRemove(t, b, h)
	if err == nil {
		t.Fatal("incomplete upload did not fail")
	}
}
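
// wrongByteReader reports a deliberately corrupted hash for the wrapped data.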
type wrongByteReader struct {
	backend.ByteReader
}

func (b *wrongByteReader) Hash() []byte {
	h := b.ByteReader.Hash()
	modHash := make([]byte, len(h))
	copy(modHash, h)
	// flip a bit in the hash
	modHash[0] ^= 0x01
	return modHash
}

// TestSaveWrongHash tests that uploads with a wrong hash fail.
func (s *Suite[C]) TestSaveWrongHash(t *testing.T) {
	seedRand(t)

	b := s.open(t)
	defer s.close(t, b)
	// nothing to do if the backend doesn't support external hashes
	if b.Hasher() == nil {
		return
	}

	length := rand.Intn(1<<23) + 200000
	data := test.Random(25, length)
	var id restic.ID
	copy(id[:], data)

	// test that an upload with a hash mismatch fails
	h := backend.Handle{Type: backend.PackFile, Name: id.String()}
	err := b.Save(context.TODO(), h, &wrongByteReader{ByteReader: *backend.NewByteReader(data, b.Hasher())})
	exists, err2 := beTest(context.TODO(), b, h)
	if err2 != nil {
		t.Fatal(err2)
	}
	_ = s.delayedRemove(t, b, h)
	if err == nil {
		t.Fatal("upload with wrong hash did not fail")
	}
	t.Logf("%v", err)
	if exists {
		t.Fatal("Backend returned an error but stored the file anyway")
	}
}
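
// testStrings pairs file content with its SHA-256 hash; the hash is used as
// the file name when storing the content in the backend.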
var testStrings = []struct {
	id   string
	data string
}{
	{"c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2", "foobar"},
	{"248d6a61d20638b8e5c026930c3e6039a33ce45964ff2167f6ecedd419db06c1", "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"},
	{"cc5d46bdb4991c6eae3eb739c9c8a7a46fe9654fab79c47b4fe48383b5b25e1c", "foo/bar"},
	{"4e54d2c721cbdb730f01b10b62dec622962b36966ec685880effa63d71c808f2", "foo/../../baz"},
}
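
// store saves data under its hash with the given file type and returns the
// corresponding handle.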
func store(t testing.TB, b backend.Backend, tpe backend.FileType, data []byte) backend.Handle {
	id := restic.Hash(data)
	h := backend.Handle{Name: id.String(), Type: tpe}
	err := b.Save(context.TODO(), h, backend.NewByteReader(data, b.Hasher()))
	test.OK(t, err)
	return h
}

// testLoad loads a blob (but discards its contents).
func testLoad(b backend.Backend, h backend.Handle) error {
	return b.Load(context.TODO(), h, 0, 0, func(rd io.Reader) (ierr error) {
		_, ierr = io.Copy(io.Discard, rd)
		return ierr
	})
}
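
// delayedRemove removes the given handles and then waits until the backend no
// longer reports them, to accommodate backends with delayed removal.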
func (s *Suite[C]) delayedRemove(t testing.TB, be backend.Backend, handles ...backend.Handle) error {
	// Some backends (swift, I'm looking at you) may implement delayed
	// removal of data. Let's wait a bit if this happens.

	for _, h := range handles {
		err := be.Remove(context.TODO(), h)
		if s.ErrorHandler != nil {
			err = s.ErrorHandler(t, be, err)
		}
		if err != nil {
			return err
		}
	}

	for _, h := range handles {
		start := time.Now()
		attempt := 0
		var found bool
		var err error
		for time.Since(start) <= s.WaitForDelayedRemoval {
			found, err = beTest(context.TODO(), be, h)
			if s.ErrorHandler != nil {
				err = s.ErrorHandler(t, be, err)
			}
			if err != nil {
				return err
			}

			if !found {
				break
			}

			time.Sleep(2 * time.Second)
			attempt++
		}

		if found {
			t.Fatalf("removed blob %v still present after %v (%d attempts)", h, time.Since(start), attempt)
		}
	}

	return nil
}
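
// delayedList lists files of the given type, retrying until max IDs have been
// seen or maxwait has elapsed, to accommodate backends with delayed listing.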
func delayedList(t testing.TB, b backend.Backend, tpe backend.FileType, max int, maxwait time.Duration) restic.IDs {
	list := restic.NewIDSet()
	start := time.Now()
	for i := 0; i < max; i++ {
		err := b.List(context.TODO(), tpe, func(fi backend.FileInfo) error {
			id := restic.TestParseID(fi.Name)
			list.Insert(id)
			return nil
		})

		if err != nil {
			t.Fatal(err)
		}

		if len(list) < max && time.Since(start) < maxwait {
			time.Sleep(500 * time.Millisecond)
		}
	}

	return list.List()
}

// TestBackend tests all functions of the backend.
func (s *Suite[C]) TestBackend(t *testing.T) {
	b := s.open(t)
	defer s.close(t, b)

	test.Assert(t, !b.IsNotExist(nil), "IsNotExist() recognized nil error")
	test.Assert(t, !b.IsPermanentError(nil), "IsPermanentError() recognized nil error")

	for _, tpe := range []backend.FileType{
		backend.PackFile, backend.KeyFile, backend.LockFile,
		backend.SnapshotFile, backend.IndexFile,
	} {
		// detect non-existing files
		for _, ts := range testStrings {
			id, err := restic.ParseID(ts.id)
			test.OK(t, err)

			// test if blob is already in repository
			h := backend.Handle{Type: tpe, Name: id.String()}
			ret, err := beTest(context.TODO(), b, h)
			test.OK(t, err)
			test.Assert(t, !ret, "blob was found to exist before creating")

			// try to stat a not existing blob
			_, err = b.Stat(context.TODO(), h)
			test.Assert(t, err != nil, "blob data could be extracted before creation")
			test.Assert(t, b.IsNotExist(err), "IsNotExist() did not recognize Stat() error: %v", err)
			test.Assert(t, b.IsPermanentError(err), "IsPermanentError() did not recognize Stat() error: %v", err)

			// try to read not existing blob
			err = testLoad(b, h)
			test.Assert(t, err != nil, "blob could be read before creation")
			test.Assert(t, b.IsNotExist(err), "IsNotExist() did not recognize Load() error: %v", err)
			test.Assert(t, b.IsPermanentError(err), "IsPermanentError() did not recognize Load() error: %v", err)

			// try to get string out, should fail
			ret, err = beTest(context.TODO(), b, h)
			test.OK(t, err)
			test.Assert(t, !ret, "id %q was found (but should not have)", ts.id)
		}

		// add files
		for _, ts := range testStrings {
			store(t, b, tpe, []byte(ts.data))

			// test Load()
			h := backend.Handle{Type: tpe, Name: ts.id}
			buf, err := LoadAll(context.TODO(), b, h)
			test.OK(t, err)
			test.Equals(t, ts.data, string(buf))

			// try to read it out with an offset and a length
			start := 1
			end := len(ts.data) - 2
			length := end - start

			buf2 := make([]byte, length)
			var n int
			err = b.Load(context.TODO(), h, len(buf2), int64(start), func(rd io.Reader) (ierr error) {
				n, ierr = io.ReadFull(rd, buf2)
				return ierr
			})
			test.OK(t, err)
			test.Equals(t, len(buf2), n)
			test.Equals(t, ts.data[start:end], string(buf2))
		}

		// test adding the first file again
		ts := testStrings[0]
		h := backend.Handle{Type: tpe, Name: ts.id}

		// remove and recreate
		err := s.delayedRemove(t, b, h)
		test.OK(t, err)

		// test that the blob is gone
		ok, err := beTest(context.TODO(), b, h)
		test.OK(t, err)
		test.Assert(t, !ok, "removed blob still present")

		// create blob
		err = b.Save(context.TODO(), h, backend.NewByteReader([]byte(ts.data), b.Hasher()))
		test.OK(t, err)

		// list items
		IDs := restic.IDs{}

		for _, ts := range testStrings {
			id, err := restic.ParseID(ts.id)
			test.OK(t, err)
			IDs = append(IDs, id)
		}

		list := delayedList(t, b, tpe, len(IDs), s.WaitForDelayedRemoval)
		if len(IDs) != len(list) {
			t.Fatalf("wrong number of IDs returned: want %d, got %d", len(IDs), len(list))
		}

		sort.Sort(IDs)
		sort.Sort(list)

		if !reflect.DeepEqual(IDs, list) {
			t.Fatalf("lists aren't equal, want:\n %v\n got:\n%v\n", IDs, list)
		}

		var handles []backend.Handle
		for _, ts := range testStrings {
			id, err := restic.ParseID(ts.id)
			test.OK(t, err)

			h := backend.Handle{Type: tpe, Name: id.String()}

			found, err := beTest(context.TODO(), b, h)
			test.OK(t, err)
			test.Assert(t, found, fmt.Sprintf("id %v/%q not found", tpe, id))

			handles = append(handles, h)
		}

		test.OK(t, s.delayedRemove(t, b, handles...))
	}
}

// TestZZZDelete tests the Delete function. The name ensures that this test is executed last.
func (s *Suite[C]) TestZZZDelete(t *testing.T) {
	if !test.TestCleanupTempDirs {
		t.Skipf("not removing backend, TestCleanupTempDirs is false")
	}

	b := s.open(t)
	defer s.close(t, b)

	err := b.Delete(context.TODO())
	if err != nil {
		t.Fatalf("error deleting backend: %+v", err)
	}
}