package test

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"io/ioutil"
	"math/rand"
	"os"
	"reflect"
	"sort"
	"testing"
	"time"

	"github.com/minio/sha256-simd"

	"github.com/restic/restic/internal/errors"
	"github.com/restic/restic/internal/restic"

	"github.com/restic/restic/internal/test"

	"github.com/restic/restic/internal/backend"
)

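// seedRand seeds math/rand with the current time and logs the seed, so a
// failing run can be reproduced.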
func seedRand(t testing.TB) {
	seed := time.Now().UnixNano()
	rand.Seed(seed)
	t.Logf("rand initialized with seed %d", seed)
}

// TestCreateWithConfig tests that creating a backend in a location which already
// has a config file fails.
func (s *Suite) TestCreateWithConfig(t *testing.T) {
	b := s.open(t)
	defer s.close(t, b)

	// remove a config if present
	cfgHandle := restic.Handle{Type: restic.ConfigFile}
	cfgPresent, err := b.Test(context.TODO(), cfgHandle)
	if err != nil {
		t.Fatalf("unable to test for config: %+v", err)
	}

	if cfgPresent {
		remove(t, b, cfgHandle)
	}

	// save a config
	store(t, b, restic.ConfigFile, []byte("test config"))

	// now create the backend again, this must fail
	_, err = s.Create(s.Config)
	if err == nil {
		t.Fatalf("expected error not found for creating a backend with an existing config file")
	}

	// remove config
	err = b.Remove(context.TODO(), restic.Handle{Type: restic.ConfigFile, Name: ""})
	if err != nil {
		t.Fatalf("unexpected error removing config: %+v", err)
	}
}

// TestLocation tests that a location string is returned.
func (s *Suite) TestLocation(t *testing.T) {
	b := s.open(t)
	defer s.close(t, b)

	l := b.Location()
	if l == "" {
		t.Fatalf("invalid location string %q", l)
	}
}

// TestConfig saves and loads a config from the backend.
func (s *Suite) TestConfig(t *testing.T) {
	b := s.open(t)
	defer s.close(t, b)

	var testString = "Config"

	// create config and read it back
	_, err := backend.LoadAll(context.TODO(), nil, b, restic.Handle{Type: restic.ConfigFile})
	if err == nil {
		t.Fatalf("did not get expected error for non-existing config")
	}

	err = b.Save(context.TODO(), restic.Handle{Type: restic.ConfigFile}, restic.NewByteReader([]byte(testString), b.Hasher()))
	if err != nil {
		t.Fatalf("Save() error: %+v", err)
	}

	// try accessing the config with different names, should all return the
	// same config
	for _, name := range []string{"", "foo", "bar", "0000000000000000000000000000000000000000000000000000000000000000"} {
		h := restic.Handle{Type: restic.ConfigFile, Name: name}
		buf, err := backend.LoadAll(context.TODO(), nil, b, h)
		if err != nil {
			t.Fatalf("unable to read config with name %q: %+v", name, err)
		}

		if string(buf) != testString {
			t.Fatalf("wrong data returned, want %q, got %q", testString, string(buf))
		}
	}

	// remove the config
	remove(t, b, restic.Handle{Type: restic.ConfigFile})
}

// TestLoad tests the backend's Load function.
func (s *Suite) TestLoad(t *testing.T) {
	seedRand(t)

	b := s.open(t)
	defer s.close(t, b)

	noop := func(rd io.Reader) error {
		return nil
	}

	err := b.Load(context.TODO(), restic.Handle{}, 0, 0, noop)
	if err == nil {
		t.Fatalf("Load() did not return an error for invalid handle")
	}

	err = testLoad(b, restic.Handle{Type: restic.PackFile, Name: "foobar"}, 0, 0)
	if err == nil {
		t.Fatalf("Load() did not return an error for non-existing blob")
	}

	length := rand.Intn(1<<24) + 2000

	data := test.Random(23, length)
	id := restic.Hash(data)

	handle := restic.Handle{Type: restic.PackFile, Name: id.String()}
	err = b.Save(context.TODO(), handle, restic.NewByteReader(data, b.Hasher()))
	if err != nil {
		t.Fatalf("Save() error: %+v", err)
	}

	t.Logf("saved %d bytes as %v", length, handle)

	err = b.Load(context.TODO(), handle, 100, -1, noop)
	if err == nil {
		t.Fatalf("Load() returned no error for negative offset!")
	}

	err = b.Load(context.TODO(), handle, 0, 0, func(rd io.Reader) error {
		_, err := io.Copy(ioutil.Discard, rd)
		if err != nil {
			t.Fatal(err)
		}
		return errors.Errorf("deliberate error")
	})
	if err == nil {
		t.Fatalf("Load() did not propagate consumer error!")
	}
	if err.Error() != "deliberate error" {
		t.Fatalf("Load() did not correctly propagate consumer error!")
	}

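	// Load random subranges of the blob and compare them against the
	// corresponding slice of the original data. A requested length of 0 (or a
	// length past the end of the file) means "read to the end".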
	loadTests := 50
	if s.MinimalData {
		loadTests = 10
	}

	for i := 0; i < loadTests; i++ {
		l := rand.Intn(length + 2000)
		o := rand.Intn(length + 2000)

		d := data
		if o < len(d) {
			d = d[o:]
		} else {
			t.Logf("offset == length, skipping test")
			continue
		}

		getlen := l
		if l >= len(d) && rand.Float32() >= 0.5 {
			getlen = 0
		}

		if l > 0 && l < len(d) {
			d = d[:l]
		}

		var buf []byte
		err := b.Load(context.TODO(), handle, getlen, int64(o), func(rd io.Reader) (ierr error) {
			buf, ierr = ioutil.ReadAll(rd)
			return ierr
		})
		if err != nil {
			t.Logf("Load, l %v, o %v, len(d) %v, getlen %v", l, o, len(d), getlen)
			t.Errorf("Load(%d, %d) returned unexpected error: %+v", l, o, err)
			continue
		}

		if l == 0 && len(buf) != len(d) {
			t.Logf("Load, l %v, o %v, len(d) %v, getlen %v", l, o, len(d), getlen)
			t.Errorf("Load(%d, %d) wrong number of bytes read: want %d, got %d", l, o, len(d), len(buf))
			continue
		}

		if l > 0 && l <= len(d) && len(buf) != l {
			t.Logf("Load, l %v, o %v, len(d) %v, getlen %v", l, o, len(d), getlen)
			t.Errorf("Load(%d, %d) wrong number of bytes read: want %d, got %d", l, o, l, len(buf))
			continue
		}

		if l > len(d) && len(buf) != len(d) {
			t.Logf("Load, l %v, o %v, len(d) %v, getlen %v", l, o, len(d), getlen)
			t.Errorf("Load(%d, %d) wrong number of bytes read for overlong read: want %d, got %d", l, o, l, len(buf))
			continue
		}

		if !bytes.Equal(buf, d) {
			t.Logf("Load, l %v, o %v, len(d) %v, getlen %v", l, o, len(d), getlen)
			t.Errorf("Load(%d, %d) returned wrong bytes", l, o)
			continue
		}
	}

	test.OK(t, b.Remove(context.TODO(), handle))
}

// TestList makes sure that the backend implements List() pagination correctly.
func (s *Suite) TestList(t *testing.T) {
	seedRand(t)

	numTestFiles := rand.Intn(20) + 20

	b := s.open(t)
	defer s.close(t, b)

	// Check that the backend is empty to start with
	var found []string
	err := b.List(context.TODO(), restic.PackFile, func(fi restic.FileInfo) error {
		found = append(found, fi.Name)
		return nil
	})
	if err != nil {
		t.Fatalf("List returned error %v", err)
	}
	if found != nil {
		t.Fatalf("backend not empty at start of test - contains: %v", found)
	}

	list1 := make(map[restic.ID]int64)

	for i := 0; i < numTestFiles; i++ {
		data := test.Random(rand.Int(), rand.Intn(100)+55)
		id := restic.Hash(data)
		h := restic.Handle{Type: restic.PackFile, Name: id.String()}
		err := b.Save(context.TODO(), h, restic.NewByteReader(data, b.Hasher()))
		if err != nil {
			t.Fatal(err)
		}
		list1[id] = int64(len(data))
	}

	t.Logf("wrote %v files", len(list1))

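	// Run List with page sizes both smaller and larger than the number of
	// files. Backends that paginate expose SetListMaxItems to limit how many
	// entries a single listing request may return.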
	var tests = []struct {
		maxItems int
	}{
		{11}, {23}, {numTestFiles}, {numTestFiles + 10}, {numTestFiles + 1123},
	}

	for _, test := range tests {
		t.Run(fmt.Sprintf("max-%v", test.maxItems), func(t *testing.T) {
			list2 := make(map[restic.ID]int64)

			type setter interface {
				SetListMaxItems(int)
			}

			if s, ok := b.(setter); ok {
				t.Logf("setting max list items to %d", test.maxItems)
				s.SetListMaxItems(test.maxItems)
			}

			err := b.List(context.TODO(), restic.PackFile, func(fi restic.FileInfo) error {
				id, err := restic.ParseID(fi.Name)
				if err != nil {
					t.Fatal(err)
				}
				list2[id] = fi.Size
				return nil
			})

			if err != nil {
				t.Fatalf("List returned error %v", err)
			}

			t.Logf("loaded %v IDs from backend", len(list2))

			for id, size := range list1 {
				size2, ok := list2[id]
				if !ok {
					t.Errorf("id %v not returned by List()", id.Str())
				}

				if size != size2 {
					t.Errorf("wrong size for id %v returned: want %v, got %v", id.Str(), size, size2)
				}
			}

			for id := range list2 {
				_, ok := list1[id]
				if !ok {
					t.Errorf("extra id %v returned by List()", id.Str())
				}
			}
		})
	}

	t.Logf("remove %d files", numTestFiles)
	handles := make([]restic.Handle, 0, len(list1))
	for id := range list1 {
		handles = append(handles, restic.Handle{Type: restic.PackFile, Name: id.String()})
	}

	err = s.delayedRemove(t, b, handles...)
	if err != nil {
		t.Fatal(err)
	}
}

// TestListCancel tests that the context is respected and the error is returned by List.
func (s *Suite) TestListCancel(t *testing.T) {
	seedRand(t)

	numTestFiles := 5

	b := s.open(t)
	defer s.close(t, b)

	testFiles := make([]restic.Handle, 0, numTestFiles)

	for i := 0; i < numTestFiles; i++ {
		data := []byte(fmt.Sprintf("random test blob %v", i))
		id := restic.Hash(data)
		h := restic.Handle{Type: restic.PackFile, Name: id.String()}
		err := b.Save(context.TODO(), h, restic.NewByteReader(data, b.Hasher()))
		if err != nil {
			t.Fatal(err)
		}
		testFiles = append(testFiles, h)
	}

	t.Run("Cancelled", func(t *testing.T) {
		ctx, cancel := context.WithCancel(context.TODO())
		cancel()

		// pass in a cancelled context
		err := b.List(ctx, restic.PackFile, func(fi restic.FileInfo) error {
			t.Errorf("got FileInfo %v for cancelled context", fi)
			return nil
		})

		if errors.Cause(err) != context.Canceled {
			t.Fatalf("expected error not found, want %v, got %v", context.Canceled, errors.Cause(err))
		}
	})

	t.Run("First", func(t *testing.T) {
		ctx, cancel := context.WithCancel(context.TODO())
		defer cancel()

		i := 0
		err := b.List(ctx, restic.PackFile, func(fi restic.FileInfo) error {
			i++
			// cancel the context on the first file
			if i == 1 {
				cancel()
			}
			return nil
		})

		if errors.Cause(err) != context.Canceled {
			t.Fatalf("expected error not found, want %v, got %v", context.Canceled, err)
		}

		if i != 1 {
			t.Fatalf("wrong number of files returned by List, want %v, got %v", 1, i)
		}
	})

	t.Run("Last", func(t *testing.T) {
		ctx, cancel := context.WithCancel(context.TODO())
		defer cancel()

		i := 0
		err := b.List(ctx, restic.PackFile, func(fi restic.FileInfo) error {
			// cancel the context at the last file
			i++
			if i == numTestFiles {
				cancel()
			}
			return nil
		})

		if errors.Cause(err) != context.Canceled {
			t.Fatalf("expected error not found, want %v, got %v", context.Canceled, err)
		}

		if i != numTestFiles {
			t.Fatalf("wrong number of files returned by List, want %v, got %v", numTestFiles, i)
		}
	})

	t.Run("Timeout", func(t *testing.T) {
		// rather large timeout, let's try to get at least one item
		timeout := time.Second

		ctxTimeout, cancel := context.WithTimeout(context.TODO(), timeout)
		defer cancel()

		i := 0
		// pass in a context with a timeout
		err := b.List(ctxTimeout, restic.PackFile, func(fi restic.FileInfo) error {
			i++

			// wait until the context is cancelled
			<-ctxTimeout.Done()
			return nil
		})

		if errors.Cause(err) != context.DeadlineExceeded {
			t.Fatalf("expected error not found, want %#v, got %#v", context.DeadlineExceeded, err)
		}

		if i > 2 {
			t.Fatalf("wrong number of files returned by List, want <= 2, got %v", i)
		}
	})

	err := s.delayedRemove(t, b, testFiles...)
	if err != nil {
		t.Fatal(err)
	}
}

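// errorCloser wraps a ReadSeeker for Save and fails the test if the backend
// calls the forbidden Close method on the reader.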
type errorCloser struct {
	io.ReadSeeker
	l int64
	t testing.TB
	h []byte
}

func (ec errorCloser) Close() error {
	ec.t.Error("forbidden method close was called")
	return errors.New("forbidden method close was called")
}

func (ec errorCloser) Length() int64 {
	return ec.l
}

func (ec errorCloser) Hash() []byte {
	return ec.h
}

func (ec errorCloser) Rewind() error {
	_, err := ec.ReadSeeker.Seek(0, io.SeekStart)
	return err
}

// TestSave tests saving data in the backend.
func (s *Suite) TestSave(t *testing.T) {
	seedRand(t)

	b := s.open(t)
	defer s.close(t, b)
	var id restic.ID

	saveTests := 10
	if s.MinimalData {
		saveTests = 2
	}

	for i := 0; i < saveTests; i++ {
		length := rand.Intn(1<<23) + 200000
		data := test.Random(23, length)
		id = sha256.Sum256(data)

		h := restic.Handle{
			Type: restic.PackFile,
			Name: id.String(),
		}
		err := b.Save(context.TODO(), h, restic.NewByteReader(data, b.Hasher()))
		test.OK(t, err)

		buf, err := backend.LoadAll(context.TODO(), nil, b, h)
		test.OK(t, err)
		if len(buf) != len(data) {
			t.Fatalf("number of bytes does not match, want %v, got %v", len(data), len(buf))
		}

		if !bytes.Equal(buf, data) {
			t.Fatalf("data not equal")
		}

		fi, err := b.Stat(context.TODO(), h)
		test.OK(t, err)

		if fi.Name != h.Name {
			t.Errorf("Stat() returned wrong name, want %q, got %q", h.Name, fi.Name)
		}

		if fi.Size != int64(len(data)) {
			t.Errorf("Stat() returned different size, want %d, got %d", len(data), fi.Size)
		}

		err = b.Remove(context.TODO(), h)
		if err != nil {
			t.Fatalf("error removing item: %+v", err)
		}
	}

	// test saving from a tempfile
	tmpfile, err := ioutil.TempFile("", "restic-backend-save-test-")
	if err != nil {
		t.Fatal(err)
	}

	length := rand.Intn(1<<23) + 200000
	data := test.Random(23, length)
	id = sha256.Sum256(data)

	if _, err = tmpfile.Write(data); err != nil {
		t.Fatal(err)
	}

	if _, err = tmpfile.Seek(0, io.SeekStart); err != nil {
		t.Fatal(err)
	}

	h := restic.Handle{Type: restic.PackFile, Name: id.String()}

	// wrap the tempfile in an errorCloser, so we can detect if the backend
	// closes the reader
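	// errorCloser bypasses restic.NewByteReader, so if the backend requires
	// its own hash (b.Hasher() != nil), compute it here and hand it to the
	// wrapper's Hash() method.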
	var beHash []byte
	if b.Hasher() != nil {
		beHasher := b.Hasher()
		// must never fail according to interface
		_, err := beHasher.Write(data)
		if err != nil {
			panic(err)
		}
		beHash = beHasher.Sum(nil)
	}
	err = b.Save(context.TODO(), h, errorCloser{
		t:          t,
		l:          int64(length),
		ReadSeeker: tmpfile,
		h:          beHash,
	})
	if err != nil {
		t.Fatal(err)
	}

	err = s.delayedRemove(t, b, h)
	if err != nil {
		t.Fatalf("error removing item: %+v", err)
	}

	if err = tmpfile.Close(); err != nil {
		t.Fatal(err)
	}

	if err = os.Remove(tmpfile.Name()); err != nil {
		t.Fatal(err)
	}
}

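// incompleteByteReader reports a length 42 bytes larger than the wrapped data,
// so the upload ends up shorter than announced.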
type incompleteByteReader struct {
	restic.ByteReader
}

func (r *incompleteByteReader) Length() int64 {
	return r.ByteReader.Length() + 42
}

// TestSaveError tests that saving data in the backend fails when the reader
// delivers fewer bytes than it announced.
func (s *Suite) TestSaveError(t *testing.T) {
	seedRand(t)

	b := s.open(t)
	defer func() {
		// rclone will report an error when closing the backend. We have to
		// ignore it, otherwise this test will always fail.
		_ = b.Close()
	}()

	length := rand.Intn(1<<23) + 200000
	data := test.Random(24, length)
	var id restic.ID
	copy(id[:], data)

	// test that incomplete uploads fail
	h := restic.Handle{Type: restic.PackFile, Name: id.String()}
	err := b.Save(context.TODO(), h, &incompleteByteReader{ByteReader: *restic.NewByteReader(data, b.Hasher())})
	// try to delete possible leftovers
	_ = s.delayedRemove(t, b, h)
	if err == nil {
		t.Fatal("incomplete upload did not fail")
	}
}

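// wrongByteReader returns a hash with one bit flipped, so backends that verify
// the uploaded content must reject the save.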
type wrongByteReader struct {
	restic.ByteReader
}

func (b *wrongByteReader) Hash() []byte {
	h := b.ByteReader.Hash()
	modHash := make([]byte, len(h))
	copy(modHash, h)
	// flip a bit in the hash
	modHash[0] ^= 0x01
	return modHash
}

// TestSaveWrongHash tests that uploads with a wrong hash fail.
func (s *Suite) TestSaveWrongHash(t *testing.T) {
	seedRand(t)

	b := s.open(t)
	defer s.close(t, b)
	// nothing to do if the backend doesn't support external hashes
	if b.Hasher() == nil {
		return
	}

	length := rand.Intn(1<<23) + 200000
	data := test.Random(25, length)
	var id restic.ID
	copy(id[:], data)

	// test that an upload with a hash mismatch fails
	h := restic.Handle{Type: restic.PackFile, Name: id.String()}
	err := b.Save(context.TODO(), h, &wrongByteReader{ByteReader: *restic.NewByteReader(data, b.Hasher())})
	exists, err2 := b.Test(context.TODO(), h)
	if err2 != nil {
		t.Fatal(err2)
	}
	_ = s.delayedRemove(t, b, h)
	if err == nil {
		t.Fatal("upload with wrong hash did not fail")
	}
	t.Logf("%v", err)
	if exists {
		t.Fatal("Backend returned an error but stored the file anyway")
	}
}

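// testStrings pairs fixed test contents with their SHA-256 hashes.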
var testStrings = []struct {
	id   string
	data string
}{
	{"c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2", "foobar"},
	{"248d6a61d20638b8e5c026930c3e6039a33ce45964ff2167f6ecedd419db06c1", "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"},
	{"cc5d46bdb4991c6eae3eb739c9c8a7a46fe9654fab79c47b4fe48383b5b25e1c", "foo/bar"},
	{"4e54d2c721cbdb730f01b10b62dec622962b36966ec685880effa63d71c808f2", "foo/../../baz"},
}

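// store saves data under its hash with the given file type and returns the
// handle it was saved as.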
func store(t testing.TB, b restic.Backend, tpe restic.FileType, data []byte) restic.Handle {
	id := restic.Hash(data)
	h := restic.Handle{Name: id.String(), Type: tpe}
	err := b.Save(context.TODO(), h, restic.NewByteReader(data, b.Hasher()))
	test.OK(t, err)
	return h
}

// testLoad loads a blob (but discards its contents).
func testLoad(b restic.Backend, h restic.Handle, length int, offset int64) error {
	return b.Load(context.TODO(), h, 0, 0, func(rd io.Reader) (ierr error) {
		_, ierr = io.Copy(ioutil.Discard, rd)
		return ierr
	})
}

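// delayedRemove removes the given handles and waits (up to
// s.WaitForDelayedRemoval) until the backend no longer reports them as
// present.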
func (s *Suite) delayedRemove(t testing.TB, be restic.Backend, handles ...restic.Handle) error {
	// Some backends (swift, I'm looking at you) may implement delayed
	// removal of data. Let's wait a bit if this happens.

	for _, h := range handles {
		err := be.Remove(context.TODO(), h)
		if s.ErrorHandler != nil {
			err = s.ErrorHandler(t, be, err)
		}
		if err != nil {
			return err
		}
	}

	for _, h := range handles {
		start := time.Now()
		attempt := 0
		var found bool
		var err error
		for time.Since(start) <= s.WaitForDelayedRemoval {
			found, err = be.Test(context.TODO(), h)
			if s.ErrorHandler != nil {
				err = s.ErrorHandler(t, be, err)
			}
			if err != nil {
				return err
			}

			if !found {
				break
			}

			time.Sleep(2 * time.Second)
			attempt++
		}

		if found {
			t.Fatalf("removed blob %v still present after %v (%d attempts)", h, time.Since(start), attempt)
		}
	}

	return nil
}

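// delayedList lists files of the given type, retrying up to max times and
// sleeping between attempts while fewer than max IDs are visible and maxwait
// has not elapsed, to accommodate backends that list new files with a delay.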
func delayedList(t testing.TB, b restic.Backend, tpe restic.FileType, max int, maxwait time.Duration) restic.IDs {
	list := restic.NewIDSet()
	start := time.Now()
	for i := 0; i < max; i++ {
		err := b.List(context.TODO(), tpe, func(fi restic.FileInfo) error {
			id := restic.TestParseID(fi.Name)
			list.Insert(id)
			return nil
		})

		if err != nil {
			t.Fatal(err)
		}

		if len(list) < max && time.Since(start) < maxwait {
			time.Sleep(500 * time.Millisecond)
		}
	}

	return list.List()
}

// TestBackend tests all functions of the backend.
func (s *Suite) TestBackend(t *testing.T) {
	b := s.open(t)
	defer s.close(t, b)

	for _, tpe := range []restic.FileType{
		restic.PackFile, restic.KeyFile, restic.LockFile,
		restic.SnapshotFile, restic.IndexFile,
	} {
		// detect non-existing files
		for _, ts := range testStrings {
			id, err := restic.ParseID(ts.id)
			test.OK(t, err)

			// test if blob is already in repository
			h := restic.Handle{Type: tpe, Name: id.String()}
			ret, err := b.Test(context.TODO(), h)
			test.OK(t, err)
			test.Assert(t, !ret, "blob was found to exist before creating")

			// try to stat a non-existing blob
			_, err = b.Stat(context.TODO(), h)
			test.Assert(t, err != nil, "blob data could be extracted before creation")

			// try to read a non-existing blob
			err = testLoad(b, h, 0, 0)
			test.Assert(t, err != nil, "blob could be read before creation")

			// check again that the blob cannot be found
			ret, err = b.Test(context.TODO(), h)
			test.OK(t, err)
			test.Assert(t, !ret, "id %q was found (but should not have)", ts.id)
		}

		// add files
		for _, ts := range testStrings {
			store(t, b, tpe, []byte(ts.data))

			// test Load()
			h := restic.Handle{Type: tpe, Name: ts.id}
			buf, err := backend.LoadAll(context.TODO(), nil, b, h)
			test.OK(t, err)
			test.Equals(t, ts.data, string(buf))

			// try to read it out with an offset and a length
			start := 1
			end := len(ts.data) - 2
			length := end - start

			buf2 := make([]byte, length)
			var n int
			err = b.Load(context.TODO(), h, len(buf2), int64(start), func(rd io.Reader) (ierr error) {
				n, ierr = io.ReadFull(rd, buf2)
				return ierr
			})
			test.OK(t, err)
			test.Equals(t, len(buf2), n)
			test.Equals(t, ts.data[start:end], string(buf2))
		}

		// test adding the first file again
		ts := testStrings[0]
		h := restic.Handle{Type: tpe, Name: ts.id}

		// remove and recreate
		err := s.delayedRemove(t, b, h)
		test.OK(t, err)

		// test that the blob is gone
		ok, err := b.Test(context.TODO(), h)
		test.OK(t, err)
		test.Assert(t, !ok, "removed blob still present")

		// create blob
		err = b.Save(context.TODO(), h, restic.NewByteReader([]byte(ts.data), b.Hasher()))
		test.OK(t, err)

		// list items
		IDs := restic.IDs{}

		for _, ts := range testStrings {
			id, err := restic.ParseID(ts.id)
			test.OK(t, err)
			IDs = append(IDs, id)
		}

		list := delayedList(t, b, tpe, len(IDs), s.WaitForDelayedRemoval)
		if len(IDs) != len(list) {
			t.Fatalf("wrong number of IDs returned: want %d, got %d", len(IDs), len(list))
		}

		sort.Sort(IDs)
		sort.Sort(list)

		if !reflect.DeepEqual(IDs, list) {
			t.Fatalf("lists aren't equal, want:\n %v\n got:\n%v\n", IDs, list)
		}

		var handles []restic.Handle
		for _, ts := range testStrings {
			id, err := restic.ParseID(ts.id)
			test.OK(t, err)

			h := restic.Handle{Type: tpe, Name: id.String()}

			found, err := b.Test(context.TODO(), h)
			test.OK(t, err)
			test.Assert(t, found, fmt.Sprintf("id %q not found", id))

			handles = append(handles, h)
		}

		test.OK(t, s.delayedRemove(t, b, handles...))
	}
}

// TestZZZDelete tests the Delete function. The name ensures that this test is executed last.
func (s *Suite) TestZZZDelete(t *testing.T) {
	if !test.TestCleanupTempDirs {
		t.Skipf("not removing backend, TestCleanupTempDirs is false")
	}

	b := s.open(t)
	defer s.close(t, b)

	err := b.Delete(context.TODO())
	if err != nil {
		t.Fatalf("error deleting backend: %+v", err)
	}
}