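// Package test contains a test suite for restic backends.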
package test

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"io/ioutil"
	"math/rand"
	"os"
	"reflect"
	"sort"
	"testing"
	"time"

	"github.com/restic/restic/internal/backend"
	"github.com/restic/restic/internal/errors"
	"github.com/restic/restic/internal/restic"
	"github.com/restic/restic/internal/test"
)

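// seedRand seeds the math/rand generator with the current time and logs the
// seed, so a failing randomized test run can be traced back to it.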
func seedRand(t testing.TB) {
	seed := time.Now().UnixNano()
	rand.Seed(seed)
	t.Logf("rand initialized with seed %d", seed)
}

// TestCreateWithConfig tests that creating a backend in a location which already
// has a config file fails.
func (s *Suite) TestCreateWithConfig(t *testing.T) {
	b := s.open(t)
	defer s.close(t, b)

	// remove a config if present
	cfgHandle := restic.Handle{Type: restic.ConfigFile}
	cfgPresent, err := b.Test(context.TODO(), cfgHandle)
	if err != nil {
		t.Fatalf("unable to test for config: %+v", err)
	}

	if cfgPresent {
		remove(t, b, cfgHandle)
	}

	// save a config
	store(t, b, restic.ConfigFile, []byte("test config"))

	// now create the backend again, this must fail
	_, err = s.Create(s.Config)
	if err == nil {
		t.Fatalf("expected error not found for creating a backend with an existing config file")
	}

	// remove config
	err = b.Remove(context.TODO(), restic.Handle{Type: restic.ConfigFile, Name: ""})
	if err != nil {
		t.Fatalf("unexpected error removing config: %+v", err)
	}
}

// TestLocation tests that a location string is returned.
func (s *Suite) TestLocation(t *testing.T) {
	b := s.open(t)
	defer s.close(t, b)

	l := b.Location()
	if l == "" {
		t.Fatalf("invalid location string %q", l)
	}
}

// TestConfig saves and loads a config from the backend.
func (s *Suite) TestConfig(t *testing.T) {
	b := s.open(t)
	defer s.close(t, b)

	var testString = "Config"

	// create config and read it back
	_, err := backend.LoadAll(context.TODO(), b, restic.Handle{Type: restic.ConfigFile})
	if err == nil {
		t.Fatalf("did not get expected error for non-existing config")
	}

	err = b.Save(context.TODO(), restic.Handle{Type: restic.ConfigFile}, restic.NewByteReader([]byte(testString)))
	if err != nil {
		t.Fatalf("Save() error: %+v", err)
	}

	// try accessing the config with different names, should all return the
	// same config
	for _, name := range []string{"", "foo", "bar", "0000000000000000000000000000000000000000000000000000000000000000"} {
		h := restic.Handle{Type: restic.ConfigFile, Name: name}
		buf, err := backend.LoadAll(context.TODO(), b, h)
		if err != nil {
			t.Fatalf("unable to read config with name %q: %+v", name, err)
		}

		if string(buf) != testString {
			t.Fatalf("wrong data returned, want %q, got %q", testString, string(buf))
		}
	}

	// remove the config
	remove(t, b, restic.Handle{Type: restic.ConfigFile})
}

// TestLoad tests the backend's Load function.
func (s *Suite) TestLoad(t *testing.T) {
	seedRand(t)

	b := s.open(t)
	defer s.close(t, b)

	noop := func(rd io.Reader) error {
		return nil
	}

	err := b.Load(context.TODO(), restic.Handle{}, 0, 0, noop)
	if err == nil {
		t.Fatalf("Load() did not return an error for invalid handle")
	}

	err = testLoad(b, restic.Handle{Type: restic.DataFile, Name: "foobar"}, 0, 0)
	if err == nil {
		t.Fatalf("Load() did not return an error for non-existing blob")
	}

	length := rand.Intn(1<<24) + 2000

	data := test.Random(23, length)
	id := restic.Hash(data)

	handle := restic.Handle{Type: restic.DataFile, Name: id.String()}
	err = b.Save(context.TODO(), handle, restic.NewByteReader(data))
	if err != nil {
		t.Fatalf("Save() error: %+v", err)
	}

	t.Logf("saved %d bytes as %v", length, handle)

	err = b.Load(context.TODO(), handle, 100, -1, noop)
	if err == nil {
		t.Fatalf("Load() returned no error for negative offset!")
	}

	err = b.Load(context.TODO(), handle, 0, 0, func(rd io.Reader) error {
		_, err := io.Copy(ioutil.Discard, rd)
		if err != nil {
			t.Fatal(err)
		}
		return errors.Errorf("deliberate error")
	})
	if err == nil {
		t.Fatalf("Load() did not propagate consumer error!")
	}
	if err.Error() != "deliberate error" {
		t.Fatalf("Load() did not correctly propagate consumer error!")
	}

	loadTests := 50
	if s.MinimalData {
		loadTests = 10
	}

	for i := 0; i < loadTests; i++ {
		l := rand.Intn(length + 2000)
		o := rand.Intn(length + 2000)

		d := data
		if o < len(d) {
			d = d[o:]
		} else {
			t.Logf("offset == length, skipping test")
			continue
		}

		getlen := l
		if l >= len(d) && rand.Float32() >= 0.5 {
			getlen = 0
		}

		if l > 0 && l < len(d) {
			d = d[:l]
		}

		var buf []byte
		err := b.Load(context.TODO(), handle, getlen, int64(o), func(rd io.Reader) (ierr error) {
			buf, ierr = ioutil.ReadAll(rd)
			return ierr
		})
		if err != nil {
			t.Logf("Load, l %v, o %v, len(d) %v, getlen %v", l, o, len(d), getlen)
			t.Errorf("Load(%d, %d) returned unexpected error: %+v", l, o, err)
			continue
		}

		if l == 0 && len(buf) != len(d) {
			t.Logf("Load, l %v, o %v, len(d) %v, getlen %v", l, o, len(d), getlen)
			t.Errorf("Load(%d, %d) wrong number of bytes read: want %d, got %d", l, o, len(d), len(buf))
			continue
		}

		if l > 0 && l <= len(d) && len(buf) != l {
			t.Logf("Load, l %v, o %v, len(d) %v, getlen %v", l, o, len(d), getlen)
			t.Errorf("Load(%d, %d) wrong number of bytes read: want %d, got %d", l, o, l, len(buf))
			continue
		}

		if l > len(d) && len(buf) != len(d) {
			t.Logf("Load, l %v, o %v, len(d) %v, getlen %v", l, o, len(d), getlen)
			t.Errorf("Load(%d, %d) wrong number of bytes read for overlong read: want %d, got %d", l, o, l, len(buf))
			continue
		}

		if !bytes.Equal(buf, d) {
			t.Logf("Load, l %v, o %v, len(d) %v, getlen %v", l, o, len(d), getlen)
			t.Errorf("Load(%d, %d) returned wrong bytes", l, o)
			continue
		}
	}

	test.OK(t, b.Remove(context.TODO(), handle))
}

// TestList makes sure that the backend implements List() pagination correctly.
func (s *Suite) TestList(t *testing.T) {
	seedRand(t)

	numTestFiles := rand.Intn(20) + 20

	b := s.open(t)
	defer s.close(t, b)

	// Check that the backend is empty to start with
	var found []string
	err := b.List(context.TODO(), restic.DataFile, func(fi restic.FileInfo) error {
		found = append(found, fi.Name)
		return nil
	})
	if err != nil {
		t.Fatalf("List returned error %v", err)
	}
	if found != nil {
		t.Fatalf("backend not empty at start of test - contains: %v", found)
	}

	list1 := make(map[restic.ID]int64)

	for i := 0; i < numTestFiles; i++ {
		data := test.Random(rand.Int(), rand.Intn(100)+55)
		id := restic.Hash(data)
		h := restic.Handle{Type: restic.DataFile, Name: id.String()}
		err := b.Save(context.TODO(), h, restic.NewByteReader(data))
		if err != nil {
			t.Fatal(err)
		}
		list1[id] = int64(len(data))
	}

	t.Logf("wrote %v files", len(list1))

	var tests = []struct {
		maxItems int
	}{
		{11}, {23}, {numTestFiles}, {numTestFiles + 10}, {numTestFiles + 1123},
	}

	for _, test := range tests {
		t.Run(fmt.Sprintf("max-%v", test.maxItems), func(t *testing.T) {
			list2 := make(map[restic.ID]int64)

			type setter interface {
				SetListMaxItems(int)
			}

			if s, ok := b.(setter); ok {
				t.Logf("setting max list items to %d", test.maxItems)
				s.SetListMaxItems(test.maxItems)
			}

			err := b.List(context.TODO(), restic.DataFile, func(fi restic.FileInfo) error {
				id, err := restic.ParseID(fi.Name)
				if err != nil {
					t.Fatal(err)
				}
				list2[id] = fi.Size
				return nil
			})

			if err != nil {
				t.Fatalf("List returned error %v", err)
			}

			t.Logf("loaded %v IDs from backend", len(list2))

			for id, size := range list1 {
				size2, ok := list2[id]
				if !ok {
					t.Errorf("id %v not returned by List()", id.Str())
				}

				if size != size2 {
					t.Errorf("wrong size for id %v returned: want %v, got %v", id.Str(), size, size2)
				}
			}

			for id := range list2 {
				_, ok := list1[id]
				if !ok {
					t.Errorf("extra id %v returned by List()", id.Str())
				}
			}
		})
	}

	t.Logf("remove %d files", numTestFiles)
	handles := make([]restic.Handle, 0, len(list1))
	for id := range list1 {
		handles = append(handles, restic.Handle{Type: restic.DataFile, Name: id.String()})
	}

	err = s.delayedRemove(t, b, handles...)
	if err != nil {
		t.Fatal(err)
	}
}

// TestListCancel tests that List respects cancellation and timeouts on the
// passed-in context and returns the context's error.
func (s *Suite) TestListCancel(t *testing.T) {
	seedRand(t)

	numTestFiles := 5

	b := s.open(t)
	defer s.close(t, b)

	testFiles := make([]restic.Handle, 0, numTestFiles)

	for i := 0; i < numTestFiles; i++ {
		data := []byte(fmt.Sprintf("random test blob %v", i))
		id := restic.Hash(data)
		h := restic.Handle{Type: restic.DataFile, Name: id.String()}
		err := b.Save(context.TODO(), h, restic.NewByteReader(data))
		if err != nil {
			t.Fatal(err)
		}
		testFiles = append(testFiles, h)
	}

	t.Run("Cancelled", func(t *testing.T) {
		ctx, cancel := context.WithCancel(context.TODO())
		cancel()

		// pass in a cancelled context
		err := b.List(ctx, restic.DataFile, func(fi restic.FileInfo) error {
			t.Errorf("got FileInfo %v for cancelled context", fi)
			return nil
		})

		if errors.Cause(err) != context.Canceled {
			t.Fatalf("expected error not found, want %v, got %v", context.Canceled, errors.Cause(err))
		}
	})

	t.Run("First", func(t *testing.T) {
		ctx, cancel := context.WithCancel(context.TODO())
		defer cancel()

		i := 0
		err := b.List(ctx, restic.DataFile, func(fi restic.FileInfo) error {
			i++
			// cancel the context on the first file
			if i == 1 {
				cancel()
			}
			return nil
		})

		if errors.Cause(err) != context.Canceled {
			t.Fatalf("expected error not found, want %v, got %v", context.Canceled, err)
		}

		if i != 1 {
			t.Fatalf("wrong number of files returned by List, want %v, got %v", 1, i)
		}
	})

	t.Run("Last", func(t *testing.T) {
		ctx, cancel := context.WithCancel(context.TODO())
		defer cancel()

		i := 0
		err := b.List(ctx, restic.DataFile, func(fi restic.FileInfo) error {
			// cancel the context at the last file
			i++
			if i == numTestFiles {
				cancel()
			}
			return nil
		})

		if errors.Cause(err) != context.Canceled {
			t.Fatalf("expected error not found, want %v, got %v", context.Canceled, err)
		}

		if i != numTestFiles {
			t.Fatalf("wrong number of files returned by List, want %v, got %v", numTestFiles, i)
		}
	})

	t.Run("Timeout", func(t *testing.T) {
		ctx, cancel := context.WithCancel(context.TODO())
		defer cancel()

		// rather large timeout, let's try to get at least one item
		timeout := time.Second

		ctxTimeout, _ := context.WithTimeout(ctx, timeout)

		i := 0
		// pass in a context with a timeout
		err := b.List(ctxTimeout, restic.DataFile, func(fi restic.FileInfo) error {
			i++

			// wait until the context is cancelled
			<-ctxTimeout.Done()
			return nil
		})

		if errors.Cause(err) != context.DeadlineExceeded {
			t.Fatalf("expected error not found, want %#v, got %#v", context.DeadlineExceeded, err)
		}

		if i > 2 {
			t.Fatalf("wrong number of files returned by List, want <= 2, got %v", i)
		}
	})

	err := s.delayedRemove(t, b, testFiles...)
	if err != nil {
		t.Fatal(err)
	}
}

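// errorCloser wraps a ReadSeeker for use with Save and fails the test if the
// backend calls the forbidden Close method on the reader.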
type errorCloser struct {
	io.ReadSeeker
	l int64
	t testing.TB
}

func (ec errorCloser) Close() error {
	ec.t.Error("forbidden method close was called")
	return errors.New("forbidden method close was called")
}

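// Length returns the length of the wrapped data.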
func (ec errorCloser) Length() int64 {
	return ec.l
}

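// Rewind seeks the wrapped ReadSeeker back to the start of the data.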
func (ec errorCloser) Rewind() error {
	_, err := ec.ReadSeeker.Seek(0, io.SeekStart)
	return err
}

// TestSave tests saving data in the backend.
func (s *Suite) TestSave(t *testing.T) {
	seedRand(t)

	b := s.open(t)
	defer s.close(t, b)
	var id restic.ID

	saveTests := 10
	if s.MinimalData {
		saveTests = 2
	}

	for i := 0; i < saveTests; i++ {
		length := rand.Intn(1<<23) + 200000
		data := test.Random(23, length)
		// use the first 32 bytes as the ID
		copy(id[:], data)

		h := restic.Handle{
			Type: restic.DataFile,
			Name: fmt.Sprintf("%s-%d", id, i),
		}
		err := b.Save(context.TODO(), h, restic.NewByteReader(data))
		test.OK(t, err)

		buf, err := backend.LoadAll(context.TODO(), b, h)
		test.OK(t, err)
		if len(buf) != len(data) {
			t.Fatalf("number of bytes does not match, want %v, got %v", len(data), len(buf))
		}

		if !bytes.Equal(buf, data) {
			t.Fatalf("data not equal")
		}

		fi, err := b.Stat(context.TODO(), h)
		test.OK(t, err)

		if fi.Name != h.Name {
			t.Errorf("Stat() returned wrong name, want %q, got %q", h.Name, fi.Name)
		}

		if fi.Size != int64(len(data)) {
			t.Errorf("Stat() returned different size, want %d, got %d", len(data), fi.Size)
		}

		err = b.Remove(context.TODO(), h)
		if err != nil {
			t.Fatalf("error removing item: %+v", err)
		}
	}

	// test saving from a tempfile
	tmpfile, err := ioutil.TempFile("", "restic-backend-save-test-")
	if err != nil {
		t.Fatal(err)
	}

	length := rand.Intn(1<<23) + 200000
	data := test.Random(23, length)
	copy(id[:], data)

	if _, err = tmpfile.Write(data); err != nil {
		t.Fatal(err)
	}

	if _, err = tmpfile.Seek(0, io.SeekStart); err != nil {
		t.Fatal(err)
	}

	h := restic.Handle{Type: restic.DataFile, Name: id.String()}

	// wrap the tempfile in an errorCloser, so we can detect if the backend
	// closes the reader
	err = b.Save(context.TODO(), h, errorCloser{t: t, l: int64(length), ReadSeeker: tmpfile})
	if err != nil {
		t.Fatal(err)
	}

	err = s.delayedRemove(t, b, h)
	if err != nil {
		t.Fatalf("error removing item: %+v", err)
	}

	if err = tmpfile.Close(); err != nil {
		t.Fatal(err)
	}

	if err = os.Remove(tmpfile.Name()); err != nil {
		t.Fatal(err)
	}
}

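// filenameTests lists file names (not all of them valid IDs) together with the
// data stored under each name; TestSaveFilenames saves and reloads these entries.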
var filenameTests = []struct {
	name string
	data string
}{
	{"1dfc6bc0f06cb255889e9ea7860a5753e8eb9665c9a96627971171b444e3113e", "x"},
	{"f00b4r", "foobar"},
	{
		"1dfc6bc0f06cb255889e9ea7860a5753e8eb9665c9a96627971171b444e3113e4bf8f2d9144cc5420a80f04a4880ad6155fc58903a4fb6457c476c43541dcaa6-5",
		"foobar content of data blob",
	},
}

// TestSaveFilenames tests saving data with various file names in the backend.
func (s *Suite) TestSaveFilenames(t *testing.T) {
	b := s.open(t)
	defer s.close(t, b)

	for i, test := range filenameTests {
		h := restic.Handle{Name: test.name, Type: restic.DataFile}
		err := b.Save(context.TODO(), h, restic.NewByteReader([]byte(test.data)))
		if err != nil {
			t.Errorf("test %d failed: Save() returned %+v", i, err)
			continue
		}

		buf, err := backend.LoadAll(context.TODO(), b, h)
		if err != nil {
			t.Errorf("test %d failed: Load() returned %+v", i, err)
			continue
		}

		if !bytes.Equal(buf, []byte(test.data)) {
			t.Errorf("test %d: returned wrong bytes", i)
		}

		err = b.Remove(context.TODO(), h)
		if err != nil {
			t.Errorf("test %d failed: Remove() returned %+v", i, err)
			continue
		}
	}
}

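// testStrings pairs the SHA-256 hash of each data string with the data itself;
// the hashes are used as file names in TestBackend.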
var testStrings = []struct {
	id   string
	data string
}{
	{"c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2", "foobar"},
	{"248d6a61d20638b8e5c026930c3e6039a33ce45964ff2167f6ecedd419db06c1", "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"},
	{"cc5d46bdb4991c6eae3eb739c9c8a7a46fe9654fab79c47b4fe48383b5b25e1c", "foo/bar"},
	{"4e54d2c721cbdb730f01b10b62dec622962b36966ec685880effa63d71c808f2", "foo/../../baz"},
}

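// store saves data in the backend under its restic.Hash as a file of the given
// type and returns the handle.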
func store(t testing.TB, b restic.Backend, tpe restic.FileType, data []byte) restic.Handle {
	id := restic.Hash(data)
	h := restic.Handle{Name: id.String(), Type: tpe}
	err := b.Save(context.TODO(), h, restic.NewByteReader(data))
	test.OK(t, err)
	return h
}

// testLoad loads a blob with the given length and offset (but discards its
// contents).
func testLoad(b restic.Backend, h restic.Handle, length int, offset int64) error {
	return b.Load(context.TODO(), h, length, offset, func(rd io.Reader) (ierr error) {
		_, ierr = io.Copy(ioutil.Discard, rd)
		return ierr
	})
}

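// delayedRemove removes the given handles and then waits until the backend no
// longer reports them, up to s.WaitForDelayedRemoval, to accommodate backends
// that delete data asynchronously.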
func (s *Suite) delayedRemove(t testing.TB, be restic.Backend, handles ...restic.Handle) error {
	// Some backend (swift, I'm looking at you) may implement delayed
	// removal of data. Let's wait a bit if this happens.

	for _, h := range handles {
		err := be.Remove(context.TODO(), h)
		if s.ErrorHandler != nil {
			err = s.ErrorHandler(t, be, err)
		}
		if err != nil {
			return err
		}
	}

	for _, h := range handles {
		start := time.Now()
		attempt := 0
		var found bool
		var err error
		for time.Since(start) <= s.WaitForDelayedRemoval {
			found, err = be.Test(context.TODO(), h)
			if s.ErrorHandler != nil {
				err = s.ErrorHandler(t, be, err)
			}
			if err != nil {
				return err
			}

			if !found {
				break
			}

			time.Sleep(2 * time.Second)
			attempt++
		}

		if found {
			t.Fatalf("removed blob %v still present after %v (%d attempts)", h, time.Since(start), attempt)
		}
	}

	return nil
}

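// delayedList polls List until it has seen at least max IDs of the given type
// or maxwait has elapsed, to accommodate backends with eventually consistent
// listings.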
func delayedList(t testing.TB, b restic.Backend, tpe restic.FileType, max int, maxwait time.Duration) restic.IDs {
	list := restic.NewIDSet()
	start := time.Now()
	for i := 0; i < max; i++ {
		err := b.List(context.TODO(), tpe, func(fi restic.FileInfo) error {
			id := restic.TestParseID(fi.Name)
			list.Insert(id)
			return nil
		})

		if err != nil {
			t.Fatal(err)
		}

		if len(list) < max && time.Since(start) < maxwait {
			time.Sleep(500 * time.Millisecond)
		}
	}

	return list.List()
}

// TestBackend tests all functions of the backend.
func (s *Suite) TestBackend(t *testing.T) {
	b := s.open(t)
	defer s.close(t, b)

	for _, tpe := range []restic.FileType{
		restic.DataFile, restic.KeyFile, restic.LockFile,
		restic.SnapshotFile, restic.IndexFile,
	} {
		// detect non-existing files
		for _, ts := range testStrings {
			id, err := restic.ParseID(ts.id)
			test.OK(t, err)

			// test if blob is already in repository
			h := restic.Handle{Type: tpe, Name: id.String()}
			ret, err := b.Test(context.TODO(), h)
			test.OK(t, err)
			test.Assert(t, !ret, "blob was found to exist before creating")

			// try to stat a non-existing blob
			_, err = b.Stat(context.TODO(), h)
			test.Assert(t, err != nil, "blob data could be extracted before creation")

			// try to read the non-existing blob
			err = testLoad(b, h, 0, 0)
			test.Assert(t, err != nil, "blob could be read before creation")

			// the blob should still not be present
			ret, err = b.Test(context.TODO(), h)
			test.OK(t, err)
			test.Assert(t, !ret, "id %q was found (but should not have)", ts.id)
		}

		// add files
		for _, ts := range testStrings {
			store(t, b, tpe, []byte(ts.data))

			// test Load()
			h := restic.Handle{Type: tpe, Name: ts.id}
			buf, err := backend.LoadAll(context.TODO(), b, h)
			test.OK(t, err)
			test.Equals(t, ts.data, string(buf))

			// try to read it out with an offset and a length
			start := 1
			end := len(ts.data) - 2
			length := end - start

			buf2 := make([]byte, length)
			var n int
			err = b.Load(context.TODO(), h, len(buf2), int64(start), func(rd io.Reader) (ierr error) {
				n, ierr = io.ReadFull(rd, buf2)
				return ierr
			})
			test.OK(t, err)
			test.Equals(t, len(buf2), n)
			test.Equals(t, ts.data[start:end], string(buf2))
		}

		// test adding the first file again
		ts := testStrings[0]
		h := restic.Handle{Type: tpe, Name: ts.id}

		// remove and recreate
		err := s.delayedRemove(t, b, h)
		test.OK(t, err)

		// test that the blob is gone
		ok, err := b.Test(context.TODO(), h)
		test.OK(t, err)
		test.Assert(t, !ok, "removed blob still present")

		// create blob
		err = b.Save(context.TODO(), h, restic.NewByteReader([]byte(ts.data)))
		test.OK(t, err)

		// list items
		IDs := restic.IDs{}

		for _, ts := range testStrings {
			id, err := restic.ParseID(ts.id)
			test.OK(t, err)
			IDs = append(IDs, id)
		}

		list := delayedList(t, b, tpe, len(IDs), s.WaitForDelayedRemoval)
		if len(IDs) != len(list) {
			t.Fatalf("wrong number of IDs returned: want %d, got %d", len(IDs), len(list))
		}

		sort.Sort(IDs)
		sort.Sort(list)

		if !reflect.DeepEqual(IDs, list) {
			t.Fatalf("lists aren't equal, want:\n  %v\n  got:\n%v\n", IDs, list)
		}

		var handles []restic.Handle
		for _, ts := range testStrings {
			id, err := restic.ParseID(ts.id)
			test.OK(t, err)

			h := restic.Handle{Type: tpe, Name: id.String()}

			found, err := b.Test(context.TODO(), h)
			test.OK(t, err)
			test.Assert(t, found, fmt.Sprintf("id %q not found", id))

			handles = append(handles, h)
		}

		test.OK(t, s.delayedRemove(t, b, handles...))
	}
}

// TestZZZDelete tests the Delete function. The name ensures that this test is executed last.
func (s *Suite) TestZZZDelete(t *testing.T) {
	if !test.TestCleanupTempDirs {
		t.Skipf("not removing backend, TestCleanupTempDirs is false")
	}

	b := s.open(t)
	defer s.close(t, b)

	err := b.Delete(context.TODO())
	if err != nil {
		t.Fatalf("error deleting backend: %+v", err)
	}
}