
Merge pull request #4709 from MichaelEischer/refactor-locking

Refactor locking into repository package
Michael Eischer 2024-03-28 23:53:09 +01:00 committed by GitHub
commit 510f6f06b0
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
42 changed files with 584 additions and 786 deletions


@ -0,0 +1,10 @@
Bugfix: Correct `--no-lock` handling of the `ls` and `tag` commands

The `ls` command never locked the repository. This has been fixed. The old
behavior is still supported using `ls --no-lock`. The latter invocation also
works with older restic versions.

The `tag` command erroneously accepted the `--no-lock` option. The command
now always requires an exclusive lock.
https://github.com/restic/restic/pull/4709
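
The hunks below implement this by routing both commands through the PR's new locking helpers. As a minimal sketch of the resulting behavior (helper names as introduced in this PR, error handling elided):

// ls: read lock by default; --no-lock restores the old no-locking behavior.
ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock)
defer unlock()

// tag: always an exclusive lock; --no-lock is no longer honored.
ctx, repo, unlock, err = openWithExclusiveLock(ctx, gopts, false)
defer unlock()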


@ -463,10 +463,11 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter
Verbosef("open repository\n")
}
repo, err := OpenRepository(ctx, gopts)
ctx, repo, unlock, err := openWithAppendLock(ctx, gopts, opts.DryRun)
if err != nil {
return err
}
defer unlock()
var progressPrinter backup.ProgressPrinter
if gopts.JSON {
@ -478,22 +479,6 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter
calculateProgressInterval(!gopts.Quiet, gopts.JSON))
defer progressReporter.Done()
if opts.DryRun {
repo.SetDryRun()
}
if !gopts.JSON {
progressPrinter.V("lock repository")
}
if !opts.DryRun {
var lock *restic.Lock
lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
}
}
// rejectByNameFuncs collect functions that can reject items from the backup based on path only
rejectByNameFuncs, err := collectRejectByNameFuncs(opts, repo)
if err != nil {

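The backup hunk above collapses the separate dry-run and locking branches into a single helper call. A sketch of the contract that call provides, as implemented by internalOpenWithLocked later in this diff:

// dryRun == true:  no lock is taken, repo.SetDryRun() is called, and the
//                  returned unlock() is a no-op.
// dryRun == false: an append (non-exclusive) lock is taken and refreshed in
//                  the background; unlock() releases it.
ctx, repo, unlock, err := openWithAppendLock(ctx, gopts, opts.DryRun)
if err != nil {
	return err
}
defer unlock()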

@ -9,7 +9,6 @@ import (
"runtime"
"testing"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/fs"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
@ -250,29 +249,18 @@ func TestBackupTreeLoadError(t *testing.T) {
opts := BackupOptions{}
// Backup a subdirectory first, such that we can remove the tree pack for the subdirectory
testRunBackup(t, env.testdata, []string{"test"}, opts, env.gopts)
r, err := OpenRepository(context.TODO(), env.gopts)
rtest.OK(t, err)
rtest.OK(t, r.LoadIndex(context.TODO(), nil))
treePacks := restic.NewIDSet()
r.Index().Each(context.TODO(), func(pb restic.PackedBlob) {
if pb.Type == restic.TreeBlob {
treePacks.Insert(pb.PackID)
}
})
treePacks := listTreePacks(env.gopts, t)
testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
testRunCheck(t, env.gopts)
// delete the subdirectory pack first
for id := range treePacks {
rtest.OK(t, r.Backend().Remove(context.TODO(), backend.Handle{Type: restic.PackFile, Name: id.String()}))
}
removePacks(env.gopts, t, treePacks)
testRunRebuildIndex(t, env.gopts)
// now the repo is missing the tree blob in the index; check should report this
testRunCheckMustFail(t, env.gopts)
// second backup should report an error but "heal" this situation
err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
err := testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
rtest.Assert(t, err != nil, "backup should have reported an error for the subdirectory")
testRunCheck(t, env.gopts)


@ -64,19 +64,11 @@ func runCat(ctx context.Context, gopts GlobalOptions, args []string) error {
return err
}
repo, err := OpenRepository(ctx, gopts)
ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock)
if err != nil {
return err
}
if !gopts.NoLock {
var lock *restic.Lock
lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
}
}
defer unlock()
tpe := args[0]


@ -204,20 +204,14 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
return code, nil
})
repo, err := OpenRepository(ctx, gopts)
if !gopts.NoLock {
Verbosef("create exclusive lock for repository\n")
}
ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, gopts.NoLock)
if err != nil {
return err
}
if !gopts.NoLock {
Verbosef("create exclusive lock for repository\n")
var lock *restic.Lock
lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
}
}
defer unlock()
chkr := checker.New(repo, opts.CheckUnused)
err = chkr.LoadSnapshots(ctx)

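Note that check passes gopts.NoLock as the helper's dry-run argument: per internalOpenWithLocked later in this diff, --no-lock therefore both skips the exclusive lock and puts the repository into dry-run mode, which is harmless for this read-only command. In sketch form:

// --no-lock is mapped onto the dry-run parameter: no lock file is created
// and repo.SetDryRun() guards against accidental writes.
ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, gopts.NoLock)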

@ -62,30 +62,17 @@ func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args []
gopts, secondaryGopts = secondaryGopts, gopts
}
srcRepo, err := OpenRepository(ctx, gopts)
ctx, srcRepo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock)
if err != nil {
return err
}
defer unlock()
dstRepo, err := OpenRepository(ctx, secondaryGopts)
if err != nil {
return err
}
if !gopts.NoLock {
var srcLock *restic.Lock
srcLock, ctx, err = lockRepo(ctx, srcRepo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(srcLock)
if err != nil {
return err
}
}
dstLock, ctx, err := lockRepo(ctx, dstRepo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(dstLock)
ctx, dstRepo, unlock, err := openWithAppendLock(ctx, secondaryGopts, false)
if err != nil {
return err
}
defer unlock()
srcSnapshotLister, err := restic.MemorizeList(ctx, srcRepo, restic.SnapshotFile)
if err != nil {

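The copy hunk locks both repositories through the helpers: a read lock on the source (skippable via --no-lock) and an append lock on the destination, which is always taken. Condensed:

ctx, srcRepo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock)
if err != nil {
	return err
}
// the deferred function value is captured here, before unlock is reassigned
defer unlock()

ctx, dstRepo, unlock, err := openWithAppendLock(ctx, secondaryGopts, false)
if err != nil {
	return err
}
defer unlock()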

@ -153,19 +153,11 @@ func runDebugDump(ctx context.Context, gopts GlobalOptions, args []string) error
return errors.Fatal("type not specified")
}
repo, err := OpenRepository(ctx, gopts)
ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock)
if err != nil {
return err
}
if !gopts.NoLock {
var lock *restic.Lock
lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
}
}
defer unlock()
tpe := args[0]
@ -442,10 +434,15 @@ func storePlainBlob(id restic.ID, prefix string, plain []byte) error {
}
func runDebugExamine(ctx context.Context, gopts GlobalOptions, opts DebugExamineOptions, args []string) error {
repo, err := OpenRepository(ctx, gopts)
if opts.ExtractPack && gopts.NoLock {
return fmt.Errorf("--extract-pack and --no-lock are mutually exclusive")
}
ctx, repo, unlock, err := openWithAppendLock(ctx, gopts, gopts.NoLock)
if err != nil {
return err
}
defer unlock()
ids := make([]restic.ID, 0)
for _, name := range args {
@ -464,15 +461,6 @@ func runDebugExamine(ctx context.Context, gopts GlobalOptions, opts DebugExamine
return errors.Fatal("no pack files to examine")
}
if !gopts.NoLock {
var lock *restic.Lock
lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
}
}
bar := newIndexProgress(gopts.Quiet, gopts.JSON)
err = repo.LoadIndex(ctx, bar)
if err != nil {


@ -344,19 +344,11 @@ func runDiff(ctx context.Context, opts DiffOptions, gopts GlobalOptions, args []
return errors.Fatalf("specify two snapshot IDs")
}
repo, err := OpenRepository(ctx, gopts)
ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock)
if err != nil {
return err
}
if !gopts.NoLock {
var lock *restic.Lock
lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
}
}
defer unlock()
// cache snapshots listing
be, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile)


@ -131,19 +131,11 @@ func runDump(ctx context.Context, opts DumpOptions, gopts GlobalOptions, args []
splittedPath := splitPath(path.Clean(pathToPrint))
repo, err := OpenRepository(ctx, gopts)
ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock)
if err != nil {
return err
}
if !gopts.NoLock {
var lock *restic.Lock
lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
}
}
defer unlock()
sn, subfolder, err := (&restic.SnapshotFilter{
Hosts: opts.Hosts,


@ -563,19 +563,11 @@ func runFind(ctx context.Context, opts FindOptions, gopts GlobalOptions, args []
return errors.Fatal("cannot have several ID types")
}
repo, err := OpenRepository(ctx, gopts)
ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock)
if err != nil {
return err
}
if !gopts.NoLock {
var lock *restic.Lock
lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
}
}
defer unlock()
snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile)
if err != nil {


@ -163,23 +163,15 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption
return err
}
repo, err := OpenRepository(ctx, gopts)
if err != nil {
return err
}
if gopts.NoLock && !opts.DryRun {
return errors.Fatal("--no-lock is only applicable in combination with --dry-run for forget command")
}
if !opts.DryRun || !gopts.NoLock {
var lock *restic.Lock
lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
}
ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, opts.DryRun && gopts.NoLock)
if err != nil {
return err
}
defer unlock()
var snapshots restic.Snapshots
removeSnIDs := restic.NewIDSet()

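forget keeps an explicit guard: --no-lock is only permitted together with --dry-run, and only that combination skips the exclusive lock. The resulting logic, condensed from the hunk above:

if gopts.NoLock && !opts.DryRun {
	return errors.Fatal("--no-lock is only applicable in combination with --dry-run for forget command")
}
// the lock is skipped only when both flags are set
ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, opts.DryRun && gopts.NoLock)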

@ -50,16 +50,11 @@ func runKeyAdd(ctx context.Context, gopts GlobalOptions, opts KeyAddOptions, arg
return fmt.Errorf("the key add command expects no arguments, only options - please see `restic help key add` for usage and flags")
}
repo, err := OpenRepository(ctx, gopts)
if err != nil {
return err
}
lock, ctx, err := lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
ctx, repo, unlock, err := openWithAppendLock(ctx, gopts, false)
if err != nil {
return err
}
defer unlock()
return addKey(ctx, repo, gopts, opts)
}
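
The key subcommands now encode the lock level they need: key add only writes a new key file and takes an append lock, while key passwd and key remove (next hunks) also delete a key file and keep an exclusive lock. That rationale is my reading of the hunks; the calls themselves are from the diff:

// key add: only appends a new key file, so a non-exclusive append lock suffices
ctx, repo, unlock, err := openWithAppendLock(ctx, gopts, false)
// key passwd / key remove delete the old key file and instead use
// openWithExclusiveLock(ctx, gopts, false)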


@ -40,19 +40,11 @@ func runKeyList(ctx context.Context, gopts GlobalOptions, args []string) error {
return fmt.Errorf("the key list command expects no arguments, only options - please see `restic help key list` for usage and flags")
}
repo, err := OpenRepository(ctx, gopts)
ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock)
if err != nil {
return err
}
if !gopts.NoLock {
var lock *restic.Lock
lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
}
}
defer unlock()
return listKeys(ctx, repo, gopts)
}


@ -47,16 +47,11 @@ func runKeyPasswd(ctx context.Context, gopts GlobalOptions, opts KeyPasswdOption
return fmt.Errorf("the key passwd command expects no arguments, only options - please see `restic help key passwd` for usage and flags")
}
repo, err := OpenRepository(ctx, gopts)
if err != nil {
return err
}
lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false)
if err != nil {
return err
}
defer unlock()
return changePassword(ctx, repo, gopts, opts)
}


@ -37,20 +37,13 @@ func runKeyRemove(ctx context.Context, gopts GlobalOptions, args []string) error
return fmt.Errorf("key remove expects one argument as the key id")
}
repo, err := OpenRepository(ctx, gopts)
ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false)
if err != nil {
return err
}
defer unlock()
lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
}
idPrefix := args[0]
return deleteKey(ctx, repo, idPrefix)
return deleteKey(ctx, repo, args[0])
}
func deleteKey(ctx context.Context, repo *repository.Repository, idPrefix string) error {


@ -36,19 +36,11 @@ func runList(ctx context.Context, gopts GlobalOptions, args []string) error {
return errors.Fatal("type not specified")
}
repo, err := OpenRepository(ctx, gopts)
ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock || args[0] == "locks")
if err != nil {
return err
}
if !gopts.NoLock && args[0] != "locks" {
var lock *restic.Lock
lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
}
}
defer unlock()
var t restic.FileType
switch args[0] {

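One special case in the list hunk above: when listing lock files, the helper is told to skip locking, presumably so the command does not add a lock of its own to the output it is about to produce:

// skip locking for --no-lock and for `restic list locks`
ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock || args[0] == "locks")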

@ -309,10 +309,11 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri
return false
}
repo, err := OpenRepository(ctx, gopts)
ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock)
if err != nil {
return err
}
defer unlock()
snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile)
if err != nil {


@ -117,16 +117,11 @@ func applyMigrations(ctx context.Context, opts MigrateOptions, gopts GlobalOptio
}
func runMigrate(ctx context.Context, opts MigrateOptions, gopts GlobalOptions, args []string) error {
repo, err := OpenRepository(ctx, gopts)
if err != nil {
return err
}
lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false)
if err != nil {
return err
}
defer unlock()
if len(args) == 0 {
return checkMigrations(ctx, repo)


@ -125,19 +125,11 @@ func runMount(ctx context.Context, opts MountOptions, gopts GlobalOptions, args
debug.Log("start mount")
defer debug.Log("finish mount")
repo, err := OpenRepository(ctx, gopts)
ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock)
if err != nil {
return err
}
if !gopts.NoLock {
var lock *restic.Lock
lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
}
}
defer unlock()
bar := newIndexProgress(gopts.Quiet, gopts.JSON)
err = repo.LoadIndex(ctx, bar)


@ -12,7 +12,6 @@ import (
"testing"
"time"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
)
@ -86,12 +85,12 @@ func listSnapshots(t testing.TB, dir string) []string {
return names
}
func checkSnapshots(t testing.TB, global GlobalOptions, repo *repository.Repository, mountpoint, repodir string, snapshotIDs restic.IDs, expectedSnapshotsInFuseDir int) {
func checkSnapshots(t testing.TB, gopts GlobalOptions, mountpoint string, snapshotIDs restic.IDs, expectedSnapshotsInFuseDir int) {
t.Logf("checking for %d snapshots: %v", len(snapshotIDs), snapshotIDs)
var wg sync.WaitGroup
wg.Add(1)
go testRunMount(t, global, mountpoint, &wg)
go testRunMount(t, gopts, mountpoint, &wg)
waitForMount(t, mountpoint)
defer wg.Wait()
defer testRunUmount(t, mountpoint)
@ -100,7 +99,7 @@ func checkSnapshots(t testing.TB, global GlobalOptions, repo *repository.Reposit
t.Fatal(`virtual directory "snapshots" doesn't exist`)
}
ids := listSnapshots(t, repodir)
ids := listSnapshots(t, gopts.Repo)
t.Logf("found %v snapshots in repo: %v", len(ids), ids)
namesInSnapshots := listSnapshots(t, mountpoint)
@ -124,6 +123,10 @@ func checkSnapshots(t testing.TB, global GlobalOptions, repo *repository.Reposit
}
}
_, repo, unlock, err := openWithReadLock(context.TODO(), gopts, false)
rtest.OK(t, err)
defer unlock()
for _, id := range snapshotIDs {
snapshot, err := restic.LoadSnapshot(context.TODO(), repo, id)
rtest.OK(t, err)
@ -166,10 +169,7 @@ func TestMount(t *testing.T) {
testRunInit(t, env.gopts)
repo, err := OpenRepository(context.TODO(), env.gopts)
rtest.OK(t, err)
checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, []restic.ID{}, 0)
checkSnapshots(t, env.gopts, env.mountpoint, []restic.ID{}, 0)
rtest.SetupTarTestFixture(t, env.testdata, filepath.Join("testdata", "backup-data.tar.gz"))
@ -179,7 +179,7 @@ func TestMount(t *testing.T) {
rtest.Assert(t, len(snapshotIDs) == 1,
"expected one snapshot, got %v", snapshotIDs)
checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, snapshotIDs, 2)
checkSnapshots(t, env.gopts, env.mountpoint, snapshotIDs, 2)
// second backup, implicit incremental
testRunBackup(t, "", []string{env.testdata}, BackupOptions{}, env.gopts)
@ -187,7 +187,7 @@ func TestMount(t *testing.T) {
rtest.Assert(t, len(snapshotIDs) == 2,
"expected two snapshots, got %v", snapshotIDs)
checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, snapshotIDs, 3)
checkSnapshots(t, env.gopts, env.mountpoint, snapshotIDs, 3)
// third backup, explicit incremental
bopts := BackupOptions{Parent: snapshotIDs[0].String()}
@ -196,7 +196,7 @@ func TestMount(t *testing.T) {
rtest.Assert(t, len(snapshotIDs) == 3,
"expected three snapshots, got %v", snapshotIDs)
checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, snapshotIDs, 4)
checkSnapshots(t, env.gopts, env.mountpoint, snapshotIDs, 4)
}
func TestMountSameTimestamps(t *testing.T) {
@ -211,14 +211,11 @@ func TestMountSameTimestamps(t *testing.T) {
rtest.SetupTarTestFixture(t, env.base, filepath.Join("testdata", "repo-same-timestamps.tar.gz"))
repo, err := OpenRepository(context.TODO(), env.gopts)
rtest.OK(t, err)
ids := []restic.ID{
restic.TestParseID("280303689e5027328889a06d718b729e96a1ce6ae9ef8290bff550459ae611ee"),
restic.TestParseID("75ad6cdc0868e082f2596d5ab8705e9f7d87316f5bf5690385eeff8dbe49d9f5"),
restic.TestParseID("5fd0d8b2ef0fa5d23e58f1e460188abb0f525c0f0c4af8365a1280c807a80a1b"),
}
checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, ids, 4)
checkSnapshots(t, env.gopts, env.mountpoint, ids, 4)
}


@ -148,10 +148,11 @@ func runPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions) error
return errors.Fatal("disabled compression and `--repack-uncompressed` are mutually exclusive")
}
repo, err := OpenRepository(ctx, gopts)
ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false)
if err != nil {
return err
}
defer unlock()
if repo.Connections() < 2 {
return errors.Fatal("prune requires a backend connection limit of at least two")
@ -169,12 +170,6 @@ func runPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions) error
opts.unsafeRecovery = true
}
lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
}
return runPruneWithRepo(ctx, opts, gopts, repo, restic.NewIDSet())
}


@ -40,16 +40,11 @@ func runRecover(ctx context.Context, gopts GlobalOptions) error {
return err
}
repo, err := OpenRepository(ctx, gopts)
if err != nil {
return err
}
lock, ctx, err := lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
ctx, repo, unlock, err := openWithAppendLock(ctx, gopts, false)
if err != nil {
return err
}
defer unlock()
snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile)
if err != nil {


@ -56,16 +56,11 @@ func init() {
}
func runRebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOptions) error {
repo, err := OpenRepository(ctx, gopts)
if err != nil {
return err
}
lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false)
if err != nil {
return err
}
defer unlock()
return rebuildIndex(ctx, opts, gopts, repo)
}


@ -52,16 +52,11 @@ func runRepairPacks(ctx context.Context, gopts GlobalOptions, term *termstatus.T
return errors.Fatal("no ids specified")
}
repo, err := OpenRepository(ctx, gopts)
if err != nil {
return err
}
lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false)
if err != nil {
return err
}
defer unlock()
bar := newIndexProgress(gopts.Quiet, gopts.JSON)
err = repo.LoadIndex(ctx, bar)


@ -66,22 +66,11 @@ func init() {
}
func runRepairSnapshots(ctx context.Context, gopts GlobalOptions, opts RepairOptions, args []string) error {
repo, err := OpenRepository(ctx, gopts)
ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, opts.DryRun)
if err != nil {
return err
}
if !opts.DryRun {
var lock *restic.Lock
var err error
lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
}
} else {
repo.SetDryRun()
}
defer unlock()
snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile)
if err != nil {


@ -127,19 +127,11 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions,
debug.Log("restore %v to %v", snapshotIDString, opts.Target)
repo, err := OpenRepository(ctx, gopts)
ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock)
if err != nil {
return err
}
if !gopts.NoLock {
var lock *restic.Lock
lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
}
}
defer unlock()
sn, subfolder, err := (&restic.SnapshotFilter{
Hosts: opts.Hosts,


@ -256,27 +256,22 @@ func runRewrite(ctx context.Context, opts RewriteOptions, gopts GlobalOptions, a
return errors.Fatal("Nothing to do: no excludes provided and no new metadata provided")
}
repo, err := OpenRepository(ctx, gopts)
var (
repo *repository.Repository
unlock func()
err error
)
if opts.Forget {
Verbosef("create exclusive lock for repository\n")
ctx, repo, unlock, err = openWithExclusiveLock(ctx, gopts, opts.DryRun)
} else {
ctx, repo, unlock, err = openWithAppendLock(ctx, gopts, opts.DryRun)
}
if err != nil {
return err
}
if !opts.DryRun {
var lock *restic.Lock
var err error
if opts.Forget {
Verbosef("create exclusive lock for repository\n")
lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
} else {
lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
}
defer unlockRepo(lock)
if err != nil {
return err
}
} else {
repo.SetDryRun()
}
defer unlock()
snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile)
if err != nil {

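rewrite derives its lock level from the flags: --forget removes the original snapshot and needs an exclusive lock, a plain rewrite only adds a new snapshot and gets by with an append lock, and --dry-run skips locking via the helper's dry-run parameter. Condensed from the hunk above:

var (
	repo   *repository.Repository
	unlock func()
	err    error
)
if opts.Forget {
	// snapshot removal requires exclusive access
	ctx, repo, unlock, err = openWithExclusiveLock(ctx, gopts, opts.DryRun)
} else {
	ctx, repo, unlock, err = openWithAppendLock(ctx, gopts, opts.DryRun)
}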

@ -78,8 +78,11 @@ func testRewriteMetadata(t *testing.T, metadata snapshotMetadataArgs) {
createBasicRewriteRepo(t, env)
testRunRewriteExclude(t, env.gopts, []string{}, true, metadata)
repo, _ := OpenRepository(context.TODO(), env.gopts)
snapshots, err := restic.TestLoadAllSnapshots(context.TODO(), repo, nil)
ctx, repo, unlock, err := openWithReadLock(context.TODO(), env.gopts, false)
rtest.OK(t, err)
defer unlock()
snapshots, err := restic.TestLoadAllSnapshots(ctx, repo, nil)
rtest.OK(t, err)
rtest.Assert(t, len(snapshots) == 1, "expected one snapshot, got %v", len(snapshots))
newSnapshot := snapshots[0]


@ -59,19 +59,11 @@ func init() {
}
func runSnapshots(ctx context.Context, opts SnapshotOptions, gopts GlobalOptions, args []string) error {
repo, err := OpenRepository(ctx, gopts)
ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock)
if err != nil {
return err
}
if !gopts.NoLock {
var lock *restic.Lock
lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
}
}
defer unlock()
var snapshots restic.Snapshots
for sn := range FindFilteredSnapshots(ctx, repo, repo, &opts.SnapshotFilter, args) {


@ -80,19 +80,11 @@ func runStats(ctx context.Context, opts StatsOptions, gopts GlobalOptions, args
return err
}
repo, err := OpenRepository(ctx, gopts)
ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock)
if err != nil {
return err
}
if !gopts.NoLock {
var lock *restic.Lock
lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
}
}
defer unlock()
snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile)
if err != nil {


@ -104,20 +104,12 @@ func runTag(ctx context.Context, opts TagOptions, gopts GlobalOptions, args []st
return errors.Fatal("--set and --add/--remove cannot be given at the same time")
}
repo, err := OpenRepository(ctx, gopts)
Verbosef("create exclusive lock for repository\n")
ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false)
if err != nil {
return err
}
if !gopts.NoLock {
Verbosef("create exclusive lock for repository\n")
var lock *restic.Lock
lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
}
}
defer unlock()
changeCnt := 0
for sn := range FindFilteredSnapshots(ctx, repo, repo, &opts.SnapshotFilter, args) {


@ -232,47 +232,66 @@ func testSetupBackupData(t testing.TB, env *testEnvironment) string {
}
func listPacks(gopts GlobalOptions, t *testing.T) restic.IDSet {
r, err := OpenRepository(context.TODO(), gopts)
ctx, r, unlock, err := openWithReadLock(context.TODO(), gopts, false)
rtest.OK(t, err)
defer unlock()
packs := restic.NewIDSet()
rtest.OK(t, r.List(context.TODO(), restic.PackFile, func(id restic.ID, size int64) error {
rtest.OK(t, r.List(ctx, restic.PackFile, func(id restic.ID, size int64) error {
packs.Insert(id)
return nil
}))
return packs
}
func removePacks(gopts GlobalOptions, t testing.TB, remove restic.IDSet) {
r, err := OpenRepository(context.TODO(), gopts)
func listTreePacks(gopts GlobalOptions, t *testing.T) restic.IDSet {
ctx, r, unlock, err := openWithReadLock(context.TODO(), gopts, false)
rtest.OK(t, err)
defer unlock()
rtest.OK(t, r.LoadIndex(ctx, nil))
treePacks := restic.NewIDSet()
r.Index().Each(ctx, func(pb restic.PackedBlob) {
if pb.Type == restic.TreeBlob {
treePacks.Insert(pb.PackID)
}
})
return treePacks
}
func removePacks(gopts GlobalOptions, t testing.TB, remove restic.IDSet) {
ctx, r, unlock, err := openWithExclusiveLock(context.TODO(), gopts, false)
rtest.OK(t, err)
defer unlock()
for id := range remove {
rtest.OK(t, r.Backend().Remove(context.TODO(), backend.Handle{Type: restic.PackFile, Name: id.String()}))
rtest.OK(t, r.Backend().Remove(ctx, backend.Handle{Type: restic.PackFile, Name: id.String()}))
}
}
func removePacksExcept(gopts GlobalOptions, t testing.TB, keep restic.IDSet, removeTreePacks bool) {
r, err := OpenRepository(context.TODO(), gopts)
ctx, r, unlock, err := openWithExclusiveLock(context.TODO(), gopts, false)
rtest.OK(t, err)
defer unlock()
// Get all tree packs
rtest.OK(t, r.LoadIndex(context.TODO(), nil))
rtest.OK(t, r.LoadIndex(ctx, nil))
treePacks := restic.NewIDSet()
r.Index().Each(context.TODO(), func(pb restic.PackedBlob) {
r.Index().Each(ctx, func(pb restic.PackedBlob) {
if pb.Type == restic.TreeBlob {
treePacks.Insert(pb.PackID)
}
})
// remove all packs containing data blobs
rtest.OK(t, r.List(context.TODO(), restic.PackFile, func(id restic.ID, size int64) error {
rtest.OK(t, r.List(ctx, restic.PackFile, func(id restic.ID, size int64) error {
if treePacks.Has(id) != removeTreePacks || keep.Has(id) {
return nil
}
return r.Backend().Remove(context.TODO(), backend.Handle{Type: restic.PackFile, Name: id.String()})
return r.Backend().Remove(ctx, backend.Handle{Type: restic.PackFile, Name: id.String()})
}))
}
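
With these helpers, the integration tests no longer call OpenRepository directly; each helper opens the repository under an appropriate lock and hands back the unlock callback. A hypothetical test composed from them (flow invented for illustration, not part of this PR):

func TestRemoveTreePacks(t *testing.T) {
	env, cleanup := withTestEnvironment(t)
	defer cleanup()

	testSetupBackupData(t, env)
	testRunBackup(t, env.testdata, []string{"."}, BackupOptions{}, env.gopts)

	// read-locked open: collect all packs containing tree blobs
	treePacks := listTreePacks(env.gopts, t)
	// exclusively locked open: delete those packs from the backend
	removePacks(env.gopts, t, treePacks)

	testRunRebuildIndex(t, env.gopts)
	testRunCheckMustFail(t, env.gopts)
}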


@ -154,12 +154,13 @@ func TestFindListOnce(t *testing.T) {
testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts)
thirdSnapshot := restic.NewIDSet(testListSnapshots(t, env.gopts, 3)...)
repo, err := OpenRepository(context.TODO(), env.gopts)
ctx, repo, unlock, err := openWithReadLock(context.TODO(), env.gopts, false)
rtest.OK(t, err)
defer unlock()
snapshotIDs := restic.NewIDSet()
// specify the two oldest snapshots explicitly and use "latest" to reference the newest one
for sn := range FindFilteredSnapshots(context.TODO(), repo, repo, &restic.SnapshotFilter{}, []string{
for sn := range FindFilteredSnapshots(ctx, repo, repo, &restic.SnapshotFilter{}, []string{
secondSnapshot[0].String(),
secondSnapshot[1].String()[:8],
"latest",


@ -2,316 +2,54 @@ package main
import (
"context"
"fmt"
"sync"
"time"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/repository"
)
type lockContext struct {
lock *restic.Lock
cancel context.CancelFunc
refreshWG sync.WaitGroup
}
var globalLocks struct {
locks map[*restic.Lock]*lockContext
sync.Mutex
sync.Once
}
func lockRepo(ctx context.Context, repo restic.Repository, retryLock time.Duration, json bool) (*restic.Lock, context.Context, error) {
return lockRepository(ctx, repo, false, retryLock, json)
}
func lockRepoExclusive(ctx context.Context, repo restic.Repository, retryLock time.Duration, json bool) (*restic.Lock, context.Context, error) {
return lockRepository(ctx, repo, true, retryLock, json)
}
var (
retrySleepStart = 5 * time.Second
retrySleepMax = 60 * time.Second
)
func minDuration(a, b time.Duration) time.Duration {
if a <= b {
return a
}
return b
}
// lockRepository wraps the ctx such that it is cancelled when the repository is unlocked
// cancelling the original context also stops the lock refresh
func lockRepository(ctx context.Context, repo restic.Repository, exclusive bool, retryLock time.Duration, json bool) (*restic.Lock, context.Context, error) {
// make sure that a repository is unlocked properly and after cancel() was
// called by the cleanup handler in global.go
globalLocks.Do(func() {
AddCleanupHandler(unlockAll)
})
lockFn := restic.NewLock
if exclusive {
lockFn = restic.NewExclusiveLock
}
var lock *restic.Lock
var err error
retrySleep := minDuration(retrySleepStart, retryLock)
retryMessagePrinted := false
retryTimeout := time.After(retryLock)
retryLoop:
for {
lock, err = lockFn(ctx, repo)
if err != nil && restic.IsAlreadyLocked(err) {
if !retryMessagePrinted {
if !json {
Verbosef("repo already locked, waiting up to %s for the lock\n", retryLock)
}
retryMessagePrinted = true
}
debug.Log("repo already locked, retrying in %v", retrySleep)
retrySleepCh := time.After(retrySleep)
select {
case <-ctx.Done():
return nil, ctx, ctx.Err()
case <-retryTimeout:
debug.Log("repo already locked, timeout expired")
// Last lock attempt
lock, err = lockFn(ctx, repo)
break retryLoop
case <-retrySleepCh:
retrySleep = minDuration(retrySleep*2, retrySleepMax)
}
} else {
// anything else, either a successful lock or another error
break retryLoop
}
}
if restic.IsInvalidLock(err) {
return nil, ctx, errors.Fatalf("%v\n\nthe `unlock --remove-all` command can be used to remove invalid locks. Make sure that no other restic process is accessing the repository when running the command", err)
}
func internalOpenWithLocked(ctx context.Context, gopts GlobalOptions, dryRun bool, exclusive bool) (context.Context, *repository.Repository, func(), error) {
repo, err := OpenRepository(ctx, gopts)
if err != nil {
return nil, ctx, fmt.Errorf("unable to create lock in backend: %w", err)
return nil, nil, nil, err
}
debug.Log("create lock %p (exclusive %v)", lock, exclusive)
ctx, cancel := context.WithCancel(ctx)
lockInfo := &lockContext{
lock: lock,
cancel: cancel,
}
lockInfo.refreshWG.Add(2)
refreshChan := make(chan struct{})
forceRefreshChan := make(chan refreshLockRequest)
unlock := func() {}
if !dryRun {
var lock *repository.Unlocker
globalLocks.Lock()
globalLocks.locks[lock] = lockInfo
go refreshLocks(ctx, repo.Backend(), lockInfo, refreshChan, forceRefreshChan)
go monitorLockRefresh(ctx, lockInfo, refreshChan, forceRefreshChan)
globalLocks.Unlock()
lock, ctx, err = repository.Lock(ctx, repo, exclusive, gopts.RetryLock, func(msg string) {
if !gopts.JSON {
Verbosef("%s", msg)
}
}, Warnf)
return lock, ctx, err
}
unlock = lock.Unlock
// make sure that a repository is unlocked properly and after cancel() was
// called by the cleanup handler in global.go
AddCleanupHandler(func(code int) (int, error) {
lock.Unlock()
return code, nil
})
var refreshInterval = 5 * time.Minute
// consider a lock refresh failed a bit before the lock actually becomes stale
// the difference allows to compensate for a small time drift between clients.
var refreshabilityTimeout = restic.StaleLockTimeout - refreshInterval*3/2
type refreshLockRequest struct {
result chan bool
}
func refreshLocks(ctx context.Context, backend backend.Backend, lockInfo *lockContext, refreshed chan<- struct{}, forceRefresh <-chan refreshLockRequest) {
debug.Log("start")
lock := lockInfo.lock
ticker := time.NewTicker(refreshInterval)
lastRefresh := lock.Time
defer func() {
ticker.Stop()
// ensure that the context was cancelled before removing the lock
lockInfo.cancel()
// remove the lock from the repo
debug.Log("unlocking repository with lock %v", lock)
if err := lock.Unlock(); err != nil {
debug.Log("error while unlocking: %v", err)
Warnf("error while unlocking: %v", err)
}
lockInfo.refreshWG.Done()
}()
for {
select {
case <-ctx.Done():
debug.Log("terminate")
return
case req := <-forceRefresh:
debug.Log("trying to refresh stale lock")
// keep on going if our current lock still exists
success := tryRefreshStaleLock(ctx, backend, lock, lockInfo.cancel)
// inform refresh goroutine about forced refresh
select {
case <-ctx.Done():
case req.result <- success:
}
if success {
// update lock refresh time
lastRefresh = lock.Time
}
case <-ticker.C:
if time.Since(lastRefresh) > refreshabilityTimeout {
// the lock is too old, wait until the expiry monitor cancels the context
continue
}
debug.Log("refreshing locks")
err := lock.Refresh(context.TODO())
if err != nil {
Warnf("unable to refresh lock: %v\n", err)
} else {
lastRefresh = lock.Time
// inform monitor goroutine about successful refresh
select {
case <-ctx.Done():
case refreshed <- struct{}{}:
}
}
if err != nil {
return nil, nil, nil, err
}
} else {
repo.SetDryRun()
}
return ctx, repo, unlock, nil
}
func monitorLockRefresh(ctx context.Context, lockInfo *lockContext, refreshed <-chan struct{}, forceRefresh chan<- refreshLockRequest) {
// time.Now() might use a monotonic timer which is paused during standby
// convert to unix time to ensure we compare real time values
lastRefresh := time.Now().UnixNano()
pollDuration := 1 * time.Second
if refreshInterval < pollDuration {
// require for TestLockFailedRefresh
pollDuration = refreshInterval / 5
}
// timers are paused during standby, which is a problem as the refresh timeout
// _must_ expire if the host was too long in standby. Thus fall back to periodic checks
// https://github.com/golang/go/issues/35012
ticker := time.NewTicker(pollDuration)
defer func() {
ticker.Stop()
lockInfo.cancel()
lockInfo.refreshWG.Done()
}()
var refreshStaleLockResult chan bool
for {
select {
case <-ctx.Done():
debug.Log("terminate expiry monitoring")
return
case <-refreshed:
if refreshStaleLockResult != nil {
// ignore delayed refresh notifications while the stale lock is refreshed
continue
}
lastRefresh = time.Now().UnixNano()
case <-ticker.C:
if time.Now().UnixNano()-lastRefresh < refreshabilityTimeout.Nanoseconds() || refreshStaleLockResult != nil {
continue
}
debug.Log("trying to refreshStaleLock")
// keep on going if our current lock still exists
refreshReq := refreshLockRequest{
result: make(chan bool),
}
refreshStaleLockResult = refreshReq.result
// inform refresh goroutine about forced refresh
select {
case <-ctx.Done():
case forceRefresh <- refreshReq:
}
case success := <-refreshStaleLockResult:
if success {
lastRefresh = time.Now().UnixNano()
refreshStaleLockResult = nil
continue
}
Warnf("Fatal: failed to refresh lock in time\n")
return
}
}
func openWithReadLock(ctx context.Context, gopts GlobalOptions, noLock bool) (context.Context, *repository.Repository, func(), error) {
// TODO enforce read-only operations once the locking code has moved to the repository
return internalOpenWithLocked(ctx, gopts, noLock, false)
}
func tryRefreshStaleLock(ctx context.Context, be backend.Backend, lock *restic.Lock, cancel context.CancelFunc) bool {
freeze := backend.AsBackend[backend.FreezeBackend](be)
if freeze != nil {
debug.Log("freezing backend")
freeze.Freeze()
defer freeze.Unfreeze()
}
err := lock.RefreshStaleLock(ctx)
if err != nil {
Warnf("failed to refresh stale lock: %v\n", err)
// cancel context while the backend is still frozen to prevent accidental modifications
cancel()
return false
}
return true
func openWithAppendLock(ctx context.Context, gopts GlobalOptions, dryRun bool) (context.Context, *repository.Repository, func(), error) {
// TODO enforce non-exclusive operations once the locking code has moved to the repository
return internalOpenWithLocked(ctx, gopts, dryRun, false)
}
func unlockRepo(lock *restic.Lock) {
if lock == nil {
return
}
globalLocks.Lock()
lockInfo, exists := globalLocks.locks[lock]
delete(globalLocks.locks, lock)
globalLocks.Unlock()
if !exists {
debug.Log("unable to find lock %v in the global list of locks, ignoring", lock)
return
}
lockInfo.cancel()
lockInfo.refreshWG.Wait()
}
func unlockAll(code int) (int, error) {
globalLocks.Lock()
locks := globalLocks.locks
debug.Log("unlocking %d locks", len(globalLocks.locks))
for _, lockInfo := range globalLocks.locks {
lockInfo.cancel()
}
globalLocks.locks = make(map[*restic.Lock]*lockContext)
globalLocks.Unlock()
for _, lockInfo := range locks {
lockInfo.refreshWG.Wait()
}
return code, nil
}
func init() {
globalLocks.locks = make(map[*restic.Lock]*lockContext)
func openWithExclusiveLock(ctx context.Context, gopts GlobalOptions, dryRun bool) (context.Context, *repository.Repository, func(), error) {
return internalOpenWithLocked(ctx, gopts, dryRun, true)
}
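
Taken together, the command-side lock.go shrinks to these thin wrappers around repository.Lock. A sketch of how a command consumes them (runExample is a hypothetical name), relying on the PR's convention that the returned context is cancelled once the lock can no longer be refreshed:

func runExample(ctx context.Context, gopts GlobalOptions) error {
	// the wrapped ctx is cancelled when the lock is lost or unlock() runs
	ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock)
	if err != nil {
		return err
	}
	defer unlock()

	// long-running work aborts automatically if the lock expires, because
	// all repository operations receive ctx
	return repo.LoadIndex(ctx, nil)
}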


@ -72,11 +72,9 @@ func assertOnlyMixedPackHints(t *testing.T, hints []error) {
}
func TestCheckRepo(t *testing.T) {
repodir, cleanup := test.Env(t, checkerTestData)
repo, cleanup := repository.TestFromFixture(t, checkerTestData)
defer cleanup()
repo := repository.TestOpenLocal(t, repodir)
chkr := checker.New(repo, false)
hints, errs := chkr.LoadIndex(context.TODO(), nil)
if len(errs) > 0 {
@ -92,11 +90,9 @@ func TestCheckRepo(t *testing.T) {
}
func TestMissingPack(t *testing.T) {
repodir, cleanup := test.Env(t, checkerTestData)
repo, cleanup := repository.TestFromFixture(t, checkerTestData)
defer cleanup()
repo := repository.TestOpenLocal(t, repodir)
packHandle := backend.Handle{
Type: restic.PackFile,
Name: "657f7fb64f6a854fff6fe9279998ee09034901eded4e6db9bcee0e59745bbce6",
@ -123,11 +119,9 @@ func TestMissingPack(t *testing.T) {
}
func TestUnreferencedPack(t *testing.T) {
repodir, cleanup := test.Env(t, checkerTestData)
repo, cleanup := repository.TestFromFixture(t, checkerTestData)
defer cleanup()
repo := repository.TestOpenLocal(t, repodir)
// index 3f1a only references pack 60e0
packID := "60e0438dcb978ec6860cc1f8c43da648170ee9129af8f650f876bad19f8f788e"
indexHandle := backend.Handle{
@ -156,11 +150,9 @@ func TestUnreferencedPack(t *testing.T) {
}
func TestUnreferencedBlobs(t *testing.T) {
repodir, cleanup := test.Env(t, checkerTestData)
repo, cleanup := repository.TestFromFixture(t, checkerTestData)
defer cleanup()
repo := repository.TestOpenLocal(t, repodir)
snapshotHandle := backend.Handle{
Type: restic.SnapshotFile,
Name: "51d249d28815200d59e4be7b3f21a157b864dc343353df9d8e498220c2499b02",
@ -195,11 +187,9 @@ func TestUnreferencedBlobs(t *testing.T) {
}
func TestModifiedIndex(t *testing.T) {
repodir, cleanup := test.Env(t, checkerTestData)
repo, cleanup := repository.TestFromFixture(t, checkerTestData)
defer cleanup()
repo := repository.TestOpenLocal(t, repodir)
done := make(chan struct{})
defer close(done)
@ -274,11 +264,9 @@ func TestModifiedIndex(t *testing.T) {
var checkerDuplicateIndexTestData = filepath.Join("testdata", "duplicate-packs-in-index-test-repo.tar.gz")
func TestDuplicatePacksInIndex(t *testing.T) {
repodir, cleanup := test.Env(t, checkerDuplicateIndexTestData)
repo, cleanup := repository.TestFromFixture(t, checkerDuplicateIndexTestData)
defer cleanup()
repo := repository.TestOpenLocal(t, repodir)
chkr := checker.New(repo, false)
hints, errs := chkr.LoadIndex(context.TODO(), nil)
if len(hints) == 0 {
@ -342,9 +330,7 @@ func TestCheckerModifiedData(t *testing.T) {
t.Logf("archived as %v", sn.ID().Str())
beError := &errorBackend{Backend: repo.Backend()}
checkRepo, err := repository.New(beError, repository.Options{})
test.OK(t, err)
test.OK(t, checkRepo.SearchKey(context.TODO(), test.TestPassword, 5, ""))
checkRepo := repository.TestOpenBackend(t, beError)
chkr := checker.New(checkRepo, false)
@ -399,10 +385,8 @@ func (r *loadTreesOnceRepository) LoadTree(ctx context.Context, id restic.ID) (*
}
func TestCheckerNoDuplicateTreeDecodes(t *testing.T) {
repodir, cleanup := test.Env(t, checkerTestData)
repo, cleanup := repository.TestFromFixture(t, checkerTestData)
defer cleanup()
repo := repository.TestOpenLocal(t, repodir)
checkRepo := &loadTreesOnceRepository{
Repository: repo,
loadedTrees: restic.NewIDSet(),
@ -549,9 +533,7 @@ func TestCheckerBlobTypeConfusion(t *testing.T) {
}
func loadBenchRepository(t *testing.B) (*checker.Checker, restic.Repository, func()) {
repodir, cleanup := test.Env(t, checkerTestData)
repo := repository.TestOpenLocal(t, repodir)
repo, cleanup := repository.TestFromFixture(t, checkerTestData)
chkr := checker.New(repo, false)
hints, errs := chkr.LoadIndex(context.TODO(), nil)


@ -15,11 +15,9 @@ import (
var repoFixture = filepath.Join("..", "repository", "testdata", "test-repo.tar.gz")
func TestRepositoryForAllIndexes(t *testing.T) {
repodir, cleanup := rtest.Env(t, repoFixture)
repo, cleanup := repository.TestFromFixture(t, repoFixture)
defer cleanup()
repo := repository.TestOpenLocal(t, repodir)
expectedIndexIDs := restic.NewIDSet()
rtest.OK(t, repo.List(context.TODO(), restic.IndexFile, func(id restic.ID, size int64) error {
expectedIndexIDs.Insert(id)


@ -43,11 +43,11 @@ type Key struct {
id restic.ID
}
// Params tracks the parameters used for the KDF. If not set, it will be
// params tracks the parameters used for the KDF. If not set, it will be
// calibrated on the first run of AddKey().
var Params *crypto.Params
var params *crypto.Params
var (
const (
// KDFTimeout specifies the maximum runtime for the KDF.
KDFTimeout = 500 * time.Millisecond
@ -196,13 +196,13 @@ func LoadKey(ctx context.Context, s *Repository, id restic.ID) (k *Key, err erro
// AddKey adds a new key to an already existing repository.
func AddKey(ctx context.Context, s *Repository, password, username, hostname string, template *crypto.Key) (*Key, error) {
// make sure we have valid KDF parameters
if Params == nil {
if params == nil {
p, err := crypto.Calibrate(KDFTimeout, KDFMemory)
if err != nil {
return nil, errors.Wrap(err, "Calibrate")
}
Params = &p
params = &p
debug.Log("calibrated KDF parameters are %v", p)
}
@ -213,9 +213,9 @@ func AddKey(ctx context.Context, s *Repository, password, username, hostname str
Hostname: hostname,
KDF: "scrypt",
N: Params.N,
R: Params.R,
P: Params.P,
N: params.N,
R: params.R,
P: params.P,
}
if newkey.Hostname == "" {
@ -237,7 +237,7 @@ func AddKey(ctx context.Context, s *Repository, password, username, hostname str
}
// call KDF to derive user key
newkey.user, err = crypto.KDF(*Params, newkey.Salt, password)
newkey.user, err = crypto.KDF(*params, newkey.Salt, password)
if err != nil {
return nil, err
}

internal/repository/lock.go (new file, 274 lines)

@ -0,0 +1,274 @@
package repository
import (
"context"
"fmt"
"sync"
"time"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/restic"
)
type lockContext struct {
lock *restic.Lock
cancel context.CancelFunc
refreshWG sync.WaitGroup
}
type locker struct {
retrySleepStart time.Duration
retrySleepMax time.Duration
refreshInterval time.Duration
refreshabilityTimeout time.Duration
}
const defaultRefreshInterval = 5 * time.Minute
var lockerInst = &locker{
retrySleepStart: 5 * time.Second,
retrySleepMax: 60 * time.Second,
refreshInterval: defaultRefreshInterval,
// consider a lock refresh failed a bit before the lock actually becomes stale
// the difference allows to compensate for a small time drift between clients.
refreshabilityTimeout: restic.StaleLockTimeout - defaultRefreshInterval*3/2,
}
func Lock(ctx context.Context, repo restic.Repository, exclusive bool, retryLock time.Duration, printRetry func(msg string), logger func(format string, args ...interface{})) (*Unlocker, context.Context, error) {
return lockerInst.Lock(ctx, repo, exclusive, retryLock, printRetry, logger)
}
// Lock wraps the ctx such that it is cancelled when the repository is unlocked
// cancelling the original context also stops the lock refresh
func (l *locker) Lock(ctx context.Context, repo restic.Repository, exclusive bool, retryLock time.Duration, printRetry func(msg string), logger func(format string, args ...interface{})) (*Unlocker, context.Context, error) {
lockFn := restic.NewLock
if exclusive {
lockFn = restic.NewExclusiveLock
}
var lock *restic.Lock
var err error
retrySleep := minDuration(l.retrySleepStart, retryLock)
retryMessagePrinted := false
retryTimeout := time.After(retryLock)
retryLoop:
for {
lock, err = lockFn(ctx, repo)
if err != nil && restic.IsAlreadyLocked(err) {
if !retryMessagePrinted {
printRetry(fmt.Sprintf("repo already locked, waiting up to %s for the lock\n", retryLock))
retryMessagePrinted = true
}
debug.Log("repo already locked, retrying in %v", retrySleep)
retrySleepCh := time.After(retrySleep)
select {
case <-ctx.Done():
return nil, ctx, ctx.Err()
case <-retryTimeout:
debug.Log("repo already locked, timeout expired")
// Last lock attempt
lock, err = lockFn(ctx, repo)
break retryLoop
case <-retrySleepCh:
retrySleep = minDuration(retrySleep*2, l.retrySleepMax)
}
} else {
// anything else, either a successful lock or another error
break retryLoop
}
}
if restic.IsInvalidLock(err) {
return nil, ctx, errors.Fatalf("%v\n\nthe `unlock --remove-all` command can be used to remove invalid locks. Make sure that no other restic process is accessing the repository when running the command", err)
}
if err != nil {
return nil, ctx, fmt.Errorf("unable to create lock in backend: %w", err)
}
debug.Log("create lock %p (exclusive %v)", lock, exclusive)
ctx, cancel := context.WithCancel(ctx)
lockInfo := &lockContext{
lock: lock,
cancel: cancel,
}
lockInfo.refreshWG.Add(2)
refreshChan := make(chan struct{})
forceRefreshChan := make(chan refreshLockRequest)
go l.refreshLocks(ctx, repo.Backend(), lockInfo, refreshChan, forceRefreshChan, logger)
go l.monitorLockRefresh(ctx, lockInfo, refreshChan, forceRefreshChan, logger)
return &Unlocker{lockInfo}, ctx, nil
}
func minDuration(a, b time.Duration) time.Duration {
if a <= b {
return a
}
return b
}
type refreshLockRequest struct {
result chan bool
}
func (l *locker) refreshLocks(ctx context.Context, backend backend.Backend, lockInfo *lockContext, refreshed chan<- struct{}, forceRefresh <-chan refreshLockRequest, logger func(format string, args ...interface{})) {
debug.Log("start")
lock := lockInfo.lock
ticker := time.NewTicker(l.refreshInterval)
lastRefresh := lock.Time
defer func() {
ticker.Stop()
// ensure that the context was cancelled before removing the lock
lockInfo.cancel()
// remove the lock from the repo
debug.Log("unlocking repository with lock %v", lock)
if err := lock.Unlock(); err != nil {
debug.Log("error while unlocking: %v", err)
logger("error while unlocking: %v", err)
}
lockInfo.refreshWG.Done()
}()
for {
select {
case <-ctx.Done():
debug.Log("terminate")
return
case req := <-forceRefresh:
debug.Log("trying to refresh stale lock")
// keep on going if our current lock still exists
success := tryRefreshStaleLock(ctx, backend, lock, lockInfo.cancel, logger)
// inform refresh goroutine about forced refresh
select {
case <-ctx.Done():
case req.result <- success:
}
if success {
// update lock refresh time
lastRefresh = lock.Time
}
case <-ticker.C:
if time.Since(lastRefresh) > l.refreshabilityTimeout {
// the lock is too old, wait until the expiry monitor cancels the context
continue
}
debug.Log("refreshing locks")
err := lock.Refresh(context.TODO())
if err != nil {
logger("unable to refresh lock: %v\n", err)
} else {
lastRefresh = lock.Time
// inform monitor goroutine about successful refresh
select {
case <-ctx.Done():
case refreshed <- struct{}{}:
}
}
}
}
}
func (l *locker) monitorLockRefresh(ctx context.Context, lockInfo *lockContext, refreshed <-chan struct{}, forceRefresh chan<- refreshLockRequest, logger func(format string, args ...interface{})) {
// time.Now() might use a monotonic timer which is paused during standby
// convert to unix time to ensure we compare real time values
lastRefresh := time.Now().UnixNano()
pollDuration := 1 * time.Second
if l.refreshInterval < pollDuration {
// required for TestLockFailedRefresh
pollDuration = l.refreshInterval / 5
}
// timers are paused during standby, which is a problem as the refresh timeout
// _must_ expire if the host was too long in standby. Thus fall back to periodic checks
// https://github.com/golang/go/issues/35012
ticker := time.NewTicker(pollDuration)
defer func() {
ticker.Stop()
lockInfo.cancel()
lockInfo.refreshWG.Done()
}()
var refreshStaleLockResult chan bool
for {
select {
case <-ctx.Done():
debug.Log("terminate expiry monitoring")
return
case <-refreshed:
if refreshStaleLockResult != nil {
// ignore delayed refresh notifications while the stale lock is refreshed
continue
}
lastRefresh = time.Now().UnixNano()
case <-ticker.C:
if time.Now().UnixNano()-lastRefresh < l.refreshabilityTimeout.Nanoseconds() || refreshStaleLockResult != nil {
continue
}
debug.Log("trying to refreshStaleLock")
// keep on going if our current lock still exists
refreshReq := refreshLockRequest{
result: make(chan bool),
}
refreshStaleLockResult = refreshReq.result
// inform refresh goroutine about forced refresh
select {
case <-ctx.Done():
case forceRefresh <- refreshReq:
}
case success := <-refreshStaleLockResult:
if success {
lastRefresh = time.Now().UnixNano()
refreshStaleLockResult = nil
continue
}
logger("Fatal: failed to refresh lock in time\n")
return
}
}
}
func tryRefreshStaleLock(ctx context.Context, be backend.Backend, lock *restic.Lock, cancel context.CancelFunc, logger func(format string, args ...interface{})) bool {
freeze := backend.AsBackend[backend.FreezeBackend](be)
if freeze != nil {
debug.Log("freezing backend")
freeze.Freeze()
defer freeze.Unfreeze()
}
err := lock.RefreshStaleLock(ctx)
if err != nil {
logger("failed to refresh stale lock: %v\n", err)
// cancel context while the backend is still frozen to prevent accidental modifications
cancel()
return false
}
return true
}
type Unlocker struct {
info *lockContext
}
func (l *Unlocker) Unlock() {
l.info.cancel()
l.info.refreshWG.Wait()
}
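
The exported surface of the new package-level API is repository.Lock plus Unlocker.Unlock. A minimal usage sketch against the signature above; the zero retry duration and the no-op callbacks (standing in for the CLI's Verbosef/Warnf) are placeholder choices:

lock, wrappedCtx, err := repository.Lock(ctx, repo, false, 0,
	func(msg string) {},                         // printRetry
	func(format string, args ...interface{}) {}, // logger
)
if err != nil {
	return err
}
// use wrappedCtx for all further repository access; it is cancelled if the
// lock cannot be refreshed in time
defer lock.Unlock() // stops the refresh goroutines and removes the lock file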


@ -1,4 +1,4 @@
package main
package repository
import (
"context"
@ -10,94 +10,76 @@ import (
"time"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/location"
"github.com/restic/restic/internal/backend/mem"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/test"
rtest "github.com/restic/restic/internal/test"
)
func openLockTestRepo(t *testing.T, wrapper backendWrapper) (*repository.Repository, func(), *testEnvironment) {
env, cleanup := withTestEnvironment(t)
type backendWrapper func(r backend.Backend) (backend.Backend, error)
reg := location.NewRegistry()
reg.Register(mem.NewFactory())
env.gopts.backends = reg
env.gopts.Repo = "mem:"
func openLockTestRepo(t *testing.T, wrapper backendWrapper) restic.Repository {
be := backend.Backend(mem.New())
// initialize repo
TestRepositoryWithBackend(t, be, 0, Options{})
// reopen repository to allow injecting a backend wrapper
if wrapper != nil {
env.gopts.backendTestHook = wrapper
var err error
be, err = wrapper(be)
rtest.OK(t, err)
}
testRunInit(t, env.gopts)
repo, err := OpenRepository(context.TODO(), env.gopts)
test.OK(t, err)
return repo, cleanup, env
return TestOpenBackend(t, be)
}
func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository, env *testEnvironment) (*restic.Lock, context.Context) {
lock, wrappedCtx, err := lockRepo(ctx, repo, env.gopts.RetryLock, env.gopts.JSON)
func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository, lockerInst *locker, retryLock time.Duration) (*Unlocker, context.Context) {
lock, wrappedCtx, err := lockerInst.Lock(ctx, repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {})
test.OK(t, err)
test.OK(t, wrappedCtx.Err())
if lock.Stale() {
if lock.info.lock.Stale() {
t.Fatal("lock returned stale lock")
}
return lock, wrappedCtx
}
func TestLock(t *testing.T) {
repo, cleanup, env := openLockTestRepo(t, nil)
defer cleanup()
t.Parallel()
repo := openLockTestRepo(t, nil)
lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, env)
unlockRepo(lock)
lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, lockerInst, 0)
lock.Unlock()
if wrappedCtx.Err() == nil {
t.Fatal("unlock did not cancel context")
}
}
func TestLockCancel(t *testing.T) {
repo, cleanup, env := openLockTestRepo(t, nil)
defer cleanup()
t.Parallel()
repo := openLockTestRepo(t, nil)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
lock, wrappedCtx := checkedLockRepo(ctx, t, repo, env)
lock, wrappedCtx := checkedLockRepo(ctx, t, repo, lockerInst, 0)
cancel()
if wrappedCtx.Err() == nil {
t.Fatal("canceled parent context did not cancel context")
}
// unlockRepo should not crash
unlockRepo(lock)
}
func TestLockUnlockAll(t *testing.T) {
repo, cleanup, env := openLockTestRepo(t, nil)
defer cleanup()
lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, env)
_, err := unlockAll(0)
test.OK(t, err)
if wrappedCtx.Err() == nil {
t.Fatal("canceled parent context did not cancel context")
}
// unlockRepo should not crash
unlockRepo(lock)
// Unlock should not crash
lock.Unlock()
}
func TestLockConflict(t *testing.T) {
repo, cleanup, env := openLockTestRepo(t, nil)
defer cleanup()
repo2, err := OpenRepository(context.TODO(), env.gopts)
test.OK(t, err)
t.Parallel()
repo := openLockTestRepo(t, nil)
repo2 := TestOpenBackend(t, repo.Backend())
lock, _, err := lockRepoExclusive(context.Background(), repo, env.gopts.RetryLock, env.gopts.JSON)
lock, _, err := Lock(context.Background(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {})
test.OK(t, err)
defer unlockRepo(lock)
_, _, err = lockRepo(context.Background(), repo2, env.gopts.RetryLock, env.gopts.JSON)
defer lock.Unlock()
_, _, err = Lock(context.Background(), repo2, false, 0, func(msg string) {}, func(format string, args ...interface{}) {})
if err == nil {
t.Fatal("second lock should have failed")
}
@ -118,20 +100,19 @@ func (b *writeOnceBackend) Save(ctx context.Context, h backend.Handle, rd backen
}
func TestLockFailedRefresh(t *testing.T) {
repo, cleanup, env := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) {
t.Parallel()
repo := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) {
return &writeOnceBackend{Backend: r}, nil
})
defer cleanup()
// reduce locking intervals to be suitable for testing
ri, rt := refreshInterval, refreshabilityTimeout
refreshInterval = 20 * time.Millisecond
refreshabilityTimeout = 100 * time.Millisecond
defer func() {
refreshInterval, refreshabilityTimeout = ri, rt
}()
lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, env)
li := &locker{
retrySleepStart: lockerInst.retrySleepStart,
retrySleepMax: lockerInst.retrySleepMax,
refreshInterval: 20 * time.Millisecond,
refreshabilityTimeout: 100 * time.Millisecond,
}
lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, li, 0)
select {
case <-wrappedCtx.Done():
@ -139,8 +120,8 @@ func TestLockFailedRefresh(t *testing.T) {
case <-time.After(time.Second):
t.Fatal("failed lock refresh did not cause context cancellation")
}
// unlockRepo should not crash
unlockRepo(lock)
// Unlock should not crash
lock.Unlock()
}
type loggingBackend struct {
@ -156,24 +137,23 @@ func (b *loggingBackend) Save(ctx context.Context, h backend.Handle, rd backend.
}
func TestLockSuccessfulRefresh(t *testing.T) {
repo, cleanup, env := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) {
t.Parallel()
repo := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) {
return &loggingBackend{
Backend: r,
t: t,
}, nil
})
defer cleanup()
t.Logf("test for successful lock refresh %v", time.Now())
// reduce locking intervals to be suitable for testing
ri, rt := refreshInterval, refreshabilityTimeout
refreshInterval = 60 * time.Millisecond
refreshabilityTimeout = 500 * time.Millisecond
defer func() {
refreshInterval, refreshabilityTimeout = ri, rt
}()
lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, env)
li := &locker{
retrySleepStart: lockerInst.retrySleepStart,
retrySleepMax: lockerInst.retrySleepMax,
refreshInterval: 60 * time.Millisecond,
refreshabilityTimeout: 500 * time.Millisecond,
}
lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, li, 0)
select {
case <-wrappedCtx.Done():
@ -186,11 +166,11 @@ func TestLockSuccessfulRefresh(t *testing.T) {
buf = buf[:n]
t.Log(string(buf))
case <-time.After(2 * refreshabilityTimeout):
case <-time.After(2 * li.refreshabilityTimeout):
// expected lock refresh to work
}
// unlockRepo should not crash
unlockRepo(lock)
// Unlock should not crash
lock.Unlock()
}
type slowBackend struct {
@@ -208,26 +188,26 @@ func (b *slowBackend) Save(ctx context.Context, h backend.Handle, rd backend.Rew
}
func TestLockSuccessfulStaleRefresh(t *testing.T) {
t.Parallel()
var sb *slowBackend
repo, cleanup, env := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) {
repo := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) {
sb = &slowBackend{Backend: r}
return sb, nil
})
defer cleanup()
t.Logf("test for successful lock refresh %v", time.Now())
// reduce locking intervals to be suitable for testing
ri, rt := refreshInterval, refreshabilityTimeout
refreshInterval = 10 * time.Millisecond
refreshabilityTimeout = 50 * time.Millisecond
defer func() {
refreshInterval, refreshabilityTimeout = ri, rt
}()
li := &locker{
retrySleepStart: lockerInst.retrySleepStart,
retrySleepMax: lockerInst.retrySleepMax,
refreshInterval: 10 * time.Millisecond,
refreshabilityTimeout: 50 * time.Millisecond,
}
lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, env)
lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, li, 0)
// delay lock refreshing long enough that the lock would expire
sb.m.Lock()
sb.sleep = refreshabilityTimeout + refreshInterval
sb.sleep = li.refreshabilityTimeout + li.refreshInterval
sb.m.Unlock()
select {
@@ -235,7 +215,7 @@ func TestLockSuccessfulStaleRefresh(t *testing.T) {
// don't call t.Fatal to allow the lock to be properly cleaned up
t.Error("lock refresh failed", time.Now())
case <-time.After(refreshabilityTimeout):
case <-time.After(li.refreshabilityTimeout):
}
// reset slow backend
sb.m.Lock()
@@ -248,25 +228,26 @@ func TestLockSuccessfulStaleRefresh(t *testing.T) {
// don't call t.Fatal to allow the lock to be properly cleaned up
t.Error("lock refresh failed", time.Now())
case <-time.After(3 * refreshabilityTimeout):
case <-time.After(3 * li.refreshabilityTimeout):
// expected lock refresh to work
}
// unlockRepo should not crash
unlockRepo(lock)
// Unlock should not crash
lock.Unlock()
}
func TestLockWaitTimeout(t *testing.T) {
repo, cleanup, env := openLockTestRepo(t, nil)
defer cleanup()
t.Parallel()
repo := openLockTestRepo(t, nil)
elock, _, err := lockRepoExclusive(context.TODO(), repo, env.gopts.RetryLock, env.gopts.JSON)
elock, _, err := Lock(context.TODO(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {})
test.OK(t, err)
defer elock.Unlock()
retryLock := 200 * time.Millisecond
start := time.Now()
lock, _, err := lockRepo(context.TODO(), repo, retryLock, env.gopts.JSON)
_, _, err = Lock(context.TODO(), repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {})
duration := time.Since(start)
test.Assert(t, err != nil,
@@ -275,17 +256,15 @@ func TestLockWaitTimeout(t *testing.T) {
"create normal lock with exclusively locked repo didn't return the correct error")
test.Assert(t, retryLock <= duration && duration < retryLock*3/2,
"create normal lock with exclusively locked repo didn't wait for the specified timeout")
test.OK(t, lock.Unlock())
test.OK(t, elock.Unlock())
}
func TestLockWaitCancel(t *testing.T) {
repo, cleanup, env := openLockTestRepo(t, nil)
defer cleanup()
t.Parallel()
repo := openLockTestRepo(t, nil)
elock, _, err := lockRepoExclusive(context.TODO(), repo, env.gopts.RetryLock, env.gopts.JSON)
elock, _, err := Lock(context.TODO(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {})
test.OK(t, err)
defer elock.Unlock()
retryLock := 200 * time.Millisecond
cancelAfter := 40 * time.Millisecond
@@ -294,7 +273,7 @@ func TestLockWaitCancel(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
time.AfterFunc(cancelAfter, cancel)
lock, _, err := lockRepo(ctx, repo, retryLock, env.gopts.JSON)
_, _, err = Lock(ctx, repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {})
duration := time.Since(start)
test.Assert(t, err != nil,
@@ -303,27 +282,23 @@ func TestLockWaitCancel(t *testing.T) {
"create normal lock with exclusively locked repo didn't return the correct error")
test.Assert(t, cancelAfter <= duration && duration < retryLock-10*time.Millisecond,
"create normal lock with exclusively locked repo didn't return in time, duration %v", duration)
test.OK(t, lock.Unlock())
test.OK(t, elock.Unlock())
}
func TestLockWaitSuccess(t *testing.T) {
repo, cleanup, env := openLockTestRepo(t, nil)
defer cleanup()
t.Parallel()
repo := openLockTestRepo(t, nil)
elock, _, err := lockRepoExclusive(context.TODO(), repo, env.gopts.RetryLock, env.gopts.JSON)
elock, _, err := Lock(context.TODO(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {})
test.OK(t, err)
retryLock := 200 * time.Millisecond
unlockAfter := 40 * time.Millisecond
time.AfterFunc(unlockAfter, func() {
test.OK(t, elock.Unlock())
elock.Unlock()
})
lock, _, err := lockRepo(context.TODO(), repo, retryLock, env.gopts.JSON)
lock, _, err := Lock(context.TODO(), repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {})
test.OK(t, err)
test.OK(t, lock.Unlock())
lock.Unlock()
}
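The rewritten tests above show the shape of the refactored API: the command-level lockRepo/lockRepoExclusive/unlockRepo helpers are replaced by a single Lock function, and the refresh intervals move from mutable package globals into a locker struct that tests instantiate with shortened timings. A minimal usage sketch, assuming Lock is exported from the repository package (as the bare calls in these in-package tests suggest) and that the two callbacks are a retry-progress printer and a logger; lockAndRun and fn are illustrative names, not part of the commit:

package example

import (
	"context"
	"log"
	"time"

	"github.com/restic/restic/internal/repository"
	"github.com/restic/restic/internal/restic"
)

// lockAndRun takes a non-exclusive lock, retrying for up to retryLock if the
// repository is currently locked exclusively, then runs fn with a context
// that is canceled once the lock can no longer be refreshed.
func lockAndRun(ctx context.Context, repo restic.Repository, fn func(ctx context.Context) error) error {
	retryLock := 200 * time.Millisecond
	lock, wrappedCtx, err := repository.Lock(ctx, repo, false, retryLock,
		func(msg string) { log.Print(msg) }, // progress while waiting for the lock
		log.Printf)                          // logger for lock refresh messages
	if err != nil {
		return err
	}
	// the tests call Unlock without checking an error, so a bare defer suffices
	defer lock.Unlock()

	// long-running work must watch wrappedCtx rather than ctx: it is canceled
	// when the lock goes stale (see TestLockFailedRefresh above)
	return fn(wrappedCtx)
}

Keeping the intervals on a locker instance instead of patching globals is what allows these tests to run under t.Parallel() without interfering with one another.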

View file

@@ -221,10 +221,9 @@ func benchmarkLoadUnpacked(b *testing.B, version uint) {
var repoFixture = filepath.Join("testdata", "test-repo.tar.gz")
func TestRepositoryLoadIndex(t *testing.T) {
repodir, cleanup := rtest.Env(t, repoFixture)
repo, cleanup := repository.TestFromFixture(t, repoFixture)
defer cleanup()
repo := repository.TestOpenLocal(t, repodir)
rtest.OK(t, repo.LoadIndex(context.TODO(), nil))
}
@@ -243,7 +242,7 @@ func loadIndex(ctx context.Context, repo restic.LoaderUnpacked, id restic.ID) (*
}
func TestRepositoryLoadUnpackedBroken(t *testing.T) {
repodir, cleanup := rtest.Env(t, repoFixture)
repo, cleanup := repository.TestFromFixture(t, repoFixture)
defer cleanup()
data := rtest.Random(23, 12345)
@@ -252,7 +251,6 @@ func TestRepositoryLoadUnpackedBroken(t *testing.T) {
// damage buffer
data[0] ^= 0xff
repo := repository.TestOpenLocal(t, repodir)
// store broken file
err := repo.Backend().Save(context.TODO(), h, backend.NewByteReader(data, nil))
rtest.OK(t, err)
@@ -289,10 +287,7 @@ func TestRepositoryLoadUnpackedRetryBroken(t *testing.T) {
be, err := local.Open(context.TODO(), local.Config{Path: repodir, Connections: 2})
rtest.OK(t, err)
repo, err := repository.New(&damageOnceBackend{Backend: be}, repository.Options{})
rtest.OK(t, err)
err = repo.SearchKey(context.TODO(), rtest.TestPassword, 10, "")
rtest.OK(t, err)
repo := repository.TestOpenBackend(t, &damageOnceBackend{Backend: be})
rtest.OK(t, repo.LoadIndex(context.TODO(), nil))
}
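The damageOnceBackend wrapper used here is defined elsewhere in the test file and does not appear in this diff. A minimal sketch of the idea, assuming the usual signature of restic's backend Load method; the upstream implementation may differ in detail:

package example

import (
	"bytes"
	"context"
	"io"
	"sync"

	"github.com/restic/restic/internal/backend"
)

// damageOnceBackend corrupts the first read of each file and serves every
// later read unchanged, so the retry logic above it is exercised once per file.
type damageOnceBackend struct {
	backend.Backend

	m    sync.Mutex
	used map[string]bool
}

func (be *damageOnceBackend) Load(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
	be.m.Lock()
	if be.used == nil {
		be.used = make(map[string]bool)
	}
	first := !be.used[h.Name]
	be.used[h.Name] = true
	be.m.Unlock()

	if !first {
		return be.Backend.Load(ctx, h, length, offset, fn)
	}
	// first access: deliver the data with one byte flipped
	return be.Backend.Load(ctx, h, length, offset, func(rd io.Reader) error {
		buf, err := io.ReadAll(rd)
		if err != nil {
			return err
		}
		if len(buf) > 0 {
			buf[0] ^= 0xff
		}
		return fn(bytes.NewReader(buf))
	})
}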

View file

@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"os"
"sync"
"testing"
"github.com/restic/restic/internal/backend"
@@ -17,21 +18,22 @@ import (
"github.com/restic/chunker"
)
// testKDFParams are the parameters for the KDF to be used during testing.
var testKDFParams = crypto.Params{
N: 128,
R: 1,
P: 1,
}
type logger interface {
Logf(format string, args ...interface{})
}
var paramsOnce sync.Once
// TestUseLowSecurityKDFParameters configures low-security KDF parameters for testing.
func TestUseLowSecurityKDFParameters(t logger) {
t.Logf("using low-security KDF parameters for test")
Params = &testKDFParams
paramsOnce.Do(func() {
params = &crypto.Params{
N: 128,
R: 1,
P: 1,
}
})
}
// TestBackend returns a fully configured in-memory backend.
@@ -39,7 +41,7 @@ func TestBackend(_ testing.TB) backend.Backend {
return mem.New()
}
const TestChunkerPol = chunker.Pol(0x3DA3358B4DC173)
const testChunkerPol = chunker.Pol(0x3DA3358B4DC173)
// TestRepositoryWithBackend returns a repository initialized with a test
// password. If be is nil, an in-memory backend is used. A constant polynomial
@@ -58,7 +60,7 @@ func TestRepositoryWithBackend(t testing.TB, be backend.Backend, version uint, o
t.Fatalf("TestRepository(): new repo failed: %v", err)
}
cfg := restic.TestCreateConfig(t, TestChunkerPol, version)
cfg := restic.TestCreateConfig(t, testChunkerPol, version)
err = repo.init(context.TODO(), test.TestPassword, cfg)
if err != nil {
t.Fatalf("TestRepository(): initialize repo failed: %v", err)
@@ -98,8 +100,15 @@ func TestRepositoryWithVersion(t testing.TB, version uint) restic.Repository {
return TestRepositoryWithBackend(t, nil, version, opts)
}
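// TestFromFixture extracts the given repository fixture and opens it,
// returning the repository together with a cleanup function.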
func TestFromFixture(t testing.TB, repoFixture string) (restic.Repository, func()) {
repodir, cleanup := test.Env(t, repoFixture)
repo := TestOpenLocal(t, repodir)
return repo, cleanup
}
// TestOpenLocal opens a local repository.
func TestOpenLocal(t testing.TB, dir string) (r restic.Repository) {
func TestOpenLocal(t testing.TB, dir string) restic.Repository {
var be backend.Backend
be, err := local.Open(context.TODO(), local.Config{Path: dir, Connections: 2})
if err != nil {
@@ -108,6 +117,10 @@ func TestOpenLocal(t testing.TB, dir string) (r restic.Repository) {
be = retry.New(be, 3, nil, nil)
return TestOpenBackend(t, be)
}
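// TestOpenBackend opens a test repository on top of the given backend.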
func TestOpenBackend(t testing.TB, be backend.Backend) restic.Repository {
repo, err := New(be, Options{})
if err != nil {
t.Fatal(err)

View file

@@ -2,6 +2,7 @@ package restic
import (
"context"
"sync"
"testing"
"github.com/restic/restic/internal/errors"
@@ -67,12 +68,15 @@ func TestCreateConfig(t testing.TB, pol chunker.Pol, version uint) (cfg Config)
}
var checkPolynomial = true
var checkPolynomialOnce sync.Once
// TestDisableCheckPolynomial disables the check of the polynomial used by
// the chunker.
func TestDisableCheckPolynomial(t testing.TB) {
t.Logf("disabling check of the chunker polynomial")
checkPolynomial = false
checkPolynomialOnce.Do(func() {
checkPolynomial = false
})
}
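Both helpers now funnel their one-time change of package state through sync.Once. The diff does not spell out the motivation, but a plausible reading is that the tests in this commit run under t.Parallel(), and Once both serializes the write and makes repeated calls from different tests idempotent. The pattern in isolation, with purely illustrative names:

package main

import (
	"fmt"
	"sync"
)

// checkEnabled stands in for a package-level test knob such as
// checkPolynomial above.
var (
	checkEnabled = true
	disableOnce  sync.Once
)

// disableCheck may be called from any number of parallel tests: sync.Once
// serializes the write, so the callers do not race with each other.
func disableCheck() {
	disableOnce.Do(func() { checkEnabled = false })
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			disableCheck()
		}()
	}
	wg.Wait()
	fmt.Println("checkEnabled:", checkEnabled) // prints: checkEnabled: false
}

The trade-off is that the override cannot be reverted within the process, so the pattern only suits settings that every test in the package agrees on.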
// LoadConfig loads, checks and returns the config for a repository.