Mirror of https://github.com/restic/restic.git, synced 2024-12-21 23:33:03 +00:00
Merge pull request #3704 from MichaelEischer/compression-migrations
Support migration to repository format with compression
Commit: d2c5843c68
26 changed files with 439 additions and 38 deletions
@@ -13,7 +13,12 @@ The new format version has not received much testing yet. Do not rely on it as
 your only backup copy! Please run `check` in regular intervals to detect any
 problems.

-Upgrading in place is not yet supported. As a workaround, first create a new
+To upgrade in place run `migrate upgrade_repo_v2` followed by `prune`. See the
+documentation for more details. The migration checks the repository integrity
+and upgrades the repository format but will not change any data. Afterwards,
+prune will rewrite the metadata to make use of compression.
+
+As an alternative you can use the `copy` command to migrate snapshots: first create a new
 repository using `init --repository-version 2 --copy-chunker-params --repo2 path/to/old/repo`.
 Then use the `copy` command to copy all snapshots to the new repository.
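For illustration, the `copy`-based alternative starts from an init invocation like the one quoted above; a sketch with placeholder repository paths:

    $ restic init --repo /srv/new-repo --repository-version 2 --copy-chunker-params --repo2 /srv/old-repo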
@@ -219,15 +219,20 @@ func runCheck(opts CheckOptions, gopts GlobalOptions, args []string) error {
     Verbosef("load indexes\n")
     hints, errs := chkr.LoadIndex(gopts.ctx)

-    dupFound := false
+    errorsFound := false
+    suggestIndexRebuild := false
     for _, hint := range hints {
-        Printf("%v\n", hint)
-        if _, ok := hint.(checker.ErrDuplicatePacks); ok {
-            dupFound = true
+        switch hint.(type) {
+        case *checker.ErrDuplicatePacks, *checker.ErrOldIndexFormat:
+            Printf("%v\n", hint)
+            suggestIndexRebuild = true
+        default:
+            Warnf("error: %v\n", hint)
+            errorsFound = true
         }
     }

-    if dupFound {
+    if suggestIndexRebuild {
         Printf("This is non-critical, you can run `restic rebuild-index' to correct this\n")
     }
@@ -238,7 +243,6 @@ func runCheck(opts CheckOptions, gopts GlobalOptions, args []string) error {
         return errors.Fatal("LoadIndex returned errors")
     }

-    errorsFound := false
     orphanedPacks := 0
     errChan := make(chan error)
@@ -252,11 +256,11 @@ func runCheck(opts CheckOptions, gopts GlobalOptions, args []string) error {
             continue
         }
         errorsFound = true
-        Warnf("%v\n", err)
+        Warnf("error: %v\n", err)
     }

     if orphanedPacks > 0 {
-        Verbosef("%d additional files were found in the repo, which likely contain duplicate data.\nYou can run `restic prune` to correct this.\n", orphanedPacks)
+        Verbosef("%d additional files were found in the repo, which likely contain duplicate data.\nThis is non-critical, you can run `restic prune` to correct this.\n", orphanedPacks)
     }

     Verbosef("check snapshots, trees and blobs\n")
@@ -273,7 +277,7 @@ func runCheck(opts CheckOptions, gopts GlobalOptions, args []string) error {

     for err := range errChan {
         errorsFound = true
-        if e, ok := err.(checker.TreeError); ok {
+        if e, ok := err.(*checker.TreeError); ok {
             Warnf("error for tree %v:\n", e.ID.Str())
             for _, treeErr := range e.Errors {
                 Warnf("  %v\n", treeErr)
@@ -8,11 +8,12 @@ import (
 )

 var cmdMigrate = &cobra.Command{
-    Use:   "migrate [flags] [name]",
+    Use:   "migrate [flags] [migration name] [...]",
     Short: "Apply migrations",
     Long: `
-The "migrate" command applies migrations to a repository. When no migration
-name is explicitly given, a list of migrations that can be applied is printed.
+The "migrate" command checks which migrations can be applied for a repository
+and prints a list with available migration names. If one or more migration
+names are specified, these migrations are applied.

 EXIT STATUS
 ===========
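In practice the two modes of the command look like this (placeholder repository path):

    $ restic -r /srv/restic-repo migrate
    $ restic -r /srv/restic-repo migrate upgrade_repo_v2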
@@ -41,6 +42,8 @@ func init() {
 func checkMigrations(opts MigrateOptions, gopts GlobalOptions, repo restic.Repository) error {
     ctx := gopts.ctx
     Printf("available migrations:\n")
+    found := false
+
     for _, m := range migrations.All {
         ok, err := m.Check(ctx, repo)
         if err != nil {
@@ -48,10 +51,15 @@ func checkMigrations(opts MigrateOptions, gopts GlobalOptions, repo restic.Repos
         }

         if ok {
-            Printf("  %v: %v\n", m.Name(), m.Desc())
+            Printf("  %v\t%v\n", m.Name(), m.Desc())
+            found = true
         }
     }

+    if !found {
+        Printf("no migrations found")
+    }
+
     return nil
 }
@@ -76,6 +84,20 @@ func applyMigrations(opts MigrateOptions, gopts GlobalOptions, repo restic.Repos
             Warnf("check for migration %v failed, continuing anyway\n", m.Name())
         }

+        repoCheckOpts := m.RepoCheckOptions()
+        if repoCheckOpts != nil {
+            Printf("checking repository integrity...\n")
+
+            checkOptions := CheckOptions{}
+            checkGopts := gopts
+            // the repository is already locked
+            checkGopts.NoLock = true
+            err = runCheck(checkOptions, checkGopts, []string{})
+            if err != nil {
+                return err
+            }
+        }
+
         Printf("applying migration %v...\n", m.Name())
         if err = m.Apply(ctx, repo); err != nil {
             Warnf("migration %v failed: %v\n", m.Name(), err)
@@ -51,6 +51,7 @@ type PruneOptions struct {
     MaxRepackBytes uint64

     RepackCachableOnly bool
+    RepackUncompressed bool
 }

 var pruneOptions PruneOptions
@@ -68,6 +69,7 @@ func addPruneOptions(c *cobra.Command) {
     f.StringVar(&pruneOptions.MaxUnused, "max-unused", "5%", "tolerate given `limit` of unused data (absolute value in bytes with suffixes k/K, m/M, g/G, t/T, a value in % or the word 'unlimited')")
     f.StringVar(&pruneOptions.MaxRepackSize, "max-repack-size", "", "maximum `size` to repack (allowed suffixes: k/K, m/M, g/G, t/T)")
     f.BoolVar(&pruneOptions.RepackCachableOnly, "repack-cacheable-only", false, "only repack packs which are cacheable")
+    f.BoolVar(&pruneOptions.RepackUncompressed, "repack-uncompressed", false, "repack all uncompressed data")
 }

 func verifyPruneOptions(opts *PruneOptions) error {
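The new flag combines with the existing repack limits; a sketch with placeholder path and size:

    $ restic -r /srv/restic-repo prune --repack-uncompressed --max-repack-size 10G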
@@ -135,6 +137,10 @@ func runPrune(opts PruneOptions, gopts GlobalOptions) error {
         return err
     }

+    if opts.RepackUncompressed && gopts.Compression == repository.CompressionOff {
+        return errors.Fatal("disabled compression and `--repack-uncompressed` are mutually exclusive")
+    }
+
     repo, err := OpenRepository(gopts)
     if err != nil {
         return err
@@ -144,6 +150,10 @@ func runPrune(opts PruneOptions, gopts GlobalOptions) error {
         return errors.Fatal("prune requires a backend connection limit of at least two")
     }

+    if repo.Config().Version < 2 && opts.RepackUncompressed {
+        return errors.Fatal("compression requires at least repository format version 2")
+    }
+
     if opts.UnsafeNoSpaceRecovery != "" {
         repoID := repo.Config().ID
         if opts.UnsafeNoSpaceRecovery != repoID {
@@ -191,6 +201,7 @@ type packInfo struct {
     usedSize     uint64
     unusedSize   uint64
     tpe          restic.BlobType
+    uncompressed bool
 }

 type packInfoWithID struct {
@@ -299,6 +310,9 @@ func prune(opts PruneOptions, gopts GlobalOptions, repo restic.Repository, usedB
             ip.unusedSize += size
             ip.unusedBlobs++
         }
+        if !blob.IsCompressed() {
+            ip.uncompressed = true
+        }
         // update indexPack
         indexPack[blob.PackID] = ip
     }
@@ -318,6 +332,8 @@ func prune(opts PruneOptions, gopts GlobalOptions, repo restic.Repository, usedB
         }
     }

+    repoVersion := repo.Config().Version
+
     // loop over all packs and decide what to do
     bar := newProgressMax(!gopts.Quiet, uint64(len(indexPack)), "packs processed")
     err := repo.List(ctx, restic.PackFile, func(id restic.ID, packSize int64) error {
@@ -350,6 +366,15 @@ func prune(opts PruneOptions, gopts GlobalOptions, repo restic.Repository, usedB
             stats.packs.partlyUsed++
         }

+        mustCompress := false
+        if repoVersion >= 2 {
+            // repo v2: always repack tree blobs if uncompressed
+            // compress data blobs if requested
+            mustCompress = (p.tpe == restic.TreeBlob || opts.RepackUncompressed) && p.uncompressed
+        }
+        // use a flag that pack must be compressed
+        p.uncompressed = mustCompress
+
         // decide what to do
         switch {
         case p.usedBlobs == 0 && p.duplicateBlobs == 0:
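Read in isolation, the rule above amounts to a small predicate. The following is a sketch for explanation only, reusing the field and constant names from the diff; no such helper exists in the restic source:

// mustCompressPack restates the decision above: compression is only possible
// on repo v2, and only packs that still contain uncompressed blobs qualify.
// Tree packs are always compressed on repack; data packs only when
// --repack-uncompressed was given.
func mustCompressPack(repoVersion uint, tpe restic.BlobType, uncompressed, repackUncompressed bool) bool {
    if repoVersion < 2 {
        // repository format v1 cannot store compressed blobs at all
        return false
    }
    return (tpe == restic.TreeBlob || repackUncompressed) && uncompressed
}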
@@ -362,7 +387,7 @@ func prune(opts PruneOptions, gopts GlobalOptions, repo restic.Repository, usedB
             // if this is a data pack and --repack-cacheable-only is set => keep pack!
             keep(p)

-        case p.unusedBlobs == 0 && p.duplicateBlobs == 0 && p.tpe != restic.InvalidBlob:
+        case p.unusedBlobs == 0 && p.duplicateBlobs == 0 && p.tpe != restic.InvalidBlob && !mustCompress:
             // All blobs in pack are used and not duplicates/mixed => keep pack!
             keep(p)
@@ -447,8 +472,8 @@ func prune(opts PruneOptions, gopts GlobalOptions, repo restic.Repository, usedB
         case reachedRepackSize:
             keep(p.packInfo)

-        case p.duplicateBlobs > 0, p.tpe != restic.DataBlob:
-            // repacking duplicates/non-data is only limited by repackSize
+        case p.duplicateBlobs > 0, p.tpe != restic.DataBlob, p.uncompressed:
+            // repacking duplicates/non-data/uncompressed-trees is only limited by repackSize
             repack(p.ID, p.packInfo)

         case reachedUnusedSizeAfter:
@@ -40,7 +40,17 @@ options exist:
 be used to explicitly set the version for the new repository. By default,
 the current stable version is used. Have a look at the `design documentation
 <https://github.com/restic/restic/blob/master/doc/design.rst>`__ for
-details.
+details. The alias ``latest`` will always point to the latest repository version.
+The table below shows which restic version is required to use a certain
+repository version and the major new features introduced by each repository format.
+
++--------------------+------------------------+---------------------+
+| Repository version | Minimum restic version | Major new features  |
++====================+========================+=====================+
+| ``1``              | any version            |                     |
++--------------------+------------------------+---------------------+
+| ``2``              | >= 0.14.0              | Compression support |
++--------------------+------------------------+---------------------+
+

 Local
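Both an explicit version and the ``latest`` alias can be passed at init time; a sketch with a placeholder repository path:

    $ restic init --repo /srv/restic-repo --repository-version latest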
@@ -298,3 +298,26 @@ a file size value the following command may be used:

    $ restic -r /srv/restic-repo check --read-data-subset=50M
    $ restic -r /srv/restic-repo check --read-data-subset=10G
+
+
+Upgrading the repository format version
+=======================================
+
+Repositories created using earlier restic versions use an older repository
+format version and have to be upgraded to allow using all new features.
+Upgrading must be done explicitly as a newer repository version increases the
+minimum restic version required to access the repository. For example the
+repository format version 2 is only readable using restic 0.14.0 or newer.
+
+Upgrading to repo version 2 is a two-step process: first run
+``migrate upgrade_repo_v2`` which will check the repository integrity and
+then upgrade the repository version. Repository problems must be corrected
+before the migration will be possible. After the migration is complete, run
+``prune`` to compress the repository metadata. To limit the amount of data
+rewritten at once, you can use the ``prune --max-repack-size size``
+parameter, see :ref:`customize-pruning` for more details.
+
+File contents stored in the repository will not be rewritten; data from new
+backups will be compressed. Over time more and more of the repository will
+be compressed. To speed up this process and compress all data that is not
+yet compressed, you can run ``prune --repack-uncompressed``.
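Spelled out as commands, the upgrade described above reads as follows (placeholder path; the size limit is optional):

    $ restic -r /srv/restic-repo migrate upgrade_repo_v2
    $ restic -r /srv/restic-repo prune --max-repack-size 2G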
@@ -388,6 +388,8 @@ the specified duration: if ``forget --keep-within 7d`` is run 8 days after the
 last good snapshot, then the attacker can still use that opportunity to remove
 all legitimate snapshots.

+.. _customize-pruning:
+
 Customize pruning
 *****************
@@ -125,6 +125,11 @@ func (be *Backend) Hasher() hash.Hash {
     return md5.New()
 }

+// HasAtomicReplace returns whether Save() can atomically replace files
+func (be *Backend) HasAtomicReplace() bool {
+    return true
+}
+
 // Path returns the path in the bucket that is used for this backend.
 func (be *Backend) Path() string {
     return be.prefix
@@ -147,6 +147,11 @@ func (be *b2Backend) Hasher() hash.Hash {
     return nil
 }

+// HasAtomicReplace returns whether Save() can atomically replace files
+func (be *b2Backend) HasAtomicReplace() bool {
+    return true
+}
+
 // IsNotExist returns true if the error is caused by a non-existing file.
 func (be *b2Backend) IsNotExist(err error) bool {
     return b2.IsNotExist(errors.Cause(err))
@@ -67,6 +67,10 @@ func (be *Backend) Hasher() hash.Hash {
     return be.b.Hasher()
 }

+func (be *Backend) HasAtomicReplace() bool {
+    return be.b.HasAtomicReplace()
+}
+
 func (be *Backend) IsNotExist(err error) bool {
     return be.b.IsNotExist(err)
 }
@@ -201,6 +201,11 @@ func (be *Backend) Hasher() hash.Hash {
     return md5.New()
 }

+// HasAtomicReplace returns whether Save() can atomically replace files
+func (be *Backend) HasAtomicReplace() bool {
+    return true
+}
+
 // Path returns the path in the bucket that is used for this backend.
 func (be *Backend) Path() string {
     return be.prefix
@@ -102,6 +102,11 @@ func (b *Local) Hasher() hash.Hash {
     return nil
 }

+// HasAtomicReplace returns whether Save() can atomically replace files
+func (b *Local) HasAtomicReplace() bool {
+    return true
+}
+
 // IsNotExist returns true if the error is caused by a non existing file.
 func (b *Local) IsNotExist(err error) bool {
     return errors.Is(err, os.ErrNotExist)
@@ -268,6 +268,11 @@ func (be *MemoryBackend) Hasher() hash.Hash {
     return md5.New()
 }

+// HasAtomicReplace returns whether Save() can atomically replace files
+func (be *MemoryBackend) HasAtomicReplace() bool {
+    return false
+}
+
 // Delete removes all data in the backend.
 func (be *MemoryBackend) Delete(ctx context.Context) error {
     be.m.Lock()
@@ -121,6 +121,12 @@ func (b *Backend) Hasher() hash.Hash {
     return nil
 }

+// HasAtomicReplace returns whether Save() can atomically replace files
+func (b *Backend) HasAtomicReplace() bool {
+    // rest-server prevents overwriting
+    return false
+}
+
 // Save stores data in the backend at the handle.
 func (b *Backend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error {
     if err := h.Valid(); err != nil {
@@ -269,6 +269,11 @@ func (be *Backend) Hasher() hash.Hash {
     return nil
 }

+// HasAtomicReplace returns whether Save() can atomically replace files
+func (be *Backend) HasAtomicReplace() bool {
+    return true
+}
+
 // Path returns the path in the bucket that is used for this backend.
 func (be *Backend) Path() string {
     return be.cfg.Prefix
@@ -267,6 +267,12 @@ func (r *SFTP) Hasher() hash.Hash {
     return nil
 }

+// HasAtomicReplace returns whether Save() can atomically replace files
+func (r *SFTP) HasAtomicReplace() bool {
+    // we use sftp's 'Rename()' in 'Save()' which does not allow overwriting
+    return false
+}
+
 // Join joins the given paths and cleans them afterwards. This always uses
 // forward slashes, which is required by sftp.
 func Join(parts ...string) string {
@@ -129,6 +129,11 @@ func (be *beSwift) Hasher() hash.Hash {
     return md5.New()
 }

+// HasAtomicReplace returns whether Save() can atomically replace files
+func (be *beSwift) HasAtomicReplace() bool {
+    return true
+}
+
 // Load runs fn with a reader that yields the contents of the file at h at the
 // given offset.
 func (be *beSwift) Load(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
@@ -63,7 +63,7 @@ type ErrDuplicatePacks struct {
     Indexes restic.IDSet
 }

-func (e ErrDuplicatePacks) Error() string {
+func (e *ErrDuplicatePacks) Error() string {
     return fmt.Sprintf("pack %v contained in several indexes: %v", e.PackID.Str(), e.Indexes)
 }
@@ -73,7 +73,7 @@ type ErrOldIndexFormat struct {
     restic.ID
 }

-func (err ErrOldIndexFormat) Error() string {
+func (err *ErrOldIndexFormat) Error() string {
     return fmt.Sprintf("index %v has old format", err.ID.Str())
 }
@@ -93,7 +93,7 @@ func (c *Checker) LoadIndex(ctx context.Context) (hints []error, errs []error) {

         if oldFormat {
             debug.Log("index %v has old format", id.Str())
-            hints = append(hints, ErrOldIndexFormat{id})
+            hints = append(hints, &ErrOldIndexFormat{id})
         }

         err = errors.Wrapf(err, "error loading index %v", id.Str())
@@ -137,7 +137,7 @@ func (c *Checker) LoadIndex(ctx context.Context) (hints []error, errs []error) {
     for packID := range c.packs {
         debug.Log("  check pack %v: contained in %d indexes", packID, len(packToIndex[packID]))
         if len(packToIndex[packID]) > 1 {
-            hints = append(hints, ErrDuplicatePacks{
+            hints = append(hints, &ErrDuplicatePacks{
                 PackID:  packID,
                 Indexes: packToIndex[packID],
             })
@@ -257,7 +257,7 @@ type TreeError struct {
     Errors []error
 }

-func (e TreeError) Error() string {
+func (e *TreeError) Error() string {
     return fmt.Sprintf("tree %v: %v", e.ID.Str(), e.Errors)
 }
@@ -276,7 +277,7 @@ func (c *Checker) checkTreeWorker(ctx context.Context, trees <-chan restic.TreeI
         if len(errs) == 0 {
             continue
         }
-        treeError := TreeError{ID: job.ID, Errors: errs}
+        treeError := &TreeError{ID: job.ID, Errors: errs}
         select {
         case <-ctx.Done():
             return
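The switch from value to pointer receivers changes how callers must type-assert these errors, which is why the check command above now matches *checker.ErrDuplicatePacks. A minimal, standalone illustration (not restic code):

package main

import "fmt"

// DupErr mimics an error type whose Error method has a pointer receiver,
// like the checker errors above.
type DupErr struct{ id int }

func (e *DupErr) Error() string { return fmt.Sprintf("duplicate %d", e.id) }

func main() {
    var hint error = &DupErr{id: 7}

    // The pointer assertion matches, because hint stores a *DupErr.
    if _, ok := hint.(*DupErr); ok {
        fmt.Println("matched *DupErr")
    }

    // A value assertion such as hint.(DupErr) would not even compile here:
    // DupErr (the value type) does not implement error, only *DupErr does.
}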
@@ -289,7 +289,7 @@ func TestDuplicatePacksInIndex(t *testing.T) {

     found := false
     for _, hint := range hints {
-        if _, ok := hint.(checker.ErrDuplicatePacks); ok {
+        if _, ok := hint.(*checker.ErrDuplicatePacks); ok {
             found = true
         } else {
             t.Errorf("got unexpected hint: %v", hint)
@@ -6,11 +6,16 @@ import (
     "github.com/restic/restic/internal/restic"
 )

+type RepositoryCheckOptions struct {
+}
+
 // Migration implements a data migration.
 type Migration interface {
     // Check returns true if the migration can be applied to a repo.
     Check(context.Context, restic.Repository) (bool, error)

+    RepoCheckOptions() *RepositoryCheckOptions
+
     // Apply runs the migration.
     Apply(context.Context, restic.Repository) error
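Every migration now has to implement RepoCheckOptions: returning nil skips the repository check, while a non-nil value requests it before Apply runs (see applyMigrations above). A hypothetical no-op migration, for illustration only; Name and Desc are assumed to be part of the interface based on their use in cmd_migrate.go:

package migrations

import (
    "context"

    "github.com/restic/restic/internal/restic"
)

// NoopMigration is a hypothetical example, not part of restic.
type NoopMigration struct{}

func (*NoopMigration) Name() string { return "noop" }
func (*NoopMigration) Desc() string { return "do nothing (example)" }

// Check reports that the migration is always applicable.
func (*NoopMigration) Check(ctx context.Context, repo restic.Repository) (bool, error) {
    return true, nil
}

// RepoCheckOptions returns nil, so no repository check runs before Apply.
func (*NoopMigration) RepoCheckOptions() *RepositoryCheckOptions {
    return nil
}

// Apply does nothing.
func (*NoopMigration) Apply(ctx context.Context, repo restic.Repository) error {
    return nil
}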
@@ -37,6 +37,10 @@ func (m *S3Layout) Check(ctx context.Context, repo restic.Repository) (bool, err
     return true, nil
 }

+func (m *S3Layout) RepoCheckOptions() *RepositoryCheckOptions {
+    return nil
+}
+
 func retry(max int, fail func(err error), f func() error) error {
     var err error
     for i := 0; i < max; i++ {
internal/migrations/upgrade_repo_v2.go (new file, 126 lines)
@@ -0,0 +1,126 @@
package migrations

import (
    "context"
    "fmt"
    "io"
    "io/ioutil"
    "os"
    "path/filepath"

    "github.com/restic/restic/internal/restic"
)

func init() {
    register(&UpgradeRepoV2{})
}

type UpgradeRepoV2Error struct {
    UploadNewConfigError   error
    ReuploadOldConfigError error

    BackupFilePath string
}

func (err *UpgradeRepoV2Error) Error() string {
    if err.ReuploadOldConfigError != nil {
        return fmt.Sprintf("error uploading config (%v), re-uploading old config file failed as well (%v), but there is a backup of the config file in %v", err.UploadNewConfigError, err.ReuploadOldConfigError, err.BackupFilePath)
    }

    return fmt.Sprintf("error uploading config (%v), re-uploaded old config was successful, there is a backup of the config file in %v", err.UploadNewConfigError, err.BackupFilePath)
}

func (err *UpgradeRepoV2Error) Unwrap() error {
    // consider the original upload error as the primary cause
    return err.UploadNewConfigError
}

type UpgradeRepoV2 struct{}

func (*UpgradeRepoV2) Name() string {
    return "upgrade_repo_v2"
}

func (*UpgradeRepoV2) Desc() string {
    return "upgrade a repository to version 2"
}

func (*UpgradeRepoV2) Check(ctx context.Context, repo restic.Repository) (bool, error) {
    isV1 := repo.Config().Version == 1
    return isV1, nil
}

func (*UpgradeRepoV2) RepoCheckOptions() *RepositoryCheckOptions {
    return &RepositoryCheckOptions{}
}

func (*UpgradeRepoV2) upgrade(ctx context.Context, repo restic.Repository) error {
    h := restic.Handle{Type: restic.ConfigFile}

    if !repo.Backend().HasAtomicReplace() {
        // remove the original file for backends which do not support atomic overwriting
        err := repo.Backend().Remove(ctx, h)
        if err != nil {
            return fmt.Errorf("remove config failed: %w", err)
        }
    }

    // upgrade config
    cfg := repo.Config()
    cfg.Version = 2

    _, err := repo.SaveJSONUnpacked(ctx, restic.ConfigFile, cfg)
    if err != nil {
        return fmt.Errorf("save new config file failed: %w", err)
    }

    return nil
}

func (m *UpgradeRepoV2) Apply(ctx context.Context, repo restic.Repository) error {
    tempdir, err := ioutil.TempDir("", "restic-migrate-upgrade-repo-v2-")
    if err != nil {
        return fmt.Errorf("create temp dir failed: %w", err)
    }

    h := restic.Handle{Type: restic.ConfigFile}

    // read raw config file and save it to a temp dir, just in case
    var rawConfigFile []byte
    err = repo.Backend().Load(ctx, h, 0, 0, func(rd io.Reader) (err error) {
        rawConfigFile, err = ioutil.ReadAll(rd)
        return err
    })
    if err != nil {
        return fmt.Errorf("load config file failed: %w", err)
    }

    backupFileName := filepath.Join(tempdir, "config")
    err = ioutil.WriteFile(backupFileName, rawConfigFile, 0600)
    if err != nil {
        return fmt.Errorf("write config file backup to %v failed: %w", tempdir, err)
    }

    // run the upgrade
    err = m.upgrade(ctx, repo)
    if err != nil {

        // build an error we can return to the caller
        repoError := &UpgradeRepoV2Error{
            UploadNewConfigError: err,
            BackupFilePath:       backupFileName,
        }

        // try contingency methods, reupload the original file
        _ = repo.Backend().Remove(ctx, h)
        err = repo.Backend().Save(ctx, h, restic.NewByteReader(rawConfigFile, nil))
        if err != nil {
            repoError.ReuploadOldConfigError = err
        }

        return repoError
    }

    _ = os.Remove(backupFileName)
    _ = os.Remove(tempdir)
    return nil
}
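Since UpgradeRepoV2Error implements Unwrap, callers can inspect a failed upgrade with the standard errors package. A sketch of such a caller; reportUpgradeFailure is hypothetical, and these internal packages are only importable inside the restic module:

package migrations

import (
    "context"
    "errors"
    "fmt"

    "github.com/restic/restic/internal/restic"
)

// reportUpgradeFailure is a hypothetical helper showing how the error type
// above can be inspected; it is not part of the pull request.
func reportUpgradeFailure(ctx context.Context, repo restic.Repository) {
    m := &UpgradeRepoV2{}

    err := m.Apply(ctx, repo)

    var upgradeErr *UpgradeRepoV2Error
    if errors.As(err, &upgradeErr) {
        // Unwrap() exposes the original upload error to errors.Is/As;
        // the backup path tells the user how to restore the old config.
        fmt.Println("upgrade failed, config backup kept at:", upgradeErr.BackupFilePath)
    }
}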
internal/migrations/upgrade_repo_v2_test.go (new file, 112 lines)
@@ -0,0 +1,112 @@
package migrations

import (
    "context"
    "os"
    "path/filepath"
    "sync"
    "testing"

    "github.com/restic/restic/internal/errors"
    "github.com/restic/restic/internal/repository"
    "github.com/restic/restic/internal/restic"
    "github.com/restic/restic/internal/test"
)

func TestUpgradeRepoV2(t *testing.T) {
    repo, cleanup := repository.TestRepositoryWithVersion(t, 1)
    defer cleanup()

    if repo.Config().Version != 1 {
        t.Fatal("test repo has wrong version")
    }

    m := &UpgradeRepoV2{}

    ok, err := m.Check(context.Background(), repo)
    if err != nil {
        t.Fatal(err)
    }

    if !ok {
        t.Fatal("migration check returned false")
    }

    err = m.Apply(context.Background(), repo)
    if err != nil {
        t.Fatal(err)
    }
}

type failBackend struct {
    restic.Backend

    mu                        sync.Mutex
    ConfigFileSavesUntilError uint
}

func (be *failBackend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error {
    if h.Type != restic.ConfigFile {
        return be.Backend.Save(ctx, h, rd)
    }

    be.mu.Lock()
    if be.ConfigFileSavesUntilError == 0 {
        be.mu.Unlock()
        return errors.New("failure induced for testing")
    }

    be.ConfigFileSavesUntilError--
    be.mu.Unlock()

    return be.Backend.Save(ctx, h, rd)
}

func TestUpgradeRepoV2Failure(t *testing.T) {
    be, cleanup := repository.TestBackend(t)
    defer cleanup()

    // wrap backend so that it fails upgrading the config after the initial write
    be = &failBackend{
        ConfigFileSavesUntilError: 1,
        Backend:                   be,
    }

    repo, cleanup := repository.TestRepositoryWithBackend(t, be, 1)
    defer cleanup()

    if repo.Config().Version != 1 {
        t.Fatal("test repo has wrong version")
    }

    m := &UpgradeRepoV2{}

    ok, err := m.Check(context.Background(), repo)
    if err != nil {
        t.Fatal(err)
    }

    if !ok {
        t.Fatal("migration check returned false")
    }

    err = m.Apply(context.Background(), repo)
    if err == nil {
        t.Fatal("expected error returned from Apply(), got nil")
    }

    upgradeErr := err.(*UpgradeRepoV2Error)
    if upgradeErr.UploadNewConfigError == nil {
        t.Fatal("expected upload error, got nil")
    }

    if upgradeErr.ReuploadOldConfigError == nil {
        t.Fatal("expected reupload error, got nil")
    }

    if upgradeErr.BackupFilePath == "" {
        t.Fatal("no backup file path found")
    }
    test.OK(t, os.Remove(upgradeErr.BackupFilePath))
    test.OK(t, os.Remove(filepath.Dir(upgradeErr.BackupFilePath)))
}
@@ -11,18 +11,19 @@ import (

 // Backend implements a mock backend.
 type Backend struct {
-    CloseFn       func() error
-    IsNotExistFn  func(err error) bool
-    SaveFn        func(ctx context.Context, h restic.Handle, rd restic.RewindReader) error
-    OpenReaderFn  func(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error)
-    StatFn        func(ctx context.Context, h restic.Handle) (restic.FileInfo, error)
-    ListFn        func(ctx context.Context, t restic.FileType, fn func(restic.FileInfo) error) error
-    RemoveFn      func(ctx context.Context, h restic.Handle) error
-    TestFn        func(ctx context.Context, h restic.Handle) (bool, error)
-    DeleteFn      func(ctx context.Context) error
-    ConnectionsFn func() uint
-    LocationFn    func() string
-    HasherFn      func() hash.Hash
+    CloseFn            func() error
+    IsNotExistFn       func(err error) bool
+    SaveFn             func(ctx context.Context, h restic.Handle, rd restic.RewindReader) error
+    OpenReaderFn       func(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error)
+    StatFn             func(ctx context.Context, h restic.Handle) (restic.FileInfo, error)
+    ListFn             func(ctx context.Context, t restic.FileType, fn func(restic.FileInfo) error) error
+    RemoveFn           func(ctx context.Context, h restic.Handle) error
+    TestFn             func(ctx context.Context, h restic.Handle) (bool, error)
+    DeleteFn           func(ctx context.Context) error
+    ConnectionsFn      func() uint
+    LocationFn         func() string
+    HasherFn           func() hash.Hash
+    HasAtomicReplaceFn func() bool
 }

 // NewBackend returns new mock Backend instance
@@ -66,6 +67,14 @@ func (m *Backend) Hasher() hash.Hash {
     return m.HasherFn()
 }

+// HasAtomicReplace returns whether Save() can atomically replace files
+func (m *Backend) HasAtomicReplace() bool {
+    if m.HasAtomicReplaceFn == nil {
+        return false
+    }
+    return m.HasAtomicReplaceFn()
+}
+
 // IsNotExist returns true if the error is caused by a missing file.
 func (m *Backend) IsNotExist(err error) bool {
     if m.IsNotExistFn == nil {
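A sketch of how a test might drive the new hook on the mock backend; the import path is assumed from the restic source layout and the test itself is illustrative:

package mock_test

import (
    "testing"

    "github.com/restic/restic/internal/mock" // assumed path of the mock package
)

func TestHasAtomicReplaceHook(t *testing.T) {
    be := &mock.Backend{}

    // Without HasAtomicReplaceFn set, the mock defaults to false.
    if be.HasAtomicReplace() {
        t.Error("expected default of false")
    }

    be.HasAtomicReplaceFn = func() bool { return true }
    if !be.HasAtomicReplace() {
        t.Error("expected true from the injected hook")
    }
}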
@@ -24,6 +24,9 @@ type Backend interface {
     // Hasher may return a hash function for calculating a content hash for the backend
     Hasher() hash.Hash

+    // HasAtomicReplace returns whether Save() can atomically replace files
+    HasAtomicReplace() bool
+
     // Test a boolean value whether a File with the name and type exists.
     Test(ctx context.Context, h Handle) (bool, error)
@@ -23,7 +23,7 @@ const MaxRepoVersion = 2

 // StableRepoVersion is the version that is written to the config when a repository
 // is newly created with Init().
-const StableRepoVersion = 1
+const StableRepoVersion = 2

 // JSONUnpackedLoader loads unpacked JSON.
 type JSONUnpackedLoader interface {