package repository

import (
	"bufio"
	"bytes"
	"context"
	"fmt"
	"io"
	"os"
	"runtime"
	"sort"
	"sync"

	"github.com/cenkalti/backoff/v4"
	"github.com/klauspost/compress/zstd"
	"github.com/restic/chunker"

	"github.com/restic/restic/internal/backend"
	"github.com/restic/restic/internal/backend/dryrun"
	"github.com/restic/restic/internal/cache"
	"github.com/restic/restic/internal/crypto"
	"github.com/restic/restic/internal/debug"
	"github.com/restic/restic/internal/errors"
	"github.com/restic/restic/internal/index"
	"github.com/restic/restic/internal/pack"
	"github.com/restic/restic/internal/restic"
	"github.com/restic/restic/internal/ui/progress"

	"golang.org/x/sync/errgroup"
)

// MaxStreamBufferSize bounds the buffer used while streaming pack files.
const MaxStreamBufferSize = 4 * 1024 * 1024

// MinPackSize, DefaultPackSize and MaxPackSize bound the target size of newly
// created pack files.
const MinPackSize = 4 * 1024 * 1024
const DefaultPackSize = 16 * 1024 * 1024
const MaxPackSize = 128 * 1024 * 1024

// Repository is used to access a repository in a backend.
type Repository struct {
	be    backend.Backend
	cfg   restic.Config
	key   *crypto.Key
	keyID restic.ID
	idx   *index.MasterIndex
	Cache *cache.Cache

	opts Options

	noAutoIndexUpdate bool

	packerWg *errgroup.Group
	uploader *packerUploader
	treePM   *packerManager
	dataPM   *packerManager

	allocEnc sync.Once
	allocDec sync.Once
	enc      *zstd.Encoder
	dec      *zstd.Decoder
}

// Options holds the configurable parameters of a Repository.
type Options struct {
	Compression CompressionMode
	PackSize    uint
}

// CompressionMode configures if data should be compressed.
type CompressionMode uint

// Constants for the different compression levels.
const (
	CompressionAuto    CompressionMode = 0
	CompressionOff     CompressionMode = 1
	CompressionMax     CompressionMode = 2
	CompressionInvalid CompressionMode = 3
)

// Set implements the method needed for pflag command flag parsing.
func (c *CompressionMode) Set(s string) error {
	switch s {
	case "auto":
		*c = CompressionAuto
	case "off":
		*c = CompressionOff
	case "max":
		*c = CompressionMax
	default:
		*c = CompressionInvalid
		return fmt.Errorf("invalid compression mode %q, must be one of (auto|off|max)", s)
	}

	return nil
}

func (c *CompressionMode) String() string {
	switch *c {
	case CompressionAuto:
		return "auto"
	case CompressionOff:
		return "off"
	case CompressionMax:
		return "max"
	default:
		return "invalid"
	}
}

func (c *CompressionMode) Type() string {
	return "mode"
}
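
// Together, Set, String and Type satisfy the pflag.Value interface, so a
// CompressionMode can be bound directly to a command line flag. A minimal
// usage sketch (assuming a *pflag.FlagSet named flags and an Options value
// named opts, neither of which is defined in this file):
//
//	flags.Var(&opts.Compression, "compression", "compression mode (auto|off|max)")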

// New returns a new repository with backend be.
func New(be backend.Backend, opts Options) (*Repository, error) {
	if opts.Compression == CompressionInvalid {
		return nil, errors.New("invalid compression mode")
	}

	if opts.PackSize == 0 {
		opts.PackSize = DefaultPackSize
	}
	if opts.PackSize > MaxPackSize {
		return nil, fmt.Errorf("pack size larger than limit of %v MiB", MaxPackSize/1024/1024)
	} else if opts.PackSize < MinPackSize {
		return nil, fmt.Errorf("pack size smaller than minimum of %v MiB", MinPackSize/1024/1024)
	}

	repo := &Repository{
		be:   be,
		opts: opts,
		idx:  index.NewMasterIndex(),
	}

	return repo, nil
}
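
// newRepositorySketch is an illustrative usage sketch for New: a zero
// PackSize is replaced with DefaultPackSize, while out-of-range values are
// rejected.
func newRepositorySketch(be backend.Backend) (*Repository, error) {
	return New(be, Options{
		Compression: CompressionAuto, // compress on repo v2 where it pays off
	})
}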

// DisableAutoIndexUpdate deactivates the automatic finalization and upload of
// new indexes once these are full.
func (r *Repository) DisableAutoIndexUpdate() {
	r.noAutoIndexUpdate = true
}

// setConfig assigns the given config and updates the repository parameters accordingly.
func (r *Repository) setConfig(cfg restic.Config) {
	r.cfg = cfg
	if r.cfg.Version >= 2 {
		r.idx.MarkCompressed()
	}
}

// Config returns the repository configuration.
func (r *Repository) Config() restic.Config {
	return r.cfg
}

// PackSize returns the target size of a pack file when uploading.
func (r *Repository) PackSize() uint {
	return r.opts.PackSize
}

// UseCache replaces the backend with the wrapped cache.
func (r *Repository) UseCache(c *cache.Cache) {
	if c == nil {
		return
	}

	debug.Log("using cache")
	r.Cache = c
	r.be = c.Wrap(r.be)
}

// SetDryRun sets the repo backend into dry-run mode.
func (r *Repository) SetDryRun() {
	r.be = dryrun.New(r.be)
}

// LoadUnpacked loads and decrypts the file with the given type and ID.
func (r *Repository) LoadUnpacked(ctx context.Context, t restic.FileType, id restic.ID) ([]byte, error) {
	debug.Log("load %v with id %v", t, id)

	if t == restic.ConfigFile {
		id = restic.ID{}
	}

	ctx, cancel := context.WithCancel(ctx)
	// ensure the derived context is always released
	defer cancel()

	h := backend.Handle{Type: t, Name: id.String()}
	retriedInvalidData := false
	var dataErr error
	wr := new(bytes.Buffer)

	err := r.be.Load(ctx, h, 0, 0, func(rd io.Reader) error {
		// make sure this call is idempotent, in case an error occurs
		wr.Reset()
		_, cerr := io.Copy(wr, rd)
		if cerr != nil {
			return cerr
		}

		buf := wr.Bytes()
		if t != restic.ConfigFile && !restic.Hash(buf).Equal(id) {
			debug.Log("retry loading broken blob %v", h)
			if !retriedInvalidData {
				retriedInvalidData = true
			} else {
				// with a canceled context there is no guarantee which error
				// will be returned by `be.Load`.
				dataErr = fmt.Errorf("load(%v): %w", h, restic.ErrInvalidData)
				cancel()
			}
			return restic.ErrInvalidData
		}
		return nil
	})

	if dataErr != nil {
		return nil, dataErr
	}
	if err != nil {
		return nil, err
	}

	buf := wr.Bytes()
	nonce, ciphertext := buf[:r.key.NonceSize()], buf[r.key.NonceSize():]
	plaintext, err := r.key.Open(ciphertext[:0], nonce, ciphertext, nil)
	if err != nil {
		return nil, err
	}
	if t != restic.ConfigFile {
		return r.decompressUnpacked(plaintext)
	}

	return plaintext, nil
}
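
// loadConfigSketch is an illustrative usage sketch for LoadUnpacked: for
// restic.ConfigFile the passed ID is ignored (the config always lives under
// the zero ID), and the returned bytes are already decrypted and, on repo v2,
// decompressed.
func loadConfigSketch(ctx context.Context, r *Repository) ([]byte, error) {
	return r.LoadUnpacked(ctx, restic.ConfigFile, restic.ID{})
}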

type haver interface {
	Has(backend.Handle) bool
}

// sortCachedPacksFirst moves all cached pack files to the front of blobs.
func sortCachedPacksFirst(cache haver, blobs []restic.PackedBlob) {
	if cache == nil {
		return
	}

	// no need to sort a list with one element
	if len(blobs) == 1 {
		return
	}

	cached := blobs[:0]
	noncached := make([]restic.PackedBlob, 0, len(blobs)/2)

	for _, blob := range blobs {
		if cache.Has(backend.Handle{Type: restic.PackFile, Name: blob.PackID.String()}) {
			cached = append(cached, blob)
			continue
		}
		noncached = append(noncached, blob)
	}

	copy(blobs[len(cached):], noncached)
}

// LoadBlob loads a blob of type t from the repository.
// It may use all of buf[:cap(buf)] as scratch space.
func (r *Repository) LoadBlob(ctx context.Context, t restic.BlobType, id restic.ID, buf []byte) ([]byte, error) {
	debug.Log("load %v with id %v (buf len %v, cap %d)", t, id, len(buf), cap(buf))

	// lookup packs
	blobs := r.idx.Lookup(restic.BlobHandle{ID: id, Type: t})
	if len(blobs) == 0 {
		debug.Log("id %v not found in index", id)
		return nil, errors.Errorf("id %v not found in repository", id)
	}

	// try cached pack files first
	sortCachedPacksFirst(r.Cache, blobs)

	var lastError error
	for _, blob := range blobs {
		debug.Log("blob %v/%v found: %v", t, id, blob)

		if blob.Type != t {
			debug.Log("blob %v has wrong block type, want %v", blob, t)
		}

		// load blob from pack
		h := backend.Handle{Type: restic.PackFile, Name: blob.PackID.String(), IsMetadata: t.IsMetadata()}

		switch {
		case cap(buf) < int(blob.Length):
			buf = make([]byte, blob.Length)
		case len(buf) != int(blob.Length):
			buf = buf[:blob.Length]
		}

		n, err := backend.ReadAt(ctx, r.be, h, int64(blob.Offset), buf)
		if err != nil {
			debug.Log("error loading blob %v: %v", blob, err)
			lastError = err
			continue
		}

		if uint(n) != blob.Length {
			lastError = errors.Errorf("error loading blob %v: wrong length returned, want %d, got %d",
				id.Str(), blob.Length, uint(n))
			debug.Log("lastError: %v", lastError)
			continue
		}

		// decrypt
		nonce, ciphertext := buf[:r.key.NonceSize()], buf[r.key.NonceSize():]
		plaintext, err := r.key.Open(ciphertext[:0], nonce, ciphertext, nil)
		if err != nil {
			lastError = errors.Errorf("decrypting blob %v failed: %v", id, err)
			continue
		}

		if blob.IsCompressed() {
			plaintext, err = r.getZstdDecoder().DecodeAll(plaintext, make([]byte, 0, blob.DataLength()))
			if err != nil {
				lastError = errors.Errorf("decompressing blob %v failed: %v", id, err)
				continue
			}
		}

		// check hash
		if !restic.Hash(plaintext).Equal(id) {
			lastError = errors.Errorf("blob %v returned invalid hash", id)
			continue
		}

		if len(plaintext) > cap(buf) {
			return plaintext, nil
		}
		// move decrypted data to the start of the buffer
		buf = buf[:len(plaintext)]
		copy(buf, plaintext)
		return buf, nil
	}

	if lastError != nil {
		return nil, lastError
	}

	return nil, errors.Errorf("loading blob %v from %v packs failed", id.Str(), len(blobs))
}
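
// loadBlobsReusingBuffer is an illustrative sketch: LoadBlob may use all of
// buf[:cap(buf)] as scratch space and returns the (possibly reallocated)
// buffer, so one buffer can be reused across many loads.
func loadBlobsReusingBuffer(ctx context.Context, r *Repository, ids restic.IDs) error {
	var buf []byte
	for _, id := range ids {
		var err error
		buf, err = r.LoadBlob(ctx, restic.DataBlob, id, buf)
		if err != nil {
			return err
		}
		// process buf here; the next iteration may overwrite it
	}
	return nil
}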

// LookupBlobSize returns the size of blob id.
func (r *Repository) LookupBlobSize(id restic.ID, tpe restic.BlobType) (uint, bool) {
	return r.idx.LookupSize(restic.BlobHandle{ID: id, Type: tpe})
}

func (r *Repository) getZstdEncoder() *zstd.Encoder {
	r.allocEnc.Do(func() {
		level := zstd.SpeedDefault
		if r.opts.Compression == CompressionMax {
			level = zstd.SpeedBestCompression
		}

		opts := []zstd.EOption{
			// Set the compression level configured.
			zstd.WithEncoderLevel(level),
			// Disable CRC, we have enough checks in place, makes the
			// compressed data four bytes shorter.
			zstd.WithEncoderCRC(false),
			// Set a window of 512 KiB, so we have good lookbehind for usual
			// blob sizes.
			zstd.WithWindowSize(512 * 1024),
		}

		enc, err := zstd.NewWriter(nil, opts...)
		if err != nil {
			panic(err)
		}
		r.enc = enc
	})
	return r.enc
}

func (r *Repository) getZstdDecoder() *zstd.Decoder {
	r.allocDec.Do(func() {
		opts := []zstd.DOption{
			// Use all available cores.
			zstd.WithDecoderConcurrency(0),
			// Limit the maximum decompressed memory. Set to a very high,
			// conservative value.
			zstd.WithDecoderMaxMemory(16 * 1024 * 1024 * 1024),
		}

		dec, err := zstd.NewReader(nil, opts...)
		if err != nil {
			panic(err)
		}
		r.dec = dec
	})
	return r.dec
}

// saveAndEncrypt encrypts data and stores it to the backend as type t. If data
// is small enough, it will be packed together with other small blobs. The
// caller must ensure that the id matches the data. Returned is the size data
// occupies in the repo (compressed or not, including the encryption overhead).
func (r *Repository) saveAndEncrypt(ctx context.Context, t restic.BlobType, data []byte, id restic.ID) (size int, err error) {
	debug.Log("save id %v (%v, %d bytes)", id, t, len(data))

	uncompressedLength := 0
	if r.cfg.Version > 1 {
		// we have a repo v2, so compression is available. if the user opts to
		// not compress, we won't compress any data, but everything else is
		// compressed.
		if r.opts.Compression != CompressionOff || t != restic.DataBlob {
			uncompressedLength = len(data)
			data = r.getZstdEncoder().EncodeAll(data, nil)
		}
	}

	nonce := crypto.NewRandomNonce()

	ciphertext := make([]byte, 0, crypto.CiphertextLength(len(data)))
	ciphertext = append(ciphertext, nonce...)

	// encrypt blob
	ciphertext = r.key.Seal(ciphertext, nonce, data, nil)

	// find suitable packer and add blob
	var pm *packerManager

	switch t {
	case restic.TreeBlob:
		pm = r.treePM
	case restic.DataBlob:
		pm = r.dataPM
	default:
		panic(fmt.Sprintf("invalid type: %v", t))
	}

	return pm.SaveBlob(ctx, t, id, ciphertext, uncompressedLength)
}

func (r *Repository) compressUnpacked(p []byte) ([]byte, error) {
	// compression is only available starting from version 2
	if r.cfg.Version < 2 {
		return p, nil
	}

	// version byte
	out := []byte{2}
	out = r.getZstdEncoder().EncodeAll(p, out)
	return out, nil
}

func (r *Repository) decompressUnpacked(p []byte) ([]byte, error) {
	// compression is only available starting from version 2
	if r.cfg.Version < 2 {
		return p, nil
	}

	if len(p) == 0 {
		// too short for version header
		return p, nil
	}
	if p[0] == '[' || p[0] == '{' {
		// probably raw JSON
		return p, nil
	}
	// version
	if p[0] != 2 {
		return nil, errors.New("unsupported encoding format")
	}

	return r.getZstdDecoder().DecodeAll(p[1:], nil)
}
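
// The framing used by compressUnpacked/decompressUnpacked is a single version
// byte (2) followed by the zstd stream; legacy uncompressed files are
// recognized by their first byte being raw JSON ('{' or '['). A round-trip
// sketch (error handling elided):
//
//	packed, _ := r.compressUnpacked(data)    // on repo v2: 0x02 + zstd(data)
//	plain, _ := r.decompressUnpacked(packed) // back to the original bytes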

// SaveUnpacked encrypts data and stores it in the backend. Returned is the
// storage hash.
func (r *Repository) SaveUnpacked(ctx context.Context, t restic.FileType, p []byte) (id restic.ID, err error) {
	if t != restic.ConfigFile {
		p, err = r.compressUnpacked(p)
		if err != nil {
			return restic.ID{}, err
		}
	}

	ciphertext := crypto.NewBlobBuffer(len(p))
	ciphertext = ciphertext[:0]
	nonce := crypto.NewRandomNonce()
	ciphertext = append(ciphertext, nonce...)

	ciphertext = r.key.Seal(ciphertext, nonce, p, nil)

	if t == restic.ConfigFile {
		id = restic.ID{}
	} else {
		id = restic.Hash(ciphertext)
	}
	h := backend.Handle{Type: t, Name: id.String()}

	err = r.be.Save(ctx, h, backend.NewByteReader(ciphertext, r.be.Hasher()))
	if err != nil {
		debug.Log("error saving blob %v: %v", h, err)
		return restic.ID{}, err
	}

	debug.Log("blob %v saved", h)
	return id, nil
}

// Flush saves all remaining packs and the index.
func (r *Repository) Flush(ctx context.Context) error {
	if err := r.flushPacks(ctx); err != nil {
		return err
	}

	// Save index after flushing only if noAutoIndexUpdate is not set
	if r.noAutoIndexUpdate {
		return nil
	}
	return r.idx.SaveIndex(ctx, r)
}

// StartPackUploader starts the pack uploader goroutines. It must be called
// exactly once before blobs are saved; completion of the uploaders is
// propagated to wg.
func (r *Repository) StartPackUploader(ctx context.Context, wg *errgroup.Group) {
	if r.packerWg != nil {
		panic("uploader already started")
	}

	innerWg, ctx := errgroup.WithContext(ctx)
	r.packerWg = innerWg
	r.uploader = newPackerUploader(ctx, innerWg, r, r.be.Connections())
	r.treePM = newPackerManager(r.key, restic.TreeBlob, r.PackSize(), r.uploader.QueuePacker)
	r.dataPM = newPackerManager(r.key, restic.DataBlob, r.PackSize(), r.uploader.QueuePacker)

	wg.Go(func() error {
		return innerWg.Wait()
	})
}

// flushPacks saves all remaining packs.
func (r *Repository) flushPacks(ctx context.Context) error {
	if r.packerWg == nil {
		return nil
	}

	err := r.treePM.Flush(ctx)
	if err != nil {
		return err
	}
	err = r.dataPM.Flush(ctx)
	if err != nil {
		return err
	}
	r.uploader.TriggerShutdown()
	err = r.packerWg.Wait()

	r.treePM = nil
	r.dataPM = nil
	r.uploader = nil
	r.packerWg = nil

	return err
}
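
// uploadLifecycleSketch illustrates the intended call order: start the
// uploader goroutines, save blobs from within the same errgroup, then Flush
// to upload the final partial packs and (unless disabled) the index.
func uploadLifecycleSketch(ctx context.Context, r *Repository, chunks [][]byte) error {
	wg, wgCtx := errgroup.WithContext(ctx)
	r.StartPackUploader(wgCtx, wg)

	wg.Go(func() error {
		for _, chunk := range chunks {
			// a null ID makes SaveBlob compute the content hash itself
			if _, _, _, err := r.SaveBlob(wgCtx, restic.DataBlob, chunk, restic.ID{}, false); err != nil {
				return err
			}
		}
		return r.Flush(wgCtx)
	})
	return wg.Wait()
}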

// Backend returns the backend for the repository.
func (r *Repository) Backend() backend.Backend {
	return r.be
}

// Connections returns the number of concurrent backend connections.
func (r *Repository) Connections() uint {
	return r.be.Connections()
}

// Index returns the currently used MasterIndex.
func (r *Repository) Index() restic.MasterIndex {
	return r.idx
}

// SetIndex instructs the repository to use the given index.
func (r *Repository) SetIndex(i restic.MasterIndex) error {
	r.idx = i.(*index.MasterIndex)
	return r.prepareCache()
}

// LoadIndex loads all index files from the backend in parallel and stores
// them in the master index.
func (r *Repository) LoadIndex(ctx context.Context, p *progress.Counter) error {
	debug.Log("Loading index")

	indexList, err := restic.MemorizeList(ctx, r, restic.IndexFile)
	if err != nil {
		return err
	}

	if p != nil {
		var numIndexFiles uint64
		err := indexList.List(ctx, restic.IndexFile, func(id restic.ID, size int64) error {
			numIndexFiles++
			return nil
		})
		if err != nil {
			return err
		}
		p.SetMax(numIndexFiles)
		defer p.Done()
	}

	err = index.ForAllIndexes(ctx, indexList, r, func(id restic.ID, idx *index.Index, oldFormat bool, err error) error {
		if err != nil {
			return err
		}
		r.idx.Insert(idx)
		if p != nil {
			p.Add(1)
		}
		return nil
	})
	if err != nil {
		return err
	}

	err = r.idx.MergeFinalIndexes()
	if err != nil {
		return err
	}

	// Trigger GC to reset garbage collection threshold
	runtime.GC()

	if r.cfg.Version < 2 {
		// sanity check: a version 1 repo must not contain compressed blobs
		ctx, cancel := context.WithCancel(ctx)
		defer cancel()

		invalidIndex := false
		r.idx.Each(ctx, func(blob restic.PackedBlob) {
			if blob.IsCompressed() {
				invalidIndex = true
			}
		})
		if invalidIndex {
			return errors.New("index uses feature not supported by repository version 1")
		}
	}

	// remove index files from the cache which have been removed in the repo
	return r.prepareCache()
}
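
// loadIndexSketch is an illustrative sketch: the progress counter is
// optional, so callers that do not report progress can simply pass nil.
func loadIndexSketch(ctx context.Context, r *Repository) error {
	return r.LoadIndex(ctx, nil)
}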

// CreateIndexFromPacks creates a new index by reading all given pack files (with sizes).
// The index is added to the MasterIndex but not marked as finalized.
// Returned is the list of pack files which could not be read.
func (r *Repository) CreateIndexFromPacks(ctx context.Context, packsize map[restic.ID]int64, p *progress.Counter) (invalid restic.IDs, err error) {
	var m sync.Mutex

	debug.Log("Loading index from pack files")

	// track spawned goroutines using wg, create a new context which is
	// cancelled as soon as an error occurs.
	wg, ctx := errgroup.WithContext(ctx)

	type FileInfo struct {
		restic.ID
		Size int64
	}
	ch := make(chan FileInfo)

	// send list of pack files through ch, which is closed afterwards
	wg.Go(func() error {
		defer close(ch)
		for id, size := range packsize {
			select {
			case <-ctx.Done():
				return ctx.Err()
			case ch <- FileInfo{id, size}:
			}
		}
		return nil
	})

	// a worker receives a pack ID from ch, reads the pack contents, and adds them to idx
	worker := func() error {
		for fi := range ch {
			entries, _, err := r.ListPack(ctx, fi.ID, fi.Size)
			if err != nil {
				debug.Log("unable to list pack file %v", fi.ID.Str())
				m.Lock()
				invalid = append(invalid, fi.ID)
				m.Unlock()
			}
			r.idx.StorePack(fi.ID, entries)
			p.Add(1)
		}

		return nil
	}

	// decoding the pack header is usually quite fast, thus we are primarily IO-bound
	workerCount := int(r.Connections())
	// run workers on ch
	for i := 0; i < workerCount; i++ {
		wg.Go(worker)
	}

	err = wg.Wait()
	if err != nil {
		return invalid, err
	}

	return invalid, nil
}

// prepareCache initializes the local cache: it drops cached index and pack
// files which are no longer present in the repo.
func (r *Repository) prepareCache() error {
	if r.Cache == nil {
		return nil
	}

	indexIDs := r.idx.IDs()
	debug.Log("prepare cache with %d index files", len(indexIDs))

	// clear old index files
	err := r.Cache.Clear(restic.IndexFile, indexIDs)
	if err != nil {
		fmt.Fprintf(os.Stderr, "error clearing index files in cache: %v\n", err)
	}

	packs := r.idx.Packs(restic.NewIDSet())

	// clear old packs
	err = r.Cache.Clear(restic.PackFile, packs)
	if err != nil {
		fmt.Fprintf(os.Stderr, "error clearing pack files in cache: %v\n", err)
	}

	return nil
}

// SearchKey finds a key with the supplied password, afterwards the config is
// read and parsed. It tries at most maxKeys key files in the repo.
func (r *Repository) SearchKey(ctx context.Context, password string, maxKeys int, keyHint string) error {
	key, err := SearchKey(ctx, r, password, maxKeys, keyHint)
	if err != nil {
		return err
	}

	oldKey := r.key
	oldKeyID := r.keyID

	r.key = key.master
	r.keyID = key.ID()
	cfg, err := restic.LoadConfig(ctx, r)
	if err != nil {
		r.key = oldKey
		r.keyID = oldKeyID

		if err == crypto.ErrUnauthenticated {
			return fmt.Errorf("config or key %v is damaged: %w", key.ID(), err)
		}
		return fmt.Errorf("config cannot be loaded: %w", err)
	}

	r.setConfig(cfg)
	return nil
}

// Init creates a new master key with the supplied password, initializes and
// saves the repository config.
func (r *Repository) Init(ctx context.Context, version uint, password string, chunkerPolynomial *chunker.Pol) error {
	if version > restic.MaxRepoVersion {
		return fmt.Errorf("repository version %v too high", version)
	}

	if version < restic.MinRepoVersion {
		return fmt.Errorf("repository version %v too low", version)
	}

	_, err := r.be.Stat(ctx, backend.Handle{Type: restic.ConfigFile})
	if err != nil && !r.be.IsNotExist(err) {
		return err
	}
	if err == nil {
		return errors.New("repository master key and config already initialized")
	}

	cfg, err := restic.CreateConfig(version)
	if err != nil {
		return err
	}
	if chunkerPolynomial != nil {
		cfg.ChunkerPolynomial = *chunkerPolynomial
	}

	return r.init(ctx, password, cfg)
}

// init creates a new master key with the supplied password and uses it to save
// the config into the repo.
func (r *Repository) init(ctx context.Context, password string, cfg restic.Config) error {
	key, err := createMasterKey(ctx, r, password)
	if err != nil {
		return err
	}

	r.key = key.master
	r.keyID = key.ID()
	r.setConfig(cfg)
	return restic.SaveConfig(ctx, r, cfg)
}
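
// initRepositorySketch is an illustrative sketch for Init: a fresh repository
// needs a format version and a password; passing nil for the chunker
// polynomial lets restic.CreateConfig pick a random one.
func initRepositorySketch(ctx context.Context, r *Repository, password string) error {
	return r.Init(ctx, restic.MaxRepoVersion, password, nil)
}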

// Key returns the current master key.
func (r *Repository) Key() *crypto.Key {
	return r.key
}

// KeyID returns the id of the current key in the backend.
func (r *Repository) KeyID() restic.ID {
	return r.keyID
}

// List runs fn for all files of type t in the repo.
func (r *Repository) List(ctx context.Context, t restic.FileType, fn func(restic.ID, int64) error) error {
	return r.be.List(ctx, t, func(fi backend.FileInfo) error {
		id, err := restic.ParseID(fi.Name)
		if err != nil {
			debug.Log("unable to parse %v as an ID", fi.Name)
			return nil
		}
		return fn(id, fi.Size)
	})
}

// ListPack returns the list of blobs saved in the pack id and the length of
// the pack header.
func (r *Repository) ListPack(ctx context.Context, id restic.ID, size int64) ([]restic.Blob, uint32, error) {
	h := backend.Handle{Type: restic.PackFile, Name: id.String()}

	return pack.List(r.Key(), backend.ReaderAt(ctx, r.Backend(), h), size)
}
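
// listSnapshotIDsSketch is an illustrative sketch for List: the callback
// receives the ID and size of every file of the given type, and returning a
// non-nil error aborts the listing.
func listSnapshotIDsSketch(ctx context.Context, r *Repository) (restic.IDs, error) {
	var ids restic.IDs
	err := r.List(ctx, restic.SnapshotFile, func(id restic.ID, size int64) error {
		ids = append(ids, id)
		return nil
	})
	return ids, err
}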

// Delete calls backend.Delete() if implemented, and returns an error
// otherwise.
func (r *Repository) Delete(ctx context.Context) error {
	return r.be.Delete(ctx)
}

// Close closes the repository by closing the backend.
func (r *Repository) Close() error {
	return r.be.Close()
}

// SaveBlob saves a blob of type t into the repository.
// It takes care that no duplicates are saved; this can be overwritten
// by setting storeDuplicate to true.
// If id is the null id, it will be computed and returned.
// Also returns if the blob was already known before.
// If the blob was not known before, it returns the number of bytes the blob
// occupies in the repo (compressed or not, including encryption overhead).
func (r *Repository) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicate bool) (newID restic.ID, known bool, size int, err error) {

	// compute plaintext hash if not already set
	if id.IsNull() {
		// Special case the hash calculation for all zero chunks. This is especially
		// useful for sparse files containing large all zero regions. For these we can
		// process chunks as fast as we can read them from disk.
		if len(buf) == chunker.MinSize && restic.ZeroPrefixLen(buf) == chunker.MinSize {
			newID = ZeroChunk()
		} else {
			newID = restic.Hash(buf)
		}
	} else {
		newID = id
	}

	// first try to add to pending blobs; if not successful, this blob is already known
	known = !r.idx.AddPending(restic.BlobHandle{ID: newID, Type: t})

	// only save when needed or explicitly told
	if !known || storeDuplicate {
		size, err = r.saveAndEncrypt(ctx, t, buf, newID)
	}

	return newID, known, size, err
}
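
// saveBlobDedupSketch illustrates the deduplication contract of SaveBlob:
// saving identical content twice reports known == true (and size == 0) the
// second time, because the blob is only queued for upload once.
func saveBlobDedupSketch(ctx context.Context, r *Repository, buf []byte) error {
	// first save: the blob is unknown and gets queued for upload
	id, _, _, err := r.SaveBlob(ctx, restic.DataBlob, buf, restic.ID{}, false)
	if err != nil {
		return err
	}
	// second save of the same content: known == true, nothing is queued
	_, known, size, err := r.SaveBlob(ctx, restic.DataBlob, buf, id, false)
	debug.Log("duplicate save: known=%v, size=%v", known, size)
	return err
}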

type backendLoadFn func(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error

// Skip sections with more than 4MB unused blobs
const maxUnusedRange = 4 * 1024 * 1024

// LoadBlobsFromPack loads the listed blobs from the specified pack file. The
// plaintext blob is passed to the handleBlobFn callback or an error if
// decryption failed or the blob hash does not match.
// handleBlobFn is called at most once for each blob. If the callback returns
// an error, then LoadBlobsFromPack will abort and not retry it.
func (r *Repository) LoadBlobsFromPack(ctx context.Context, packID restic.ID, blobs []restic.Blob, handleBlobFn func(blob restic.BlobHandle, buf []byte, err error) error) error {
	return streamPack(ctx, r.Backend().Load, r.key, packID, blobs, handleBlobFn)
}

func streamPack(ctx context.Context, beLoad backendLoadFn, key *crypto.Key, packID restic.ID, blobs []restic.Blob, handleBlobFn func(blob restic.BlobHandle, buf []byte, err error) error) error {
	if len(blobs) == 0 {
		// nothing to do
		return nil
	}

	sort.Slice(blobs, func(i, j int) bool {
		return blobs[i].Offset < blobs[j].Offset
	})

	lowerIdx := 0
	lastPos := blobs[0].Offset
	for i := 0; i < len(blobs); i++ {
		if blobs[i].Offset < lastPos {
			// don't wait for streamPackPart to fail
			return errors.Errorf("overlapping blobs in pack %v", packID)
		}
		if blobs[i].Offset-lastPos > maxUnusedRange {
			// load everything up to the skipped file section
			err := streamPackPart(ctx, beLoad, key, packID, blobs[lowerIdx:i], handleBlobFn)
			if err != nil {
				return err
			}
			lowerIdx = i
		}
		lastPos = blobs[i].Offset + blobs[i].Length
	}
	// load remainder
	return streamPackPart(ctx, beLoad, key, packID, blobs[lowerIdx:], handleBlobFn)
}
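
// loadBlobsFromPackSketch illustrates the callback contract of
// LoadBlobsFromPack: each listed blob is delivered exactly once, per-blob
// decryption or hash failures arrive via the err parameter, and returning a
// non-nil error aborts the whole stream.
func loadBlobsFromPackSketch(ctx context.Context, r *Repository, packID restic.ID, blobs []restic.Blob) error {
	return r.LoadBlobsFromPack(ctx, packID, blobs, func(blob restic.BlobHandle, buf []byte, err error) error {
		if err != nil {
			// tolerate broken blobs, but keep streaming the remaining ones
			debug.Log("blob %v failed: %v", blob, err)
			return nil
		}
		// use buf before returning; it may be reused for the next blob
		return nil
	})
}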

func streamPackPart(ctx context.Context, beLoad backendLoadFn, key *crypto.Key, packID restic.ID, blobs []restic.Blob, handleBlobFn func(blob restic.BlobHandle, buf []byte, err error) error) error {
	h := backend.Handle{Type: restic.PackFile, Name: packID.String(), IsMetadata: false}

	dataStart := blobs[0].Offset
	dataEnd := blobs[len(blobs)-1].Offset + blobs[len(blobs)-1].Length

	debug.Log("streaming pack %v (%d to %d bytes), blobs: %v", packID, dataStart, dataEnd, len(blobs))

	dec, err := zstd.NewReader(nil)
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	ctx, cancel := context.WithCancel(ctx)
	// ensure the derived context is always released
	defer cancel()
	// stream blobs in pack
	err = beLoad(ctx, h, int(dataEnd-dataStart), int64(dataStart), func(rd io.Reader) error {
		// prevent callbacks after cancellation
		if ctx.Err() != nil {
			return ctx.Err()
		}
		bufferSize := int(dataEnd - dataStart)
		if bufferSize > MaxStreamBufferSize {
			bufferSize = MaxStreamBufferSize
		}
		bufRd := bufio.NewReaderSize(rd, bufferSize)
		it := NewPackBlobIterator(packID, bufRd, dataStart, blobs, key, dec)

		for {
			val, err := it.Next()
			if err == ErrPackEOF {
				break
			} else if err != nil {
				return err
			}

			err = handleBlobFn(val.Handle, val.Plaintext, val.Err)
			if err != nil {
				cancel()
				return backoff.Permanent(err)
			}
			// ensure that each blob is only passed once to handleBlobFn
			blobs = blobs[1:]
		}
		return nil
	})
	return errors.Wrap(err, "StreamPack")
}

// PackBlobIterator sequentially reads, decrypts and (if necessary)
// decompresses the blobs of a pack file from an offset-sorted list.
type PackBlobIterator struct {
	packID        restic.ID
	rd            *bufio.Reader
	currentOffset uint

	blobs []restic.Blob
	key   *crypto.Key
	dec   *zstd.Decoder

	buf    []byte
	decode []byte
}

// PackBlobValue is the result of decoding a single blob.
type PackBlobValue struct {
	Handle    restic.BlobHandle
	Plaintext []byte
	Err       error
}

var ErrPackEOF = errors.New("reached EOF of pack file")

func NewPackBlobIterator(packID restic.ID, rd *bufio.Reader, currentOffset uint,
	blobs []restic.Blob, key *crypto.Key, dec *zstd.Decoder) *PackBlobIterator {
	return &PackBlobIterator{
		packID:        packID,
		rd:            rd,
		currentOffset: currentOffset,
		blobs:         blobs,
		key:           key,
		dec:           dec,
	}
}

// Next returns the next blob, an error, or ErrPackEOF if all blobs were read.
func (b *PackBlobIterator) Next() (PackBlobValue, error) {
	if len(b.blobs) == 0 {
		return PackBlobValue{}, ErrPackEOF
	}

	entry := b.blobs[0]
	b.blobs = b.blobs[1:]

	skipBytes := int(entry.Offset - b.currentOffset)
	if skipBytes < 0 {
		return PackBlobValue{}, errors.Errorf("overlapping blobs in pack %v", b.packID)
	}

	_, err := b.rd.Discard(skipBytes)
	if err != nil {
		return PackBlobValue{}, err
	}
	b.currentOffset = entry.Offset

	h := restic.BlobHandle{ID: entry.ID, Type: entry.Type}
	debug.Log("  process blob %v, skipped %d, %v", h, skipBytes, entry)

	if uint(cap(b.buf)) < entry.Length {
		b.buf = make([]byte, entry.Length)
	}
	b.buf = b.buf[:entry.Length]

	n, err := io.ReadFull(b.rd, b.buf)
	if err != nil {
		debug.Log("    read error %v", err)
		return PackBlobValue{}, errors.Wrap(err, "ReadFull")
	}

	if n != len(b.buf) {
		return PackBlobValue{}, errors.Errorf("read blob %v from %v: not enough bytes read, want %v, got %v",
			h, b.packID.Str(), len(b.buf), n)
	}
	b.currentOffset = entry.Offset + entry.Length

	if int(entry.Length) <= b.key.NonceSize() {
		debug.Log("%v", b.blobs)
		return PackBlobValue{}, errors.Errorf("invalid blob length %v", entry)
	}

	// decryption errors are likely permanent, give the caller a chance to skip them
	nonce, ciphertext := b.buf[:b.key.NonceSize()], b.buf[b.key.NonceSize():]
	plaintext, err := b.key.Open(ciphertext[:0], nonce, ciphertext, nil)
	if err == nil && entry.IsCompressed() {
		// DecodeAll will allocate a slice if it is not large enough since it
		// knows the decompressed size (because we're using EncodeAll)
		b.decode, err = b.dec.DecodeAll(plaintext, b.decode[:0])
		plaintext = b.decode
		if err != nil {
			err = errors.Errorf("decompressing blob %v failed: %v", h, err)
		}
	}
	if err == nil {
		id := restic.Hash(plaintext)
		if !id.Equal(entry.ID) {
			debug.Log("read blob %v/%v from %v: wrong data returned, hash is %v",
				h.Type, h.ID, b.packID.Str(), id)
			err = errors.Errorf("read blob %v from %v: wrong data returned, hash is %v",
				h, b.packID.Str(), id)
		}
	}

	return PackBlobValue{entry.BlobHandle, plaintext, err}, nil
}

var zeroChunkOnce sync.Once
var zeroChunkID restic.ID

// ZeroChunk computes and returns (cached) the ID of an all-zero chunk with size chunker.MinSize
func ZeroChunk() restic.ID {
	zeroChunkOnce.Do(func() {
		zeroChunkID = restic.Hash(make([]byte, chunker.MinSize))
	})
	return zeroChunkID
}