2017-07-08 13:38:48 +00:00
|
|
|
package azure
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
2018-05-31 19:26:28 +00:00
|
|
|
"encoding/base64"
|
2017-07-08 13:38:48 +00:00
|
|
|
"io"
|
2017-08-05 19:25:38 +00:00
|
|
|
"net/http"
|
2017-07-08 13:38:48 +00:00
|
|
|
"os"
|
|
|
|
"path"
|
|
|
|
"strings"
|
|
|
|
|
|
|
|
"github.com/restic/restic/internal/backend"
|
|
|
|
"github.com/restic/restic/internal/debug"
|
2017-08-05 19:46:15 +00:00
|
|
|
"github.com/restic/restic/internal/errors"
|
2017-07-08 13:38:48 +00:00
|
|
|
"github.com/restic/restic/internal/restic"
|
2020-12-17 11:47:53 +00:00
|
|
|
|
|
|
|
"github.com/Azure/azure-sdk-for-go/storage"
|
|
|
|
"github.com/cenkalti/backoff/v4"
|
2017-07-08 13:38:48 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
// Backend stores data on an azure endpoint.
type Backend struct {
	// accountName is the Azure storage account this backend talks to.
	accountName string
	// container is the blob container holding all repository files.
	container *storage.Container
	// sem limits the number of concurrent requests to the service.
	sem *backend.Semaphore
	// prefix is the path inside the container under which all files live.
	prefix string
	// listMaxItems is the page size (MaxResults) used by List.
	listMaxItems int

	// Layout maps restic handles to object names inside the container.
	backend.Layout
}
|
|
|
|
|
2017-09-18 10:01:54 +00:00
|
|
|
// defaultListMaxItems is the default page size for List requests; it can
// be overridden via SetListMaxItems.
const defaultListMaxItems = 5000

// make sure that *Backend implements backend.Backend
var _ restic.Backend = &Backend{}
|
|
|
|
|
2017-09-24 18:04:23 +00:00
|
|
|
func open(cfg Config, rt http.RoundTripper) (*Backend, error) {
|
2017-07-08 13:38:48 +00:00
|
|
|
debug.Log("open, config %#v", cfg)
|
|
|
|
|
|
|
|
client, err := storage.NewBasicClient(cfg.AccountName, cfg.AccountKey)
|
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Wrap(err, "NewBasicClient")
|
|
|
|
}
|
|
|
|
|
2017-09-24 18:04:23 +00:00
|
|
|
client.HTTPClient = &http.Client{Transport: rt}
|
2017-08-05 19:25:38 +00:00
|
|
|
|
2017-07-08 13:38:48 +00:00
|
|
|
service := client.GetBlobService()
|
|
|
|
|
|
|
|
sem, err := backend.NewSemaphore(cfg.Connections)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
be := &Backend{
|
|
|
|
container: service.GetContainerReference(cfg.Container),
|
|
|
|
accountName: cfg.AccountName,
|
|
|
|
sem: sem,
|
|
|
|
prefix: cfg.Prefix,
|
|
|
|
Layout: &backend.DefaultLayout{
|
|
|
|
Path: cfg.Prefix,
|
|
|
|
Join: path.Join,
|
|
|
|
},
|
2017-09-18 10:01:54 +00:00
|
|
|
listMaxItems: defaultListMaxItems,
|
2017-07-08 13:38:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return be, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Open opens the Azure backend at specified container.
|
2018-05-31 19:26:28 +00:00
|
|
|
func Open(cfg Config, rt http.RoundTripper) (*Backend, error) {
|
2017-09-24 18:04:23 +00:00
|
|
|
return open(cfg, rt)
|
2017-07-08 13:38:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Create opens the Azure backend at specified container and creates the container if
|
|
|
|
// it does not exist yet.
|
2018-05-31 19:26:28 +00:00
|
|
|
func Create(cfg Config, rt http.RoundTripper) (*Backend, error) {
|
2017-09-24 18:04:23 +00:00
|
|
|
be, err := open(cfg, rt)
|
2017-07-08 13:38:48 +00:00
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Wrap(err, "open")
|
|
|
|
}
|
|
|
|
|
|
|
|
options := storage.CreateContainerOptions{
|
|
|
|
Access: storage.ContainerAccessTypePrivate,
|
|
|
|
}
|
|
|
|
|
|
|
|
_, err = be.container.CreateIfNotExists(&options)
|
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Wrap(err, "container.CreateIfNotExists")
|
|
|
|
}
|
|
|
|
|
|
|
|
return be, nil
|
|
|
|
}
|
|
|
|
|
2017-09-18 10:01:54 +00:00
|
|
|
// SetListMaxItems sets the number of list items to load per request.
// Larger values mean fewer round trips when listing many files.
func (be *Backend) SetListMaxItems(i int) {
	be.listMaxItems = i
}
|
|
|
|
|
2017-07-08 13:38:48 +00:00
|
|
|
// IsNotExist returns true if the error is caused by a not existing file.
|
|
|
|
func (be *Backend) IsNotExist(err error) bool {
|
|
|
|
debug.Log("IsNotExist(%T, %#v)", err, err)
|
|
|
|
return os.IsNotExist(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Join combines path components with slashes.
|
|
|
|
func (be *Backend) Join(p ...string) string {
|
|
|
|
return path.Join(p...)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Location returns this backend's location (the container name).
|
|
|
|
func (be *Backend) Location() string {
|
|
|
|
return be.Join(be.container.Name, be.prefix)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Path returns the path in the bucket that is used for this backend.
// It is the prefix under which all object names are stored.
func (be *Backend) Path() string {
	return be.prefix
}
|
|
|
|
|
2020-12-18 22:36:45 +00:00
|
|
|
type azureAdapter struct {
|
|
|
|
restic.RewindReader
|
|
|
|
}
|
|
|
|
|
|
|
|
func (azureAdapter) Close() error { return nil }
|
|
|
|
|
|
|
|
func (a *azureAdapter) Len() int {
|
|
|
|
return int(a.Length())
|
|
|
|
}
|
|
|
|
|
2017-07-08 13:38:48 +00:00
|
|
|
// Save stores data in the backend at the handle.
func (be *Backend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error {
	if err := h.Valid(); err != nil {
		// an invalid handle is a caller bug; retrying cannot help
		return backoff.Permanent(err)
	}

	objName := be.Filename(h)

	debug.Log("Save %v at %v", h, objName)

	// limit the number of concurrent requests
	be.sem.GetToken()

	debug.Log("InsertObject(%v, %v)", be.container.Name, objName)

	var err error
	if rd.Length() < 256*1024*1024 {
		// wrap the reader so that net/http client cannot close the reader;
		// CreateBlockBlobFromReader reads the length from `Len()`
		dataReader := azureAdapter{rd}

		// if it's smaller than 256MiB, then just create the file directly from the reader
		err = be.container.GetBlobReference(objName).CreateBlockBlobFromReader(dataReader, nil)
	} else {
		// otherwise upload in multiple blocks via saveLarge
		err = be.saveLarge(ctx, objName, rd)
	}

	be.sem.ReleaseToken()
	debug.Log("%v, err %#v", objName, err)

	return errors.Wrap(err, "CreateBlockBlobFromReader")
}
|
|
|
|
|
2018-05-31 19:26:28 +00:00
|
|
|
// saveLarge uploads objName in chunks of 100 MiB as separate blocks and
// then commits the block list; Save uses it for payloads of 256 MiB or
// more.
func (be *Backend) saveLarge(ctx context.Context, objName string, rd restic.RewindReader) error {
	// create the file on the server
	file := be.container.GetBlobReference(objName)
	err := file.CreateBlockBlob(nil)
	if err != nil {
		return errors.Wrap(err, "CreateBlockBlob")
	}

	// read the data, in 100 MiB chunks
	buf := make([]byte, 100*1024*1024)
	var blocks []storage.Block
	// uploadedBytes tracks the total so it can be checked against rd.Length()
	uploadedBytes := 0

	for {
		n, err := io.ReadFull(rd, buf)
		if err == io.ErrUnexpectedEOF {
			// a short read means this is the final, partial chunk
			err = nil
		}
		if err == io.EOF {
			// end of file reached, no bytes have been read at all
			break
		}

		if err != nil {
			return errors.Wrap(err, "ReadFull")
		}

		// NOTE(review): this shrinks buf for all following iterations; it is
		// only safe because a short ReadFull implies the next read hits EOF
		// and breaks out of the loop.
		buf = buf[:n]
		uploadedBytes += n

		// upload it as a new "block", use the base64 hash for the ID
		h := restic.Hash(buf)
		id := base64.StdEncoding.EncodeToString(h[:])
		debug.Log("PutBlock %v with %d bytes", id, len(buf))
		err = file.PutBlock(id, buf, nil)
		if err != nil {
			return errors.Wrap(err, "PutBlock")
		}

		blocks = append(blocks, storage.Block{
			ID:     id,
			Status: "Uncommitted",
		})
	}

	// sanity check: everything the reader promised must have been uploaded
	if uploadedBytes != int(rd.Length()) {
		return errors.Errorf("wrote %d bytes instead of the expected %d bytes", uploadedBytes, rd.Length())
	}

	debug.Log("uploaded %d parts: %v", len(blocks), blocks)

	// commit the uploaded blocks as the blob's content
	err = file.PutBlockList(blocks, nil)
	debug.Log("PutBlockList returned %v", err)
	return errors.Wrap(err, "PutBlockList")
}
|
|
|
|
|
2017-07-08 13:38:48 +00:00
|
|
|
// wrapReader wraps an io.ReadCloser to run an additional function on Close.
type wrapReader struct {
	io.ReadCloser
	f func()
}

// Close closes the wrapped ReadCloser first, then runs the hook, and
// returns the error from the wrapped Close call.
func (wr wrapReader) Close() error {
	closeErr := wr.ReadCloser.Close()
	wr.f()
	return closeErr
}
|
|
|
|
|
2018-01-17 04:59:16 +00:00
|
|
|
// Load runs fn with a reader that yields the contents of the file at h at the
// given offset. Opening the reader is delegated to be.openReader via
// backend.DefaultLoad.
func (be *Backend) Load(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
	return backend.DefaultLoad(ctx, h, length, offset, be.openReader, fn)
}
|
|
|
|
|
|
|
|
// openReader returns a reader for the blob at h, starting at offset and
// delivering length bytes (or the rest of the blob if length == 0). The
// caller must close the returned reader; closing releases the semaphore
// token acquired here.
func (be *Backend) openReader(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
	debug.Log("Load %v, length %v, offset %v from %v", h, length, offset, be.Filename(h))
	if err := h.Valid(); err != nil {
		// an invalid handle is a caller bug; do not retry
		return nil, backoff.Permanent(err)
	}

	if offset < 0 {
		return nil, errors.New("offset is negative")
	}

	if length < 0 {
		return nil, errors.Errorf("invalid length %d", length)
	}

	objName := be.Filename(h)
	blob := be.container.GetBlobReference(objName)

	start := uint64(offset)
	var end uint64

	if length > 0 {
		// the blob range is inclusive, hence the -1
		end = uint64(offset + int64(length) - 1)
	} else {
		// NOTE(review): End == 0 appears to make the SDK read from start to
		// the end of the blob -- confirm against the storage SDK docs
		end = 0
	}

	// the token acquired here is held until the returned reader is closed
	be.sem.GetToken()

	rd, err := blob.GetRange(&storage.GetBlobRangeOptions{Range: &storage.BlobRange{Start: start, End: end}})
	if err != nil {
		be.sem.ReleaseToken()
		return nil, err
	}

	// release the token only when the caller closes the reader
	closeRd := wrapReader{
		ReadCloser: rd,
		f: func() {
			debug.Log("Close()")
			be.sem.ReleaseToken()
		},
	}

	return closeRd, err
}
|
|
|
|
|
|
|
|
// Stat returns information about a blob.
|
2017-10-31 11:32:30 +00:00
|
|
|
func (be *Backend) Stat(ctx context.Context, h restic.Handle) (restic.FileInfo, error) {
|
2017-07-08 13:38:48 +00:00
|
|
|
debug.Log("%v", h)
|
|
|
|
|
|
|
|
objName := be.Filename(h)
|
|
|
|
blob := be.container.GetBlobReference(objName)
|
|
|
|
|
2017-10-31 11:32:30 +00:00
|
|
|
be.sem.GetToken()
|
|
|
|
err := blob.GetProperties(nil)
|
|
|
|
be.sem.ReleaseToken()
|
|
|
|
|
|
|
|
if err != nil {
|
2017-07-08 13:38:48 +00:00
|
|
|
debug.Log("blob.GetProperties err %v", err)
|
|
|
|
return restic.FileInfo{}, errors.Wrap(err, "blob.GetProperties")
|
|
|
|
}
|
|
|
|
|
2018-01-20 18:34:38 +00:00
|
|
|
fi := restic.FileInfo{
|
|
|
|
Size: int64(blob.Properties.ContentLength),
|
|
|
|
Name: h.Name,
|
|
|
|
}
|
|
|
|
return fi, nil
|
2017-07-08 13:38:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Test returns true if a blob of the given type and name exists in the backend.
|
|
|
|
func (be *Backend) Test(ctx context.Context, h restic.Handle) (bool, error) {
|
|
|
|
objName := be.Filename(h)
|
2017-10-31 11:32:30 +00:00
|
|
|
|
|
|
|
be.sem.GetToken()
|
2017-07-08 13:38:48 +00:00
|
|
|
found, err := be.container.GetBlobReference(objName).Exists()
|
2017-10-31 11:32:30 +00:00
|
|
|
be.sem.ReleaseToken()
|
|
|
|
|
2017-07-08 13:38:48 +00:00
|
|
|
if err != nil {
|
|
|
|
return false, err
|
|
|
|
}
|
|
|
|
return found, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Remove removes the blob with the given name and type.
|
|
|
|
func (be *Backend) Remove(ctx context.Context, h restic.Handle) error {
|
|
|
|
objName := be.Filename(h)
|
2017-10-31 11:32:30 +00:00
|
|
|
|
|
|
|
be.sem.GetToken()
|
2017-07-08 13:38:48 +00:00
|
|
|
_, err := be.container.GetBlobReference(objName).DeleteIfExists(nil)
|
2017-10-31 11:32:30 +00:00
|
|
|
be.sem.ReleaseToken()
|
|
|
|
|
2017-07-08 13:38:48 +00:00
|
|
|
debug.Log("Remove(%v) at %v -> err %v", h, objName, err)
|
|
|
|
return errors.Wrap(err, "client.RemoveObject")
|
|
|
|
}
|
|
|
|
|
2018-01-20 18:34:38 +00:00
|
|
|
// List runs fn for each file in the backend which has the type t. When an
// error occurs (or fn returns an error), List stops and returns it.
func (be *Backend) List(ctx context.Context, t restic.FileType, fn func(restic.FileInfo) error) error {
	debug.Log("listing %v", t)

	prefix, _ := be.Basedir(t)

	// make sure prefix ends with a slash
	if !strings.HasSuffix(prefix, "/") {
		prefix += "/"
	}

	params := storage.ListBlobsParameters{
		MaxResults: uint(be.listMaxItems),
		Prefix:     prefix,
	}

	// fetch result pages until the service returns no continuation marker
	for {
		be.sem.GetToken()
		obj, err := be.container.ListBlobs(params)
		be.sem.ReleaseToken()

		if err != nil {
			return err
		}

		debug.Log("got %v objects", len(obj.Blobs))

		for _, item := range obj.Blobs {
			m := strings.TrimPrefix(item.Name, prefix)
			if m == "" {
				// skip the entry for the directory itself
				continue
			}

			fi := restic.FileInfo{
				Name: path.Base(m),
				Size: item.Properties.ContentLength,
			}

			// stop early if the context was cancelled
			if ctx.Err() != nil {
				return ctx.Err()
			}

			err := fn(fi)
			if err != nil {
				return err
			}

			// check again in case fn cancelled the context
			if ctx.Err() != nil {
				return ctx.Err()
			}

		}

		// an empty NextMarker means this was the last page
		if obj.NextMarker == "" {
			break
		}
		params.Marker = obj.NextMarker
	}

	return ctx.Err()
}
|
|
|
|
|
|
|
|
// Remove keys for a specified backend type.
|
|
|
|
func (be *Backend) removeKeys(ctx context.Context, t restic.FileType) error {
|
2018-01-20 18:34:38 +00:00
|
|
|
return be.List(ctx, t, func(fi restic.FileInfo) error {
|
|
|
|
return be.Remove(ctx, restic.Handle{Type: t, Name: fi.Name})
|
|
|
|
})
|
2017-07-08 13:38:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Delete removes all restic keys in the bucket. It will not remove the bucket itself.
|
|
|
|
func (be *Backend) Delete(ctx context.Context) error {
|
|
|
|
alltypes := []restic.FileType{
|
2020-08-16 09:16:38 +00:00
|
|
|
restic.PackFile,
|
2017-07-08 13:38:48 +00:00
|
|
|
restic.KeyFile,
|
|
|
|
restic.LockFile,
|
|
|
|
restic.SnapshotFile,
|
|
|
|
restic.IndexFile}
|
|
|
|
|
|
|
|
for _, t := range alltypes {
|
|
|
|
err := be.removeKeys(ctx, t)
|
|
|
|
if err != nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return be.Remove(ctx, restic.Handle{Type: restic.ConfigFile})
|
|
|
|
}
|
|
|
|
|
|
|
|
// Close does nothing; this backend holds no resources that need explicit
// cleanup.
func (be *Backend) Close() error { return nil }
|