
Merge pull request #5046 from konidev20/fix-gh-4521-azure-blob-storage-add-support-for-access-tiers

azure: add support for access tiers hot, cool and cold
Michael Eischer 2024-11-11 22:01:52 +01:00 committed by GitHub
commit 1133498ef8
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
4 changed files with 69 additions and 7 deletions


@@ -0,0 +1,21 @@
Enhancement: Add config option to set Microsoft Blob Storage Access Tier
The `azure.access-tier` option can be passed to restic (using `-o`) to
specify the access tier for Microsoft Blob Storage objects created by restic.
The access tier is passed as-is to Microsoft Blob Storage, so it needs to be
understood by the API. The allowed values are `Hot`, `Cool`, or `Cold`.
If unspecified, the default is inferred from the default configured on the
storage account.
You can mix access tiers in the same container, and the setting isn't
stored in the restic repository, so be sure to specify it with each
command that writes to Microsoft Blob Storage.
There is no official `Archive` storage support in restic; use this option at
your own risk. To restore any data, it is still necessary to manually warm up
the required data in the `Archive` tier.
https://github.com/restic/restic/issues/4521
https://github.com/restic/restic/pull/5046


@@ -568,6 +568,10 @@ The number of concurrent connections to the Azure Blob Storage service can be se
``-o azure.connections=10`` switch. By default, at most five parallel connections are
established.
The access tier of the blobs uploaded to the Azure Blob Storage service can be set with the
``-o azure.access-tier=Cool`` switch. The allowed values are ``Hot``, ``Cool``, or ``Cold``.
If unspecified, the default is inferred from the default configured on the storage account.
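For example, to have restic create new blobs in the ``Cool`` tier (the
repository URL and backup path below are placeholders):

    $ restic -r azure:foo:/ -o azure.access-tier=Cool backup ~/work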
Google Cloud Storage
********************


@@ -37,6 +37,8 @@ type Backend struct {
	prefix       string
	listMaxItems int
	layout.Layout

	accessTier blob.AccessTier
}
const saveLargeSize = 256 * 1024 * 1024
@@ -124,17 +126,33 @@ func open(cfg Config, rt http.RoundTripper) (*Backend, error) {
		}
	}

	var accessTier blob.AccessTier
	// if the access tier is not supported, then we will not set the access tier; during the upload process,
	// the value will be inferred from the default configured on the storage account.
	for _, tier := range supportedAccessTiers() {
		if strings.EqualFold(string(tier), cfg.AccessTier) {
			accessTier = tier
			debug.Log(" - using access tier %v", accessTier)
			break
		}
	}

	be := &Backend{
		container:    client,
		cfg:          cfg,
		connections:  cfg.Connections,
		Layout:       layout.NewDefaultLayout(cfg.Prefix, path.Join),
		listMaxItems: defaultListMaxItems,
		accessTier:   accessTier,
	}

	return be, nil
}
func supportedAccessTiers() []blob.AccessTier {
	return []blob.AccessTier{blob.AccessTierHot, blob.AccessTierCool, blob.AccessTierCold, blob.AccessTierArchive}
}
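Note that the lookup in open() above is case-insensitive, and any
unrecognized value falls through to the zero value, so the storage account
default applies. A minimal standalone sketch of that behavior, not part of
this PR (the resolveTier helper is hypothetical; the SDK import is the one
the backend already uses):

package main

import (
	"fmt"
	"strings"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

// resolveTier mirrors the lookup above: supported tier names map to an SDK
// constant regardless of case; anything else yields the zero value.
func resolveTier(input string) blob.AccessTier {
	var tier blob.AccessTier
	for _, t := range []blob.AccessTier{blob.AccessTierHot, blob.AccessTierCool, blob.AccessTierCold, blob.AccessTierArchive} {
		if strings.EqualFold(string(t), input) {
			tier = t
			break
		}
	}
	return tier
}

func main() {
	fmt.Printf("%q\n", resolveTier("cool"))   // "Cool": matched case-insensitively
	fmt.Printf("%q\n", resolveTier("frozen")) // "": unsupported, account default applies
}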
// Open opens the Azure backend at the specified container.
func Open(_ context.Context, cfg Config, rt http.RoundTripper) (*Backend, error) {
	return open(cfg, rt)
@@ -213,25 +231,39 @@ func (be *Backend) Path() string {
	return be.prefix
}
// useAccessTier determines whether to apply the configured access tier to a given file.
// For archive access tier, only data files are stored using that class; metadata
// must remain instantly accessible.
func (be *Backend) useAccessTier(h backend.Handle) bool {
	notArchiveClass := !strings.EqualFold(be.cfg.AccessTier, "archive")
	isDataFile := h.Type == backend.PackFile && !h.IsMetadata
	return isDataFile || notArchiveClass
}
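In effect, with azure.access-tier=Archive only data pack files are archived,
while files restic must read directly (config, snapshots, index, and metadata
packs) keep the account default; with any other tier the setting applies to
everything. A hedged in-package test sketch pinning down that rule, not part
of this PR:

package azure

import (
	"testing"

	"github.com/restic/restic/internal/backend"
)

func TestUseAccessTier(t *testing.T) {
	// Assumed: constructing a Backend with only cfg set is enough here, since
	// useAccessTier reads nothing but be.cfg.AccessTier and the handle.
	archive := &Backend{cfg: Config{AccessTier: "Archive"}}

	// Data pack files may be sent to the Archive tier...
	if !archive.useAccessTier(backend.Handle{Type: backend.PackFile}) {
		t.Error("expected data pack files to use the archive tier")
	}
	// ...but metadata (e.g. packs containing trees) must stay instantly accessible.
	if archive.useAccessTier(backend.Handle{Type: backend.PackFile, IsMetadata: true}) {
		t.Error("expected metadata to skip the archive tier")
	}

	// Any non-archive tier applies to all files, metadata included.
	cool := &Backend{cfg: Config{AccessTier: "Cool"}}
	if !cool.useAccessTier(backend.Handle{Type: backend.SnapshotFile}) {
		t.Error("expected non-archive tiers to apply to every file")
	}
}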
// Save stores data in the backend at the handle.
func (be *Backend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error {
	objName := be.Filename(h)

	debug.Log("InsertObject(%v, %v)", be.cfg.AccountName, objName)

	var accessTier blob.AccessTier
	if be.useAccessTier(h) {
		accessTier = be.accessTier
	}

	var err error
	if rd.Length() < saveLargeSize {
		// if it's smaller than 256 MiB, then just create the file directly from the reader
-		err = be.saveSmall(ctx, objName, rd)
+		err = be.saveSmall(ctx, objName, rd, accessTier)
	} else {
		// otherwise use the more complicated method
-		err = be.saveLarge(ctx, objName, rd)
+		err = be.saveLarge(ctx, objName, rd, accessTier)
	}

	return err
}
-func (be *Backend) saveSmall(ctx context.Context, objName string, rd backend.RewindReader) error {
+func (be *Backend) saveSmall(ctx context.Context, objName string, rd backend.RewindReader, accessTier blob.AccessTier) error {
	blockBlobClient := be.container.NewBlockBlobClient(objName)

	// upload it as a new "block", use the base64 hash for the ID
@@ -252,11 +284,13 @@ func (be *Backend) saveSmall(ctx context.Context, objName string, rd backend.Rew
	}

	blocks := []string{id}
-	_, err = blockBlobClient.CommitBlockList(ctx, blocks, &blockblob.CommitBlockListOptions{})
+	_, err = blockBlobClient.CommitBlockList(ctx, blocks, &blockblob.CommitBlockListOptions{
+		Tier: &accessTier,
+	})
	return errors.Wrap(err, "CommitBlockList")
}
-func (be *Backend) saveLarge(ctx context.Context, objName string, rd backend.RewindReader) error {
+func (be *Backend) saveLarge(ctx context.Context, objName string, rd backend.RewindReader, accessTier blob.AccessTier) error {
	blockBlobClient := be.container.NewBlockBlobClient(objName)

	buf := make([]byte, 100*1024*1024)
@@ -303,7 +337,9 @@ func (be *Backend) saveLarge(ctx context.Context, objName string, rd backend.Rew
		return errors.Errorf("wrote %d bytes instead of the expected %d bytes", uploadedBytes, rd.Length())
	}

-	_, err := blockBlobClient.CommitBlockList(ctx, blocks, &blockblob.CommitBlockListOptions{})
+	_, err := blockBlobClient.CommitBlockList(ctx, blocks, &blockblob.CommitBlockListOptions{
+		Tier: &accessTier,
+	})

	debug.Log("uploaded %d parts: %v", len(blocks), blocks)
	return errors.Wrap(err, "CommitBlockList")


@@ -23,6 +23,7 @@ type Config struct {
	Prefix      string
	Connections uint   `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"`
	AccessTier  string `option:"access-tier" help:"set the access tier for the blob storage (default: inferred from the storage account defaults)"`
}
// NewConfig returns a new Config with the default values filled in.