Mirror of https://github.com/restic/restic.git (synced 2025-01-03 13:45:20 +00:00)
Stop Counters where they're constructed and started

commit 21b787a4d1
parent ddca699cd2

8 changed files with 7 additions and 11 deletions
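In short: a progress counter is now stopped by the code that constructed and started it, not by the helpers it is passed to, and those helpers may be handed a nil counter when no reporting is wanted. A minimal sketch of the pattern in plain Go (the Counter type, NewCounter, and worker below are hypothetical stand-ins, not restic's progress package):

package main

import "fmt"

// Counter is a hypothetical stand-in for a progress counter: it counts as
// "started" once constructed and is stopped with Done.
type Counter struct {
	n       int
	stopped bool
}

func NewCounter() *Counter { return &Counter{} }

// Add records progress. It is nil-safe so callers can pass a nil counter
// when no progress reporting is wanted.
func (c *Counter) Add(v int) {
	if c == nil {
		return
	}
	c.n += v
}

// Done stops the counter; also nil-safe.
func (c *Counter) Done() {
	if c == nil {
		return
	}
	c.stopped = true
}

// worker only reports progress. Following this commit's pattern it never
// calls p.Done(), because it did not construct the counter.
func worker(items int, p *Counter) {
	for i := 0; i < items; i++ {
		p.Add(1)
	}
}

func main() {
	// The caller owns the counter's lifecycle: construct it, hand it down,
	// and stop it itself once the work is finished.
	p := NewCounter()
	worker(10, p)
	p.Done()
	fmt.Println("processed:", p.n, "stopped:", p.stopped)
}

This keeps construction and shutdown of the counter in one place, which is what the hunks below do by moving p.Done() and bar.Done() into the callers.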
@@ -259,6 +259,7 @@ func runCheck(opts CheckOptions, gopts GlobalOptions, args []string) error {
 			errorsFound = true
 			Warnf("%v\n", err)
 		}
+		p.Done()
 	}

 	switch {
@@ -482,6 +482,7 @@ func prune(opts PruneOptions, gopts GlobalOptions, repo restic.Repository, usedB
 	Verbosef("repacking packs\n")
 	bar := newProgressMax(!gopts.Quiet, uint64(len(repackPacks)), "packs repacked")
 	_, err := repository.Repack(ctx, repo, repackPacks, keepBlobs, bar)
+	bar.Done()
 	if err != nil {
 		return err
 	}
@@ -63,6 +63,7 @@ func rebuildIndex(ctx context.Context, repo restic.Repository, ignorePacks resti

 	bar := newProgressMax(!globalOptions.Quiet, packs-uint64(len(ignorePacks)), "packs")
 	idx, invalidFiles, err := index.New(ctx, repo, ignorePacks, bar)
+	bar.Done()
 	if err != nil {
 		return err
 	}
@@ -773,14 +773,13 @@ func checkPack(ctx context.Context, r restic.Repository, id restic.ID) error {
 }

 // ReadData loads all data from the repository and checks the integrity.
-func (c *Checker) ReadData(ctx context.Context, p *progress.Counter, errChan chan<- error) {
-	c.ReadPacks(ctx, c.packs, p, errChan)
+func (c *Checker) ReadData(ctx context.Context, errChan chan<- error) {
+	c.ReadPacks(ctx, c.packs, nil, errChan)
 }

 // ReadPacks loads data from specified packs and checks the integrity.
 func (c *Checker) ReadPacks(ctx context.Context, packs restic.IDSet, p *progress.Counter, errChan chan<- error) {
 	defer close(errChan)
-	defer p.Done()

 	g, ctx := errgroup.WithContext(ctx)
 	ch := make(chan restic.ID)
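The ReadData/ReadPacks change above relies on the counter being usable through a nil pointer, since the convenience wrapper now forwards nil instead of a real counter. A hedged sketch of that wrapper pattern with a hypothetical nil-safe Counter (readPacks and readData stand in for the real methods):

package main

import "fmt"

// Counter is a hypothetical nil-safe progress counter stand-in.
type Counter struct{ n int }

// Add is a no-op on a nil receiver, which is what lets a convenience
// wrapper forward nil instead of a real counter.
func (c *Counter) Add(v int) {
	if c != nil {
		c.n += v
	}
}

// readPacks plays the role of Checker.ReadPacks after the change: it uses
// the counter if one is given, closes the error channel, but never stops
// the counter it was handed.
func readPacks(packs []string, p *Counter, errCh chan<- error) {
	defer close(errCh)
	for range packs {
		p.Add(1) // nil-safe
	}
}

// readData plays the role of Checker.ReadData: a wrapper without progress
// reporting that simply forwards nil, mirroring
// c.ReadPacks(ctx, c.packs, nil, errChan) in the hunk above.
func readData(packs []string, errCh chan<- error) {
	readPacks(packs, nil, errCh)
}

func main() {
	errCh := make(chan error)
	go readData([]string{"a", "b", "c"}, errCh)
	for err := range errCh {
		fmt.Println("error:", err)
	}
	fmt.Println("all packs read")
}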
@@ -50,7 +50,7 @@ func checkData(chkr *checker.Checker) []error {
 	return collectErrors(
 		context.TODO(),
 		func(ctx context.Context, errCh chan<- error) {
-			chkr.ReadData(ctx, nil, errCh)
+			chkr.ReadData(ctx, errCh)
 		},
 	)
 }
@@ -44,7 +44,7 @@ func TestCheckRepo(t testing.TB, repo restic.Repository) {

 	// read data
 	errChan = make(chan error)
-	go chkr.ReadData(context.TODO(), nil, errChan)
+	go chkr.ReadData(context.TODO(), errChan)

 	for err := range errChan {
 		t.Error(err)
@@ -51,8 +51,6 @@ type Lister interface {
 // New creates a new index for repo from scratch. InvalidFiles contains all IDs
 // of files that cannot be listed successfully.
 func New(ctx context.Context, repo Lister, ignorePacks restic.IDSet, p *progress.Counter) (idx *Index, invalidFiles restic.IDs, err error) {
-	defer p.Done()
-
 	type Job struct {
 		PackID restic.ID
 		Size   int64
@@ -191,8 +189,6 @@ func loadIndexJSON(ctx context.Context, repo ListLoader, id restic.ID) (*indexJS
 func Load(ctx context.Context, repo ListLoader, p *progress.Counter) (*Index, error) {
 	debug.Log("loading indexes")

-	defer p.Done()
-
 	supersedes := make(map[restic.ID]restic.IDSet)
 	results := make(map[restic.ID]map[restic.ID]Pack)

@@ -25,8 +25,6 @@ const numRepackWorkers = 8
 // The map keepBlobs is modified by Repack, it is used to keep track of which
 // blobs have been processed.
 func Repack(ctx context.Context, repo restic.Repository, packs restic.IDSet, keepBlobs restic.BlobSet, p *progress.Counter) (obsoletePacks restic.IDSet, err error) {
-	defer p.Done()
-
 	debug.Log("repacking %d packs while keeping %d blobs", len(packs), len(keepBlobs))

 	wg, wgCtx := errgroup.WithContext(ctx)