mirror of https://github.com/restic/restic.git
Add progress report for loading blobs
parent 58cded6b75
commit a59b0ec1f6
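The commit threads a *restic.Progress through Archiver.Preload, in the same way Scan and Snapshot already take one. As a rough, hypothetical wiring sketch (not part of the commit) that uses only the Progress API visible in the hunks below — NewProgress, the OnUpdate/OnDone callback fields, Start, Report, Done — and the new Stat counters Trees and Blobs; the import path is an assumption:

package main

import (
	"fmt"
	"time"

	"github.com/restic/restic" // assumed import path for the restic root package of that era
)

func main() {
	// OnUpdate receives the running totals; the ticker flag presumably
	// distinguishes interval ticks from report-driven updates (inferred
	// from its use in newLoadBlobsProgress below).
	p := restic.NewProgress(time.Second)
	p.OnUpdate = func(s restic.Stat, d time.Duration, ticker bool) {
		fmt.Printf("\r[%v] %d trees, %d blobs", d, s.Trees, s.Blobs)
	}
	p.OnDone = func(s restic.Stat, d time.Duration, ticker bool) {
		fmt.Printf("\ndone after %v: %d trees, %d blobs\n", d, s.Trees, s.Blobs)
	}

	p.Start()
	defer p.Done()

	// Workers report per-item deltas; Stat.Add (extended below) sums them.
	for i := 0; i < 10; i++ {
		p.Report(restic.Stat{Trees: 1, Blobs: 42})
	}
}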
@@ -60,7 +60,10 @@ func NewArchiver(s Server) (*Archiver, error) {
 
 // Preload loads all tree objects from repository and adds all blobs that are
 // still available to the map for deduplication.
-func (arch *Archiver) Preload() error {
+func (arch *Archiver) Preload(p *Progress) error {
+	p.Start()
+	defer p.Done()
+
 	debug.Log("Archiver.Preload", "Start loading known blobs")
 
 	// load all trees, in parallel
@@ -73,8 +76,8 @@ func (arch *Archiver) Preload() error {
 			}
 
 			debug.Log("Archiver.Preload", "load tree %v with %d blobs", id, tree.Map.Len())
 
 			arch.m.Merge(tree.Map)
+			p.Report(Stat{Trees: 1, Blobs: uint64(tree.Map.Len())})
 		}
 		wg.Done()
 	}
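Note that archiver_test.go below calls arch.Preload(nil), and newLoadBlobsProgress returns a nil *Progress when stdout is not a terminal, so the p.Start()/p.Report()/p.Done() calls above only work if *Progress methods tolerate a nil receiver. That guard lives in progress.go, which this diff does not show apart from Stat; the following stand-alone sketch uses hypothetical stand-in types (not restic's) purely to illustrate the assumed pattern:

package main

import "fmt"

// Stat is a hypothetical stand-in carrying only the counters this commit
// adds; restic's real Stat also tracks files, dirs and bytes.
type Stat struct {
	Trees, Blobs uint64
}

// Progress is a stripped-down stand-in used only to show the assumed
// nil-receiver guard.
type Progress struct {
	cur      Stat
	OnUpdate func(s Stat)
}

// Report adds a delta to the running totals. The nil check is the point:
// it is what would let arch.Preload(nil) in the tests, or a nil result
// from newLoadBlobsProgress, be used without panicking.
func (p *Progress) Report(s Stat) {
	if p == nil {
		return
	}
	p.cur.Trees += s.Trees
	p.cur.Blobs += s.Blobs
	if p.OnUpdate != nil {
		p.OnUpdate(p.cur)
	}
}

func main() {
	var p *Progress // nil: progress reporting disabled
	p.Report(Stat{Trees: 1, Blobs: 10})
	fmt.Println("no panic on nil receiver")
}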
@@ -146,7 +146,7 @@ func BenchmarkArchiveDirectory(b *testing.B) {
 func snapshot(t testing.TB, server restic.Server, path string) *restic.Snapshot {
 	arch, err := restic.NewArchiver(server)
 	ok(t, err)
-	ok(t, arch.Preload())
+	ok(t, arch.Preload(nil))
 	sn, _, err := arch.Snapshot(nil, path, nil)
 	ok(t, err)
 	return sn
@@ -230,6 +230,6 @@ func BenchmarkPreload(t *testing.B) {
 		// create new archiver and preload
 		arch2, err := restic.NewArchiver(server)
 		ok(t, err)
-		ok(t, arch2.Preload())
+		ok(t, arch2.Preload(nil))
 	}
 }
@@ -78,17 +78,55 @@ func newScanProgress() *restic.Progress {
 		return nil
 	}
 
-	scanProgress := restic.NewProgress(time.Second)
-	if terminal.IsTerminal(int(os.Stdout.Fd())) {
-		scanProgress.OnUpdate = func(s restic.Stat, d time.Duration, ticker bool) {
-			fmt.Printf("\x1b[2K\r[%s] %d directories, %d files, %s", format_duration(d), s.Dirs, s.Files, format_bytes(s.Bytes))
-		}
-		scanProgress.OnDone = func(s restic.Stat, d time.Duration, ticker bool) {
-			fmt.Printf("\nDone in %s\n", format_duration(d))
-		}
-	}
-
-	return scanProgress
+	p := restic.NewProgress(time.Second)
+	p.OnUpdate = func(s restic.Stat, d time.Duration, ticker bool) {
+		fmt.Printf("\x1b[2K\r[%s] %d directories, %d files, %s", format_duration(d), s.Dirs, s.Files, format_bytes(s.Bytes))
+	}
+	p.OnDone = func(s restic.Stat, d time.Duration, ticker bool) {
+		fmt.Printf("\nDone in %s\n", format_duration(d))
+	}
+
+	return p
+}
+
+func newLoadBlobsProgress(s restic.Server) (*restic.Progress, error) {
+	if !terminal.IsTerminal(int(os.Stdout.Fd())) {
+		return nil, nil
+	}
+
+	trees, err := s.Count(backend.Tree)
+	if err != nil {
+		return nil, err
+	}
+
+	eta := uint64(0)
+	tps := uint64(0) // trees per second
+
+	p := restic.NewProgress(time.Second)
+	p.OnUpdate = func(s restic.Stat, d time.Duration, ticker bool) {
+		sec := uint64(d / time.Second)
+		if trees > 0 && sec > 0 && ticker {
+			tps = uint64(s.Trees) / sec
+			if tps > 0 {
+				eta = (uint64(trees) - s.Trees) / tps
+			}
+		}
+
+		// fmt.Printf("sec: %v, trees: %v / %v\n", sec, s.Trees, trees)
+
+		fmt.Printf("\x1b[2K\r[%s] %3.2f%% %d trees/s %d / %d trees, %d blobs ETA %s",
+			format_duration(d),
+			float64(s.Trees)/float64(trees)*100,
+			tps,
+			s.Trees, trees,
+			s.Blobs,
+			format_seconds(eta))
+	}
+	p.OnDone = func(s restic.Stat, d time.Duration, ticker bool) {
+		fmt.Printf("\nDone in %s\n", format_duration(d))
+	}
+
+	return p, nil
 }
 
 func newArchiveProgress(todo restic.Stat) *restic.Progress {
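The ETA in newLoadBlobsProgress is simply the average rate so far applied to the remaining trees. A small self-contained restatement of that arithmetic (the helper name eta and the standalone form are mine, not the commit's):

package main

import "fmt"

// eta mirrors the arithmetic in newLoadBlobsProgress: trees-per-second so
// far (integer division), then remaining trees divided by that rate. Like
// the original it stays at 0 until at least one second has passed and the
// average rate is at least one tree per second.
func eta(total, done, seconds uint64) uint64 {
	if seconds == 0 || done < seconds {
		return 0
	}
	tps := done / seconds
	return (total - done) / tps
}

func main() {
	// E.g. 1200 trees in the repository, 300 loaded after 10 seconds:
	// tps = 30, so roughly (1200-300)/30 = 30 seconds remain.
	fmt.Println(eta(1200, 300, 10), "seconds remaining")
}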
@@ -154,8 +192,7 @@ func (cmd CmdBackup) Execute(args []string) error {
 
 	fmt.Printf("scan %s\n", target)
 
-	sp := newScanProgress()
-	stat, err := restic.Scan(target, sp)
+	stat, err := restic.Scan(target, newScanProgress())
 
 	// TODO: add filter
 	// arch.Filter = func(dir string, fi os.FileInfo) bool {
@@ -178,13 +215,17 @@ func (cmd CmdBackup) Execute(args []string) error {
 	}
 
 	fmt.Printf("loading blobs\n")
-	err = arch.Preload()
+	pb, err := newLoadBlobsProgress(s)
 	if err != nil {
 		return err
 	}
 
-	ap := newArchiveProgress(stat)
-	_, id, err := arch.Snapshot(ap, target, parentSnapshotID)
+	err = arch.Preload(pb)
+	if err != nil {
+		return err
+	}
+
+	_, id, err := arch.Snapshot(newArchiveProgress(stat), target, parentSnapshotID)
 	if err != nil {
 		return err
 	}
@@ -173,6 +173,8 @@ func (s *Stat) Add(other Stat) {
 	s.Bytes += other.Bytes
 	s.Dirs += other.Dirs
 	s.Files += other.Files
+	s.Trees += other.Trees
+	s.Blobs += other.Blobs
 }
 
 func (s Stat) String() string {
@@ -192,6 +194,6 @@ func (s Stat) String() string {
 		str = fmt.Sprintf("%dB", s.Bytes)
 	}
 
-	return fmt.Sprintf("Stat(%d files, %d dirs, %v)",
-		s.Files, s.Dirs, str)
+	return fmt.Sprintf("Stat(%d files, %d dirs, %v trees, %v blobs, %v)",
+		s.Files, s.Dirs, s.Trees, s.Blobs, str)
 }
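With the extended format string, the new counters show up in Stat's debug output as well: a Stat with, say, 5 files, 2 dirs, 10 trees, 123 blobs and 900 bytes (small enough to hit the %dB branch shown above) would render as "Stat(5 files, 2 dirs, 10 trees, 123 blobs, 900B)" — illustrative values, not from the commit.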
server.go | 10 ++++++++++
@@ -422,6 +422,16 @@ func (s Server) Stats() (ServerStats, error) {
 	return ServerStats{Blobs: uint(blobs.Len()), Trees: uint(trees)}, err
 }
 
+// Count counts the number of objects of type t in the backend.
+func (s Server) Count(t backend.Type) (int, error) {
+	l, err := s.be.List(t)
+	if err != nil {
+		return 0, err
+	}
+
+	return len(l), nil
+}
+
 // Proxy methods to backend
 
 func (s Server) List(t backend.Type) (backend.IDs, error) {