2014-12-05 20:45:49 +00:00
|
|
|
package restic
|
2014-09-23 20:39:12 +00:00
|
|
|
|
|
|
|
import (
|
2014-11-30 21:49:14 +00:00
|
|
|
"fmt"
|
2014-11-17 22:28:51 +00:00
|
|
|
"io"
|
2014-09-23 20:39:12 +00:00
|
|
|
"os"
|
|
|
|
"path/filepath"
|
2014-11-16 21:50:20 +00:00
|
|
|
"sync"
|
2014-11-23 08:22:18 +00:00
|
|
|
"time"
|
2014-09-23 20:39:12 +00:00
|
|
|
|
2014-11-23 11:14:56 +00:00
|
|
|
"github.com/juju/arrar"
|
2014-12-05 20:45:49 +00:00
|
|
|
"github.com/restic/restic/backend"
|
|
|
|
"github.com/restic/restic/chunker"
|
2014-09-23 20:39:12 +00:00
|
|
|
)
|
|
|
|
|
2014-11-16 21:50:20 +00:00
|
|
|
const (
	// maxConcurrentFiles bounds the number of files saved in parallel.
	maxConcurrentFiles = 8

	// maxConcurrentBlobs bounds the number of blobs saved in parallel.
	maxConcurrentBlobs = 8

	// statTimeout is how long update() waits to deliver stats on a
	// channel before accumulating them locally instead.
	statTimeout = 20 * time.Millisecond
)
|
|
|
|
|
2014-09-23 20:39:12 +00:00
|
|
|
// Archiver saves a directory tree as a snapshot to a Server. Concurrency
// is bounded by the fileToken and blobToken channels.
type Archiver struct {
	s  Server
	ch *ContentHandler

	bl *BlobList // blobs used for the current snapshot

	fileToken chan struct{} // tokens limiting concurrently saved files
	blobToken chan struct{} // tokens limiting concurrently saved blobs

	// Stats collects counters while scanning.
	Stats Stats

	// Error is called when opening a directory fails during scan(); its
	// return value is propagated to the caller.
	Error func(dir string, fi os.FileInfo, err error) error
	// Filter decides whether an item is included; items for which it
	// returns false are skipped.
	Filter func(item string, fi os.FileInfo) bool

	// ScannerStats and SaveStats, if non-nil, receive progress updates
	// during scanning and saving respectively.
	ScannerStats chan Stats
	SaveStats    chan Stats

	statsMutex  sync.Mutex // guards updateStats
	updateStats Stats      // stats accumulated when a channel send timed out
}
|
|
|
|
|
|
|
|
// Stats holds counters for items seen and bytes processed.
type Stats struct {
	Files       int
	Directories int
	Other       int
	Bytes       uint64
}

// Add accumulates all counters from other into s.
func (s *Stats) Add(other Stats) {
	s.Files += other.Files
	s.Directories += other.Directories
	s.Other += other.Other
	s.Bytes += other.Bytes
}
|
|
|
|
|
2014-12-21 17:10:19 +00:00
|
|
|
func NewArchiver(s Server) (*Archiver, error) {
|
2014-09-23 20:39:12 +00:00
|
|
|
var err error
|
2014-11-16 21:50:20 +00:00
|
|
|
arch := &Archiver{
|
2014-12-21 16:02:49 +00:00
|
|
|
s: s,
|
2014-11-16 21:50:20 +00:00
|
|
|
fileToken: make(chan struct{}, maxConcurrentFiles),
|
2014-11-22 21:05:39 +00:00
|
|
|
blobToken: make(chan struct{}, maxConcurrentBlobs),
|
2014-11-16 21:50:20 +00:00
|
|
|
}
|
|
|
|
|
2014-11-22 21:05:39 +00:00
|
|
|
// fill file and blob token
|
2014-11-16 21:50:20 +00:00
|
|
|
for i := 0; i < maxConcurrentFiles; i++ {
|
|
|
|
arch.fileToken <- struct{}{}
|
|
|
|
}
|
2014-09-23 20:39:12 +00:00
|
|
|
|
2014-11-22 21:05:39 +00:00
|
|
|
for i := 0; i < maxConcurrentBlobs; i++ {
|
|
|
|
arch.blobToken <- struct{}{}
|
|
|
|
}
|
|
|
|
|
2014-09-23 20:39:12 +00:00
|
|
|
// abort on all errors
|
|
|
|
arch.Error = func(string, os.FileInfo, error) error { return err }
|
|
|
|
// allow all files
|
|
|
|
arch.Filter = func(string, os.FileInfo) bool { return true }
|
|
|
|
|
2014-11-21 20:21:44 +00:00
|
|
|
arch.bl = NewBlobList()
|
2014-12-21 17:10:19 +00:00
|
|
|
arch.ch, err = NewContentHandler(s)
|
2014-09-23 20:39:12 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// load all blobs from all snapshots
|
2014-11-23 21:26:01 +00:00
|
|
|
err = arch.ch.LoadAllMaps()
|
2014-09-23 20:39:12 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return arch, nil
|
|
|
|
}
|
|
|
|
|
2014-11-23 08:22:18 +00:00
|
|
|
func (arch *Archiver) update(ch chan Stats, stats Stats) {
|
|
|
|
if ch == nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// load old stats from global state
|
|
|
|
arch.statsMutex.Lock()
|
|
|
|
stats.Add(arch.updateStats)
|
|
|
|
arch.updateStats = Stats{}
|
|
|
|
arch.statsMutex.Unlock()
|
|
|
|
|
|
|
|
// try to send stats through the channel, with a timeout
|
|
|
|
timeout := time.After(statTimeout)
|
|
|
|
|
|
|
|
select {
|
|
|
|
case ch <- stats:
|
|
|
|
break
|
|
|
|
case _ = <-timeout:
|
|
|
|
|
|
|
|
// save cumulated stats to global state
|
|
|
|
arch.statsMutex.Lock()
|
|
|
|
arch.updateStats.Add(stats)
|
|
|
|
arch.statsMutex.Unlock()
|
|
|
|
|
|
|
|
break
|
2014-11-16 21:50:20 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-11-21 20:21:44 +00:00
|
|
|
func (arch *Archiver) Save(t backend.Type, data []byte) (Blob, error) {
|
2014-09-23 20:39:12 +00:00
|
|
|
blob, err := arch.ch.Save(t, data)
|
|
|
|
if err != nil {
|
2014-11-21 20:21:44 +00:00
|
|
|
return Blob{}, err
|
2014-09-23 20:39:12 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// store blob in storage map for current snapshot
|
2014-11-21 20:21:44 +00:00
|
|
|
arch.bl.Insert(blob)
|
2014-09-23 20:39:12 +00:00
|
|
|
|
|
|
|
return blob, nil
|
|
|
|
}
|
|
|
|
|
2014-11-21 20:21:44 +00:00
|
|
|
func (arch *Archiver) SaveJSON(t backend.Type, item interface{}) (Blob, error) {
|
2014-09-23 20:39:12 +00:00
|
|
|
blob, err := arch.ch.SaveJSON(t, item)
|
|
|
|
if err != nil {
|
2014-11-21 20:21:44 +00:00
|
|
|
return Blob{}, err
|
2014-09-23 20:39:12 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// store blob in storage map for current snapshot
|
2014-11-21 20:21:44 +00:00
|
|
|
arch.bl.Insert(blob)
|
2014-09-23 20:39:12 +00:00
|
|
|
|
|
|
|
return blob, nil
|
|
|
|
}
|
|
|
|
|
2014-11-17 22:28:51 +00:00
|
|
|
// SaveFile stores the content of the file on the backend as a Blob by calling
|
|
|
|
// Save for each chunk.
|
2014-11-16 20:29:11 +00:00
|
|
|
func (arch *Archiver) SaveFile(node *Node) error {
|
2014-11-17 22:28:51 +00:00
|
|
|
file, err := os.Open(node.path)
|
|
|
|
defer file.Close()
|
2014-09-23 20:39:12 +00:00
|
|
|
if err != nil {
|
2014-11-23 11:14:56 +00:00
|
|
|
return arrar.Annotate(err, "SaveFile()")
|
2014-11-17 22:28:51 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
var blobs Blobs
|
|
|
|
|
|
|
|
// if the file is small enough, store it directly
|
|
|
|
if node.Size < chunker.MinSize {
|
2014-11-23 15:48:00 +00:00
|
|
|
// acquire token
|
|
|
|
token := <-arch.blobToken
|
|
|
|
defer func() {
|
|
|
|
arch.blobToken <- token
|
|
|
|
}()
|
|
|
|
|
|
|
|
buf := GetChunkBuf("blob single file")
|
|
|
|
defer FreeChunkBuf("blob single file", buf)
|
|
|
|
n, err := io.ReadFull(file, buf)
|
2014-11-30 15:06:37 +00:00
|
|
|
if err != nil && err != io.ErrUnexpectedEOF && err != io.EOF {
|
|
|
|
return arrar.Annotate(err, "SaveFile() read small file")
|
2014-11-17 22:28:51 +00:00
|
|
|
}
|
|
|
|
|
2014-11-30 21:16:34 +00:00
|
|
|
if err == io.EOF {
|
|
|
|
// use empty blob list for empty files
|
|
|
|
blobs = Blobs{}
|
|
|
|
} else {
|
|
|
|
blob, err := arch.ch.Save(backend.Data, buf[:n])
|
|
|
|
if err != nil {
|
|
|
|
return arrar.Annotate(err, "SaveFile() save chunk")
|
|
|
|
}
|
|
|
|
|
|
|
|
arch.update(arch.SaveStats, Stats{Bytes: blob.Size})
|
|
|
|
|
|
|
|
blobs = Blobs{blob}
|
2014-11-17 22:28:51 +00:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// else store all chunks
|
2014-11-17 22:37:03 +00:00
|
|
|
chnker := chunker.New(file)
|
2014-11-22 21:05:39 +00:00
|
|
|
chans := [](<-chan Blob){}
|
2014-11-23 15:48:00 +00:00
|
|
|
defer chnker.Free()
|
2014-11-17 22:28:51 +00:00
|
|
|
|
2014-11-30 21:49:14 +00:00
|
|
|
chunks := 0
|
|
|
|
|
2014-11-17 22:28:51 +00:00
|
|
|
for {
|
2014-11-23 15:48:00 +00:00
|
|
|
buf := GetChunkBuf("blob chunker")
|
|
|
|
chunk, err := chnker.Next(buf)
|
2014-11-17 22:28:51 +00:00
|
|
|
if err == io.EOF {
|
2014-11-23 15:48:00 +00:00
|
|
|
FreeChunkBuf("blob chunker", buf)
|
2014-11-17 22:28:51 +00:00
|
|
|
break
|
|
|
|
}
|
|
|
|
|
|
|
|
if err != nil {
|
2014-11-23 15:48:00 +00:00
|
|
|
FreeChunkBuf("blob chunker", buf)
|
2014-11-30 15:06:37 +00:00
|
|
|
return arrar.Annotate(err, "SaveFile() chunker.Next()")
|
2014-11-17 22:28:51 +00:00
|
|
|
}
|
|
|
|
|
2014-11-30 21:49:14 +00:00
|
|
|
chunks++
|
|
|
|
|
2014-11-22 21:05:39 +00:00
|
|
|
// acquire token, start goroutine to save chunk
|
|
|
|
token := <-arch.blobToken
|
|
|
|
resCh := make(chan Blob, 1)
|
|
|
|
|
|
|
|
go func(ch chan<- Blob) {
|
|
|
|
blob, err := arch.ch.Save(backend.Data, chunk.Data)
|
|
|
|
// TODO handle error
|
|
|
|
if err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
2014-11-17 22:28:51 +00:00
|
|
|
|
2014-11-23 15:48:00 +00:00
|
|
|
FreeChunkBuf("blob chunker", buf)
|
|
|
|
|
2014-11-23 08:22:18 +00:00
|
|
|
arch.update(arch.SaveStats, Stats{Bytes: blob.Size})
|
2014-11-22 21:05:39 +00:00
|
|
|
arch.blobToken <- token
|
|
|
|
ch <- blob
|
|
|
|
}(resCh)
|
|
|
|
|
|
|
|
chans = append(chans, resCh)
|
|
|
|
}
|
2014-11-17 22:28:51 +00:00
|
|
|
|
2014-11-22 21:05:39 +00:00
|
|
|
blobs = []Blob{}
|
|
|
|
for _, ch := range chans {
|
|
|
|
blobs = append(blobs, <-ch)
|
2014-11-17 22:28:51 +00:00
|
|
|
}
|
2014-11-30 21:49:14 +00:00
|
|
|
|
|
|
|
if len(blobs) != chunks {
|
|
|
|
return fmt.Errorf("chunker returned %v chunks, but only %v blobs saved", chunks, len(blobs))
|
|
|
|
}
|
2014-09-23 20:39:12 +00:00
|
|
|
}
|
|
|
|
|
2014-12-07 12:30:16 +00:00
|
|
|
var bytes uint64
|
|
|
|
|
2014-09-23 20:39:12 +00:00
|
|
|
node.Content = make([]backend.ID, len(blobs))
|
|
|
|
for i, blob := range blobs {
|
|
|
|
node.Content[i] = blob.ID
|
2014-11-21 20:21:44 +00:00
|
|
|
arch.bl.Insert(blob)
|
2014-12-07 12:30:16 +00:00
|
|
|
bytes += blob.Size
|
|
|
|
}
|
|
|
|
|
|
|
|
if bytes != node.Size {
|
|
|
|
return fmt.Errorf("errors saving node %q: saved %d bytes, wanted %d bytes", node.path, bytes, node.Size)
|
2014-09-23 20:39:12 +00:00
|
|
|
}
|
|
|
|
|
2014-11-30 15:06:37 +00:00
|
|
|
return nil
|
2014-09-23 20:39:12 +00:00
|
|
|
}
|
|
|
|
|
2015-01-04 15:10:30 +00:00
|
|
|
func (arch *Archiver) scan(dir string) (*Tree, error) {
|
|
|
|
var err error
|
2014-11-30 21:34:21 +00:00
|
|
|
|
2014-11-16 20:29:11 +00:00
|
|
|
// open and list path
|
2014-09-23 20:39:12 +00:00
|
|
|
fd, err := os.Open(dir)
|
|
|
|
defer fd.Close()
|
|
|
|
if err != nil {
|
2014-11-23 11:14:56 +00:00
|
|
|
return nil, arch.Error(dir, nil, err)
|
2014-09-23 20:39:12 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
entries, err := fd.Readdir(-1)
|
|
|
|
if err != nil {
|
2014-11-16 20:29:11 +00:00
|
|
|
return nil, err
|
2014-09-23 20:39:12 +00:00
|
|
|
}
|
|
|
|
|
2014-11-30 21:34:21 +00:00
|
|
|
// build new tree
|
2014-09-23 20:39:12 +00:00
|
|
|
tree := Tree{}
|
|
|
|
for _, entry := range entries {
|
|
|
|
path := filepath.Join(dir, entry.Name())
|
|
|
|
|
|
|
|
if !arch.Filter(path, entry) {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
node, err := NodeFromFileInfo(path, entry)
|
|
|
|
if err != nil {
|
2014-11-16 20:29:11 +00:00
|
|
|
// TODO: error processing
|
|
|
|
return nil, err
|
2014-09-23 20:39:12 +00:00
|
|
|
}
|
|
|
|
|
2014-11-30 21:34:21 +00:00
|
|
|
err = tree.Insert(node)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2014-09-23 20:39:12 +00:00
|
|
|
|
|
|
|
if entry.IsDir() {
|
2015-01-04 15:10:30 +00:00
|
|
|
node.Tree, err = arch.scan(path)
|
2014-09-23 20:39:12 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
2014-11-30 21:34:21 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
for _, node := range tree {
|
|
|
|
if node.Type == "file" && node.Content != nil {
|
|
|
|
continue
|
|
|
|
}
|
2014-09-23 20:39:12 +00:00
|
|
|
|
2014-11-16 20:29:11 +00:00
|
|
|
switch node.Type {
|
|
|
|
case "file":
|
|
|
|
arch.Stats.Files++
|
|
|
|
arch.Stats.Bytes += node.Size
|
|
|
|
case "dir":
|
|
|
|
arch.Stats.Directories++
|
|
|
|
default:
|
|
|
|
arch.Stats.Other++
|
2014-09-23 20:39:12 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-11-23 08:22:18 +00:00
|
|
|
arch.update(arch.ScannerStats, arch.Stats)
|
2014-11-16 20:29:11 +00:00
|
|
|
|
|
|
|
return &tree, nil
|
2014-09-23 20:39:12 +00:00
|
|
|
}
|
|
|
|
|
2015-01-04 15:10:30 +00:00
|
|
|
func (arch *Archiver) Scan(path string) (*Tree, error) {
|
2014-11-23 08:22:18 +00:00
|
|
|
// reset global stats
|
|
|
|
arch.updateStats = Stats{}
|
|
|
|
|
2014-11-16 20:29:11 +00:00
|
|
|
fi, err := os.Lstat(path)
|
|
|
|
if err != nil {
|
2014-11-23 11:14:56 +00:00
|
|
|
return nil, arrar.Annotatef(err, "Lstat(%q)", path)
|
2014-11-16 20:29:11 +00:00
|
|
|
}
|
2014-09-23 20:39:12 +00:00
|
|
|
|
2014-11-16 20:29:11 +00:00
|
|
|
node, err := NodeFromFileInfo(path, fi)
|
2014-09-23 20:39:12 +00:00
|
|
|
if err != nil {
|
2014-11-23 11:14:56 +00:00
|
|
|
return nil, arrar.Annotate(err, "NodeFromFileInfo()")
|
2014-11-16 20:29:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if node.Type != "dir" {
|
2014-11-30 21:34:21 +00:00
|
|
|
t := &Tree{node}
|
|
|
|
|
2015-01-04 15:10:30 +00:00
|
|
|
// update stats
|
2014-11-30 21:34:21 +00:00
|
|
|
if node.Content == nil && node.Subtree == nil {
|
|
|
|
arch.Stats.Files = 1
|
|
|
|
arch.Stats.Bytes = node.Size
|
|
|
|
}
|
|
|
|
|
2014-11-23 08:22:18 +00:00
|
|
|
arch.update(arch.ScannerStats, arch.Stats)
|
2014-11-30 21:34:21 +00:00
|
|
|
|
|
|
|
return t, nil
|
2014-09-23 20:39:12 +00:00
|
|
|
}
|
|
|
|
|
2014-11-16 20:29:11 +00:00
|
|
|
arch.Stats.Directories = 1
|
2014-11-30 21:34:21 +00:00
|
|
|
|
2015-01-04 15:10:30 +00:00
|
|
|
node.Tree, err = arch.scan(path)
|
2014-09-23 20:39:12 +00:00
|
|
|
if err != nil {
|
2014-11-23 11:14:56 +00:00
|
|
|
return nil, arrar.Annotate(err, "loadTree()")
|
2014-09-23 20:39:12 +00:00
|
|
|
}
|
|
|
|
|
2014-11-23 08:22:18 +00:00
|
|
|
arch.update(arch.ScannerStats, arch.Stats)
|
2014-09-23 20:39:12 +00:00
|
|
|
|
2014-11-16 20:29:11 +00:00
|
|
|
return &Tree{node}, nil
|
|
|
|
}
|
2014-09-23 20:39:12 +00:00
|
|
|
|
2014-11-21 20:21:44 +00:00
|
|
|
// saveTree stores all subtrees and file contents of t on the backend and
// returns the blob of the serialized tree itself. Subtrees are saved
// recursively and sequentially; file contents are saved in goroutines
// bounded by the arch.fileToken channel.
func (arch *Archiver) saveTree(t *Tree) (Blob, error) {
	var wg sync.WaitGroup

	for _, node := range *t {
		if node.Tree != nil && node.Subtree == nil {
			// in-memory subtree without a stored ID yet: save it and
			// record the resulting ID
			b, err := arch.saveTree(node.Tree)
			if err != nil {
				return Blob{}, err
			}
			node.Subtree = b.ID
			arch.update(arch.SaveStats, Stats{Directories: 1})
		} else if node.Type == "file" && len(node.Content) == 0 {
			// get token
			token := <-arch.fileToken

			// start goroutine; node is passed as an argument so the
			// goroutine does not capture the shared loop variable
			wg.Add(1)
			go func(n *Node) {
				defer wg.Done()
				// return the token even if SaveFile panics below
				defer func() {
					arch.fileToken <- token
				}()

				// TODO: handle error
				err := arch.SaveFile(n)
				if err != nil {
					panic(err)
				}
				arch.update(arch.SaveStats, Stats{Files: 1})
			}(node)
		} else {
			arch.update(arch.SaveStats, Stats{Other: 1})
		}
	}

	// wait until all file-saving goroutines have finished
	wg.Wait()

	// check for invalid file nodes
	for _, node := range *t {
		if node.Type == "file" && node.Content == nil {
			return Blob{}, fmt.Errorf("node %v has empty content", node.Name)
		}
	}

	blob, err := arch.SaveJSON(backend.Tree, t)
	if err != nil {
		return Blob{}, err
	}

	return blob, nil
}
|
|
|
|
|
2014-11-30 21:34:21 +00:00
|
|
|
func (arch *Archiver) Snapshot(dir string, t *Tree, parentSnapshot backend.ID) (*Snapshot, backend.ID, error) {
|
2014-11-23 08:22:18 +00:00
|
|
|
// reset global stats
|
|
|
|
arch.updateStats = Stats{}
|
|
|
|
|
2014-12-21 16:20:49 +00:00
|
|
|
sn, err := NewSnapshot(dir)
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, err
|
|
|
|
}
|
|
|
|
|
2014-11-30 21:34:21 +00:00
|
|
|
sn.Parent = parentSnapshot
|
2014-11-16 20:29:11 +00:00
|
|
|
|
|
|
|
blob, err := arch.saveTree(t)
|
2014-09-23 20:39:12 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, nil, err
|
|
|
|
}
|
2014-12-07 13:14:07 +00:00
|
|
|
sn.Tree = blob.ID
|
2014-09-23 20:39:12 +00:00
|
|
|
|
2014-11-23 21:26:01 +00:00
|
|
|
// save bloblist
|
|
|
|
blob, err = arch.SaveJSON(backend.Map, arch.bl)
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, err
|
|
|
|
}
|
|
|
|
sn.Map = blob.Storage
|
|
|
|
|
2014-09-23 20:39:12 +00:00
|
|
|
// save snapshot
|
|
|
|
blob, err = arch.SaveJSON(backend.Snapshot, sn)
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, err
|
|
|
|
}
|
|
|
|
|
2014-11-16 20:29:11 +00:00
|
|
|
return sn, blob.Storage, nil
|
2014-09-23 20:39:12 +00:00
|
|
|
}
|