Skip to content

Commit

Permalink
Fix progress bar for "get" command
Browse files Browse the repository at this point in the history
License: MIT
Signed-off-by: rht <[email protected]>
  • Loading branch information
rht authored and rht committed Aug 8, 2015
1 parent d1366cd commit 65963e0
Show file tree
Hide file tree
Showing 2 changed files with 62 additions and 13 deletions.
34 changes: 21 additions & 13 deletions core/commands/get.go
Original file line number Diff line number Diff line change
Expand Up @@ -71,7 +71,9 @@ may also specify the level of compression by specifying '-l=<1-9>'.
// only use this when the flag is '-C' without '-a'
reader, err = getZip(req.Context(), node, p, cmplvl)
} else {
reader, err = get(req.Context(), node, p, cmplvl)
var length uint64
reader, length, err = get(req.Context(), node, p, cmplvl)
res.SetLength(length)
}
if err != nil {
res.SetError(err, cmds.ErrNormal)
Expand Down Expand Up @@ -107,17 +109,16 @@ may also specify the level of compression by specifying '-l=<1-9>'.
Compression: cmplvl,
}

if err := gw.Write(outReader, outPath); err != nil {
if err := gw.Write(outReader, outPath, res.Length()); err != nil {
res.SetError(err, cmds.ErrNormal)
return
}
},
}

func progressBarForReader(out io.Writer, r io.Reader) (*pb.ProgressBar, *pb.Reader) {
func progressBarForReader(out io.Writer, r io.Reader, l uint64) (*pb.ProgressBar, *pb.Reader) {
// setup bar reader
// TODO: get total length of files
bar := pb.New(0).SetUnits(pb.U_BYTES)
bar := pb.New64(int64(l)).SetUnits(pb.U_BYTES)
bar.Output = out
barR := bar.NewProxyReader(r)
return bar, barR
Expand All @@ -131,11 +132,11 @@ type getWriter struct {
Compression int
}

func (gw *getWriter) Write(r io.Reader, fpath string) error {
func (gw *getWriter) Write(r io.Reader, fpath string, l uint64) error {
if gw.Archive || gw.Compression != gzip.NoCompression {
return gw.writeArchive(r, fpath)
}
return gw.writeExtracted(r, fpath)
return gw.writeExtracted(r, fpath, l)
}

func (gw *getWriter) writeArchive(r io.Reader, fpath string) error {
Expand All @@ -161,17 +162,17 @@ func (gw *getWriter) writeArchive(r io.Reader, fpath string) error {
defer file.Close()

fmt.Fprintf(gw.Out, "Saving archive to %s\n", fpath)
bar, barR := progressBarForReader(gw.Err, r)
bar, barR := progressBarForReader(gw.Err, r, 0)
bar.Start()
defer bar.Finish()

_, err = io.Copy(file, barR)
return err
}

func (gw *getWriter) writeExtracted(r io.Reader, fpath string) error {
func (gw *getWriter) writeExtracted(r io.Reader, fpath string, l uint64) error {
fmt.Fprintf(gw.Out, "Saving file(s) to %s\n", fpath)
bar, barR := progressBarForReader(gw.Err, r)
bar, barR := progressBarForReader(gw.Err, r, l)
bar.Start()
defer bar.Finish()

Expand All @@ -193,13 +194,20 @@ func getCompressOptions(req cmds.Request) (int, error) {
return gzip.NoCompression, nil
}

func get(ctx context.Context, node *core.IpfsNode, p path.Path, compression int) (io.Reader, error) {
func get(ctx context.Context, node *core.IpfsNode, p path.Path, compression int) (io.Reader, uint64, error) {
dn, err := core.Resolve(ctx, node, p)
if err != nil {
return nil, err
return nil, 0, err
}

reader, err := utar.DagArchive(ctx, dn, p.String(), node.DAG, compression)

length, err := utar.GetTarSize(ctx, dn, node.DAG)
if err != nil {
return nil, 0, err
}

return utar.DagArchive(ctx, dn, p.String(), node.DAG, compression)
return reader, length, err
}

// getZip is equivalent to `ipfs getdag $hash | gzip`
Expand Down
41 changes: 41 additions & 0 deletions unixfs/tar/writer.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,8 @@ import (
upb "github.com/ipfs/go-ipfs/unixfs/pb"
)

const tarBlockSize = 512

// DefaultBufSize is the buffer size for gets. for now, 1MB, which is ~4 blocks.
// TODO: does this need to be configurable?
var DefaultBufSize = 1048576
Expand Down Expand Up @@ -55,6 +57,45 @@ func DagArchive(ctx cxt.Context, nd *mdag.Node, name string, dag mdag.DAGService
return piper, nil
}

// GetTarSize returns the size in bytes of the tar serialization of nd
// (ignoring any compression), including the trailing end-of-archive
// padding blocks added at the root level.
func GetTarSize(ctx cxt.Context, nd *mdag.Node, dag mdag.DAGService) (uint64, error) {
	return _getTarSize(ctx, nd, dag, true)
}

// _getTarSize computes the number of bytes the tar serialization of nd
// will occupy, recursing into directories. isRoot is true only for the
// outermost call, which additionally accounts for the end-of-archive
// marker.
func _getTarSize(ctx cxt.Context, nd *mdag.Node, dag mdag.DAGService, isRoot bool) (uint64, error) {
	size := uint64(0)
	pb := new(upb.Data)
	if err := proto.Unmarshal(nd.Data, pb); err != nil {
		return 0, err
	}

	switch pb.GetType() {
	case upb.Data_Directory:
		// A directory contributes the sizes of all of its children.
		// NOTE(review): no tar header block is counted for the directory
		// entry itself — confirm whether DagArchive emits one.
		for _, ng := range dag.GetDAG(ctx, nd) {
			child, err := ng.Get(ctx)
			if err != nil {
				return 0, err
			}
			childSize, err := _getTarSize(ctx, child, dag, false)
			if err != nil {
				return 0, err
			}
			size += childSize
		}
	case upb.Data_File:
		unixSize := pb.GetFilesize()
		// One header block plus the file contents rounded up to a whole
		// number of tar blocks. BUG FIX: the padding term is now taken
		// modulo tarBlockSize; the previous formula added a full spurious
		// 512-byte block when the file size was already an exact multiple
		// of tarBlockSize (e.g. a 512-byte file counted as 1536 bytes
		// instead of 1024).
		size = tarBlockSize + unixSize + (tarBlockSize-unixSize%tarBlockSize)%tarBlockSize
	default:
		return 0, fmt.Errorf("unixfs type not supported: %s", pb.GetType())
	}

	if isRoot {
		// A tar stream terminates with two zero-filled 512-byte blocks.
		size += 2 * tarBlockSize
	}

	return size, nil
}

// Writer is a utility structure that helps to write
// unixfs merkledag nodes as a tar archive format.
// It wraps any io.Writer.
Expand Down

0 comments on commit 65963e0

Please sign in to comment.