diff --git a/core/builder.go b/core/builder.go index d5d46dd6e8e..af3a038408b 100644 --- a/core/builder.go +++ b/core/builder.go @@ -159,5 +159,10 @@ func setupNode(ctx context.Context, n *IpfsNode, cfg *BuildCfg) error { } n.Resolver = &path.Resolver{DAG: n.DAG} + err = n.loadFilesRoot() + if err != nil { + return err + } + return nil } diff --git a/core/commands/files/files.go b/core/commands/files/files.go new file mode 100644 index 00000000000..c0f32a5b9c3 --- /dev/null +++ b/core/commands/files/files.go @@ -0,0 +1,707 @@ +package commands + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + gopath "path" + "strings" + + cmds "github.com/ipfs/go-ipfs/commands" + core "github.com/ipfs/go-ipfs/core" + dag "github.com/ipfs/go-ipfs/merkledag" + mfs "github.com/ipfs/go-ipfs/mfs" + path "github.com/ipfs/go-ipfs/path" + ft "github.com/ipfs/go-ipfs/unixfs" + u "github.com/ipfs/go-ipfs/util" + + context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" +) + +var log = u.Logger("cmds/files") + +var FilesCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Manipulate unixfs files", + ShortDescription: ` +Files is an API for manipulating ipfs objects as if they were a unix filesystem. 
+`, + }, + Subcommands: map[string]*cmds.Command{ + "read": FilesReadCmd, + "write": FilesWriteCmd, + "mv": FilesMvCmd, + "cp": FilesCpCmd, + "ls": FilesLsCmd, + "mkdir": FilesMkdirCmd, + "stat": FilesStatCmd, + "rm": FilesRmCmd, + }, +} + +var FilesStatCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "display file status", + }, + + Arguments: []cmds.Argument{ + cmds.StringArg("path", true, false, "path to node to stat"), + }, + Run: func(req cmds.Request, res cmds.Response) { + node, err := req.InvocContext().GetNode() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + path, err := checkPath(req.Arguments()[0]) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + fsn, err := mfs.Lookup(node.FilesRoot, path) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + o, err := statNode(fsn) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + res.SetOutput(o) + }, + Marshalers: cmds.MarshalerMap{ + cmds.Text: func(res cmds.Response) (io.Reader, error) { + out := res.Output().(*Object) + buf := new(bytes.Buffer) + fmt.Fprintln(buf, out.Hash) + fmt.Fprintf(buf, "Size: %d\n", out.Size) + fmt.Fprintf(buf, "CumulativeSize: %d\n", out.CumulativeSize) + fmt.Fprintf(buf, "ChildBlocks: %d\n", out.Blocks) + return buf, nil + }, + }, + Type: Object{}, +} + +func statNode(fsn mfs.FSNode) (*Object, error) { + nd, err := fsn.GetNode() + if err != nil { + return nil, err + } + + k, err := nd.Key() + if err != nil { + return nil, err + } + + d, err := ft.FromBytes(nd.Data) + if err != nil { + return nil, err + } + + cumulsize, err := nd.Size() + if err != nil { + return nil, err + } + + return &Object{ + Hash: k.B58String(), + Blocks: len(nd.Links), + Size: d.GetFilesize(), + CumulativeSize: cumulsize, + }, nil +} + +var FilesCpCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "copy files into mfs", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("source", true, false, "source object to 
copy"), + cmds.StringArg("dest", true, false, "destination to copy object to"), + }, + Run: func(req cmds.Request, res cmds.Response) { + node, err := req.InvocContext().GetNode() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + src, err := checkPath(req.Arguments()[0]) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + dst, err := checkPath(req.Arguments()[1]) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + nd, err := getNodeFromPath(req.Context(), node, src) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + err = mfs.PutNode(node.FilesRoot, dst, nd) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + }, +} + +func getNodeFromPath(ctx context.Context, node *core.IpfsNode, p string) (*dag.Node, error) { + switch { + case strings.HasPrefix(p, "/ipfs/"): + np, err := path.ParsePath(p) + if err != nil { + return nil, err + } + + return core.Resolve(ctx, node, np) + default: + fsn, err := mfs.Lookup(node.FilesRoot, p) + if err != nil { + return nil, err + } + + return fsn.GetNode() + } +} + +type Object struct { + Hash string + Size uint64 + CumulativeSize uint64 + Blocks int +} + +type FilesLsOutput struct { + Entries []mfs.NodeListing +} + +var FilesLsCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "List directories", + ShortDescription: ` +List directories. 
+ +Examples: + + $ ipfs files ls /welcome/docs/ + about + contact + help + quick-start + readme + security-notes + + $ ipfs files ls /myfiles/a/b/c/d + foo + bar +`, + }, + Arguments: []cmds.Argument{ + cmds.StringArg("path", true, false, "path to show listing for"), + }, + Options: []cmds.Option{ + cmds.BoolOption("l", "use long listing format"), + }, + Run: func(req cmds.Request, res cmds.Response) { + path, err := checkPath(req.Arguments()[0]) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + nd, err := req.InvocContext().GetNode() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + fsn, err := mfs.Lookup(nd.FilesRoot, path) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + switch fsn := fsn.(type) { + case *mfs.Directory: + listing, err := fsn.List() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + res.SetOutput(&FilesLsOutput{listing}) + return + case *mfs.File: + parts := strings.Split(path, "/") + name := parts[len(parts)-1] + out := &FilesLsOutput{[]mfs.NodeListing{mfs.NodeListing{Name: name, Type: 1}}} + res.SetOutput(out) + return + default: + res.SetError(errors.New("unrecognized type"), cmds.ErrNormal) + } + }, + Marshalers: cmds.MarshalerMap{ + cmds.Text: func(res cmds.Response) (io.Reader, error) { + out := res.Output().(*FilesLsOutput) + buf := new(bytes.Buffer) + long, _, _ := res.Request().Option("l").Bool() + + for _, o := range out.Entries { + if long { + fmt.Fprintf(buf, "%s\t%s\t%d\n", o.Name, o.Hash, o.Size) + } else { + fmt.Fprintf(buf, "%s\n", o.Name) + } + } + return buf, nil + }, + }, + Type: FilesLsOutput{}, +} + +var FilesReadCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Read a file in a given mfs", + ShortDescription: ` +Read a specified number of bytes from a file at a given offset. By default, will +read the entire file similar to unix cat. 
+ +Examples: + + $ ipfs files read /test/hello + hello + `, + }, + + Arguments: []cmds.Argument{ + cmds.StringArg("path", true, false, "path to file to be read"), + }, + Options: []cmds.Option{ + cmds.IntOption("o", "offset", "offset to read from"), + cmds.IntOption("n", "count", "maximum number of bytes to read"), + }, + Run: func(req cmds.Request, res cmds.Response) { + n, err := req.InvocContext().GetNode() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + path, err := checkPath(req.Arguments()[0]) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + fsn, err := mfs.Lookup(n.FilesRoot, path) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + fi, ok := fsn.(*mfs.File) + if !ok { + res.SetError(fmt.Errorf("%s was not a file", path), cmds.ErrNormal) + return + } + + offset, _, err := req.Option("offset").Int() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + if offset < 0 { + res.SetError(fmt.Errorf("cannot specify negative offset"), cmds.ErrNormal) + return + } + + filen, err := fi.Size() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + if int64(offset) > filen { + res.SetError(fmt.Errorf("offset was past end of file (%d > %d)", offset, filen), cmds.ErrNormal) + return + } + + _, err = fi.Seek(int64(offset), os.SEEK_SET) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + var r io.Reader = fi + count, found, err := req.Option("count").Int() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + if found { + if count < 0 { + res.SetError(fmt.Errorf("cannot specify negative 'count'"), cmds.ErrNormal) + return + } + r = io.LimitReader(fi, int64(count)) + } + + res.SetOutput(r) + }, +} + +var FilesMvCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Move files", + ShortDescription: ` +Move files around. Just like traditional unix mv. 
+ +Example: + + $ ipfs files mv /myfs/a/b/c /myfs/foo/newc + +`, + }, + + Arguments: []cmds.Argument{ + cmds.StringArg("source", true, false, "source file to move"), + cmds.StringArg("dest", true, false, "target path for file to be moved to"), + }, + Run: func(req cmds.Request, res cmds.Response) { + n, err := req.InvocContext().GetNode() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + src, err := checkPath(req.Arguments()[0]) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + dst, err := checkPath(req.Arguments()[1]) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + err = mfs.Mv(n.FilesRoot, src, dst) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + }, +} + +var FilesWriteCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Write to a mutable file in a given filesystem", + ShortDescription: ` +Write data to a file in a given filesystem. This command allows you to specify +a beginning offset to write to. The entire length of the input will be written. + +If the '--create' option is specified, the file will be created if it does not +exist. Nonexistent intermediate directories will not be created. 
+ +Example: + + echo "hello world" | ipfs files write --create /myfs/a/b/file + echo "hello world" | ipfs files write --truncate /myfs/a/b/file +`, + }, + Arguments: []cmds.Argument{ + cmds.StringArg("path", true, false, "path to write to"), + cmds.FileArg("data", true, false, "data to write").EnableStdin(), + }, + Options: []cmds.Option{ + cmds.IntOption("o", "offset", "offset to write to"), + cmds.BoolOption("e", "create", "create the file if it does not exist"), + cmds.BoolOption("t", "truncate", "truncate the file before writing"), + cmds.IntOption("n", "count", "maximum number of bytes to read"), + }, + Run: func(req cmds.Request, res cmds.Response) { + path, err := checkPath(req.Arguments()[0]) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + create, _, _ := req.Option("create").Bool() + trunc, _, _ := req.Option("truncate").Bool() + + nd, err := req.InvocContext().GetNode() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + fi, err := getFileHandle(nd.FilesRoot, path, create) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + defer fi.Close() + + if trunc { + if err := fi.Truncate(0); err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + } + + offset, _, err := req.Option("offset").Int() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + if offset < 0 { + res.SetError(fmt.Errorf("cannot have negative write offset"), cmds.ErrNormal) + return + } + + count, countfound, err := req.Option("count").Int() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + if countfound && count < 0 { + res.SetError(fmt.Errorf("cannot have negative byte count"), cmds.ErrNormal) + return + } + + _, err = fi.Seek(int64(offset), os.SEEK_SET) + if err != nil { + log.Error("seekfail: ", err) + res.SetError(err, cmds.ErrNormal) + return + } + + input, err := req.Files().NextFile() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + var r io.Reader = input + if 
countfound { + r = io.LimitReader(r, int64(count)) + } + + n, err := io.Copy(fi, r) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + log.Debugf("wrote %d bytes to %s", n, path) + }, +} + +var FilesMkdirCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "make directories", + ShortDescription: ` +Create the directory if it does not already exist. + +Note: all paths must be absolute. + +Examples: + + $ ipfs mfs mkdir /test/newdir + $ ipfs mfs mkdir -p /test/does/not/exist/yet +`, + }, + + Arguments: []cmds.Argument{ + cmds.StringArg("path", true, false, "path to dir to make"), + }, + Options: []cmds.Option{ + cmds.BoolOption("p", "parents", "no error if existing, make parent directories as needed"), + }, + Run: func(req cmds.Request, res cmds.Response) { + n, err := req.InvocContext().GetNode() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + dashp, _, _ := req.Option("parents").Bool() + dirtomake, err := checkPath(req.Arguments()[0]) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + err = mfs.Mkdir(n.FilesRoot, dirtomake, dashp) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + }, +} + +var FilesRmCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "remove a file", + ShortDescription: ` +remove files or directories + + $ ipfs files rm /foo + $ ipfs files ls /bar + cat + dog + fish + $ ipfs files rm -r /bar +`, + }, + + Arguments: []cmds.Argument{ + cmds.StringArg("path", true, true, "file to remove"), + }, + Options: []cmds.Option{ + cmds.BoolOption("r", "recursive", "recursively remove directories"), + }, + Run: func(req cmds.Request, res cmds.Response) { + nd, err := req.InvocContext().GetNode() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + path, err := checkPath(req.Arguments()[0]) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + if path == "/" { + res.SetError(fmt.Errorf("cannot delete root"), cmds.ErrNormal) + 
return + } + + // 'rm a/b/c/' will fail unless we trim the slash at the end + if path[len(path)-1] == '/' { + path = path[:len(path)-1] + } + + dir, name := gopath.Split(path) + parent, err := mfs.Lookup(nd.FilesRoot, dir) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + pdir, ok := parent.(*mfs.Directory) + if !ok { + res.SetError(fmt.Errorf("no such file or directory: %s", path), cmds.ErrNormal) + return + } + + childi, err := pdir.Child(name) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + dashr, _, _ := req.Option("r").Bool() + + switch childi.(type) { + case *mfs.Directory: + if dashr { + err := pdir.Unlink(name) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + } else { + res.SetError(fmt.Errorf("%s is a directory, use -r to remove directories", path), cmds.ErrNormal) + return + } + default: + err := pdir.Unlink(name) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + } + }, +} + +func getFileHandle(r *mfs.Root, path string, create bool) (*mfs.File, error) { + + target, err := mfs.Lookup(r, path) + switch err { + case nil: + fi, ok := target.(*mfs.File) + if !ok { + return nil, fmt.Errorf("%s was not a file", path) + } + return fi, nil + + case os.ErrNotExist: + if !create { + return nil, err + } + + // if create is specified and the file doesnt exist, we create the file + dirname, fname := gopath.Split(path) + pdiri, err := mfs.Lookup(r, dirname) + if err != nil { + log.Error("lookupfail ", dirname) + return nil, err + } + pdir, ok := pdiri.(*mfs.Directory) + if !ok { + return nil, fmt.Errorf("%s was not a directory", dirname) + } + + nd := &dag.Node{Data: ft.FilePBData(nil, 0)} + err = pdir.AddChild(fname, nd) + if err != nil { + return nil, err + } + + fsn, err := pdir.Child(fname) + if err != nil { + return nil, err + } + + fi, ok := fsn.(*mfs.File) + if !ok { + return nil, errors.New("expected *mfs.File, didnt get it. 
This is likely a race condition") + } + return fi, nil + + default: + return nil, err + } +} + +func checkPath(p string) (string, error) { + if len(p) == 0 { + return "", fmt.Errorf("paths must not be empty") + } + + if p[0] != '/' { + return "", fmt.Errorf("paths must start with a leading slash") + } + + cleaned := gopath.Clean(p) + if p[len(p)-1] == '/' && p != "/" { + cleaned += "/" + } + return cleaned, nil +} diff --git a/core/commands/root.go b/core/commands/root.go index 987178058b0..17f0ace753c 100644 --- a/core/commands/root.go +++ b/core/commands/root.go @@ -5,6 +5,7 @@ import ( "strings" cmds "github.com/ipfs/go-ipfs/commands" + files "github.com/ipfs/go-ipfs/core/commands/files" unixfs "github.com/ipfs/go-ipfs/core/commands/unixfs" evlog "github.com/ipfs/go-ipfs/thirdparty/eventlog" ) @@ -94,6 +95,7 @@ var rootSubcommands = map[string]*cmds.Command{ "dht": DhtCmd, "diag": DiagCmd, "dns": DNSCmd, + "files": files.FilesCmd, "get": GetCmd, "id": IDCmd, "log": LogCmd, diff --git a/core/core.go b/core/core.go index 346bbc6d34b..ce5cd9bb62d 100644 --- a/core/core.go +++ b/core/core.go @@ -17,6 +17,7 @@ import ( "time" b58 "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-base58" + ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" ma "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" goprocess "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess" mamask "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/whyrusleeping/multiaddr-filter" @@ -40,11 +41,13 @@ import ( offroute "github.com/ipfs/go-ipfs/routing/offline" bstore "github.com/ipfs/go-ipfs/blocks/blockstore" + key "github.com/ipfs/go-ipfs/blocks/key" bserv "github.com/ipfs/go-ipfs/blockservice" exchange "github.com/ipfs/go-ipfs/exchange" bitswap "github.com/ipfs/go-ipfs/exchange/bitswap" bsnet 
"github.com/ipfs/go-ipfs/exchange/bitswap/network" rp "github.com/ipfs/go-ipfs/exchange/reprovide" + mfs "github.com/ipfs/go-ipfs/mfs" mount "github.com/ipfs/go-ipfs/fuse/mount" merkledag "github.com/ipfs/go-ipfs/merkledag" @@ -53,6 +56,7 @@ import ( pin "github.com/ipfs/go-ipfs/pin" repo "github.com/ipfs/go-ipfs/repo" config "github.com/ipfs/go-ipfs/repo/config" + uio "github.com/ipfs/go-ipfs/unixfs/io" ) const IpnsValidatorTag = "ipns" @@ -92,6 +96,7 @@ type IpfsNode struct { Resolver *path.Resolver // the path resolution system Reporter metrics.Reporter Discovery discovery.Service + FilesRoot *mfs.Root // Online PeerHost p2phost.Host // the network host (server+client) @@ -249,8 +254,14 @@ func (n *IpfsNode) teardown() error { log.Debug("core is shutting down...") // owned objects are closed in this teardown to ensure that they're closed // regardless of which constructor was used to add them to the node. - closers := []io.Closer{ - n.Repo, + var closers []io.Closer + + // NOTE: the order that objects are added(closed) matters, if an object + // needs to use another during its shutdown/cleanup process, it should be + // closed before that other object + + if n.FilesRoot != nil { + closers = append(closers, n.FilesRoot) } if n.Exchange != nil { @@ -264,6 +275,10 @@ func (n *IpfsNode) teardown() error { closers = append(closers, mount.Closer(n.Mounts.Ipns)) } + if dht, ok := n.Routing.(*dht.IpfsDHT); ok { + closers = append(closers, dht.Process()) + } + if n.Blocks != nil { closers = append(closers, n.Blocks) } @@ -272,14 +287,13 @@ func (n *IpfsNode) teardown() error { closers = append(closers, n.Bootstrapper) } - if dht, ok := n.Routing.(*dht.IpfsDHT); ok { - closers = append(closers, dht.Process()) - } - if n.PeerHost != nil { closers = append(closers, n.PeerHost) } + // Repo closed last, most things need to preserve state here + closers = append(closers, n.Repo) + var errs []error for _, closer := range closers { if err := 
closer.Close(); err != nil { @@ -390,6 +404,41 @@ func (n *IpfsNode) loadBootstrapPeers() ([]peer.PeerInfo, error) { return toPeerInfos(parsed), nil } +func (n *IpfsNode) loadFilesRoot() error { + dsk := ds.NewKey("/local/filesroot") + pf := func(ctx context.Context, k key.Key) error { + return n.Repo.Datastore().Put(dsk, []byte(k)) + } + + var nd *merkledag.Node + val, err := n.Repo.Datastore().Get(dsk) + + switch { + case err == ds.ErrNotFound || val == nil: + nd = uio.NewEmptyDirectory() + _, err := n.DAG.Add(nd) + if err != nil { + return fmt.Errorf("failure writing to dagstore: %s", err) + } + case err == nil: + k := key.Key(val.([]byte)) + nd, err = n.DAG.Get(n.Context(), k) + if err != nil { + return fmt.Errorf("error loading filesroot from DAG: %s", err) + } + default: + return err + } + + mr, err := mfs.NewRoot(n.Context(), n.DAG, nd, pf) + if err != nil { + return err + } + + n.FilesRoot = mr + return nil +} + // SetupOfflineRouting loads the local nodes private key and // uses it to instantiate a routing system in offline mode. // This is primarily used for offline ipns modifications. 
diff --git a/mfs/ops.go b/mfs/ops.go index 75f187f528b..33514fc67a1 100644 --- a/mfs/ops.go +++ b/mfs/ops.go @@ -3,10 +3,149 @@ package mfs import ( "errors" "fmt" + "os" + gopath "path" "strings" + + dag "github.com/ipfs/go-ipfs/merkledag" ) -func rootLookup(r *Root, path string) (FSNode, error) { +// Mv moves the file or directory at 'src' to 'dst' +func Mv(r *Root, src, dst string) error { + srcDir, srcFname := gopath.Split(src) + + var dstDirStr string + var filename string + if dst[len(dst)-1] == '/' { + dstDirStr = dst + filename = srcFname + } else { + dstDirStr, filename = gopath.Split(dst) + } + + // get parent directories of both src and dest first + dstDir, err := lookupDir(r, dstDirStr) + if err != nil { + return err + } + + srcDirObj, err := lookupDir(r, srcDir) + if err != nil { + return err + } + + srcObj, err := srcDirObj.Child(srcFname) + if err != nil { + return err + } + + nd, err := srcObj.GetNode() + if err != nil { + return err + } + + fsn, err := dstDir.Child(filename) + if err == nil { + switch n := fsn.(type) { + case *File: + _ = dstDir.Unlink(filename) + case *Directory: + dstDir = n + default: + return fmt.Errorf("unexpected type at path: %s", dst) + } + } else if err != os.ErrNotExist { + return err + } + + err = dstDir.AddChild(filename, nd) + if err != nil { + return err + } + + err = srcDirObj.Unlink(srcFname) + if err != nil { + return err + } + + return nil +} + +func lookupDir(r *Root, path string) (*Directory, error) { + di, err := Lookup(r, path) + if err != nil { + return nil, err + } + + d, ok := di.(*Directory) + if !ok { + return nil, fmt.Errorf("%s is not a directory", path) + } + + return d, nil +} + +// PutNode inserts 'nd' at 'path' in the given mfs +func PutNode(r *Root, path string, nd *dag.Node) error { + dirp, filename := gopath.Split(path) + + pdir, err := lookupDir(r, dirp) + if err != nil { + return err + } + + return pdir.AddChild(filename, nd) +} + +// Mkdir creates a directory at 'path' under the directory 
'd', creating +// intermediary directories as needed if 'parents' is set to true +func Mkdir(r *Root, path string, parents bool) error { + parts := strings.Split(path, "/") + if parts[0] == "" { + parts = parts[1:] + } + + // allow 'mkdir /a/b/c/' to create c + if parts[len(parts)-1] == "" { + parts = parts[:len(parts)-1] + } + + if len(parts) == 0 { + // this will only happen on 'mkdir /' + return fmt.Errorf("cannot mkdir '%s'", path) + } + + cur := r.GetValue().(*Directory) + for i, d := range parts[:len(parts)-1] { + fsn, err := cur.Child(d) + if err == os.ErrNotExist && parents { + mkd, err := cur.Mkdir(d) + if err != nil { + return err + } + fsn = mkd + } else if err != nil { + return err + } + + next, ok := fsn.(*Directory) + if !ok { + return fmt.Errorf("%s was not a directory", strings.Join(parts[:i], "/")) + } + cur = next + } + + _, err := cur.Mkdir(parts[len(parts)-1]) + if err != nil { + if !parents || err != os.ErrExist { + return err + } + } + + return nil +} + +func Lookup(r *Root, path string) (FSNode, error) { dir, ok := r.GetValue().(*Directory) if !ok { return nil, errors.New("root was not a directory") diff --git a/test/sharness/t0250-files-api.sh b/test/sharness/t0250-files-api.sh new file mode 100755 index 00000000000..4ee01bb114a --- /dev/null +++ b/test/sharness/t0250-files-api.sh @@ -0,0 +1,343 @@ +#!/bin/sh +# +# Copyright (c) 2015 Jeromy Johnson +# MIT Licensed; see the LICENSE file in this repository. +# + +test_description="test the unix files api" + +. 
lib/test-lib.sh + +test_init_ipfs + +# setup files for testing +test_expect_success "can create some files for testing" ' + FILE1=$(echo foo | ipfs add -q) && + FILE2=$(echo bar | ipfs add -q) && + FILE3=$(echo baz | ipfs add -q) && + mkdir stuff_test && + echo cats > stuff_test/a && + echo dogs > stuff_test/b && + echo giraffes > stuff_test/c && + DIR1=$(ipfs add -q stuff_test | tail -n1) +' + +verify_path_exists() { + # simply running ls on a file should be a good 'check' + ipfs files ls $1 +} + +verify_dir_contents() { + dir=$1 + shift + rm -f expected + touch expected + for e in $@ + do + echo $e >> expected + done + + test_expect_success "can list dir" ' + ipfs files ls $dir > output + ' + + test_expect_success "dir entries look good" ' + test_sort_cmp output expected + ' +} + +test_files_api() { + test_expect_success "can mkdir in root" ' + ipfs files mkdir /cats + ' + + test_expect_success "directory was created" ' + verify_path_exists /cats + ' + + test_expect_success "directory is empty" ' + verify_dir_contents /cats + ' + + test_expect_success "check root hash" ' + ipfs files stat / | head -n1 > roothash + ' + + test_expect_success "cannot mkdir /" ' + test_expect_code 1 ipfs files mkdir / + ' + + test_expect_success "check root hash was not changed" ' + ipfs files stat / | head -n1 > roothashafter && + test_cmp roothash roothashafter + ' + + test_expect_success "can put files into directory" ' + ipfs files cp /ipfs/$FILE1 /cats/file1 + ' + + test_expect_success "file shows up in directory" ' + verify_dir_contents /cats file1 + ' + + test_expect_success "can read file" ' + ipfs files read /cats/file1 > file1out + ' + + test_expect_success "output looks good" ' + echo foo > expected && + test_cmp expected file1out + ' + + test_expect_success "can put another file into root" ' + ipfs files cp /ipfs/$FILE2 /file2 + ' + + test_expect_success "file shows up in root" ' + verify_dir_contents / file2 cats + ' + + test_expect_success "can read file" ' + ipfs files 
read /file2 > file2out + ' + + test_expect_success "output looks good" ' + echo bar > expected && + test_cmp expected file2out + ' + + test_expect_success "can make deep directory" ' + ipfs files mkdir -p /cats/this/is/a/dir + ' + + test_expect_success "directory was created correctly" ' + verify_path_exists /cats/this/is/a/dir && + verify_dir_contents /cats this file1 && + verify_dir_contents /cats/this is && + verify_dir_contents /cats/this/is a && + verify_dir_contents /cats/this/is/a dir && + verify_dir_contents /cats/this/is/a/dir + ' + + test_expect_success "can copy file into new dir" ' + ipfs files cp /ipfs/$FILE3 /cats/this/is/a/dir/file3 + ' + + test_expect_success "can read file" ' + ipfs files read /cats/this/is/a/dir/file3 > output + ' + + test_expect_success "output looks good" ' + echo baz > expected && + test_cmp expected output + ' + + test_expect_success "file shows up in dir" ' + verify_dir_contents /cats/this/is/a/dir file3 + ' + + test_expect_success "can remove file" ' + ipfs files rm /cats/this/is/a/dir/file3 + ' + + test_expect_success "file no longer appears" ' + verify_dir_contents /cats/this/is/a/dir + ' + + test_expect_success "can remove dir" ' + ipfs files rm -r /cats/this/is/a/dir + ' + + test_expect_success "dir no longer appears" ' + verify_dir_contents /cats/this/is/a + ' + + test_expect_success "can remove file from root" ' + ipfs files rm /file2 + ' + + test_expect_success "file no longer appears" ' + verify_dir_contents / cats + ' + + test_expect_success "check root hash" ' + ipfs files stat / | head -n1 > roothash + ' + + test_expect_success "cannot remove root" ' + test_expect_code 1 ipfs files rm -r / + ' + + test_expect_success "check root hash was not changed" ' + ipfs files stat / | head -n1 > roothashafter && + test_cmp roothash roothashafter + ' + + # test read options + + test_expect_success "read from offset works" ' + ipfs files read -o 1 /cats/file1 > output + ' + + test_expect_success "output looks good" ' + echo oo 
> expected && + test_cmp expected output + ' + + test_expect_success "read with size works" ' + ipfs files read -n 2 /cats/file1 > output + ' + + test_expect_success "output looks good" ' + printf fo > expected && + test_cmp expected output + ' + + test_expect_success "cannot read from negative offset" ' + test_expect_code 1 ipfs files read --offset -3 /cats/file1 + ' + + test_expect_success "read from offset 0 works" ' + ipfs files read --offset 0 /cats/file1 > output + ' + + test_expect_success "output looks good" ' + echo foo > expected && + test_cmp expected output + ' + + test_expect_success "read last byte works" ' + ipfs files read --offset 2 /cats/file1 > output + ' + + test_expect_success "output looks good" ' + echo o > expected && + test_cmp expected output + ' + + test_expect_success "offset past end of file fails" ' + test_expect_code 1 ipfs files read --offset 5 /cats/file1 + ' + + test_expect_success "cannot read negative count bytes" ' + test_expect_code 1 ipfs files read --count -1 /cats/file1 + ' + + test_expect_success "reading zero bytes prints nothing" ' + ipfs files read --count 0 /cats/file1 > output + ' + + test_expect_success "output looks good" ' + printf "" > expected && + test_cmp expected output + ' + + test_expect_success "count > len(file) prints entire file" ' + ipfs files read --count 200 /cats/file1 > output + ' + + test_expect_success "output looks good" ' + echo foo > expected && + test_cmp expected output + ' + + # test write + + test_expect_success "can write file" ' + echo "ipfs rocks" > tmpfile && + cat tmpfile | ipfs files write --create /cats/ipfs + ' + + test_expect_success "file was created" ' + verify_dir_contents /cats ipfs file1 this + ' + + test_expect_success "can read file we just wrote" ' + ipfs files read /cats/ipfs > output + ' + + test_expect_success "can write to offset" ' + echo "is super cool" | ipfs files write -o 5 /cats/ipfs + ' + + test_expect_success "file looks correct" ' + echo "ipfs is super cool" > 
expected && + ipfs files read /cats/ipfs > output && + test_cmp expected output + ' + + test_expect_success "cant write to negative offset" ' + ipfs files stat /cats/ipfs | head -n1 > filehash && + test_expect_code 1 ipfs files write --offset -1 /cats/ipfs < output + ' + + test_expect_success "verify file was not changed" ' + ipfs files stat /cats/ipfs | head -n1 > afterhash && + test_cmp filehash afterhash + ' + + test_expect_success "write new file for testing" ' + echo foobar | ipfs files write --create /fun + ' + + test_expect_success "write to offset past end works" ' + echo blah | ipfs files write --offset 50 /fun + ' + + test_expect_success "can read file" ' + ipfs files read /fun > sparse_output + ' + + test_expect_success "output looks good" ' + echo foobar > sparse_expected && + echo blah | dd of=sparse_expected bs=50 seek=1 && + test_cmp sparse_expected sparse_output + ' + + test_expect_success "cleanup" ' + ipfs files rm /fun + ' + + test_expect_success "cannot write to directory" ' + ipfs files stat /cats | head -n1 > dirhash && + test_expect_code 1 ipfs files write /cats < output + ' + + test_expect_success "verify dir was not changed" ' + ipfs files stat /cats | head -n1 > afterdirhash && + test_cmp dirhash afterdirhash + ' + + test_expect_success "cannot write to nonexistant path" ' + test_expect_code 1 ipfs files write /cats/bar/ < output + ' + + test_expect_success "no new paths were created" ' + verify_dir_contents /cats file1 ipfs this + ' + + # test mv + test_expect_success "can mv dir" ' + ipfs files mv /cats/this/is /cats/ + ' + + test_expect_success "mv worked" ' + verify_dir_contents /cats file1 ipfs this is && + verify_dir_contents /cats/this + ' + + test_expect_success "cleanup, remove 'cats'" ' + ipfs files rm -r /cats + ' + + test_expect_success "cleanup looks good" ' + verify_dir_contents / + ' +} + +# test offline and online +test_files_api +test_launch_ipfs_daemon +test_files_api +test_kill_ipfs_daemon +test_done diff --git 
a/unixfs/mod/dagmodifier.go b/unixfs/mod/dagmodifier.go index d2ad2fd8ff0..40cee0995c2 100644 --- a/unixfs/mod/dagmodifier.go +++ b/unixfs/mod/dagmodifier.go @@ -368,19 +368,31 @@ func (dm *DagModifier) Seek(offset int64, whence int) (int64, error) { return 0, err } + fisize, err := dm.Size() + if err != nil { + return 0, err + } + + var newoffset uint64 switch whence { case os.SEEK_CUR: - dm.curWrOff += uint64(offset) - dm.writeStart = dm.curWrOff + newoffset = dm.curWrOff + uint64(offset) case os.SEEK_SET: - dm.curWrOff = uint64(offset) - dm.writeStart = uint64(offset) + newoffset = uint64(offset) case os.SEEK_END: return 0, ErrSeekEndNotImpl default: return 0, ErrUnrecognizedWhence } + if offset > fisize { + if err := dm.expandSparse(offset - fisize); err != nil { + return 0, err + } + } + dm.curWrOff = newoffset + dm.writeStart = newoffset + if dm.read != nil { _, err = dm.read.Seek(offset, whence) if err != nil { diff --git a/unixfs/mod/dagmodifier_test.go b/unixfs/mod/dagmodifier_test.go index 6f53a90d1eb..f3341690c08 100644 --- a/unixfs/mod/dagmodifier_test.go +++ b/unixfs/mod/dagmodifier_test.go @@ -487,6 +487,53 @@ func TestSparseWrite(t *testing.T) { } } +func TestSeekPastEndWrite(t *testing.T) { + dserv := getMockDagServ(t) + _, n := getNode(t, dserv, 0) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + dagmod, err := NewDagModifier(ctx, n, dserv, sizeSplitterGen(512)) + if err != nil { + t.Fatal(err) + } + + buf := make([]byte, 5000) + u.NewTimeSeededRand().Read(buf[2500:]) + + nseek, err := dagmod.Seek(2500, os.SEEK_SET) + if err != nil { + t.Fatal(err) + } + + if nseek != 2500 { + t.Fatal("failed to seek") + } + + wrote, err := dagmod.Write(buf[2500:]) + if err != nil { + t.Fatal(err) + } + + if wrote != 2500 { + t.Fatal("incorrect write amount") + } + + _, err = dagmod.Seek(0, os.SEEK_SET) + if err != nil { + t.Fatal(err) + } + + out, err := ioutil.ReadAll(dagmod) + if err != nil { + t.Fatal(err) + } + + if err = 
arrComp(out, buf); err != nil { + t.Fatal(err) + } +} + func BenchmarkDagmodWrite(b *testing.B) { b.StopTimer() dserv := getMockDagServ(b)