From 30f7aa9b15b02d70f5e4f3afa3453993ee065014 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Thu, 1 Sep 2016 07:50:27 -0700 Subject: [PATCH] integrate CIDv0 License: MIT Signed-off-by: Jeromy --- assets/assets.go | 20 +- blocks/blocks.go | 10 +- blocks/blocks_test.go | 2 +- blocks/blockstore/arc_cache.go | 8 +- blocks/blockstore/blockstore.go | 4 +- blocks/blockstore/blockstore_test.go | 2 +- blocks/blockstore/bloom_cache.go | 7 +- blockservice/blockservice.go | 87 ++++--- blockservice/test/blocks_test.go | 64 +++-- core/commands/bitswap.go | 9 +- core/commands/block.go | 44 ++-- core/commands/dht.go | 41 +-- core/commands/files/files.go | 7 +- core/commands/ls.go | 2 +- core/commands/object/object.go | 17 +- core/commands/object/patch.go | 20 +- core/commands/pin.go | 32 ++- core/commands/refs.go | 50 ++-- core/commands/resolve.go | 8 +- core/commands/tar.go | 8 +- core/commands/unixfs/ls.go | 10 +- core/core.go | 14 +- core/corehttp/gateway_handler.go | 55 ++-- core/corerepo/gc.go | 11 +- core/corerepo/pinning.go | 25 +- core/coreunix/add.go | 55 ++-- core/coreunix/add_test.go | 13 +- core/coreunix/metadata.go | 18 +- core/coreunix/metadata_test.go | 24 +- core/pathresolver.go | 14 +- exchange/bitswap/bitswap.go | 18 +- exchange/bitswap/bitswap_test.go | 7 +- exchange/bitswap/decision/engine.go | 6 +- exchange/bitswap/decision/engine_test.go | 2 +- exchange/bitswap/message/message.go | 2 +- .../notifications/notifications_test.go | 2 +- exchange/bitswap/testnet/network_test.go | 2 +- exchange/interface.go | 1 + exchange/offline/offline.go | 1 + fuse/ipns/common.go | 2 +- fuse/ipns/ipns_unix.go | 5 +- fuse/readonly/readonly_unix.go | 7 +- importer/balanced/balanced_test.go | 25 ++ merkledag/coding.go | 7 +- merkledag/merkledag.go | 206 ++++++++------- merkledag/merkledag_test.go | 90 +++---- merkledag/node.go | 56 ++-- merkledag/node_test.go | 8 +- merkledag/traverse/traverse.go | 6 +- merkledag/utils/diff.go | 31 +-- merkledag/utils/utils_test.go | 29 +-- mfs/dir.go | 7 +- mfs/mfs_test.go | 22 +- mfs/repub_test.go | 8 +- mfs/system.go | 46 ++-- namesys/dns.go | 2 +- namesys/publisher.go | 2 +- namesys/routing.go | 4 +- package.json | 6 + path/path.go | 30 +-- path/resolver.go | 14 +- path/resolver_test.go | 12 +- pin/gc/gc.go | 25 +- pin/pin.go | 243 ++++++++++-------- pin/pin_test.go | 18 +- pin/set.go | 104 +++----- pin/set_test.go | 92 +------ test/integration/bitswap_wo_routing_test.go | 10 +- test/sharness/t0050-block.sh | 2 +- unixfs/io/dirbuilder.go | 6 +- unixfs/mod/dagmodifier.go | 27 +- 71 files changed, 900 insertions(+), 974 deletions(-) diff --git a/assets/assets.go b/assets/assets.go index 4965b0f5afea..7758a601848f 100644 --- a/assets/assets.go +++ b/assets/assets.go @@ -8,10 +8,10 @@ import ( "fmt" "path/filepath" - "github.com/ipfs/go-ipfs/blocks/key" "github.com/ipfs/go-ipfs/core" "github.com/ipfs/go-ipfs/core/coreunix" uio "github.com/ipfs/go-ipfs/unixfs/io" + cid "gx/ipfs/QmfSc2xehWmWLnwwYR91Y8QF4xdASypTFVknutoKQS3GHp/go-cid" ) // initDocPaths lists the paths for the docs we want to seed during --init @@ -25,7 +25,7 @@ var initDocPaths = []string{ } // SeedInitDocs adds the list of embedded init documentation to the passed node, pins it and returns the root key -func SeedInitDocs(nd *core.IpfsNode) (*key.Key, error) { +func SeedInitDocs(nd *core.IpfsNode) (*cid.Cid, error) { return addAssetList(nd, initDocPaths) } @@ -34,11 +34,11 @@ var initDirIndex = []string{ filepath.Join("..", "vendor", "dir-index-html-v1.0.0", "dir-index.html"), } -func 
SeedInitDirIndex(nd *core.IpfsNode) (*key.Key, error) { +func SeedInitDirIndex(nd *core.IpfsNode) (*cid.Cid, error) { return addAssetList(nd, initDirIndex) } -func addAssetList(nd *core.IpfsNode, l []string) (*key.Key, error) { +func addAssetList(nd *core.IpfsNode, l []string) (*cid.Cid, error) { dirb := uio.NewDirectory(nd.DAG) for _, p := range l { @@ -53,14 +53,18 @@ func addAssetList(nd *core.IpfsNode, l []string) (*key.Key, error) { } fname := filepath.Base(p) - k := key.B58KeyDecode(s) - if err := dirb.AddChild(nd.Context(), fname, k); err != nil { + c, err := cid.Decode(s) + if err != nil { + return nil, err + } + + if err := dirb.AddChild(nd.Context(), fname, c); err != nil { return nil, fmt.Errorf("assets: could not add '%s' as a child: %s", fname, err) } } dir := dirb.GetNode() - dkey, err := nd.DAG.Add(dir) + dcid, err := nd.DAG.Add(dir) if err != nil { return nil, fmt.Errorf("assets: DAG.Add(dir) failed: %s", err) } @@ -73,5 +77,5 @@ func addAssetList(nd *core.IpfsNode, l []string) (*key.Key, error) { return nil, fmt.Errorf("assets: Pinning flush failed: %s", err) } - return &dkey, nil + return dcid, nil } diff --git a/blocks/blocks.go b/blocks/blocks.go index c41e1323a11f..d8f5a11d8144 100644 --- a/blocks/blocks.go +++ b/blocks/blocks.go @@ -7,15 +7,17 @@ import ( "fmt" key "github.com/ipfs/go-ipfs/blocks/key" + mh "gx/ipfs/QmYf7ng2hG5XBtJA3tN34DQ2GUN5HNksEw1rLDkmr6vGku/go-multihash" u "gx/ipfs/QmZNVWh8LLjAavuQ2JXuFmuYH3C11xo988vSgp7UQrTRj1/go-ipfs-util" + cid "gx/ipfs/QmfSc2xehWmWLnwwYR91Y8QF4xdASypTFVknutoKQS3GHp/go-cid" ) var ErrWrongHash = errors.New("data did not match given hash!") type Block interface { Multihash() mh.Multihash - Data() []byte + RawData() []byte Key() key.Key String() string Loggable() map[string]interface{} @@ -49,10 +51,14 @@ func (b *BasicBlock) Multihash() mh.Multihash { return b.multihash } -func (b *BasicBlock) Data() []byte { +func (b *BasicBlock) RawData() []byte { return b.data } +func (b *BasicBlock) Cid() *cid.Cid { + return cid.NewCidV0(b.multihash) +} + // Key returns the block's Multihash as a Key value. func (b *BasicBlock) Key() key.Key { return key.Key(b.multihash) diff --git a/blocks/blocks_test.go b/blocks/blocks_test.go index 1fecff844571..07f13de29cd9 100644 --- a/blocks/blocks_test.go +++ b/blocks/blocks_test.go @@ -25,7 +25,7 @@ func TestData(t *testing.T) { data := []byte("some data") block := NewBlock(data) - if !bytes.Equal(block.Data(), data) { + if !bytes.Equal(block.RawData(), data) { t.Error("data is wrong") } } diff --git a/blocks/blockstore/arc_cache.go b/blocks/blockstore/arc_cache.go index c0ec192312c7..17349e9be88d 100644 --- a/blocks/blockstore/arc_cache.go +++ b/blocks/blockstore/arc_cache.go @@ -99,12 +99,14 @@ func (b *arccache) PutMany(bs []blocks.Block) error { good = append(good, block) } } - err := b.blockstore.PutMany(bs) + + err := b.blockstore.PutMany(good) if err != nil { return err } - for _, block := range bs { - b.arc.Add(block.Key(), true) + + for _, blk := range good { + b.arc.Add(blk.Key(), true) } return nil } diff --git a/blocks/blockstore/blockstore.go b/blocks/blockstore/blockstore.go index f96178b44330..dc0f3613413c 100644 --- a/blocks/blockstore/blockstore.go +++ b/blocks/blockstore/blockstore.go @@ -117,7 +117,7 @@ func (bs *blockstore) Put(block blocks.Block) error { if err == nil && exists { return nil // already stored. 
} - return bs.datastore.Put(k, block.Data()) + return bs.datastore.Put(k, block.RawData()) } func (bs *blockstore) PutMany(blocks []blocks.Block) error { @@ -132,7 +132,7 @@ func (bs *blockstore) PutMany(blocks []blocks.Block) error { continue } - err = t.Put(k, b.Data()) + err = t.Put(k, b.RawData()) if err != nil { return err } diff --git a/blocks/blockstore/blockstore_test.go b/blocks/blockstore/blockstore_test.go index fc78ca6e9b12..e4b6931ae655 100644 --- a/blocks/blockstore/blockstore_test.go +++ b/blocks/blockstore/blockstore_test.go @@ -48,7 +48,7 @@ func TestPutThenGetBlock(t *testing.T) { if err != nil { t.Fatal(err) } - if !bytes.Equal(block.Data(), blockFromBlockstore.Data()) { + if !bytes.Equal(block.RawData(), blockFromBlockstore.RawData()) { t.Fail() } } diff --git a/blocks/blockstore/bloom_cache.go b/blocks/blockstore/bloom_cache.go index e10dacfaf1b9..680908b5c8de 100644 --- a/blocks/blockstore/bloom_cache.go +++ b/blocks/blockstore/bloom_cache.go @@ -132,10 +132,11 @@ func (b *bloomcache) PutMany(bs []blocks.Block) error { good = append(good, block) } } - err := b.blockstore.PutMany(bs) + + err := b.blockstore.PutMany(good) if err == nil { - for _, block := range bs { - b.bloom.AddTS([]byte(block.Key())) + for _, blk := range good { + b.bloom.AddTS([]byte(blk.Key())) } } return err diff --git a/blockservice/blockservice.go b/blockservice/blockservice.go index 25282a441454..f98c0f96f482 100644 --- a/blockservice/blockservice.go +++ b/blockservice/blockservice.go @@ -5,13 +5,16 @@ package blockservice import ( "errors" + "fmt" blocks "github.com/ipfs/go-ipfs/blocks" "github.com/ipfs/go-ipfs/blocks/blockstore" key "github.com/ipfs/go-ipfs/blocks/key" exchange "github.com/ipfs/go-ipfs/exchange" + logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + cid "gx/ipfs/QmfSc2xehWmWLnwwYR91Y8QF4xdASypTFVknutoKQS3GHp/go-cid" ) var log = logging.Logger("blockservice") @@ -27,6 +30,12 @@ type BlockService struct { Exchange exchange.Interface } +// an Object is simply a typed block +type Object interface { + Cid() *cid.Cid + blocks.Block +} + // NewBlockService creates a BlockService with given datastore instance. func New(bs blockstore.Blockstore, rem exchange.Interface) *BlockService { if rem == nil { @@ -41,30 +50,41 @@ func New(bs blockstore.Blockstore, rem exchange.Interface) *BlockService { // AddBlock adds a particular block to the service, Putting it into the datastore. // TODO pass a context into this if the remote.HasBlock is going to remain here. -func (s *BlockService) AddBlock(b blocks.Block) (key.Key, error) { - k := b.Key() - has, err := s.Blockstore.Has(k) +func (s *BlockService) AddObject(o Object) (*cid.Cid, error) { + // TODO: while this is a great optimization, we should think about the + // possibility of streaming writes directly to disk. If we can pass this object + // all the way down to the datastore without having to 'buffer' its data, + // we could implement a `WriteTo` method on it that could do a streaming write + // of the content, saving us (probably) considerable memory. 
+ c := o.Cid() + has, err := s.Blockstore.Has(key.Key(c.Hash())) if err != nil { - return k, err + return nil, err } + if has { - return k, nil + return c, nil } - err = s.Blockstore.Put(b) + err = s.Blockstore.Put(o) if err != nil { - return k, err + return nil, err } - if err := s.Exchange.HasBlock(b); err != nil { - return "", errors.New("blockservice is closed") + + if err := s.Exchange.HasBlock(o); err != nil { + return nil, errors.New("blockservice is closed") } - return k, nil + + return c, nil } -func (s *BlockService) AddBlocks(bs []blocks.Block) ([]key.Key, error) { +func (s *BlockService) AddObjects(bs []Object) ([]*cid.Cid, error) { var toput []blocks.Block + var toputcids []*cid.Cid for _, b := range bs { - has, err := s.Blockstore.Has(b.Key()) + c := b.Cid() + + has, err := s.Blockstore.Has(key.Key(c.Hash())) if err != nil { return nil, err } @@ -74,6 +94,7 @@ func (s *BlockService) AddBlocks(bs []blocks.Block) ([]key.Key, error) { } toput = append(toput, b) + toputcids = append(toputcids, c) } err := s.Blockstore.PutMany(toput) @@ -81,26 +102,25 @@ func (s *BlockService) AddBlocks(bs []blocks.Block) ([]key.Key, error) { return nil, err } - var ks []key.Key - for _, b := range toput { - if err := s.Exchange.HasBlock(b); err != nil { - return nil, errors.New("blockservice is closed") + var ks []*cid.Cid + for _, o := range toput { + if err := s.Exchange.HasBlock(o); err != nil { + return nil, fmt.Errorf("blockservice is closed (%s)", err) } - ks = append(ks, b.Key()) + + c := o.(Object).Cid() // cast is safe, we created these + ks = append(ks, c) } return ks, nil } // GetBlock retrieves a particular block from the service, // Getting it from the datastore using the key (hash). -func (s *BlockService) GetBlock(ctx context.Context, k key.Key) (blocks.Block, error) { - if k == "" { - log.Debug("BlockService GetBlock: Nil Key") - return nil, ErrNotFound - } +func (s *BlockService) GetBlock(ctx context.Context, c *cid.Cid) (blocks.Block, error) { + log.Debugf("BlockService GetBlock: '%s'", c) - log.Debugf("BlockService GetBlock: '%s'", k) - block, err := s.Blockstore.Get(k) + // TODO: blockstore shouldnt care about Cids, need an easier way to strip the abstraction + block, err := s.Blockstore.Get(key.Key(c.Hash())) if err == nil { return block, nil } @@ -109,7 +129,7 @@ func (s *BlockService) GetBlock(ctx context.Context, k key.Key) (blocks.Block, e // TODO be careful checking ErrNotFound. If the underlying // implementation changes, this will break. log.Debug("Blockservice: Searching bitswap") - blk, err := s.Exchange.GetBlock(ctx, k) + blk, err := s.Exchange.GetBlock(ctx, key.Key(c.Hash())) if err != nil { if err == blockstore.ErrNotFound { return nil, ErrNotFound @@ -130,12 +150,13 @@ func (s *BlockService) GetBlock(ctx context.Context, k key.Key) (blocks.Block, e // GetBlocks gets a list of blocks asynchronously and returns through // the returned channel. // NB: No guarantees are made about order. 
-func (s *BlockService) GetBlocks(ctx context.Context, ks []key.Key) <-chan blocks.Block { +func (s *BlockService) GetBlocks(ctx context.Context, ks []*cid.Cid) <-chan blocks.Block { out := make(chan blocks.Block, 0) go func() { defer close(out) var misses []key.Key - for _, k := range ks { + for _, c := range ks { + k := key.Key(c.Hash()) hit, err := s.Blockstore.Get(k) if err != nil { misses = append(misses, k) @@ -171,11 +192,19 @@ func (s *BlockService) GetBlocks(ctx context.Context, ks []key.Key) <-chan block } // DeleteBlock deletes a block in the blockservice from the datastore -func (s *BlockService) DeleteBlock(k key.Key) error { - return s.Blockstore.DeleteBlock(k) +func (s *BlockService) DeleteObject(o Object) error { + return s.Blockstore.DeleteBlock(o.Key()) } func (s *BlockService) Close() error { log.Debug("blockservice is shutting down...") return s.Exchange.Close() } + +type RawBlockObject struct { + blocks.Block +} + +func (rob *RawBlockObject) Cid() *cid.Cid { + return cid.NewCidV0(rob.Block.Multihash()) +} diff --git a/blockservice/test/blocks_test.go b/blockservice/test/blocks_test.go index 81d61818b34e..a64264dab124 100644 --- a/blockservice/test/blocks_test.go +++ b/blockservice/test/blocks_test.go @@ -2,80 +2,98 @@ package bstest import ( "bytes" + "fmt" "testing" "time" blocks "github.com/ipfs/go-ipfs/blocks" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" - blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil" key "github.com/ipfs/go-ipfs/blocks/key" . "github.com/ipfs/go-ipfs/blockservice" offline "github.com/ipfs/go-ipfs/exchange/offline" + ds "gx/ipfs/QmNgqJarToRiq2GBaPJhkmW4B5BxS5B74E1rkGvv2JoaTp/go-datastore" dssync "gx/ipfs/QmNgqJarToRiq2GBaPJhkmW4B5BxS5B74E1rkGvv2JoaTp/go-datastore/sync" u "gx/ipfs/QmZNVWh8LLjAavuQ2JXuFmuYH3C11xo988vSgp7UQrTRj1/go-ipfs-util" "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + cid "gx/ipfs/QmfSc2xehWmWLnwwYR91Y8QF4xdASypTFVknutoKQS3GHp/go-cid" ) +func newObject(data []byte) *testObject { + return &testObject{ + Block: blocks.NewBlock(data), + } +} + +type testObject struct { + blocks.Block +} + +func (o *testObject) Cid() *cid.Cid { + return cid.NewCidV0(o.Block.Multihash()) +} + func TestBlocks(t *testing.T) { bstore := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) bs := New(bstore, offline.Exchange(bstore)) defer bs.Close() - _, err := bs.GetBlock(context.Background(), key.Key("")) - if err != ErrNotFound { - t.Error("Empty String Key should error", err) - } - - b := blocks.NewBlock([]byte("beep boop")) + o := newObject([]byte("beep boop")) h := u.Hash([]byte("beep boop")) - if !bytes.Equal(b.Multihash(), h) { + if !bytes.Equal(o.Multihash(), h) { t.Error("Block Multihash and data multihash not equal") } - if b.Key() != key.Key(h) { + if o.Key() != key.Key(h) { t.Error("Block key and data multihash key not equal") } - k, err := bs.AddBlock(b) + k, err := bs.AddObject(o) if err != nil { t.Error("failed to add block to BlockService", err) return } - if k != b.Key() { + if !k.Equals(o.Cid()) { t.Error("returned key is not equal to block key", err) } ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() - b2, err := bs.GetBlock(ctx, b.Key()) + b2, err := bs.GetBlock(ctx, o.Cid()) if err != nil { t.Error("failed to retrieve block from BlockService", err) return } - if b.Key() != b2.Key() { + if o.Key() != b2.Key() { t.Error("Block keys not equal.") } - if !bytes.Equal(b.Data(), b2.Data()) { + if !bytes.Equal(o.RawData(), 
b2.RawData()) { t.Error("Block data is not equal.") } } +func makeObjects(n int) []*testObject { + var out []*testObject + for i := 0; i < n; i++ { + out = append(out, newObject([]byte(fmt.Sprintf("object %d", i)))) + } + return out +} + func TestGetBlocksSequential(t *testing.T) { var servs = Mocks(4) for _, s := range servs { defer s.Close() } - bg := blocksutil.NewBlockGenerator() - blks := bg.Blocks(50) + objs := makeObjects(50) - var keys []key.Key - for _, blk := range blks { - keys = append(keys, blk.Key()) - servs[0].AddBlock(blk) + var cids []*cid.Cid + for _, o := range objs { + cids = append(cids, o.Cid()) + servs[0].AddObject(o) } t.Log("one instance at a time, get blocks concurrently") @@ -83,7 +101,7 @@ func TestGetBlocksSequential(t *testing.T) { for i := 1; i < len(servs); i++ { ctx, cancel := context.WithTimeout(context.Background(), time.Second*50) defer cancel() - out := servs[i].GetBlocks(ctx, keys) + out := servs[i].GetBlocks(ctx, cids) gotten := make(map[key.Key]blocks.Block) for blk := range out { if _, ok := gotten[blk.Key()]; ok { @@ -91,8 +109,8 @@ func TestGetBlocksSequential(t *testing.T) { } gotten[blk.Key()] = blk } - if len(gotten) != len(blks) { - t.Fatalf("Didnt get enough blocks back: %d/%d", len(gotten), len(blks)) + if len(gotten) != len(objs) { + t.Fatalf("Didnt get enough blocks back: %d/%d", len(gotten), len(objs)) } } } diff --git a/core/commands/bitswap.go b/core/commands/bitswap.go index f06d1e78d0dc..716ff383b563 100644 --- a/core/commands/bitswap.go +++ b/core/commands/bitswap.go @@ -13,6 +13,7 @@ import ( "gx/ipfs/QmPSBJL4momYnE7DcUyk2DVhD6rH488ZmHBGLbxNdhU44K/go-humanize" peer "gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" u "gx/ipfs/QmZNVWh8LLjAavuQ2JXuFmuYH3C11xo988vSgp7UQrTRj1/go-ipfs-util" + cid "gx/ipfs/QmfSc2xehWmWLnwwYR91Y8QF4xdASypTFVknutoKQS3GHp/go-cid" ) var BitswapCmd = &cmds.Command{ @@ -55,13 +56,13 @@ var unwantCmd = &cmds.Command{ var ks []key.Key for _, arg := range req.Arguments() { - dec := key.B58KeyDecode(arg) - if dec == "" { - res.SetError(fmt.Errorf("Incorrectly formatted key: %s", arg), cmds.ErrNormal) + c, err := cid.Decode(arg) + if err != nil { + res.SetError(err, cmds.ErrNormal) return } - ks = append(ks, dec) + ks = append(ks, key.Key(c.Hash())) } bs.CancelWants(ks) diff --git a/core/commands/block.go b/core/commands/block.go index 48fa90e8818e..52d1934ee3ad 100644 --- a/core/commands/block.go +++ b/core/commands/block.go @@ -14,8 +14,8 @@ import ( cmds "github.com/ipfs/go-ipfs/commands" "github.com/ipfs/go-ipfs/pin" ds "gx/ipfs/QmNgqJarToRiq2GBaPJhkmW4B5BxS5B74E1rkGvv2JoaTp/go-datastore" - mh "gx/ipfs/QmYf7ng2hG5XBtJA3tN34DQ2GUN5HNksEw1rLDkmr6vGku/go-multihash" u "gx/ipfs/QmZNVWh8LLjAavuQ2JXuFmuYH3C11xo988vSgp7UQrTRj1/go-ipfs-util" + cid "gx/ipfs/QmfSc2xehWmWLnwwYR91Y8QF4xdASypTFVknutoKQS3GHp/go-cid" ) type BlockStat struct { @@ -70,7 +70,7 @@ on raw ipfs blocks. It outputs the following to stdout: res.SetOutput(&BlockStat{ Key: b.Key().B58String(), - Size: len(b.Data()), + Size: len(b.RawData()), }) }, Type: BlockStat{}, @@ -101,7 +101,7 @@ It outputs to stdout, and is a base58 encoded multihash. return } - res.SetOutput(bytes.NewReader(b.Data())) + res.SetOutput(bytes.NewReader(b.RawData())) }, } @@ -145,7 +145,7 @@ It reads from stdin, and is a base58 encoded multihash. 
b := blocks.NewBlock(data) log.Debugf("BlockPut key: '%q'", b.Key()) - k, err := n.Blocks.AddBlock(b) + k, err := n.Blocks.AddObject(b) if err != nil { res.SetError(err, cmds.ErrNormal) return @@ -175,13 +175,12 @@ func getBlockForKey(req cmds.Request, skey string) (blocks.Block, error) { return nil, errors.New("Not a valid hash") } - h, err := mh.FromB58String(skey) + c, err := cid.Decode(skey) if err != nil { return nil, err } - k := key.Key(h) - b, err := n.Blocks.GetBlock(req.Context(), k) + b, err := n.Blocks.GetBlock(req.Context(), c) if err != nil { return nil, err } @@ -214,17 +213,22 @@ It takes a list of base58 encoded multihashs to remove. hashes := req.Arguments() force, _, _ := req.Option("force").Bool() quiet, _, _ := req.Option("quiet").Bool() - keys := make([]key.Key, 0, len(hashes)) + cids := make([]*cid.Cid, 0, len(hashes)) for _, hash := range hashes { - k := key.B58KeyDecode(hash) - keys = append(keys, k) + c, err := cid.Decode(hash) + if err != nil { + res.SetError(fmt.Errorf("invalid content id: %s (%s)", hash, err), cmds.ErrNormal) + return + } + + cids = append(cids, c) } outChan := make(chan interface{}) res.SetOutput((<-chan interface{})(outChan)) go func() { defer close(outChan) pinning := n.Pinning - err := rmBlocks(n.Blockstore, pinning, outChan, keys, rmBlocksOpts{ + err := rmBlocks(n.Blockstore, pinning, outChan, cids, rmBlocksOpts{ quiet: quiet, force: force, }) @@ -275,31 +279,31 @@ type rmBlocksOpts struct { force bool } -func rmBlocks(blocks bs.GCBlockstore, pins pin.Pinner, out chan<- interface{}, keys []key.Key, opts rmBlocksOpts) error { +func rmBlocks(blocks bs.GCBlockstore, pins pin.Pinner, out chan<- interface{}, cids []*cid.Cid, opts rmBlocksOpts) error { unlocker := blocks.GCLock() defer unlocker.Unlock() - stillOkay, err := checkIfPinned(pins, keys, out) + stillOkay, err := checkIfPinned(pins, cids, out) if err != nil { return fmt.Errorf("pin check failed: %s", err) } - for _, k := range stillOkay { - err := blocks.DeleteBlock(k) + for _, c := range stillOkay { + err := blocks.DeleteBlock(key.Key(c.Hash())) if err != nil && opts.force && (err == bs.ErrNotFound || err == ds.ErrNotFound) { // ignore non-existent blocks } else if err != nil { - out <- &RemovedBlock{Hash: k.String(), Error: err.Error()} + out <- &RemovedBlock{Hash: c.String(), Error: err.Error()} } else if !opts.quiet { - out <- &RemovedBlock{Hash: k.String()} + out <- &RemovedBlock{Hash: c.String()} } } return nil } -func checkIfPinned(pins pin.Pinner, keys []key.Key, out chan<- interface{}) ([]key.Key, error) { - stillOkay := make([]key.Key, 0, len(keys)) - res, err := pins.CheckIfPinned(keys...) +func checkIfPinned(pins pin.Pinner, cids []*cid.Cid, out chan<- interface{}) ([]*cid.Cid, error) { + stillOkay := make([]*cid.Cid, 0, len(cids)) + res, err := pins.CheckIfPinned(cids...) 
if err != nil { return nil, err } diff --git a/core/commands/dht.go b/core/commands/dht.go index 638163b66c61..42d87548acdc 100644 --- a/core/commands/dht.go +++ b/core/commands/dht.go @@ -14,10 +14,12 @@ import ( path "github.com/ipfs/go-ipfs/path" routing "github.com/ipfs/go-ipfs/routing" ipdht "github.com/ipfs/go-ipfs/routing/dht" + pstore "gx/ipfs/QmSZi9ygLohBUGyHMqE5N6eToPwqcg7bZQTULeVLFu7Q6d/go-libp2p-peerstore" peer "gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" u "gx/ipfs/QmZNVWh8LLjAavuQ2JXuFmuYH3C11xo988vSgp7UQrTRj1/go-ipfs-util" "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + cid "gx/ipfs/QmfSc2xehWmWLnwwYR91Y8QF4xdASypTFVknutoKQS3GHp/go-cid" ) var ErrNotDHT = errors.New("routing service is not a DHT") @@ -257,26 +259,26 @@ var provideRefDhtCmd = &cmds.Command{ rec, _, _ := req.Option("recursive").Bool() - var keys []key.Key + var cids []*cid.Cid for _, arg := range req.Arguments() { - k := key.B58KeyDecode(arg) - if k == "" { - res.SetError(fmt.Errorf("incorrectly formatted key: ", arg), cmds.ErrNormal) + c, err := cid.Decode(arg) + if err != nil { + res.SetError(err, cmds.ErrNormal) return } - has, err := n.Blockstore.Has(k) + has, err := n.Blockstore.Has(key.Key(c.Hash())) if err != nil { res.SetError(err, cmds.ErrNormal) return } if !has { - res.SetError(fmt.Errorf("block %s not found locally, cannot provide", k), cmds.ErrNormal) + res.SetError(fmt.Errorf("block %s not found locally, cannot provide", c), cmds.ErrNormal) return } - keys = append(keys, k) + cids = append(cids, c) } outChan := make(chan interface{}) @@ -296,9 +298,9 @@ var provideRefDhtCmd = &cmds.Command{ defer close(events) var err error if rec { - err = provideKeysRec(ctx, n.Routing, n.DAG, keys) + err = provideKeysRec(ctx, n.Routing, n.DAG, cids) } else { - err = provideKeys(ctx, n.Routing, keys) + err = provideKeys(ctx, n.Routing, cids) } if err != nil { notif.PublishQueryEvent(ctx, ¬if.QueryEvent{ @@ -345,9 +347,9 @@ var provideRefDhtCmd = &cmds.Command{ Type: notif.QueryEvent{}, } -func provideKeys(ctx context.Context, r routing.IpfsRouting, keys []key.Key) error { - for _, k := range keys { - err := r.Provide(ctx, k) +func provideKeys(ctx context.Context, r routing.IpfsRouting, cids []*cid.Cid) error { + for _, c := range cids { + err := r.Provide(ctx, key.Key(c.Hash())) if err != nil { return err } @@ -355,16 +357,23 @@ func provideKeys(ctx context.Context, r routing.IpfsRouting, keys []key.Key) err return nil } -func provideKeysRec(ctx context.Context, r routing.IpfsRouting, dserv dag.DAGService, keys []key.Key) error { +func provideKeysRec(ctx context.Context, r routing.IpfsRouting, dserv dag.DAGService, cids []*cid.Cid) error { provided := make(map[key.Key]struct{}) - for _, k := range keys { + for _, c := range cids { kset := key.NewKeySet() - node, err := dserv.Get(ctx, k) + node, err := dserv.Get(ctx, c) if err != nil { return err } - err = dag.EnumerateChildrenAsync(ctx, dserv, node, kset) + err = dag.EnumerateChildrenAsync(ctx, dserv, node, func(c *cid.Cid) bool { + k := key.Key(c.Hash()) + if kset.Has(k) { + kset.Add(k) + return true + } + return false + }) if err != nil { return err } diff --git a/core/commands/files/files.go b/core/commands/files/files.go index 6e471e991ca9..c53cc9a18e47 100644 --- a/core/commands/files/files.go +++ b/core/commands/files/files.go @@ -158,10 +158,7 @@ func statNode(ds dag.DAGService, fsn mfs.FSNode) (*Object, error) { return nil, err } - k, err := nd.Key() - if err != nil { - return nil, err - } + c 
:= nd.Cid() d, err := ft.FromBytes(nd.Data()) if err != nil { @@ -184,7 +181,7 @@ func statNode(ds dag.DAGService, fsn mfs.FSNode) (*Object, error) { } return &Object{ - Hash: k.B58String(), + Hash: c.String(), Blocks: len(nd.Links), Size: d.GetFilesize(), CumulativeSize: cumulsize, diff --git a/core/commands/ls.go b/core/commands/ls.go index b0de779ef0e8..911fd1fea45c 100644 --- a/core/commands/ls.go +++ b/core/commands/ls.go @@ -97,7 +97,7 @@ The JSON output contains type information. res.SetError(err, cmds.ErrNormal) return } - linkNode, err = merkledag.DecodeProtobuf(b.Data()) + linkNode, err = merkledag.DecodeProtobuf(b.RawData()) if err != nil { res.SetError(err, cmds.ErrNormal) return diff --git a/core/commands/object/object.go b/core/commands/object/object.go index 1cc7a520427e..16da349d9a0a 100644 --- a/core/commands/object/object.go +++ b/core/commands/object/object.go @@ -87,7 +87,12 @@ is the raw data of the object. return } - fpath := path.Path(req.Arguments()[0]) + fpath, err := path.ParsePath(req.Arguments()[0]) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + node, err := core.Resolve(req.Context(), n, fpath) if err != nil { res.SetError(err, cmds.ErrNormal) @@ -424,7 +429,7 @@ Available templates: res.SetError(err, cmds.ErrNormal) return } - res.SetOutput(&Object{Hash: k.B58String()}) + res.SetOutput(&Object{Hash: k.String()}) }, Marshalers: cmds.MarshalerMap{ cmds.Text: func(res cmds.Response) (io.Reader, error) { @@ -538,13 +543,9 @@ func getObjectEnc(o interface{}) objectEncoding { } func getOutput(dagnode *dag.Node) (*Object, error) { - key, err := dagnode.Key() - if err != nil { - return nil, err - } - + c := dagnode.Cid() output := &Object{ - Hash: key.B58String(), + Hash: c.String(), Links: make([]Link, len(dagnode.Links)), } diff --git a/core/commands/object/patch.go b/core/commands/object/patch.go index 623186a119af..62d812e22847 100644 --- a/core/commands/object/patch.go +++ b/core/commands/object/patch.go @@ -99,7 +99,7 @@ the limit will not be respected by the network. return } - res.SetOutput(&Object{Hash: newkey.B58String()}) + res.SetOutput(&Object{Hash: newkey.String()}) }, Type: Object{}, Marshalers: cmds.MarshalerMap{ @@ -161,7 +161,7 @@ Example: return } - res.SetOutput(&Object{Hash: newkey.B58String()}) + res.SetOutput(&Object{Hash: newkey.String()}) }, Type: Object{}, Marshalers: cmds.MarshalerMap{ @@ -215,13 +215,9 @@ Removes a link by the given name from root. return } - nk, err := nnode.Key() - if err != nil { - res.SetError(err, cmds.ErrNormal) - return - } + nc := nnode.Cid() - res.SetOutput(&Object{Hash: nk.B58String()}) + res.SetOutput(&Object{Hash: nc.String()}) }, Type: Object{}, Marshalers: cmds.MarshalerMap{ @@ -310,13 +306,9 @@ to a file containing 'bar', and returns the hash of the new object. 
return } - nk, err := nnode.Key() - if err != nil { - res.SetError(err, cmds.ErrNormal) - return - } + nc := nnode.Cid() - res.SetOutput(&Object{Hash: nk.B58String()}) + res.SetOutput(&Object{Hash: nc.String()}) }, Type: Object{}, Marshalers: cmds.MarshalerMap{ diff --git a/core/commands/pin.go b/core/commands/pin.go index efa6fa1bb3e4..af3be62901c0 100644 --- a/core/commands/pin.go +++ b/core/commands/pin.go @@ -5,15 +5,16 @@ import ( "fmt" "io" - key "github.com/ipfs/go-ipfs/blocks/key" cmds "github.com/ipfs/go-ipfs/commands" core "github.com/ipfs/go-ipfs/core" corerepo "github.com/ipfs/go-ipfs/core/corerepo" dag "github.com/ipfs/go-ipfs/merkledag" path "github.com/ipfs/go-ipfs/path" pin "github.com/ipfs/go-ipfs/pin" + u "gx/ipfs/QmZNVWh8LLjAavuQ2JXuFmuYH3C11xo988vSgp7UQrTRj1/go-ipfs-util" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + cid "gx/ipfs/QmfSc2xehWmWLnwwYR91Y8QF4xdASypTFVknutoKQS3GHp/go-cid" ) var PinCmd = &cmds.Command{ @@ -29,7 +30,7 @@ var PinCmd = &cmds.Command{ } type PinOutput struct { - Pins []key.Key + Pins []*cid.Cid } var addPinCmd = &cmds.Command{ @@ -271,12 +272,12 @@ func pinLsKeys(args []string, typeStr string, ctx context.Context, n *core.IpfsN keys := make(map[string]RefKeyObject) for _, p := range args { - dagNode, err := core.Resolve(ctx, n, path.Path(p)) + pth, err := path.ParsePath(p) if err != nil { return nil, err } - k, err := dagNode.Key() + dagNode, err := core.Resolve(ctx, n, pth) if err != nil { return nil, err } @@ -286,7 +287,8 @@ func pinLsKeys(args []string, typeStr string, ctx context.Context, n *core.IpfsN return nil, fmt.Errorf("Invalid pin mode '%s'", typeStr) } - pinType, pinned, err := n.Pinning.IsPinnedWithType(k, mode) + c := dagNode.Cid() + pinType, pinned, err := n.Pinning.IsPinnedWithType(c, mode) if err != nil { return nil, err } @@ -300,7 +302,7 @@ func pinLsKeys(args []string, typeStr string, ctx context.Context, n *core.IpfsN default: pinType = "indirect through " + pinType } - keys[k.B58String()] = RefKeyObject{ + keys[c.String()] = RefKeyObject{ Type: pinType, } } @@ -312,9 +314,9 @@ func pinLsAll(typeStr string, ctx context.Context, n *core.IpfsNode) (map[string keys := make(map[string]RefKeyObject) - AddToResultKeys := func(keyList []key.Key, typeStr string) { - for _, k := range keyList { - keys[k.B58String()] = RefKeyObject{ + AddToResultKeys := func(keyList []*cid.Cid, typeStr string) { + for _, c := range keyList { + keys[c.String()] = RefKeyObject{ Type: typeStr, } } @@ -324,18 +326,24 @@ func pinLsAll(typeStr string, ctx context.Context, n *core.IpfsNode) (map[string AddToResultKeys(n.Pinning.DirectKeys(), "direct") } if typeStr == "indirect" || typeStr == "all" { - ks := key.NewKeySet() + set := cid.NewSet() for _, k := range n.Pinning.RecursiveKeys() { nd, err := n.DAG.Get(ctx, k) if err != nil { return nil, err } - err = dag.EnumerateChildren(n.Context(), n.DAG, nd, ks, false) + err = dag.EnumerateChildren(n.Context(), n.DAG, nd, func(c *cid.Cid) bool { + if !set.Has(c) { + set.Add(c) + return true + } + return false + }, false) if err != nil { return nil, err } } - AddToResultKeys(ks.Keys(), "indirect") + AddToResultKeys(set.Keys(), "indirect") } if typeStr == "recursive" || typeStr == "all" { AddToResultKeys(n.Pinning.RecursiveKeys(), "recursive") diff --git a/core/commands/refs.go b/core/commands/refs.go index 0fd4b7735dd7..ec61698beb5b 100644 --- a/core/commands/refs.go +++ b/core/commands/refs.go @@ -11,8 +11,10 @@ import ( "github.com/ipfs/go-ipfs/core" 
dag "github.com/ipfs/go-ipfs/merkledag" path "github.com/ipfs/go-ipfs/path" + u "gx/ipfs/QmZNVWh8LLjAavuQ2JXuFmuYH3C11xo988vSgp7UQrTRj1/go-ipfs-util" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + cid "gx/ipfs/QmfSc2xehWmWLnwwYR91Y8QF4xdASypTFVknutoKQS3GHp/go-cid" ) // KeyList is a general type for outputting lists of keys @@ -220,7 +222,7 @@ type RefWriter struct { Recursive bool PrintFmt string - seen map[key.Key]struct{} + seen *cid.Set } // WriteRefs writes refs of the given object to the underlying writer. @@ -232,19 +234,16 @@ func (rw *RefWriter) WriteRefs(n *dag.Node) (int, error) { } func (rw *RefWriter) writeRefsRecursive(n *dag.Node) (int, error) { - nkey, err := n.Key() - if err != nil { - return 0, err - } + nc := n.Cid() var count int for i, ng := range dag.GetDAG(rw.Ctx, rw.DAG, n) { - lk := key.Key(n.Links[i].Hash) - if rw.skip(lk) { + lc := cid.NewCidV0(n.Links[i].Hash) + if rw.skip(lc) { continue } - if err := rw.WriteEdge(nkey, lk, n.Links[i].Name); err != nil { + if err := rw.WriteEdge(nc, lc, n.Links[i].Name); err != nil { return count, err } @@ -263,24 +262,21 @@ func (rw *RefWriter) writeRefsRecursive(n *dag.Node) (int, error) { } func (rw *RefWriter) writeRefsSingle(n *dag.Node) (int, error) { - nkey, err := n.Key() - if err != nil { - return 0, err - } + c := n.Cid() - if rw.skip(nkey) { + if rw.skip(c) { return 0, nil } count := 0 for _, l := range n.Links { - lk := key.Key(l.Hash) + lc := cid.NewCidV0(l.Hash) - if rw.skip(lk) { + if rw.skip(lc) { continue } - if err := rw.WriteEdge(nkey, lk, l.Name); err != nil { + if err := rw.WriteEdge(c, lc, l.Name); err != nil { return count, err } count++ @@ -288,25 +284,25 @@ func (rw *RefWriter) writeRefsSingle(n *dag.Node) (int, error) { return count, nil } -// skip returns whether to skip a key -func (rw *RefWriter) skip(k key.Key) bool { +// skip returns whether to skip a cid +func (rw *RefWriter) skip(c *cid.Cid) bool { if !rw.Unique { return false } if rw.seen == nil { - rw.seen = make(map[key.Key]struct{}) + rw.seen = cid.NewSet() } - _, found := rw.seen[k] - if !found { - rw.seen[k] = struct{}{} + has := rw.seen.Has(c) + if !has { + rw.seen.Add(c) } - return found + return has } // Write one edge -func (rw *RefWriter) WriteEdge(from, to key.Key, linkname string) error { +func (rw *RefWriter) WriteEdge(from, to *cid.Cid, linkname string) error { if rw.Ctx != nil { select { case <-rw.Ctx.Done(): // just in case. 
@@ -319,11 +315,11 @@ func (rw *RefWriter) WriteEdge(from, to key.Key, linkname string) error { switch { case rw.PrintFmt != "": s = rw.PrintFmt - s = strings.Replace(s, "", from.B58String(), -1) - s = strings.Replace(s, "", to.B58String(), -1) + s = strings.Replace(s, "", from.String(), -1) + s = strings.Replace(s, "", to.String(), -1) s = strings.Replace(s, "", linkname, -1) default: - s += to.B58String() + s += to.String() } rw.out <- &RefWrapper{Ref: s} diff --git a/core/commands/resolve.go b/core/commands/resolve.go index 692141a5700d..8b358141e308 100644 --- a/core/commands/resolve.go +++ b/core/commands/resolve.go @@ -105,13 +105,9 @@ Resolve the value of an IPFS DAG path: return } - key, err := node.Key() - if err != nil { - res.SetError(err, cmds.ErrNormal) - return - } + c := node.Cid() - res.SetOutput(&ResolvedPath{path.FromKey(key)}) + res.SetOutput(&ResolvedPath{path.FromCid(c)}) }, Marshalers: cmds.MarshalerMap{ cmds.Text: func(res cmds.Response) (io.Reader, error) { diff --git a/core/commands/tar.go b/core/commands/tar.go index 7334c725d751..306ac1a16860 100644 --- a/core/commands/tar.go +++ b/core/commands/tar.go @@ -53,16 +53,12 @@ represent it. return } - k, err := node.Key() - if err != nil { - res.SetError(err, cmds.ErrNormal) - return - } + c := node.Cid() fi.FileName() res.SetOutput(&coreunix.AddedObject{ Name: fi.FileName(), - Hash: k.B58String(), + Hash: c.String(), }) }, Type: coreunix.AddedObject{}, diff --git a/core/commands/unixfs/ls.go b/core/commands/unixfs/ls.go index 757ee154b447..36aeb061065e 100644 --- a/core/commands/unixfs/ls.go +++ b/core/commands/unixfs/ls.go @@ -93,13 +93,9 @@ possible, please use 'ipfs ls' instead. return } - key, err := merkleNode.Key() - if err != nil { - res.SetError(err, cmds.ErrNormal) - return - } + c := merkleNode.Cid() - hash := key.B58String() + hash := c.String() output.Arguments[fpath] = hash if _, ok := output.Objects[hash]; ok { @@ -116,7 +112,7 @@ possible, please use 'ipfs ls' instead. 
t := unixFSNode.GetType() output.Objects[hash] = &LsObject{ - Hash: key.String(), + Hash: c.String(), Type: t.String(), Size: unixFSNode.GetFilesize(), } diff --git a/core/core.go b/core/core.go index 2d3b32810348..698ef2ad554b 100644 --- a/core/core.go +++ b/core/core.go @@ -35,6 +35,7 @@ import ( swarm "gx/ipfs/Qmf4ETeAWXuThBfWwonVyFqGFSgTWepUDEr1txcctvpTXS/go-libp2p/p2p/net/swarm" addrutil "gx/ipfs/Qmf4ETeAWXuThBfWwonVyFqGFSgTWepUDEr1txcctvpTXS/go-libp2p/p2p/net/swarm/addr" ping "gx/ipfs/Qmf4ETeAWXuThBfWwonVyFqGFSgTWepUDEr1txcctvpTXS/go-libp2p/p2p/protocol/ping" + cid "gx/ipfs/QmfSc2xehWmWLnwwYR91Y8QF4xdASypTFVknutoKQS3GHp/go-cid" routing "github.com/ipfs/go-ipfs/routing" dht "github.com/ipfs/go-ipfs/routing/dht" @@ -42,7 +43,6 @@ import ( offroute "github.com/ipfs/go-ipfs/routing/offline" bstore "github.com/ipfs/go-ipfs/blocks/blockstore" - key "github.com/ipfs/go-ipfs/blocks/key" bserv "github.com/ipfs/go-ipfs/blockservice" exchange "github.com/ipfs/go-ipfs/exchange" bitswap "github.com/ipfs/go-ipfs/exchange/bitswap" @@ -487,8 +487,8 @@ func (n *IpfsNode) loadBootstrapPeers() ([]pstore.PeerInfo, error) { func (n *IpfsNode) loadFilesRoot() error { dsk := ds.NewKey("/local/filesroot") - pf := func(ctx context.Context, k key.Key) error { - return n.Repo.Datastore().Put(dsk, []byte(k)) + pf := func(ctx context.Context, c *cid.Cid) error { + return n.Repo.Datastore().Put(dsk, c.Bytes()) } var nd *merkledag.Node @@ -502,8 +502,12 @@ func (n *IpfsNode) loadFilesRoot() error { return fmt.Errorf("failure writing to dagstore: %s", err) } case err == nil: - k := key.Key(val.([]byte)) - nd, err = n.DAG.Get(n.Context(), k) + c, err := cid.Cast(val.([]byte)) + if err != nil { + return err + } + + nd, err = n.DAG.Get(n.Context(), c) if err != nil { return fmt.Errorf("error loading filesroot from DAG: %s", err) } diff --git a/core/corehttp/gateway_handler.go b/core/corehttp/gateway_handler.go index 88b7a85877de..659d847b3f1f 100644 --- a/core/corehttp/gateway_handler.go +++ b/core/corehttp/gateway_handler.go @@ -12,8 +12,8 @@ import ( humanize "gx/ipfs/QmPSBJL4momYnE7DcUyk2DVhD6rH488ZmHBGLbxNdhU44K/go-humanize" "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + cid "gx/ipfs/QmfSc2xehWmWLnwwYR91Y8QF4xdASypTFVknutoKQS3GHp/go-cid" - key "github.com/ipfs/go-ipfs/blocks/key" core "github.com/ipfs/go-ipfs/core" "github.com/ipfs/go-ipfs/importer" chunk "github.com/ipfs/go-ipfs/importer/chunk" @@ -357,14 +357,20 @@ func (i *gatewayHandler) putHandler(w http.ResponseWriter, r *http.Request) { newPath = path.Join(rsegs[2:]) } - var newkey key.Key + var newcid *cid.Cid rnode, err := core.Resolve(ctx, i.node, rootPath) switch ev := err.(type) { case path.ErrNoLink: // ev.Node < node where resolve failed // ev.Name < new link // but we need to patch from the root - rnode, err := i.node.DAG.Get(ctx, key.B58KeyDecode(rsegs[1])) + c, err := cid.Decode(rsegs[1]) + if err != nil { + webError(w, "putHandler: bad input path", err, http.StatusBadRequest) + return + } + + rnode, err := i.node.DAG.Get(ctx, c) if err != nil { webError(w, "putHandler: Could not create DAG from request", err, http.StatusInternalServerError) return @@ -383,21 +389,17 @@ func (i *gatewayHandler) putHandler(w http.ResponseWriter, r *http.Request) { return } - newkey, err = nnode.Key() - if err != nil { - webError(w, "putHandler: could not get key of edited node", err, http.StatusInternalServerError) - return - } + newcid = nnode.Cid() case nil: // object set-data case 
rnode.SetData(newnode.Data()) - newkey, err = i.node.DAG.Add(rnode) + newcid, err = i.node.DAG.Add(rnode) if err != nil { - nnk, _ := newnode.Key() - rk, _ := rnode.Key() - webError(w, fmt.Sprintf("putHandler: Could not add newnode(%q) to root(%q)", nnk.B58String(), rk.B58String()), err, http.StatusInternalServerError) + nnk := newnode.Cid() + rk := rnode.Cid() + webError(w, fmt.Sprintf("putHandler: Could not add newnode(%q) to root(%q)", nnk.String(), rk.String()), err, http.StatusInternalServerError) return } default: @@ -407,8 +409,8 @@ func (i *gatewayHandler) putHandler(w http.ResponseWriter, r *http.Request) { } i.addUserHeaders(w) // ok, _now_ write user's headers. - w.Header().Set("IPFS-Hash", newkey.String()) - http.Redirect(w, r, gopath.Join(ipfsPathPrefix, newkey.String(), newPath), http.StatusCreated) + w.Header().Set("IPFS-Hash", newcid.String()) + http.Redirect(w, r, gopath.Join(ipfsPathPrefix, newcid.String(), newPath), http.StatusCreated) } func (i *gatewayHandler) deleteHandler(w http.ResponseWriter, r *http.Request) { @@ -416,20 +418,13 @@ func (i *gatewayHandler) deleteHandler(w http.ResponseWriter, r *http.Request) { ctx, cancel := context.WithCancel(i.node.Context()) defer cancel() - ipfsNode, err := core.Resolve(ctx, i.node, path.Path(urlPath)) + p, err := path.ParsePath(urlPath) if err != nil { - // FIXME HTTP error code - webError(w, "Could not resolve name", err, http.StatusInternalServerError) + webError(w, "failed to parse path", err, http.StatusBadRequest) return } - k, err := ipfsNode.Key() - if err != nil { - webError(w, "Could not get key from resolved node", err, http.StatusInternalServerError) - return - } - - h, components, err := path.SplitAbsPath(path.FromKey(k)) + c, components, err := path.SplitAbsPath(p) if err != nil { webError(w, "Could not split path", err, http.StatusInternalServerError) return @@ -437,7 +432,7 @@ func (i *gatewayHandler) deleteHandler(w http.ResponseWriter, r *http.Request) { tctx, cancel := context.WithTimeout(ctx, time.Minute) defer cancel() - rootnd, err := i.node.Resolver.DAG.Get(tctx, key.Key(h)) + rootnd, err := i.node.Resolver.DAG.Get(tctx, c) if err != nil { webError(w, "Could not resolve root object", err, http.StatusBadRequest) return @@ -475,15 +470,11 @@ func (i *gatewayHandler) deleteHandler(w http.ResponseWriter, r *http.Request) { } // Redirect to new path - key, err := newnode.Key() - if err != nil { - webError(w, "Could not get key of new node", err, http.StatusInternalServerError) - return - } + ncid := newnode.Cid() i.addUserHeaders(w) // ok, _now_ write user's headers. 
- w.Header().Set("IPFS-Hash", key.String()) - http.Redirect(w, r, gopath.Join(ipfsPathPrefix+key.String(), path.Join(components[:len(components)-1])), http.StatusCreated) + w.Header().Set("IPFS-Hash", ncid.String()) + http.Redirect(w, r, gopath.Join(ipfsPathPrefix+ncid.String(), path.Join(components[:len(components)-1])), http.StatusCreated) } func (i *gatewayHandler) addUserHeaders(w http.ResponseWriter) { diff --git a/core/corerepo/gc.go b/core/corerepo/gc.go index 223ab87d73b1..bbd774915503 100644 --- a/core/corerepo/gc.go +++ b/core/corerepo/gc.go @@ -9,9 +9,11 @@ import ( mfs "github.com/ipfs/go-ipfs/mfs" gc "github.com/ipfs/go-ipfs/pin/gc" repo "github.com/ipfs/go-ipfs/repo" + humanize "gx/ipfs/QmPSBJL4momYnE7DcUyk2DVhD6rH488ZmHBGLbxNdhU44K/go-humanize" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + cid "gx/ipfs/QmfSc2xehWmWLnwwYR91Y8QF4xdASypTFVknutoKQS3GHp/go-cid" ) var log = logging.Logger("corerepo") @@ -72,16 +74,13 @@ func NewGC(n *core.IpfsNode) (*GC, error) { }, nil } -func BestEffortRoots(filesRoot *mfs.Root) ([]key.Key, error) { +func BestEffortRoots(filesRoot *mfs.Root) ([]*cid.Cid, error) { rootDag, err := filesRoot.GetValue().GetNode() if err != nil { return nil, err } - rootKey, err := rootDag.Key() - if err != nil { - return nil, err - } - return []key.Key{rootKey}, nil + + return []*cid.Cid{rootDag.Cid()}, nil } func GarbageCollect(n *core.IpfsNode, ctx context.Context) error { diff --git a/core/corerepo/pinning.go b/core/corerepo/pinning.go index 675ba8349459..84a72fc1931f 100644 --- a/core/corerepo/pinning.go +++ b/core/corerepo/pinning.go @@ -16,15 +16,15 @@ package corerepo import ( "fmt" - context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - - key "github.com/ipfs/go-ipfs/blocks/key" "github.com/ipfs/go-ipfs/core" "github.com/ipfs/go-ipfs/merkledag" path "github.com/ipfs/go-ipfs/path" + + context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + cid "gx/ipfs/QmfSc2xehWmWLnwwYR91Y8QF4xdASypTFVknutoKQS3GHp/go-cid" ) -func Pin(n *core.IpfsNode, ctx context.Context, paths []string, recursive bool) ([]key.Key, error) { +func Pin(n *core.IpfsNode, ctx context.Context, paths []string, recursive bool) ([]*cid.Cid, error) { dagnodes := make([]*merkledag.Node, 0) for _, fpath := range paths { dagnode, err := core.Resolve(ctx, n, path.Path(fpath)) @@ -34,20 +34,17 @@ func Pin(n *core.IpfsNode, ctx context.Context, paths []string, recursive bool) dagnodes = append(dagnodes, dagnode) } - var out []key.Key + var out []*cid.Cid for _, dagnode := range dagnodes { - k, err := dagnode.Key() - if err != nil { - return nil, err - } + c := dagnode.Cid() ctx, cancel := context.WithCancel(ctx) defer cancel() - err = n.Pinning.Pin(ctx, dagnode, recursive) + err := n.Pinning.Pin(ctx, dagnode, recursive) if err != nil { return nil, fmt.Errorf("pin: %s", err) } - out = append(out, k) + out = append(out, c) } err := n.Pinning.Flush() @@ -58,16 +55,16 @@ func Pin(n *core.IpfsNode, ctx context.Context, paths []string, recursive bool) return out, nil } -func Unpin(n *core.IpfsNode, ctx context.Context, paths []string, recursive bool) ([]key.Key, error) { +func Unpin(n *core.IpfsNode, ctx context.Context, paths []string, recursive bool) ([]*cid.Cid, error) { - var unpinned []key.Key + var unpinned []*cid.Cid for _, p := range paths { p, err := path.ParsePath(p) if err != nil { return nil, err } - k, err := 
core.ResolveToKey(ctx, n, p) + k, err := core.ResolveToCid(ctx, n, p) if err != nil { return nil, err } diff --git a/core/coreunix/add.go b/core/coreunix/add.go index bd3929ea7b38..ebf972a66ac4 100644 --- a/core/coreunix/add.go +++ b/core/coreunix/add.go @@ -7,24 +7,24 @@ import ( "os" gopath "path" + bs "github.com/ipfs/go-ipfs/blocks/blockstore" bstore "github.com/ipfs/go-ipfs/blocks/blockstore" - key "github.com/ipfs/go-ipfs/blocks/key" bserv "github.com/ipfs/go-ipfs/blockservice" + "github.com/ipfs/go-ipfs/commands/files" + core "github.com/ipfs/go-ipfs/core" "github.com/ipfs/go-ipfs/exchange/offline" importer "github.com/ipfs/go-ipfs/importer" "github.com/ipfs/go-ipfs/importer/chunk" + dag "github.com/ipfs/go-ipfs/merkledag" mfs "github.com/ipfs/go-ipfs/mfs" "github.com/ipfs/go-ipfs/pin" + unixfs "github.com/ipfs/go-ipfs/unixfs" + ds "gx/ipfs/QmNgqJarToRiq2GBaPJhkmW4B5BxS5B74E1rkGvv2JoaTp/go-datastore" syncds "gx/ipfs/QmNgqJarToRiq2GBaPJhkmW4B5BxS5B74E1rkGvv2JoaTp/go-datastore/sync" - context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - - bs "github.com/ipfs/go-ipfs/blocks/blockstore" - "github.com/ipfs/go-ipfs/commands/files" - core "github.com/ipfs/go-ipfs/core" - dag "github.com/ipfs/go-ipfs/merkledag" - unixfs "github.com/ipfs/go-ipfs/unixfs" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" + context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + cid "gx/ipfs/QmfSc2xehWmWLnwwYR91Y8QF4xdASypTFVknutoKQS3GHp/go-cid" ) var log = logging.Logger("coreunix") @@ -103,7 +103,7 @@ type Adder struct { root *dag.Node mr *mfs.Root unlocker bs.Unlocker - tempRoot key.Key + tempRoot *cid.Cid } func (adder *Adder) SetMfsRoot(r *mfs.Root) { @@ -166,7 +166,7 @@ func (adder *Adder) PinRoot() error { return err } - if adder.tempRoot != "" { + if adder.tempRoot != nil { err := adder.pinning.Unpin(adder.ctx, adder.tempRoot, true) if err != nil { return err @@ -259,12 +259,8 @@ func Add(n *core.IpfsNode, r io.Reader) (string, error) { if err != nil { return "", err } - k, err := node.Key() - if err != nil { - return "", err - } - return k.String(), nil + return node.Cid().String(), nil } // AddR recursively adds files in |path|. 
@@ -297,12 +293,7 @@ func AddR(n *core.IpfsNode, root string) (key string, err error) { return "", err } - k, err := nd.Key() - if err != nil { - return "", err - } - - return k.String(), nil + return nd.String(), nil } // AddWrapped adds data from a reader, and wraps it with a directory object @@ -329,23 +320,14 @@ func AddWrapped(n *core.IpfsNode, r io.Reader, filename string) (string, *dag.No return "", nil, err } - k, err := dagnode.Key() - if err != nil { - return "", nil, err - } - - return gopath.Join(k.String(), filename), dagnode, nil + c := dagnode.Cid() + return gopath.Join(c.String(), filename), dagnode, nil } func (adder *Adder) addNode(node *dag.Node, path string) error { // patch it into the root if path == "" { - key, err := node.Key() - if err != nil { - return err - } - - path = key.B58String() + path = node.Cid().String() } dir := gopath.Dir(path) @@ -490,13 +472,10 @@ func NewMemoryDagService() dag.DAGService { // from core/commands/object.go func getOutput(dagnode *dag.Node) (*Object, error) { - key, err := dagnode.Key() - if err != nil { - return nil, err - } + c := dagnode.Cid() output := &Object{ - Hash: key.B58String(), + Hash: c.String(), Links: make([]Link, len(dagnode.Links)), } diff --git a/core/coreunix/add_test.go b/core/coreunix/add_test.go index 7a43b634903a..4e10f41e67ba 100644 --- a/core/coreunix/add_test.go +++ b/core/coreunix/add_test.go @@ -15,7 +15,9 @@ import ( "github.com/ipfs/go-ipfs/repo" "github.com/ipfs/go-ipfs/repo/config" "github.com/ipfs/go-ipfs/thirdparty/testutil" + "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + cid "gx/ipfs/QmfSc2xehWmWLnwwYR91Y8QF4xdASypTFVknutoKQS3GHp/go-cid" ) func TestAddRecursive(t *testing.T) { @@ -142,10 +144,14 @@ func TestAddGCLive(t *testing.T) { } } - var last key.Key + var last *cid.Cid for a := range out { // wait for it to finish - last = key.B58KeyDecode(a.(*AddedObject).Hash) + c, err := cid.Decode(a.(*AddedObject).Hash) + if err != nil { + t.Fatal(err) + } + last = c } ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) @@ -155,7 +161,8 @@ func TestAddGCLive(t *testing.T) { t.Fatal(err) } - err = dag.EnumerateChildren(ctx, node.DAG, root, key.NewKeySet(), false) + set := cid.NewSet() + err = dag.EnumerateChildren(ctx, node.DAG, root, set.Visit, false) if err != nil { t.Fatal(err) } diff --git a/core/coreunix/metadata.go b/core/coreunix/metadata.go index eb318e1a2ebc..ca58f8a918d0 100644 --- a/core/coreunix/metadata.go +++ b/core/coreunix/metadata.go @@ -1,16 +1,19 @@ package coreunix import ( - key "github.com/ipfs/go-ipfs/blocks/key" core "github.com/ipfs/go-ipfs/core" dag "github.com/ipfs/go-ipfs/merkledag" ft "github.com/ipfs/go-ipfs/unixfs" + cid "gx/ipfs/QmfSc2xehWmWLnwwYR91Y8QF4xdASypTFVknutoKQS3GHp/go-cid" ) func AddMetadataTo(n *core.IpfsNode, skey string, m *ft.Metadata) (string, error) { - ukey := key.B58KeyDecode(skey) + c, err := cid.Decode(skey) + if err != nil { + return "", err + } - nd, err := n.DAG.Get(n.Context(), ukey) + nd, err := n.DAG.Get(n.Context(), c) if err != nil { return "", err } @@ -31,13 +34,16 @@ func AddMetadataTo(n *core.IpfsNode, skey string, m *ft.Metadata) (string, error return "", err } - return nk.B58String(), nil + return nk.String(), nil } func Metadata(n *core.IpfsNode, skey string) (*ft.Metadata, error) { - ukey := key.B58KeyDecode(skey) + c, err := cid.Decode(skey) + if err != nil { + return nil, err + } - nd, err := n.DAG.Get(n.Context(), ukey) + nd, err := n.DAG.Get(n.Context(), c) if err != 
nil { return nil, err } diff --git a/core/coreunix/metadata_test.go b/core/coreunix/metadata_test.go index 47d516d1cd12..97877cebe2be 100644 --- a/core/coreunix/metadata_test.go +++ b/core/coreunix/metadata_test.go @@ -5,12 +5,7 @@ import ( "io/ioutil" "testing" - ds "gx/ipfs/QmNgqJarToRiq2GBaPJhkmW4B5BxS5B74E1rkGvv2JoaTp/go-datastore" - dssync "gx/ipfs/QmNgqJarToRiq2GBaPJhkmW4B5BxS5B74E1rkGvv2JoaTp/go-datastore/sync" - context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - bstore "github.com/ipfs/go-ipfs/blocks/blockstore" - key "github.com/ipfs/go-ipfs/blocks/key" bserv "github.com/ipfs/go-ipfs/blockservice" core "github.com/ipfs/go-ipfs/core" offline "github.com/ipfs/go-ipfs/exchange/offline" @@ -19,7 +14,12 @@ import ( merkledag "github.com/ipfs/go-ipfs/merkledag" ft "github.com/ipfs/go-ipfs/unixfs" uio "github.com/ipfs/go-ipfs/unixfs/io" + + ds "gx/ipfs/QmNgqJarToRiq2GBaPJhkmW4B5BxS5B74E1rkGvv2JoaTp/go-datastore" + dssync "gx/ipfs/QmNgqJarToRiq2GBaPJhkmW4B5BxS5B74E1rkGvv2JoaTp/go-datastore/sync" u "gx/ipfs/QmZNVWh8LLjAavuQ2JXuFmuYH3C11xo988vSgp7UQrTRj1/go-ipfs-util" + context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + cid "gx/ipfs/QmfSc2xehWmWLnwwYR91Y8QF4xdASypTFVknutoKQS3GHp/go-cid" ) func getDagserv(t *testing.T) merkledag.DAGService { @@ -41,10 +41,7 @@ func TestMetadata(t *testing.T) { t.Fatal(err) } - k, err := nd.Key() - if err != nil { - t.Fatal(err) - } + c := nd.Cid() m := new(ft.Metadata) m.MimeType = "THIS IS A TEST" @@ -52,7 +49,7 @@ func TestMetadata(t *testing.T) { // Such effort, many compromise ipfsnode := &core.IpfsNode{DAG: ds} - mdk, err := AddMetadataTo(ipfsnode, k.B58String(), m) + mdk, err := AddMetadataTo(ipfsnode, c.String(), m) if err != nil { t.Fatal(err) } @@ -65,7 +62,12 @@ func TestMetadata(t *testing.T) { t.Fatalf("something went wrong in conversion: '%s' != '%s'", rec.MimeType, m.MimeType) } - retnode, err := ds.Get(ctx, key.B58KeyDecode(mdk)) + cdk, err := cid.Decode(mdk) + if err != nil { + t.Fatal(err) + } + + retnode, err := ds.Get(ctx, cdk) if err != nil { t.Fatal(err) } diff --git a/core/pathresolver.go b/core/pathresolver.go index 555cc3dfadc9..f6091cd79355 100644 --- a/core/pathresolver.go +++ b/core/pathresolver.go @@ -6,9 +6,9 @@ import ( context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - key "github.com/ipfs/go-ipfs/blocks/key" merkledag "github.com/ipfs/go-ipfs/merkledag" path "github.com/ipfs/go-ipfs/path" + cid "gx/ipfs/QmfSc2xehWmWLnwwYR91Y8QF4xdASypTFVknutoKQS3GHp/go-cid" ) // ErrNoNamesys is an explicit error for when an IPFS node doesn't @@ -61,31 +61,31 @@ func Resolve(ctx context.Context, n *IpfsNode, p path.Path) (*merkledag.Node, er // It first checks if the path is already in the form of just a key ( or // /ipfs/) and returns immediately if so. Otherwise, it falls back onto // Resolve to perform resolution of the dagnode being referenced. -func ResolveToKey(ctx context.Context, n *IpfsNode, p path.Path) (key.Key, error) { +func ResolveToCid(ctx context.Context, n *IpfsNode, p path.Path) (*cid.Cid, error) { // If the path is simply a key, parse and return it. Parsed paths are already // normalized (read: prepended with /ipfs/ if needed), so segment[1] should // always be the key. if p.IsJustAKey() { - return key.B58KeyDecode(p.Segments()[1]), nil + return cid.Decode(p.Segments()[1]) } // Fall back onto regular dagnode resolution. 
Retrieve the second-to-last // segment of the path and resolve its link to the last segment. head, tail, err := p.PopLastSegment() if err != nil { - return key.Key(""), err + return nil, err } dagnode, err := Resolve(ctx, n, head) if err != nil { - return key.Key(""), err + return nil, err } // Extract and return the key of the link to the target dag node. link, err := dagnode.GetNodeLink(tail) if err != nil { - return key.Key(""), err + return nil, err } - return key.Key(link.Hash), nil + return cid.NewCidV0(link.Hash), nil } diff --git a/exchange/bitswap/bitswap.go b/exchange/bitswap/bitswap.go index c98a98db7332..27d0a7b606af 100644 --- a/exchange/bitswap/bitswap.go +++ b/exchange/bitswap/bitswap.go @@ -8,12 +8,6 @@ import ( "sync" "time" - process "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess" - procctx "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess/context" - logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - peer "gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" - context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - blocks "github.com/ipfs/go-ipfs/blocks" blockstore "github.com/ipfs/go-ipfs/blocks/blockstore" key "github.com/ipfs/go-ipfs/blocks/key" @@ -26,6 +20,12 @@ import ( flags "github.com/ipfs/go-ipfs/flags" "github.com/ipfs/go-ipfs/thirdparty/delay" loggables "github.com/ipfs/go-ipfs/thirdparty/loggables" + + process "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess" + procctx "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess/context" + logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" + peer "gx/ipfs/QmWtbQU15LaB5B1JC2F7TV9P4K88vD3PpA4AJrwfCjhML8/go-libp2p-peer" + context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) var log = logging.Logger("bitswap") @@ -252,8 +252,8 @@ func (bs *Bitswap) GetBlocks(ctx context.Context, keys []key.Key) (<-chan blocks } // CancelWant removes a given key from the wantlist -func (bs *Bitswap) CancelWants(ks []key.Key) { - bs.wm.CancelWants(ks) +func (bs *Bitswap) CancelWants(keys []key.Key) { + bs.wm.CancelWants(keys) } // HasBlock announces the existance of a block to this bitswap service. 
The @@ -343,7 +343,7 @@ func (bs *Bitswap) updateReceiveCounters(b blocks.Block) error { } if err == nil && has { bs.dupBlocksRecvd++ - bs.dupDataRecvd += uint64(len(b.Data())) + bs.dupDataRecvd += uint64(len(b.RawData())) } if has { diff --git a/exchange/bitswap/bitswap_test.go b/exchange/bitswap/bitswap_test.go index df2bf9e279be..ea512f15d672 100644 --- a/exchange/bitswap/bitswap_test.go +++ b/exchange/bitswap/bitswap_test.go @@ -90,7 +90,7 @@ func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { t.Fatal("Expected to succeed") } - if !bytes.Equal(block.Data(), received.Data()) { + if !bytes.Equal(block.RawData(), received.RawData()) { t.Fatal("Data doesn't match") } } @@ -289,7 +289,10 @@ func TestEmptyKey(t *testing.T) { defer sg.Close() bs := sg.Instances(1)[0].Exchange - _, err := bs.GetBlock(context.Background(), key.Key("")) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + + _, err := bs.GetBlock(ctx, key.Key("")) if err != blockstore.ErrNotFound { t.Error("empty str key should return ErrNotFound") } diff --git a/exchange/bitswap/decision/engine.go b/exchange/bitswap/decision/engine.go index 51a0f052487e..067c8705390b 100644 --- a/exchange/bitswap/decision/engine.go +++ b/exchange/bitswap/decision/engine.go @@ -247,8 +247,8 @@ func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) error { } for _, block := range m.Blocks() { - log.Debugf("got block %s %d bytes", block.Key(), len(block.Data())) - l.ReceivedBytes(len(block.Data())) + log.Debugf("got block %s %d bytes", block, len(block.RawData())) + l.ReceivedBytes(len(block.RawData())) } return nil } @@ -286,7 +286,7 @@ func (e *Engine) AddBlock(block blocks.Block) { func (e *Engine) MessageSent(p peer.ID, m bsmsg.BitSwapMessage) error { l := e.findOrCreate(p) for _, block := range m.Blocks() { - l.SentBytes(len(block.Data())) + l.SentBytes(len(block.RawData())) l.wantList.Remove(block.Key()) e.peerRequestQueue.Remove(block.Key(), p) } diff --git a/exchange/bitswap/decision/engine_test.go b/exchange/bitswap/decision/engine_test.go index f9cb8aae34a9..e25575161502 100644 --- a/exchange/bitswap/decision/engine_test.go +++ b/exchange/bitswap/decision/engine_test.go @@ -188,7 +188,7 @@ func checkHandledInOrder(t *testing.T, e *Engine, keys []string) error { received := envelope.Block expected := blocks.NewBlock([]byte(k)) if received.Key() != expected.Key() { - return errors.New(fmt.Sprintln("received", string(received.Data()), "expected", string(expected.Data()))) + return errors.New(fmt.Sprintln("received", string(received.RawData()), "expected", string(expected.RawData()))) } } return nil diff --git a/exchange/bitswap/message/message.go b/exchange/bitswap/message/message.go index f3b45e0545fe..f73dedf6aee1 100644 --- a/exchange/bitswap/message/message.go +++ b/exchange/bitswap/message/message.go @@ -159,7 +159,7 @@ func (m *impl) ToProto() *pb.Message { }) } for _, b := range m.Blocks() { - pbm.Blocks = append(pbm.Blocks, b.Data()) + pbm.Blocks = append(pbm.Blocks, b.RawData()) } return pbm } diff --git a/exchange/bitswap/notifications/notifications_test.go b/exchange/bitswap/notifications/notifications_test.go index 3e923b84ef75..0880296e5e4b 100644 --- a/exchange/bitswap/notifications/notifications_test.go +++ b/exchange/bitswap/notifications/notifications_test.go @@ -159,7 +159,7 @@ func assertBlockChannelNil(t *testing.T, blockChannel <-chan blocks.Block) { } func assertBlocksEqual(t *testing.T, a, b blocks.Block) { - if !bytes.Equal(a.Data(), b.Data()) { + if 
!bytes.Equal(a.RawData(), b.RawData()) { t.Fatal("blocks aren't equal") } if a.Key() != b.Key() { diff --git a/exchange/bitswap/testnet/network_test.go b/exchange/bitswap/testnet/network_test.go index 077c220e05bd..dfbf45c01faa 100644 --- a/exchange/bitswap/testnet/network_test.go +++ b/exchange/bitswap/testnet/network_test.go @@ -44,7 +44,7 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) { // TODO assert that this came from the correct peer and that the message contents are as expected ok := false for _, b := range msgFromResponder.Blocks() { - if string(b.Data()) == expectedStr { + if string(b.RawData()) == expectedStr { wg.Done() ok = true } diff --git a/exchange/interface.go b/exchange/interface.go index 6db476d9ec2e..6f246ebc0a4b 100644 --- a/exchange/interface.go +++ b/exchange/interface.go @@ -6,6 +6,7 @@ import ( blocks "github.com/ipfs/go-ipfs/blocks" key "github.com/ipfs/go-ipfs/blocks/key" + context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/exchange/offline/offline.go b/exchange/offline/offline.go index d2ee4fbaa645..e36d59a67f24 100644 --- a/exchange/offline/offline.go +++ b/exchange/offline/offline.go @@ -7,6 +7,7 @@ import ( "github.com/ipfs/go-ipfs/blocks/blockstore" key "github.com/ipfs/go-ipfs/blocks/key" exchange "github.com/ipfs/go-ipfs/exchange" + context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) diff --git a/fuse/ipns/common.go b/fuse/ipns/common.go index 10163195026e..80fd2e7e77d0 100644 --- a/fuse/ipns/common.go +++ b/fuse/ipns/common.go @@ -33,7 +33,7 @@ func InitializeKeyspace(n *core.IpfsNode, key ci.PrivKey) error { } pub := nsys.NewRoutingPublisher(n.Routing, n.Repo.Datastore()) - if err := pub.Publish(ctx, key, path.FromKey(nodek)); err != nil { + if err := pub.Publish(ctx, key, path.FromCid(nodek)); err != nil { return err } diff --git a/fuse/ipns/ipns_unix.go b/fuse/ipns/ipns_unix.go index 4e2e302f24fa..ba613bc5c7b1 100644 --- a/fuse/ipns/ipns_unix.go +++ b/fuse/ipns/ipns_unix.go @@ -21,6 +21,7 @@ import ( path "github.com/ipfs/go-ipfs/path" ft "github.com/ipfs/go-ipfs/unixfs" ci "gx/ipfs/QmVoi5es8D5fNHZDqoW6DgDAEPEV5hQp8GBz161vZXiwpQ/go-libp2p-crypto" + cid "gx/ipfs/QmfSc2xehWmWLnwwYR91Y8QF4xdASypTFVknutoKQS3GHp/go-cid" ) func init() { @@ -81,8 +82,8 @@ type Root struct { } func ipnsPubFunc(ipfs *core.IpfsNode, k ci.PrivKey) mfs.PubFunc { - return func(ctx context.Context, key key.Key) error { - return ipfs.Namesys.Publish(ctx, k, path.FromKey(key)) + return func(ctx context.Context, c *cid.Cid) error { + return ipfs.Namesys.Publish(ctx, k, path.FromCid(c)) } } diff --git a/fuse/readonly/readonly_unix.go b/fuse/readonly/readonly_unix.go index 3333a41d4476..d24e067fa008 100644 --- a/fuse/readonly/readonly_unix.go +++ b/fuse/readonly/readonly_unix.go @@ -163,15 +163,12 @@ func (s *Node) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (string, func (s *Node) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error { - k, err := s.Nd.Key() - if err != nil { - return err - } + c := s.Nd.Cid() // setup our logging event lm := make(lgbl.DeferredMap) lm["fs"] = "ipfs" - lm["key"] = func() interface{} { return k.B58String() } + lm["key"] = func() interface{} { return c.String() } lm["req_offset"] = req.Offset lm["req_size"] = req.Size defer log.EventBegin(ctx, "fuseRead", lm).Done() diff --git a/importer/balanced/balanced_test.go b/importer/balanced/balanced_test.go index 60a479fdfb8b..ac25abe919f8 100644 --- 
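[editor's note] The mfs.PubFunc change visible in ipnsPubFunc above ripples through the rest of this patch: publishers now receive a *cid.Cid and turn it into a path with path.FromCid. A minimal sketch of such a callback; pubFunc is a hypothetical name and the real namesys publisher is replaced by a print, purely for illustration.

package main

import (
	"fmt"

	path "github.com/ipfs/go-ipfs/path"

	context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
	cid "gx/ipfs/QmfSc2xehWmWLnwwYR91Y8QF4xdASypTFVknutoKQS3GHp/go-cid"
)

// pubFunc mirrors the new mfs.PubFunc shape: it is handed the CID of the
// freshly flushed root instead of a key.Key.
func pubFunc(ctx context.Context, c *cid.Cid) error {
	fmt.Println("would publish", path.FromCid(c)) // "/ipfs/Qm..."
	return nil
}

func main() {
	c, err := cid.Decode("QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n")
	if err != nil {
		panic(err)
	}
	if err := pubFunc(context.Background(), c); err != nil {
		panic(err)
	}
}
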
a/importer/balanced/balanced_test.go +++ b/importer/balanced/balanced_test.go @@ -15,6 +15,7 @@ import ( mdtest "github.com/ipfs/go-ipfs/merkledag/test" pin "github.com/ipfs/go-ipfs/pin" uio "github.com/ipfs/go-ipfs/unixfs/io" + u "gx/ipfs/QmZNVWh8LLjAavuQ2JXuFmuYH3C11xo988vSgp7UQrTRj1/go-ipfs-util" "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" ) @@ -78,6 +79,30 @@ func TestBuilderConsistency(t *testing.T) { testFileConsistency(t, 100000, chunk.DefaultBlockSize) } +func TestNoChunking(t *testing.T) { + ds := mdtest.Mock() + + nd, should := getTestDag(t, ds, 1000, 2000) + r, err := uio.NewDagReader(context.Background(), nd, ds) + if err != nil { + t.Fatal(err) + } + + dagrArrComp(t, r, should) +} + +func TestTwoChunks(t *testing.T) { + ds := mdtest.Mock() + + nd, should := getTestDag(t, ds, 2000, 1000) + r, err := uio.NewDagReader(context.Background(), nd, ds) + if err != nil { + t.Fatal(err) + } + + dagrArrComp(t, r, should) +} + func arrComp(a, b []byte) error { if len(a) != len(b) { return fmt.Errorf("Arrays differ in length. %d != %d", len(a), len(b)) diff --git a/merkledag/coding.go b/merkledag/coding.go index 2c92b559f3a6..1367356158e1 100644 --- a/merkledag/coding.go +++ b/merkledag/coding.go @@ -4,10 +4,11 @@ import ( "fmt" "sort" - mh "gx/ipfs/QmYf7ng2hG5XBtJA3tN34DQ2GUN5HNksEw1rLDkmr6vGku/go-multihash" - pb "github.com/ipfs/go-ipfs/merkledag/pb" + + mh "gx/ipfs/QmYf7ng2hG5XBtJA3tN34DQ2GUN5HNksEw1rLDkmr6vGku/go-multihash" u "gx/ipfs/QmZNVWh8LLjAavuQ2JXuFmuYH3C11xo988vSgp7UQrTRj1/go-ipfs-util" + cid "gx/ipfs/QmfSc2xehWmWLnwwYR91Y8QF4xdASypTFVknutoKQS3GHp/go-cid" ) // for now, we use a PBNode intermediate thing. @@ -83,7 +84,7 @@ func (n *Node) EncodeProtobuf(force bool) ([]byte, error) { } if n.cached == nil { - n.cached = u.Hash(n.encoded) + n.cached = cid.NewCidV0(u.Hash(n.encoded)) } return n.encoded, nil diff --git a/merkledag/merkledag.go b/merkledag/merkledag.go index 7d601d86d383..84be64e88aec 100644 --- a/merkledag/merkledag.go +++ b/merkledag/merkledag.go @@ -6,11 +6,12 @@ import ( "strings" "sync" - blocks "github.com/ipfs/go-ipfs/blocks" key "github.com/ipfs/go-ipfs/blocks/key" bserv "github.com/ipfs/go-ipfs/blockservice" + logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + cid "gx/ipfs/QmfSc2xehWmWLnwwYR91Y8QF4xdASypTFVknutoKQS3GHp/go-cid" ) var log = logging.Logger("merkledag") @@ -18,13 +19,13 @@ var ErrNotFound = fmt.Errorf("merkledag: not found") // DAGService is an IPFS Merkle DAG service. type DAGService interface { - Add(*Node) (key.Key, error) - Get(context.Context, key.Key) (*Node, error) + Add(*Node) (*cid.Cid, error) + Get(context.Context, *cid.Cid) (*Node, error) Remove(*Node) error // GetDAG returns, in order, all the single leve child // nodes of the passed in node. - GetMany(context.Context, []key.Key) <-chan *NodeOption + GetMany(context.Context, []*cid.Cid) <-chan *NodeOption Batch() *Batch } @@ -43,24 +44,12 @@ type dagService struct { } // Add adds a node to the dagService, storing the block in the BlockService -func (n *dagService) Add(nd *Node) (key.Key, error) { +func (n *dagService) Add(nd *Node) (*cid.Cid, error) { if n == nil { // FIXME remove this assertion. 
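[editor's note] To make the reworked DAGService surface concrete: Add now hands back a *cid.Cid and Get takes one. A small sketch against the in-memory mock that the tests in this patch use, with error handling trimmed to panics for brevity.

package main

import (
	"fmt"

	dag "github.com/ipfs/go-ipfs/merkledag"
	mdtest "github.com/ipfs/go-ipfs/merkledag/test"

	context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
)

func main() {
	ds := mdtest.Mock()

	nd := dag.NodeWithData([]byte("hello cids"))

	c, err := ds.Add(nd) // *cid.Cid instead of key.Key
	if err != nil {
		panic(err)
	}

	out, err := ds.Get(context.Background(), c)
	if err != nil {
		panic(err)
	}
	fmt.Println(c.Equals(out.Cid())) // true: round trip preserves the CID
}
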
protect with constructor invariant - return "", fmt.Errorf("dagService is nil") - } - - d, err := nd.EncodeProtobuf(false) - if err != nil { - return "", err - } - - mh, err := nd.Multihash() - if err != nil { - return "", err + return nil, fmt.Errorf("dagService is nil") } - b, _ := blocks.NewBlockWithHash(d, mh) - - return n.Blocks.AddBlock(b) + return n.Blocks.AddObject(nd) } func (n *dagService) Batch() *Batch { @@ -68,56 +57,67 @@ func (n *dagService) Batch() *Batch { } // Get retrieves a node from the dagService, fetching the block in the BlockService -func (n *dagService) Get(ctx context.Context, k key.Key) (*Node, error) { - if k == "" { - return nil, ErrNotFound - } +func (n *dagService) Get(ctx context.Context, c *cid.Cid) (*Node, error) { if n == nil { return nil, fmt.Errorf("dagService is nil") } + ctx, cancel := context.WithCancel(ctx) defer cancel() - b, err := n.Blocks.GetBlock(ctx, k) + b, err := n.Blocks.GetBlock(ctx, c) if err != nil { if err == bserv.ErrNotFound { return nil, ErrNotFound } - return nil, fmt.Errorf("Failed to get block for %s: %v", k.B58String(), err) + return nil, fmt.Errorf("Failed to get block for %s: %v", c, err) } - res, err := DecodeProtobuf(b.Data()) - if err != nil { - if strings.Contains(err.Error(), "Unmarshal failed") { - return nil, fmt.Errorf("The block referred to by '%s' was not a valid merkledag node", k) + var res *Node + switch c.Type() { + case cid.Protobuf: + out, err := DecodeProtobuf(b.RawData()) + if err != nil { + fmt.Println("bad data: ", b.RawData()) + fmt.Println(err) + if strings.Contains(err.Error(), "Unmarshal failed") { + return nil, fmt.Errorf("The block referred to by '%s' was not a valid merkledag node", c) + } + return nil, fmt.Errorf("Failed to decode Protocol Buffers: %v", err) } - return nil, fmt.Errorf("Failed to decode Protocol Buffers: %v", err) + res = out + default: + return nil, fmt.Errorf("unrecognized formatting type") } - res.cached = k.ToMultihash() + res.cached = c return res, nil } func (n *dagService) Remove(nd *Node) error { - k, err := nd.Key() - if err != nil { - return err - } - return n.Blocks.DeleteBlock(k) + return n.Blocks.DeleteObject(nd) } // FetchGraph fetches all nodes that are children of the given node func FetchGraph(ctx context.Context, root *Node, serv DAGService) error { - return EnumerateChildrenAsync(ctx, serv, root, key.NewKeySet()) + s := cid.NewSet() + return EnumerateChildrenAsync(ctx, serv, root, func(c *cid.Cid) bool { + has := s.Has(c) + if !has { + s.Add(c) + return true + } + return false + }) } // FindLinks searches this nodes links for the given key, // returns the indexes of any links pointing to it -func FindLinks(links []key.Key, k key.Key, start int) []int { +func FindLinks(links []*cid.Cid, c *cid.Cid, start int) []int { var out []int - for i, lnk_k := range links[start:] { - if k == lnk_k { + for i, lnk_c := range links[start:] { + if c.Equals(lnk_c) { out = append(out, i+start) } } @@ -129,11 +129,21 @@ type NodeOption struct { Err error } -func (ds *dagService) GetMany(ctx context.Context, keys []key.Key) <-chan *NodeOption { +func cidsToKeyMapping(cids []*cid.Cid) map[key.Key]*cid.Cid { + mapping := make(map[key.Key]*cid.Cid) + for _, c := range cids { + mapping[key.Key(c.Hash())] = c + } + return mapping +} + +func (ds *dagService) GetMany(ctx context.Context, keys []*cid.Cid) <-chan *NodeOption { out := make(chan *NodeOption, len(keys)) blocks := ds.Blocks.GetBlocks(ctx, keys) var count int + mapping := cidsToKeyMapping(keys) + go func() { defer close(out) for { 
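[editor's note] The enumeration API above swaps the old key.KeySet argument for a visit callback: returning true means "descend into this child". A sketch of the visit-once wiring that FetchGraph and the tests build from a cid.Set; the two-node dag here is fabricated just for illustration.

package main

import (
	"fmt"

	dag "github.com/ipfs/go-ipfs/merkledag"
	mdtest "github.com/ipfs/go-ipfs/merkledag/test"

	context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
	cid "gx/ipfs/QmfSc2xehWmWLnwwYR91Y8QF4xdASypTFVknutoKQS3GHp/go-cid"
)

func main() {
	ds := mdtest.Mock()

	child := dag.NodeWithData([]byte("child"))
	if _, err := ds.Add(child); err != nil {
		panic(err)
	}

	root := dag.NodeWithData([]byte("root"))
	root.Links = []*dag.Link{{Name: "kid", Hash: child.Multihash()}}
	if _, err := ds.Add(root); err != nil {
		panic(err)
	}

	// Visit-once callback, the same shape FetchGraph builds internally.
	seen := cid.NewSet()
	visit := func(c *cid.Cid) bool {
		if seen.Has(c) {
			return false // already walked, do not descend again
		}
		seen.Add(c)
		return true // first time seen: recurse into it
	}

	if err := dag.EnumerateChildren(context.Background(), ds, root, visit, false); err != nil {
		panic(err)
	}
	fmt.Println("children visited:", seen.Len())
}
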
@@ -145,12 +155,23 @@ func (ds *dagService) GetMany(ctx context.Context, keys []key.Key) <-chan *NodeO } return } - nd, err := DecodeProtobuf(b.Data()) - if err != nil { - out <- &NodeOption{Err: err} + + c := mapping[b.Key()] + + var nd *Node + switch c.Type() { + case cid.Protobuf: + decnd, err := DecodeProtobuf(b.RawData()) + if err != nil { + out <- &NodeOption{Err: err} + return + } + decnd.cached = cid.NewCidV0(b.Multihash()) + nd = decnd + default: + out <- &NodeOption{Err: fmt.Errorf("unrecognized object type: %s", c.Type())} return } - nd.cached = b.Key().ToMultihash() // buffered, no need to select out <- &NodeOption{Node: nd} @@ -169,17 +190,17 @@ func (ds *dagService) GetMany(ctx context.Context, keys []key.Key) <-chan *NodeO // It returns a channel of nodes, which the caller can receive // all the child nodes of 'root' on, in proper order. func GetDAG(ctx context.Context, ds DAGService, root *Node) []NodeGetter { - var keys []key.Key + var cids []*cid.Cid for _, lnk := range root.Links { - keys = append(keys, key.Key(lnk.Hash)) + cids = append(cids, cid.NewCidV0(lnk.Hash)) } - return GetNodes(ctx, ds, keys) + return GetNodes(ctx, ds, cids) } // GetNodes returns an array of 'NodeGetter' promises, with each corresponding // to the key with the same index as the passed in keys -func GetNodes(ctx context.Context, ds DAGService, keys []key.Key) []NodeGetter { +func GetNodes(ctx context.Context, ds DAGService, keys []*cid.Cid) []NodeGetter { // Early out if no work to do if len(keys) == 0 { @@ -216,14 +237,7 @@ func GetNodes(ctx context.Context, ds DAGService, keys []key.Key) []NodeGetter { } nd := opt.Node - - k, err := nd.Key() - if err != nil { - log.Error("Failed to get node key: ", err) - continue - } - - is := FindLinks(keys, k, 0) + is := FindLinks(keys, nd.Cid(), 0) for _, i := range is { count++ promises[i].Send(nd) @@ -237,13 +251,14 @@ func GetNodes(ctx context.Context, ds DAGService, keys []key.Key) []NodeGetter { } // Remove duplicates from a list of keys -func dedupeKeys(ks []key.Key) []key.Key { - kmap := make(map[key.Key]struct{}) - var out []key.Key - for _, k := range ks { - if _, ok := kmap[k]; !ok { - kmap[k] = struct{}{} - out = append(out, k) +func dedupeKeys(cids []*cid.Cid) []*cid.Cid { + kmap := make(map[string]struct{}) + var out []*cid.Cid + for _, c := range cids { + s := string(c.Bytes()) + if _, ok := kmap[s]; !ok { + kmap[s] = struct{}{} + out = append(out, c) } } return out @@ -327,50 +342,44 @@ func (np *nodePromise) Get(ctx context.Context) (*Node, error) { type Batch struct { ds *dagService - blocks []blocks.Block + objects []bserv.Object size int MaxSize int } -func (t *Batch) Add(nd *Node) (key.Key, error) { +func (t *Batch) Add(nd *Node) (*cid.Cid, error) { d, err := nd.EncodeProtobuf(false) if err != nil { - return "", err - } - - mh, err := nd.Multihash() - if err != nil { - return "", err + return nil, err } - b, _ := blocks.NewBlockWithHash(d, mh) - - k := key.Key(mh) - - t.blocks = append(t.blocks, b) - t.size += len(b.Data()) + t.objects = append(t.objects, nd) + t.size += len(d) if t.size > t.MaxSize { - return k, t.Commit() + return nd.Cid(), t.Commit() } - return k, nil + return nd.Cid(), nil } func (t *Batch) Commit() error { - _, err := t.ds.Blocks.AddBlocks(t.blocks) - t.blocks = nil + _, err := t.ds.Blocks.AddObjects(t.objects) + t.objects = nil t.size = 0 return err } +func legacyCidFromLink(lnk *Link) *cid.Cid { + return cid.NewCidV0(lnk.Hash) +} + // EnumerateChildren will walk the dag below the given root node and add all // 
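[editor's note] A short sketch of the reworked Batch above: nodes are buffered as blockservice objects and the CID of each is known immediately, but writes are only guaranteed once Commit runs (or once the buffered size crosses MaxSize, which ds.Batch() is assumed to set to a sensible default).

package main

import (
	"fmt"

	dag "github.com/ipfs/go-ipfs/merkledag"
	mdtest "github.com/ipfs/go-ipfs/merkledag/test"
)

func main() {
	ds := mdtest.Mock()
	b := ds.Batch()

	for i := 0; i < 3; i++ {
		nd := dag.NodeWithData([]byte{byte(i)})
		c, err := b.Add(nd) // CID returned right away, write may be deferred
		if err != nil {
			panic(err)
		}
		fmt.Println("queued", c)
	}

	// Nothing is guaranteed to be stored until the batch is committed.
	if err := b.Commit(); err != nil {
		panic(err)
	}
}
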
unseen children to the passed in set. // TODO: parallelize to avoid disk latency perf hits? -func EnumerateChildren(ctx context.Context, ds DAGService, root *Node, set key.KeySet, bestEffort bool) error { +func EnumerateChildren(ctx context.Context, ds DAGService, root *Node, visit func(*cid.Cid) bool, bestEffort bool) error { for _, lnk := range root.Links { - k := key.Key(lnk.Hash) - if !set.Has(k) { - set.Add(k) - child, err := ds.Get(ctx, k) + c := legacyCidFromLink(lnk) + if visit(c) { + child, err := ds.Get(ctx, c) if err != nil { if bestEffort && err == ErrNotFound { continue @@ -378,7 +387,7 @@ func EnumerateChildren(ctx context.Context, ds DAGService, root *Node, set key.K return err } } - err = EnumerateChildren(ctx, ds, child, set, bestEffort) + err = EnumerateChildren(ctx, ds, child, visit, bestEffort) if err != nil { return err } @@ -387,8 +396,8 @@ func EnumerateChildren(ctx context.Context, ds DAGService, root *Node, set key.K return nil } -func EnumerateChildrenAsync(ctx context.Context, ds DAGService, root *Node, set key.KeySet) error { - toprocess := make(chan []key.Key, 8) +func EnumerateChildrenAsync(ctx context.Context, ds DAGService, root *Node, visit func(*cid.Cid) bool) error { + toprocess := make(chan []*cid.Cid, 8) nodes := make(chan *NodeOption, 8) ctx, cancel := context.WithCancel(ctx) @@ -416,13 +425,12 @@ func EnumerateChildrenAsync(ctx context.Context, ds DAGService, root *Node, set // a node has been fetched live-- - var keys []key.Key + var cids []*cid.Cid for _, lnk := range nd.Links { - k := key.Key(lnk.Hash) - if !set.Has(k) { - set.Add(k) + c := legacyCidFromLink(lnk) + if visit(c) { live++ - keys = append(keys, k) + cids = append(cids, c) } } @@ -430,9 +438,9 @@ func EnumerateChildrenAsync(ctx context.Context, ds DAGService, root *Node, set return nil } - if len(keys) > 0 { + if len(cids) > 0 { select { - case toprocess <- keys: + case toprocess <- cids: case <-ctx.Done(): return ctx.Err() } @@ -443,7 +451,7 @@ func EnumerateChildrenAsync(ctx context.Context, ds DAGService, root *Node, set } } -func fetchNodes(ctx context.Context, ds DAGService, in <-chan []key.Key, out chan<- *NodeOption) { +func fetchNodes(ctx context.Context, ds DAGService, in <-chan []*cid.Cid, out chan<- *NodeOption) { var wg sync.WaitGroup defer func() { // wait for all 'get' calls to complete so we don't accidentally send @@ -452,7 +460,7 @@ func fetchNodes(ctx context.Context, ds DAGService, in <-chan []key.Key, out cha close(out) }() - get := func(ks []key.Key) { + get := func(ks []*cid.Cid) { defer wg.Done() nodes := ds.GetMany(ctx, ks) for opt := range nodes { diff --git a/merkledag/merkledag_test.go b/merkledag/merkledag_test.go index 38545ac126fa..532934c3d15f 100644 --- a/merkledag/merkledag_test.go +++ b/merkledag/merkledag_test.go @@ -20,8 +20,10 @@ import ( mdpb "github.com/ipfs/go-ipfs/merkledag/pb" dstest "github.com/ipfs/go-ipfs/merkledag/test" uio "github.com/ipfs/go-ipfs/unixfs/io" + u "gx/ipfs/QmZNVWh8LLjAavuQ2JXuFmuYH3C11xo988vSgp7UQrTRj1/go-ipfs-util" "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + cid "gx/ipfs/QmfSc2xehWmWLnwwYR91Y8QF4xdASypTFVknutoKQS3GHp/go-cid" ) func TestNode(t *testing.T) { @@ -52,17 +54,9 @@ func TestNode(t *testing.T) { fmt.Println("encoded:", e) } - h, err := n.Multihash() - if err != nil { - t.Error(err) - } else { - fmt.Println("hash:", h) - } - - k, err := n.Key() - if err != nil { - t.Error(err) - } else if k != key.Key(h) { + h := n.Multihash() + k := n.Key() + if k != key.Key(h) { t.Error("Key 
is not equivalent to multihash") } else { fmt.Println("key: ", k) @@ -89,11 +83,7 @@ func SubtestNodeStat(t *testing.T, n *Node) { return } - k, err := n.Key() - if err != nil { - t.Error("n.Key() failed") - return - } + k := n.Key() expected := NodeStat{ NumLinks: len(n.Links), @@ -169,10 +159,7 @@ func runBatchFetchTest(t *testing.T, read io.Reader) { t.Log("Added file to first node.") - k, err := root.Key() - if err != nil { - t.Fatal(err) - } + c := root.Cid() wg := sync.WaitGroup{} errs := make(chan error) @@ -181,7 +168,7 @@ func runBatchFetchTest(t *testing.T, read io.Reader) { wg.Add(1) go func(i int) { defer wg.Done() - first, err := dagservs[i].Get(ctx, k) + first, err := dagservs[i].Get(ctx, c) if err != nil { errs <- err } @@ -215,34 +202,17 @@ func runBatchFetchTest(t *testing.T, read io.Reader) { } func assertCanGet(t *testing.T, ds DAGService, n *Node) { - k, err := n.Key() - if err != nil { + if _, err := ds.Get(context.Background(), n.Cid()); err != nil { t.Fatal(err) } - - if _, err := ds.Get(context.Background(), k); err != nil { - t.Fatal(err) - } -} - -func TestEmptyKey(t *testing.T) { - ds := dstest.Mock() - _, err := ds.Get(context.Background(), key.Key("")) - if err != ErrNotFound { - t.Error("dag service should error when key is nil", err) - } } func TestCantGet(t *testing.T) { ds := dstest.Mock() a := NodeWithData([]byte("A")) - k, err := a.Key() - if err != nil { - t.Fatal(err) - } - - _, err = ds.Get(context.Background(), k) + c := a.Cid() + _, err := ds.Get(context.Background(), c) if !strings.Contains(err.Error(), "not found") { t.Fatal("expected err not found, got: ", err) } @@ -270,9 +240,8 @@ func TestFetchGraph(t *testing.T) { bs := bserv.New(bsis[1].Blockstore, offline.Exchange(bsis[1].Blockstore)) offline_ds := NewDAGService(bs) - ks := key.NewKeySet() - err = EnumerateChildren(context.Background(), offline_ds, root, ks, false) + err = EnumerateChildren(context.Background(), offline_ds, root, func(_ *cid.Cid) bool { return true }, false) if err != nil { t.Fatal(err) } @@ -288,8 +257,14 @@ func TestEnumerateChildren(t *testing.T) { t.Fatal(err) } - ks := key.NewKeySet() - err = EnumerateChildren(context.Background(), ds, root, ks, false) + set := cid.NewSet() + err = EnumerateChildren(context.Background(), ds, root, func(c *cid.Cid) bool { + if !set.Has(c) { + set.Add(c) + return true + } + return false + }, false) if err != nil { t.Fatal(err) } @@ -298,11 +273,11 @@ func TestEnumerateChildren(t *testing.T) { traverse = func(n *Node) { // traverse dag and check for _, lnk := range n.Links { - k := key.Key(lnk.Hash) - if !ks.Has(k) { + c := cid.NewCidV0(lnk.Hash) + if !set.Has(c) { t.Fatal("missing key in set!") } - child, err := ds.Get(context.Background(), k) + child, err := ds.Get(context.Background(), c) if err != nil { t.Fatal(err) } @@ -379,3 +354,22 @@ func TestUnmarshalFailure(t *testing.T) { n := &Node{} n.Marshal() } + +func TestBasicAddGet(t *testing.T) { + ds := dstest.Mock() + nd := new(Node) + + c, err := ds.Add(nd) + if err != nil { + t.Fatal(err) + } + + out, err := ds.Get(context.Background(), c) + if err != nil { + t.Fatal(err) + } + + if !nd.Cid().Equals(out.Cid()) { + t.Fatal("output didnt match input") + } +} diff --git a/merkledag/node.go b/merkledag/node.go index 7be5c4d0a835..b3add5f37ac2 100644 --- a/merkledag/node.go +++ b/merkledag/node.go @@ -7,6 +7,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" mh "gx/ipfs/QmYf7ng2hG5XBtJA3tN34DQ2GUN5HNksEw1rLDkmr6vGku/go-multihash" + cid 
"gx/ipfs/QmfSc2xehWmWLnwwYR91Y8QF4xdASypTFVknutoKQS3GHp/go-cid" ) var ErrLinkNotFound = fmt.Errorf("no link by that name") @@ -20,7 +21,7 @@ type Node struct { // cache encoded/marshaled value encoded []byte - cached mh.Multihash + cached *cid.Cid } // NodeStat is a statistics object for a Node. Mostly sizes. @@ -63,10 +64,8 @@ func MakeLink(n *Node) (*Link, error) { return nil, err } - h, err := n.Multihash() - if err != nil { - return nil, err - } + h := n.Multihash() + return &Link{ Size: s, Hash: h, @@ -75,7 +74,7 @@ func MakeLink(n *Node) (*Link, error) { // GetNode returns the MDAG Node that this link points to func (l *Link) GetNode(ctx context.Context, serv DAGService) (*Node, error) { - return serv.Get(ctx, key.Key(l.Hash)) + return serv.Get(ctx, legacyCidFromLink(l)) } func NodeWithData(d []byte) *Node { @@ -184,6 +183,11 @@ func (n *Node) Copy() *Node { return nnode } +func (n *Node) RawData() []byte { + out, _ := n.EncodeProtobuf(false) + return out +} + func (n *Node) Data() []byte { return n.data } @@ -231,13 +235,8 @@ func (n *Node) Stat() (*NodeStat, error) { return nil, err } - key, err := n.Key() - if err != nil { - return nil, err - } - return &NodeStat{ - Hash: key.B58String(), + Hash: n.Key().B58String(), NumLinks: len(n.Links), BlockSize: len(enc), LinksSize: len(enc) - len(n.data), // includes framing. @@ -246,19 +245,34 @@ func (n *Node) Stat() (*NodeStat, error) { }, nil } +func (n *Node) Key() key.Key { + return key.Key(n.Multihash()) +} + +func (n *Node) Loggable() map[string]interface{} { + return map[string]interface{}{ + "node": n.String(), + } +} + +func (n *Node) Cid() *cid.Cid { + h := n.Multihash() + + return cid.NewCidV0(h) +} + +func (n *Node) String() string { + return n.Cid().String() +} + // Multihash hashes the encoded data of this node. -func (n *Node) Multihash() (mh.Multihash, error) { +func (n *Node) Multihash() mh.Multihash { // NOTE: EncodeProtobuf generates the hash and puts it in n.cached. _, err := n.EncodeProtobuf(false) if err != nil { - return nil, err + // Note: no possibility exists for an error to be returned through here + panic(err) } - return n.cached, nil -} - -// Key returns the Multihash as a key, for maps. 
-func (n *Node) Key() (key.Key, error) { - h, err := n.Multihash() - return key.Key(h), err + return n.cached.Hash() } diff --git a/merkledag/node_test.go b/merkledag/node_test.go index d248ad359654..a35013dcaa01 100644 --- a/merkledag/node_test.go +++ b/merkledag/node_test.go @@ -67,9 +67,9 @@ func TestFindLink(t *testing.T) { nd := &Node{ Links: []*Link{ - &Link{Name: "a", Hash: k.ToMultihash()}, - &Link{Name: "c", Hash: k.ToMultihash()}, - &Link{Name: "b", Hash: k.ToMultihash()}, + &Link{Name: "a", Hash: k.Hash()}, + &Link{Name: "c", Hash: k.Hash()}, + &Link{Name: "b", Hash: k.Hash()}, }, } @@ -107,7 +107,7 @@ func TestFindLink(t *testing.T) { t.Fatal(err) } - if olnk.Hash.B58String() == k.B58String() { + if olnk.Hash.B58String() == k.String() { t.Fatal("new link should have different hash") } } diff --git a/merkledag/traverse/traverse.go b/merkledag/traverse/traverse.go index d07354617345..a3bb06001dd9 100644 --- a/merkledag/traverse/traverse.go +++ b/merkledag/traverse/traverse.go @@ -41,11 +41,7 @@ type traversal struct { func (t *traversal) shouldSkip(n *mdag.Node) (bool, error) { if t.opts.SkipDuplicates { - k, err := n.Key() - if err != nil { - return true, err - } - + k := n.Key() if _, found := t.seen[string(k)]; found { return true, nil } diff --git a/merkledag/utils/diff.go b/merkledag/utils/diff.go index 493394437be8..406000596c06 100644 --- a/merkledag/utils/diff.go +++ b/merkledag/utils/diff.go @@ -5,9 +5,10 @@ import ( "fmt" "path" - key "github.com/ipfs/go-ipfs/blocks/key" dag "github.com/ipfs/go-ipfs/merkledag" + context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + cid "gx/ipfs/QmfSc2xehWmWLnwwYR91Y8QF4xdASypTFVknutoKQS3GHp/go-cid" ) const ( @@ -19,18 +20,18 @@ const ( type Change struct { Type int Path string - Before key.Key - After key.Key + Before *cid.Cid + After *cid.Cid } func (c *Change) String() string { switch c.Type { case Add: - return fmt.Sprintf("Added %s at %s", c.After.B58String()[:6], c.Path) + return fmt.Sprintf("Added %s at %s", c.After.String(), c.Path) case Remove: - return fmt.Sprintf("Removed %s from %s", c.Before.B58String()[:6], c.Path) + return fmt.Sprintf("Removed %s from %s", c.Before.String(), c.Path) case Mod: - return fmt.Sprintf("Changed %s to %s at %s", c.Before.B58String()[:6], c.After.B58String()[:6], c.Path) + return fmt.Sprintf("Changed %s to %s at %s", c.Before.String(), c.After.String(), c.Path) default: panic("nope") } @@ -77,21 +78,11 @@ func ApplyChange(ctx context.Context, ds dag.DAGService, nd *dag.Node, cs []*Cha func Diff(ctx context.Context, ds dag.DAGService, a, b *dag.Node) ([]*Change, error) { if len(a.Links) == 0 && len(b.Links) == 0 { - ak, err := a.Key() - if err != nil { - return nil, err - } - - bk, err := b.Key() - if err != nil { - return nil, err - } - return []*Change{ &Change{ Type: Mod, - Before: ak, - After: bk, + Before: a.Cid(), + After: b.Cid(), }, }, nil } @@ -136,14 +127,14 @@ func Diff(ctx context.Context, ds dag.DAGService, a, b *dag.Node) ([]*Change, er out = append(out, &Change{ Type: Remove, Path: lnk.Name, - Before: key.Key(lnk.Hash), + Before: cid.NewCidV0(lnk.Hash), }) } for _, lnk := range clean_b.Links { out = append(out, &Change{ Type: Add, Path: lnk.Name, - After: key.Key(lnk.Hash), + After: cid.NewCidV0(lnk.Hash), }) } diff --git a/merkledag/utils/utils_test.go b/merkledag/utils/utils_test.go index 1ec444b0ba19..0585f8684672 100644 --- a/merkledag/utils/utils_test.go +++ b/merkledag/utils/utils_test.go @@ -3,12 +3,12 @@ package dagutils import ( "testing" - 
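[editor's note] For the dagutils changes above, a sketch of what a leaf-level diff now yields: Change.Before and Change.After are *cid.Cid values, and Change.String prints their full base58 form (the six-character truncation was dropped). The two nodes here are fabricated for illustration.

package main

import (
	"fmt"

	dag "github.com/ipfs/go-ipfs/merkledag"
	mdtest "github.com/ipfs/go-ipfs/merkledag/test"
	dagutils "github.com/ipfs/go-ipfs/merkledag/utils"

	context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
)

func main() {
	ds := mdtest.Mock()

	a := dag.NodeWithData([]byte("before"))
	b := dag.NodeWithData([]byte("after"))

	// Two link-less nodes produce a single Mod change.
	changes, err := dagutils.Diff(context.Background(), ds, a, b)
	if err != nil {
		panic(err)
	}
	for _, ch := range changes {
		fmt.Println(ch) // "Changed Qm... to Qm... at "
		fmt.Println(ch.Before.Equals(a.Cid()), ch.After.Equals(b.Cid()))
	}
}
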
key "github.com/ipfs/go-ipfs/blocks/key" dag "github.com/ipfs/go-ipfs/merkledag" mdtest "github.com/ipfs/go-ipfs/merkledag/test" path "github.com/ipfs/go-ipfs/path" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + cid "gx/ipfs/QmfSc2xehWmWLnwwYR91Y8QF4xdASypTFVknutoKQS3GHp/go-cid" ) func TestAddLink(t *testing.T) { @@ -31,17 +31,13 @@ func TestAddLink(t *testing.T) { t.Fatal(err) } - fnpkey, err := fnprime.Key() - if err != nil { - t.Fatal(err) - } - - if fnpkey != fk { + fnpkey := fnprime.Cid() + if !fnpkey.Equals(fk) { t.Fatal("wrong child node found!") } } -func assertNodeAtPath(t *testing.T, ds dag.DAGService, root *dag.Node, pth string, exp key.Key) { +func assertNodeAtPath(t *testing.T, ds dag.DAGService, root *dag.Node, pth string, exp *cid.Cid) { parts := path.SplitList(pth) cur := root for _, e := range parts { @@ -53,12 +49,8 @@ func assertNodeAtPath(t *testing.T, ds dag.DAGService, root *dag.Node, pth strin cur = nxt } - curk, err := cur.Key() - if err != nil { - t.Fatal(err) - } - - if curk != exp { + curc := cur.Cid() + if !curc.Equals(exp) { t.Fatal("node not as expected at end of path") } } @@ -77,13 +69,10 @@ func TestInsertNode(t *testing.T) { testInsert(t, e, "", "bar", true, "cannot create link with no name!") testInsert(t, e, "////", "slashes", true, "cannot create link with no name!") - k, err := e.GetNode().Key() - if err != nil { - t.Fatal(err) - } + c := e.GetNode().Cid() - if k.B58String() != "QmZ8yeT9uD6ouJPNAYt62XffYuXBT6b4mP4obRSE9cJrSt" { - t.Fatal("output was different than expected: ", k) + if c.String() != "QmZ8yeT9uD6ouJPNAYt62XffYuXBT6b4mP4obRSE9cJrSt" { + t.Fatal("output was different than expected: ", c) } } diff --git a/mfs/dir.go b/mfs/dir.go index 9009d2431b81..3612516f51bb 100644 --- a/mfs/dir.go +++ b/mfs/dir.go @@ -246,12 +246,7 @@ func (d *Directory) List() ([]NodeListing, error) { return nil, err } - k, err := nd.Key() - if err != nil { - return nil, err - } - - child.Hash = k.B58String() + child.Hash = nd.Key().B58String() out = append(out, child) } diff --git a/mfs/mfs_test.go b/mfs/mfs_test.go index f4aba72cbe83..13da0358ed60 100644 --- a/mfs/mfs_test.go +++ b/mfs/mfs_test.go @@ -14,7 +14,6 @@ import ( "time" bstore "github.com/ipfs/go-ipfs/blocks/blockstore" - key "github.com/ipfs/go-ipfs/blocks/key" bserv "github.com/ipfs/go-ipfs/blockservice" offline "github.com/ipfs/go-ipfs/exchange/offline" importer "github.com/ipfs/go-ipfs/importer" @@ -28,6 +27,7 @@ import ( dssync "gx/ipfs/QmNgqJarToRiq2GBaPJhkmW4B5BxS5B74E1rkGvv2JoaTp/go-datastore/sync" u "gx/ipfs/QmZNVWh8LLjAavuQ2JXuFmuYH3C11xo988vSgp7UQrTRj1/go-ipfs-util" "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + cid "gx/ipfs/QmfSc2xehWmWLnwwYR91Y8QF4xdASypTFVknutoKQS3GHp/go-cid" ) func emptyDirNode() *dag.Node { @@ -187,8 +187,8 @@ func setupRoot(ctx context.Context, t *testing.T) (dag.DAGService, *Root) { ds := getDagserv(t) root := emptyDirNode() - rt, err := NewRoot(ctx, ds, root, func(ctx context.Context, k key.Key) error { - fmt.Println("PUBLISHED: ", k) + rt, err := NewRoot(ctx, ds, root, func(ctx context.Context, c *cid.Cid) error { + fmt.Println("PUBLISHED: ", c) return nil }) @@ -280,10 +280,7 @@ func TestDirectoryLoadFromDag(t *testing.T) { t.Fatal(err) } - fihash, err := nd.Multihash() - if err != nil { - t.Fatal(err) - } + fihash := nd.Multihash() dir := emptyDirNode() _, err = ds.Add(dir) @@ -291,10 +288,7 @@ func TestDirectoryLoadFromDag(t *testing.T) { t.Fatal(err) } - dirhash, err := 
dir.Multihash() - if err != nil { - t.Fatal(err) - } + dirhash := dir.Multihash() top := emptyDirNode() top.Links = []*dag.Link{ @@ -803,11 +797,7 @@ func TestFlushing(t *testing.T) { t.Fatal("root wasnt a directory") } - rnk, err := rnd.Key() - if err != nil { - t.Fatal(err) - } - + rnk := rnd.Key() exp := "QmWMVyhTuyxUrXX3ynz171jq76yY3PktfY9Bxiph7b9ikr" if rnk.B58String() != exp { t.Fatalf("dag looks wrong, expected %s, but got %s", exp, rnk.B58String()) diff --git a/mfs/repub_test.go b/mfs/repub_test.go index 32e0b2b27bea..09d8d4124d30 100644 --- a/mfs/repub_test.go +++ b/mfs/repub_test.go @@ -4,10 +4,10 @@ import ( "testing" "time" - key "github.com/ipfs/go-ipfs/blocks/key" ci "github.com/ipfs/go-ipfs/thirdparty/testutil/ci" "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + cid "gx/ipfs/QmfSc2xehWmWLnwwYR91Y8QF4xdASypTFVknutoKQS3GHp/go-cid" ) func TestRepublisher(t *testing.T) { @@ -19,7 +19,7 @@ func TestRepublisher(t *testing.T) { pub := make(chan struct{}) - pf := func(ctx context.Context, k key.Key) error { + pf := func(ctx context.Context, c *cid.Cid) error { pub <- struct{}{} return nil } @@ -30,7 +30,7 @@ func TestRepublisher(t *testing.T) { rp := NewRepublisher(ctx, pf, tshort, tlong) go rp.Run() - rp.Update("test") + rp.Update(nil) // should hit short timeout select { @@ -43,7 +43,7 @@ func TestRepublisher(t *testing.T) { go func() { for { - rp.Update("a") + rp.Update(nil) time.Sleep(time.Millisecond * 10) select { case <-cctx.Done(): diff --git a/mfs/system.go b/mfs/system.go index 56891cc215b7..3e2e74e7641c 100644 --- a/mfs/system.go +++ b/mfs/system.go @@ -14,12 +14,12 @@ import ( "sync" "time" - key "github.com/ipfs/go-ipfs/blocks/key" dag "github.com/ipfs/go-ipfs/merkledag" ft "github.com/ipfs/go-ipfs/unixfs" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + cid "gx/ipfs/QmfSc2xehWmWLnwwYR91Y8QF4xdASypTFVknutoKQS3GHp/go-cid" ) var ErrNotExist = errors.New("no such rootfs") @@ -61,19 +61,15 @@ type Root struct { Type string } -type PubFunc func(context.Context, key.Key) error +type PubFunc func(context.Context, *cid.Cid) error // newRoot creates a new Root and starts up a republisher routine for it func NewRoot(parent context.Context, ds dag.DAGService, node *dag.Node, pf PubFunc) (*Root, error) { - ndk, err := node.Key() - if err != nil { - return nil, err - } var repub *Republisher if pf != nil { repub = NewRepublisher(parent, pf, time.Millisecond*300, time.Second*3) - repub.setVal(ndk) + repub.setVal(node.Cid()) go repub.Run() } @@ -91,9 +87,9 @@ func NewRoot(parent context.Context, ds dag.DAGService, node *dag.Node, pf PubFu switch pbn.GetType() { case ft.TDirectory: - root.val = NewDirectory(parent, ndk.String(), node, root, ds) + root.val = NewDirectory(parent, node.String(), node, root, ds) case ft.TFile, ft.TMetadata, ft.TRaw: - fi, err := NewFile(ndk.String(), node, root, ds) + fi, err := NewFile(node.String(), node, root, ds) if err != nil { return nil, err } @@ -114,13 +110,8 @@ func (kr *Root) Flush() error { return err } - k, err := nd.Key() - if err != nil { - return err - } - if kr.repub != nil { - kr.repub.Update(k) + kr.repub.Update(nd.Cid()) } return nil } @@ -128,13 +119,13 @@ func (kr *Root) Flush() error { // closeChild implements the childCloser interface, and signals to the publisher that // there are changes ready to be published func (kr *Root) closeChild(name string, nd *dag.Node, sync bool) error { - k, 
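[editor's note] The Republisher above now carries *cid.Cid values end to end. A compressed, illustrative sketch of its lifecycle in the spirit of the repub test: timings are shortened and the publish callback is replaced by a channel send, so none of this is the patch's own wiring.

package main

import (
	"fmt"
	"time"

	mfs "github.com/ipfs/go-ipfs/mfs"

	context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
	cid "gx/ipfs/QmfSc2xehWmWLnwwYR91Y8QF4xdASypTFVknutoKQS3GHp/go-cid"
)

func main() {
	pub := make(chan *cid.Cid, 1)

	pf := func(ctx context.Context, c *cid.Cid) error {
		pub <- c
		return nil
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	rp := mfs.NewRepublisher(ctx, pf, time.Millisecond*20, time.Millisecond*100)
	go rp.Run()

	c, _ := cid.Decode("QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n")
	rp.Update(c) // schedule a publish of this CID

	fmt.Println("published:", <-pub)
}
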
err := kr.dserv.Add(nd) + c, err := kr.dserv.Add(nd) if err != nil { return err } if kr.repub != nil { - kr.repub.Update(k) + kr.repub.Update(c) } return nil } @@ -145,13 +136,8 @@ func (kr *Root) Close() error { return err } - k, err := nd.Key() - if err != nil { - return err - } - if kr.repub != nil { - kr.repub.Update(k) + kr.repub.Update(nd.Cid()) return kr.repub.Close() } @@ -170,11 +156,11 @@ type Republisher struct { cancel func() lk sync.Mutex - val key.Key - lastpub key.Key + val *cid.Cid + lastpub *cid.Cid } -func (rp *Republisher) getVal() key.Key { +func (rp *Republisher) getVal() *cid.Cid { rp.lk.Lock() defer rp.lk.Unlock() return rp.val @@ -195,10 +181,10 @@ func NewRepublisher(ctx context.Context, pf PubFunc, tshort, tlong time.Duration } } -func (p *Republisher) setVal(k key.Key) { +func (p *Republisher) setVal(c *cid.Cid) { p.lk.Lock() defer p.lk.Unlock() - p.val = k + p.val = c } func (p *Republisher) pubNow() { @@ -230,8 +216,8 @@ func (p *Republisher) Close() error { // Touch signals that an update has occurred since the last publish. // Multiple consecutive touches may extend the time period before // the next Publish occurs in order to more efficiently batch updates -func (np *Republisher) Update(k key.Key) { - np.setVal(k) +func (np *Republisher) Update(c *cid.Cid) { + np.setVal(c) select { case np.Publish <- struct{}{}: default: diff --git a/namesys/dns.go b/namesys/dns.go index d825ea00ed13..79fb00c2f5f6 100644 --- a/namesys/dns.go +++ b/namesys/dns.go @@ -114,7 +114,7 @@ func workDomain(r *DNSResolver, name string, res chan lookupRes) { } func parseEntry(txt string) (path.Path, error) { - p, err := path.ParseKeyToPath(txt) // bare IPFS multihashes + p, err := path.ParseCidToPath(txt) // bare IPFS multihashes if err == nil { return p, nil } diff --git a/namesys/publisher.go b/namesys/publisher.go index 77604ae95586..61fa8d6d05ec 100644 --- a/namesys/publisher.go +++ b/namesys/publisher.go @@ -348,7 +348,7 @@ func InitializeKeyspace(ctx context.Context, ds dag.DAGService, pub Publisher, p return err } - err = pub.Publish(ctx, key, path.FromKey(nodek)) + err = pub.Publish(ctx, key, path.FromCid(nodek)) if err != nil { return err } diff --git a/namesys/routing.go b/namesys/routing.go index d2c4fda11019..b4eea2af9366 100644 --- a/namesys/routing.go +++ b/namesys/routing.go @@ -14,9 +14,11 @@ import ( pb "github.com/ipfs/go-ipfs/namesys/pb" path "github.com/ipfs/go-ipfs/path" routing "github.com/ipfs/go-ipfs/routing" + logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" ci "gx/ipfs/QmVoi5es8D5fNHZDqoW6DgDAEPEV5hQp8GBz161vZXiwpQ/go-libp2p-crypto" u "gx/ipfs/QmZNVWh8LLjAavuQ2JXuFmuYH3C11xo988vSgp7UQrTRj1/go-ipfs-util" + cid "gx/ipfs/QmfSc2xehWmWLnwwYR91Y8QF4xdASypTFVknutoKQS3GHp/go-cid" ) var log = logging.Logger("namesys") @@ -196,7 +198,7 @@ func (r *routingResolver) resolveOnce(ctx context.Context, name string) (path.Pa } else { // Its an old style multihash record log.Warning("Detected old style multihash record") - p := path.FromKey(key.Key(valh)) + p := path.FromCid(cid.NewCidV0(valh)) r.cacheSet(name, p, entry) return p, nil } diff --git a/package.json b/package.json index 7a9bee150c6c..94ae18e3ea52 100644 --- a/package.json +++ b/package.json @@ -203,6 +203,12 @@ "hash": "Qmf4ETeAWXuThBfWwonVyFqGFSgTWepUDEr1txcctvpTXS", "name": "go-libp2p", "version": "3.4.1" + }, + { + "author": "whyrusleeping", + "hash": "QmfSc2xehWmWLnwwYR91Y8QF4xdASypTFVknutoKQS3GHp", + "name": "go-cid", + "version": "0.1.0" } ], "gxVersion": "0.4.0", diff --git 
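[editor's note] The routing resolver above keeps accepting old-style records whose value is a bare multihash; it simply wraps them as CIDv0 before building an /ipfs/ path. A hedged sketch of that fallback, with the record value faked via u.Hash for illustration.

package main

import (
	"fmt"

	path "github.com/ipfs/go-ipfs/path"

	u "gx/ipfs/QmZNVWh8LLjAavuQ2JXuFmuYH3C11xo988vSgp7UQrTRj1/go-ipfs-util"
	cid "gx/ipfs/QmfSc2xehWmWLnwwYR91Y8QF4xdASypTFVknutoKQS3GHp/go-cid"
)

func main() {
	// Stand-in for the value of an old-style IPNS record: a raw multihash.
	valh := u.Hash([]byte("legacy record payload"))

	// New code wraps the multihash as CIDv0 and builds an /ipfs/ path from it.
	p := path.FromCid(cid.NewCidV0(valh))
	fmt.Println(p)
}
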
a/path/path.go b/path/path.go index 790168de0926..884c1780d065 100644 --- a/path/path.go +++ b/path/path.go @@ -5,10 +5,7 @@ import ( "path" "strings" - key "github.com/ipfs/go-ipfs/blocks/key" - - b58 "gx/ipfs/QmT8rehPR3F6bmwL6zjUN8XpiDBFFpMP2myPdC6ApsWfJf/go-base58" - mh "gx/ipfs/QmYf7ng2hG5XBtJA3tN34DQ2GUN5HNksEw1rLDkmr6vGku/go-multihash" + cid "gx/ipfs/QmfSc2xehWmWLnwwYR91Y8QF4xdASypTFVknutoKQS3GHp/go-cid" ) // ErrBadPath is returned when a given path is incorrectly formatted @@ -23,9 +20,9 @@ func FromString(s string) Path { return Path(s) } -// FromKey safely converts a Key type to a Path type -func FromKey(k key.Key) Path { - return Path("/ipfs/" + k.String()) +// FromCid safely converts a cid.Cid type to a Path type +func FromCid(c *cid.Cid) Path { + return Path("/ipfs/" + c.String()) } func (p Path) Segments() []string { @@ -75,7 +72,7 @@ func FromSegments(prefix string, seg ...string) (Path, error) { func ParsePath(txt string) (Path, error) { parts := strings.Split(txt, "/") if len(parts) == 1 { - kp, err := ParseKeyToPath(txt) + kp, err := ParseCidToPath(txt) if err == nil { return kp, nil } @@ -84,7 +81,7 @@ func ParsePath(txt string) (Path, error) { // if the path doesnt being with a '/' // we expect this to start with a hash, and be an 'ipfs' path if parts[0] != "" { - if _, err := ParseKeyToPath(parts[0]); err != nil { + if _, err := ParseCidToPath(parts[0]); err != nil { return "", ErrBadPath } // The case when the path starts with hash without a protocol prefix @@ -96,7 +93,7 @@ func ParsePath(txt string) (Path, error) { } if parts[1] == "ipfs" { - if _, err := ParseKeyToPath(parts[2]); err != nil { + if _, err := ParseCidToPath(parts[2]); err != nil { return "", err } } else if parts[1] != "ipns" { @@ -106,20 +103,17 @@ func ParsePath(txt string) (Path, error) { return Path(txt), nil } -func ParseKeyToPath(txt string) (Path, error) { +func ParseCidToPath(txt string) (Path, error) { if txt == "" { return "", ErrNoComponents } - chk := b58.Decode(txt) - if len(chk) == 0 { - return "", errors.New("not a key") - } - - if _, err := mh.Cast(chk); err != nil { + c, err := cid.Decode(txt) + if err != nil { return "", err } - return FromKey(key.Key(chk)), nil + + return FromCid(c), nil } func (p *Path) IsValid() error { diff --git a/path/resolver.go b/path/resolver.go index a254f456c13b..8fc59ac9de3f 100644 --- a/path/resolver.go +++ b/path/resolver.go @@ -9,9 +9,9 @@ import ( mh "gx/ipfs/QmYf7ng2hG5XBtJA3tN34DQ2GUN5HNksEw1rLDkmr6vGku/go-multihash" "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - key "github.com/ipfs/go-ipfs/blocks/key" merkledag "github.com/ipfs/go-ipfs/merkledag" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" + cid "gx/ipfs/QmfSc2xehWmWLnwwYR91Y8QF4xdASypTFVknutoKQS3GHp/go-cid" ) var log = logging.Logger("path") @@ -38,7 +38,7 @@ type Resolver struct { // SplitAbsPath clean up and split fpath. It extracts the first component (which // must be a Multihash) and return it separately. 
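[editor's note] To round out the path changes above: both the bare-hash form and the /ipfs/-prefixed form now go through cid.Decode, so invalid hashes are rejected up front. A small sketch of the accepted spellings; the hash is again the empty-node CID used elsewhere in this patch.

package main

import (
	"fmt"

	path "github.com/ipfs/go-ipfs/path"
)

func main() {
	h := "QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n"

	for _, s := range []string{h, "/ipfs/" + h, "/ipfs/" + h + "/some/sub/path"} {
		p, err := path.ParsePath(s)
		if err != nil {
			panic(err)
		}
		fmt.Println(p) // always normalized to an /ipfs/... path
	}

	// A string that is not a valid CID is rejected immediately.
	if _, err := path.ParseCidToPath("not-a-cid"); err != nil {
		fmt.Println("rejected:", err)
	}
}
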
-func SplitAbsPath(fpath Path) (mh.Multihash, []string, error) { +func SplitAbsPath(fpath Path) (*cid.Cid, []string, error) { log.Debugf("Resolve: '%s'", fpath) @@ -52,14 +52,12 @@ func SplitAbsPath(fpath Path) (mh.Multihash, []string, error) { return nil, nil, ErrNoComponents } - // first element in the path is a b58 hash (for now) - h, err := mh.FromB58String(parts[0]) + c, err := cid.Decode(parts[0]) if err != nil { - log.Debug("given path element is not a base58 string.\n") return nil, nil, err } - return h, parts[1:], nil + return c, parts[1:], nil } // ResolvePath fetches the node for given path. It returns the last item @@ -87,7 +85,7 @@ func (s *Resolver) ResolvePathComponents(ctx context.Context, fpath Path) ([]*me } log.Debug("resolve dag get") - nd, err := s.DAG.Get(ctx, key.Key(h)) + nd, err := s.DAG.Get(ctx, h) if err != nil { return nil, err } @@ -117,7 +115,7 @@ func (s *Resolver) ResolveLinks(ctx context.Context, ndd *merkledag.Node, names nextnode, err := nd.GetLinkedNode(ctx, s.DAG, name) if err == merkledag.ErrLinkNotFound { - n, _ := nd.Multihash() + n := nd.Multihash() return result, ErrNoLink{Name: name, Node: n} } else if err != nil { return append(result, nextnode), err diff --git a/path/resolver_test.go b/path/resolver_test.go index 735a79e6d2bf..3a45581ed711 100644 --- a/path/resolver_test.go +++ b/path/resolver_test.go @@ -17,7 +17,7 @@ func randNode() (*merkledag.Node, key.Key) { node := new(merkledag.Node) node.SetData(make([]byte, 32)) util.NewTimeSeededRand().Read(node.Data()) - k, _ := node.Key() + k := node.Key() return node, k } @@ -46,10 +46,7 @@ func TestRecurivePathResolution(t *testing.T) { } } - aKey, err := a.Key() - if err != nil { - t.Fatal(err) - } + aKey := a.Key() segments := []string{aKey.String(), "child", "grandchild"} p, err := path.FromSegments("/ipfs/", segments...) @@ -63,10 +60,7 @@ func TestRecurivePathResolution(t *testing.T) { t.Fatal(err) } - key, err := node.Key() - if err != nil { - t.Fatal(err) - } + key := node.Key() if key.String() != cKey.String() { t.Fatal(fmt.Errorf( "recursive path resolution failed for %s: %s != %s", diff --git a/pin/gc/gc.go b/pin/gc/gc.go index 0eb87f867e93..c1e2eb4718e2 100644 --- a/pin/gc/gc.go +++ b/pin/gc/gc.go @@ -10,6 +10,7 @@ import ( logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + cid "gx/ipfs/QmfSc2xehWmWLnwwYR91Y8QF4xdASypTFVknutoKQS3GHp/go-cid" ) var log = logging.Logger("gc") @@ -23,7 +24,7 @@ var log = logging.Logger("gc") // // The routine then iterates over every block in the blockstore and // deletes any block that is not found in the marked set. 
-func GC(ctx context.Context, bs bstore.GCBlockstore, pn pin.Pinner, bestEffortRoots []key.Key) (<-chan key.Key, error) { +func GC(ctx context.Context, bs bstore.GCBlockstore, pn pin.Pinner, bestEffortRoots []*cid.Cid) (<-chan key.Key, error) { unlocker := bs.GCLock() bsrv := bserv.New(bs, offline.Exchange(bs)) @@ -70,16 +71,24 @@ func GC(ctx context.Context, bs bstore.GCBlockstore, pn pin.Pinner, bestEffortRo return output, nil } -func Descendants(ctx context.Context, ds dag.DAGService, set key.KeySet, roots []key.Key, bestEffort bool) error { - for _, k := range roots { - set.Add(k) - nd, err := ds.Get(ctx, k) +func Descendants(ctx context.Context, ds dag.DAGService, set key.KeySet, roots []*cid.Cid, bestEffort bool) error { + for _, c := range roots { + set.Add(key.Key(c.Hash())) + nd, err := ds.Get(ctx, c) if err != nil { return err } // EnumerateChildren recursively walks the dag and adds the keys to the given set - err = dag.EnumerateChildren(ctx, ds, nd, set, bestEffort) + err = dag.EnumerateChildren(ctx, ds, nd, func(c *cid.Cid) bool { + k := key.Key(c.Hash()) + seen := set.Has(k) + if seen { + return false + } + set.Add(k) + return true + }, bestEffort) if err != nil { return err } @@ -88,7 +97,7 @@ func Descendants(ctx context.Context, ds dag.DAGService, set key.KeySet, roots [ return nil } -func ColoredSet(ctx context.Context, pn pin.Pinner, ds dag.DAGService, bestEffortRoots []key.Key) (key.KeySet, error) { +func ColoredSet(ctx context.Context, pn pin.Pinner, ds dag.DAGService, bestEffortRoots []*cid.Cid) (key.KeySet, error) { // KeySet currently implemented in memory, in the future, may be bloom filter or // disk backed to conserve memory. gcs := key.NewKeySet() @@ -103,7 +112,7 @@ func ColoredSet(ctx context.Context, pn pin.Pinner, ds dag.DAGService, bestEffor } for _, k := range pn.DirectKeys() { - gcs.Add(k) + gcs.Add(key.Key(k.Hash())) } err = Descendants(ctx, ds, gcs, pn.InternalPins(), false) diff --git a/pin/pin.go b/pin/pin.go index d034cbc43bb9..56979cc69f8b 100644 --- a/pin/pin.go +++ b/pin/pin.go @@ -4,22 +4,33 @@ package pin import ( "fmt" + "os" "sync" "time" key "github.com/ipfs/go-ipfs/blocks/key" - "github.com/ipfs/go-ipfs/blocks/set" mdag "github.com/ipfs/go-ipfs/merkledag" + ds "gx/ipfs/QmNgqJarToRiq2GBaPJhkmW4B5BxS5B74E1rkGvv2JoaTp/go-datastore" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + cid "gx/ipfs/QmfSc2xehWmWLnwwYR91Y8QF4xdASypTFVknutoKQS3GHp/go-cid" ) var log = logging.Logger("pin") var pinDatastoreKey = ds.NewKey("/local/pins") -var emptyKey = key.B58KeyDecode("QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n") +var emptyKey *cid.Cid + +func init() { + e, err := cid.Decode("QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n") + if err != nil { + log.Error("failed to decode empty key constant") + os.Exit(1) + } + emptyKey = e +} const ( linkRecursive = "recursive" @@ -70,45 +81,45 @@ func StringToPinMode(s string) (PinMode, bool) { } type Pinner interface { - IsPinned(key.Key) (string, bool, error) - IsPinnedWithType(key.Key, PinMode) (string, bool, error) + IsPinned(*cid.Cid) (string, bool, error) + IsPinnedWithType(*cid.Cid, PinMode) (string, bool, error) Pin(context.Context, *mdag.Node, bool) error - Unpin(context.Context, key.Key, bool) error + Unpin(context.Context, *cid.Cid, bool) error // Check if a set of keys are pinned, more efficient than // calling IsPinned for each key - CheckIfPinned(keys ...key.Key) ([]Pinned, error) + 
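[editor's note] The gc changes above bridge two worlds: the colored set is still keyed by key.Key, while the dag walker now speaks *cid.Cid. The adapter is just a closure; keySetVisitor below is an invented name for that closure, sketched in isolation so the mapping from CID to multihash key is visible.

package main

import (
	"fmt"

	key "github.com/ipfs/go-ipfs/blocks/key"

	cid "gx/ipfs/QmfSc2xehWmWLnwwYR91Y8QF4xdASypTFVknutoKQS3GHp/go-cid"
)

// keySetVisitor adapts a key.KeySet to the visit-callback shape that
// dag.EnumerateChildren expects after this patch: descend only into
// children whose multihash has not been colored yet.
func keySetVisitor(gcs key.KeySet) func(*cid.Cid) bool {
	return func(c *cid.Cid) bool {
		k := key.Key(c.Hash())
		if gcs.Has(k) {
			return false
		}
		gcs.Add(k)
		return true
	}
}

func main() {
	gcs := key.NewKeySet()
	visit := keySetVisitor(gcs)

	c, _ := cid.Decode("QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n")
	fmt.Println(visit(c)) // true: first visit, now colored
	fmt.Println(visit(c)) // false: already in the set
}
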
CheckIfPinned(cids ...*cid.Cid) ([]Pinned, error) // PinWithMode is for manually editing the pin structure. Use with // care! If used improperly, garbage collection may not be // successful. - PinWithMode(key.Key, PinMode) + PinWithMode(*cid.Cid, PinMode) // RemovePinWithMode is for manually editing the pin structure. // Use with care! If used improperly, garbage collection may not // be successful. - RemovePinWithMode(key.Key, PinMode) + RemovePinWithMode(*cid.Cid, PinMode) Flush() error - DirectKeys() []key.Key - RecursiveKeys() []key.Key - InternalPins() []key.Key + DirectKeys() []*cid.Cid + RecursiveKeys() []*cid.Cid + InternalPins() []*cid.Cid } type Pinned struct { - Key key.Key + Key *cid.Cid Mode PinMode - Via key.Key + Via *cid.Cid } // pinner implements the Pinner interface type pinner struct { lock sync.RWMutex - recursePin set.BlockSet - directPin set.BlockSet + recursePin *cid.Set + directPin *cid.Set // Track the keys used for storing the pinning state, so gc does // not delete them. - internalPin map[key.Key]struct{} + internalPin *cid.Set dserv mdag.DAGService internal mdag.DAGService // dagservice used to store internal objects dstore ds.Datastore @@ -117,15 +128,16 @@ type pinner struct { // NewPinner creates a new pinner using the given datastore as a backend func NewPinner(dstore ds.Datastore, serv, internal mdag.DAGService) Pinner { - rcset := set.NewSimpleBlockSet() - dirset := set.NewSimpleBlockSet() + rcset := cid.NewSet() + dirset := cid.NewSet() return &pinner{ - recursePin: rcset, - directPin: dirset, - dserv: serv, - dstore: dstore, - internal: internal, + recursePin: rcset, + directPin: dirset, + dserv: serv, + dstore: dstore, + internal: internal, + internalPin: cid.NewSet(), } } @@ -133,18 +145,16 @@ func NewPinner(dstore ds.Datastore, serv, internal mdag.DAGService) Pinner { func (p *pinner) Pin(ctx context.Context, node *mdag.Node, recurse bool) error { p.lock.Lock() defer p.lock.Unlock() - k, err := node.Key() - if err != nil { - return err - } + c := node.Cid() + k := key.Key(c.Hash()) if recurse { - if p.recursePin.HasKey(k) { + if p.recursePin.Has(c) { return nil } - if p.directPin.HasKey(k) { - p.directPin.RemoveBlock(k) + if p.directPin.Has(c) { + p.directPin.Remove(c) } // fetch entire graph @@ -153,17 +163,17 @@ func (p *pinner) Pin(ctx context.Context, node *mdag.Node, recurse bool) error { return err } - p.recursePin.AddBlock(k) + p.recursePin.Add(c) } else { - if _, err := p.dserv.Get(ctx, k); err != nil { + if _, err := p.dserv.Get(ctx, c); err != nil { return err } - if p.recursePin.HasKey(k) { + if p.recursePin.Has(c) { return fmt.Errorf("%s already pinned recursively", k.B58String()) } - p.directPin.AddBlock(k) + p.directPin.Add(c) } return nil } @@ -171,10 +181,10 @@ func (p *pinner) Pin(ctx context.Context, node *mdag.Node, recurse bool) error { var ErrNotPinned = fmt.Errorf("not pinned") // Unpin a given key -func (p *pinner) Unpin(ctx context.Context, k key.Key, recursive bool) error { +func (p *pinner) Unpin(ctx context.Context, c *cid.Cid, recursive bool) error { p.lock.Lock() defer p.lock.Unlock() - reason, pinned, err := p.isPinnedWithType(k, Any) + reason, pinned, err := p.isPinnedWithType(c, Any) if err != nil { return err } @@ -184,41 +194,41 @@ func (p *pinner) Unpin(ctx context.Context, k key.Key, recursive bool) error { switch reason { case "recursive": if recursive { - p.recursePin.RemoveBlock(k) + p.recursePin.Remove(c) return nil } else { - return fmt.Errorf("%s is pinned recursively", k) + return fmt.Errorf("%s is pinned 
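[editor's note] The pinner's bookkeeping now rests entirely on cid.Set, so here is a quick sketch of the handful of operations it relies on (Add, Has, Remove, Keys, Len), independent of the pinner itself.

package main

import (
	"fmt"

	cid "gx/ipfs/QmfSc2xehWmWLnwwYR91Y8QF4xdASypTFVknutoKQS3GHp/go-cid"
)

func main() {
	s := cid.NewSet()

	c, err := cid.Decode("QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n")
	if err != nil {
		panic(err)
	}

	s.Add(c)
	fmt.Println(s.Has(c), s.Len()) // true 1

	// Keys returns the stored *cid.Cid values, e.g. for DirectKeys().
	for _, k := range s.Keys() {
		fmt.Println(k)
	}

	s.Remove(c)
	fmt.Println(s.Has(c), s.Len()) // false 0
}
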
recursively", c) } case "direct": - p.directPin.RemoveBlock(k) + p.directPin.Remove(c) return nil default: - return fmt.Errorf("%s is pinned indirectly under %s", k, reason) + return fmt.Errorf("%s is pinned indirectly under %s", c, reason) } } -func (p *pinner) isInternalPin(key key.Key) bool { - _, ok := p.internalPin[key] - return ok +func (p *pinner) isInternalPin(c *cid.Cid) bool { + return p.internalPin.Has(c) } // IsPinned returns whether or not the given key is pinned // and an explanation of why its pinned -func (p *pinner) IsPinned(k key.Key) (string, bool, error) { +func (p *pinner) IsPinned(c *cid.Cid) (string, bool, error) { p.lock.RLock() defer p.lock.RUnlock() - return p.isPinnedWithType(k, Any) + return p.isPinnedWithType(c, Any) } -func (p *pinner) IsPinnedWithType(k key.Key, mode PinMode) (string, bool, error) { +func (p *pinner) IsPinnedWithType(c *cid.Cid, mode PinMode) (string, bool, error) { p.lock.RLock() defer p.lock.RUnlock() - return p.isPinnedWithType(k, mode) + return p.isPinnedWithType(c, mode) } // isPinnedWithType is the implementation of IsPinnedWithType that does not lock. // intended for use by other pinned methods that already take locks -func (p *pinner) isPinnedWithType(k key.Key, mode PinMode) (string, bool, error) { +func (p *pinner) isPinnedWithType(c *cid.Cid, mode PinMode) (string, bool, error) { + k := key.Key(c.Hash()) switch mode { case Any, Direct, Indirect, Recursive, Internal: default: @@ -226,21 +236,21 @@ func (p *pinner) isPinnedWithType(k key.Key, mode PinMode) (string, bool, error) mode, Direct, Indirect, Recursive, Internal, Any) return "", false, err } - if (mode == Recursive || mode == Any) && p.recursePin.HasKey(k) { + if (mode == Recursive || mode == Any) && p.recursePin.Has(c) { return linkRecursive, true, nil } if mode == Recursive { return "", false, nil } - if (mode == Direct || mode == Any) && p.directPin.HasKey(k) { + if (mode == Direct || mode == Any) && p.directPin.Has(c) { return linkDirect, true, nil } if mode == Direct { return "", false, nil } - if (mode == Internal || mode == Any) && p.isInternalPin(k) { + if (mode == Internal || mode == Any) && p.isInternalPin(c) { return linkInternal, true, nil } if mode == Internal { @@ -248,8 +258,8 @@ func (p *pinner) isPinnedWithType(k key.Key, mode PinMode) (string, bool, error) } // Default is Indirect - for _, rk := range p.recursePin.GetKeys() { - rnd, err := p.dserv.Get(context.Background(), rk) + for _, rc := range p.recursePin.Keys() { + rnd, err := p.dserv.Get(context.Background(), rc) if err != nil { return "", false, err } @@ -259,90 +269,99 @@ func (p *pinner) isPinnedWithType(k key.Key, mode PinMode) (string, bool, error) return "", false, err } if has { - return rk.B58String(), true, nil + return rc.String(), true, nil } } return "", false, nil } -func (p *pinner) CheckIfPinned(keys ...key.Key) ([]Pinned, error) { +func (p *pinner) CheckIfPinned(cids ...*cid.Cid) ([]Pinned, error) { p.lock.RLock() defer p.lock.RUnlock() - pinned := make([]Pinned, 0, len(keys)) - toCheck := make(map[key.Key]struct{}) + pinned := make([]Pinned, 0, len(cids)) + toCheck := cid.NewSet() // First check for non-Indirect pins directly - for _, k := range keys { - if p.recursePin.HasKey(k) { - pinned = append(pinned, Pinned{Key: k, Mode: Recursive}) - } else if p.directPin.HasKey(k) { - pinned = append(pinned, Pinned{Key: k, Mode: Direct}) - } else if p.isInternalPin(k) { - pinned = append(pinned, Pinned{Key: k, Mode: Internal}) + for _, c := range cids { + if p.recursePin.Has(c) { + pinned = 
append(pinned, Pinned{Key: c, Mode: Recursive}) + } else if p.directPin.Has(c) { + pinned = append(pinned, Pinned{Key: c, Mode: Direct}) + } else if p.isInternalPin(c) { + pinned = append(pinned, Pinned{Key: c, Mode: Internal}) } else { - toCheck[k] = struct{}{} + toCheck.Add(c) } } // Now walk all recursive pins to check for indirect pins - var checkChildren func(key.Key, key.Key) error - checkChildren = func(rk key.Key, parentKey key.Key) error { + var checkChildren func(*cid.Cid, *cid.Cid) error + checkChildren = func(rk, parentKey *cid.Cid) error { parent, err := p.dserv.Get(context.Background(), parentKey) if err != nil { return err } for _, lnk := range parent.Links { - k := key.Key(lnk.Hash) + c := cid.NewCidV0(lnk.Hash) - if _, found := toCheck[k]; found { + if toCheck.Has(c) { pinned = append(pinned, - Pinned{Key: k, Mode: Indirect, Via: rk}) - delete(toCheck, k) + Pinned{Key: c, Mode: Indirect, Via: rk}) + toCheck.Remove(c) } - err := checkChildren(rk, k) + err := checkChildren(rk, c) if err != nil { return err } - if len(toCheck) == 0 { + if toCheck.Len() == 0 { return nil } } return nil } - for _, rk := range p.recursePin.GetKeys() { + + for _, rk := range p.recursePin.Keys() { err := checkChildren(rk, rk) if err != nil { return nil, err } - if len(toCheck) == 0 { + if toCheck.Len() == 0 { break } } // Anything left in toCheck is not pinned - for k, _ := range toCheck { + for _, k := range toCheck.Keys() { pinned = append(pinned, Pinned{Key: k, Mode: NotPinned}) } return pinned, nil } -func (p *pinner) RemovePinWithMode(key key.Key, mode PinMode) { +func (p *pinner) RemovePinWithMode(c *cid.Cid, mode PinMode) { p.lock.Lock() defer p.lock.Unlock() switch mode { case Direct: - p.directPin.RemoveBlock(key) + p.directPin.Remove(c) case Recursive: - p.recursePin.RemoveBlock(key) + p.recursePin.Remove(c) default: // programmer error, panic OK panic("unrecognized pin type") } } +func cidSetWithValues(cids []*cid.Cid) *cid.Set { + out := cid.NewSet() + for _, c := range cids { + out.Add(c) + } + return out +} + // LoadPinner loads a pinner and its keysets from the given datastore func LoadPinner(d ds.Datastore, dserv, internal mdag.DAGService) (Pinner, error) { p := new(pinner) @@ -356,29 +375,29 @@ func LoadPinner(d ds.Datastore, dserv, internal mdag.DAGService) (Pinner, error) return nil, fmt.Errorf("cannot load pin state: %s was not bytes", pinDatastoreKey) } - rootKey := key.Key(rootKeyBytes) + rootCid, err := cid.Cast(rootKeyBytes) + if err != nil { + return nil, err + } ctx, cancel := context.WithTimeout(context.TODO(), time.Second*5) defer cancel() - root, err := internal.Get(ctx, rootKey) + root, err := internal.Get(ctx, rootCid) if err != nil { return nil, fmt.Errorf("cannot find pinning root object: %v", err) } - internalPin := map[key.Key]struct{}{ - rootKey: struct{}{}, - } - recordInternal := func(k key.Key) { - internalPin[k] = struct{}{} - } + internalset := cid.NewSet() + internalset.Add(rootCid) + recordInternal := internalset.Add { // load recursive set recurseKeys, err := loadSet(ctx, internal, root, linkRecursive, recordInternal) if err != nil { return nil, fmt.Errorf("cannot load recursive pins: %v", err) } - p.recursePin = set.SimpleSetFromKeys(recurseKeys) + p.recursePin = cidSetWithValues(recurseKeys) } { // load direct set @@ -386,10 +405,10 @@ func LoadPinner(d ds.Datastore, dserv, internal mdag.DAGService) (Pinner, error) if err != nil { return nil, fmt.Errorf("cannot load direct pins: %v", err) } - p.directPin = set.SimpleSetFromKeys(directKeys) + 
p.directPin = cidSetWithValues(directKeys) } - p.internalPin = internalPin + p.internalPin = internalset // assign services p.dserv = dserv @@ -400,13 +419,13 @@ func LoadPinner(d ds.Datastore, dserv, internal mdag.DAGService) (Pinner, error) } // DirectKeys returns a slice containing the directly pinned keys -func (p *pinner) DirectKeys() []key.Key { - return p.directPin.GetKeys() +func (p *pinner) DirectKeys() []*cid.Cid { + return p.directPin.Keys() } // RecursiveKeys returns a slice containing the recursively pinned keys -func (p *pinner) RecursiveKeys() []key.Key { - return p.recursePin.GetKeys() +func (p *pinner) RecursiveKeys() []*cid.Cid { + return p.recursePin.Keys() } // Flush encodes and writes pinner keysets to the datastore @@ -416,14 +435,12 @@ func (p *pinner) Flush() error { ctx := context.TODO() - internalPin := make(map[key.Key]struct{}) - recordInternal := func(k key.Key) { - internalPin[k] = struct{}{} - } + internalset := cid.NewSet() + recordInternal := internalset.Add root := &mdag.Node{} { - n, err := storeSet(ctx, p.internal, p.directPin.GetKeys(), recordInternal) + n, err := storeSet(ctx, p.internal, p.directPin.Keys(), recordInternal) if err != nil { return err } @@ -433,7 +450,7 @@ func (p *pinner) Flush() error { } { - n, err := storeSet(ctx, p.internal, p.recursePin.GetKeys(), recordInternal) + n, err := storeSet(ctx, p.internal, p.recursePin.Keys(), recordInternal) if err != nil { return err } @@ -453,45 +470,45 @@ func (p *pinner) Flush() error { return err } - internalPin[k] = struct{}{} - if err := p.dstore.Put(pinDatastoreKey, []byte(k)); err != nil { + internalset.Add(k) + if err := p.dstore.Put(pinDatastoreKey, k.Bytes()); err != nil { return fmt.Errorf("cannot store pin state: %v", err) } - p.internalPin = internalPin + p.internalPin = internalset return nil } -func (p *pinner) InternalPins() []key.Key { +func (p *pinner) InternalPins() []*cid.Cid { p.lock.Lock() defer p.lock.Unlock() - var out []key.Key - for k, _ := range p.internalPin { - out = append(out, k) + var out []*cid.Cid + for _, c := range p.internalPin.Keys() { + out = append(out, c) } return out } // PinWithMode allows the user to have fine grained control over pin // counts -func (p *pinner) PinWithMode(k key.Key, mode PinMode) { +func (p *pinner) PinWithMode(c *cid.Cid, mode PinMode) { p.lock.Lock() defer p.lock.Unlock() switch mode { case Recursive: - p.recursePin.AddBlock(k) + p.recursePin.Add(c) case Direct: - p.directPin.AddBlock(k) + p.directPin.Add(c) } } func hasChild(ds mdag.DAGService, root *mdag.Node, child key.Key) (bool, error) { for _, lnk := range root.Links { - k := key.Key(lnk.Hash) - if k == child { + c := cid.NewCidV0(lnk.Hash) + if key.Key(c.Hash()) == child { return true, nil } - nd, err := ds.Get(context.Background(), k) + nd, err := ds.Get(context.Background(), c) if err != nil { return false, err } diff --git a/pin/pin_test.go b/pin/pin_test.go index 91c6b8c0e904..f1f626f5478e 100644 --- a/pin/pin_test.go +++ b/pin/pin_test.go @@ -4,28 +4,28 @@ import ( "testing" "time" - context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - "github.com/ipfs/go-ipfs/blocks/blockstore" - key "github.com/ipfs/go-ipfs/blocks/key" bs "github.com/ipfs/go-ipfs/blockservice" "github.com/ipfs/go-ipfs/exchange/offline" mdag "github.com/ipfs/go-ipfs/merkledag" + ds "gx/ipfs/QmNgqJarToRiq2GBaPJhkmW4B5BxS5B74E1rkGvv2JoaTp/go-datastore" dssync "gx/ipfs/QmNgqJarToRiq2GBaPJhkmW4B5BxS5B74E1rkGvv2JoaTp/go-datastore/sync" 
"gx/ipfs/QmZNVWh8LLjAavuQ2JXuFmuYH3C11xo988vSgp7UQrTRj1/go-ipfs-util" + context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + cid "gx/ipfs/QmfSc2xehWmWLnwwYR91Y8QF4xdASypTFVknutoKQS3GHp/go-cid" ) -func randNode() (*mdag.Node, key.Key) { +func randNode() (*mdag.Node, *cid.Cid) { nd := new(mdag.Node) nd.SetData(make([]byte, 32)) util.NewTimeSeededRand().Read(nd.Data()) - k, _ := nd.Key() + k := nd.Cid() return nd, k } -func assertPinned(t *testing.T, p Pinner, k key.Key, failmsg string) { - _, pinned, err := p.IsPinned(k) +func assertPinned(t *testing.T, p Pinner, c *cid.Cid, failmsg string) { + _, pinned, err := p.IsPinned(c) if err != nil { t.Fatal(err) } @@ -93,7 +93,7 @@ func TestPinnerBasic(t *testing.T) { assertPinned(t, p, ck, "child of recursively pinned node not found") - bk, _ := b.Key() + bk := b.Cid() assertPinned(t, p, bk, "Recursively pinned node not found..") d, _ := randNode() @@ -119,7 +119,7 @@ func TestPinnerBasic(t *testing.T) { t.Fatal(err) } - dk, _ := d.Key() + dk := d.Cid() assertPinned(t, p, dk, "pinned node not found.") // Test recursive unpin diff --git a/pin/set.go b/pin/set.go index 7257ccaecb9d..eb5cb5d91523 100644 --- a/pin/set.go +++ b/pin/set.go @@ -7,7 +7,6 @@ import ( "errors" "fmt" "hash/fnv" - "io" "sort" "unsafe" @@ -16,6 +15,7 @@ import ( "github.com/ipfs/go-ipfs/pin/internal/pb" "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + cid "gx/ipfs/QmfSc2xehWmWLnwwYR91Y8QF4xdASypTFVknutoKQS3GHp/go-cid" ) const ( @@ -31,18 +31,18 @@ func randomSeed() (uint32, error) { return binary.LittleEndian.Uint32(buf[:]), nil } -func hash(seed uint32, k key.Key) uint32 { +func hash(seed uint32, c *cid.Cid) uint32 { var buf [4]byte binary.LittleEndian.PutUint32(buf[:], seed) h := fnv.New32a() _, _ = h.Write(buf[:]) - _, _ = io.WriteString(h, string(k)) + _, _ = h.Write(c.Bytes()) return h.Sum32() } -type itemIterator func() (k key.Key, data []byte, ok bool) +type itemIterator func() (c *cid.Cid, data []byte, ok bool) -type keyObserver func(key.Key) +type keyObserver func(*cid.Cid) // refcount is the marshaled format of refcounts. It may change // between versions; this is valid for version 1. 
Changing it may @@ -100,7 +100,7 @@ func storeItems(ctx context.Context, dag merkledag.DAGService, estimatedLen uint Links: make([]*merkledag.Link, 0, defaultFanout+maxItems), } for i := 0; i < defaultFanout; i++ { - n.Links = append(n.Links, &merkledag.Link{Hash: emptyKey.ToMultihash()}) + n.Links = append(n.Links, &merkledag.Link{Hash: emptyKey.Hash()}) } internalKeys(emptyKey) hdr := &pb.Set{ @@ -121,7 +121,7 @@ func storeItems(ctx context.Context, dag merkledag.DAGService, estimatedLen uint // all done break } - n.Links = append(n.Links, &merkledag.Link{Hash: k.ToMultihash()}) + n.Links = append(n.Links, &merkledag.Link{Hash: k.Hash()}) n.SetData(append(n.Data(), data...)) } // sort by hash, also swap item Data @@ -134,7 +134,7 @@ func storeItems(ctx context.Context, dag merkledag.DAGService, estimatedLen uint // wasteful but simple type item struct { - k key.Key + c *cid.Cid data []byte } hashed := make(map[uint32][]item) @@ -147,13 +147,13 @@ func storeItems(ctx context.Context, dag merkledag.DAGService, estimatedLen uint hashed[h] = append(hashed[h], item{k, data}) } for h, items := range hashed { - childIter := func() (k key.Key, data []byte, ok bool) { + childIter := func() (c *cid.Cid, data []byte, ok bool) { if len(items) == 0 { - return "", nil, false + return nil, nil, false } first := items[0] items = items[1:] - return first.k, first.data, true + return first.c, first.data, true } child, err := storeItems(ctx, dag, uint64(len(items)), childIter, internalKeys) if err != nil { @@ -170,7 +170,7 @@ func storeItems(ctx context.Context, dag merkledag.DAGService, estimatedLen uint internalKeys(childKey) l := &merkledag.Link{ Name: "", - Hash: childKey.ToMultihash(), + Hash: childKey.Hash(), Size: size, } n.Links[int(h%defaultFanout)] = l @@ -231,8 +231,9 @@ func walkItems(ctx context.Context, dag merkledag.DAGService, n *merkledag.Node, } } for _, l := range n.Links[:fanout] { - children(key.Key(l.Hash)) - if key.Key(l.Hash) == emptyKey { + c := cid.NewCidV0(l.Hash) + children(c) + if c.Equals(emptyKey) { continue } subtree, err := l.GetNode(ctx, dag) @@ -246,20 +247,23 @@ func walkItems(ctx context.Context, dag merkledag.DAGService, n *merkledag.Node, return nil } -func loadSet(ctx context.Context, dag merkledag.DAGService, root *merkledag.Node, name string, internalKeys keyObserver) ([]key.Key, error) { +func loadSet(ctx context.Context, dag merkledag.DAGService, root *merkledag.Node, name string, internalKeys keyObserver) ([]*cid.Cid, error) { l, err := root.GetNodeLink(name) if err != nil { return nil, err } - internalKeys(key.Key(l.Hash)) + + lnkc := cid.NewCidV0(l.Hash) + internalKeys(lnkc) + n, err := l.GetNode(ctx, dag) if err != nil { return nil, err } - var res []key.Key + var res []*cid.Cid walk := func(buf []byte, idx int, link *merkledag.Link) error { - res = append(res, key.Key(link.Hash)) + res = append(res, cid.NewCidV0(link.Hash)) return nil } if err := walkItems(ctx, dag, n, walk, internalKeys); err != nil { @@ -273,7 +277,8 @@ func loadMultiset(ctx context.Context, dag merkledag.DAGService, root *merkledag if err != nil { return nil, fmt.Errorf("Failed to get link %s: %v", name, err) } - internalKeys(key.Key(l.Hash)) + c := cid.NewCidV0(l.Hash) + internalKeys(c) n, err := l.GetNode(ctx, dag) if err != nil { return nil, fmt.Errorf("Failed to get node from link %s: %v", name, err) @@ -292,24 +297,24 @@ func loadMultiset(ctx context.Context, dag merkledag.DAGService, root *merkledag return refcounts, nil } -func storeSet(ctx context.Context, dag 
merkledag.DAGService, keys []key.Key, internalKeys keyObserver) (*merkledag.Node, error) { - iter := func() (k key.Key, data []byte, ok bool) { - if len(keys) == 0 { - return "", nil, false +func storeSet(ctx context.Context, dag merkledag.DAGService, cids []*cid.Cid, internalKeys keyObserver) (*merkledag.Node, error) { + iter := func() (c *cid.Cid, data []byte, ok bool) { + if len(cids) == 0 { + return nil, nil, false } - first := keys[0] - keys = keys[1:] + first := cids[0] + cids = cids[1:] return first, nil, true } - n, err := storeItems(ctx, dag, uint64(len(keys)), iter, internalKeys) + n, err := storeItems(ctx, dag, uint64(len(cids)), iter, internalKeys) if err != nil { return nil, err } - k, err := dag.Add(n) + c, err := dag.Add(n) if err != nil { return nil, err } - internalKeys(k) + internalKeys(c) return n, nil } @@ -320,46 +325,3 @@ func copyRefcounts(orig map[key.Key]uint64) map[key.Key]uint64 { } return r } - -func storeMultiset(ctx context.Context, dag merkledag.DAGService, refcounts map[key.Key]uint64, internalKeys keyObserver) (*merkledag.Node, error) { - // make a working copy of the refcounts - refcounts = copyRefcounts(refcounts) - - iter := func() (k key.Key, data []byte, ok bool) { - // Every call of this function returns the next refcount item. - // - // This function splits out the uint64 reference counts as - // smaller increments, as fits in type refcount. Most of the - // time the refcount will fit inside just one, so this saves - // space. - // - // We use range here to pick an arbitrary item in the map, but - // not really iterate the map. - for k, refs := range refcounts { - // Max value a single multiset item can store - num := ^refcount(0) - if refs <= uint64(num) { - // Remaining count fits in a single item; remove the - // key from the map. - num = refcount(refs) - delete(refcounts, k) - } else { - // Count is too large to fit in one item, the key will - // repeat in some later call. 
- refcounts[k] -= uint64(num) - } - return k, num.Bytes(), true - } - return "", nil, false - } - n, err := storeItems(ctx, dag, uint64(len(refcounts)), iter, internalKeys) - if err != nil { - return nil, err - } - k, err := dag.Add(n) - if err != nil { - return nil, err - } - internalKeys(k) - return n, nil -} diff --git a/pin/set_test.go b/pin/set_test.go index 83d65dd02fc0..a5e9152d4cdc 100644 --- a/pin/set_test.go +++ b/pin/set_test.go @@ -1,20 +1,6 @@ package pin -import ( - "testing" - "testing/quick" - - "github.com/ipfs/go-ipfs/blocks/blockstore" - "github.com/ipfs/go-ipfs/blocks/key" - "github.com/ipfs/go-ipfs/blockservice" - "github.com/ipfs/go-ipfs/exchange/offline" - "github.com/ipfs/go-ipfs/merkledag" - "gx/ipfs/QmNgqJarToRiq2GBaPJhkmW4B5BxS5B74E1rkGvv2JoaTp/go-datastore" - dssync "gx/ipfs/QmNgqJarToRiq2GBaPJhkmW4B5BxS5B74E1rkGvv2JoaTp/go-datastore/sync" - mh "gx/ipfs/QmYf7ng2hG5XBtJA3tN34DQ2GUN5HNksEw1rLDkmr6vGku/go-multihash" - u "gx/ipfs/QmZNVWh8LLjAavuQ2JXuFmuYH3C11xo988vSgp7UQrTRj1/go-ipfs-util" - "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" -) +import "github.com/ipfs/go-ipfs/blocks/key" func ignoreKeys(key.Key) {} @@ -25,79 +11,3 @@ func copyMap(m map[key.Key]uint16) map[key.Key]uint64 { } return c } - -func TestMultisetRoundtrip(t *testing.T) { - dstore := dssync.MutexWrap(datastore.NewMapDatastore()) - bstore := blockstore.NewBlockstore(dstore) - bserv := blockservice.New(bstore, offline.Exchange(bstore)) - dag := merkledag.NewDAGService(bserv) - - fn := func(m map[key.Key]uint16) bool { - // Convert invalid multihash from input to valid ones - for k, v := range m { - if _, err := mh.Cast([]byte(k)); err != nil { - delete(m, k) - m[key.Key(u.Hash([]byte(k)))] = v - } - } - - // Generate a smaller range for refcounts than full uint64, as - // otherwise this just becomes overly cpu heavy, splitting it - // out into too many items. That means we need to convert to - // the right kind of map. As storeMultiset mutates the map as - // part of its bookkeeping, this is actually good. 
- refcounts := copyMap(m) - - ctx := context.Background() - n, err := storeMultiset(ctx, dag, refcounts, ignoreKeys) - if err != nil { - t.Fatalf("storing multiset: %v", err) - } - - // Check that the node n is in the DAG - k, err := n.Key() - if err != nil { - t.Fatalf("Could not get key: %v", err) - } - _, err = dag.Get(ctx, k) - if err != nil { - t.Fatalf("Could not get node: %v", err) - } - - root := &merkledag.Node{} - const linkName = "dummylink" - if err := root.AddNodeLink(linkName, n); err != nil { - t.Fatalf("adding link to root node: %v", err) - } - - roundtrip, err := loadMultiset(ctx, dag, root, linkName, ignoreKeys) - if err != nil { - t.Fatalf("loading multiset: %v", err) - } - - orig := copyMap(m) - success := true - for k, want := range orig { - if got, ok := roundtrip[k]; ok { - if got != want { - success = false - t.Logf("refcount changed: %v -> %v for %q", want, got, k) - } - delete(orig, k) - delete(roundtrip, k) - } - } - for k, v := range orig { - success = false - t.Logf("refcount missing: %v for %q", v, k) - } - for k, v := range roundtrip { - success = false - t.Logf("refcount extra: %v for %q", v, k) - } - return success - } - if err := quick.Check(fn, nil); err != nil { - t.Fatal(err) - } -} diff --git a/test/integration/bitswap_wo_routing_test.go b/test/integration/bitswap_wo_routing_test.go index e315ecb56981..fa701bf36a58 100644 --- a/test/integration/bitswap_wo_routing_test.go +++ b/test/integration/bitswap_wo_routing_test.go @@ -7,8 +7,10 @@ import ( "github.com/ipfs/go-ipfs/blocks" "github.com/ipfs/go-ipfs/core" "github.com/ipfs/go-ipfs/core/mock" + context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" mocknet "gx/ipfs/Qmf4ETeAWXuThBfWwonVyFqGFSgTWepUDEr1txcctvpTXS/go-libp2p/p2p/net/mock" + cid "gx/ipfs/QmfSc2xehWmWLnwwYR91Y8QF4xdASypTFVknutoKQS3GHp/go-cid" ) func TestBitswapWithoutRouting(t *testing.T) { @@ -68,10 +70,10 @@ func TestBitswapWithoutRouting(t *testing.T) { } log.Debugf("%d %s get block.", i, n.Identity) - b, err := n.Blocks.GetBlock(ctx, block0.Key()) + b, err := n.Blocks.GetBlock(ctx, cid.NewCidV0(block0.Multihash())) if err != nil { t.Error(err) - } else if !bytes.Equal(b.Data(), block0.Data()) { + } else if !bytes.Equal(b.RawData(), block0.RawData()) { t.Error("byte comparison fail") } else { log.Debug("got block: %s", b.Key()) @@ -85,10 +87,10 @@ func TestBitswapWithoutRouting(t *testing.T) { // get it out. 
for _, n := range nodes { - b, err := n.Blocks.GetBlock(ctx, block1.Key()) + b, err := n.Blocks.GetBlock(ctx, cid.NewCidV0(block1.Multihash())) if err != nil { t.Error(err) - } else if !bytes.Equal(b.Data(), block1.Data()) { + } else if !bytes.Equal(b.RawData(), block1.RawData()) { t.Error("byte comparison fail") } else { log.Debug("got block: %s", b.Key()) diff --git a/test/sharness/t0050-block.sh b/test/sharness/t0050-block.sh index 3cb0aeaca6c2..d9c83406aecc 100755 --- a/test/sharness/t0050-block.sh +++ b/test/sharness/t0050-block.sh @@ -125,7 +125,7 @@ test_expect_success "add and pin directory" ' HASH=QmRKqGMAM6EZngbpjSqrvYzq5Qd8b1bSWymjSUY9zQSNDk HASH2=QmdnpnsaEj69isdw5sNzp3h3HkaDz7xKq7BmvFFBzNr5e7 -RANDOMHASH=QRmKqGMAM6EbngbZjSqrvYzq5Qd8b1bSWymjSUY9zQSNDq +RANDOMHASH=QmRKqGMAM6EbngbZjSqrvYzq5Qd8b1bSWymjSUY9zQSNDq test_expect_success "multi-block 'ipfs block rm' mixed" ' test_must_fail ipfs block rm $FILE1HASH $DIRHASH $HASH $FILE3HASH $RANDOMHASH $HASH2 2> block_rm_err diff --git a/unixfs/io/dirbuilder.go b/unixfs/io/dirbuilder.go index 3db0b9ef9684..7a7783a7d450 100644 --- a/unixfs/io/dirbuilder.go +++ b/unixfs/io/dirbuilder.go @@ -3,9 +3,9 @@ package io import ( "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" - key "github.com/ipfs/go-ipfs/blocks/key" mdag "github.com/ipfs/go-ipfs/merkledag" format "github.com/ipfs/go-ipfs/unixfs" + cid "gx/ipfs/QmfSc2xehWmWLnwwYR91Y8QF4xdASypTFVknutoKQS3GHp/go-cid" ) type directoryBuilder struct { @@ -29,8 +29,8 @@ func NewDirectory(dserv mdag.DAGService) *directoryBuilder { } // AddChild adds a (name, key)-pair to the root node. -func (d *directoryBuilder) AddChild(ctx context.Context, name string, k key.Key) error { - cnode, err := d.dserv.Get(ctx, k) +func (d *directoryBuilder) AddChild(ctx context.Context, name string, c *cid.Cid) error { + cnode, err := d.dserv.Get(ctx, c) if err != nil { return err } diff --git a/unixfs/mod/dagmodifier.go b/unixfs/mod/dagmodifier.go index 784cef8a4927..d45dffdef0e7 100644 --- a/unixfs/mod/dagmodifier.go +++ b/unixfs/mod/dagmodifier.go @@ -6,7 +6,6 @@ import ( "io" "os" - key "github.com/ipfs/go-ipfs/blocks/key" chunk "github.com/ipfs/go-ipfs/importer/chunk" help "github.com/ipfs/go-ipfs/importer/helpers" trickle "github.com/ipfs/go-ipfs/importer/trickle" @@ -15,9 +14,9 @@ import ( uio "github.com/ipfs/go-ipfs/unixfs/io" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" - mh "gx/ipfs/QmYf7ng2hG5XBtJA3tN34DQ2GUN5HNksEw1rLDkmr6vGku/go-multihash" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context" + cid "gx/ipfs/QmfSc2xehWmWLnwwYR91Y8QF4xdASypTFVknutoKQS3GHp/go-cid" ) var ErrSeekFail = errors.New("failed to seek properly") @@ -169,12 +168,12 @@ func (dm *DagModifier) Sync() error { buflen := dm.wrBuf.Len() // overwrite existing dag nodes - thisk, done, err := dm.modifyDag(dm.curNode, dm.writeStart, dm.wrBuf) + thisc, done, err := dm.modifyDag(dm.curNode, dm.writeStart, dm.wrBuf) if err != nil { return err } - nd, err := dm.dagserv.Get(dm.ctx, thisk) + nd, err := dm.dagserv.Get(dm.ctx, thisc) if err != nil { return err } @@ -188,7 +187,7 @@ func (dm *DagModifier) Sync() error { return err } - thisk, err = dm.dagserv.Add(nd) + _, err = dm.dagserv.Add(nd) if err != nil { return err } @@ -205,30 +204,30 @@ func (dm *DagModifier) Sync() error { // modifyDag writes the data in 'data' over the data in 'node' starting at 'offset' // returns 
the new key of the passed in node and whether or not all the data in the reader // has been consumed. -func (dm *DagModifier) modifyDag(node *mdag.Node, offset uint64, data io.Reader) (key.Key, bool, error) { +func (dm *DagModifier) modifyDag(node *mdag.Node, offset uint64, data io.Reader) (*cid.Cid, bool, error) { f, err := ft.FromBytes(node.Data()) if err != nil { - return "", false, err + return nil, false, err } // If we've reached a leaf node. if len(node.Links) == 0 { n, err := data.Read(f.Data[offset:]) if err != nil && err != io.EOF { - return "", false, err + return nil, false, err } // Update newly written node.. b, err := proto.Marshal(f) if err != nil { - return "", false, err + return nil, false, err } nd := new(mdag.Node) nd.SetData(b) k, err := dm.dagserv.Add(nd) if err != nil { - return "", false, err + return nil, false, err } // Hey look! we're done! @@ -247,20 +246,20 @@ func (dm *DagModifier) modifyDag(node *mdag.Node, offset uint64, data io.Reader) if cur+bs > offset { child, err := node.Links[i].GetNode(dm.ctx, dm.dagserv) if err != nil { - return "", false, err + return nil, false, err } k, sdone, err := dm.modifyDag(child, offset-cur, data) if err != nil { - return "", false, err + return nil, false, err } offset += bs - node.Links[i].Hash = mh.Multihash(k) + node.Links[i].Hash = k.Hash() // Recache serialized node _, err = node.EncodeProtobuf(true) if err != nil { - return "", false, err + return nil, false, err } if sdone {