diff --git a/core/builder.go b/core/builder.go index 2cd3bb6ea6f..f0e0bb81464 100644 --- a/core/builder.go +++ b/core/builder.go @@ -213,7 +213,7 @@ func setupNode(ctx context.Context, n *IpfsNode, cfg *BuildCfg) error { n.GCLocker = bstore.NewGCLocker() n.Blockstore = bstore.NewGCBlockstore(cbs, n.GCLocker) - if conf.Experimental.FilestoreEnabled { + if conf.Experimental.FilestoreEnabled || conf.Experimental.UrlstoreEnabled { // hash security n.Filestore = filestore.NewFilestore(cbs, n.Repo.FileManager()) n.Blockstore = bstore.NewGCBlockstore(n.Filestore, n.GCLocker) diff --git a/core/commands/add.go b/core/commands/add.go index 43520bcca97..46204c06db2 100644 --- a/core/commands/add.go +++ b/core/commands/add.go @@ -10,6 +10,7 @@ import ( blockservice "github.com/ipfs/go-ipfs/blockservice" core "github.com/ipfs/go-ipfs/core" "github.com/ipfs/go-ipfs/core/coreunix" + filestore "github.com/ipfs/go-ipfs/filestore" dag "github.com/ipfs/go-ipfs/merkledag" dagtest "github.com/ipfs/go-ipfs/merkledag/test" mfs "github.com/ipfs/go-ipfs/mfs" @@ -183,8 +184,7 @@ You can now check what blocks have been created by: // nocopy -> filestoreEnabled if nocopy && !cfg.Experimental.FilestoreEnabled { - res.SetError(errors.New("filestore is not enabled, see https://git.io/vNItf"), - cmdkit.ErrClient) + res.SetError(filestore.ErrFilestoreNotEnabled, cmdkit.ErrClient) return } diff --git a/core/commands/commands_test.go b/core/commands/commands_test.go index 91c05c36dc3..2c9788cc452 100644 --- a/core/commands/commands_test.go +++ b/core/commands/commands_test.go @@ -210,6 +210,8 @@ func TestCommands(t *testing.T) { "/tar/add", "/tar/cat", "/update", + "/urlstore", + "/urlstore/add", "/version", } diff --git a/core/commands/filestore.go b/core/commands/filestore.go index effd28e9e7d..f24be141076 100644 --- a/core/commands/filestore.go +++ b/core/commands/filestore.go @@ -237,7 +237,7 @@ func getFilestore(env interface{}) (*core.IpfsNode, *filestore.Filestore, error) } fs := n.Filestore if fs == nil { - return n, nil, fmt.Errorf("filestore not enabled") + return n, nil, filestore.ErrFilestoreNotEnabled } return n, fs, err } diff --git a/core/commands/root.go b/core/commands/root.go index 9aa8271613b..648a2a88610 100644 --- a/core/commands/root.go +++ b/core/commands/root.go @@ -5,12 +5,12 @@ import ( "strings" oldcmds "github.com/ipfs/go-ipfs/commands" + lgc "github.com/ipfs/go-ipfs/commands/legacy" dag "github.com/ipfs/go-ipfs/core/commands/dag" e "github.com/ipfs/go-ipfs/core/commands/e" ocmd "github.com/ipfs/go-ipfs/core/commands/object" unixfs "github.com/ipfs/go-ipfs/core/commands/unixfs" - lgc "github.com/ipfs/go-ipfs/commands/legacy" "gx/ipfs/QmNueRyPRQiV7PUEpnP4GgGLuK1rKQLaRW7sfPvUetYig1/go-ipfs-cmds" logging "gx/ipfs/QmcVVHfdyv15GVPk7NrxdWjh2hLVccXnoD8j2tyQShiXJb/go-log" "gx/ipfs/QmdE4gMduCKCGAcczM2F5ioYDfdeKuPix138wrES1YSr7f/go-ipfs-cmdkit" @@ -136,6 +136,7 @@ var rootSubcommands = map[string]*cmds.Command{ "tar": lgc.NewCommand(TarCmd), "file": lgc.NewCommand(unixfs.UnixFSCmd), "update": lgc.NewCommand(ExternalBinary()), + "urlstore": lgc.NewCommand(urlStoreCmd), "version": lgc.NewCommand(VersionCmd), "shutdown": lgc.NewCommand(daemonShutdownCmd), } diff --git a/core/commands/urlstore.go b/core/commands/urlstore.go new file mode 100644 index 00000000000..b903f95ed8f --- /dev/null +++ b/core/commands/urlstore.go @@ -0,0 +1,122 @@ +package commands + +import ( + "fmt" + "io" + "net/http" + "strings" + + cmds "github.com/ipfs/go-ipfs/commands" + filestore 
"github.com/ipfs/go-ipfs/filestore" + balanced "github.com/ipfs/go-ipfs/importer/balanced" + ihelper "github.com/ipfs/go-ipfs/importer/helpers" + + mh "gx/ipfs/QmPnFwZ2JXKnXgMw8CdBPxn7FWh6LLdjUjxV1fKHuJnkr8/go-multihash" + chunk "gx/ipfs/QmXnzH7wowyLZy8XJxxaQCVTgLMcDXdMBznmsrmQWCyiQV/go-ipfs-chunker" + cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid" + cmdkit "gx/ipfs/QmdE4gMduCKCGAcczM2F5ioYDfdeKuPix138wrES1YSr7f/go-ipfs-cmdkit" +) + +var urlStoreCmd = &cmds.Command{ + + Subcommands: map[string]*cmds.Command{ + "add": urlAdd, + }, +} + +var urlAdd = &cmds.Command{ + Helptext: cmdkit.HelpText{ + Tagline: "Add URL via urlstore.", + LongDescription: ` +Add URLs to ipfs without storing the data locally. + +The URL provided must be stable and ideally on a web server under your +control. + +The file is added using raw-leaves but otherwise using the default +settings for 'ipfs add'. + +The file is not pinned, so this command should be followed by an 'ipfs +pin add'. + +This command is considered temporary until a better solution can be +found. It may disappear or the semantics can change at any +time. +`, + }, + Arguments: []cmdkit.Argument{ + cmdkit.StringArg("url", true, false, "URL to add to IPFS"), + }, + Type: BlockStat{}, + + Run: func(req cmds.Request, res cmds.Response) { + url := req.Arguments()[0] + n, err := req.InvocContext().GetNode() + if err != nil { + res.SetError(err, cmdkit.ErrNormal) + return + } + + if !filestore.IsURL(url) { + res.SetError(fmt.Errorf("unsupported url syntax: %s", url), cmdkit.ErrNormal) + return + } + + cfg, err := n.Repo.Config() + if err != nil { + res.SetError(err, cmdkit.ErrNormal) + return + } + + if !cfg.Experimental.UrlstoreEnabled { + res.SetError(filestore.ErrUrlstoreNotEnabled, cmdkit.ErrNormal) + return + } + + hreq, err := http.NewRequest("GET", url, nil) + if err != nil { + res.SetError(err, cmdkit.ErrNormal) + return + } + + hres, err := http.DefaultClient.Do(hreq) + if err != nil { + res.SetError(err, cmdkit.ErrNormal) + return + } + if hres.StatusCode != http.StatusOK { + res.SetError(fmt.Errorf("expected code 200, got: %d", hres.StatusCode), cmdkit.ErrNormal) + return + } + + chk := chunk.NewSizeSplitter(hres.Body, chunk.DefaultBlockSize) + prefix := cid.NewPrefixV1(cid.DagProtobuf, mh.SHA2_256) + dbp := &ihelper.DagBuilderParams{ + Dagserv: n.DAG, + RawLeaves: true, + Maxlinks: ihelper.DefaultLinksPerBlock, + NoCopy: true, + Prefix: &prefix, + URL: url, + } + + blc, err := balanced.Layout(dbp.New(chk)) + if err != nil { + res.SetError(err, cmdkit.ErrNormal) + return + } + + res.SetOutput(BlockStat{ + Key: blc.Cid().String(), + Size: int(hres.ContentLength), + }) + }, + Marshalers: cmds.MarshalerMap{ + cmds.Text: func(res cmds.Response) (io.Reader, error) { + ch := res.Output().(<-chan interface{}) + bs0 := <-ch + bs := bs0.(*BlockStat) + return strings.NewReader(bs.Key + "\n"), nil + }, + }, +} diff --git a/docs/experimental-features.md b/docs/experimental-features.md index 7e916e950ba..aeb3fd23b48 100644 --- a/docs/experimental-features.md +++ b/docs/experimental-features.md @@ -17,6 +17,7 @@ the above issue. 
- [go-multiplex stream muxer](#go-multiplex-stream-muxer) - [Raw leaves for unixfs files](#raw-leaves-for-unixfs-files) - [ipfs filestore](#ipfs-filestore) +- [ipfs urlstore](#ipfs-urlstore) - [BadgerDB datastore](#badger-datastore) - [Private Networks](#private-networks) - [ipfs p2p](#ipfs-p2p) @@ -164,6 +165,26 @@ And then pass the `--nocopy` flag when running `ipfs add` --- +## ipfs urlstore +Allows ipfs to retrieve blocks contents via a url instead of storing it in the datastore + +### State +experimental. + +### In Version +???. + +### How to enable +Modify your ipfs config: +``` +ipfs config --json Experimental.UrlstoreEnabled true +``` + +### Road to being a real feature +???. + +--- + ## Private Networks Allows ipfs to only connect to other peers who have a shared secret key. diff --git a/filestore/filestore.go b/filestore/filestore.go index 66289b0686f..0c80fb1d59a 100644 --- a/filestore/filestore.go +++ b/filestore/filestore.go @@ -9,6 +9,7 @@ package filestore import ( "context" + "errors" blocks "gx/ipfs/QmTRCUvZLiir12Qr6MV3HKfKMHX8Nf1Vddn6t2g5nsQSb9/go-block-format" posinfo "gx/ipfs/QmUWsXLvYYDAaoAt9TPZpFX4ffHHMg46AHrz1ZLTN5ABbe/go-ipfs-posinfo" @@ -20,6 +21,9 @@ import ( var log = logging.Logger("filestore") +var ErrFilestoreNotEnabled = errors.New("filestore is not enabled, see https://git.io/vNItf") +var ErrUrlstoreNotEnabled = errors.New("urlstore is not enabled") + // Filestore implements a Blockstore by combining a standard Blockstore // to store regular blocks and a special Blockstore called // FileManager to store blocks which data exists in an external file. diff --git a/filestore/filestore_test.go b/filestore/filestore_test.go index 9589c3dd5fa..279a2bc8218 100644 --- a/filestore/filestore_test.go +++ b/filestore/filestore_test.go @@ -23,6 +23,7 @@ func newTestFilestore(t *testing.T) (string, *Filestore) { t.Fatal(err) } fm := NewFileManager(mds, testdir) + fm.AllowFiles = true bs := blockstore.NewBlockstore(mds) fstore := NewFilestore(bs, fm) @@ -162,3 +163,15 @@ func TestDeletes(t *testing.T) { } } } + +func TestIsURL(t *testing.T) { + if !IsURL("http://www.example.com") { + t.Fatal("IsURL failed: http://www.example.com") + } + if !IsURL("https://www.example.com") { + t.Fatal("IsURL failed: https://www.example.com") + } + if IsURL("adir/afile") || IsURL("http:/ /afile") || IsURL("http:/a/file") { + t.Fatal("IsURL recognized non-url") + } +} diff --git a/filestore/fsrefstore.go b/filestore/fsrefstore.go index 2e481ade90d..960fc93e834 100644 --- a/filestore/fsrefstore.go +++ b/filestore/fsrefstore.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "io" + "net/http" "os" "path/filepath" @@ -28,8 +29,10 @@ var FilestorePrefix = ds.NewKey("filestore") // to the actual location of the block data in the filesystem // (a path and an offset). type FileManager struct { - ds ds.Batching - root string + AllowFiles bool + AllowUrls bool + ds ds.Batching + root string } // CorruptReferenceError implements the error interface. @@ -51,7 +54,7 @@ func (c CorruptReferenceError) Error() string { // datastore and root. All FilestoreNodes paths are relative to the // root path given here, which is prepended for any operations. 
func NewFileManager(ds ds.Batching, root string) *FileManager { - return &FileManager{dsns.Wrap(ds, FilestorePrefix), root} + return &FileManager{ds: dsns.Wrap(ds, FilestorePrefix), root: root} } // AllKeysChan returns a channel from which to read the keys stored in @@ -111,7 +114,6 @@ func (f *FileManager) Get(c *cid.Cid) (blocks.Block, error) { if err != nil { return nil, err } - out, err := f.readDataObj(c, dobj) if err != nil { return nil, err @@ -120,6 +122,13 @@ func (f *FileManager) Get(c *cid.Cid) (blocks.Block, error) { return blocks.NewBlockWithCid(out, c) } +func (f *FileManager) readDataObj(c *cid.Cid, d *pb.DataObj) ([]byte, error) { + if IsURL(d.GetFilePath()) { + return f.readURLDataObj(c, d) + } + return f.readFileDataObj(c, d) +} + func (f *FileManager) getDataObj(c *cid.Cid) (*pb.DataObj, error) { o, err := f.ds.Get(dshelp.CidToDsKey(c)) switch err { @@ -148,8 +157,11 @@ func unmarshalDataObj(o interface{}) (*pb.DataObj, error) { return &dobj, nil } -// reads and verifies the block -func (f *FileManager) readDataObj(c *cid.Cid, d *pb.DataObj) ([]byte, error) { +func (f *FileManager) readFileDataObj(c *cid.Cid, d *pb.DataObj) ([]byte, error) { + if !f.AllowFiles { + return nil, ErrFilestoreNotEnabled + } + p := filepath.FromSlash(d.GetFilePath()) abspath := filepath.Join(f.root, p) @@ -187,6 +199,50 @@ func (f *FileManager) readDataObj(c *cid.Cid, d *pb.DataObj) ([]byte, error) { return outbuf, nil } +// reads and verifies the block from URL +func (f *FileManager) readURLDataObj(c *cid.Cid, d *pb.DataObj) ([]byte, error) { + if !f.AllowUrls { + return nil, ErrUrlstoreNotEnabled + } + + req, err := http.NewRequest("GET", d.GetFilePath(), nil) + if err != nil { + return nil, err + } + + req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", d.GetOffset(), d.GetOffset()+d.GetSize_()-1)) + + res, err := http.DefaultClient.Do(req) + if err != nil { + return nil, &CorruptReferenceError{StatusFileError, err} + } + if res.StatusCode != http.StatusPartialContent { + return nil, &CorruptReferenceError{StatusFileError, + fmt.Errorf("expected HTTP 206 got %d", res.StatusCode)} + } + + outbuf := make([]byte, d.GetSize_()) + _, err = io.ReadFull(res.Body, outbuf) + if err == io.EOF || err == io.ErrUnexpectedEOF { + return nil, &CorruptReferenceError{StatusFileChanged, err} + } else if err != nil { + return nil, &CorruptReferenceError{StatusFileError, err} + } + res.Body.Close() + + outcid, err := c.Prefix().Sum(outbuf) + if err != nil { + return nil, err + } + + if !c.Equals(outcid) { + return nil, &CorruptReferenceError{StatusFileChanged, + fmt.Errorf("data in file did not match. %s offset %d", d.GetFilePath(), d.GetOffset())} + } + + return outbuf, nil +} + // Has returns if the FileManager is storing a block reference. It does not // validate the data, nor checks if the reference is valid. 
func (f *FileManager) Has(c *cid.Cid) (bool, error) { @@ -209,16 +265,26 @@ func (f *FileManager) Put(b *posinfo.FilestoreNode) error { func (f *FileManager) putTo(b *posinfo.FilestoreNode, to putter) error { var dobj pb.DataObj - if !filepath.HasPrefix(b.PosInfo.FullPath, f.root) { - return fmt.Errorf("cannot add filestore references outside ipfs root (%s)", f.root) - } + if IsURL(b.PosInfo.FullPath) { + if !f.AllowUrls { + return ErrUrlstoreNotEnabled + } + dobj.FilePath = proto.String(b.PosInfo.FullPath) + } else { + if !f.AllowFiles { + return ErrFilestoreNotEnabled + } + if !filepath.HasPrefix(b.PosInfo.FullPath, f.root) { + return fmt.Errorf("cannot add filestore references outside ipfs root (%s)", f.root) + } - p, err := filepath.Rel(f.root, b.PosInfo.FullPath) - if err != nil { - return err - } + p, err := filepath.Rel(f.root, b.PosInfo.FullPath) + if err != nil { + return err + } - dobj.FilePath = proto.String(filepath.ToSlash(p)) + dobj.FilePath = proto.String(filepath.ToSlash(p)) + } dobj.Offset = proto.Uint64(b.PosInfo.Offset) dobj.Size_ = proto.Uint64(uint64(len(b.RawData()))) @@ -246,3 +312,12 @@ func (f *FileManager) PutMany(bs []*posinfo.FilestoreNode) error { return batch.Commit() } + +// IsURL returns true if the string represents a valid URL that the +// urlstore can handle. More specifically it returns true if a string +// begins with 'http://' or 'https://'. +func IsURL(str string) bool { + return (len(str) > 7 && str[0] == 'h' && str[1] == 't' && str[2] == 't' && str[3] == 'p') && + ((len(str) > 8 && str[4] == 's' && str[5] == ':' && str[6] == '/' && str[7] == '/') || + (str[4] == ':' && str[5] == '/' && str[6] == '/')) +} diff --git a/importer/helpers/dagbuilder.go b/importer/helpers/dagbuilder.go index 3e5480fb61a..eaa11d0d66b 100644 --- a/importer/helpers/dagbuilder.go +++ b/importer/helpers/dagbuilder.go @@ -48,6 +48,11 @@ type DagBuilderParams struct { // NoCopy signals to the chunker that it should track fileinfo for // filestore adds NoCopy bool + + // URL if non-empty (and NoCopy is also true) indicates that the + // file will not be stored in the datastore but instead retrieved + // from this location via the urlstore. 
+ URL string } // New generates a new DagBuilderHelper from the given params and a given @@ -65,6 +70,10 @@ func (dbp *DagBuilderParams) New(spl chunker.Splitter) *DagBuilderHelper { db.fullPath = fi.AbsPath() db.stat = fi.Stat() } + + if dbp.URL != "" && dbp.NoCopy { + db.fullPath = dbp.URL + } return db } diff --git a/repo/config/experiments.go b/repo/config/experiments.go index f76572ee2af..ab48c868159 100644 --- a/repo/config/experiments.go +++ b/repo/config/experiments.go @@ -2,6 +2,7 @@ package config type Experiments struct { FilestoreEnabled bool + UrlstoreEnabled bool ShardingEnabled bool Libp2pStreamMounting bool } diff --git a/repo/fsrepo/fsrepo.go b/repo/fsrepo/fsrepo.go index 7d53e226397..64bd53ce9e9 100644 --- a/repo/fsrepo/fsrepo.go +++ b/repo/fsrepo/fsrepo.go @@ -175,8 +175,10 @@ func open(repoPath string) (repo.Repo, error) { return nil, err } - if r.config.Experimental.FilestoreEnabled { + if r.config.Experimental.FilestoreEnabled || r.config.Experimental.UrlstoreEnabled { r.filemgr = filestore.NewFileManager(r.ds, filepath.Dir(r.path)) + r.filemgr.AllowFiles = r.config.Experimental.FilestoreEnabled + r.filemgr.AllowUrls = r.config.Experimental.UrlstoreEnabled } keepLocked = true diff --git a/test/sharness/t0272-urlstore.sh b/test/sharness/t0272-urlstore.sh new file mode 100755 index 00000000000..312247f2f07 --- /dev/null +++ b/test/sharness/t0272-urlstore.sh @@ -0,0 +1,135 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2017 Jeromy Johnson +# MIT Licensed; see the LICENSE file in this repository. +# + +test_description="Test out the urlstore functionality" + +. lib/test-lib.sh + +test_init_ipfs + +test_expect_success "create some random files" ' + random 2222 7 > file1 && + random 500000 7 > file2 && + random 50000000 7 > file3 +' + +test_expect_success "add files using trickle dag format without raw leaves" ' + HASH1a=$(ipfs add -q --trickle --raw-leaves=false file1) && + HASH2a=$(ipfs add -q --trickle --raw-leaves=false file2) && + HASH3a=$(ipfs add -q --trickle --raw-leaves=false file3) +' +test_launch_ipfs_daemon --offline + +test_expect_success "make sure files can be retrived via the gateway" ' + curl http://127.0.0.1:$GWAY_PORT/ipfs/$HASH1a -o file1.actual && + test_cmp file1 file1.actual && + curl http://127.0.0.1:$GWAY_PORT/ipfs/$HASH2a -o file2.actual && + test_cmp file2 file2.actual && + curl http://127.0.0.1:$GWAY_PORT/ipfs/$HASH3a -o file3.actual && + test_cmp file3 file3.actual +' + +test_expect_success "add files without enabling url store" ' + test_must_fail ipfs urlstore add http://127.0.0.1:$GWAY_PORT/ipfs/$HASH1a && + test_must_fail ipfs urlstore add http://127.0.0.1:$GWAY_PORT/ipfs/$HASH2a +' + +test_kill_ipfs_daemon + +test_expect_success "enable urlstore" ' + ipfs config --json Experimental.UrlstoreEnabled true +' + +test_launch_ipfs_daemon --offline + +test_expect_success "add files using gateway address via url store" ' + HASH1=$(ipfs urlstore add http://127.0.0.1:$GWAY_PORT/ipfs/$HASH1a) && + HASH2=$(ipfs urlstore add http://127.0.0.1:$GWAY_PORT/ipfs/$HASH2a) && + ipfs pin add $HASH1 $HASH2 +' + +test_expect_success "make sure hashes are different" ' + test $HASH1a != $HASH1 && + test $HASH2a != $HASH2 +' + +test_expect_success "get files via urlstore" ' + ipfs get $HASH1 -o file1.actual && + test_cmp file1 file1.actual && + ipfs get $HASH2 -o file2.actual && + test_cmp file2 file2.actual +' + +cat < ls_expect +zb2rhX1q5oFFzEkPNsTe1Y8osUdFqSQGjUWRZsqC9fbY6WVSk 262144 http://127.0.0.1:$GWAY_PORT/ipfs/QmUow2T4P69nEsqTQDZCt8yg9CPS8GFmpuDAr5YtsPhTdM 0 
+zb2rhYbKFn1UWGHXaAitcdVTkDGTykX8RFpGWzRFuLpoe9VE4 237856 http://127.0.0.1:$GWAY_PORT/ipfs/QmUow2T4P69nEsqTQDZCt8yg9CPS8GFmpuDAr5YtsPhTdM 262144 +zb2rhjddJ5DNzBrFu8G6CP1ApY25BukwCeskXHzN1H18CiVVZ 2222 http://127.0.0.1:$GWAY_PORT/ipfs/QmcHm3BL2cXuQ6rJdKQgPrmT9suqGkfy2KzH3MkXPEBXU6 0 +EOF + +test_expect_success "ipfs filestore ls works with urls" ' + ipfs filestore ls | sort > ls_actual && + test_cmp ls_expect ls_actual +' + +cat < verify_expect +ok zb2rhX1q5oFFzEkPNsTe1Y8osUdFqSQGjUWRZsqC9fbY6WVSk 262144 http://127.0.0.1:$GWAY_PORT/ipfs/QmUow2T4P69nEsqTQDZCt8yg9CPS8GFmpuDAr5YtsPhTdM 0 +ok zb2rhYbKFn1UWGHXaAitcdVTkDGTykX8RFpGWzRFuLpoe9VE4 237856 http://127.0.0.1:$GWAY_PORT/ipfs/QmUow2T4P69nEsqTQDZCt8yg9CPS8GFmpuDAr5YtsPhTdM 262144 +ok zb2rhjddJ5DNzBrFu8G6CP1ApY25BukwCeskXHzN1H18CiVVZ 2222 http://127.0.0.1:$GWAY_PORT/ipfs/QmcHm3BL2cXuQ6rJdKQgPrmT9suqGkfy2KzH3MkXPEBXU6 0 +EOF + +test_expect_success "ipfs filestore verify works with urls" ' + ipfs filestore verify | sort > verify_actual && + test_cmp verify_expect verify_actual +' + +test_expect_success "remove original hashes from local gateway" ' + ipfs pin rm $HASH1a $HASH2a && + ipfs repo gc > /dev/null +' + +test_expect_success "gatway no longer has files" ' + test_must_fail curl -f http://127.0.0.1:$GWAY_PORT/ipfs/$HASH1a -o file1.actual + test_must_fail curl -f http://127.0.0.1:$GWAY_PORT/ipfs/$HASH2a -o file2.actual +' + +cat < verify_expect_2 +error zb2rhX1q5oFFzEkPNsTe1Y8osUdFqSQGjUWRZsqC9fbY6WVSk 262144 http://127.0.0.1:$GWAY_PORT/ipfs/QmUow2T4P69nEsqTQDZCt8yg9CPS8GFmpuDAr5YtsPhTdM 0 +error zb2rhYbKFn1UWGHXaAitcdVTkDGTykX8RFpGWzRFuLpoe9VE4 237856 http://127.0.0.1:$GWAY_PORT/ipfs/QmUow2T4P69nEsqTQDZCt8yg9CPS8GFmpuDAr5YtsPhTdM 262144 +error zb2rhjddJ5DNzBrFu8G6CP1ApY25BukwCeskXHzN1H18CiVVZ 2222 http://127.0.0.1:$GWAY_PORT/ipfs/QmcHm3BL2cXuQ6rJdKQgPrmT9suqGkfy2KzH3MkXPEBXU6 0 +EOF + +test_expect_success "ipfs filestore verify is correct" ' + ipfs filestore verify | sort > verify_actual_2 && + test_cmp verify_expect_2 verify_actual_2 +' + +test_expect_success "files can not be retrieved via the urlstore" ' + test_must_fail ipfs cat $HASH1 > /dev/null && + test_must_fail ipfs cat $HASH2 > /dev/null +' + +test_expect_success "add large file using gateway address via url store" ' + HASH3=$(ipfs urlstore add http://127.0.0.1:$GWAY_PORT/ipfs/$HASH3a) +' + +test_expect_success "make sure hashes are different" ' + test $HASH3a != $HASH3 +' + +test_expect_success "get large file via urlstore" ' + ipfs get $HASH3 -o file3.actual && + test_cmp file3 file3.actual +' + +test_kill_ipfs_daemon + +test_expect_success "files can not be retrieved via the urlstore" ' + test_must_fail ipfs cat $HASH1 > /dev/null && + test_must_fail ipfs cat $HASH2 > /dev/null && + test_must_fail ipfs cat $HASH3 > /dev/null +' + +test_done
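
A few notes for readers following the patch. The core of the new add path, condensed below from the `Run` handler in `core/commands/urlstore.go`, fetches the URL, chunks the response body, and builds a balanced DAG with raw leaves and a CIDv1 dag-pb prefix; the URL is threaded through `DagBuilderParams` so the `FileManager` records a (url, offset, size) reference per leaf instead of copying block data. Because a plain `ipfs add` defaults to CIDv0 without raw leaves, the root hash printed by `ipfs urlstore add` differs from that of a regular add of the same bytes, which is what the "make sure hashes are different" steps in the sharness test exercise. This is only a sketch: the helper name `urlstoreAdd` is invented for illustration, and the config check, error wrapping, and `BlockStat` output of the real command are trimmed.

```go
package main

import (
	"fmt"
	"net/http"

	core "github.com/ipfs/go-ipfs/core"
	balanced "github.com/ipfs/go-ipfs/importer/balanced"
	ihelper "github.com/ipfs/go-ipfs/importer/helpers"

	mh "gx/ipfs/QmPnFwZ2JXKnXgMw8CdBPxn7FWh6LLdjUjxV1fKHuJnkr8/go-multihash"
	chunk "gx/ipfs/QmXnzH7wowyLZy8XJxxaQCVTgLMcDXdMBznmsrmQWCyiQV/go-ipfs-chunker"
	cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid"
)

// urlstoreAdd is a hypothetical, condensed version of the 'ipfs urlstore add'
// handler: it streams the URL once to compute the DAG, while the leaves are
// recorded by the FileManager as url/offset/size references.
func urlstoreAdd(n *core.IpfsNode, url string) (string, error) {
	hres, err := http.Get(url)
	if err != nil {
		return "", err
	}
	defer hres.Body.Close()
	if hres.StatusCode != http.StatusOK {
		return "", fmt.Errorf("expected code 200, got: %d", hres.StatusCode)
	}

	chk := chunk.NewSizeSplitter(hres.Body, chunk.DefaultBlockSize)
	prefix := cid.NewPrefixV1(cid.DagProtobuf, mh.SHA2_256)

	dbp := &ihelper.DagBuilderParams{
		Dagserv:   n.DAG,
		RawLeaves: true, // leaves are raw blocks, unlike the 'ipfs add' default
		Maxlinks:  ihelper.DefaultLinksPerBlock,
		NoCopy:    true, // do not copy block data into the datastore
		Prefix:    &prefix,
		URL:       url, // picked up by DagBuilderHelper and stored with each leaf
	}

	root, err := balanced.Layout(dbp.New(chk))
	if err != nil {
		return "", err
	}
	return root.Cid().String(), nil
}
```

The real command additionally requires `Experimental.UrlstoreEnabled` in the config and reports the root CID together with the response's `Content-Length` as a `BlockStat`.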
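
On the read side, `readURLDataObj` in `filestore/fsrefstore.go` reconstructs a block by asking the origin server for exactly the byte range recorded in the `DataObj`, then re-hashing the bytes and comparing against the requested CID, so a changed or vanished URL surfaces as a failed reference (the "error" lines that `ipfs filestore verify` prints in the test after the gateway content is garbage-collected) rather than silently wrong data. The sketch below mirrors that logic under an invented helper name, `fetchAndVerify`; the `AllowUrls` gate and the `CorruptReferenceError` wrapping from the patch are left out.

```go
package main

import (
	"fmt"
	"io"
	"net/http"

	cid "gx/ipfs/QmapdYm1b22Frv3k17fqrBYTFRxwiaVJkB299Mfn33edeB/go-cid"
)

// fetchAndVerify fetches size bytes at offset from url with an HTTP Range
// request and verifies them against the expected CID, much as the urlstore
// does when serving a block.
func fetchAndVerify(c *cid.Cid, url string, offset, size uint64) ([]byte, error) {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	// Ask for exactly the range that backs this block.
	req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+size-1))

	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusPartialContent {
		return nil, fmt.Errorf("expected HTTP 206, got %d", res.StatusCode)
	}

	buf := make([]byte, size)
	if _, err := io.ReadFull(res.Body, buf); err != nil {
		return nil, err // short read: the remote content has changed or is gone
	}

	// Re-hash with the CID's own prefix and make sure the content still matches.
	got, err := c.Prefix().Sum(buf)
	if err != nil {
		return nil, err
	}
	if !c.Equals(got) {
		return nil, fmt.Errorf("data at %s (offset %d) does not match %s", url, offset, c)
	}
	return buf, nil
}
```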
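
Finally, the new `IsURL` helper spells out its prefix check byte by byte. Apart from rejecting a bare `http://` or `https://` with nothing after the scheme, it accepts the same strings as the more familiar formulation below (hypothetical name `isURL`): anything that starts with `http://` or `https://`.

```go
package main

import "strings"

// isURL is an illustrative equivalent of the patch's IsURL, minus the
// length check that rejects a scheme with no host or path after it.
func isURL(str string) bool {
	return strings.HasPrefix(str, "http://") || strings.HasPrefix(str, "https://")
}
```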