diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index d30fe665cc6..a56ee039f96 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -95,6 +95,10 @@ "ImportPath": "github.com/cryptix/mdns", "Rev": "04ff72a32679d57d009c0ac0fc5c4cda10350bad" }, + { + "ImportPath": "github.com/docker/spdystream", + "Rev": "e372247595b2edd26f6d022288e97eed793d70a2" + }, { "ImportPath": "github.com/dustin/go-humanize", "Rev": "00897f070f09f194c26d65afae734ba4c32404e8" @@ -162,7 +166,7 @@ }, { "ImportPath": "github.com/jbenet/go-datastore", - "Rev": "c835c30f206c1e97172e428f052e225adab9abde" + "Rev": "bec407bccea1cfaf56ee946e947642e3ac5a9258" }, { "ImportPath": "github.com/jbenet/go-detect-race", @@ -220,7 +224,7 @@ }, { "ImportPath": "github.com/jbenet/go-stream-muxer", - "Rev": "4a97500beeb081571128d41d539787e137f18404" + "Rev": "e2e261765847234749629e0190fef193a4548303" }, { "ImportPath": "github.com/jbenet/go-temp-err-catcher", @@ -334,6 +338,14 @@ "ImportPath": "github.com/whyrusleeping/go-metrics", "Rev": "1cd8009604ec2238b5a71305a0ecd974066e0e16" }, + { + "ImportPath": "github.com/whyrusleeping/go-multiplex", + "Rev": "474b9aebeb391746f304ddf7c764a5da12319857" + }, + { + "ImportPath": "github.com/whyrusleeping/go-multistream", + "Rev": "31bb014803a6eba2261bda5593e42c016a5f33bb" + }, { "ImportPath": "github.com/whyrusleeping/multiaddr-filter", "Rev": "9e26222151125ecd3fc1fd190179b6bdd55f5608" diff --git a/Godeps/_workspace/src/github.com/chriscool/go-sleep/.gitignore b/Godeps/_workspace/src/github.com/chriscool/go-sleep/.gitignore new file mode 100644 index 00000000000..1bc62c4f51c --- /dev/null +++ b/Godeps/_workspace/src/github.com/chriscool/go-sleep/.gitignore @@ -0,0 +1 @@ +go-sleep diff --git a/Godeps/_workspace/src/github.com/docker/spdystream/CONTRIBUTING.md b/Godeps/_workspace/src/github.com/docker/spdystream/CONTRIBUTING.md new file mode 100644 index 00000000000..d4eddcc5396 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/spdystream/CONTRIBUTING.md @@ -0,0 +1,13 @@ +# Contributing to SpdyStream + +Want to hack on spdystream? Awesome! Here are instructions to get you +started. + +SpdyStream is a part of the [Docker](https://docker.io) project, and follows +the same rules and principles. If you're already familiar with the way +Docker does things, you'll feel right at home. + +Otherwise, go read +[Docker's contributions guidelines](https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md). + +Happy hacking! diff --git a/Godeps/_workspace/src/github.com/docker/spdystream/LICENSE b/Godeps/_workspace/src/github.com/docker/spdystream/LICENSE new file mode 100644 index 00000000000..27448585ad4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/spdystream/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/Godeps/_workspace/src/github.com/docker/spdystream/MAINTAINERS b/Godeps/_workspace/src/github.com/docker/spdystream/MAINTAINERS new file mode 100644 index 00000000000..4eb44dcf437 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/spdystream/MAINTAINERS @@ -0,0 +1 @@ +Derek McGowan (@dmcg) diff --git a/Godeps/_workspace/src/github.com/docker/spdystream/README.md b/Godeps/_workspace/src/github.com/docker/spdystream/README.md new file mode 100644 index 00000000000..076b17919c0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/spdystream/README.md @@ -0,0 +1,78 @@ +# SpdyStream + +A multiplexed stream library using spdy + +## Usage + +Client example (connecting to mirroring server without auth) + +```go +package main + +import ( + "fmt" + "github.com/docker/spdystream" + "net" + "net/http" +) + +func main() { + conn, err := net.Dial("tcp", "localhost:8080") + if err != nil { + panic(err) + } + spdyConn, err := spdystream.NewConnection(conn, false) + if err != nil { + panic(err) + } + go spdyConn.Serve(spdystream.NoOpStreamHandler) + stream, err := spdyConn.CreateStream(http.Header{}, nil, false) + if err != nil { + panic(err) + } + + stream.Wait() + + fmt.Fprint(stream, "Writing to stream") + + buf := make([]byte, 25) + stream.Read(buf) + fmt.Println(string(buf)) + + stream.Close() +} +``` + +Server example (mirroring server without auth) + +```go +package main + +import ( + "github.com/docker/spdystream" + "net" +) + +func main() { + listener, err := net.Listen("tcp", "localhost:8080") + if err != nil { + panic(err) + } + for { + conn, err := listener.Accept() + if err != nil { + panic(err) + } + spdyConn, err := spdystream.NewConnection(conn, true) + if err != nil { + panic(err) + } + go spdyConn.Serve(spdystream.MirrorStreamHandler) + } +} +``` + +## Copyright and license + +Code and documentation copyright 2013-2014 Docker, inc. Code released under the Apache 2.0 license. +Docs released under Creative commons. 
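Both README examples above panic on every error to stay short, and the client reads into a fixed 25-byte buffer without checking how many bytes actually arrived. Below is a minimal sketch of the same client against the same mirroring server, with explicit error handling, an idle timeout, and a clean shutdown. `SetIdleTimeout`, `CreateStream`, and `CloseWait` are declared in `connection.go` later in this diff; the `Stream` methods (`Wait` returning an error, `Read`, `Close`) live in `stream.go`, which is not part of this excerpt, so treat those signatures as assumptions.

```go
package main

import (
	"fmt"
	"net"
	"net/http"
	"time"

	"github.com/docker/spdystream"
)

func main() {
	conn, err := net.Dial("tcp", "localhost:8080")
	if err != nil {
		panic(err)
	}
	spdyConn, err := spdystream.NewConnection(conn, false)
	if err != nil {
		panic(err)
	}
	go spdyConn.Serve(spdystream.NoOpStreamHandler)

	// Tear the connection down if no frames are written for a minute.
	spdyConn.SetIdleTimeout(time.Minute)

	stream, err := spdyConn.CreateStream(http.Header{}, nil, false)
	if err != nil {
		panic(err)
	}
	// Assumption: Wait blocks for the reply frame and reports a reset as an error.
	if err := stream.Wait(); err != nil {
		panic(err)
	}

	if _, err := fmt.Fprint(stream, "Writing to stream"); err != nil {
		panic(err)
	}
	buf := make([]byte, 25)
	n, err := stream.Read(buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf[:n])) // print only the bytes actually echoed back

	stream.Close()
	// Send a GOAWAY frame and block until connection shutdown finishes.
	if err := spdyConn.CloseWait(); err != nil {
		panic(err)
	}
}
```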
diff --git a/Godeps/_workspace/src/github.com/docker/spdystream/connection.go b/Godeps/_workspace/src/github.com/docker/spdystream/connection.go new file mode 100644 index 00000000000..c539c7040ce --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/spdystream/connection.go @@ -0,0 +1,902 @@ +package spdystream + +import ( + "errors" + "fmt" + "io" + "net" + "net/http" + "sync" + "time" + + "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/docker/spdystream/spdy" +) + +var ( + ErrInvalidStreamId = errors.New("Invalid stream id") + ErrTimeout = errors.New("Timeout occured") + ErrReset = errors.New("Stream reset") + ErrWriteClosedStream = errors.New("Write on closed stream") +) + +const ( + FRAME_WORKERS = 5 + QUEUE_SIZE = 50 +) + +type StreamHandler func(stream *Stream) + +type AuthHandler func(header http.Header, slot uint8, parent uint32) bool + +type idleAwareFramer struct { + f *spdy.Framer + conn *Connection + writeLock sync.Mutex + resetChan chan struct{} + setTimeoutChan chan time.Duration + timeout time.Duration +} + +func newIdleAwareFramer(framer *spdy.Framer) *idleAwareFramer { + iaf := &idleAwareFramer{ + f: framer, + resetChan: make(chan struct{}, 2), + setTimeoutChan: make(chan time.Duration), + } + return iaf +} + +func (i *idleAwareFramer) monitor() { + var ( + timer *time.Timer + expired <-chan time.Time + resetChan = i.resetChan + ) +Loop: + for { + select { + case timeout := <-i.setTimeoutChan: + i.timeout = timeout + if timeout == 0 { + if timer != nil { + timer.Stop() + } + } else { + if timer == nil { + timer = time.NewTimer(timeout) + expired = timer.C + } else { + timer.Reset(timeout) + } + } + case <-resetChan: + if timer != nil && i.timeout > 0 { + timer.Reset(i.timeout) + } + case <-expired: + i.conn.streamCond.L.Lock() + streams := i.conn.streams + i.conn.streams = make(map[spdy.StreamId]*Stream) + i.conn.streamCond.Broadcast() + i.conn.streamCond.L.Unlock() + go func() { + for _, stream := range streams { + stream.resetStream() + } + i.conn.Close() + }() + case <-i.conn.closeChan: + if timer != nil { + timer.Stop() + } + + // Start a goroutine to drain resetChan. This is needed because we've seen + // some unit tests with large numbers of goroutines get into a situation + // where resetChan fills up, at least 1 call to Write() is still trying to + // send to resetChan, the connection gets closed, and this case statement + // attempts to grab the write lock that Write() already has, causing a + // deadlock. + // + // See https://github.com/docker/spdystream/issues/49 for more details. + go func() { + for _ = range resetChan { + } + }() + + i.writeLock.Lock() + close(resetChan) + i.resetChan = nil + i.writeLock.Unlock() + + break Loop + } + } + + // Drain resetChan + for _ = range resetChan { + } +} + +func (i *idleAwareFramer) WriteFrame(frame spdy.Frame) error { + i.writeLock.Lock() + defer i.writeLock.Unlock() + if i.resetChan == nil { + return io.EOF + } + err := i.f.WriteFrame(frame) + if err != nil { + return err + } + + i.resetChan <- struct{}{} + + return nil +} + +func (i *idleAwareFramer) ReadFrame() (spdy.Frame, error) { + frame, err := i.f.ReadFrame() + if err != nil { + return nil, err + } + + // resetChan should never be closed since it is only closed + // when the connection has closed its closeChan. 
This closure + // only occurs after all Reads have finished + // TODO (dmcgowan): refactor relationship into connection + i.resetChan <- struct{}{} + + return frame, nil +} + +type Connection struct { + conn net.Conn + framer *idleAwareFramer + + closeChan chan bool + goneAway bool + lastStreamChan chan<- *Stream + goAwayTimeout time.Duration + closeTimeout time.Duration + + streamLock *sync.RWMutex + streamCond *sync.Cond + streams map[spdy.StreamId]*Stream + + nextIdLock sync.Mutex + receiveIdLock sync.Mutex + nextStreamId spdy.StreamId + receivedStreamId spdy.StreamId + + pingIdLock sync.Mutex + pingId uint32 + pingChans map[uint32]chan error + + shutdownLock sync.Mutex + shutdownChan chan error + hasShutdown bool +} + +// NewConnection creates a new spdy connection from an existing +// network connection. +func NewConnection(conn net.Conn, server bool) (*Connection, error) { + framer, framerErr := spdy.NewFramer(conn, conn) + if framerErr != nil { + return nil, framerErr + } + idleAwareFramer := newIdleAwareFramer(framer) + var sid spdy.StreamId + var rid spdy.StreamId + var pid uint32 + if server { + sid = 2 + rid = 1 + pid = 2 + } else { + sid = 1 + rid = 2 + pid = 1 + } + + streamLock := new(sync.RWMutex) + streamCond := sync.NewCond(streamLock) + + session := &Connection{ + conn: conn, + framer: idleAwareFramer, + + closeChan: make(chan bool), + goAwayTimeout: time.Duration(0), + closeTimeout: time.Duration(0), + + streamLock: streamLock, + streamCond: streamCond, + streams: make(map[spdy.StreamId]*Stream), + nextStreamId: sid, + receivedStreamId: rid, + + pingId: pid, + pingChans: make(map[uint32]chan error), + + shutdownChan: make(chan error), + } + idleAwareFramer.conn = session + go idleAwareFramer.monitor() + + return session, nil +} + +// Ping sends a ping frame across the connection and +// returns the response time +func (s *Connection) Ping() (time.Duration, error) { + pid := s.pingId + s.pingIdLock.Lock() + if s.pingId > 0x7ffffffe { + s.pingId = s.pingId - 0x7ffffffe + } else { + s.pingId = s.pingId + 2 + } + s.pingIdLock.Unlock() + pingChan := make(chan error) + s.pingChans[pid] = pingChan + defer delete(s.pingChans, pid) + + frame := &spdy.PingFrame{Id: pid} + startTime := time.Now() + writeErr := s.framer.WriteFrame(frame) + if writeErr != nil { + return time.Duration(0), writeErr + } + select { + case <-s.closeChan: + return time.Duration(0), errors.New("connection closed") + case err, ok := <-pingChan: + if ok && err != nil { + return time.Duration(0), err + } + break + } + return time.Now().Sub(startTime), nil +} + +// Serve handles frames sent from the server, including reply frames +// which are needed to fully initiate connections. Both clients and servers +// should call Serve in a separate goroutine before creating streams. 
+func (s *Connection) Serve(newHandler StreamHandler) { + // Parition queues to ensure stream frames are handled + // by the same worker, ensuring order is maintained + frameQueues := make([]*PriorityFrameQueue, FRAME_WORKERS) + for i := 0; i < FRAME_WORKERS; i++ { + frameQueues[i] = NewPriorityFrameQueue(QUEUE_SIZE) + // Ensure frame queue is drained when connection is closed + go func(frameQueue *PriorityFrameQueue) { + <-s.closeChan + frameQueue.Drain() + }(frameQueues[i]) + + go s.frameHandler(frameQueues[i], newHandler) + } + + var partitionRoundRobin int + for { + readFrame, err := s.framer.ReadFrame() + if err != nil { + if err != io.EOF { + fmt.Errorf("frame read error: %s", err) + } else { + debugMessage("EOF received") + } + break + } + var priority uint8 + var partition int + switch frame := readFrame.(type) { + case *spdy.SynStreamFrame: + if s.checkStreamFrame(frame) { + priority = frame.Priority + partition = int(frame.StreamId % FRAME_WORKERS) + debugMessage("(%p) Add stream frame: %d ", s, frame.StreamId) + s.addStreamFrame(frame) + } else { + debugMessage("(%p) Rejected stream frame: %d ", s, frame.StreamId) + continue + } + case *spdy.SynReplyFrame: + priority = s.getStreamPriority(frame.StreamId) + partition = int(frame.StreamId % FRAME_WORKERS) + case *spdy.DataFrame: + priority = s.getStreamPriority(frame.StreamId) + partition = int(frame.StreamId % FRAME_WORKERS) + case *spdy.RstStreamFrame: + priority = s.getStreamPriority(frame.StreamId) + partition = int(frame.StreamId % FRAME_WORKERS) + case *spdy.HeadersFrame: + priority = s.getStreamPriority(frame.StreamId) + partition = int(frame.StreamId % FRAME_WORKERS) + case *spdy.PingFrame: + priority = 0 + partition = partitionRoundRobin + partitionRoundRobin = (partitionRoundRobin + 1) % FRAME_WORKERS + case *spdy.GoAwayFrame: + priority = 0 + partition = partitionRoundRobin + partitionRoundRobin = (partitionRoundRobin + 1) % FRAME_WORKERS + default: + priority = 7 + partition = partitionRoundRobin + partitionRoundRobin = (partitionRoundRobin + 1) % FRAME_WORKERS + } + frameQueues[partition].Push(readFrame, priority) + } + close(s.closeChan) + + s.streamCond.L.Lock() + // notify streams that they're now closed, which will + // unblock any stream Read() calls + for _, stream := range s.streams { + stream.closeRemoteChannels() + } + s.streams = make(map[spdy.StreamId]*Stream) + s.streamCond.Broadcast() + s.streamCond.L.Unlock() +} + +func (s *Connection) frameHandler(frameQueue *PriorityFrameQueue, newHandler StreamHandler) { + for { + popFrame := frameQueue.Pop() + if popFrame == nil { + return + } + + var frameErr error + switch frame := popFrame.(type) { + case *spdy.SynStreamFrame: + frameErr = s.handleStreamFrame(frame, newHandler) + case *spdy.SynReplyFrame: + frameErr = s.handleReplyFrame(frame) + case *spdy.DataFrame: + frameErr = s.handleDataFrame(frame) + case *spdy.RstStreamFrame: + frameErr = s.handleResetFrame(frame) + case *spdy.HeadersFrame: + frameErr = s.handleHeaderFrame(frame) + case *spdy.PingFrame: + frameErr = s.handlePingFrame(frame) + case *spdy.GoAwayFrame: + frameErr = s.handleGoAwayFrame(frame) + default: + frameErr = fmt.Errorf("unhandled frame type: %T", frame) + } + + if frameErr != nil { + fmt.Errorf("frame handling error: %s", frameErr) + } + } +} + +func (s *Connection) getStreamPriority(streamId spdy.StreamId) uint8 { + stream, streamOk := s.getStream(streamId) + if !streamOk { + return 7 + } + return stream.priority +} + +func (s *Connection) addStreamFrame(frame *spdy.SynStreamFrame) { + 
var parent *Stream + if frame.AssociatedToStreamId != spdy.StreamId(0) { + parent, _ = s.getStream(frame.AssociatedToStreamId) + } + + stream := &Stream{ + streamId: frame.StreamId, + parent: parent, + conn: s, + startChan: make(chan error), + headers: frame.Headers, + finished: (frame.CFHeader.Flags & spdy.ControlFlagUnidirectional) != 0x00, + replyCond: sync.NewCond(new(sync.Mutex)), + dataChan: make(chan []byte), + headerChan: make(chan http.Header), + closeChan: make(chan bool), + } + if frame.CFHeader.Flags&spdy.ControlFlagFin != 0x00 { + stream.closeRemoteChannels() + } + + s.addStream(stream) +} + +// checkStreamFrame checks to see if a stream frame is allowed. +// If the stream is invalid, then a reset frame with protocol error +// will be returned. +func (s *Connection) checkStreamFrame(frame *spdy.SynStreamFrame) bool { + s.receiveIdLock.Lock() + defer s.receiveIdLock.Unlock() + if s.goneAway { + return false + } + validationErr := s.validateStreamId(frame.StreamId) + if validationErr != nil { + go func() { + resetErr := s.sendResetFrame(spdy.ProtocolError, frame.StreamId) + if resetErr != nil { + fmt.Errorf("reset error: %s", resetErr) + } + }() + return false + } + return true +} + +func (s *Connection) handleStreamFrame(frame *spdy.SynStreamFrame, newHandler StreamHandler) error { + stream, ok := s.getStream(frame.StreamId) + if !ok { + return fmt.Errorf("Missing stream: %d", frame.StreamId) + } + + newHandler(stream) + + return nil +} + +func (s *Connection) handleReplyFrame(frame *spdy.SynReplyFrame) error { + debugMessage("(%p) Reply frame received for %d", s, frame.StreamId) + stream, streamOk := s.getStream(frame.StreamId) + if !streamOk { + debugMessage("Reply frame gone away for %d", frame.StreamId) + // Stream has already gone away + return nil + } + if stream.replied { + // Stream has already received reply + return nil + } + stream.replied = true + + // TODO Check for error + if (frame.CFHeader.Flags & spdy.ControlFlagFin) != 0x00 { + s.remoteStreamFinish(stream) + } + + close(stream.startChan) + + return nil +} + +func (s *Connection) handleResetFrame(frame *spdy.RstStreamFrame) error { + stream, streamOk := s.getStream(frame.StreamId) + if !streamOk { + // Stream has already been removed + return nil + } + s.removeStream(stream) + stream.closeRemoteChannels() + + if !stream.replied { + stream.replied = true + stream.startChan <- ErrReset + close(stream.startChan) + } + + stream.finishLock.Lock() + stream.finished = true + stream.finishLock.Unlock() + + return nil +} + +func (s *Connection) handleHeaderFrame(frame *spdy.HeadersFrame) error { + stream, streamOk := s.getStream(frame.StreamId) + if !streamOk { + // Stream has already gone away + return nil + } + if !stream.replied { + // No reply received...Protocol error? + return nil + } + + // TODO limit headers while not blocking (use buffered chan or goroutine?) + select { + case <-stream.closeChan: + return nil + case stream.headerChan <- frame.Headers: + } + + if (frame.CFHeader.Flags & spdy.ControlFlagFin) != 0x00 { + s.remoteStreamFinish(stream) + } + + return nil +} + +func (s *Connection) handleDataFrame(frame *spdy.DataFrame) error { + debugMessage("(%p) Data frame received for %d", s, frame.StreamId) + stream, streamOk := s.getStream(frame.StreamId) + if !streamOk { + debugMessage("Data frame gone away for %d", frame.StreamId) + // Stream has already gone away + return nil + } + if !stream.replied { + debugMessage("Data frame not replied %d", frame.StreamId) + // No reply received...Protocol error? 
+ return nil + } + + debugMessage("(%p) (%d) Data frame handling", stream, stream.streamId) + if len(frame.Data) > 0 { + stream.dataLock.RLock() + select { + case <-stream.closeChan: + debugMessage("(%p) (%d) Data frame not sent (stream shut down)", stream, stream.streamId) + case stream.dataChan <- frame.Data: + debugMessage("(%p) (%d) Data frame sent", stream, stream.streamId) + } + stream.dataLock.RUnlock() + } + if (frame.Flags & spdy.DataFlagFin) != 0x00 { + s.remoteStreamFinish(stream) + } + return nil +} + +func (s *Connection) handlePingFrame(frame *spdy.PingFrame) error { + if s.pingId&0x01 != frame.Id&0x01 { + return s.framer.WriteFrame(frame) + } + pingChan, pingOk := s.pingChans[frame.Id] + if pingOk { + close(pingChan) + } + return nil +} + +func (s *Connection) handleGoAwayFrame(frame *spdy.GoAwayFrame) error { + debugMessage("(%p) Go away received", s) + s.receiveIdLock.Lock() + if s.goneAway { + s.receiveIdLock.Unlock() + return nil + } + s.goneAway = true + s.receiveIdLock.Unlock() + + if s.lastStreamChan != nil { + stream, _ := s.getStream(frame.LastGoodStreamId) + go func() { + s.lastStreamChan <- stream + }() + } + + // Do not block frame handler waiting for closure + go s.shutdown(s.goAwayTimeout) + + return nil +} + +func (s *Connection) remoteStreamFinish(stream *Stream) { + stream.closeRemoteChannels() + + stream.finishLock.Lock() + if stream.finished { + // Stream is fully closed, cleanup + s.removeStream(stream) + } + stream.finishLock.Unlock() +} + +// CreateStream creates a new spdy stream using the parameters for +// creating the stream frame. The stream frame will be sent upon +// calling this function, however this function does not wait for +// the reply frame. If waiting for the reply is desired, use +// the stream Wait or WaitTimeout function on the stream returned +// by this function. 
+func (s *Connection) CreateStream(headers http.Header, parent *Stream, fin bool) (*Stream, error) { + streamId := s.getNextStreamId() + if streamId == 0 { + return nil, fmt.Errorf("Unable to get new stream id") + } + + stream := &Stream{ + streamId: streamId, + parent: parent, + conn: s, + startChan: make(chan error), + headers: headers, + dataChan: make(chan []byte), + headerChan: make(chan http.Header), + closeChan: make(chan bool), + } + + debugMessage("(%p) (%p) Create stream", s, stream) + + s.addStream(stream) + + return stream, s.sendStream(stream, fin) +} + +func (s *Connection) shutdown(closeTimeout time.Duration) { + // TODO Ensure this isn't called multiple times + s.shutdownLock.Lock() + if s.hasShutdown { + s.shutdownLock.Unlock() + return + } + s.hasShutdown = true + s.shutdownLock.Unlock() + + var timeout <-chan time.Time + if closeTimeout > time.Duration(0) { + timeout = time.After(closeTimeout) + } + streamsClosed := make(chan bool) + + go func() { + s.streamCond.L.Lock() + for len(s.streams) > 0 { + debugMessage("Streams opened: %d, %#v", len(s.streams), s.streams) + s.streamCond.Wait() + } + s.streamCond.L.Unlock() + close(streamsClosed) + }() + + var err error + select { + case <-streamsClosed: + // No active streams, close should be safe + err = s.conn.Close() + case <-timeout: + // Force ungraceful close + err = s.conn.Close() + // Wait for cleanup to clear active streams + <-streamsClosed + } + + if err != nil { + duration := 10 * time.Minute + time.AfterFunc(duration, func() { + select { + case err, ok := <-s.shutdownChan: + if ok { + fmt.Errorf("Unhandled close error after %s: %s", duration, err) + } + default: + } + }) + s.shutdownChan <- err + } + close(s.shutdownChan) + + return +} + +// Closes spdy connection by sending GoAway frame and initiating shutdown +func (s *Connection) Close() error { + s.receiveIdLock.Lock() + if s.goneAway { + s.receiveIdLock.Unlock() + return nil + } + s.goneAway = true + s.receiveIdLock.Unlock() + + var lastStreamId spdy.StreamId + if s.receivedStreamId > 2 { + lastStreamId = s.receivedStreamId - 2 + } + + goAwayFrame := &spdy.GoAwayFrame{ + LastGoodStreamId: lastStreamId, + Status: spdy.GoAwayOK, + } + + err := s.framer.WriteFrame(goAwayFrame) + if err != nil { + return err + } + + go s.shutdown(s.closeTimeout) + + return nil +} + +// CloseWait closes the connection and waits for shutdown +// to finish. Note the underlying network Connection +// is not closed until the end of shutdown. +func (s *Connection) CloseWait() error { + closeErr := s.Close() + if closeErr != nil { + return closeErr + } + shutdownErr, ok := <-s.shutdownChan + if ok { + return shutdownErr + } + return nil +} + +// Wait waits for the connection to finish shutdown or for +// the wait timeout duration to expire. This needs to be +// called either after Close has been called or the GOAWAYFRAME +// has been received. If the wait timeout is 0, this function +// will block until shutdown finishes. If wait is never called +// and a shutdown error occurs, that error will be logged as an +// unhandled error. +func (s *Connection) Wait(waitTimeout time.Duration) error { + var timeout <-chan time.Time + if waitTimeout > time.Duration(0) { + timeout = time.After(waitTimeout) + } + + select { + case err, ok := <-s.shutdownChan: + if ok { + return err + } + case <-timeout: + return ErrTimeout + } + return nil +} + +// NotifyClose registers a channel to be called when the remote +// peer inidicates connection closure. 
The last stream to be +// received by the remote will be sent on the channel. The notify +// timeout will determine the duration between go away received +// and the connection being closed. +func (s *Connection) NotifyClose(c chan<- *Stream, timeout time.Duration) { + s.goAwayTimeout = timeout + s.lastStreamChan = c +} + +// SetCloseTimeout sets the amount of time close will wait for +// streams to finish before terminating the underlying network +// connection. Setting the timeout to 0 will cause close to +// wait forever, which is the default. +func (s *Connection) SetCloseTimeout(timeout time.Duration) { + s.closeTimeout = timeout +} + +// SetIdleTimeout sets the amount of time the connection may sit idle before +// it is forcefully terminated. +func (s *Connection) SetIdleTimeout(timeout time.Duration) { + s.framer.setTimeoutChan <- timeout +} + +func (s *Connection) sendHeaders(headers http.Header, stream *Stream, fin bool) error { + var flags spdy.ControlFlags + if fin { + flags = spdy.ControlFlagFin + } + + headerFrame := &spdy.HeadersFrame{ + StreamId: stream.streamId, + Headers: headers, + CFHeader: spdy.ControlFrameHeader{Flags: flags}, + } + + return s.framer.WriteFrame(headerFrame) +} + +func (s *Connection) sendReply(headers http.Header, stream *Stream, fin bool) error { + var flags spdy.ControlFlags + if fin { + flags = spdy.ControlFlagFin + } + + replyFrame := &spdy.SynReplyFrame{ + StreamId: stream.streamId, + Headers: headers, + CFHeader: spdy.ControlFrameHeader{Flags: flags}, + } + + return s.framer.WriteFrame(replyFrame) +} + +func (s *Connection) sendResetFrame(status spdy.RstStreamStatus, streamId spdy.StreamId) error { + resetFrame := &spdy.RstStreamFrame{ + StreamId: streamId, + Status: status, + } + + return s.framer.WriteFrame(resetFrame) +} + +func (s *Connection) sendReset(status spdy.RstStreamStatus, stream *Stream) error { + return s.sendResetFrame(status, stream.streamId) +} + +func (s *Connection) sendStream(stream *Stream, fin bool) error { + var flags spdy.ControlFlags + if fin { + flags = spdy.ControlFlagFin + stream.finished = true + } + + var parentId spdy.StreamId + if stream.parent != nil { + parentId = stream.parent.streamId + } + + streamFrame := &spdy.SynStreamFrame{ + StreamId: spdy.StreamId(stream.streamId), + AssociatedToStreamId: spdy.StreamId(parentId), + Headers: stream.headers, + CFHeader: spdy.ControlFrameHeader{Flags: flags}, + } + + return s.framer.WriteFrame(streamFrame) +} + +// getNextStreamId returns the next sequential id +// every call should produce a unique value or an error +func (s *Connection) getNextStreamId() spdy.StreamId { + s.nextIdLock.Lock() + defer s.nextIdLock.Unlock() + sid := s.nextStreamId + if sid > 0x7fffffff { + return 0 + } + s.nextStreamId = s.nextStreamId + 2 + return sid +} + +// PeekNextStreamId returns the next sequential id and keeps the next id untouched +func (s *Connection) PeekNextStreamId() spdy.StreamId { + sid := s.nextStreamId + return sid +} + +func (s *Connection) validateStreamId(rid spdy.StreamId) error { + if rid > 0x7fffffff || rid < s.receivedStreamId { + return ErrInvalidStreamId + } + s.receivedStreamId = rid + 2 + return nil +} + +func (s *Connection) addStream(stream *Stream) { + s.streamCond.L.Lock() + s.streams[stream.streamId] = stream + debugMessage("(%p) (%p) Stream added, broadcasting: %d", s, stream, stream.streamId) + s.streamCond.Broadcast() + s.streamCond.L.Unlock() +} + +func (s *Connection) removeStream(stream *Stream) { + s.streamCond.L.Lock() + delete(s.streams, 
stream.streamId) + debugMessage("Stream removed, broadcasting: %d", stream.streamId) + s.streamCond.Broadcast() + s.streamCond.L.Unlock() +} + +func (s *Connection) getStream(streamId spdy.StreamId) (stream *Stream, ok bool) { + s.streamLock.RLock() + stream, ok = s.streams[streamId] + s.streamLock.RUnlock() + return +} + +// FindStream looks up the given stream id and either waits for the +// stream to be found or returns nil if the stream id is no longer +// valid. +func (s *Connection) FindStream(streamId uint32) *Stream { + var stream *Stream + var ok bool + s.streamCond.L.Lock() + stream, ok = s.streams[spdy.StreamId(streamId)] + debugMessage("(%p) Found stream %d? %t", s, spdy.StreamId(streamId), ok) + for !ok && streamId >= uint32(s.receivedStreamId) { + s.streamCond.Wait() + stream, ok = s.streams[spdy.StreamId(streamId)] + } + s.streamCond.L.Unlock() + return stream +} + +func (s *Connection) CloseChan() <-chan bool { + return s.closeChan +} diff --git a/Godeps/_workspace/src/github.com/docker/spdystream/handlers.go b/Godeps/_workspace/src/github.com/docker/spdystream/handlers.go new file mode 100644 index 00000000000..b59fa5fdcd0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/spdystream/handlers.go @@ -0,0 +1,38 @@ +package spdystream + +import ( + "io" + "net/http" +) + +// MirrorStreamHandler mirrors all streams. +func MirrorStreamHandler(stream *Stream) { + replyErr := stream.SendReply(http.Header{}, false) + if replyErr != nil { + return + } + + go func() { + io.Copy(stream, stream) + stream.Close() + }() + go func() { + for { + header, receiveErr := stream.ReceiveHeader() + if receiveErr != nil { + return + } + sendErr := stream.SendHeader(header, false) + if sendErr != nil { + return + } + } + }() +} + +// NoopStreamHandler does nothing when stream connects, most +// likely used with RejectAuthHandler which will not allow any +// streams to make it to the stream handler. 
+func NoOpStreamHandler(stream *Stream) { + stream.SendReply(http.Header{}, false) +} diff --git a/Godeps/_workspace/src/github.com/docker/spdystream/priority.go b/Godeps/_workspace/src/github.com/docker/spdystream/priority.go new file mode 100644 index 00000000000..26d89abea06 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/spdystream/priority.go @@ -0,0 +1,98 @@ +package spdystream + +import ( + "container/heap" + "sync" + + "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/docker/spdystream/spdy" +) + +type prioritizedFrame struct { + frame spdy.Frame + priority uint8 + insertId uint64 +} + +type frameQueue []*prioritizedFrame + +func (fq frameQueue) Len() int { + return len(fq) +} + +func (fq frameQueue) Less(i, j int) bool { + if fq[i].priority == fq[j].priority { + return fq[i].insertId < fq[j].insertId + } + return fq[i].priority < fq[j].priority +} + +func (fq frameQueue) Swap(i, j int) { + fq[i], fq[j] = fq[j], fq[i] +} + +func (fq *frameQueue) Push(x interface{}) { + *fq = append(*fq, x.(*prioritizedFrame)) +} + +func (fq *frameQueue) Pop() interface{} { + old := *fq + n := len(old) + *fq = old[0 : n-1] + return old[n-1] +} + +type PriorityFrameQueue struct { + queue *frameQueue + c *sync.Cond + size int + nextInsertId uint64 + drain bool +} + +func NewPriorityFrameQueue(size int) *PriorityFrameQueue { + queue := make(frameQueue, 0, size) + heap.Init(&queue) + + return &PriorityFrameQueue{ + queue: &queue, + size: size, + c: sync.NewCond(&sync.Mutex{}), + } +} + +func (q *PriorityFrameQueue) Push(frame spdy.Frame, priority uint8) { + q.c.L.Lock() + defer q.c.L.Unlock() + for q.queue.Len() >= q.size { + q.c.Wait() + } + pFrame := &prioritizedFrame{ + frame: frame, + priority: priority, + insertId: q.nextInsertId, + } + q.nextInsertId = q.nextInsertId + 1 + heap.Push(q.queue, pFrame) + q.c.Signal() +} + +func (q *PriorityFrameQueue) Pop() spdy.Frame { + q.c.L.Lock() + defer q.c.L.Unlock() + for q.queue.Len() == 0 { + if q.drain { + return nil + } + q.c.Wait() + } + frame := heap.Pop(q.queue).(*prioritizedFrame).frame + q.c.Signal() + return frame +} + +func (q *PriorityFrameQueue) Drain() { + q.c.L.Lock() + defer q.c.L.Unlock() + q.drain = true + q.c.Broadcast() +} diff --git a/Godeps/_workspace/src/github.com/docker/spdystream/priority_test.go b/Godeps/_workspace/src/github.com/docker/spdystream/priority_test.go new file mode 100644 index 00000000000..f153a496502 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/spdystream/priority_test.go @@ -0,0 +1,108 @@ +package spdystream + +import ( + "sync" + "testing" + "time" + + "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/docker/spdystream/spdy" +) + +func TestPriorityQueueOrdering(t *testing.T) { + queue := NewPriorityFrameQueue(150) + data1 := &spdy.DataFrame{} + data2 := &spdy.DataFrame{} + data3 := &spdy.DataFrame{} + data4 := &spdy.DataFrame{} + queue.Push(data1, 2) + queue.Push(data2, 1) + queue.Push(data3, 1) + queue.Push(data4, 0) + + if queue.Pop() != data4 { + t.Fatalf("Wrong order, expected data4 first") + } + if queue.Pop() != data2 { + t.Fatalf("Wrong order, expected data2 second") + } + if queue.Pop() != data3 { + t.Fatalf("Wrong order, expected data3 third") + } + if queue.Pop() != data1 { + t.Fatalf("Wrong order, expected data1 fourth") + } + + // Insert 50 Medium priority frames + for i := spdy.StreamId(50); i < 100; i++ { + queue.Push(&spdy.DataFrame{StreamId: i}, 1) + } + // Insert 50 low priority frames + for i := spdy.StreamId(100); i < 150; 
i++ { + queue.Push(&spdy.DataFrame{StreamId: i}, 2) + } + // Insert 50 high priority frames + for i := spdy.StreamId(0); i < 50; i++ { + queue.Push(&spdy.DataFrame{StreamId: i}, 0) + } + + for i := spdy.StreamId(0); i < 150; i++ { + frame := queue.Pop() + if frame.(*spdy.DataFrame).StreamId != i { + t.Fatalf("Wrong frame\nActual: %d\nExpecting: %d", frame.(*spdy.DataFrame).StreamId, i) + } + } +} + +func TestPriorityQueueSync(t *testing.T) { + queue := NewPriorityFrameQueue(150) + var wg sync.WaitGroup + insertRange := func(start, stop spdy.StreamId, priority uint8) { + for i := start; i < stop; i++ { + queue.Push(&spdy.DataFrame{StreamId: i}, priority) + } + wg.Done() + } + wg.Add(3) + go insertRange(spdy.StreamId(100), spdy.StreamId(150), 2) + go insertRange(spdy.StreamId(0), spdy.StreamId(50), 0) + go insertRange(spdy.StreamId(50), spdy.StreamId(100), 1) + + wg.Wait() + for i := spdy.StreamId(0); i < 150; i++ { + frame := queue.Pop() + if frame.(*spdy.DataFrame).StreamId != i { + t.Fatalf("Wrong frame\nActual: %d\nExpecting: %d", frame.(*spdy.DataFrame).StreamId, i) + } + } +} + +func TestPriorityQueueBlocking(t *testing.T) { + queue := NewPriorityFrameQueue(15) + for i := 0; i < 15; i++ { + queue.Push(&spdy.DataFrame{}, 2) + } + doneChan := make(chan bool) + go func() { + queue.Push(&spdy.DataFrame{}, 2) + close(doneChan) + }() + select { + case <-doneChan: + t.Fatalf("Push succeeded, expected to block") + case <-time.After(time.Millisecond): + break + } + + queue.Pop() + + select { + case <-doneChan: + break + case <-time.After(time.Millisecond): + t.Fatalf("Push should have succeeded, but timeout reached") + } + + for i := 0; i < 15; i++ { + queue.Pop() + } +} diff --git a/Godeps/_workspace/src/github.com/docker/spdystream/spdy/dictionary.go b/Godeps/_workspace/src/github.com/docker/spdystream/spdy/dictionary.go new file mode 100644 index 00000000000..5a5ff0e14cd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/spdystream/spdy/dictionary.go @@ -0,0 +1,187 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package spdy + +// headerDictionary is the dictionary sent to the zlib compressor/decompressor. 
+var headerDictionary = []byte{ + 0x00, 0x00, 0x00, 0x07, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x00, 0x00, 0x00, 0x04, 0x68, + 0x65, 0x61, 0x64, 0x00, 0x00, 0x00, 0x04, 0x70, + 0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x03, 0x70, + 0x75, 0x74, 0x00, 0x00, 0x00, 0x06, 0x64, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x00, 0x00, 0x00, 0x05, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x00, 0x00, 0x00, + 0x06, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x00, + 0x00, 0x00, 0x0e, 0x61, 0x63, 0x63, 0x65, 0x70, + 0x74, 0x2d, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65, + 0x74, 0x00, 0x00, 0x00, 0x0f, 0x61, 0x63, 0x63, + 0x65, 0x70, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f, + 0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x0f, + 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x2d, 0x6c, + 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x00, + 0x00, 0x00, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x70, + 0x74, 0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, + 0x00, 0x00, 0x00, 0x03, 0x61, 0x67, 0x65, 0x00, + 0x00, 0x00, 0x05, 0x61, 0x6c, 0x6c, 0x6f, 0x77, + 0x00, 0x00, 0x00, 0x0d, 0x61, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x00, 0x00, 0x00, 0x0d, 0x63, 0x61, 0x63, + 0x68, 0x65, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x00, 0x00, 0x00, 0x0a, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x00, 0x00, 0x00, 0x0c, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x2d, 0x62, 0x61, 0x73, 0x65, + 0x00, 0x00, 0x00, 0x10, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f, + 0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x10, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, + 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, + 0x00, 0x00, 0x00, 0x0e, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x65, 0x6e, 0x67, + 0x74, 0x68, 0x00, 0x00, 0x00, 0x10, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, + 0x00, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x2d, 0x6d, 0x64, 0x35, 0x00, 0x00, 0x00, + 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00, + 0x00, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x00, 0x00, + 0x00, 0x04, 0x64, 0x61, 0x74, 0x65, 0x00, 0x00, + 0x00, 0x04, 0x65, 0x74, 0x61, 0x67, 0x00, 0x00, + 0x00, 0x06, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, + 0x00, 0x00, 0x00, 0x07, 0x65, 0x78, 0x70, 0x69, + 0x72, 0x65, 0x73, 0x00, 0x00, 0x00, 0x04, 0x66, + 0x72, 0x6f, 0x6d, 0x00, 0x00, 0x00, 0x04, 0x68, + 0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x08, 0x69, + 0x66, 0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x00, + 0x00, 0x00, 0x11, 0x69, 0x66, 0x2d, 0x6d, 0x6f, + 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x2d, 0x73, + 0x69, 0x6e, 0x63, 0x65, 0x00, 0x00, 0x00, 0x0d, + 0x69, 0x66, 0x2d, 0x6e, 0x6f, 0x6e, 0x65, 0x2d, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x00, 0x00, 0x00, + 0x08, 0x69, 0x66, 0x2d, 0x72, 0x61, 0x6e, 0x67, + 0x65, 0x00, 0x00, 0x00, 0x13, 0x69, 0x66, 0x2d, + 0x75, 0x6e, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, + 0x65, 0x64, 0x2d, 0x73, 0x69, 0x6e, 0x63, 0x65, + 0x00, 0x00, 0x00, 0x0d, 0x6c, 0x61, 0x73, 0x74, + 0x2d, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, + 0x64, 0x00, 0x00, 0x00, 0x08, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00, + 0x0c, 0x6d, 0x61, 0x78, 0x2d, 0x66, 0x6f, 0x72, + 0x77, 0x61, 0x72, 0x64, 0x73, 0x00, 0x00, 0x00, + 0x06, 0x70, 0x72, 0x61, 0x67, 0x6d, 0x61, 0x00, + 0x00, 0x00, 0x12, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00, 0x00, + 0x13, 0x70, 0x72, 
0x6f, 0x78, 0x79, 0x2d, 0x61, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00, 0x05, + 0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00, 0x00, + 0x07, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x72, + 0x00, 0x00, 0x00, 0x0b, 0x72, 0x65, 0x74, 0x72, + 0x79, 0x2d, 0x61, 0x66, 0x74, 0x65, 0x72, 0x00, + 0x00, 0x00, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x00, 0x00, 0x00, 0x02, 0x74, 0x65, 0x00, + 0x00, 0x00, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, + 0x65, 0x72, 0x00, 0x00, 0x00, 0x11, 0x74, 0x72, + 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2d, 0x65, + 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x00, + 0x00, 0x00, 0x07, 0x75, 0x70, 0x67, 0x72, 0x61, + 0x64, 0x65, 0x00, 0x00, 0x00, 0x0a, 0x75, 0x73, + 0x65, 0x72, 0x2d, 0x61, 0x67, 0x65, 0x6e, 0x74, + 0x00, 0x00, 0x00, 0x04, 0x76, 0x61, 0x72, 0x79, + 0x00, 0x00, 0x00, 0x03, 0x76, 0x69, 0x61, 0x00, + 0x00, 0x00, 0x07, 0x77, 0x61, 0x72, 0x6e, 0x69, + 0x6e, 0x67, 0x00, 0x00, 0x00, 0x10, 0x77, 0x77, + 0x77, 0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, + 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00, + 0x00, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x00, 0x00, 0x00, 0x03, 0x67, 0x65, 0x74, 0x00, + 0x00, 0x00, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x00, 0x00, 0x00, 0x06, 0x32, 0x30, 0x30, + 0x20, 0x4f, 0x4b, 0x00, 0x00, 0x00, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x00, 0x00, + 0x00, 0x08, 0x48, 0x54, 0x54, 0x50, 0x2f, 0x31, + 0x2e, 0x31, 0x00, 0x00, 0x00, 0x03, 0x75, 0x72, + 0x6c, 0x00, 0x00, 0x00, 0x06, 0x70, 0x75, 0x62, + 0x6c, 0x69, 0x63, 0x00, 0x00, 0x00, 0x0a, 0x73, + 0x65, 0x74, 0x2d, 0x63, 0x6f, 0x6f, 0x6b, 0x69, + 0x65, 0x00, 0x00, 0x00, 0x0a, 0x6b, 0x65, 0x65, + 0x70, 0x2d, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x00, + 0x00, 0x00, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, + 0x6e, 0x31, 0x30, 0x30, 0x31, 0x30, 0x31, 0x32, + 0x30, 0x31, 0x32, 0x30, 0x32, 0x32, 0x30, 0x35, + 0x32, 0x30, 0x36, 0x33, 0x30, 0x30, 0x33, 0x30, + 0x32, 0x33, 0x30, 0x33, 0x33, 0x30, 0x34, 0x33, + 0x30, 0x35, 0x33, 0x30, 0x36, 0x33, 0x30, 0x37, + 0x34, 0x30, 0x32, 0x34, 0x30, 0x35, 0x34, 0x30, + 0x36, 0x34, 0x30, 0x37, 0x34, 0x30, 0x38, 0x34, + 0x30, 0x39, 0x34, 0x31, 0x30, 0x34, 0x31, 0x31, + 0x34, 0x31, 0x32, 0x34, 0x31, 0x33, 0x34, 0x31, + 0x34, 0x34, 0x31, 0x35, 0x34, 0x31, 0x36, 0x34, + 0x31, 0x37, 0x35, 0x30, 0x32, 0x35, 0x30, 0x34, + 0x35, 0x30, 0x35, 0x32, 0x30, 0x33, 0x20, 0x4e, + 0x6f, 0x6e, 0x2d, 0x41, 0x75, 0x74, 0x68, 0x6f, + 0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65, + 0x20, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x30, 0x34, 0x20, + 0x4e, 0x6f, 0x20, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x33, 0x30, 0x31, 0x20, 0x4d, 0x6f, + 0x76, 0x65, 0x64, 0x20, 0x50, 0x65, 0x72, 0x6d, + 0x61, 0x6e, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x34, + 0x30, 0x30, 0x20, 0x42, 0x61, 0x64, 0x20, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x34, 0x30, + 0x31, 0x20, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x34, 0x30, + 0x33, 0x20, 0x46, 0x6f, 0x72, 0x62, 0x69, 0x64, + 0x64, 0x65, 0x6e, 0x34, 0x30, 0x34, 0x20, 0x4e, + 0x6f, 0x74, 0x20, 0x46, 0x6f, 0x75, 0x6e, 0x64, + 0x35, 0x30, 0x30, 0x20, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x20, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x20, 0x45, 0x72, 0x72, 0x6f, + 0x72, 0x35, 0x30, 0x31, 0x20, 0x4e, 0x6f, 0x74, + 0x20, 0x49, 0x6d, 0x70, 0x6c, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x65, 0x64, 0x35, 0x30, 0x33, 0x20, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x20, + 0x55, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, + 0x62, 0x6c, 0x65, 
0x4a, 0x61, 0x6e, 0x20, 0x46, + 0x65, 0x62, 0x20, 0x4d, 0x61, 0x72, 0x20, 0x41, + 0x70, 0x72, 0x20, 0x4d, 0x61, 0x79, 0x20, 0x4a, + 0x75, 0x6e, 0x20, 0x4a, 0x75, 0x6c, 0x20, 0x41, + 0x75, 0x67, 0x20, 0x53, 0x65, 0x70, 0x74, 0x20, + 0x4f, 0x63, 0x74, 0x20, 0x4e, 0x6f, 0x76, 0x20, + 0x44, 0x65, 0x63, 0x20, 0x30, 0x30, 0x3a, 0x30, + 0x30, 0x3a, 0x30, 0x30, 0x20, 0x4d, 0x6f, 0x6e, + 0x2c, 0x20, 0x54, 0x75, 0x65, 0x2c, 0x20, 0x57, + 0x65, 0x64, 0x2c, 0x20, 0x54, 0x68, 0x75, 0x2c, + 0x20, 0x46, 0x72, 0x69, 0x2c, 0x20, 0x53, 0x61, + 0x74, 0x2c, 0x20, 0x53, 0x75, 0x6e, 0x2c, 0x20, + 0x47, 0x4d, 0x54, 0x63, 0x68, 0x75, 0x6e, 0x6b, + 0x65, 0x64, 0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f, + 0x68, 0x74, 0x6d, 0x6c, 0x2c, 0x69, 0x6d, 0x61, + 0x67, 0x65, 0x2f, 0x70, 0x6e, 0x67, 0x2c, 0x69, + 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x6a, 0x70, 0x67, + 0x2c, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x67, + 0x69, 0x66, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78, + 0x6d, 0x6c, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78, + 0x68, 0x74, 0x6d, 0x6c, 0x2b, 0x78, 0x6d, 0x6c, + 0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x70, 0x6c, + 0x61, 0x69, 0x6e, 0x2c, 0x74, 0x65, 0x78, 0x74, + 0x2f, 0x6a, 0x61, 0x76, 0x61, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x2c, 0x70, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, + 0x65, 0x6d, 0x61, 0x78, 0x2d, 0x61, 0x67, 0x65, + 0x3d, 0x67, 0x7a, 0x69, 0x70, 0x2c, 0x64, 0x65, + 0x66, 0x6c, 0x61, 0x74, 0x65, 0x2c, 0x73, 0x64, + 0x63, 0x68, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65, + 0x74, 0x3d, 0x75, 0x74, 0x66, 0x2d, 0x38, 0x63, + 0x68, 0x61, 0x72, 0x73, 0x65, 0x74, 0x3d, 0x69, + 0x73, 0x6f, 0x2d, 0x38, 0x38, 0x35, 0x39, 0x2d, + 0x31, 0x2c, 0x75, 0x74, 0x66, 0x2d, 0x2c, 0x2a, + 0x2c, 0x65, 0x6e, 0x71, 0x3d, 0x30, 0x2e, +} diff --git a/Godeps/_workspace/src/github.com/docker/spdystream/spdy/read.go b/Godeps/_workspace/src/github.com/docker/spdystream/spdy/read.go new file mode 100644 index 00000000000..9359a95015c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/spdystream/spdy/read.go @@ -0,0 +1,348 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package spdy + +import ( + "compress/zlib" + "encoding/binary" + "io" + "net/http" + "strings" +) + +func (frame *SynStreamFrame) read(h ControlFrameHeader, f *Framer) error { + return f.readSynStreamFrame(h, frame) +} + +func (frame *SynReplyFrame) read(h ControlFrameHeader, f *Framer) error { + return f.readSynReplyFrame(h, frame) +} + +func (frame *RstStreamFrame) read(h ControlFrameHeader, f *Framer) error { + frame.CFHeader = h + if err := binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { + return err + } + if err := binary.Read(f.r, binary.BigEndian, &frame.Status); err != nil { + return err + } + if frame.Status == 0 { + return &Error{InvalidControlFrame, frame.StreamId} + } + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + return nil +} + +func (frame *SettingsFrame) read(h ControlFrameHeader, f *Framer) error { + frame.CFHeader = h + var numSettings uint32 + if err := binary.Read(f.r, binary.BigEndian, &numSettings); err != nil { + return err + } + frame.FlagIdValues = make([]SettingsFlagIdValue, numSettings) + for i := uint32(0); i < numSettings; i++ { + if err := binary.Read(f.r, binary.BigEndian, &frame.FlagIdValues[i].Id); err != nil { + return err + } + frame.FlagIdValues[i].Flag = SettingsFlag((frame.FlagIdValues[i].Id & 0xff000000) >> 24) + frame.FlagIdValues[i].Id &= 0xffffff + if err := binary.Read(f.r, binary.BigEndian, &frame.FlagIdValues[i].Value); err != nil { + return err + } + } + return nil +} + +func (frame *PingFrame) read(h ControlFrameHeader, f *Framer) error { + frame.CFHeader = h + if err := binary.Read(f.r, binary.BigEndian, &frame.Id); err != nil { + return err + } + if frame.Id == 0 { + return &Error{ZeroStreamId, 0} + } + if frame.CFHeader.Flags != 0 { + return &Error{InvalidControlFrame, StreamId(frame.Id)} + } + return nil +} + +func (frame *GoAwayFrame) read(h ControlFrameHeader, f *Framer) error { + frame.CFHeader = h + if err := binary.Read(f.r, binary.BigEndian, &frame.LastGoodStreamId); err != nil { + return err + } + if frame.CFHeader.Flags != 0 { + return &Error{InvalidControlFrame, frame.LastGoodStreamId} + } + if frame.CFHeader.length != 8 { + return &Error{InvalidControlFrame, frame.LastGoodStreamId} + } + if err := binary.Read(f.r, binary.BigEndian, &frame.Status); err != nil { + return err + } + return nil +} + +func (frame *HeadersFrame) read(h ControlFrameHeader, f *Framer) error { + return f.readHeadersFrame(h, frame) +} + +func (frame *WindowUpdateFrame) read(h ControlFrameHeader, f *Framer) error { + frame.CFHeader = h + if err := binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { + return err + } + if frame.CFHeader.Flags != 0 { + return &Error{InvalidControlFrame, frame.StreamId} + } + if frame.CFHeader.length != 8 { + return &Error{InvalidControlFrame, frame.StreamId} + } + if err := binary.Read(f.r, binary.BigEndian, &frame.DeltaWindowSize); err != nil { + return err + } + return nil +} + +func newControlFrame(frameType ControlFrameType) (controlFrame, error) { + ctor, ok := cframeCtor[frameType] + if !ok { + return nil, &Error{Err: InvalidControlFrame} + } + return ctor(), nil +} + +var cframeCtor = map[ControlFrameType]func() controlFrame{ + TypeSynStream: func() controlFrame { return new(SynStreamFrame) }, + TypeSynReply: func() controlFrame { return new(SynReplyFrame) }, + TypeRstStream: func() controlFrame { return new(RstStreamFrame) }, + TypeSettings: func() controlFrame { return new(SettingsFrame) }, + TypePing: func() controlFrame { return new(PingFrame) }, + TypeGoAway: 
func() controlFrame { return new(GoAwayFrame) }, + TypeHeaders: func() controlFrame { return new(HeadersFrame) }, + TypeWindowUpdate: func() controlFrame { return new(WindowUpdateFrame) }, +} + +func (f *Framer) uncorkHeaderDecompressor(payloadSize int64) error { + if f.headerDecompressor != nil { + f.headerReader.N = payloadSize + return nil + } + f.headerReader = io.LimitedReader{R: f.r, N: payloadSize} + decompressor, err := zlib.NewReaderDict(&f.headerReader, []byte(headerDictionary)) + if err != nil { + return err + } + f.headerDecompressor = decompressor + return nil +} + +// ReadFrame reads SPDY encoded data and returns a decompressed Frame. +func (f *Framer) ReadFrame() (Frame, error) { + var firstWord uint32 + if err := binary.Read(f.r, binary.BigEndian, &firstWord); err != nil { + return nil, err + } + if firstWord&0x80000000 != 0 { + frameType := ControlFrameType(firstWord & 0xffff) + version := uint16(firstWord >> 16 & 0x7fff) + return f.parseControlFrame(version, frameType) + } + return f.parseDataFrame(StreamId(firstWord & 0x7fffffff)) +} + +func (f *Framer) parseControlFrame(version uint16, frameType ControlFrameType) (Frame, error) { + var length uint32 + if err := binary.Read(f.r, binary.BigEndian, &length); err != nil { + return nil, err + } + flags := ControlFlags((length & 0xff000000) >> 24) + length &= 0xffffff + header := ControlFrameHeader{version, frameType, flags, length} + cframe, err := newControlFrame(frameType) + if err != nil { + return nil, err + } + if err = cframe.read(header, f); err != nil { + return nil, err + } + return cframe, nil +} + +func parseHeaderValueBlock(r io.Reader, streamId StreamId) (http.Header, error) { + var numHeaders uint32 + if err := binary.Read(r, binary.BigEndian, &numHeaders); err != nil { + return nil, err + } + var e error + h := make(http.Header, int(numHeaders)) + for i := 0; i < int(numHeaders); i++ { + var length uint32 + if err := binary.Read(r, binary.BigEndian, &length); err != nil { + return nil, err + } + nameBytes := make([]byte, length) + if _, err := io.ReadFull(r, nameBytes); err != nil { + return nil, err + } + name := string(nameBytes) + if name != strings.ToLower(name) { + e = &Error{UnlowercasedHeaderName, streamId} + name = strings.ToLower(name) + } + if h[name] != nil { + e = &Error{DuplicateHeaders, streamId} + } + if err := binary.Read(r, binary.BigEndian, &length); err != nil { + return nil, err + } + value := make([]byte, length) + if _, err := io.ReadFull(r, value); err != nil { + return nil, err + } + valueList := strings.Split(string(value), headerValueSeparator) + for _, v := range valueList { + h.Add(name, v) + } + } + if e != nil { + return h, e + } + return h, nil +} + +func (f *Framer) readSynStreamFrame(h ControlFrameHeader, frame *SynStreamFrame) error { + frame.CFHeader = h + var err error + if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { + return err + } + if err = binary.Read(f.r, binary.BigEndian, &frame.AssociatedToStreamId); err != nil { + return err + } + if err = binary.Read(f.r, binary.BigEndian, &frame.Priority); err != nil { + return err + } + frame.Priority >>= 5 + if err = binary.Read(f.r, binary.BigEndian, &frame.Slot); err != nil { + return err + } + reader := f.r + if !f.headerCompressionDisabled { + err := f.uncorkHeaderDecompressor(int64(h.length - 10)) + if err != nil { + return err + } + reader = f.headerDecompressor + } + frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId) + if !f.headerCompressionDisabled && (err == io.EOF && 
f.headerReader.N == 0 || f.headerReader.N != 0) { + err = &Error{WrongCompressedPayloadSize, 0} + } + if err != nil { + return err + } + for h := range frame.Headers { + if invalidReqHeaders[h] { + return &Error{InvalidHeaderPresent, frame.StreamId} + } + } + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + return nil +} + +func (f *Framer) readSynReplyFrame(h ControlFrameHeader, frame *SynReplyFrame) error { + frame.CFHeader = h + var err error + if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { + return err + } + reader := f.r + if !f.headerCompressionDisabled { + err := f.uncorkHeaderDecompressor(int64(h.length - 4)) + if err != nil { + return err + } + reader = f.headerDecompressor + } + frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId) + if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) { + err = &Error{WrongCompressedPayloadSize, 0} + } + if err != nil { + return err + } + for h := range frame.Headers { + if invalidRespHeaders[h] { + return &Error{InvalidHeaderPresent, frame.StreamId} + } + } + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + return nil +} + +func (f *Framer) readHeadersFrame(h ControlFrameHeader, frame *HeadersFrame) error { + frame.CFHeader = h + var err error + if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { + return err + } + reader := f.r + if !f.headerCompressionDisabled { + err := f.uncorkHeaderDecompressor(int64(h.length - 4)) + if err != nil { + return err + } + reader = f.headerDecompressor + } + frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId) + if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) { + err = &Error{WrongCompressedPayloadSize, 0} + } + if err != nil { + return err + } + var invalidHeaders map[string]bool + if frame.StreamId%2 == 0 { + invalidHeaders = invalidReqHeaders + } else { + invalidHeaders = invalidRespHeaders + } + for h := range frame.Headers { + if invalidHeaders[h] { + return &Error{InvalidHeaderPresent, frame.StreamId} + } + } + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + return nil +} + +func (f *Framer) parseDataFrame(streamId StreamId) (*DataFrame, error) { + var length uint32 + if err := binary.Read(f.r, binary.BigEndian, &length); err != nil { + return nil, err + } + var frame DataFrame + frame.StreamId = streamId + frame.Flags = DataFlags(length >> 24) + length &= 0xffffff + frame.Data = make([]byte, length) + if _, err := io.ReadFull(f.r, frame.Data); err != nil { + return nil, err + } + if frame.StreamId == 0 { + return nil, &Error{ZeroStreamId, 0} + } + return &frame, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/spdystream/spdy/spdy_test.go b/Godeps/_workspace/src/github.com/docker/spdystream/spdy/spdy_test.go new file mode 100644 index 00000000000..ce581f1d056 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/spdystream/spdy/spdy_test.go @@ -0,0 +1,644 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package spdy + +import ( + "bytes" + "compress/zlib" + "encoding/base64" + "io" + "io/ioutil" + "net/http" + "reflect" + "testing" +) + +var HeadersFixture = http.Header{ + "Url": []string{"http://www.google.com/"}, + "Method": []string{"get"}, + "Version": []string{"http/1.1"}, +} + +func TestHeaderParsing(t *testing.T) { + var headerValueBlockBuf bytes.Buffer + writeHeaderValueBlock(&headerValueBlockBuf, HeadersFixture) + const bogusStreamId = 1 + newHeaders, err := parseHeaderValueBlock(&headerValueBlockBuf, bogusStreamId) + if err != nil { + t.Fatal("parseHeaderValueBlock:", err) + } + if !reflect.DeepEqual(HeadersFixture, newHeaders) { + t.Fatal("got: ", newHeaders, "\nwant: ", HeadersFixture) + } +} + +func TestCreateParseSynStreamFrameCompressionDisable(t *testing.T) { + buffer := new(bytes.Buffer) + // Fixture framer for no compression test. + framer := &Framer{ + headerCompressionDisabled: true, + w: buffer, + headerBuf: new(bytes.Buffer), + r: buffer, + } + synStreamFrame := SynStreamFrame{ + CFHeader: ControlFrameHeader{ + version: Version, + frameType: TypeSynStream, + }, + StreamId: 2, + Headers: HeadersFixture, + } + if err := framer.WriteFrame(&synStreamFrame); err != nil { + t.Fatal("WriteFrame without compression:", err) + } + frame, err := framer.ReadFrame() + if err != nil { + t.Fatal("ReadFrame without compression:", err) + } + parsedSynStreamFrame, ok := frame.(*SynStreamFrame) + if !ok { + t.Fatal("Parsed incorrect frame type:", frame) + } + if !reflect.DeepEqual(synStreamFrame, *parsedSynStreamFrame) { + t.Fatal("got: ", *parsedSynStreamFrame, "\nwant: ", synStreamFrame) + } +} + +func TestCreateParseSynStreamFrameCompressionEnable(t *testing.T) { + buffer := new(bytes.Buffer) + framer, err := NewFramer(buffer, buffer) + synStreamFrame := SynStreamFrame{ + CFHeader: ControlFrameHeader{ + version: Version, + frameType: TypeSynStream, + }, + StreamId: 2, + Headers: HeadersFixture, + } + if err != nil { + t.Fatal("Failed to create new framer:", err) + } + if err := framer.WriteFrame(&synStreamFrame); err != nil { + t.Fatal("WriteFrame with compression:", err) + } + frame, err := framer.ReadFrame() + if err != nil { + t.Fatal("ReadFrame with compression:", err) + } + parsedSynStreamFrame, ok := frame.(*SynStreamFrame) + if !ok { + t.Fatal("Parsed incorrect frame type:", frame) + } + if !reflect.DeepEqual(synStreamFrame, *parsedSynStreamFrame) { + t.Fatal("got: ", *parsedSynStreamFrame, "\nwant: ", synStreamFrame) + } +} + +func TestCreateParseSynReplyFrameCompressionDisable(t *testing.T) { + buffer := new(bytes.Buffer) + framer := &Framer{ + headerCompressionDisabled: true, + w: buffer, + headerBuf: new(bytes.Buffer), + r: buffer, + } + synReplyFrame := SynReplyFrame{ + CFHeader: ControlFrameHeader{ + version: Version, + frameType: TypeSynReply, + }, + StreamId: 2, + Headers: HeadersFixture, + } + if err := framer.WriteFrame(&synReplyFrame); err != nil { + t.Fatal("WriteFrame without compression:", err) + } + frame, err := framer.ReadFrame() + if err != nil { + t.Fatal("ReadFrame without compression:", err) + } + parsedSynReplyFrame, ok := frame.(*SynReplyFrame) + if !ok { + t.Fatal("Parsed incorrect frame type:", frame) + } + if !reflect.DeepEqual(synReplyFrame, *parsedSynReplyFrame) { + t.Fatal("got: ", *parsedSynReplyFrame, "\nwant: ", synReplyFrame) + } +} + +func TestCreateParseSynReplyFrameCompressionEnable(t *testing.T) { + buffer := new(bytes.Buffer) + framer, err := NewFramer(buffer, buffer) + synReplyFrame := SynReplyFrame{ + CFHeader: ControlFrameHeader{ + 
version: Version, + frameType: TypeSynReply, + }, + StreamId: 2, + Headers: HeadersFixture, + } + if err != nil { + t.Fatal("Failed to create new framer:", err) + } + if err := framer.WriteFrame(&synReplyFrame); err != nil { + t.Fatal("WriteFrame with compression:", err) + } + frame, err := framer.ReadFrame() + if err != nil { + t.Fatal("ReadFrame with compression:", err) + } + parsedSynReplyFrame, ok := frame.(*SynReplyFrame) + if !ok { + t.Fatal("Parsed incorrect frame type:", frame) + } + if !reflect.DeepEqual(synReplyFrame, *parsedSynReplyFrame) { + t.Fatal("got: ", *parsedSynReplyFrame, "\nwant: ", synReplyFrame) + } +} + +func TestCreateParseRstStream(t *testing.T) { + buffer := new(bytes.Buffer) + framer, err := NewFramer(buffer, buffer) + if err != nil { + t.Fatal("Failed to create new framer:", err) + } + rstStreamFrame := RstStreamFrame{ + CFHeader: ControlFrameHeader{ + version: Version, + frameType: TypeRstStream, + }, + StreamId: 1, + Status: InvalidStream, + } + if err := framer.WriteFrame(&rstStreamFrame); err != nil { + t.Fatal("WriteFrame:", err) + } + frame, err := framer.ReadFrame() + if err != nil { + t.Fatal("ReadFrame:", err) + } + parsedRstStreamFrame, ok := frame.(*RstStreamFrame) + if !ok { + t.Fatal("Parsed incorrect frame type:", frame) + } + if !reflect.DeepEqual(rstStreamFrame, *parsedRstStreamFrame) { + t.Fatal("got: ", *parsedRstStreamFrame, "\nwant: ", rstStreamFrame) + } +} + +func TestCreateParseSettings(t *testing.T) { + buffer := new(bytes.Buffer) + framer, err := NewFramer(buffer, buffer) + if err != nil { + t.Fatal("Failed to create new framer:", err) + } + settingsFrame := SettingsFrame{ + CFHeader: ControlFrameHeader{ + version: Version, + frameType: TypeSettings, + }, + FlagIdValues: []SettingsFlagIdValue{ + {FlagSettingsPersistValue, SettingsCurrentCwnd, 10}, + {FlagSettingsPersisted, SettingsUploadBandwidth, 1}, + }, + } + if err := framer.WriteFrame(&settingsFrame); err != nil { + t.Fatal("WriteFrame:", err) + } + frame, err := framer.ReadFrame() + if err != nil { + t.Fatal("ReadFrame:", err) + } + parsedSettingsFrame, ok := frame.(*SettingsFrame) + if !ok { + t.Fatal("Parsed incorrect frame type:", frame) + } + if !reflect.DeepEqual(settingsFrame, *parsedSettingsFrame) { + t.Fatal("got: ", *parsedSettingsFrame, "\nwant: ", settingsFrame) + } +} + +func TestCreateParsePing(t *testing.T) { + buffer := new(bytes.Buffer) + framer, err := NewFramer(buffer, buffer) + if err != nil { + t.Fatal("Failed to create new framer:", err) + } + pingFrame := PingFrame{ + CFHeader: ControlFrameHeader{ + version: Version, + frameType: TypePing, + }, + Id: 31337, + } + if err := framer.WriteFrame(&pingFrame); err != nil { + t.Fatal("WriteFrame:", err) + } + if pingFrame.CFHeader.Flags != 0 { + t.Fatal("Incorrect frame type:", pingFrame) + } + frame, err := framer.ReadFrame() + if err != nil { + t.Fatal("ReadFrame:", err) + } + parsedPingFrame, ok := frame.(*PingFrame) + if !ok { + t.Fatal("Parsed incorrect frame type:", frame) + } + if parsedPingFrame.CFHeader.Flags != 0 { + t.Fatal("Parsed incorrect frame type:", parsedPingFrame) + } + if !reflect.DeepEqual(pingFrame, *parsedPingFrame) { + t.Fatal("got: ", *parsedPingFrame, "\nwant: ", pingFrame) + } +} + +func TestCreateParseGoAway(t *testing.T) { + buffer := new(bytes.Buffer) + framer, err := NewFramer(buffer, buffer) + if err != nil { + t.Fatal("Failed to create new framer:", err) + } + goAwayFrame := GoAwayFrame{ + CFHeader: ControlFrameHeader{ + version: Version, + frameType: TypeGoAway, + }, + 
LastGoodStreamId: 31337, + Status: 1, + } + if err := framer.WriteFrame(&goAwayFrame); err != nil { + t.Fatal("WriteFrame:", err) + } + if goAwayFrame.CFHeader.Flags != 0 { + t.Fatal("Incorrect frame type:", goAwayFrame) + } + if goAwayFrame.CFHeader.length != 8 { + t.Fatal("Incorrect frame type:", goAwayFrame) + } + frame, err := framer.ReadFrame() + if err != nil { + t.Fatal("ReadFrame:", err) + } + parsedGoAwayFrame, ok := frame.(*GoAwayFrame) + if !ok { + t.Fatal("Parsed incorrect frame type:", frame) + } + if parsedGoAwayFrame.CFHeader.Flags != 0 { + t.Fatal("Incorrect frame type:", parsedGoAwayFrame) + } + if parsedGoAwayFrame.CFHeader.length != 8 { + t.Fatal("Incorrect frame type:", parsedGoAwayFrame) + } + if !reflect.DeepEqual(goAwayFrame, *parsedGoAwayFrame) { + t.Fatal("got: ", *parsedGoAwayFrame, "\nwant: ", goAwayFrame) + } +} + +func TestCreateParseHeadersFrame(t *testing.T) { + buffer := new(bytes.Buffer) + framer := &Framer{ + headerCompressionDisabled: true, + w: buffer, + headerBuf: new(bytes.Buffer), + r: buffer, + } + headersFrame := HeadersFrame{ + CFHeader: ControlFrameHeader{ + version: Version, + frameType: TypeHeaders, + }, + StreamId: 2, + } + headersFrame.Headers = HeadersFixture + if err := framer.WriteFrame(&headersFrame); err != nil { + t.Fatal("WriteFrame without compression:", err) + } + frame, err := framer.ReadFrame() + if err != nil { + t.Fatal("ReadFrame without compression:", err) + } + parsedHeadersFrame, ok := frame.(*HeadersFrame) + if !ok { + t.Fatal("Parsed incorrect frame type:", frame) + } + if !reflect.DeepEqual(headersFrame, *parsedHeadersFrame) { + t.Fatal("got: ", *parsedHeadersFrame, "\nwant: ", headersFrame) + } +} + +func TestCreateParseHeadersFrameCompressionEnable(t *testing.T) { + buffer := new(bytes.Buffer) + headersFrame := HeadersFrame{ + CFHeader: ControlFrameHeader{ + version: Version, + frameType: TypeHeaders, + }, + StreamId: 2, + } + headersFrame.Headers = HeadersFixture + + framer, err := NewFramer(buffer, buffer) + if err != nil { + t.Fatal("Failed to create new framer:", err) + } + if err := framer.WriteFrame(&headersFrame); err != nil { + t.Fatal("WriteFrame with compression:", err) + } + frame, err := framer.ReadFrame() + if err != nil { + t.Fatal("ReadFrame with compression:", err) + } + parsedHeadersFrame, ok := frame.(*HeadersFrame) + if !ok { + t.Fatal("Parsed incorrect frame type:", frame) + } + if !reflect.DeepEqual(headersFrame, *parsedHeadersFrame) { + t.Fatal("got: ", *parsedHeadersFrame, "\nwant: ", headersFrame) + } +} + +func TestCreateParseWindowUpdateFrame(t *testing.T) { + buffer := new(bytes.Buffer) + framer, err := NewFramer(buffer, buffer) + if err != nil { + t.Fatal("Failed to create new framer:", err) + } + windowUpdateFrame := WindowUpdateFrame{ + CFHeader: ControlFrameHeader{ + version: Version, + frameType: TypeWindowUpdate, + }, + StreamId: 31337, + DeltaWindowSize: 1, + } + if err := framer.WriteFrame(&windowUpdateFrame); err != nil { + t.Fatal("WriteFrame:", err) + } + if windowUpdateFrame.CFHeader.Flags != 0 { + t.Fatal("Incorrect frame type:", windowUpdateFrame) + } + if windowUpdateFrame.CFHeader.length != 8 { + t.Fatal("Incorrect frame type:", windowUpdateFrame) + } + frame, err := framer.ReadFrame() + if err != nil { + t.Fatal("ReadFrame:", err) + } + parsedWindowUpdateFrame, ok := frame.(*WindowUpdateFrame) + if !ok { + t.Fatal("Parsed incorrect frame type:", frame) + } + if parsedWindowUpdateFrame.CFHeader.Flags != 0 { + t.Fatal("Incorrect frame type:", parsedWindowUpdateFrame) + } + if parsedWindowUpdateFrame.CFHeader.length != 8 { + t.Fatal("Incorrect frame 
type:", parsedWindowUpdateFrame) + } + if !reflect.DeepEqual(windowUpdateFrame, *parsedWindowUpdateFrame) { + t.Fatal("got: ", *parsedWindowUpdateFrame, "\nwant: ", windowUpdateFrame) + } +} + +func TestCreateParseDataFrame(t *testing.T) { + buffer := new(bytes.Buffer) + framer, err := NewFramer(buffer, buffer) + if err != nil { + t.Fatal("Failed to create new framer:", err) + } + dataFrame := DataFrame{ + StreamId: 1, + Data: []byte{'h', 'e', 'l', 'l', 'o'}, + } + if err := framer.WriteFrame(&dataFrame); err != nil { + t.Fatal("WriteFrame:", err) + } + frame, err := framer.ReadFrame() + if err != nil { + t.Fatal("ReadFrame:", err) + } + parsedDataFrame, ok := frame.(*DataFrame) + if !ok { + t.Fatal("Parsed incorrect frame type:", frame) + } + if !reflect.DeepEqual(dataFrame, *parsedDataFrame) { + t.Fatal("got: ", *parsedDataFrame, "\nwant: ", dataFrame) + } +} + +func TestCompressionContextAcrossFrames(t *testing.T) { + buffer := new(bytes.Buffer) + framer, err := NewFramer(buffer, buffer) + if err != nil { + t.Fatal("Failed to create new framer:", err) + } + headersFrame := HeadersFrame{ + CFHeader: ControlFrameHeader{ + version: Version, + frameType: TypeHeaders, + }, + StreamId: 2, + Headers: HeadersFixture, + } + if err := framer.WriteFrame(&headersFrame); err != nil { + t.Fatal("WriteFrame (HEADERS):", err) + } + synStreamFrame := SynStreamFrame{ + ControlFrameHeader{ + Version, + TypeSynStream, + 0, // Flags + 0, // length + }, + 2, // StreamId + 0, // AssociatedTOStreamID + 0, // Priority + 1, // Slot + nil, // Headers + } + synStreamFrame.Headers = HeadersFixture + + if err := framer.WriteFrame(&synStreamFrame); err != nil { + t.Fatal("WriteFrame (SYN_STREAM):", err) + } + frame, err := framer.ReadFrame() + if err != nil { + t.Fatal("ReadFrame (HEADERS):", err, buffer.Bytes()) + } + parsedHeadersFrame, ok := frame.(*HeadersFrame) + if !ok { + t.Fatalf("expected HeadersFrame; got %T %v", frame, frame) + } + if !reflect.DeepEqual(headersFrame, *parsedHeadersFrame) { + t.Fatal("got: ", *parsedHeadersFrame, "\nwant: ", headersFrame) + } + frame, err = framer.ReadFrame() + if err != nil { + t.Fatal("ReadFrame (SYN_STREAM):", err, buffer.Bytes()) + } + parsedSynStreamFrame, ok := frame.(*SynStreamFrame) + if !ok { + t.Fatalf("expected SynStreamFrame; got %T %v", frame, frame) + } + if !reflect.DeepEqual(synStreamFrame, *parsedSynStreamFrame) { + t.Fatal("got: ", *parsedSynStreamFrame, "\nwant: ", synStreamFrame) + } +} + +func TestMultipleSPDYFrames(t *testing.T) { + // Initialize the framers. + pr1, pw1 := io.Pipe() + pr2, pw2 := io.Pipe() + writer, err := NewFramer(pw1, pr2) + if err != nil { + t.Fatal("Failed to create writer:", err) + } + reader, err := NewFramer(pw2, pr1) + if err != nil { + t.Fatal("Failed to create reader:", err) + } + + // Set up the frames we're actually transferring. + headersFrame := HeadersFrame{ + CFHeader: ControlFrameHeader{ + version: Version, + frameType: TypeHeaders, + }, + StreamId: 2, + Headers: HeadersFixture, + } + synStreamFrame := SynStreamFrame{ + CFHeader: ControlFrameHeader{ + version: Version, + frameType: TypeSynStream, + }, + StreamId: 2, + Headers: HeadersFixture, + } + + // Start the goroutines to write the frames. + go func() { + if err := writer.WriteFrame(&headersFrame); err != nil { + t.Fatal("WriteFrame (HEADERS): ", err) + } + if err := writer.WriteFrame(&synStreamFrame); err != nil { + t.Fatal("WriteFrame (SYN_STREAM): ", err) + } + }() + + // Read the frames and verify they look as expected. 
+ frame, err := reader.ReadFrame() + if err != nil { + t.Fatal("ReadFrame (HEADERS): ", err) + } + parsedHeadersFrame, ok := frame.(*HeadersFrame) + if !ok { + t.Fatal("Parsed incorrect frame type:", frame) + } + if !reflect.DeepEqual(headersFrame, *parsedHeadersFrame) { + t.Fatal("got: ", *parsedHeadersFrame, "\nwant: ", headersFrame) + } + frame, err = reader.ReadFrame() + if err != nil { + t.Fatal("ReadFrame (SYN_STREAM):", err) + } + parsedSynStreamFrame, ok := frame.(*SynStreamFrame) + if !ok { + t.Fatal("Parsed incorrect frame type.") + } + if !reflect.DeepEqual(synStreamFrame, *parsedSynStreamFrame) { + t.Fatal("got: ", *parsedSynStreamFrame, "\nwant: ", synStreamFrame) + } +} + +func TestReadMalformedZlibHeader(t *testing.T) { + // These were constructed by corrupting the first byte of the zlib + // header after writing. + malformedStructs := map[string]string{ + "SynStreamFrame": "gAIAAQAAABgAAAACAAAAAAAAF/nfolGyYmAAAAAA//8=", + "SynReplyFrame": "gAIAAgAAABQAAAACAAAX+d+iUbJiYAAAAAD//w==", + "HeadersFrame": "gAIACAAAABQAAAACAAAX+d+iUbJiYAAAAAD//w==", + } + for name, bad := range malformedStructs { + b, err := base64.StdEncoding.DecodeString(bad) + if err != nil { + t.Errorf("Unable to decode base64 encoded frame %s: %v", name, err) + } + buf := bytes.NewBuffer(b) + reader, err := NewFramer(buf, buf) + if err != nil { + t.Fatalf("NewFramer: %v", err) + } + _, err = reader.ReadFrame() + if err != zlib.ErrHeader { + t.Errorf("Frame %s, expected: %#v, actual: %#v", name, zlib.ErrHeader, err) + } + } +} + +// TODO: these tests are too weak for updating SPDY spec. Fix me. + +type zeroStream struct { + frame Frame + encoded string +} + +var streamIdZeroFrames = map[string]zeroStream{ + "SynStreamFrame": { + &SynStreamFrame{StreamId: 0}, + "gAIAAQAAABgAAAAAAAAAAAAAePnfolGyYmAAAAAA//8=", + }, + "SynReplyFrame": { + &SynReplyFrame{StreamId: 0}, + "gAIAAgAAABQAAAAAAAB4+d+iUbJiYAAAAAD//w==", + }, + "RstStreamFrame": { + &RstStreamFrame{StreamId: 0}, + "gAIAAwAAAAgAAAAAAAAAAA==", + }, + "HeadersFrame": { + &HeadersFrame{StreamId: 0}, + "gAIACAAAABQAAAAAAAB4+d+iUbJiYAAAAAD//w==", + }, + "DataFrame": { + &DataFrame{StreamId: 0}, + "AAAAAAAAAAA=", + }, + "PingFrame": { + &PingFrame{Id: 0}, + "gAIABgAAAAQAAAAA", + }, +} + +func TestNoZeroStreamId(t *testing.T) { + t.Log("skipping") // TODO: update to work with SPDY3 + return + + for name, f := range streamIdZeroFrames { + b, err := base64.StdEncoding.DecodeString(f.encoded) + if err != nil { + t.Errorf("Unable to decode base64 encoded frame %s: %v", f, err) + continue + } + framer, err := NewFramer(ioutil.Discard, bytes.NewReader(b)) + if err != nil { + t.Fatalf("NewFramer: %v", err) + } + err = framer.WriteFrame(f.frame) + checkZeroStreamId(t, name, "WriteFrame", err) + + _, err = framer.ReadFrame() + checkZeroStreamId(t, name, "ReadFrame", err) + } +} + +func checkZeroStreamId(t *testing.T, frame string, method string, err error) { + if err == nil { + t.Errorf("%s ZeroStreamId, no error on %s", method, frame) + return + } + eerr, ok := err.(*Error) + if !ok || eerr.Err != ZeroStreamId { + t.Errorf("%s ZeroStreamId, incorrect error %#v, frame %s", method, eerr, frame) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/spdystream/spdy/types.go b/Godeps/_workspace/src/github.com/docker/spdystream/spdy/types.go new file mode 100644 index 00000000000..7b6ee9c6f2b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/spdystream/spdy/types.go @@ -0,0 +1,275 @@ +// Copyright 2011 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package spdy implements the SPDY protocol (currently SPDY/3), described in +// http://www.chromium.org/spdy/spdy-protocol/spdy-protocol-draft3. +package spdy + +import ( + "bytes" + "compress/zlib" + "io" + "net/http" +) + +// Version is the protocol version number that this package implements. +const Version = 3 + +// ControlFrameType stores the type field in a control frame header. +type ControlFrameType uint16 + +const ( + TypeSynStream ControlFrameType = 0x0001 + TypeSynReply = 0x0002 + TypeRstStream = 0x0003 + TypeSettings = 0x0004 + TypePing = 0x0006 + TypeGoAway = 0x0007 + TypeHeaders = 0x0008 + TypeWindowUpdate = 0x0009 +) + +// ControlFlags are the flags that can be set on a control frame. +type ControlFlags uint8 + +const ( + ControlFlagFin ControlFlags = 0x01 + ControlFlagUnidirectional = 0x02 + ControlFlagSettingsClearSettings = 0x01 +) + +// DataFlags are the flags that can be set on a data frame. +type DataFlags uint8 + +const ( + DataFlagFin DataFlags = 0x01 +) + +// MaxDataLength is the maximum number of bytes that can be stored in one frame. +const MaxDataLength = 1<<24 - 1 + +// headerValueSeparator separates multiple header values. +const headerValueSeparator = "\x00" + +// Frame is a single SPDY frame in its unpacked in-memory representation. Use +// Framer to read and write it. +type Frame interface { + write(f *Framer) error +} + +// ControlFrameHeader contains all the fields in a control frame header, +// in its unpacked in-memory representation. +type ControlFrameHeader struct { + // Note, high bit is the "Control" bit. + version uint16 // spdy version number + frameType ControlFrameType + Flags ControlFlags + length uint32 // length of data field +} + +type controlFrame interface { + Frame + read(h ControlFrameHeader, f *Framer) error +} + +// StreamId represents a 31-bit value identifying the stream. +type StreamId uint32 + +// SynStreamFrame is the unpacked, in-memory representation of a SYN_STREAM +// frame. +type SynStreamFrame struct { + CFHeader ControlFrameHeader + StreamId StreamId + AssociatedToStreamId StreamId // stream id for a stream which this stream is associated to + Priority uint8 // priority of this frame (3-bit) + Slot uint8 // index in the server's credential vector of the client certificate + Headers http.Header +} + +// SynReplyFrame is the unpacked, in-memory representation of a SYN_REPLY frame. +type SynReplyFrame struct { + CFHeader ControlFrameHeader + StreamId StreamId + Headers http.Header +} + +// RstStreamStatus represents the status that led to a RST_STREAM. +type RstStreamStatus uint32 + +const ( + ProtocolError RstStreamStatus = iota + 1 + InvalidStream + RefusedStream + UnsupportedVersion + Cancel + InternalError + FlowControlError + StreamInUse + StreamAlreadyClosed + InvalidCredentials + FrameTooLarge +) + +// RstStreamFrame is the unpacked, in-memory representation of a RST_STREAM +// frame. +type RstStreamFrame struct { + CFHeader ControlFrameHeader + StreamId StreamId + Status RstStreamStatus +} + +// SettingsFlag represents a flag in a SETTINGS frame. +type SettingsFlag uint8 + +const ( + FlagSettingsPersistValue SettingsFlag = 0x1 + FlagSettingsPersisted = 0x2 +) + +// SettingsId represents the id of an id/value pair in a SETTINGS frame. 
+type SettingsId uint32 + +const ( + SettingsUploadBandwidth SettingsId = iota + 1 + SettingsDownloadBandwidth + SettingsRoundTripTime + SettingsMaxConcurrentStreams + SettingsCurrentCwnd + SettingsDownloadRetransRate + SettingsInitialWindowSize + SettingsClientCretificateVectorSize +) + +// SettingsFlagIdValue is the unpacked, in-memory representation of the +// combined flag/id/value for a setting in a SETTINGS frame. +type SettingsFlagIdValue struct { + Flag SettingsFlag + Id SettingsId + Value uint32 +} + +// SettingsFrame is the unpacked, in-memory representation of a SPDY +// SETTINGS frame. +type SettingsFrame struct { + CFHeader ControlFrameHeader + FlagIdValues []SettingsFlagIdValue +} + +// PingFrame is the unpacked, in-memory representation of a PING frame. +type PingFrame struct { + CFHeader ControlFrameHeader + Id uint32 // unique id for this ping, from server is even, from client is odd. +} + +// GoAwayStatus represents the status in a GoAwayFrame. +type GoAwayStatus uint32 + +const ( + GoAwayOK GoAwayStatus = iota + GoAwayProtocolError + GoAwayInternalError +) + +// GoAwayFrame is the unpacked, in-memory representation of a GOAWAY frame. +type GoAwayFrame struct { + CFHeader ControlFrameHeader + LastGoodStreamId StreamId // last stream id which was accepted by sender + Status GoAwayStatus +} + +// HeadersFrame is the unpacked, in-memory representation of a HEADERS frame. +type HeadersFrame struct { + CFHeader ControlFrameHeader + StreamId StreamId + Headers http.Header +} + +// WindowUpdateFrame is the unpacked, in-memory representation of a +// WINDOW_UPDATE frame. +type WindowUpdateFrame struct { + CFHeader ControlFrameHeader + StreamId StreamId + DeltaWindowSize uint32 // additional number of bytes to existing window size +} + +// TODO: Implement credential frame and related methods. + +// DataFrame is the unpacked, in-memory representation of a DATA frame. +type DataFrame struct { + // Note, high bit is the "Control" bit. Should be 0 for data frames. + StreamId StreamId + Flags DataFlags + Data []byte // payload data of this frame +} + +// A SPDY specific error. +type ErrorCode string + +const ( + UnlowercasedHeaderName ErrorCode = "header was not lowercased" + DuplicateHeaders = "multiple headers with same name" + WrongCompressedPayloadSize = "compressed payload size was incorrect" + UnknownFrameType = "unknown frame type" + InvalidControlFrame = "invalid control frame" + InvalidDataFrame = "invalid data frame" + InvalidHeaderPresent = "frame contained invalid header" + ZeroStreamId = "stream id zero is disallowed" +) + +// Error contains both the type of error and additional values. StreamId is 0 +// if Error is not associated with a stream. +type Error struct { + Err ErrorCode + StreamId StreamId +} + +func (e *Error) Error() string { + return string(e.Err) +} + +var invalidReqHeaders = map[string]bool{ + "Connection": true, + "Host": true, + "Keep-Alive": true, + "Proxy-Connection": true, + "Transfer-Encoding": true, +} + +var invalidRespHeaders = map[string]bool{ + "Connection": true, + "Keep-Alive": true, + "Proxy-Connection": true, + "Transfer-Encoding": true, +} + +// Framer handles serializing/deserializing SPDY frames, including compressing/ +// decompressing payloads. 
+type Framer struct { + headerCompressionDisabled bool + w io.Writer + headerBuf *bytes.Buffer + headerCompressor *zlib.Writer + r io.Reader + headerReader io.LimitedReader + headerDecompressor io.ReadCloser +} + +// NewFramer allocates a new Framer for a given SPDY connection, represented by +// a io.Writer and io.Reader. Note that Framer will read and write individual fields +// from/to the Reader and Writer, so the caller should pass in an appropriately +// buffered implementation to optimize performance. +func NewFramer(w io.Writer, r io.Reader) (*Framer, error) { + compressBuf := new(bytes.Buffer) + compressor, err := zlib.NewWriterLevelDict(compressBuf, zlib.BestCompression, []byte(headerDictionary)) + if err != nil { + return nil, err + } + framer := &Framer{ + w: w, + headerBuf: compressBuf, + headerCompressor: compressor, + r: r, + } + return framer, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/spdystream/spdy/write.go b/Godeps/_workspace/src/github.com/docker/spdystream/spdy/write.go new file mode 100644 index 00000000000..b212f66a235 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/spdystream/spdy/write.go @@ -0,0 +1,318 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package spdy + +import ( + "encoding/binary" + "io" + "net/http" + "strings" +) + +func (frame *SynStreamFrame) write(f *Framer) error { + return f.writeSynStreamFrame(frame) +} + +func (frame *SynReplyFrame) write(f *Framer) error { + return f.writeSynReplyFrame(frame) +} + +func (frame *RstStreamFrame) write(f *Framer) (err error) { + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeRstStream + frame.CFHeader.Flags = 0 + frame.CFHeader.length = 8 + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return + } + if frame.Status == 0 { + return &Error{InvalidControlFrame, frame.StreamId} + } + if err = binary.Write(f.w, binary.BigEndian, frame.Status); err != nil { + return + } + return +} + +func (frame *SettingsFrame) write(f *Framer) (err error) { + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeSettings + frame.CFHeader.length = uint32(len(frame.FlagIdValues)*8 + 4) + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, uint32(len(frame.FlagIdValues))); err != nil { + return + } + for _, flagIdValue := range frame.FlagIdValues { + flagId := uint32(flagIdValue.Flag)<<24 | uint32(flagIdValue.Id) + if err = binary.Write(f.w, binary.BigEndian, flagId); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, flagIdValue.Value); err != nil { + return + } + } + return +} + +func (frame *PingFrame) write(f *Framer) (err error) { + if frame.Id == 0 { + return &Error{ZeroStreamId, 0} + } + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypePing + frame.CFHeader.Flags = 0 + frame.CFHeader.length = 4 + + // Serialize frame to Writer. 
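+	// On the wire this is just the 8-byte control frame header followed by
+	// the 4-byte ping id; CFHeader.length was set to 4 above to match.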
+ if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.Id); err != nil { + return + } + return +} + +func (frame *GoAwayFrame) write(f *Framer) (err error) { + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeGoAway + frame.CFHeader.Flags = 0 + frame.CFHeader.length = 8 + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.LastGoodStreamId); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.Status); err != nil { + return + } + return nil +} + +func (frame *HeadersFrame) write(f *Framer) error { + return f.writeHeadersFrame(frame) +} + +func (frame *WindowUpdateFrame) write(f *Framer) (err error) { + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeWindowUpdate + frame.CFHeader.Flags = 0 + frame.CFHeader.length = 8 + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.DeltaWindowSize); err != nil { + return + } + return nil +} + +func (frame *DataFrame) write(f *Framer) error { + return f.writeDataFrame(frame) +} + +// WriteFrame writes a frame. +func (f *Framer) WriteFrame(frame Frame) error { + return frame.write(f) +} + +func writeControlFrameHeader(w io.Writer, h ControlFrameHeader) error { + if err := binary.Write(w, binary.BigEndian, 0x8000|h.version); err != nil { + return err + } + if err := binary.Write(w, binary.BigEndian, h.frameType); err != nil { + return err + } + flagsAndLength := uint32(h.Flags)<<24 | h.length + if err := binary.Write(w, binary.BigEndian, flagsAndLength); err != nil { + return err + } + return nil +} + +func writeHeaderValueBlock(w io.Writer, h http.Header) (n int, err error) { + n = 0 + if err = binary.Write(w, binary.BigEndian, uint32(len(h))); err != nil { + return + } + n += 2 + for name, values := range h { + if err = binary.Write(w, binary.BigEndian, uint32(len(name))); err != nil { + return + } + n += 2 + name = strings.ToLower(name) + if _, err = io.WriteString(w, name); err != nil { + return + } + n += len(name) + v := strings.Join(values, headerValueSeparator) + if err = binary.Write(w, binary.BigEndian, uint32(len(v))); err != nil { + return + } + n += 2 + if _, err = io.WriteString(w, v); err != nil { + return + } + n += len(v) + } + return +} + +func (f *Framer) writeSynStreamFrame(frame *SynStreamFrame) (err error) { + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + // Marshal the headers. + var writer io.Writer = f.headerBuf + if !f.headerCompressionDisabled { + writer = f.headerCompressor + } + if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil { + return + } + if !f.headerCompressionDisabled { + f.headerCompressor.Flush() + } + + // Set ControlFrameHeader. + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeSynStream + frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 10) + + // Serialize frame to Writer. 
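+	// Layout after the control frame header: 4-byte stream id, 4-byte
+	// associated stream id, the 3-bit priority packed into the top of one
+	// byte (hence the <<5 below), one credential-slot byte, then the
+	// compressed header block; the +10 in CFHeader.length above covers
+	// exactly those ten fixed bytes.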
+ if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return err + } + if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return err + } + if err = binary.Write(f.w, binary.BigEndian, frame.AssociatedToStreamId); err != nil { + return err + } + if err = binary.Write(f.w, binary.BigEndian, frame.Priority<<5); err != nil { + return err + } + if err = binary.Write(f.w, binary.BigEndian, frame.Slot); err != nil { + return err + } + if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil { + return err + } + f.headerBuf.Reset() + return nil +} + +func (f *Framer) writeSynReplyFrame(frame *SynReplyFrame) (err error) { + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + // Marshal the headers. + var writer io.Writer = f.headerBuf + if !f.headerCompressionDisabled { + writer = f.headerCompressor + } + if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil { + return + } + if !f.headerCompressionDisabled { + f.headerCompressor.Flush() + } + + // Set ControlFrameHeader. + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeSynReply + frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 4) + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return + } + if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil { + return + } + f.headerBuf.Reset() + return +} + +func (f *Framer) writeHeadersFrame(frame *HeadersFrame) (err error) { + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + // Marshal the headers. + var writer io.Writer = f.headerBuf + if !f.headerCompressionDisabled { + writer = f.headerCompressor + } + if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil { + return + } + if !f.headerCompressionDisabled { + f.headerCompressor.Flush() + } + + // Set ControlFrameHeader. + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeHeaders + frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 4) + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return + } + if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil { + return + } + f.headerBuf.Reset() + return +} + +func (f *Framer) writeDataFrame(frame *DataFrame) (err error) { + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + if frame.StreamId&0x80000000 != 0 || len(frame.Data) > MaxDataLength { + return &Error{InvalidDataFrame, frame.StreamId} + } + + // Serialize frame to Writer. 
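+	// Data frames carry no control frame header: the first word is the
+	// 31-bit stream id (control bit clear, enforced above), the second
+	// packs the flags into its top byte and the 24-bit payload length
+	// below them, and the raw payload follows.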
+ if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return + } + flagsAndLength := uint32(frame.Flags)<<24 | uint32(len(frame.Data)) + if err = binary.Write(f.w, binary.BigEndian, flagsAndLength); err != nil { + return + } + if _, err = f.w.Write(frame.Data); err != nil { + return + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/spdystream/spdy_bench_test.go b/Godeps/_workspace/src/github.com/docker/spdystream/spdy_bench_test.go new file mode 100644 index 00000000000..6f9e4910151 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/spdystream/spdy_bench_test.go @@ -0,0 +1,113 @@ +package spdystream + +import ( + "fmt" + "io" + "net" + "net/http" + "sync" + "testing" +) + +func configureServer() (io.Closer, string, *sync.WaitGroup) { + authenticated = true + wg := &sync.WaitGroup{} + server, listen, serverErr := runServer(wg) + + if serverErr != nil { + panic(serverErr) + } + + return server, listen, wg +} + +func BenchmarkDial10000(b *testing.B) { + server, addr, wg := configureServer() + + defer func() { + server.Close() + wg.Wait() + }() + + for i := 0; i < b.N; i++ { + conn, dialErr := net.Dial("tcp", addr) + if dialErr != nil { + panic(fmt.Sprintf("Error dialing server: %s", dialErr)) + } + conn.Close() + } +} + +func BenchmarkDialWithSPDYStream10000(b *testing.B) { + server, addr, wg := configureServer() + + defer func() { + server.Close() + wg.Wait() + }() + + for i := 0; i < b.N; i++ { + conn, dialErr := net.Dial("tcp", addr) + if dialErr != nil { + b.Fatalf("Error dialing server: %s", dialErr) + } + + spdyConn, spdyErr := NewConnection(conn, false) + if spdyErr != nil { + b.Fatalf("Error creating spdy connection: %s", spdyErr) + } + go spdyConn.Serve(NoOpStreamHandler) + + closeErr := spdyConn.Close() + if closeErr != nil { + b.Fatalf("Error closing connection: %s", closeErr) + } + } +} + +func benchmarkStreamWithDataAndSize(size uint64, b *testing.B) { + server, addr, wg := configureServer() + + defer func() { + server.Close() + wg.Wait() + }() + + for i := 0; i < b.N; i++ { + conn, dialErr := net.Dial("tcp", addr) + if dialErr != nil { + b.Fatalf("Error dialing server: %s", dialErr) + } + + spdyConn, spdyErr := NewConnection(conn, false) + if spdyErr != nil { + b.Fatalf("Error creating spdy connection: %s", spdyErr) + } + + go spdyConn.Serve(MirrorStreamHandler) + + stream, err := spdyConn.CreateStream(http.Header{}, nil, false) + if err != nil { + panic(err) + } + + writer := make([]byte, size) + + stream.Write(writer) + + reader := make([]byte, size) + stream.Read(reader) + + stream.Close() + + closeErr := spdyConn.Close() + if closeErr != nil { + b.Fatalf("Error closing connection: %s", closeErr) + } + } +} + +func BenchmarkStreamWith1Byte10000(b *testing.B) { benchmarkStreamWithDataAndSize(1, b) } +func BenchmarkStreamWith1KiloByte10000(b *testing.B) { benchmarkStreamWithDataAndSize(1024, b) } +func BenchmarkStreamWith1Megabyte10000(b *testing.B) { benchmarkStreamWithDataAndSize(1024*1024, b) } diff --git a/Godeps/_workspace/src/github.com/docker/spdystream/spdy_test.go b/Godeps/_workspace/src/github.com/docker/spdystream/spdy_test.go new file mode 100644 index 00000000000..9c8fa131a7e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/spdystream/spdy_test.go @@ -0,0 +1,909 @@ +package spdystream + +import ( + "bufio" + "bytes" + "io" + "net" + "net/http" + "net/http/httptest" + "sync" + "testing" + "time" +) + +func TestSpdyStreams(t *testing.T) { + var wg sync.WaitGroup + server, listen, 
serverErr := runServer(&wg) + if serverErr != nil { + t.Fatalf("Error initializing server: %s", serverErr) + } + + conn, dialErr := net.Dial("tcp", listen) + if dialErr != nil { + t.Fatalf("Error dialing server: %s", dialErr) + } + + spdyConn, spdyErr := NewConnection(conn, false) + if spdyErr != nil { + t.Fatalf("Error creating spdy connection: %s", spdyErr) + } + go spdyConn.Serve(NoOpStreamHandler) + + authenticated = true + stream, streamErr := spdyConn.CreateStream(http.Header{}, nil, false) + if streamErr != nil { + t.Fatalf("Error creating stream: %s", streamErr) + } + + waitErr := stream.Wait() + if waitErr != nil { + t.Fatalf("Error waiting for stream: %s", waitErr) + } + + message := []byte("hello") + writeErr := stream.WriteData(message, false) + if writeErr != nil { + t.Fatalf("Error writing data") + } + + buf := make([]byte, 10) + n, readErr := stream.Read(buf) + if readErr != nil { + t.Fatalf("Error reading data from stream: %s", readErr) + } + if n != 5 { + t.Fatalf("Unexpected number of bytes read:\nActual: %d\nExpected: 5", n) + } + if bytes.Compare(buf[:n], message) != 0 { + t.Fatalf("Did not receive expected message:\nActual: %s\nExpected: %s", buf, message) + } + + headers := http.Header{ + "TestKey": []string{"TestVal"}, + } + sendErr := stream.SendHeader(headers, false) + if sendErr != nil { + t.Fatalf("Error sending headers: %s", sendErr) + } + receiveHeaders, receiveErr := stream.ReceiveHeader() + if receiveErr != nil { + t.Fatalf("Error receiving headers: %s", receiveErr) + } + if len(receiveHeaders) != 1 { + t.Fatalf("Unexpected number of headers:\nActual: %d\nExpecting: %d", len(receiveHeaders), 1) + } + testVal := receiveHeaders.Get("TestKey") + if testVal != "TestVal" { + t.Fatalf("Wrong test value:\nActual: %q\nExpecting: %q", testVal, "TestVal") + } + + writeErr = stream.WriteData(message, true) + if writeErr != nil { + t.Fatalf("Error writing data") + } + + smallBuf := make([]byte, 3) + n, readErr = stream.Read(smallBuf) + if readErr != nil { + t.Fatalf("Error reading data from stream: %s", readErr) + } + if n != 3 { + t.Fatalf("Unexpected number of bytes read:\nActual: %d\nExpected: 3", n) + } + if bytes.Compare(smallBuf[:n], []byte("hel")) != 0 { + t.Fatalf("Did not receive expected message:\nActual: %s\nExpected: %s", smallBuf[:n], message) + } + n, readErr = stream.Read(smallBuf) + if readErr != nil { + t.Fatalf("Error reading data from stream: %s", readErr) + } + if n != 2 { + t.Fatalf("Unexpected number of bytes read:\nActual: %d\nExpected: 2", n) + } + if bytes.Compare(smallBuf[:n], []byte("lo")) != 0 { + t.Fatalf("Did not receive expected message:\nActual: %s\nExpected: lo", smallBuf[:n]) + } + + n, readErr = stream.Read(buf) + if readErr != io.EOF { + t.Fatalf("Expected EOF reading from finished stream, read %d bytes", n) + } + + // Closing again should return error since stream is already closed + streamCloseErr := stream.Close() + if streamCloseErr == nil { + t.Fatalf("No error closing finished stream") + } + if streamCloseErr != ErrWriteClosedStream { + t.Fatalf("Unexpected error closing stream: %s", streamCloseErr) + } + + streamResetErr := stream.Reset() + if streamResetErr != nil { + t.Fatalf("Error resetting stream: %s", streamResetErr) + } + + authenticated = false + badStream, badStreamErr := spdyConn.CreateStream(http.Header{}, nil, false) + if badStreamErr != nil { + t.Fatalf("Error creating stream: %s", badStreamErr) + } + + waitErr = badStream.Wait() + if waitErr == nil { + t.Fatalf("Did not receive error creating stream") + } + if waitErr 
!= ErrReset { + t.Fatalf("Unexpected error creating stream: %s", waitErr) + } + streamCloseErr = badStream.Close() + if streamCloseErr == nil { + t.Fatalf("No error closing bad stream") + } + + spdyCloseErr := spdyConn.Close() + if spdyCloseErr != nil { + t.Fatalf("Error closing spdy connection: %s", spdyCloseErr) + } + + closeErr := server.Close() + if closeErr != nil { + t.Fatalf("Error shutting down server: %s", closeErr) + } + wg.Wait() +} + +func TestPing(t *testing.T) { + var wg sync.WaitGroup + server, listen, serverErr := runServer(&wg) + if serverErr != nil { + t.Fatalf("Error initializing server: %s", serverErr) + } + + conn, dialErr := net.Dial("tcp", listen) + if dialErr != nil { + t.Fatalf("Error dialing server: %s", dialErr) + } + + spdyConn, spdyErr := NewConnection(conn, false) + if spdyErr != nil { + t.Fatalf("Error creating spdy connection: %s", spdyErr) + } + go spdyConn.Serve(NoOpStreamHandler) + + pingTime, pingErr := spdyConn.Ping() + if pingErr != nil { + t.Fatalf("Error pinging server: %s", pingErr) + } + if pingTime == time.Duration(0) { + t.Fatalf("Expecting non-zero ping time") + } + + closeErr := server.Close() + if closeErr != nil { + t.Fatalf("Error shutting down server: %s", closeErr) + } + wg.Wait() +} + +func TestHalfClose(t *testing.T) { + var wg sync.WaitGroup + server, listen, serverErr := runServer(&wg) + if serverErr != nil { + t.Fatalf("Error initializing server: %s", serverErr) + } + + conn, dialErr := net.Dial("tcp", listen) + if dialErr != nil { + t.Fatalf("Error dialing server: %s", dialErr) + } + + spdyConn, spdyErr := NewConnection(conn, false) + if spdyErr != nil { + t.Fatalf("Error creating spdy connection: %s", spdyErr) + } + go spdyConn.Serve(NoOpStreamHandler) + + authenticated = true + stream, streamErr := spdyConn.CreateStream(http.Header{}, nil, false) + if streamErr != nil { + t.Fatalf("Error creating stream: %s", streamErr) + } + + waitErr := stream.Wait() + if waitErr != nil { + t.Fatalf("Error waiting for stream: %s", waitErr) + } + + message := []byte("hello and will read after close") + writeErr := stream.WriteData(message, false) + if writeErr != nil { + t.Fatalf("Error writing data") + } + + streamCloseErr := stream.Close() + if streamCloseErr != nil { + t.Fatalf("Error closing stream: %s", streamCloseErr) + } + + buf := make([]byte, 40) + n, readErr := stream.Read(buf) + if readErr != nil { + t.Fatalf("Error reading data from stream: %s", readErr) + } + if n != 31 { + t.Fatalf("Unexpected number of bytes read:\nActual: %d\nExpected: 31", n) + } + if bytes.Compare(buf[:n], message) != 0 { + t.Fatalf("Did not receive expected message:\nActual: %s\nExpected: %s", buf, message) + } + + spdyCloseErr := spdyConn.Close() + if spdyCloseErr != nil { + t.Fatalf("Error closing spdy connection: %s", spdyCloseErr) + } + + closeErr := server.Close() + if closeErr != nil { + t.Fatalf("Error shutting down server: %s", closeErr) + } + wg.Wait() +} + +func TestUnexpectedRemoteConnectionClosed(t *testing.T) { + tt := []struct { + closeReceiver bool + closeSender bool + }{ + {closeReceiver: true, closeSender: false}, + {closeReceiver: false, closeSender: true}, + {closeReceiver: false, closeSender: false}, + } + for tix, tc := range tt { + listener, listenErr := net.Listen("tcp", "localhost:0") + if listenErr != nil { + t.Fatalf("Error listening: %v", listenErr) + } + + var serverConn net.Conn + var connErr error + go func() { + serverConn, connErr = listener.Accept() + if connErr != nil { + t.Fatalf("Error accepting: %v", connErr) + } + + 
serverSpdyConn, _ := NewConnection(serverConn, true) + go serverSpdyConn.Serve(func(stream *Stream) { + stream.SendReply(http.Header{}, tc.closeSender) + }) + }() + + conn, dialErr := net.Dial("tcp", listener.Addr().String()) + if dialErr != nil { + t.Fatalf("Error dialing server: %s", dialErr) + } + + spdyConn, spdyErr := NewConnection(conn, false) + if spdyErr != nil { + t.Fatalf("Error creating spdy connection: %s", spdyErr) + } + go spdyConn.Serve(NoOpStreamHandler) + + authenticated = true + stream, streamErr := spdyConn.CreateStream(http.Header{}, nil, false) + if streamErr != nil { + t.Fatalf("Error creating stream: %s", streamErr) + } + + waitErr := stream.Wait() + if waitErr != nil { + t.Fatalf("Error waiting for stream: %s", waitErr) + } + + if tc.closeReceiver { + // make stream half closed, receive only + stream.Close() + } + + streamch := make(chan error, 1) + go func() { + b := make([]byte, 1) + _, err := stream.Read(b) + streamch <- err + }() + + closeErr := serverConn.Close() + if closeErr != nil { + t.Fatalf("Error shutting down server: %s", closeErr) + } + + select { + case e := <-streamch: + if e == nil || e != io.EOF { + t.Fatalf("(%d) Expected to get an EOF stream error", tix) + } + } + + closeErr = conn.Close() + if closeErr != nil { + t.Fatalf("Error closing client connection: %s", closeErr) + } + + listenErr = listener.Close() + if listenErr != nil { + t.Fatalf("Error closing listener: %s", listenErr) + } + } +} + +func TestCloseNotification(t *testing.T) { + listener, listenErr := net.Listen("tcp", "localhost:0") + if listenErr != nil { + t.Fatalf("Error listening: %v", listenErr) + } + listen := listener.Addr().String() + + serverConnChan := make(chan net.Conn) + go func() { + serverConn, err := listener.Accept() + if err != nil { + t.Fatalf("Error accepting: %v", err) + } + + serverSpdyConn, err := NewConnection(serverConn, true) + if err != nil { + t.Fatalf("Error creating server connection: %v", err) + } + go serverSpdyConn.Serve(NoOpStreamHandler) + <-serverSpdyConn.CloseChan() + serverConnChan <- serverConn + }() + + conn, dialErr := net.Dial("tcp", listen) + if dialErr != nil { + t.Fatalf("Error dialing server: %s", dialErr) + } + + spdyConn, spdyErr := NewConnection(conn, false) + if spdyErr != nil { + t.Fatalf("Error creating spdy connection: %s", spdyErr) + } + go spdyConn.Serve(NoOpStreamHandler) + + // close client conn + err := conn.Close() + if err != nil { + t.Fatalf("Error closing client connection: %v", err) + } + + var serverConn net.Conn + select { + case serverConn = <-serverConnChan: + } + + err = serverConn.Close() + if err != nil { + t.Fatalf("Error closing serverConn: %v", err) + } + + listenErr = listener.Close() + if listenErr != nil { + t.Fatalf("Error closing listener: %s", listenErr) + } +} + +func TestIdleShutdownRace(t *testing.T) { + var wg sync.WaitGroup + server, listen, serverErr := runServer(&wg) + if serverErr != nil { + t.Fatalf("Error initializing server: %s", serverErr) + } + + conn, dialErr := net.Dial("tcp", listen) + if dialErr != nil { + t.Fatalf("Error dialing server: %s", dialErr) + } + + spdyConn, spdyErr := NewConnection(conn, false) + if spdyErr != nil { + t.Fatalf("Error creating spdy connection: %s", spdyErr) + } + go spdyConn.Serve(NoOpStreamHandler) + + authenticated = true + stream, err := spdyConn.CreateStream(http.Header{}, nil, false) + if err != nil { + t.Fatalf("Error creating stream: %v", err) + } + + spdyConn.SetIdleTimeout(5 * time.Millisecond) + go func() { + time.Sleep(5 * time.Millisecond) + 
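+		// Reset the stream at roughly the instant the 5ms idle timer fires,
+		// exercising the race between idle shutdown and stream reset.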
stream.Reset() + }() + + select { + case <-spdyConn.CloseChan(): + case <-time.After(20 * time.Millisecond): + t.Fatal("Timed out waiting for idle connection closure") + } + + closeErr := server.Close() + if closeErr != nil { + t.Fatalf("Error shutting down server: %s", closeErr) + } + wg.Wait() +} + +func TestIdleNoTimeoutSet(t *testing.T) { + var wg sync.WaitGroup + server, listen, serverErr := runServer(&wg) + if serverErr != nil { + t.Fatalf("Error initializing server: %s", serverErr) + } + + conn, dialErr := net.Dial("tcp", listen) + if dialErr != nil { + t.Fatalf("Error dialing server: %s", dialErr) + } + + spdyConn, spdyErr := NewConnection(conn, false) + if spdyErr != nil { + t.Fatalf("Error creating spdy connection: %s", spdyErr) + } + go spdyConn.Serve(NoOpStreamHandler) + + select { + case <-spdyConn.CloseChan(): + t.Fatal("Unexpected connection closure") + case <-time.After(10 * time.Millisecond): + } + + closeErr := server.Close() + if closeErr != nil { + t.Fatalf("Error shutting down server: %s", closeErr) + } + wg.Wait() +} + +func TestIdleClearTimeout(t *testing.T) { + var wg sync.WaitGroup + server, listen, serverErr := runServer(&wg) + if serverErr != nil { + t.Fatalf("Error initializing server: %s", serverErr) + } + + conn, dialErr := net.Dial("tcp", listen) + if dialErr != nil { + t.Fatalf("Error dialing server: %s", dialErr) + } + + spdyConn, spdyErr := NewConnection(conn, false) + if spdyErr != nil { + t.Fatalf("Error creating spdy connection: %s", spdyErr) + } + go spdyConn.Serve(NoOpStreamHandler) + + spdyConn.SetIdleTimeout(10 * time.Millisecond) + spdyConn.SetIdleTimeout(0) + select { + case <-spdyConn.CloseChan(): + t.Fatal("Unexpected connection closure") + case <-time.After(20 * time.Millisecond): + } + + closeErr := server.Close() + if closeErr != nil { + t.Fatalf("Error shutting down server: %s", closeErr) + } + wg.Wait() +} + +func TestIdleNoData(t *testing.T) { + var wg sync.WaitGroup + server, listen, serverErr := runServer(&wg) + if serverErr != nil { + t.Fatalf("Error initializing server: %s", serverErr) + } + + conn, dialErr := net.Dial("tcp", listen) + if dialErr != nil { + t.Fatalf("Error dialing server: %s", dialErr) + } + + spdyConn, spdyErr := NewConnection(conn, false) + if spdyErr != nil { + t.Fatalf("Error creating spdy connection: %s", spdyErr) + } + go spdyConn.Serve(NoOpStreamHandler) + + spdyConn.SetIdleTimeout(10 * time.Millisecond) + <-spdyConn.CloseChan() + + closeErr := server.Close() + if closeErr != nil { + t.Fatalf("Error shutting down server: %s", closeErr) + } + wg.Wait() +} + +func TestIdleWithData(t *testing.T) { + var wg sync.WaitGroup + server, listen, serverErr := runServer(&wg) + if serverErr != nil { + t.Fatalf("Error initializing server: %s", serverErr) + } + + conn, dialErr := net.Dial("tcp", listen) + if dialErr != nil { + t.Fatalf("Error dialing server: %s", dialErr) + } + + spdyConn, spdyErr := NewConnection(conn, false) + if spdyErr != nil { + t.Fatalf("Error creating spdy connection: %s", spdyErr) + } + go spdyConn.Serve(NoOpStreamHandler) + + spdyConn.SetIdleTimeout(25 * time.Millisecond) + + authenticated = true + stream, err := spdyConn.CreateStream(http.Header{}, nil, false) + if err != nil { + t.Fatalf("Error creating stream: %v", err) + } + + writeCh := make(chan struct{}) + + go func() { + b := []byte{1, 2, 3, 4, 5} + for i := 0; i < 10; i++ { + _, err = stream.Write(b) + if err != nil { + t.Fatalf("Error writing to stream: %v", err) + } + time.Sleep(10 * time.Millisecond) + } + close(writeCh) + }() + + 
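+	// The writer goroutine above touches the connection every 10ms, well
+	// inside the 25ms idle timeout, so the connection must stay open until
+	// all ten writes finish; the loop below verifies that ordering.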
writesFinished := false
+
+Loop:
+	for {
+		select {
+		case <-writeCh:
+			writesFinished = true
+		case <-spdyConn.CloseChan():
+			if !writesFinished {
+				t.Fatal("Connection closed before all writes finished")
+			}
+			break Loop
+		}
+	}
+
+	closeErr := server.Close()
+	if closeErr != nil {
+		t.Fatalf("Error shutting down server: %s", closeErr)
+	}
+	wg.Wait()
+}
+
+func TestIdleRace(t *testing.T) {
+	var wg sync.WaitGroup
+	server, listen, serverErr := runServer(&wg)
+	if serverErr != nil {
+		t.Fatalf("Error initializing server: %s", serverErr)
+	}
+
+	conn, dialErr := net.Dial("tcp", listen)
+	if dialErr != nil {
+		t.Fatalf("Error dialing server: %s", dialErr)
+	}
+
+	spdyConn, spdyErr := NewConnection(conn, false)
+	if spdyErr != nil {
+		t.Fatalf("Error creating spdy connection: %s", spdyErr)
+	}
+	go spdyConn.Serve(NoOpStreamHandler)
+
+	spdyConn.SetIdleTimeout(10 * time.Millisecond)
+
+	authenticated = true
+
+	for i := 0; i < 10; i++ {
+		_, err := spdyConn.CreateStream(http.Header{}, nil, false)
+		if err != nil {
+			t.Fatalf("Error creating stream: %v", err)
+		}
+	}
+
+	<-spdyConn.CloseChan()
+
+	closeErr := server.Close()
+	if closeErr != nil {
+		t.Fatalf("Error shutting down server: %s", closeErr)
+	}
+	wg.Wait()
+}
+
+func TestHalfClosedIdleTimeout(t *testing.T) {
+	listener, listenErr := net.Listen("tcp", "localhost:0")
+	if listenErr != nil {
+		t.Fatalf("Error listening: %v", listenErr)
+	}
+	listen := listener.Addr().String()
+
+	go func() {
+		serverConn, err := listener.Accept()
+		if err != nil {
+			t.Fatalf("Error accepting: %v", err)
+		}
+
+		serverSpdyConn, err := NewConnection(serverConn, true)
+		if err != nil {
+			t.Fatalf("Error creating server connection: %v", err)
+		}
+		go serverSpdyConn.Serve(func(s *Stream) {
+			s.SendReply(http.Header{}, true)
+		})
+		serverSpdyConn.SetIdleTimeout(10 * time.Millisecond)
+	}()
+
+	conn, dialErr := net.Dial("tcp", listen)
+	if dialErr != nil {
+		t.Fatalf("Error dialing server: %s", dialErr)
+	}
+
+	spdyConn, spdyErr := NewConnection(conn, false)
+	if spdyErr != nil {
+		t.Fatalf("Error creating spdy connection: %s", spdyErr)
+	}
+	go spdyConn.Serve(NoOpStreamHandler)
+
+	stream, err := spdyConn.CreateStream(http.Header{}, nil, false)
+	if err != nil {
+		t.Fatalf("Error creating stream: %v", err)
+	}
+
+	time.Sleep(20 * time.Millisecond)
+
+	stream.Reset()
+
+	err = spdyConn.Close()
+	if err != nil {
+		t.Fatalf("Error closing client spdy conn: %v", err)
+	}
+}
+
+func TestStreamReset(t *testing.T) {
+	var wg sync.WaitGroup
+	server, listen, serverErr := runServer(&wg)
+	if serverErr != nil {
+		t.Fatalf("Error initializing server: %s", serverErr)
+	}
+
+	conn, dialErr := net.Dial("tcp", listen)
+	if dialErr != nil {
+		t.Fatalf("Error dialing server: %s", dialErr)
+	}
+
+	spdyConn, spdyErr := NewConnection(conn, false)
+	if spdyErr != nil {
+		t.Fatalf("Error creating spdy connection: %s", spdyErr)
+	}
+	go spdyConn.Serve(NoOpStreamHandler)
+
+	authenticated = true
+	stream, streamErr := spdyConn.CreateStream(http.Header{}, nil, false)
+	if streamErr != nil {
+		t.Fatalf("Error creating stream: %s", streamErr)
+	}
+
+	buf := []byte("dskjahfkdusahfkdsahfkdsafdkas")
+	for i := 0; i < 10; i++ {
+		if _, err := stream.Write(buf); err != nil {
+			t.Fatalf("Error writing to stream: %s", err)
+		}
+	}
+	for i := 0; i < 10; i++ {
+		if _, err := stream.Read(buf); err != nil {
+			t.Fatalf("Error reading from stream: %s", err)
+		}
+	}
+
+	// fmt.Printf("Resetting...\n")
+	if err := stream.Reset(); err != nil {
+		t.Fatalf("Error resetting stream: %s", err)
+	}
+
+	closeErr := server.Close()
+	if closeErr != nil {
+		t.Fatalf("Error shutting down server: %s", closeErr)
+	}
+	wg.Wait()
+}
+
+func TestStreamResetWithDataRemaining(t *testing.T) {
+	var wg sync.WaitGroup
+	server, listen, serverErr := runServer(&wg)
+	if serverErr != nil {
+		t.Fatalf("Error initializing server: %s", serverErr)
+	}
+
+	conn, dialErr := net.Dial("tcp", listen)
+	if dialErr != nil {
+		t.Fatalf("Error dialing server: %s", dialErr)
+	}
+
+	spdyConn, spdyErr := NewConnection(conn, false)
+	if spdyErr != nil {
+		t.Fatalf("Error creating spdy connection: %s", spdyErr)
+	}
+	go spdyConn.Serve(NoOpStreamHandler)
+
+	authenticated = true
+	stream, streamErr := spdyConn.CreateStream(http.Header{}, nil, false)
+	if streamErr != nil {
+		t.Fatalf("Error creating stream: %s", streamErr)
+	}
+
+	buf := []byte("dskjahfkdusahfkdsahfkdsafdkas")
+	for i := 0; i < 10; i++ {
+		if _, err := stream.Write(buf); err != nil {
+			t.Fatalf("Error writing to stream: %s", err)
+		}
+	}
+
+	// read a bit to make sure a goroutine gets to <-dataChan
+	if _, err := stream.Read(buf); err != nil {
+		t.Fatalf("Error reading from stream: %s", err)
+	}
+
+	// fmt.Printf("Resetting...\n")
+	if err := stream.Reset(); err != nil {
+		t.Fatalf("Error resetting stream: %s", err)
+	}
+
+	closeErr := server.Close()
+	if closeErr != nil {
+		t.Fatalf("Error shutting down server: %s", closeErr)
+	}
+	wg.Wait()
+}
+
+type roundTripper struct {
+	conn net.Conn
+}
+
+func (s *roundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+	r := *req
+	req = &r
+
+	conn, err := net.Dial("tcp", req.URL.Host)
+	if err != nil {
+		return nil, err
+	}
+
+	err = req.Write(conn)
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := http.ReadResponse(bufio.NewReader(conn), req)
+	if err != nil {
+		return nil, err
+	}
+
+	s.conn = conn
+
+	return resp, nil
+}
+
+// see https://github.com/GoogleCloudPlatform/kubernetes/issues/4882
+func TestFramingAfterRemoteConnectionClosed(t *testing.T) {
+	server := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+		streamCh := make(chan *Stream)
+
+		w.WriteHeader(http.StatusSwitchingProtocols)
+
+		netconn, _, _ := w.(http.Hijacker).Hijack()
+		conn, _ := NewConnection(netconn, true)
+		go conn.Serve(func(s *Stream) {
+			s.SendReply(http.Header{}, false)
+			streamCh <- s
+		})
+
+		stream := <-streamCh
+		io.Copy(stream, stream)
+
+		closeChan := make(chan struct{})
+		go func() {
+			stream.Reset()
+			conn.Close()
+			close(closeChan)
+		}()
+
+		<-closeChan
+	}))
+
+	server.Start()
+	defer server.Close()
+
+	req, err := http.NewRequest("GET", server.URL, nil)
+	if err != nil {
+		t.Fatalf("Error creating request: %s", err)
+	}
+
+	rt := &roundTripper{}
+	client := &http.Client{Transport: rt}
+
+	_, err = client.Do(req)
+	if err != nil {
+		t.Fatalf("unexpected error from client.Do: %s", err)
+	}
+
+	conn, err := NewConnection(rt.conn, false)
+	go conn.Serve(NoOpStreamHandler)
+
+	stream, err := conn.CreateStream(http.Header{}, nil, false)
+	if err != nil {
+		t.Fatalf("error creating client stream: %s", err)
+	}
+
+	n, err := stream.Write([]byte("hello"))
+	if err != nil {
+		t.Fatalf("error writing to stream: %s", err)
+	}
+	if n != 5 {
+		t.Fatalf("Expected to write 5 bytes, but actually wrote %d", n)
+	}
+
+	b := make([]byte, 5)
+	n, err = stream.Read(b)
+	if err != nil {
+		t.Fatalf("error reading from stream: %s", err)
+	}
+	if n != 5 {
+		t.Fatalf("Expected to read 5 bytes, but actually read %d", n)
+	}
+	if e, a := "hello", string(b[0:n]); e != a {
t.Fatalf("expected '%s', got '%s'", e, a) + } + + stream.Reset() + conn.Close() +} + +var authenticated bool + +func authStreamHandler(stream *Stream) { + if !authenticated { + stream.Refuse() + } + MirrorStreamHandler(stream) +} + +func runServer(wg *sync.WaitGroup) (io.Closer, string, error) { + listener, listenErr := net.Listen("tcp", "localhost:0") + if listenErr != nil { + return nil, "", listenErr + } + wg.Add(1) + go func() { + for { + conn, connErr := listener.Accept() + if connErr != nil { + break + } + + spdyConn, _ := NewConnection(conn, true) + go spdyConn.Serve(authStreamHandler) + + } + wg.Done() + }() + return listener, listener.Addr().String(), nil +} diff --git a/Godeps/_workspace/src/github.com/docker/spdystream/stream.go b/Godeps/_workspace/src/github.com/docker/spdystream/stream.go new file mode 100644 index 00000000000..52d2a00bc2c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/spdystream/stream.go @@ -0,0 +1,327 @@ +package spdystream + +import ( + "errors" + "fmt" + "io" + "net" + "net/http" + "sync" + "time" + + "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/docker/spdystream/spdy" +) + +var ( + ErrUnreadPartialData = errors.New("unread partial data") +) + +type Stream struct { + streamId spdy.StreamId + parent *Stream + conn *Connection + startChan chan error + + dataLock sync.RWMutex + dataChan chan []byte + unread []byte + + priority uint8 + headers http.Header + headerChan chan http.Header + finishLock sync.Mutex + finished bool + replyCond *sync.Cond + replied bool + closeLock sync.Mutex + closeChan chan bool +} + +// WriteData writes data to stream, sending a dataframe per call +func (s *Stream) WriteData(data []byte, fin bool) error { + s.waitWriteReply() + var flags spdy.DataFlags + + if fin { + flags = spdy.DataFlagFin + s.finishLock.Lock() + if s.finished { + s.finishLock.Unlock() + return ErrWriteClosedStream + } + s.finished = true + s.finishLock.Unlock() + } + + dataFrame := &spdy.DataFrame{ + StreamId: s.streamId, + Flags: flags, + Data: data, + } + + debugMessage("(%p) (%d) Writing data frame", s, s.streamId) + return s.conn.framer.WriteFrame(dataFrame) +} + +// Write writes bytes to a stream, calling write data for each call. +func (s *Stream) Write(data []byte) (n int, err error) { + err = s.WriteData(data, false) + if err == nil { + n = len(data) + } + return +} + +// Read reads bytes from a stream, a single read will never get more +// than what is sent on a single data frame, but a multiple calls to +// read may get data from the same data frame. +func (s *Stream) Read(p []byte) (n int, err error) { + if s.unread == nil { + select { + case <-s.closeChan: + return 0, io.EOF + case read, ok := <-s.dataChan: + if !ok { + return 0, io.EOF + } + s.unread = read + } + } + n = copy(p, s.unread) + if n < len(s.unread) { + s.unread = s.unread[n:] + } else { + s.unread = nil + } + return +} + +// ReadData reads an entire data frame and returns the byte array +// from the data frame. If there is unread data from the result +// of a Read call, this function will return an ErrUnreadPartialData. 
+func (s *Stream) ReadData() ([]byte, error) { + debugMessage("(%p) Reading data from %d", s, s.streamId) + if s.unread != nil { + return nil, ErrUnreadPartialData + } + select { + case <-s.closeChan: + return nil, io.EOF + case read, ok := <-s.dataChan: + if !ok { + return nil, io.EOF + } + return read, nil + } +} + +func (s *Stream) waitWriteReply() { + if s.replyCond != nil { + s.replyCond.L.Lock() + for !s.replied { + s.replyCond.Wait() + } + s.replyCond.L.Unlock() + } +} + +// Wait waits for the stream to receive a reply. +func (s *Stream) Wait() error { + return s.WaitTimeout(time.Duration(0)) +} + +// WaitTimeout waits for the stream to receive a reply or for timeout. +// When the timeout is reached, ErrTimeout will be returned. +func (s *Stream) WaitTimeout(timeout time.Duration) error { + var timeoutChan <-chan time.Time + if timeout > time.Duration(0) { + timeoutChan = time.After(timeout) + } + + select { + case err := <-s.startChan: + if err != nil { + return err + } + break + case <-timeoutChan: + return ErrTimeout + } + return nil +} + +// Close closes the stream by sending an empty data frame with the +// finish flag set, indicating this side is finished with the stream. +func (s *Stream) Close() error { + select { + case <-s.closeChan: + // Stream is now fully closed + s.conn.removeStream(s) + default: + break + } + return s.WriteData([]byte{}, true) +} + +// Reset sends a reset frame, putting the stream into the fully closed state. +func (s *Stream) Reset() error { + s.conn.removeStream(s) + return s.resetStream() +} + +func (s *Stream) resetStream() error { + s.finishLock.Lock() + if s.finished { + s.finishLock.Unlock() + return nil + } + s.finished = true + s.finishLock.Unlock() + + s.closeRemoteChannels() + + resetFrame := &spdy.RstStreamFrame{ + StreamId: s.streamId, + Status: spdy.Cancel, + } + return s.conn.framer.WriteFrame(resetFrame) +} + +// CreateSubStream creates a stream using the current as the parent +func (s *Stream) CreateSubStream(headers http.Header, fin bool) (*Stream, error) { + return s.conn.CreateStream(headers, s, fin) +} + +// SetPriority sets the stream priority, does not affect the +// remote priority of this stream after Open has been called. +// Valid values are 0 through 7, 0 being the highest priority +// and 7 the lowest. +func (s *Stream) SetPriority(priority uint8) { + s.priority = priority +} + +// SendHeader sends a header frame across the stream +func (s *Stream) SendHeader(headers http.Header, fin bool) error { + return s.conn.sendHeaders(headers, s, fin) +} + +// SendReply sends a reply on a stream, only valid to be called once +// when handling a new stream +func (s *Stream) SendReply(headers http.Header, fin bool) error { + if s.replyCond == nil { + return errors.New("cannot reply on initiated stream") + } + s.replyCond.L.Lock() + defer s.replyCond.L.Unlock() + if s.replied { + return nil + } + + err := s.conn.sendReply(headers, s, fin) + if err != nil { + return err + } + + s.replied = true + s.replyCond.Broadcast() + return nil +} + +// Refuse sends a reset frame with the status refuse, only +// valid to be called once when handling a new stream. This +// may be used to indicate that a stream is not allowed +// when http status codes are not being used. +func (s *Stream) Refuse() error { + if s.replied { + return nil + } + s.replied = true + return s.conn.sendReset(spdy.RefusedStream, s) +} + +// Cancel sends a reset frame with the status canceled. 
This +// can be used at any time by the creator of the Stream to +// indicate the stream is no longer needed. +func (s *Stream) Cancel() error { + return s.conn.sendReset(spdy.Cancel, s) +} + +// ReceiveHeader receives a header sent on the other side +// of the stream. This function will block until a header +// is received or stream is closed. +func (s *Stream) ReceiveHeader() (http.Header, error) { + select { + case <-s.closeChan: + break + case header, ok := <-s.headerChan: + if !ok { + return nil, fmt.Errorf("header chan closed") + } + return header, nil + } + return nil, fmt.Errorf("stream closed") +} + +// Parent returns the parent stream +func (s *Stream) Parent() *Stream { + return s.parent +} + +// Headers returns the headers used to create the stream +func (s *Stream) Headers() http.Header { + return s.headers +} + +// String returns the string version of stream using the +// streamId to uniquely identify the stream +func (s *Stream) String() string { + return fmt.Sprintf("stream:%d", s.streamId) +} + +// Identifier returns a 32 bit identifier for the stream +func (s *Stream) Identifier() uint32 { + return uint32(s.streamId) +} + +// IsFinished returns whether the stream has finished +// sending data +func (s *Stream) IsFinished() bool { + return s.finished +} + +// Implement net.Conn interface + +func (s *Stream) LocalAddr() net.Addr { + return s.conn.conn.LocalAddr() +} + +func (s *Stream) RemoteAddr() net.Addr { + return s.conn.conn.RemoteAddr() +} + +// TODO set per stream values instead of connection-wide + +func (s *Stream) SetDeadline(t time.Time) error { + return s.conn.conn.SetDeadline(t) +} + +func (s *Stream) SetReadDeadline(t time.Time) error { + return s.conn.conn.SetReadDeadline(t) +} + +func (s *Stream) SetWriteDeadline(t time.Time) error { + return s.conn.conn.SetWriteDeadline(t) +} + +func (s *Stream) closeRemoteChannels() { + s.closeLock.Lock() + defer s.closeLock.Unlock() + select { + case <-s.closeChan: + default: + close(s.closeChan) + s.dataLock.Lock() + defer s.dataLock.Unlock() + close(s.dataChan) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/spdystream/utils.go b/Godeps/_workspace/src/github.com/docker/spdystream/utils.go new file mode 100644 index 00000000000..1b2c199a402 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/spdystream/utils.go @@ -0,0 +1,16 @@ +package spdystream + +import ( + "log" + "os" +) + +var ( + DEBUG = os.Getenv("DEBUG") +) + +func debugMessage(fmt string, args ...interface{}) { + if DEBUG != "" { + log.Printf(fmt, args...) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/spdystream/ws/connection.go b/Godeps/_workspace/src/github.com/docker/spdystream/ws/connection.go new file mode 100644 index 00000000000..d0ea001b454 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/spdystream/ws/connection.go @@ -0,0 +1,65 @@ +package ws + +import ( + "github.com/gorilla/websocket" + "io" + "log" + "time" +) + +// Wrap an HTTP2 connection over WebSockets and +// use the underlying WebSocket framing for proxy +// compatibility. 
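+//
+// Frames are carried in binary WebSocket messages; Read logs and
+// ignores any non-binary message it receives.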
+type Conn struct { + *websocket.Conn + reader io.Reader +} + +func NewConnection(w *websocket.Conn) *Conn { + return &Conn{Conn: w} +} + +func (c Conn) Write(b []byte) (int, error) { + err := c.WriteMessage(websocket.BinaryMessage, b) + if err != nil { + return 0, err + } + return len(b), nil +} + +func (c Conn) Read(b []byte) (int, error) { + if c.reader == nil { + t, r, err := c.NextReader() + if err != nil { + return 0, err + } + if t != websocket.BinaryMessage { + log.Printf("ws: ignored non-binary message in stream") + return 0, nil + } + c.reader = r + } + n, err := c.reader.Read(b) + if err != nil { + if err == io.EOF { + c.reader = nil + } + return n, err + } + return n, nil +} + +func (c Conn) SetDeadline(t time.Time) error { + if err := c.Conn.SetReadDeadline(t); err != nil { + return err + } + if err := c.Conn.SetWriteDeadline(t); err != nil { + return err + } + return nil +} + +func (c Conn) Close() error { + err := c.Conn.Close() + return err +} diff --git a/Godeps/_workspace/src/github.com/docker/spdystream/ws/ws_test.go b/Godeps/_workspace/src/github.com/docker/spdystream/ws/ws_test.go new file mode 100644 index 00000000000..58d2b991263 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/spdystream/ws/ws_test.go @@ -0,0 +1,175 @@ +package ws + +import ( + "bytes" + "github.com/gorilla/websocket" + "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/docker/spdystream" + "io" + "log" + "net/http" + "net/http/httptest" + "strings" + "testing" +) + +var upgrader = websocket.Upgrader{ + ReadBufferSize: 1024, + WriteBufferSize: 1024, +} + +var serverSpdyConn *spdystream.Connection + +// Connect to the Websocket endpoint at ws://localhost +// using SPDY over Websockets framing. +func ExampleConn() { + wsconn, _, _ := websocket.DefaultDialer.Dial("ws://localhost/", http.Header{"Origin": {"http://localhost/"}}) + conn, _ := spdystream.NewConnection(NewConnection(wsconn), false) + go conn.Serve(spdystream.NoOpStreamHandler, spdystream.NoAuthHandler) + stream, _ := conn.CreateStream(http.Header{}, nil, false) + stream.Wait() +} + +func serveWs(w http.ResponseWriter, r *http.Request) { + if r.Method != "GET" { + http.Error(w, "Method not allowed", 405) + return + } + + ws, err := upgrader.Upgrade(w, r, nil) + if err != nil { + if _, ok := err.(websocket.HandshakeError); !ok { + log.Println(err) + } + return + } + + wrap := NewConnection(ws) + spdyConn, err := spdystream.NewConnection(wrap, true) + if err != nil { + log.Fatal(err) + return + } + serverSpdyConn = spdyConn + go spdyConn.Serve(spdystream.MirrorStreamHandler, authStreamHandler) +} + +func TestSpdyStreamOverWs(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(serveWs)) + defer server.Close() + defer func() { + if serverSpdyConn != nil { + serverSpdyConn.Close() + } + }() + + wsconn, _, err := websocket.DefaultDialer.Dial(strings.Replace(server.URL, "http://", "ws://", 1), http.Header{"Origin": {server.URL}}) + if err != nil { + t.Fatal(err) + } + + wrap := NewConnection(wsconn) + spdyConn, err := spdystream.NewConnection(wrap, false) + if err != nil { + defer wsconn.Close() + t.Fatal(err) + } + defer spdyConn.Close() + authenticated = true + go spdyConn.Serve(spdystream.NoOpStreamHandler, spdystream.RejectAuthHandler) + + stream, streamErr := spdyConn.CreateStream(http.Header{}, nil, false) + if streamErr != nil { + t.Fatalf("Error creating stream: %s", streamErr) + } + + waitErr := stream.Wait() + if waitErr != nil { + t.Fatalf("Error waiting for stream: %s", waitErr) + } + + 
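// the server side uses MirrorStreamHandler, so everything written
+	// below should be echoed back verbatim
+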
message := []byte("hello")
+	writeErr := stream.WriteData(message, false)
+	if writeErr != nil {
+		t.Fatalf("Error writing data")
+	}
+
+	buf := make([]byte, 10)
+	n, readErr := stream.Read(buf)
+	if readErr != nil {
+		t.Fatalf("Error reading data from stream: %s", readErr)
+	}
+	if n != 5 {
+		t.Fatalf("Unexpected number of bytes read:\nActual: %d\nExpected: 5", n)
+	}
+	if bytes.Compare(buf[:n], message) != 0 {
+		t.Fatalf("Did not receive expected message:\nActual: %s\nExpected: %s", buf, message)
+	}
+
+	writeErr = stream.WriteData(message, true)
+	if writeErr != nil {
+		t.Fatalf("Error writing data")
+	}
+
+	smallBuf := make([]byte, 3)
+	n, readErr = stream.Read(smallBuf)
+	if readErr != nil {
+		t.Fatalf("Error reading data from stream: %s", readErr)
+	}
+	if n != 3 {
+		t.Fatalf("Unexpected number of bytes read:\nActual: %d\nExpected: 3", n)
+	}
+	if bytes.Compare(smallBuf[:n], []byte("hel")) != 0 {
+		t.Fatalf("Did not receive expected message:\nActual: %s\nExpected: %s", smallBuf[:n], message)
+	}
+	n, readErr = stream.Read(smallBuf)
+	if readErr != nil {
+		t.Fatalf("Error reading data from stream: %s", readErr)
+	}
+	if n != 2 {
+		t.Fatalf("Unexpected number of bytes read:\nActual: %d\nExpected: 2", n)
+	}
+	if bytes.Compare(smallBuf[:n], []byte("lo")) != 0 {
+		t.Fatalf("Did not receive expected message:\nActual: %s\nExpected: lo", smallBuf[:n])
+	}
+
+	n, readErr = stream.Read(buf)
+	if readErr != io.EOF {
+		t.Fatalf("Expected EOF reading from finished stream, read %d bytes", n)
+	}
+
+	streamCloseErr := stream.Close()
+	if streamCloseErr != nil {
+		t.Fatalf("Error closing stream: %s", streamCloseErr)
+	}
+
+	// Closing again should return nil
+	streamCloseErr = stream.Close()
+	if streamCloseErr != nil {
+		t.Fatalf("Error closing stream: %s", streamCloseErr)
+	}
+
+	authenticated = false
+	badStream, badStreamErr := spdyConn.CreateStream(http.Header{}, nil, false)
+	if badStreamErr != nil {
+		t.Fatalf("Error creating stream: %s", badStreamErr)
+	}
+
+	waitErr = badStream.Wait()
+	if waitErr == nil {
+		t.Fatalf("Did not receive error creating stream")
+	}
+	if waitErr != spdystream.ErrReset {
+		t.Fatalf("Unexpected error creating stream: %s", waitErr)
+	}
+
+	spdyCloseErr := spdyConn.Close()
+	if spdyCloseErr != nil {
+		t.Fatalf("Error closing spdy connection: %s", spdyCloseErr)
+	}
+}
+
+var authenticated bool
+
+func authStreamHandler(header http.Header, slot uint8, parent uint32) bool {
+	return authenticated
+}
diff --git a/Godeps/_workspace/src/github.com/jbenet/go-datastore/coalesce/coalesce.go b/Godeps/_workspace/src/github.com/jbenet/go-datastore/coalesce/coalesce.go
index e85a4b49132..976ae4dbf7c 100644
--- a/Godeps/_workspace/src/github.com/jbenet/go-datastore/coalesce/coalesce.go
+++ b/Godeps/_workspace/src/github.com/jbenet/go-datastore/coalesce/coalesce.go
@@ -8,10 +8,10 @@ import (
 	dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/query"
 )
 
+// parent keys
 var (
-	putKey = "put"
-	getKey = // parent keys
-	"get"
+	putKey    = "put"
+	getKey    = "get"
 	hasKey    = "has"
 	deleteKey = "delete"
 )
diff --git a/Godeps/_workspace/src/github.com/jbenet/go-datastore/elastigo/datastore.go b/Godeps/_workspace/src/github.com/jbenet/go-datastore/elastigo/datastore.go
index 8058d19a853..e77bf755423 100644
--- a/Godeps/_workspace/src/github.com/jbenet/go-datastore/elastigo/datastore.go
+++ b/Godeps/_workspace/src/github.com/jbenet/go-datastore/elastigo/datastore.go
@@ -6,9 +6,10 @@ import (
 	"net/url"
 	"strings"
 
-	"github.com/codahale/blake2"
 	ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
 	query "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/query"
+
+	"github.com/codahale/blake2"
 	"github.com/mattbaird/elastigo/api"
 	"github.com/mattbaird/elastigo/core"
 )
diff --git a/Godeps/_workspace/src/github.com/jbenet/go-datastore/flatfs/flatfs.go b/Godeps/_workspace/src/github.com/jbenet/go-datastore/flatfs/flatfs.go
index 07502114e20..f85ad05ddb4 100644
--- a/Godeps/_workspace/src/github.com/jbenet/go-datastore/flatfs/flatfs.go
+++ b/Godeps/_workspace/src/github.com/jbenet/go-datastore/flatfs/flatfs.go
@@ -15,6 +15,7 @@ import (
 	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
 	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/query"
 	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-os-rename"
+
 	logging "github.com/ipfs/go-ipfs/vendor/QmQg1J6vikuXF9oDvm4wpdeAUvvkVEKW1EYDw9HhTMnP2b/go-log"
 )
 
@@ -33,11 +34,14 @@ type Datastore struct {
 	path string
 	// length of the dir splay prefix, in bytes of hex digits
 	hexPrefixLen int
+
+	// synchronize all writes and directory changes for added safety
+	sync bool
 }
 
 var _ datastore.Datastore = (*Datastore)(nil)
 
-func New(path string, prefixLen int) (*Datastore, error) {
+func New(path string, prefixLen int, sync bool) (*Datastore, error) {
 	if prefixLen <= 0 || prefixLen > maxPrefixLen {
 		return nil, ErrBadPrefixLen
 	}
@@ -45,6 +49,7 @@ func New(path string, prefixLen int) (*Datastore, error) {
 		path: path,
 		// convert from binary bytes to bytes of hex encoding
 		hexPrefixLen: prefixLen * hex.EncodedLen(1),
+		sync:         sync,
 	}
 	return fs, nil
 }
@@ -80,8 +85,10 @@ func (fs *Datastore) makePrefixDir(dir string) error {
 	// it, the creation of the prefix dir itself might not be
 	// durable yet. Sync the root dir after a successful mkdir of
 	// a prefix dir, just to be paranoid.
- if err := syncDir(fs.path); err != nil { - return err + if fs.sync { + if err := syncDir(fs.path); err != nil { + return err + } } return nil } @@ -148,8 +155,10 @@ func (fs *Datastore) doPut(key datastore.Key, val []byte) error { if _, err := tmp.Write(val); err != nil { return err } - if err := tmp.Sync(); err != nil { - return err + if fs.sync { + if err := tmp.Sync(); err != nil { + return err + } } if err := tmp.Close(); err != nil { return err @@ -162,8 +171,10 @@ func (fs *Datastore) doPut(key datastore.Key, val []byte) error { } removed = true - if err := syncDir(dir); err != nil { - return err + if fs.sync { + if err := syncDir(dir); err != nil { + return err + } } return nil } @@ -213,8 +224,10 @@ func (fs *Datastore) putMany(data map[datastore.Key]interface{}) error { // Now we sync everything // sync and close files for fi, _ := range files { - if err := fi.Sync(); err != nil { - return err + if fs.sync { + if err := fi.Sync(); err != nil { + return err + } } if err := fi.Close(); err != nil { @@ -236,15 +249,17 @@ func (fs *Datastore) putMany(data map[datastore.Key]interface{}) error { } // now sync the dirs for those files - for _, dir := range dirsToSync { - if err := syncDir(dir); err != nil { - return err + if fs.sync { + for _, dir := range dirsToSync { + if err := syncDir(dir); err != nil { + return err + } } - } - // sync top flatfs dir - if err := syncDir(fs.path); err != nil { - return err + // sync top flatfs dir + if err := syncDir(fs.path); err != nil { + return err + } } return nil diff --git a/Godeps/_workspace/src/github.com/jbenet/go-datastore/flatfs/flatfs_test.go b/Godeps/_workspace/src/github.com/jbenet/go-datastore/flatfs/flatfs_test.go index cd36d684e2b..f63b74bf763 100644 --- a/Godeps/_workspace/src/github.com/jbenet/go-datastore/flatfs/flatfs_test.go +++ b/Godeps/_workspace/src/github.com/jbenet/go-datastore/flatfs/flatfs_test.go @@ -8,11 +8,12 @@ import ( "runtime" "testing" - rand "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/dustin/randbo" "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/flatfs" "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/query" dstest "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/test" + + rand "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/dustin/randbo" ) func tempdir(t testing.TB) (path string, cleanup func()) { @@ -34,7 +35,7 @@ func TestBadPrefixLen(t *testing.T) { defer cleanup() for i := 0; i > -3; i-- { - _, err := flatfs.New(temp, 0) + _, err := flatfs.New(temp, i, false) if g, e := err, flatfs.ErrBadPrefixLen; g != e { t.Errorf("expected ErrBadPrefixLen, got: %v", g) } @@ -45,7 +46,7 @@ func TestPutBadValueType(t *testing.T) { temp, cleanup := tempdir(t) defer cleanup() - fs, err := flatfs.New(temp, 2) + fs, err := flatfs.New(temp, 2, false) if err != nil { t.Fatalf("New fail: %v\n", err) } @@ -60,7 +61,7 @@ func TestPut(t *testing.T) { temp, cleanup := tempdir(t) defer cleanup() - fs, err := flatfs.New(temp, 2) + fs, err := flatfs.New(temp, 2, false) if err != nil { t.Fatalf("New fail: %v\n", err) } @@ -75,7 +76,7 @@ func TestGet(t *testing.T) { temp, cleanup := tempdir(t) defer cleanup() - fs, err := flatfs.New(temp, 2) + fs, err := flatfs.New(temp, 2, false) if err != nil { t.Fatalf("New fail: %v\n", err) } @@ -103,7 +104,7 @@ func TestPutOverwrite(t *testing.T) { temp, cleanup 
:= tempdir(t) defer cleanup() - fs, err := flatfs.New(temp, 2) + fs, err := flatfs.New(temp, 2, false) if err != nil { t.Fatalf("New fail: %v\n", err) } @@ -135,7 +136,7 @@ func TestGetNotFoundError(t *testing.T) { temp, cleanup := tempdir(t) defer cleanup() - fs, err := flatfs.New(temp, 2) + fs, err := flatfs.New(temp, 2, false) if err != nil { t.Fatalf("New fail: %v\n", err) } @@ -153,7 +154,7 @@ func TestStorage(t *testing.T) { const prefixLen = 2 const prefix = "7175" const target = prefix + string(os.PathSeparator) + "71757578.data" - fs, err := flatfs.New(temp, prefixLen) + fs, err := flatfs.New(temp, prefixLen, false) if err != nil { t.Fatalf("New fail: %v\n", err) } @@ -208,7 +209,7 @@ func TestHasNotFound(t *testing.T) { temp, cleanup := tempdir(t) defer cleanup() - fs, err := flatfs.New(temp, 2) + fs, err := flatfs.New(temp, 2, false) if err != nil { t.Fatalf("New fail: %v\n", err) } @@ -226,7 +227,7 @@ func TestHasFound(t *testing.T) { temp, cleanup := tempdir(t) defer cleanup() - fs, err := flatfs.New(temp, 2) + fs, err := flatfs.New(temp, 2, false) if err != nil { t.Fatalf("New fail: %v\n", err) } @@ -248,7 +249,7 @@ func TestDeleteNotFound(t *testing.T) { temp, cleanup := tempdir(t) defer cleanup() - fs, err := flatfs.New(temp, 2) + fs, err := flatfs.New(temp, 2, false) if err != nil { t.Fatalf("New fail: %v\n", err) } @@ -263,7 +264,7 @@ func TestDeleteFound(t *testing.T) { temp, cleanup := tempdir(t) defer cleanup() - fs, err := flatfs.New(temp, 2) + fs, err := flatfs.New(temp, 2, false) if err != nil { t.Fatalf("New fail: %v\n", err) } @@ -288,7 +289,7 @@ func TestQuerySimple(t *testing.T) { temp, cleanup := tempdir(t) defer cleanup() - fs, err := flatfs.New(temp, 2) + fs, err := flatfs.New(temp, 2, false) if err != nil { t.Fatalf("New fail: %v\n", err) } @@ -324,7 +325,7 @@ func TestBatchPut(t *testing.T) { temp, cleanup := tempdir(t) defer cleanup() - fs, err := flatfs.New(temp, 2) + fs, err := flatfs.New(temp, 2, false) if err != nil { t.Fatalf("New fail: %v\n", err) } @@ -336,7 +337,7 @@ func TestBatchDelete(t *testing.T) { temp, cleanup := tempdir(t) defer cleanup() - fs, err := flatfs.New(temp, 2) + fs, err := flatfs.New(temp, 2, false) if err != nil { t.Fatalf("New fail: %v\n", err) } @@ -359,7 +360,7 @@ func BenchmarkConsecutivePut(b *testing.B) { temp, cleanup := tempdir(b) defer cleanup() - fs, err := flatfs.New(temp, 2) + fs, err := flatfs.New(temp, 2, false) if err != nil { b.Fatalf("New fail: %v\n", err) } @@ -389,7 +390,7 @@ func BenchmarkBatchedPut(b *testing.B) { temp, cleanup := tempdir(b) defer cleanup() - fs, err := flatfs.New(temp, 2) + fs, err := flatfs.New(temp, 2, false) if err != nil { b.Fatalf("New fail: %v\n", err) } diff --git a/Godeps/_workspace/src/github.com/jbenet/go-datastore/lru/datastore_test.go b/Godeps/_workspace/src/github.com/jbenet/go-datastore/lru/datastore_test.go index b1822471d8a..dc31b19a16e 100644 --- a/Godeps/_workspace/src/github.com/jbenet/go-datastore/lru/datastore_test.go +++ b/Godeps/_workspace/src/github.com/jbenet/go-datastore/lru/datastore_test.go @@ -5,10 +5,11 @@ import ( "testing" ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" - lru "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/lru" // Hook up gocheck into the "go test" runner. + lru "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/lru" . "gopkg.in/check.v1" ) +// Hook up gocheck into the "go test" runner. 
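+// TestingT runs every suite that has been registered with gocheck's Suite().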
func Test(t *testing.T) { TestingT(t) } type DSSuite struct{} diff --git a/Godeps/_workspace/src/github.com/jbenet/go-datastore/timecache/timecache.go b/Godeps/_workspace/src/github.com/jbenet/go-datastore/timecache/timecache.go index 1da1ef02c2d..5ac675d598c 100644 --- a/Godeps/_workspace/src/github.com/jbenet/go-datastore/timecache/timecache.go +++ b/Godeps/_workspace/src/github.com/jbenet/go-datastore/timecache/timecache.go @@ -9,10 +9,10 @@ import ( dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/query" ) +// op keys var ( - putKey = "put" - getKey = // op keys - "get" + putKey = "put" + getKey = "get" hasKey = "has" deleteKey = "delete" ) diff --git a/Godeps/_workspace/src/github.com/jbenet/go-msgio/msgio.go b/Godeps/_workspace/src/github.com/jbenet/go-msgio/msgio.go index 4bb92debedb..a740710d846 100644 --- a/Godeps/_workspace/src/github.com/jbenet/go-msgio/msgio.go +++ b/Godeps/_workspace/src/github.com/jbenet/go-msgio/msgio.go @@ -1,6 +1,7 @@ package msgio import ( + "bufio" "errors" "io" "sync" @@ -75,7 +76,8 @@ type ReadWriteCloser interface { // writer is the underlying type that implements the Writer interface. type writer struct { - W io.Writer + W io.Writer + buf *bufio.Writer lock sync.Locker } @@ -83,7 +85,7 @@ type writer struct { // NewWriter wraps an io.Writer with a msgio framed writer. The msgio.Writer // will write the length prefix of every message written. func NewWriter(w io.Writer) WriteCloser { - return &writer{W: w, lock: new(sync.Mutex)} + return &writer{W: w, buf: bufio.NewWriter(w), lock: new(sync.Mutex)} } func (s *writer) Write(msg []byte) (int, error) { @@ -100,8 +102,13 @@ func (s *writer) WriteMsg(msg []byte) (err error) { if err := WriteLen(s.W, len(msg)); err != nil { return err } - _, err = s.W.Write(msg) - return err + + _, err = s.buf.Write(msg) + if err != nil { + return err + } + + return s.buf.Flush() } func (s *writer) Close() error { diff --git a/Godeps/_workspace/src/github.com/jbenet/go-stream-muxer/Godeps/Godeps.json b/Godeps/_workspace/src/github.com/jbenet/go-stream-muxer/Godeps/Godeps.json index 47481401c27..346185df7bf 100644 --- a/Godeps/_workspace/src/github.com/jbenet/go-stream-muxer/Godeps/Godeps.json +++ b/Godeps/_workspace/src/github.com/jbenet/go-stream-muxer/Godeps/Godeps.json @@ -19,11 +19,11 @@ }, { "ImportPath": "github.com/whyrusleeping/go-multiplex", - "Rev": "ce5baa716247510379cb7640a14da857afd3b622" + "Rev": "474b9aebeb391746f304ddf7c764a5da12319857" }, { "ImportPath": "github.com/whyrusleeping/go-multistream", - "Rev": "08e8f9c9f5665ed0c63ffde4fa5ef1d5fb3d516d" + "Rev": "31bb014803a6eba2261bda5593e42c016a5f33bb" } ] } diff --git a/Godeps/_workspace/src/github.com/jbenet/go-stream-muxer/multiplex/multiplex.go b/Godeps/_workspace/src/github.com/jbenet/go-stream-muxer/multiplex/multiplex.go index e3257d7afc6..69b093b67c0 100644 --- a/Godeps/_workspace/src/github.com/jbenet/go-stream-muxer/multiplex/multiplex.go +++ b/Godeps/_workspace/src/github.com/jbenet/go-stream-muxer/multiplex/multiplex.go @@ -5,7 +5,7 @@ import ( "net" smux "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-stream-muxer" - mp "github.com/whyrusleeping/go-multiplex" // Conn is a connection to a remote peer. + mp "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/whyrusleeping/go-multiplex" // Conn is a connection to a remote peer. 
) var ErrUseServe = errors.New("not implemented, use Serve") @@ -29,15 +29,19 @@ func (c *conn) OpenStream() (smux.Stream, error) { // AcceptStream accepts a stream opened by the other side. func (c *conn) AcceptStream() (smux.Stream, error) { - return nil, ErrUseServe + return c.Multiplex.Accept() } // Serve starts listening for incoming requests and handles them // using given StreamHandler func (c *conn) Serve(handler smux.StreamHandler) { - c.Multiplex.Serve(func(s *mp.Stream) { - handler(s) - }) + for { + s, err := c.AcceptStream() + if err != nil { + return + } + go handler(s) + } } // Transport is a go-peerstream transport that constructs diff --git a/Godeps/_workspace/src/github.com/jbenet/go-stream-muxer/multistream/multistream.go b/Godeps/_workspace/src/github.com/jbenet/go-stream-muxer/multistream/multistream.go index fe04c4d196a..d60396ab187 100644 --- a/Godeps/_workspace/src/github.com/jbenet/go-stream-muxer/multistream/multistream.go +++ b/Godeps/_workspace/src/github.com/jbenet/go-stream-muxer/multistream/multistream.go @@ -5,7 +5,7 @@ package multistream import ( "net" - mss "github.com/whyrusleeping/go-multistream" + mss "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/whyrusleeping/go-multistream" smux "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-stream-muxer" multiplex "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-stream-muxer/multiplex" diff --git a/Godeps/_workspace/src/github.com/jbenet/go-stream-muxer/spdystream/spdystream.go b/Godeps/_workspace/src/github.com/jbenet/go-stream-muxer/spdystream/spdystream.go index 17baf08fa6c..25830832c89 100644 --- a/Godeps/_workspace/src/github.com/jbenet/go-stream-muxer/spdystream/spdystream.go +++ b/Godeps/_workspace/src/github.com/jbenet/go-stream-muxer/spdystream/spdystream.go @@ -5,7 +5,7 @@ import ( "net" "net/http" - ss "github.com/docker/spdystream" + ss "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/docker/spdystream" smux "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-stream-muxer" ) diff --git a/Godeps/_workspace/src/github.com/whyrusleeping/go-multistream/README.md b/Godeps/_workspace/src/github.com/whyrusleeping/go-multistream/README.md new file mode 100644 index 00000000000..1ade9dc60da --- /dev/null +++ b/Godeps/_workspace/src/github.com/whyrusleeping/go-multistream/README.md @@ -0,0 +1,43 @@ +#Multistream-select router +This package implements a simple stream router for the multistream-select protocol. +The protocol is defined [here](https://github.com/jbenet/multistream). 
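+
+On the wire, every token — the `/multistream/1.0.0` protocol ID, each
+proposed protocol, `ls`, and the `na` rejection — is framed the same way:
+a uvarint length prefix (which counts the trailing newline), the bytes,
+then `\n` (see `delimWrite` and `ReadNextToken` below).
+
+For the dialing side, the package provides `SelectProtoOrFail`. A minimal
+client sketch (assuming the `/cats` server from the usage example below is
+already listening on :8765):
+
+```go
+package main
+
+import (
+	"io"
+	"net"
+	"os"
+
+	ms "github.com/whyrusleeping/go-multistream"
+)
+
+func main() {
+	con, err := net.Dial("tcp", "localhost:8765")
+	if err != nil {
+		panic(err)
+	}
+	defer con.Close()
+
+	// handshake, then propose exactly one protocol; returns
+	// ErrNotSupported if the server answers "na"
+	if err := ms.SelectProtoOrFail("/cats", con); err != nil {
+		panic(err)
+	}
+
+	// the /cats handler prints its greeting and closes its end
+	io.Copy(os.Stdout, con)
+}
+```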
+ + +Usage: + +```go +package main + +import ( + "fmt" + ms "github.com/whyrusleeping/go-multistream" + "io" + "net" +) + +func main() { + mux := ms.NewMultistreamMuxer() + mux.AddHandler("/cats", func(rwc io.ReadWriteCloser) error { + fmt.Fprintln(rwc, "HELLO I LIKE CATS") + return rwc.Close() + }) + mux.AddHandler("/dogs", func(rwc io.ReadWriteCloser) error { + fmt.Fprintln(rwc, "HELLO I LIKE DOGS") + return rwc.Close() + }) + + list, err := net.Listen("tcp", ":8765") + if err != nil { + panic(err) + } + + for { + con, err := list.Accept() + if err != nil { + panic(err) + } + + go mux.Handle(con) + } +} +``` diff --git a/Godeps/_workspace/src/github.com/whyrusleeping/go-multistream/client.go b/Godeps/_workspace/src/github.com/whyrusleeping/go-multistream/client.go new file mode 100644 index 00000000000..622fa3b10b3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/whyrusleeping/go-multistream/client.go @@ -0,0 +1,75 @@ +package multistream + +import ( + "errors" + "io" +) + +var ErrNotSupported = errors.New("protocol not supported") + +func SelectProtoOrFail(proto string, rwc io.ReadWriteCloser) error { + err := handshake(rwc) + if err != nil { + return err + } + + return trySelect(proto, rwc) +} + +func SelectOneOf(protos []string, rwc io.ReadWriteCloser) (string, error) { + err := handshake(rwc) + if err != nil { + return "", err + } + + for _, p := range protos { + err := trySelect(p, rwc) + switch err { + case nil: + return p, nil + case ErrNotSupported: + default: + return "", err + } + } + return "", ErrNotSupported +} + +func handshake(rwc io.ReadWriteCloser) error { + tok, err := ReadNextToken(rwc) + if err != nil { + return err + } + + if tok != ProtocolID { + return errors.New("received mismatch in protocol id") + } + + err = delimWrite(rwc, []byte(ProtocolID)) + if err != nil { + return err + } + + return nil +} + +func trySelect(proto string, rwc io.ReadWriteCloser) error { + err := delimWrite(rwc, []byte(proto)) + if err != nil { + return err + } + + tok, err := ReadNextToken(rwc) + if err != nil { + return err + } + + switch tok { + case proto: + return nil + case "na": + return ErrNotSupported + default: + return errors.New("unrecognized response: " + tok) + } +} diff --git a/Godeps/_workspace/src/github.com/whyrusleeping/go-multistream/lazy.go b/Godeps/_workspace/src/github.com/whyrusleeping/go-multistream/lazy.go new file mode 100644 index 00000000000..e86296a769d --- /dev/null +++ b/Godeps/_workspace/src/github.com/whyrusleeping/go-multistream/lazy.go @@ -0,0 +1,124 @@ +package multistream + +import ( + "fmt" + "io" + "sync" +) + +type Multistream interface { + io.ReadWriteCloser + Protocol() string +} + +func NewMSSelect(c io.ReadWriteCloser, proto string) Multistream { + return NewMultistream(NewMultistream(c, ProtocolID), proto) +} + +func NewMultistream(c io.ReadWriteCloser, proto string) Multistream { + return &lazyConn{ + proto: proto, + con: c, + } +} + +type lazyConn struct { + rhandshake bool // only accessed by 'Read' should not call read async + + rhlock sync.Mutex + rhsync bool //protected by mutex + rerr error + + whandshake bool + + whlock sync.Mutex + whsync bool + werr error + + proto string + con io.ReadWriteCloser +} + +func (l *lazyConn) Protocol() string { + return l.proto +} + +func (l *lazyConn) Read(b []byte) (int, error) { + if !l.rhandshake { + go l.writeHandshake() + err := l.readHandshake() + if err != nil { + return 0, err + } + + l.rhandshake = true + } + + if len(b) == 0 { + return 0, nil + } + + return l.con.Read(b) 
+} + +func (l *lazyConn) readHandshake() error { + l.rhlock.Lock() + defer l.rhlock.Unlock() + + // if we've already done this, exit + if l.rhsync { + return l.rerr + } + l.rhsync = true + + // read protocol + tok, err := ReadNextToken(l.con) + if err != nil { + l.rerr = err + return err + } + + if tok != l.proto { + l.rerr = fmt.Errorf("protocol mismatch in lazy handshake ( %s != %s )", tok, l.proto) + return l.rerr + } + + return nil +} + +func (l *lazyConn) writeHandshake() error { + l.whlock.Lock() + defer l.whlock.Unlock() + + if l.whsync { + return l.werr + } + + l.whsync = true + + err := delimWrite(l.con, []byte(l.proto)) + if err != nil { + l.werr = err + return err + } + + return nil +} + +func (l *lazyConn) Write(b []byte) (int, error) { + if !l.whandshake { + go l.readHandshake() + err := l.writeHandshake() + if err != nil { + return 0, err + } + + l.whandshake = true + } + + return l.con.Write(b) +} + +func (l *lazyConn) Close() error { + return l.con.Close() +} diff --git a/Godeps/_workspace/src/github.com/whyrusleeping/go-multistream/multistream.go b/Godeps/_workspace/src/github.com/whyrusleeping/go-multistream/multistream.go new file mode 100644 index 00000000000..ecec8df73a1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/whyrusleeping/go-multistream/multistream.go @@ -0,0 +1,201 @@ +package multistream + +import ( + "bytes" + "encoding/binary" + "errors" + "io" + "sync" +) + +var ErrTooLarge = errors.New("incoming message was too large") + +const ProtocolID = "/multistream/1.0.0" + +type HandlerFunc func(io.ReadWriteCloser) error + +type MultistreamMuxer struct { + handlerlock sync.Mutex + handlers map[string]HandlerFunc +} + +func NewMultistreamMuxer() *MultistreamMuxer { + return &MultistreamMuxer{handlers: make(map[string]HandlerFunc)} +} + +func writeUvarint(w io.Writer, i uint64) error { + varintbuf := make([]byte, 32) + n := binary.PutUvarint(varintbuf, i) + _, err := w.Write(varintbuf[:n]) + if err != nil { + return err + } + return nil +} + +func delimWrite(w io.Writer, mes []byte) error { + err := writeUvarint(w, uint64(len(mes)+1)) + if err != nil { + return err + } + + _, err = w.Write(mes) + if err != nil { + return err + } + + _, err = w.Write([]byte{'\n'}) + if err != nil { + return err + } + return nil +} + +func (msm *MultistreamMuxer) AddHandler(protocol string, handler HandlerFunc) { + msm.handlerlock.Lock() + msm.handlers[protocol] = handler + msm.handlerlock.Unlock() +} + +func (msm *MultistreamMuxer) RemoveHandler(protocol string) { + msm.handlerlock.Lock() + delete(msm.handlers, protocol) + msm.handlerlock.Unlock() +} + +func (msm *MultistreamMuxer) Protocols() []string { + var out []string + msm.handlerlock.Lock() + for k, _ := range msm.handlers { + out = append(out, k) + } + msm.handlerlock.Unlock() + return out +} + +func (msm *MultistreamMuxer) Negotiate(rwc io.ReadWriteCloser) (string, HandlerFunc, error) { + // Send our protocol ID + err := delimWrite(rwc, []byte(ProtocolID)) + if err != nil { + return "", nil, err + } + + line, err := ReadNextToken(rwc) + if err != nil { + return "", nil, err + } + + if line != ProtocolID { + rwc.Close() + return "", nil, errors.New("client connected with incorrect version") + } + +loop: + for { + // Now read and respond to commands until they send a valid protocol id + tok, err := ReadNextToken(rwc) + if err != nil { + return "", nil, err + } + + switch tok { + case "ls": + err := msm.Ls(rwc) + if err != nil { + return "", nil, err + } + default: + msm.handlerlock.Lock() + h, ok := 
msm.handlers[tok]
+			msm.handlerlock.Unlock()
+			if !ok {
+				err := delimWrite(rwc, []byte("na"))
+				if err != nil {
+					return "", nil, err
+				}
+				continue loop
+			}
+
+			err := delimWrite(rwc, []byte(tok))
+			if err != nil {
+				return "", nil, err
+			}
+
+			// hand off processing to the sub-protocol handler
+			return tok, h, nil
+		}
+	}
+
+}
+
+func (msm *MultistreamMuxer) Ls(rwc io.Writer) error {
+	buf := new(bytes.Buffer)
+	msm.handlerlock.Lock()
+	for proto, _ := range msm.handlers {
+		err := delimWrite(buf, []byte(proto))
+		if err != nil {
+			msm.handlerlock.Unlock()
+			return err
+		}
+	}
+	msm.handlerlock.Unlock()
+	err := delimWrite(rwc, buf.Bytes())
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func (msm *MultistreamMuxer) Handle(rwc io.ReadWriteCloser) error {
+	_, h, err := msm.Negotiate(rwc)
+	if err != nil {
+		return err
+	}
+	return h(rwc)
+}
+
+func ReadNextToken(rw io.ReadWriter) (string, error) {
+	br := &byteReader{rw}
+	length, err := binary.ReadUvarint(br)
+	if err != nil {
+		return "", err
+	}
+
+	if length > 64*1024 {
+		err := delimWrite(rw, []byte("messages over 64k are not allowed"))
+		if err != nil {
+			return "", err
+		}
+		return "", ErrTooLarge
+	}
+
+	buf := make([]byte, length)
+	_, err = io.ReadFull(rw, buf)
+	if err != nil {
+		return "", err
+	}
+
+	if len(buf) == 0 || buf[length-1] != '\n' {
+		return "", errors.New("message did not have trailing newline")
+	}
+
+	// slice off the trailing newline
+	buf = buf[:length-1]
+
+	return string(buf), nil
+}
+
+// byteReader implements the ByteReader interface that ReadUVarint requires
+type byteReader struct {
+	io.Reader
+}
+
+func (br *byteReader) ReadByte() (byte, error) {
+	var b [1]byte
+	_, err := br.Read(b[:])
+
+	if err != nil {
+		return 0, err
+	}
+	return b[0], nil
+}
diff --git a/Godeps/_workspace/src/github.com/whyrusleeping/go-multistream/multistream_test.go b/Godeps/_workspace/src/github.com/whyrusleeping/go-multistream/multistream_test.go
new file mode 100644
index 00000000000..aaf0f7f5734
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/whyrusleeping/go-multistream/multistream_test.go
@@ -0,0 +1,259 @@
+package multistream
+
+import (
+	"crypto/rand"
+	"io"
+	"net"
+	"testing"
+	"time"
+)
+
+func TestProtocolNegotiation(t *testing.T) {
+	a, b := net.Pipe()
+
+	mux := NewMultistreamMuxer()
+	mux.AddHandler("/a", nil)
+	mux.AddHandler("/b", nil)
+	mux.AddHandler("/c", nil)
+
+	done := make(chan struct{})
+	go func() {
+		selected, _, err := mux.Negotiate(a)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if selected != "/a" {
+			t.Fatal("incorrect protocol selected")
+		}
+		close(done)
+	}()
+
+	err := SelectProtoOrFail("/a", b)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	select {
+	case <-time.After(time.Second):
+		t.Fatal("protocol negotiation didn't complete")
+	case <-done:
+	}
+
+	verifyPipe(t, a, b)
+}
+
+func TestSelectOne(t *testing.T) {
+	a, b := net.Pipe()
+
+	mux := NewMultistreamMuxer()
+	mux.AddHandler("/a", nil)
+	mux.AddHandler("/b", nil)
+	mux.AddHandler("/c", nil)
+
+	done := make(chan struct{})
+	go func() {
+		selected, _, err := mux.Negotiate(a)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if selected != "/c" {
+			t.Fatal("incorrect protocol selected")
+		}
+		close(done)
+	}()
+
+	sel, err := SelectOneOf([]string{"/d", "/e", "/c"}, b)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if sel != "/c" {
+		t.Fatal("selected wrong protocol")
+	}
+
+	select {
+	case <-time.After(time.Second):
+		t.Fatal("protocol negotiation didn't complete")
+	case <-done:
+	}
+
+	verifyPipe(t, a, b)
+}
+
+func TestSelectOneAndWrite(t *testing.T) {
+	a, b := net.Pipe()
+
+	mux := NewMultistreamMuxer()
+	mux.AddHandler("/a", nil)
+	mux.AddHandler("/b", nil)
+	mux.AddHandler("/c", nil)
+
+	done := make(chan struct{})
+	go func() {
+		selected, _, err := mux.Negotiate(a)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if selected != "/c" {
+			t.Fatal("incorrect protocol selected")
+		}
+		close(done)
+	}()
+
+	sel, err := SelectOneOf([]string{"/d", "/e", "/c"}, b)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if sel != "/c" {
+		t.Fatal("selected wrong protocol")
+	}
+
+	select {
+	case <-time.After(time.Second):
+		t.Fatal("protocol negotiation didn't complete")
+	case <-done:
+	}
+
+	verifyPipe(t, a, b)
+}
+
+func TestLazyConns(t *testing.T) {
+	a, b := net.Pipe()
+
+	mux := NewMultistreamMuxer()
+	mux.AddHandler("/a", nil)
+	mux.AddHandler("/b", nil)
+	mux.AddHandler("/c", nil)
+
+	la := NewMSSelect(a, "/c")
+	lb := NewMSSelect(b, "/c")
+
+	verifyPipe(t, la, lb)
+}
+
+func TestLazyAndMux(t *testing.T) {
+	a, b := net.Pipe()
+
+	mux := NewMultistreamMuxer()
+	mux.AddHandler("/a", nil)
+	mux.AddHandler("/b", nil)
+	mux.AddHandler("/c", nil)
+
+	done := make(chan struct{})
+	go func() {
+		selected, _, err := mux.Negotiate(a)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if selected != "/c" {
+			t.Fatal("incorrect protocol selected")
+		}
+
+		msg := make([]byte, 5)
+		_, err = a.Read(msg)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		close(done)
+	}()
+
+	lb := NewMSSelect(b, "/c")
+
+	// do a write to push the handshake through
+	_, err := lb.Write([]byte("hello"))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	select {
+	case <-time.After(time.Second):
+		t.Fatal("failed to complete in time")
+	case <-done:
+	}
+
+	verifyPipe(t, a, lb)
+}
+
+func TestLazyAndMuxWrite(t *testing.T) {
+	a, b := net.Pipe()
+
+	mux := NewMultistreamMuxer()
+	mux.AddHandler("/a", nil)
+	mux.AddHandler("/b", nil)
+	mux.AddHandler("/c", nil)
+
+	done := make(chan struct{})
+	go func() {
+		selected, _, err := mux.Negotiate(a)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if selected != "/c" {
+			t.Fatal("incorrect protocol selected")
+		}
+
+		_, err = a.Write([]byte("hello"))
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		close(done)
+	}()
+
+	lb := NewMSSelect(b, "/c")
+
+	// do a read to push the handshake through
+	msg := make([]byte, 5)
+	_, err := lb.Read(msg)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if string(msg) != "hello" {
+		t.Fatal("wrong!")
+	}
+
+	select {
+	case <-time.After(time.Second):
+		t.Fatal("failed to complete in time")
+	case <-done:
+	}
+
+	verifyPipe(t, a, lb)
+}
+
+func verifyPipe(t *testing.T, a, b io.ReadWriter) {
+	mes := make([]byte, 1024)
+	rand.Read(mes)
+	go func() {
+		b.Write(mes)
+		a.Write(mes)
+	}()
+
+	buf := make([]byte, len(mes))
+	n, err := a.Read(buf)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if n != len(buf) {
+		t.Fatal("failed to read enough")
+	}
+
+	if string(buf) != string(mes) {
+		t.Fatal("somehow read wrong message")
+	}
+
+	n, err = b.Read(buf)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if n != len(buf) {
+		t.Fatal("failed to read enough")
+	}
+
+	if string(buf) != string(mes) {
+		t.Fatal("somehow read wrong message")
+	}
+}
diff --git a/assets/bindata.go b/assets/bindata.go
index c8d41ca7f24..b13848679d9 100644
--- a/assets/bindata.go
+++ b/assets/bindata.go
@@ -94,7 +94,7 @@ func initDocAbout() (*asset, error) {
 		return nil, err
 	}
 
-	info := bindataFileInfo{name: "init-doc/about", size: 1677, mode: os.FileMode(420), modTime: time.Unix(1431511975, 0)}
+	info := bindataFileInfo{name: "init-doc/about", size: 1677, mode: os.FileMode(420), modTime: 
time.Unix(1429745997, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -114,7 +114,7 @@ func initDocContact() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "init-doc/contact", size: 189, mode: os.FileMode(420), modTime: time.Unix(1431511975, 0)} + info := bindataFileInfo{name: "init-doc/contact", size: 189, mode: os.FileMode(420), modTime: time.Unix(1441247038, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -134,7 +134,7 @@ func initDocHelp() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "init-doc/help", size: 311, mode: os.FileMode(420), modTime: time.Unix(1431511975, 0)} + info := bindataFileInfo{name: "init-doc/help", size: 311, mode: os.FileMode(420), modTime: time.Unix(1441247038, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -154,7 +154,7 @@ func initDocQuickStart() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "init-doc/quick-start", size: 1686, mode: os.FileMode(420), modTime: time.Unix(1441256262, 0)} + info := bindataFileInfo{name: "init-doc/quick-start", size: 1686, mode: os.FileMode(420), modTime: time.Unix(1441247038, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -174,7 +174,7 @@ func initDocReadme() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "init-doc/readme", size: 1091, mode: os.FileMode(420), modTime: time.Unix(1431511975, 0)} + info := bindataFileInfo{name: "init-doc/readme", size: 1091, mode: os.FileMode(420), modTime: time.Unix(1441247038, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -194,7 +194,7 @@ func initDocSecurityNotes() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "init-doc/security-notes", size: 1016, mode: os.FileMode(420), modTime: time.Unix(1431511975, 0)} + info := bindataFileInfo{name: "init-doc/security-notes", size: 1016, mode: os.FileMode(420), modTime: time.Unix(1429745997, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -214,7 +214,7 @@ func VendorDirIndexHtmlV100Gxlastpubver() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "../vendor/dir-index-html-v1.0.0/.gxlastpubver", size: 46, mode: os.FileMode(420), modTime: time.Unix(1441386353, 0)} + info := bindataFileInfo{name: "../vendor/dir-index-html-v1.0.0/.gxlastpubver", size: 46, mode: os.FileMode(420), modTime: time.Unix(1441855299, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -234,7 +234,7 @@ func VendorDirIndexHtmlV100ReadmeMd() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "../vendor/dir-index-html-v1.0.0/README.md", size: 153, mode: os.FileMode(420), modTime: time.Unix(1441386353, 0)} + info := bindataFileInfo{name: "../vendor/dir-index-html-v1.0.0/README.md", size: 153, mode: os.FileMode(420), modTime: time.Unix(1441855299, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -254,7 +254,7 @@ func VendorDirIndexHtmlV100DirIndexUncatHtml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "../vendor/dir-index-html-v1.0.0/dir-index-uncat.html", size: 1600, mode: os.FileMode(420), modTime: time.Unix(1441386353, 0)} + info := bindataFileInfo{name: "../vendor/dir-index-html-v1.0.0/dir-index-uncat.html", size: 1600, mode: os.FileMode(420), modTime: time.Unix(1441855299, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -274,7 +274,7 @@ func VendorDirIndexHtmlV100DirIndexHtml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "../vendor/dir-index-html-v1.0.0/dir-index.html", size: 105904, mode: 
os.FileMode(420), modTime: time.Unix(1441386353, 0)}
+	info := bindataFileInfo{name: "../vendor/dir-index-html-v1.0.0/dir-index.html", size: 105904, mode: os.FileMode(420), modTime: time.Unix(1441855299, 0)}
 	a := &asset{bytes: bytes, info: info}
 	return a, nil
 }
@@ -294,7 +294,7 @@ func VendorDirIndexHtmlV100KnowniconsTxt() (*asset, error) {
 		return nil, err
 	}
 
-	info := bindataFileInfo{name: "../vendor/dir-index-html-v1.0.0/knownIcons.txt", size: 305, mode: os.FileMode(420), modTime: time.Unix(1441386353, 0)}
+	info := bindataFileInfo{name: "../vendor/dir-index-html-v1.0.0/knownIcons.txt", size: 305, mode: os.FileMode(420), modTime: time.Unix(1441855299, 0)}
 	a := &asset{bytes: bytes, info: info}
 	return a, nil
 }
@@ -314,7 +314,7 @@ func VendorDirIndexHtmlV100PackageJson() (*asset, error) {
 		return nil, err
 	}
 
-	info := bindataFileInfo{name: "../vendor/dir-index-html-v1.0.0/package.json", size: 53, mode: os.FileMode(420), modTime: time.Unix(1441386353, 0)}
+	info := bindataFileInfo{name: "../vendor/dir-index-html-v1.0.0/package.json", size: 53, mode: os.FileMode(420), modTime: time.Unix(1441855299, 0)}
 	a := &asset{bytes: bytes, info: info}
 	return a, nil
 }
diff --git a/blocks/blockstore/blockstore.go b/blocks/blockstore/blockstore.go
index c4eefaddf3e..bc000df932a 100644
--- a/blocks/blockstore/blockstore.go
+++ b/blocks/blockstore/blockstore.go
@@ -4,6 +4,7 @@ package blockstore
 
 import (
 	"errors"
+	"sync"
 
 	ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
 	dsns "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/namespace"
@@ -24,7 +25,7 @@ var ValueTypeMismatch = errors.New("The retrieved value is not a Block")
 
 var ErrNotFound = errors.New("blockstore: block not found")
 
-// Blockstore wraps a ThreadSafeDatastore
+// Blockstore wraps a Datastore
 type Blockstore interface {
 	DeleteBlock(key.Key) error
 	Has(key.Key) (bool, error)
@@ -35,17 +36,34 @@ type Blockstore interface {
 	AllKeysChan(ctx context.Context) (<-chan key.Key, error)
 }
 
-func NewBlockstore(d ds.ThreadSafeDatastore) Blockstore {
+type GCBlockstore interface {
+	Blockstore
+
+	// GCLock locks the blockstore for garbage collection. No operations
+	// that expect to finish with a pin should occur simultaneously.
+	// Reading during GC is safe, and requires no lock.
+	GCLock() func()
+
+	// PinLock locks the blockstore for sequences of puts expected to finish
+	// with a pin (before GC). Multiple put->pin sequences can write through
+	// at the same time, but GC should not happen simultaneously.
+	// Reading during Pinning is safe, and requires no lock.
+	PinLock() func()
+}
+
+func NewBlockstore(d ds.Batching) *blockstore {
+	var dsb ds.Batching
 	dd := dsns.Wrap(d, BlockPrefix)
+	dsb = dd
 	return &blockstore{
-		datastore: dd,
+		datastore: dsb,
 	}
 }
 
 type blockstore struct {
 	datastore ds.Batching
-	// cant be ThreadSafeDatastore cause namespace.Datastore doesnt support it.
-	// we do check it on `NewBlockstore` though.
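+	// lk backs the GCBlockstore methods below: GCLock takes the write
+	// side of this RWMutex and PinLock the read side, so concurrent
+	// put->pin sequences exclude GC but not each other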
+ + lk sync.RWMutex } func (bs *blockstore) Get(k key.Key) (*blocks.Block, error) { @@ -172,3 +190,13 @@ func (bs *blockstore) AllKeysChan(ctx context.Context) (<-chan key.Key, error) { return output, nil } + +func (bs *blockstore) GCLock() func() { + bs.lk.Lock() + return bs.lk.Unlock +} + +func (bs *blockstore) PinLock() func() { + bs.lk.RLock() + return bs.lk.RUnlock +} diff --git a/blocks/blockstore/write_cache.go b/blocks/blockstore/write_cache.go index 5b2f55a2a2a..52af696e4ae 100644 --- a/blocks/blockstore/write_cache.go +++ b/blocks/blockstore/write_cache.go @@ -8,7 +8,7 @@ import ( ) // WriteCached returns a blockstore that caches up to |size| unique writes (bs.Put). -func WriteCached(bs Blockstore, size int) (Blockstore, error) { +func WriteCached(bs Blockstore, size int) (*writecache, error) { c, err := lru.New(size) if err != nil { return nil, err @@ -58,3 +58,11 @@ func (w *writecache) PutMany(bs []*blocks.Block) error { func (w *writecache) AllKeysChan(ctx context.Context) (<-chan key.Key, error) { return w.blockstore.AllKeysChan(ctx) } + +func (w *writecache) GCLock() func() { + return w.blockstore.(GCBlockstore).GCLock() +} + +func (w *writecache) PinLock() func() { + return w.blockstore.(GCBlockstore).PinLock() +} diff --git a/blocks/key/key_set.go b/blocks/key/key_set.go index f9e177d6a3b..f880ec33edd 100644 --- a/blocks/key/key_set.go +++ b/blocks/key/key_set.go @@ -1,46 +1,39 @@ package key -import ( - "sync" -) - type KeySet interface { Add(Key) + Has(Key) bool Remove(Key) Keys() []Key } -type ks struct { - lock sync.RWMutex - data map[Key]struct{} +type keySet struct { + keys map[Key]struct{} } func NewKeySet() KeySet { - return &ks{ - data: make(map[Key]struct{}), - } + return &keySet{make(map[Key]struct{})} } -func (wl *ks) Add(k Key) { - wl.lock.Lock() - defer wl.lock.Unlock() - - wl.data[k] = struct{}{} +func (ks *keySet) Add(k Key) { + ks.keys[k] = struct{}{} } -func (wl *ks) Remove(k Key) { - wl.lock.Lock() - defer wl.lock.Unlock() - - delete(wl.data, k) +func (ks *keySet) Has(k Key) bool { + _, has := ks.keys[k] + return has } -func (wl *ks) Keys() []Key { - wl.lock.RLock() - defer wl.lock.RUnlock() - keys := make([]Key, 0) - for k := range wl.data { - keys = append(keys, k) +func (ks *keySet) Keys() []Key { + var out []Key + for k := range ks.keys { + out = append(out, k) } - return keys + return out } + +func (ks *keySet) Remove(k Key) { + delete(ks.keys, k) +} + +// TODO: implement disk-backed keyset for working with massive DAGs diff --git a/blocks/set/dbset.go b/blocks/set/dbset.go deleted file mode 100644 index 3db4d313800..00000000000 --- a/blocks/set/dbset.go +++ /dev/null @@ -1,48 +0,0 @@ -package set - -import ( - ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" - "github.com/ipfs/go-ipfs/blocks/bloom" - key "github.com/ipfs/go-ipfs/blocks/key" -) - -type datastoreBlockSet struct { - dstore ds.Datastore - bset BlockSet -} - -// NewDBWrapperSet returns a new blockset wrapping a given datastore -func NewDBWrapperSet(d ds.Datastore, bset BlockSet) BlockSet { - return &datastoreBlockSet{ - dstore: d, - bset: bset, - } -} - -func (d *datastoreBlockSet) AddBlock(k key.Key) { - err := d.dstore.Put(k.DsKey(), []byte{}) - if err != nil { - log.Debugf("blockset put error: %s", err) - } - - d.bset.AddBlock(k) -} - -func (d *datastoreBlockSet) RemoveBlock(k key.Key) { - d.bset.RemoveBlock(k) - if !d.bset.HasKey(k) { - d.dstore.Delete(k.DsKey()) - } -} - -func (d *datastoreBlockSet) HasKey(k key.Key) bool {
return d.bset.HasKey(k) -} - -func (d *datastoreBlockSet) GetBloomFilter() bloom.Filter { - return d.bset.GetBloomFilter() -} - -func (d *datastoreBlockSet) GetKeys() []key.Key { - return d.bset.GetKeys() -} diff --git a/cmd/ipfs/main.go b/cmd/ipfs/main.go index 81d908f9c4b..946f2cf1eb5 100644 --- a/cmd/ipfs/main.go +++ b/cmd/ipfs/main.go @@ -328,7 +328,7 @@ func callCommand(ctx context.Context, req cmds.Request, root *cmds.Command, cmd if isConnRefused(err) { err = repo.ErrApiNotRunning } - return nil, err + return nil, wrapContextCanceled(err) } } else { @@ -685,3 +685,10 @@ func isConnRefused(err error) bool { return netoperr.Op == "dial" } + +func wrapContextCanceled(err error) error { + if strings.Contains(err.Error(), "request canceled") { + err = errors.New("request canceled") + } + return err +} diff --git a/commands/http/client.go b/commands/http/client.go index 3da268ffee9..b60f13ecf66 100644 --- a/commands/http/client.go +++ b/commands/http/client.go @@ -30,20 +30,13 @@ type Client interface { type client struct { serverAddress string - httpClient http.Client + httpClient *http.Client } func NewClient(address string) Client { - // We cannot use the default transport because of a bug in go's connection reuse - // code. It causes random failures in the connection including io.EOF and connection - // refused on 'client.Do' return &client{ serverAddress: address, - httpClient: http.Client{ - Transport: &http.Transport{ - DisableKeepAlives: true, - }, - }, + httpClient: http.DefaultClient, } } @@ -103,46 +96,27 @@ func (c *client) Send(req cmds.Request) (cmds.Response, error) { version := config.CurrentVersionNumber httpReq.Header.Set(uaHeader, fmt.Sprintf("/go-ipfs/%s/", version)) - ec := make(chan error, 1) - rc := make(chan cmds.Response, 1) - dc := req.Context().Done() + httpReq.Cancel = req.Context().Done() + httpReq.Close = true - go func() { - httpRes, err := c.httpClient.Do(httpReq) - if err != nil { - ec <- err - return - } - - // using the overridden JSON encoding in request - res, err := getResponse(httpRes, req) - if err != nil { - ec <- err - return - } + httpRes, err := c.httpClient.Do(httpReq) + if err != nil { + return nil, err + } - rc <- res - }() + // using the overridden JSON encoding in request + res, err := getResponse(httpRes, req) + if err != nil { + return nil, err + } - for { - select { - case <-dc: - log.Debug("Context cancelled, cancelling HTTP request...") - tr := http.DefaultTransport.(*http.Transport) - tr.CancelRequest(httpReq) - dc = nil // Wait for ec or rc - case err := <-ec: - return nil, err - case res := <-rc: - if found && len(previousUserProvidedEncoding) > 0 { - // reset to user provided encoding after sending request - // NB: if user has provided an encoding but it is the empty string, - // still leave it as JSON. - req.SetOption(cmds.EncShort, previousUserProvidedEncoding) - } - return res, nil - } + if found && len(previousUserProvidedEncoding) > 0 { + // reset to user provided encoding after sending request + // NB: if user has provided an encoding but it is the empty string, + // still leave it as JSON. 
+ req.SetOption(cmds.EncShort, previousUserProvidedEncoding) } + return res, nil } func getQuery(req cmds.Request) (string, error) { diff --git a/commands/http/handler.go b/commands/http/handler.go index 4a59bb8a081..e446f413273 100644 --- a/commands/http/handler.go +++ b/commands/http/handler.go @@ -149,15 +149,6 @@ func (i internalHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { ctx, cancel := context.WithCancel(node.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func() { - select { - case <-cn.CloseNotify(): - case <-ctx.Done(): - } - cancel() - }() - } err = req.SetRootContext(ctx) if err != nil { @@ -166,7 +157,11 @@ func (i internalHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } // call the command - res := i.root.Call(req) + go func() { + res := i.root.Call(req) + // now handle responding to the client properly + sendResponse(w, r, res, req) + }() // set user's headers first. for k, v := range i.cfg.Headers { @@ -174,9 +169,16 @@ func (i internalHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { w.Header()[k] = v } } + if cn, ok := w.(http.CloseNotifier); ok { + select { + case <-cn.CloseNotify(): + log.Error("closenotify") + case <-ctx.Done(): + log.Error("done") + } + cancel() + } - // now handle responding to the client properly - sendResponse(w, r, res, req) } func guessMimeType(res cmds.Response) (string, error) { @@ -278,7 +280,11 @@ func flushCopy(w io.Writer, r io.Reader) error { n, err := r.Read(buf) switch err { case io.EOF: - return nil + if n <= 0 { + return nil + } + // if data was returned alongside the EOF, pretend we didn't + // get an EOF. The next read call should also EOF. case nil: // continue default: diff --git a/core/builder.go b/core/builder.go index 999f11a46b1..af3a038408b 100644 --- a/core/builder.go +++ b/core/builder.go @@ -63,7 +63,7 @@ func (cfg *BuildCfg) fillDefaults() error { return nil } -func defaultRepo(dstore ds.ThreadSafeDatastore) (repo.Repo, error) { +func defaultRepo(dstore repo.Datastore) (repo.Repo, error) { c := cfg.Config{} priv, pub, err := ci.GenerateKeyPairWithReader(ci.RSA, 1024, rand.Reader) if err != nil { @@ -159,5 +159,10 @@ func setupNode(ctx context.Context, n *IpfsNode, cfg *BuildCfg) error { } n.Resolver = &path.Resolver{DAG: n.DAG} + err = n.loadFilesRoot() + if err != nil { + return err + } + return nil } diff --git a/core/commands/add.go b/core/commands/add.go index a594f90f0bd..1232f1db6ab 100644 --- a/core/commands/add.go +++ b/core/commands/add.go @@ -3,34 +3,19 @@ package commands import ( "fmt" "io" - "path" "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/cheggaaa/pb" - ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" - syncds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync" - cxt "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + "github.com/ipfs/go-ipfs/core/coreunix" - bstore "github.com/ipfs/go-ipfs/blocks/blockstore" - bserv "github.com/ipfs/go-ipfs/blockservice" cmds "github.com/ipfs/go-ipfs/commands" files "github.com/ipfs/go-ipfs/commands/files" core "github.com/ipfs/go-ipfs/core" - offline "github.com/ipfs/go-ipfs/exchange/offline" - importer "github.com/ipfs/go-ipfs/importer" - "github.com/ipfs/go-ipfs/importer/chunk" - dag "github.com/ipfs/go-ipfs/merkledag" - dagutils "github.com/ipfs/go-ipfs/merkledag/utils" - pin "github.com/ipfs/go-ipfs/pin" - ft "github.com/ipfs/go-ipfs/unixfs" u
"github.com/ipfs/go-ipfs/util" ) // Error indicating the max depth has been exceded. var ErrDepthLimitExceeded = fmt.Errorf("depth limit exceeded") -// how many bytes of progress to wait before sending a progress update message -const progressReaderIncrement = 1024 * 256 - const ( quietOptionName = "quiet" progressOptionName = "progress" @@ -39,14 +24,9 @@ const ( hiddenOptionName = "hidden" onlyHashOptionName = "only-hash" chunkerOptionName = "chunker" + pinOptionName = "pin" ) -type AddedObject struct { - Name string - Hash string `json:",omitempty"` - Bytes int64 `json:",omitempty"` -} - var AddCmd = &cmds.Command{ Helptext: cmds.HelpText{ Tagline: "Add an object to ipfs.", @@ -70,6 +50,7 @@ remains to be implemented. cmds.BoolOption(wrapOptionName, "w", "Wrap files with a directory object"), cmds.BoolOption(hiddenOptionName, "H", "Include files that are hidden"), cmds.StringOption(chunkerOptionName, "s", "chunking algorithm to use"), + cmds.BoolOption(pinOptionName, "Pin this object when adding. Default true"), }, PreRun: func(req cmds.Request) error { if quiet, _, _ := req.Option(quietOptionName).Bool(); quiet { @@ -115,8 +96,12 @@ remains to be implemented. hash, _, _ := req.Option(onlyHashOptionName).Bool() hidden, _, _ := req.Option(hiddenOptionName).Bool() chunker, _, _ := req.Option(chunkerOptionName).String() + dopin, pin_found, _ := req.Option(pinOptionName).Bool() + + if !pin_found { // default + dopin = true + } - e := dagutils.NewDagEditor(NewMemoryDagService(), newDirNode()) if hash { nilnode, err := core.NewNode(n.Context(), &core.BuildCfg{ //TODO: need this to be true or all files @@ -133,17 +118,13 @@ remains to be implemented. outChan := make(chan interface{}, 8) res.SetOutput((<-chan interface{})(outChan)) - fileAdder := adder{ - ctx: req.Context(), - node: n, - editor: e, - out: outChan, - chunker: chunker, - progress: progress, - hidden: hidden, - trickle: trickle, - wrap: wrap, - } + fileAdder := coreunix.NewAdder(req.Context(), n, outChan) + fileAdder.Chunker = chunker + fileAdder.Progress = progress + fileAdder.Hidden = hidden + fileAdder.Trickle = trickle + fileAdder.Wrap = wrap + fileAdder.Pin = dopin // addAllFiles loops over a convenience slice file to // add each file individually. e.g. 'ipfs add a b c' @@ -157,44 +138,29 @@ remains to be implemented. return nil // done } - if _, err := fileAdder.addFile(file); err != nil { + if _, err := fileAdder.AddFile(file); err != nil { return err } } } - pinRoot := func(rootnd *dag.Node) error { - rnk, err := rootnd.Key() - if err != nil { - return err - } - - mp := n.Pinning.GetManual() - mp.RemovePinWithMode(rnk, pin.Indirect) - mp.PinWithMode(rnk, pin.Recursive) - return n.Pinning.Flush() - } - addAllAndPin := func(f files.File) error { if err := addAllFiles(f); err != nil { return err } - if !hash { - // copy intermediary nodes from editor to our actual dagservice - err := e.WriteOutputTo(n.DAG) - if err != nil { - log.Error("WRITE OUT: ", err) - return err - } + if hash { + return nil } - rootnd, err := fileAdder.RootNode() + // copy intermediary nodes from editor to our actual dagservice + _, err := fileAdder.Finalize(n.DAG) if err != nil { + log.Error("WRITE OUT: ", err) return err } - return pinRoot(rootnd) + return fileAdder.PinRoot() } go func() { @@ -253,7 +219,7 @@ remains to be implemented. 
var totalProgress, prevFiles, lastBytes int64 for out := range outChan { - output := out.(*AddedObject) + output := out.(*coreunix.AddedObject) if len(output.Hash) > 0 { if showProgressBar { // clear progress bar line before we print "added x" output @@ -289,240 +255,5 @@ remains to be implemented. } } }, - Type: AddedObject{}, -} - -func NewMemoryDagService() dag.DAGService { - // build mem-datastore for editor's intermediary nodes - bs := bstore.NewBlockstore(syncds.MutexWrap(ds.NewMapDatastore())) - bsrv := bserv.New(bs, offline.Exchange(bs)) - return dag.NewDAGService(bsrv) -} - -// Internal structure for holding the switches passed to the `add` call -type adder struct { - ctx cxt.Context - node *core.IpfsNode - editor *dagutils.Editor - out chan interface{} - progress bool - hidden bool - trickle bool - wrap bool - chunker string - - nextUntitled int -} - -// Perform the actual add & pin locally, outputting results to reader -func add(n *core.IpfsNode, reader io.Reader, useTrickle bool, chunker string) (*dag.Node, error) { - chnk, err := chunk.FromString(reader, chunker) - if err != nil { - return nil, err - } - - var node *dag.Node - if useTrickle { - node, err = importer.BuildTrickleDagFromReader( - n.DAG, - chnk, - importer.PinIndirectCB(n.Pinning.GetManual()), - ) - } else { - node, err = importer.BuildDagFromReader( - n.DAG, - chnk, - importer.PinIndirectCB(n.Pinning.GetManual()), - ) - } - - if err != nil { - return nil, err - } - - return node, nil -} - -func (params *adder) RootNode() (*dag.Node, error) { - r := params.editor.GetNode() - - // if not wrapping, AND one root file, use that hash as root. - if !params.wrap && len(r.Links) == 1 { - var err error - r, err = r.Links[0].GetNode(params.ctx, params.editor.GetDagService()) - // no need to output, as we've already done so. - return r, err - } - - // otherwise need to output, as we have not. - err := outputDagnode(params.out, "", r) - return r, err -} - -func (params *adder) addNode(node *dag.Node, path string) error { - // patch it into the root - if path == "" { - key, err := node.Key() - if err != nil { - return err - } - - path = key.Pretty() - } - - if err := params.editor.InsertNodeAtPath(params.ctx, path, node, newDirNode); err != nil { - return err - } - - return outputDagnode(params.out, path, node) -} - -// Add the given file while respecting the params. 
-func (params *adder) addFile(file files.File) (*dag.Node, error) { - // Check if file is hidden - if fileIsHidden := files.IsHidden(file); fileIsHidden && !params.hidden { - log.Debugf("%s is hidden, skipping", file.FileName()) - return nil, &hiddenFileError{file.FileName()} - } - - // Check if "file" is actually a directory - if file.IsDirectory() { - return params.addDir(file) - } - - if s, ok := file.(*files.Symlink); ok { - sdata, err := ft.SymlinkData(s.Target) - if err != nil { - return nil, err - } - - dagnode := &dag.Node{Data: sdata} - _, err = params.node.DAG.Add(dagnode) - if err != nil { - return nil, err - } - - err = params.addNode(dagnode, s.FileName()) - return dagnode, err - } - - // if the progress flag was specified, wrap the file so that we can send - // progress updates to the client (over the output channel) - var reader io.Reader = file - if params.progress { - reader = &progressReader{file: file, out: params.out} - } - - dagnode, err := add(params.node, reader, params.trickle, params.chunker) - if err != nil { - return nil, err - } - - // patch it into the root - log.Infof("adding file: %s", file.FileName()) - err = params.addNode(dagnode, file.FileName()) - return dagnode, err -} - -func (params *adder) addDir(file files.File) (*dag.Node, error) { - tree := &dag.Node{Data: ft.FolderPBData()} - log.Infof("adding directory: %s", file.FileName()) - - for { - file, err := file.NextFile() - if err != nil && err != io.EOF { - return nil, err - } - if file == nil { - break - } - - node, err := params.addFile(file) - if _, ok := err.(*hiddenFileError); ok { - // hidden file error, set the node to nil for below - node = nil - } else if err != nil { - return nil, err - } - - if node != nil { - _, name := path.Split(file.FileName()) - - err = tree.AddNodeLink(name, node) - if err != nil { - return nil, err - } - } - } - - if err := params.addNode(tree, file.FileName()); err != nil { - return nil, err - } - - k, err := params.node.DAG.Add(tree) - if err != nil { - return nil, err - } - - params.node.Pinning.GetManual().PinWithMode(k, pin.Indirect) - - return tree, nil -} - -// outputDagnode sends dagnode info over the output channel -func outputDagnode(out chan interface{}, name string, dn *dag.Node) error { - o, err := getOutput(dn) - if err != nil { - return err - } - - out <- &AddedObject{ - Hash: o.Hash, - Name: name, - } - - return nil -} - -type hiddenFileError struct { - fileName string -} - -func (e *hiddenFileError) Error() string { - return fmt.Sprintf("%s is a hidden file", e.fileName) -} - -type ignoreFileError struct { - fileName string -} - -func (e *ignoreFileError) Error() string { - return fmt.Sprintf("%s is an ignored file", e.fileName) -} - -type progressReader struct { - file files.File - out chan interface{} - bytes int64 - lastProgress int64 -} - -func (i *progressReader) Read(p []byte) (int, error) { - n, err := i.file.Read(p) - - i.bytes += int64(n) - if i.bytes-i.lastProgress >= progressReaderIncrement || err == io.EOF { - i.lastProgress = i.bytes - i.out <- &AddedObject{ - Name: i.file.FileName(), - Bytes: i.bytes, - } - } - - return n, err -} - -// TODO: generalize this to more than unix-fs nodes. 
-func newDirNode() *dag.Node { - return &dag.Node{Data: ft.FolderPBData()} + Type: coreunix.AddedObject{}, } diff --git a/core/commands/files/files.go b/core/commands/files/files.go new file mode 100644 index 00000000000..cffb6f2d0dc --- /dev/null +++ b/core/commands/files/files.go @@ -0,0 +1,707 @@ +package commands + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + gopath "path" + "strings" + + cmds "github.com/ipfs/go-ipfs/commands" + core "github.com/ipfs/go-ipfs/core" + dag "github.com/ipfs/go-ipfs/merkledag" + mfs "github.com/ipfs/go-ipfs/mfs" + path "github.com/ipfs/go-ipfs/path" + ft "github.com/ipfs/go-ipfs/unixfs" + + context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + logging "github.com/ipfs/go-ipfs/vendor/QmQg1J6vikuXF9oDvm4wpdeAUvvkVEKW1EYDw9HhTMnP2b/go-log" +) + +var log = logging.Logger("cmds/files") + +var FilesCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Manipulate unixfs files", + ShortDescription: ` +Files is an API for manipulating ipfs objects as if they were a unix filesystem. +`, + }, + Subcommands: map[string]*cmds.Command{ + "read": FilesReadCmd, + "write": FilesWriteCmd, + "mv": FilesMvCmd, + "cp": FilesCpCmd, + "ls": FilesLsCmd, + "mkdir": FilesMkdirCmd, + "stat": FilesStatCmd, + "rm": FilesRmCmd, + }, +} + +var FilesStatCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "display file status", + }, + + Arguments: []cmds.Argument{ + cmds.StringArg("path", true, false, "path to node to stat"), + }, + Run: func(req cmds.Request, res cmds.Response) { + node, err := req.InvocContext().GetNode() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + path, err := checkPath(req.Arguments()[0]) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + fsn, err := mfs.Lookup(node.FilesRoot, path) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + o, err := statNode(fsn) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + res.SetOutput(o) + }, + Marshalers: cmds.MarshalerMap{ + cmds.Text: func(res cmds.Response) (io.Reader, error) { + out := res.Output().(*Object) + buf := new(bytes.Buffer) + fmt.Fprintln(buf, out.Hash) + fmt.Fprintf(buf, "Size: %d\n", out.Size) + fmt.Fprintf(buf, "CumulativeSize: %d\n", out.CumulativeSize) + fmt.Fprintf(buf, "ChildBlocks: %d\n", out.Blocks) + return buf, nil + }, + }, + Type: Object{}, +} + +func statNode(fsn mfs.FSNode) (*Object, error) { + nd, err := fsn.GetNode() + if err != nil { + return nil, err + } + + k, err := nd.Key() + if err != nil { + return nil, err + } + + d, err := ft.FromBytes(nd.Data) + if err != nil { + return nil, err + } + + cumulsize, err := nd.Size() + if err != nil { + return nil, err + } + + return &Object{ + Hash: k.B58String(), + Blocks: len(nd.Links), + Size: d.GetFilesize(), + CumulativeSize: cumulsize, + }, nil +} + +var FilesCpCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "copy files into mfs", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("source", true, false, "source object to copy"), + cmds.StringArg("dest", true, false, "destination to copy object to"), + }, + Run: func(req cmds.Request, res cmds.Response) { + node, err := req.InvocContext().GetNode() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + src, err := checkPath(req.Arguments()[0]) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + dst, err := checkPath(req.Arguments()[1]) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + 
} + + nd, err := getNodeFromPath(req.Context(), node, src) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + err = mfs.PutNode(node.FilesRoot, dst, nd) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + }, +} + +func getNodeFromPath(ctx context.Context, node *core.IpfsNode, p string) (*dag.Node, error) { + switch { + case strings.HasPrefix(p, "/ipfs/"): + np, err := path.ParsePath(p) + if err != nil { + return nil, err + } + + return core.Resolve(ctx, node, np) + default: + fsn, err := mfs.Lookup(node.FilesRoot, p) + if err != nil { + return nil, err + } + + return fsn.GetNode() + } +} + +type Object struct { + Hash string + Size uint64 + CumulativeSize uint64 + Blocks int +} + +type FilesLsOutput struct { + Entries []mfs.NodeListing +} + +var FilesLsCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "List directories", + ShortDescription: ` +List directories. + +Examples: + + $ ipfs files ls /welcome/docs/ + about + contact + help + quick-start + readme + security-notes + + $ ipfs files ls /myfiles/a/b/c/d + foo + bar +`, + }, + Arguments: []cmds.Argument{ + cmds.StringArg("path", true, false, "path to show listing for"), + }, + Options: []cmds.Option{ + cmds.BoolOption("l", "use long listing format"), + }, + Run: func(req cmds.Request, res cmds.Response) { + path, err := checkPath(req.Arguments()[0]) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + nd, err := req.InvocContext().GetNode() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + fsn, err := mfs.Lookup(nd.FilesRoot, path) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + switch fsn := fsn.(type) { + case *mfs.Directory: + listing, err := fsn.List() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + res.SetOutput(&FilesLsOutput{listing}) + return + case *mfs.File: + parts := strings.Split(path, "/") + name := parts[len(parts)-1] + out := &FilesLsOutput{[]mfs.NodeListing{mfs.NodeListing{Name: name, Type: 1}}} + res.SetOutput(out) + return + default: + res.SetError(errors.New("unrecognized type"), cmds.ErrNormal) + } + }, + Marshalers: cmds.MarshalerMap{ + cmds.Text: func(res cmds.Response) (io.Reader, error) { + out := res.Output().(*FilesLsOutput) + buf := new(bytes.Buffer) + long, _, _ := res.Request().Option("l").Bool() + + for _, o := range out.Entries { + if long { + fmt.Fprintf(buf, "%s\t%s\t%d\n", o.Name, o.Hash, o.Size) + } else { + fmt.Fprintf(buf, "%s\n", o.Name) + } + } + return buf, nil + }, + }, + Type: FilesLsOutput{}, +} + +var FilesReadCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Read a file in a given mfs", + ShortDescription: ` +Read a specified number of bytes from a file at a given offset. By default, will +read the entire file similar to unix cat. 
+ +Examples: + + $ ipfs files read /test/hello + hello + `, + }, + + Arguments: []cmds.Argument{ + cmds.StringArg("path", true, false, "path to file to be read"), + }, + Options: []cmds.Option{ + cmds.IntOption("o", "offset", "offset to read from"), + cmds.IntOption("n", "count", "maximum number of bytes to read"), + }, + Run: func(req cmds.Request, res cmds.Response) { + n, err := req.InvocContext().GetNode() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + path, err := checkPath(req.Arguments()[0]) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + fsn, err := mfs.Lookup(n.FilesRoot, path) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + fi, ok := fsn.(*mfs.File) + if !ok { + res.SetError(fmt.Errorf("%s was not a file", path), cmds.ErrNormal) + return + } + + offset, _, err := req.Option("offset").Int() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + if offset < 0 { + res.SetError(fmt.Errorf("cannot specify negative offset"), cmds.ErrNormal) + return + } + + filen, err := fi.Size() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + if int64(offset) > filen { + res.SetError(fmt.Errorf("offset was past end of file (%d > %d)", offset, filen), cmds.ErrNormal) + return + } + + _, err = fi.Seek(int64(offset), os.SEEK_SET) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + var r io.Reader = fi + count, found, err := req.Option("count").Int() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + if found { + if count < 0 { + res.SetError(fmt.Errorf("cannot specify negative 'count'"), cmds.ErrNormal) + return + } + r = io.LimitReader(fi, int64(count)) + } + + res.SetOutput(r) + }, +} + +var FilesMvCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Move files", + ShortDescription: ` +Move files around. Just like traditional unix mv. + +Example: + + $ ipfs files mv /myfs/a/b/c /myfs/foo/newc + +`, + }, + + Arguments: []cmds.Argument{ + cmds.StringArg("source", true, false, "source file to move"), + cmds.StringArg("dest", true, false, "target path for file to be moved to"), + }, + Run: func(req cmds.Request, res cmds.Response) { + n, err := req.InvocContext().GetNode() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + src, err := checkPath(req.Arguments()[0]) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + dst, err := checkPath(req.Arguments()[1]) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + err = mfs.Mv(n.FilesRoot, src, dst) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + }, +} + +var FilesWriteCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Write to a mutable file in a given filesystem", + ShortDescription: ` +Write data to a file in a given filesystem. This command allows you to specify +a beginning offset to write to. The entire length of the input will be written. + +If the '--create' option is specified, the file will be created if it does not +exist. Nonexistent intermediate directories will not be created.
+ +Example: + + echo "hello world" | ipfs files write --create /myfs/a/b/file + echo "hello world" | ipfs files write --truncate /myfs/a/b/file +`, + }, + Arguments: []cmds.Argument{ + cmds.StringArg("path", true, false, "path to write to"), + cmds.FileArg("data", true, false, "data to write").EnableStdin(), + }, + Options: []cmds.Option{ + cmds.IntOption("o", "offset", "offset to write to"), + cmds.BoolOption("e", "create", "create the file if it does not exist"), + cmds.BoolOption("t", "truncate", "truncate the file before writing"), + cmds.IntOption("n", "count", "maximum number of bytes to read"), + }, + Run: func(req cmds.Request, res cmds.Response) { + path, err := checkPath(req.Arguments()[0]) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + create, _, _ := req.Option("create").Bool() + trunc, _, _ := req.Option("truncate").Bool() + + nd, err := req.InvocContext().GetNode() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + fi, err := getFileHandle(nd.FilesRoot, path, create) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + defer fi.Close() + + if trunc { + if err := fi.Truncate(0); err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + } + + offset, _, err := req.Option("offset").Int() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + if offset < 0 { + res.SetError(fmt.Errorf("cannot have negative write offset"), cmds.ErrNormal) + return + } + + count, countfound, err := req.Option("count").Int() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + if countfound && count < 0 { + res.SetError(fmt.Errorf("cannot have negative byte count"), cmds.ErrNormal) + return + } + + _, err = fi.Seek(int64(offset), os.SEEK_SET) + if err != nil { + log.Error("seekfail: ", err) + res.SetError(err, cmds.ErrNormal) + return + } + + input, err := req.Files().NextFile() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + var r io.Reader = input + if countfound { + r = io.LimitReader(r, int64(count)) + } + + n, err := io.Copy(fi, r) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + log.Debugf("wrote %d bytes to %s", n, path) + }, +} + +var FilesMkdirCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "make directories", + ShortDescription: ` +Create the directory if it does not already exist. + +Note: all paths must be absolute.
+ +Examples: + + $ ipfs files mkdir /test/newdir + $ ipfs files mkdir -p /test/does/not/exist/yet +`, + }, + + Arguments: []cmds.Argument{ + cmds.StringArg("path", true, false, "path to dir to make"), + }, + Options: []cmds.Option{ + cmds.BoolOption("p", "parents", "no error if existing, make parent directories as needed"), + }, + Run: func(req cmds.Request, res cmds.Response) { + n, err := req.InvocContext().GetNode() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + dashp, _, _ := req.Option("parents").Bool() + dirtomake, err := checkPath(req.Arguments()[0]) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + err = mfs.Mkdir(n.FilesRoot, dirtomake, dashp) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + }, +} + +var FilesRmCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "remove a file", + ShortDescription: ` +remove files or directories + + $ ipfs files rm /foo + $ ipfs files ls /bar + cat + dog + fish + $ ipfs files rm -r /bar +`, + }, + + Arguments: []cmds.Argument{ + cmds.StringArg("path", true, true, "file to remove"), + }, + Options: []cmds.Option{ + cmds.BoolOption("r", "recursive", "recursively remove directories"), + }, + Run: func(req cmds.Request, res cmds.Response) { + nd, err := req.InvocContext().GetNode() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + path, err := checkPath(req.Arguments()[0]) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + if path == "/" { + res.SetError(fmt.Errorf("cannot delete root"), cmds.ErrNormal) + return + } + + // 'rm a/b/c/' will fail unless we trim the slash at the end + if path[len(path)-1] == '/' { + path = path[:len(path)-1] + } + + dir, name := gopath.Split(path) + parent, err := mfs.Lookup(nd.FilesRoot, dir) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + pdir, ok := parent.(*mfs.Directory) + if !ok { + res.SetError(fmt.Errorf("no such file or directory: %s", path), cmds.ErrNormal) + return + } + + childi, err := pdir.Child(name) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + dashr, _, _ := req.Option("r").Bool() + + switch childi.(type) { + case *mfs.Directory: + if dashr { + err := pdir.Unlink(name) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + } else { + res.SetError(fmt.Errorf("%s is a directory, use -r to remove directories", path), cmds.ErrNormal) + return + } + default: + err := pdir.Unlink(name) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + } + }, +} + +func getFileHandle(r *mfs.Root, path string, create bool) (*mfs.File, error) { + + target, err := mfs.Lookup(r, path) + switch err { + case nil: + fi, ok := target.(*mfs.File) + if !ok { + return nil, fmt.Errorf("%s was not a file", path) + } + return fi, nil + + case os.ErrNotExist: + if !create { + return nil, err + } + + // if create is specified and the file doesn't exist, we create the file + dirname, fname := gopath.Split(path) + pdiri, err := mfs.Lookup(r, dirname) + if err != nil { + log.Error("lookupfail ", dirname) + return nil, err + } + pdir, ok := pdiri.(*mfs.Directory) + if !ok { + return nil, fmt.Errorf("%s was not a directory", dirname) + } + + nd := &dag.Node{Data: ft.FilePBData(nil, 0)} + err = pdir.AddChild(fname, nd) + if err != nil { + return nil, err + } + + fsn, err := pdir.Child(fname) + if err != nil { + return nil, err + } + + fi, ok := fsn.(*mfs.File) + if !ok { + return nil, errors.New("expected *mfs.File, didn't get it.
This is likely a race condition") + } + return fi, nil + + default: + return nil, err + } +} + +func checkPath(p string) (string, error) { + if len(p) == 0 { + return "", fmt.Errorf("paths must not be empty") + } + + if p[0] != '/' { + return "", fmt.Errorf("paths must start with a leading slash") + } + + cleaned := gopath.Clean(p) + if p[len(p)-1] == '/' && p != "/" { + cleaned += "/" + } + return cleaned, nil +} diff --git a/core/commands/object.go b/core/commands/object.go index 2b6a1494ef1..b7f129a3b7c 100644 --- a/core/commands/object.go +++ b/core/commands/object.go @@ -599,14 +599,17 @@ func rmLinkCaller(req cmds.Request, root *dag.Node) (key.Key, error) { path := req.Arguments()[2] - e := dagutils.NewDagEditor(nd.DAG, root) + e := dagutils.NewDagEditor(root, nd.DAG) err = e.RmLink(req.Context(), path) if err != nil { return "", err } - nnode := e.GetNode() + nnode, err := e.Finalize(nd.DAG) + if err != nil { + return "", err + } return nnode.Key() } @@ -636,7 +639,7 @@ func addLinkCaller(req cmds.Request, root *dag.Node) (key.Key, error) { } } - e := dagutils.NewDagEditor(nd.DAG, root) + e := dagutils.NewDagEditor(root, nd.DAG) childnd, err := nd.DAG.Get(req.Context(), childk) if err != nil { @@ -648,7 +651,10 @@ func addLinkCaller(req cmds.Request, root *dag.Node) (key.Key, error) { return "", err } - nnode := e.GetNode() + nnode, err := e.Finalize(nd.DAG) + if err != nil { + return "", err + } return nnode.Key() } diff --git a/core/commands/pin.go b/core/commands/pin.go index 5aa87924c0b..89c3cf14b3c 100644 --- a/core/commands/pin.go +++ b/core/commands/pin.go @@ -8,6 +8,7 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" cmds "github.com/ipfs/go-ipfs/commands" corerepo "github.com/ipfs/go-ipfs/core/corerepo" + dag "github.com/ipfs/go-ipfs/merkledag" u "github.com/ipfs/go-ipfs/util" ) @@ -50,6 +51,9 @@ on disk. return } + unlock := n.Blockstore.PinLock() + defer unlock() + // set recursive flag recursive, found, err := req.Option("recursive").Bool() if err != nil { @@ -157,8 +161,15 @@ Returns a list of objects that are pinned locally. By default, only recursively pinned returned, but others may be shown via the '--type' flag. `, LongDescription: ` Returns a list of objects that are pinned locally. By default, only recursively pinned returned, but others may be shown via the '--type' flag. + +Use --type= to specify the type of pinned keys to list. Valid values are: + * "direct": pin that specific object.
+ * "recursive": pin that specific object, and indirectly pin all its decendants + * "indirect": pinned indirectly by an ancestor (like a refcount) + * "all" + Example: $ echo "hello" | ipfs add -q QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN @@ -204,24 +216,35 @@ Example: if typeStr == "direct" || typeStr == "all" { for _, k := range n.Pinning.DirectKeys() { keys[k.B58String()] = RefKeyObject{ - Type: "direct", - Count: 1, + Type: "direct", } } } if typeStr == "indirect" || typeStr == "all" { - for k, v := range n.Pinning.IndirectKeys() { + ks := key.NewKeySet() + for _, k := range n.Pinning.RecursiveKeys() { + nd, err := n.DAG.Get(n.Context(), k) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + err = dag.EnumerateChildren(n.Context(), n.DAG, nd, ks) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + } + for _, k := range ks.Keys() { keys[k.B58String()] = RefKeyObject{ - Type: "indirect", - Count: v, + Type: "indirect", } } } if typeStr == "recursive" || typeStr == "all" { for _, k := range n.Pinning.RecursiveKeys() { keys[k.B58String()] = RefKeyObject{ - Type: "recursive", - Count: 1, + Type: "recursive", } } } @@ -231,16 +254,6 @@ Example: Type: RefKeyList{}, Marshalers: cmds.MarshalerMap{ cmds.Text: func(res cmds.Response) (io.Reader, error) { - typeStr, _, err := res.Request().Option("type").String() - if err != nil { - return nil, err - } - - count, _, err := res.Request().Option("count").Bool() - if err != nil { - return nil, err - } - quiet, _, err := res.Request().Option("quiet").Bool() if err != nil { return nil, err @@ -251,21 +264,11 @@ Example: return nil, u.ErrCast() } out := new(bytes.Buffer) - if typeStr == "indirect" && count { - for k, v := range keys.Keys { - if quiet { - fmt.Fprintf(out, "%s %d\n", k, v.Count) - } else { - fmt.Fprintf(out, "%s %s %d\n", k, v.Type, v.Count) - } - } - } else { - for k, v := range keys.Keys { - if quiet { - fmt.Fprintf(out, "%s\n", k) - } else { - fmt.Fprintf(out, "%s %s\n", k, v.Type) - } + for k, v := range keys.Keys { + if quiet { + fmt.Fprintf(out, "%s\n", k) + } else { + fmt.Fprintf(out, "%s %s\n", k, v.Type) } } return out, nil @@ -274,8 +277,7 @@ Example: } type RefKeyObject struct { - Type string - Count int + Type string } type RefKeyList struct { diff --git a/core/commands/root.go b/core/commands/root.go index ce67217502e..d760c840f7c 100644 --- a/core/commands/root.go +++ b/core/commands/root.go @@ -5,6 +5,7 @@ import ( "strings" cmds "github.com/ipfs/go-ipfs/commands" + files "github.com/ipfs/go-ipfs/core/commands/files" unixfs "github.com/ipfs/go-ipfs/core/commands/unixfs" logging "github.com/ipfs/go-ipfs/vendor/QmQg1J6vikuXF9oDvm4wpdeAUvvkVEKW1EYDw9HhTMnP2b/go-log" ) @@ -94,6 +95,7 @@ var rootSubcommands = map[string]*cmds.Command{ "dht": DhtCmd, "diag": DiagCmd, "dns": DNSCmd, + "files": files.FilesCmd, "get": GetCmd, "id": IDCmd, "log": LogCmd, diff --git a/core/commands/tar.go b/core/commands/tar.go index 0d6fc1318fa..53eaca12586 100644 --- a/core/commands/tar.go +++ b/core/commands/tar.go @@ -6,6 +6,7 @@ import ( cmds "github.com/ipfs/go-ipfs/commands" core "github.com/ipfs/go-ipfs/core" + "github.com/ipfs/go-ipfs/core/coreunix" path "github.com/ipfs/go-ipfs/path" tar "github.com/ipfs/go-ipfs/tar" ) @@ -58,15 +59,15 @@ var tarAddCmd = &cmds.Command{ } fi.FileName() - res.SetOutput(&AddedObject{ + res.SetOutput(&coreunix.AddedObject{ Name: fi.FileName(), Hash: k.B58String(), }) }, - Type: AddedObject{}, + Type: coreunix.AddedObject{}, Marshalers: 
cmds.MarshalerMap{ cmds.Text: func(res cmds.Response) (io.Reader, error) { - o := res.Output().(*AddedObject) + o := res.Output().(*coreunix.AddedObject) return strings.NewReader(o.Hash), nil }, }, diff --git a/core/core.go b/core/core.go index fbbfc35f34f..98c3d5a8552 100644 --- a/core/core.go +++ b/core/core.go @@ -41,14 +41,15 @@ import ( offroute "github.com/ipfs/go-ipfs/routing/offline" bstore "github.com/ipfs/go-ipfs/blocks/blockstore" + key "github.com/ipfs/go-ipfs/blocks/key" bserv "github.com/ipfs/go-ipfs/blockservice" exchange "github.com/ipfs/go-ipfs/exchange" bitswap "github.com/ipfs/go-ipfs/exchange/bitswap" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" rp "github.com/ipfs/go-ipfs/exchange/reprovide" + mfs "github.com/ipfs/go-ipfs/mfs" mount "github.com/ipfs/go-ipfs/fuse/mount" - ipnsfs "github.com/ipfs/go-ipfs/ipnsfs" merkledag "github.com/ipfs/go-ipfs/merkledag" namesys "github.com/ipfs/go-ipfs/namesys" ipnsrp "github.com/ipfs/go-ipfs/namesys/republisher" @@ -56,6 +57,7 @@ import ( pin "github.com/ipfs/go-ipfs/pin" repo "github.com/ipfs/go-ipfs/repo" config "github.com/ipfs/go-ipfs/repo/config" + uio "github.com/ipfs/go-ipfs/unixfs/io" u "github.com/ipfs/go-ipfs/util" ) @@ -90,12 +92,13 @@ type IpfsNode struct { // Services Peerstore peer.Peerstore // storage for other Peer instances - Blockstore bstore.Blockstore // the block store (lower level) + Blockstore bstore.GCBlockstore // the block store (lower level) Blocks *bserv.BlockService // the block service, get/add blocks. DAG merkledag.DAGService // the merkle dag service, get/add objects. Resolver *path.Resolver // the path resolution system Reporter metrics.Reporter Discovery discovery.Service + FilesRoot *mfs.Root // Online PeerHost p2phost.Host // the network host (server+client) @@ -108,8 +111,6 @@ type IpfsNode struct { Reprovider *rp.Reprovider // the value reprovider system IpnsRepub *ipnsrp.Republisher - IpnsFs *ipnsfs.Filesystem - proc goprocess.Process ctx context.Context @@ -320,8 +321,14 @@ func (n *IpfsNode) teardown() error { log.Debug("core is shutting down...") // owned objects are closed in this teardown to ensure that they're closed // regardless of which constructor was used to add them to the node. 
- closers := []io.Closer{ - n.Repo, + var closers []io.Closer + + // NOTE: the order that objects are added (closed) matters: if an object + // needs to use another during its shutdown/cleanup process, it should be + // closed before that other object + + if n.FilesRoot != nil { + closers = append(closers, n.FilesRoot) } if n.Exchange != nil { @@ -335,10 +342,8 @@ func (n *IpfsNode) teardown() error { closers = append(closers, mount.Closer(n.Mounts.Ipns)) } - // Filesystem needs to be closed before network, dht, and blockservice - // so it can use them as its shutting down - if n.IpnsFs != nil { - closers = append(closers, n.IpnsFs) + if dht, ok := n.Routing.(*dht.IpfsDHT); ok { + closers = append(closers, dht.Process()) } if n.Blocks != nil { @@ -349,14 +354,13 @@ func (n *IpfsNode) teardown() error { closers = append(closers, n.Bootstrapper) } - if dht, ok := n.Routing.(*dht.IpfsDHT); ok { - closers = append(closers, dht.Process()) - } - if n.PeerHost != nil { closers = append(closers, n.PeerHost) } + // Repo is closed last; most things need to preserve state here + closers = append(closers, n.Repo) + var errs []error for _, closer := range closers { if err := closer.Close(); err != nil { @@ -467,6 +471,41 @@ func (n *IpfsNode) loadBootstrapPeers() ([]peer.PeerInfo, error) { return toPeerInfos(parsed), nil } +func (n *IpfsNode) loadFilesRoot() error { + dsk := ds.NewKey("/local/filesroot") + pf := func(ctx context.Context, k key.Key) error { + return n.Repo.Datastore().Put(dsk, []byte(k)) + } + + var nd *merkledag.Node + val, err := n.Repo.Datastore().Get(dsk) + + switch { + case err == ds.ErrNotFound || val == nil: + nd = uio.NewEmptyDirectory() + _, err := n.DAG.Add(nd) + if err != nil { + return fmt.Errorf("failure writing to dagstore: %s", err) + } + case err == nil: + k := key.Key(val.([]byte)) + nd, err = n.DAG.Get(n.Context(), k) + if err != nil { + return fmt.Errorf("error loading filesroot from DAG: %s", err) + } + default: + return err + } + + mr, err := mfs.NewRoot(n.Context(), n.DAG, nd, pf) + if err != nil { + return err + } + + n.FilesRoot = mr + return nil +} + // SetupOfflineRouting loads the local nodes private key and // uses it to instantiate a routing system in offline mode. // This is primarily used for offline ipns modifications.
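The loadFilesRoot helper above follows a load-or-initialize pattern: look up the persisted root key under /local/filesroot, fall back to a fresh empty directory on first run, and hand mfs.NewRoot a publish callback that records every new root. A minimal, self-contained sketch of that shape; every name in it is a stand-in, not go-ipfs API:

    package main

    import (
        "errors"
        "fmt"
    )

    var errNotFound = errors.New("not found")

    // store stands in for the repo datastore.
    type store map[string][]byte

    func (s store) Get(k string) ([]byte, error) {
        v, ok := s[k]
        if !ok {
            return nil, errNotFound
        }
        return v, nil
    }

    func (s store) Put(k string, v []byte) { s[k] = v }

    // loadRoot returns the persisted root key, initializing it on first
    // run, plus a publish func that persists later roots (mirroring the
    // pf closure passed to mfs.NewRoot in the diff).
    func loadRoot(s store, dsk string) (string, func(string), error) {
        var root string
        switch v, err := s.Get(dsk); {
        case err == errNotFound || v == nil:
            root = "<empty-dir-key>" // stands in for uio.NewEmptyDirectory()
        case err == nil:
            root = string(v)
        default:
            return "", nil, err
        }
        publish := func(k string) { s.Put(dsk, []byte(k)) }
        return root, publish, nil
    }

    func main() {
        s := store{}
        root, publish, err := loadRoot(s, "/local/filesroot")
        if err != nil {
            panic(err)
        }
        fmt.Println("initial root:", root)
        publish("QmNewRoot") // every mfs write would eventually land here
        root, _, _ = loadRoot(s, "/local/filesroot")
        fmt.Println("reloaded root:", root)
    }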
@@ -570,14 +609,14 @@ func startListening(ctx context.Context, host p2phost.Host, cfg *config.Config) return nil } -func constructDHTRouting(ctx context.Context, host p2phost.Host, dstore ds.ThreadSafeDatastore) (routing.IpfsRouting, error) { +func constructDHTRouting(ctx context.Context, host p2phost.Host, dstore repo.Datastore) (routing.IpfsRouting, error) { dhtRouting := dht.NewDHT(ctx, host, dstore) dhtRouting.Validator[IpnsValidatorTag] = namesys.IpnsRecordValidator dhtRouting.Selector[IpnsValidatorTag] = namesys.IpnsSelectorFunc return dhtRouting, nil } -type RoutingOption func(context.Context, p2phost.Host, ds.ThreadSafeDatastore) (routing.IpfsRouting, error) +type RoutingOption func(context.Context, p2phost.Host, repo.Datastore) (routing.IpfsRouting, error) type DiscoveryOption func(p2phost.Host) (discovery.Service, error) diff --git a/core/core_test.go b/core/core_test.go index d91b9992df9..42568b4c0f4 100644 --- a/core/core_test.go +++ b/core/core_test.go @@ -16,9 +16,6 @@ func TestInitialization(t *testing.T) { good := []*config.Config{ { Identity: id, - Datastore: config.Datastore{ - Type: "memory", - }, Addresses: config.Addresses{ Swarm: []string{"/ip4/0.0.0.0/tcp/4001"}, API: "/ip4/127.0.0.1/tcp/8000", @@ -27,10 +24,6 @@ func TestInitialization(t *testing.T) { { Identity: id, - Datastore: config.Datastore{ - Type: "leveldb", - Path: ".testdb", - }, Addresses: config.Addresses{ Swarm: []string{"/ip4/0.0.0.0/tcp/4001"}, API: "/ip4/127.0.0.1/tcp/8000", @@ -40,7 +33,6 @@ func TestInitialization(t *testing.T) { bad := []*config.Config{ {}, - {Datastore: config.Datastore{Type: "memory"}}, } for i, c := range good { diff --git a/core/corehttp/gateway_handler.go b/core/corehttp/gateway_handler.go index d9864c05146..b61d03f4fbf 100644 --- a/core/corehttp/gateway_handler.go +++ b/core/corehttp/gateway_handler.go @@ -49,8 +49,7 @@ func (i *gatewayHandler) newDagFromReader(r io.Reader) (*dag.Node, error) { // return ufs.AddFromReader(i.node, r.Body) return importer.BuildDagFromReader( i.node.DAG, - chunk.DefaultSplitter(r), - importer.BasicPinnerCB(i.node.Pinning.GetManual())) + chunk.DefaultSplitter(r)) } // TODO(btc): break this apart into separate handlers using a more expressive muxer @@ -87,9 +86,20 @@ func (i *gatewayHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } func (i *gatewayHandler) getOrHeadHandler(w http.ResponseWriter, r *http.Request) { - ctx, cancel := context.WithCancel(i.node.Context()) + ctx, cancel := context.WithTimeout(i.node.Context(), time.Hour) + // the hour is a hard fallback, we don't expect it to happen, but just in case defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func() { + select { + case <-cn.CloseNotify(): + case <-ctx.Done(): + } + cancel() + }() + } + urlPath := r.URL.Path // IPNSHostnameOption might have constructed an IPNS path using the Host header. 
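The getOrHeadHandler hunk above wires client disconnects into context cancellation: an hour-long timeout as a hard fallback, plus a goroutine that cancels as soon as http.CloseNotifier fires. A compilable sketch of the same pattern using only the standard library; the two-second sleep stands in for the real path resolution and DAG fetching, and CloseNotifier matches the Go of this era (newer code would use r.Context()):

    package main

    import (
        "context"
        "fmt"
        "net/http"
        "time"
    )

    func handler(w http.ResponseWriter, r *http.Request) {
        // Hard one-hour fallback, as in the diff; we never expect to hit it.
        ctx, cancel := context.WithTimeout(context.Background(), time.Hour)
        defer cancel()

        if cn, ok := w.(http.CloseNotifier); ok {
            go func() {
                select {
                case <-cn.CloseNotify(): // client hung up early
                case <-ctx.Done(): // handler finished or timed out
                }
                cancel()
            }()
        }

        select {
        case <-time.After(2 * time.Second): // stand-in for the real work
            fmt.Fprintln(w, "done")
        case <-ctx.Done():
            // request cancelled; nothing left to write
        }
    }

    func main() {
        http.HandleFunc("/", handler)
        http.ListenAndServe(":8080", nil)
    }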
@@ -321,14 +331,20 @@ func (i *gatewayHandler) putHandler(w http.ResponseWriter, r *http.Request) { return } - e := dagutils.NewDagEditor(i.node.DAG, rnode) + e := dagutils.NewDagEditor(rnode, i.node.DAG) err = e.InsertNodeAtPath(ctx, newPath, newnode, uio.NewEmptyDirectory) if err != nil { webError(w, "putHandler: InsertNodeAtPath failed", err, http.StatusInternalServerError) return } - newkey, err = e.GetNode().Key() + nnode, err := e.Finalize(i.node.DAG) + if err != nil { + webError(w, "putHandler: could not get node", err, http.StatusInternalServerError) + return + } + + newkey, err = nnode.Key() if err != nil { webError(w, "putHandler: could not get key of edited node", err, http.StatusInternalServerError) return diff --git a/core/corerepo/gc.go b/core/corerepo/gc.go index 5175a041068..9209207a859 100644 --- a/core/corerepo/gc.go +++ b/core/corerepo/gc.go @@ -8,6 +8,7 @@ import ( context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" key "github.com/ipfs/go-ipfs/blocks/key" "github.com/ipfs/go-ipfs/core" + gc "github.com/ipfs/go-ipfs/pin/gc" repo "github.com/ipfs/go-ipfs/repo" logging "github.com/ipfs/go-ipfs/vendor/QmQg1J6vikuXF9oDvm4wpdeAUvvkVEKW1EYDw9HhTMnP2b/go-log" ) @@ -73,53 +74,42 @@ func NewGC(n *core.IpfsNode) (*GC, error) { func GarbageCollect(n *core.IpfsNode, ctx context.Context) error { ctx, cancel := context.WithCancel(ctx) defer cancel() // in case error occurs during operation - keychan, err := n.Blockstore.AllKeysChan(ctx) + rmed, err := gc.GC(ctx, n.Blockstore, n.Pinning) if err != nil { return err } - for k := range keychan { // rely on AllKeysChan to close chan - if !n.Pinning.IsPinned(k) { - if err := n.Blockstore.DeleteBlock(k); err != nil { - return err + + for { + select { + case _, ok := <-rmed: + if !ok { + return nil } + case <-ctx.Done(): + return ctx.Err() } } - return nil + } func GarbageCollectAsync(n *core.IpfsNode, ctx context.Context) (<-chan *KeyRemoved, error) { - - keychan, err := n.Blockstore.AllKeysChan(ctx) + rmed, err := gc.GC(ctx, n.Blockstore, n.Pinning) if err != nil { return nil, err } - output := make(chan *KeyRemoved) + out := make(chan *KeyRemoved) go func() { - defer close(output) - for { + defer close(out) + for k := range rmed { select { - case k, ok := <-keychan: - if !ok { - return - } - if !n.Pinning.IsPinned(k) { - err := n.Blockstore.DeleteBlock(k) - if err != nil { - log.Debugf("Error removing key from blockstore: %s", err) - continue - } - select { - case output <- &KeyRemoved{k}: - case <-ctx.Done(): - } - } + case out <- &KeyRemoved{k}: case <-ctx.Done(): return } } }() - return output, nil + return out, nil } func PeriodicGC(ctx context.Context, node *core.IpfsNode) error { diff --git a/core/corerouting/core.go b/core/corerouting/core.go index 41b3345eb6d..52f76a5c5d5 100644 --- a/core/corerouting/core.go +++ b/core/corerouting/core.go @@ -8,6 +8,7 @@ import ( core "github.com/ipfs/go-ipfs/core" "github.com/ipfs/go-ipfs/p2p/host" "github.com/ipfs/go-ipfs/p2p/peer" + repo "github.com/ipfs/go-ipfs/repo" routing "github.com/ipfs/go-ipfs/routing" supernode "github.com/ipfs/go-ipfs/routing/supernode" gcproxy "github.com/ipfs/go-ipfs/routing/supernode/proxy" @@ -27,8 +28,8 @@ var ( // SupernodeServer returns a configuration for a routing server that stores // routing records to the provided datastore. Only routing records are store in // the datastore. 
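The GarbageCollect rewrite above reduces to one reusable shape: drain the channel of removed keys until the collector closes it, or bail out when the context is cancelled. A self-contained sketch, with gcRun standing in for gc.GC:

    package main

    import (
        "context"
        "fmt"
    )

    // gcRun emits removed keys and closes the channel when sweeping is done,
    // the same contract the new gc.GC call has in the diff.
    func gcRun(ctx context.Context) <-chan string {
        out := make(chan string)
        go func() {
            defer close(out)
            for _, k := range []string{"Qma", "Qmb", "Qmc"} {
                select {
                case out <- k:
                case <-ctx.Done():
                    return
                }
            }
        }()
        return out
    }

    // drain mirrors the new GarbageCollect loop: success only once the
    // removal channel has been closed by the collector.
    func drain(ctx context.Context, rmed <-chan string) error {
        for {
            select {
            case k, ok := <-rmed:
                if !ok {
                    return nil
                }
                fmt.Println("removed", k)
            case <-ctx.Done():
                return ctx.Err()
            }
        }
    }

    func main() {
        ctx := context.Background()
        if err := drain(ctx, gcRun(ctx)); err != nil {
            fmt.Println("gc failed:", err)
        }
    }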
-func SupernodeServer(recordSource ds.ThreadSafeDatastore) core.RoutingOption { - return func(ctx context.Context, ph host.Host, dstore ds.ThreadSafeDatastore) (routing.IpfsRouting, error) { +func SupernodeServer(recordSource ds.Datastore) core.RoutingOption { + return func(ctx context.Context, ph host.Host, dstore repo.Datastore) (routing.IpfsRouting, error) { server, err := supernode.NewServer(recordSource, ph.Peerstore(), ph.ID()) if err != nil { return nil, err @@ -44,7 +45,7 @@ func SupernodeServer(recordSource ds.ThreadSafeDatastore) core.RoutingOption { // TODO doc func SupernodeClient(remotes ...peer.PeerInfo) core.RoutingOption { - return func(ctx context.Context, ph host.Host, dstore ds.ThreadSafeDatastore) (routing.IpfsRouting, error) { + return func(ctx context.Context, ph host.Host, dstore repo.Datastore) (routing.IpfsRouting, error) { if len(remotes) < 1 { return nil, errServersMissing } diff --git a/core/coreunix/add.go b/core/coreunix/add.go index 2a0a354a8b6..3070e874461 100644 --- a/core/coreunix/add.go +++ b/core/coreunix/add.go @@ -1,39 +1,175 @@ package coreunix import ( + "fmt" "io" "io/ioutil" "os" gopath "path" + ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" + syncds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync" context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + bstore "github.com/ipfs/go-ipfs/blocks/blockstore" + bserv "github.com/ipfs/go-ipfs/blockservice" + "github.com/ipfs/go-ipfs/exchange/offline" + importer "github.com/ipfs/go-ipfs/importer" + "github.com/ipfs/go-ipfs/importer/chunk" + dagutils "github.com/ipfs/go-ipfs/merkledag/utils" + "github.com/ipfs/go-ipfs/pin" "github.com/ipfs/go-ipfs/commands/files" core "github.com/ipfs/go-ipfs/core" - importer "github.com/ipfs/go-ipfs/importer" - chunk "github.com/ipfs/go-ipfs/importer/chunk" - merkledag "github.com/ipfs/go-ipfs/merkledag" - "github.com/ipfs/go-ipfs/pin" + dag "github.com/ipfs/go-ipfs/merkledag" unixfs "github.com/ipfs/go-ipfs/unixfs" logging "github.com/ipfs/go-ipfs/vendor/QmQg1J6vikuXF9oDvm4wpdeAUvvkVEKW1EYDw9HhTMnP2b/go-log" ) var log = logging.Logger("coreunix") +// how many bytes of progress to wait before sending a progress update message +const progressReaderIncrement = 1024 * 256 + +type Link struct { + Name, Hash string + Size uint64 +} + +type Object struct { + Hash string + Links []Link +} + +type hiddenFileError struct { + fileName string +} + +func (e *hiddenFileError) Error() string { + return fmt.Sprintf("%s is a hidden file", e.fileName) +} + +type ignoreFileError struct { + fileName string +} + +func (e *ignoreFileError) Error() string { + return fmt.Sprintf("%s is an ignored file", e.fileName) +} + +type AddedObject struct { + Name string + Hash string `json:",omitempty"` + Bytes int64 `json:",omitempty"` +} + +func NewAdder(ctx context.Context, n *core.IpfsNode, out chan interface{}) *Adder { + e := dagutils.NewDagEditor(newDirNode(), nil) + return &Adder{ + ctx: ctx, + node: n, + editor: e, + out: out, + Progress: false, + Hidden: true, + Pin: true, + Trickle: false, + Wrap: false, + Chunker: "", + } +} + +// Internal structure for holding the switches passed to the `add` call +type Adder struct { + ctx context.Context + node *core.IpfsNode + editor *dagutils.Editor + out chan interface{} + Progress bool + Hidden bool + Pin bool + Trickle bool + Wrap bool + Chunker string + root *dag.Node +} + +// Perform the 
actual add & pin locally, outputting results to reader +func (params Adder) add(reader io.Reader) (*dag.Node, error) { + chnk, err := chunk.FromString(reader, params.Chunker) + if err != nil { + return nil, err + } + + if params.Trickle { + return importer.BuildTrickleDagFromReader( + params.node.DAG, + chnk, + ) + } + return importer.BuildDagFromReader( + params.node.DAG, + chnk, + ) +} + +func (params *Adder) RootNode() (*dag.Node, error) { + // for memoizing + if params.root != nil { + return params.root, nil + } + + root := params.editor.GetNode() + + // if not wrapping, AND one root file, use that hash as root. + if !params.Wrap && len(root.Links) == 1 { + var err error + root, err = root.Links[0].GetNode(params.ctx, params.editor.GetDagService()) + params.root = root + // no need to output, as we've already done so. + return root, err + } + + // otherwise need to output, as we have not. + err := outputDagnode(params.out, "", root) + params.root = root + return root, err +} + +func (params *Adder) PinRoot() error { + root, err := params.RootNode() + if err != nil { + return err + } + if !params.Pin { + return nil + } + + rnk, err := root.Key() + if err != nil { + return err + } + + params.node.Pinning.PinWithMode(rnk, pin.Recursive) + return params.node.Pinning.Flush() +} + +func (params *Adder) Finalize(DAG dag.DAGService) (*dag.Node, error) { + return params.editor.Finalize(DAG) +} + // Add builds a merkledag from the a reader, pinning all objects to the local // datastore. Returns a key representing the root node. func Add(n *core.IpfsNode, r io.Reader) (string, error) { - // TODO more attractive function signature importer.BuildDagFromReader + unlock := n.Blockstore.PinLock() + defer unlock() - dagNode, err := importer.BuildDagFromReader( - n.DAG, - chunk.NewSizeSplitter(r, chunk.DefaultBlockSize), - importer.BasicPinnerCB(n.Pinning.GetManual()), - ) + fileAdder := NewAdder(n.Context(), n, nil) + + node, err := fileAdder.add(r) if err != nil { return "", err } - k, err := dagNode.Key() + k, err := node.Key() if err != nil { return "", err } @@ -43,6 +179,9 @@ func Add(n *core.IpfsNode, r io.Reader) (string, error) { // AddR recursively adds files in |path|. func AddR(n *core.IpfsNode, root string) (key string, err error) { + unlock := n.Blockstore.PinLock() + defer unlock() + stat, err := os.Lstat(root) if err != nil { return "", err @@ -54,7 +193,9 @@ func AddR(n *core.IpfsNode, root string) (key string, err error) { } defer f.Close() - dagnode, err := addFile(n, f) + fileAdder := NewAdder(n.Context(), n, nil) + + dagnode, err := fileAdder.AddFile(f) if err != nil { return "", err } @@ -64,11 +205,6 @@ func AddR(n *core.IpfsNode, root string) (key string, err error) { return "", err } - n.Pinning.GetManual().RemovePinWithMode(k, pin.Indirect) - if err := n.Pinning.Flush(); err != nil { - return "", err - } - return k.String(), nil } @@ -76,10 +212,14 @@ func AddR(n *core.IpfsNode, root string) (key string, err error) { // to preserve the filename. // Returns the path of the added file ("/filename"), the DAG node of // the directory, and and error if any. 
-func AddWrapped(n *core.IpfsNode, r io.Reader, filename string) (string, *merkledag.Node, error) { +func AddWrapped(n *core.IpfsNode, r io.Reader, filename string) (string, *dag.Node, error) { file := files.NewReaderFile(filename, filename, ioutil.NopCloser(r), nil) dir := files.NewSliceFile("", "", []files.File{file}) - dagnode, err := addDir(n, dir) + fileAdder := NewAdder(n.Context(), n, nil) + + unlock := n.Blockstore.PinLock() + defer unlock() + dagnode, err := fileAdder.addDir(dir) if err != nil { return "", nil, err } @@ -90,61 +230,181 @@ func AddWrapped(n *core.IpfsNode, r io.Reader, filename string) (string, *merkle return gopath.Join(k.String(), filename), dagnode, nil } -func add(n *core.IpfsNode, reader io.Reader) (*merkledag.Node, error) { - mp := n.Pinning.GetManual() +func (params *Adder) addNode(node *dag.Node, path string) error { + // patch it into the root + if path == "" { + key, err := node.Key() + if err != nil { + return err + } - return importer.BuildDagFromReader( - n.DAG, - chunk.DefaultSplitter(reader), - importer.PinIndirectCB(mp), - ) -} + path = key.Pretty() + } -func addNode(n *core.IpfsNode, node *merkledag.Node) error { - if err := n.DAG.AddRecursive(node); err != nil { // add the file to the graph + local storage + if err := params.editor.InsertNodeAtPath(params.ctx, path, node, newDirNode); err != nil { return err } - ctx, cancel := context.WithCancel(n.Context()) - defer cancel() - err := n.Pinning.Pin(ctx, node, true) // ensure we keep it - return err + + return outputDagnode(params.out, path, node) } -func addFile(n *core.IpfsNode, file files.File) (*merkledag.Node, error) { - if file.IsDirectory() { - return addDir(n, file) +// Add the given file while respecting the params. +func (params *Adder) AddFile(file files.File) (*dag.Node, error) { + switch { + case files.IsHidden(file) && !params.Hidden: + log.Debugf("%s is hidden, skipping", file.FileName()) + return nil, &hiddenFileError{file.FileName()} + case file.IsDirectory(): + return params.addDir(file) } - return add(n, file) -} -func addDir(n *core.IpfsNode, dir files.File) (*merkledag.Node, error) { + // case for symlink + if s, ok := file.(*files.Symlink); ok { + sdata, err := unixfs.SymlinkData(s.Target) + if err != nil { + return nil, err + } + + dagnode := &dag.Node{Data: sdata} + _, err = params.node.DAG.Add(dagnode) + if err != nil { + return nil, err + } + + err = params.addNode(dagnode, s.FileName()) + return dagnode, err + } - tree := &merkledag.Node{Data: unixfs.FolderPBData()} + // case for regular file + // if the progress flag was specified, wrap the file so that we can send + // progress updates to the client (over the output channel) + var reader io.Reader = file + if params.Progress { + reader = &progressReader{file: file, out: params.out} + } + + dagnode, err := params.add(reader) + if err != nil { + return nil, err + } + + // patch it into the root + log.Infof("adding file: %s", file.FileName()) + err = params.addNode(dagnode, file.FileName()) + return dagnode, err +} + +func (params *Adder) addDir(dir files.File) (*dag.Node, error) { + tree := newDirNode() + log.Infof("adding directory: %s", dir.FileName()) -Loop: for { file, err := dir.NextFile() - switch { - case err != nil && err != io.EOF: + if err != nil && err != io.EOF { return nil, err - case err == io.EOF: - break Loop + } + if file == nil { + break } - node, err := addFile(n, file) - if err != nil { + node, err := params.AddFile(file) + if _, ok := err.(*hiddenFileError); ok { + // hidden file error, skip file + 
continue + } else if err != nil { return nil, err } _, name := gopath.Split(file.FileName()) - if err := tree.AddNodeLink(name, node); err != nil { + if err := tree.AddNodeLinkClean(name, node); err != nil { return nil, err } } - if err := addNode(n, tree); err != nil { + if err := params.addNode(tree, dir.FileName()); err != nil { + return nil, err + } + + if _, err := params.node.DAG.Add(tree); err != nil { return nil, err } + return tree, nil } + +// outputDagnode sends dagnode info over the output channel +func outputDagnode(out chan interface{}, name string, dn *dag.Node) error { + if out == nil { + return nil + } + + o, err := getOutput(dn) + if err != nil { + return err + } + + out <- &AddedObject{ + Hash: o.Hash, + Name: name, + } + + return nil +} + +func NewMemoryDagService() dag.DAGService { + // build mem-datastore for editor's intermediary nodes + bs := bstore.NewBlockstore(syncds.MutexWrap(ds.NewMapDatastore())) + bsrv := bserv.New(bs, offline.Exchange(bs)) + return dag.NewDAGService(bsrv) +} + +// TODO: generalize this to more than unix-fs nodes. +func newDirNode() *dag.Node { + return &dag.Node{Data: unixfs.FolderPBData()} +} + +// from core/commands/object.go +func getOutput(dagnode *dag.Node) (*Object, error) { + key, err := dagnode.Key() + if err != nil { + return nil, err + } + + output := &Object{ + Hash: key.Pretty(), + Links: make([]Link, len(dagnode.Links)), + } + + for i, link := range dagnode.Links { + output.Links[i] = Link{ + Name: link.Name, + Hash: link.Hash.B58String(), + Size: link.Size, + } + } + + return output, nil +} + +type progressReader struct { + file files.File + out chan interface{} + bytes int64 + lastProgress int64 +} + +func (i *progressReader) Read(p []byte) (int, error) { + n, err := i.file.Read(p) + + i.bytes += int64(n) + if i.bytes-i.lastProgress >= progressReaderIncrement || err == io.EOF { + i.lastProgress = i.bytes + i.out <- &AddedObject{ + Name: i.file.FileName(), + Bytes: i.bytes, + } + } + + return n, err +} diff --git a/core/coreunix/metadata_test.go b/core/coreunix/metadata_test.go index 034cb7c89ef..86f003e090c 100644 --- a/core/coreunix/metadata_test.go +++ b/core/coreunix/metadata_test.go @@ -36,7 +36,7 @@ func TestMetadata(t *testing.T) { data := make([]byte, 1000) u.NewTimeSeededRand().Read(data) r := bytes.NewReader(data) - nd, err := importer.BuildDagFromReader(ds, chunk.DefaultSplitter(r), nil) + nd, err := importer.BuildDagFromReader(ds, chunk.DefaultSplitter(r)) if err != nil { t.Fatal(err) } diff --git a/fuse/ipns/ipns_test.go b/fuse/ipns/ipns_test.go index fdee5741883..c5f8d6a7389 100644 --- a/fuse/ipns/ipns_test.go +++ b/fuse/ipns/ipns_test.go @@ -16,7 +16,7 @@ import ( context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" core "github.com/ipfs/go-ipfs/core" - nsfs "github.com/ipfs/go-ipfs/ipnsfs" + //mfs "github.com/ipfs/go-ipfs/mfs" namesys "github.com/ipfs/go-ipfs/namesys" offroute "github.com/ipfs/go-ipfs/routing/offline" u "github.com/ipfs/go-ipfs/util" @@ -115,12 +115,10 @@ func setupIpnsTest(t *testing.T, node *core.IpfsNode) (*core.IpfsNode, *fstest.M node.Routing = offroute.NewOfflineRouter(node.Repo.Datastore(), node.PrivateKey) node.Namesys = namesys.NewNameSystem(node.Routing, node.Repo.Datastore(), 0) - ipnsfs, err := nsfs.NewFilesystem(context.Background(), node.DAG, node.Namesys, node.Pinning, node.PrivateKey) + err = InitializeKeyspace(node, node.PrivateKey) if err != nil { t.Fatal(err) } - - node.IpnsFs = ipnsfs } fs, err := NewFileSystem(node, 
node.PrivateKey, "", "") diff --git a/fuse/ipns/ipns_unix.go b/fuse/ipns/ipns_unix.go index fd3e3a39e5d..18d5255c4d3 100644 --- a/fuse/ipns/ipns_unix.go +++ b/fuse/ipns/ipns_unix.go @@ -17,9 +17,10 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" core "github.com/ipfs/go-ipfs/core" - nsfs "github.com/ipfs/go-ipfs/ipnsfs" dag "github.com/ipfs/go-ipfs/merkledag" + mfs "github.com/ipfs/go-ipfs/mfs" ci "github.com/ipfs/go-ipfs/p2p/crypto" + path "github.com/ipfs/go-ipfs/path" ft "github.com/ipfs/go-ipfs/unixfs" ) @@ -33,10 +34,15 @@ type FileSystem struct { // NewFileSystem constructs new fs using given core.IpfsNode instance. func NewFileSystem(ipfs *core.IpfsNode, sk ci.PrivKey, ipfspath, ipnspath string) (*FileSystem, error) { - root, err := CreateRoot(ipfs, []ci.PrivKey{sk}, ipfspath, ipnspath) + + kmap := map[string]ci.PrivKey{ + "local": sk, + } + root, err := CreateRoot(ipfs, kmap, ipfspath, ipnspath) if err != nil { return nil, err } + return &FileSystem{Ipfs: ipfs, RootNode: root}, nil } @@ -56,60 +62,102 @@ func (f *FileSystem) Destroy() { // Root is the root object of the filesystem tree. type Root struct { Ipfs *core.IpfsNode - Keys []ci.PrivKey + Keys map[string]ci.PrivKey // Used for symlinking into ipfs IpfsRoot string IpnsRoot string LocalDirs map[string]fs.Node - Roots map[string]*nsfs.KeyRoot + Roots map[string]*keyRoot + + LocalLinks map[string]*Link +} + +func ipnsPubFunc(ipfs *core.IpfsNode, k ci.PrivKey) mfs.PubFunc { + return func(ctx context.Context, key key.Key) error { + return ipfs.Namesys.Publish(ctx, k, path.FromKey(key)) + } +} + +func loadRoot(ctx context.Context, rt *keyRoot, ipfs *core.IpfsNode, name string) (fs.Node, error) { + p, err := path.ParsePath("/ipns/" + name) + if err != nil { + log.Errorf("mkpath %s: %s", name, err) + return nil, err + } + + node, err := core.Resolve(ctx, ipfs, p) + if err != nil { + log.Errorf("looking up %s: %s", p, err) + return nil, err + } + + root, err := mfs.NewRoot(ctx, ipfs.DAG, node, ipnsPubFunc(ipfs, rt.k)) + if err != nil { + return nil, err + } + + rt.root = root + + switch val := root.GetValue().(type) { + case *mfs.Directory: + return &Directory{dir: val}, nil + case *mfs.File: + return &File{fi: val}, nil + default: + return nil, errors.New("unrecognized type") + } - fs *nsfs.Filesystem - LocalLink *Link + panic("not reached") } -func CreateRoot(ipfs *core.IpfsNode, keys []ci.PrivKey, ipfspath, ipnspath string) (*Root, error) { +type keyRoot struct { + k ci.PrivKey + alias string + root *mfs.Root +} + +func CreateRoot(ipfs *core.IpfsNode, keys map[string]ci.PrivKey, ipfspath, ipnspath string) (*Root, error) { ldirs := make(map[string]fs.Node) - roots := make(map[string]*nsfs.KeyRoot) - for _, k := range keys { + roots := make(map[string]*keyRoot) + links := make(map[string]*Link) + for alias, k := range keys { pkh, err := k.GetPublic().Hash() if err != nil { return nil, err } name := key.Key(pkh).B58String() - root, err := ipfs.IpnsFs.GetRoot(name) + + kr := &keyRoot{k: k, alias: alias} + fsn, err := loadRoot(ipfs.Context(), kr, ipfs, name) if err != nil { return nil, err } - roots[name] = root + roots[name] = kr + ldirs[name] = fsn - switch val := root.GetValue().(type) { - case *nsfs.Directory: - ldirs[name] = &Directory{dir: val} - case *nsfs.File: - ldirs[name] = &File{fi: val} - default: - return nil, errors.New("unrecognized type") + // set up alias symlink + links[alias] = &Link{ + Target: name, } } return &Root{ - fs: ipfs.IpnsFs, - Ipfs: ipfs, - IpfsRoot: ipfspath, - IpnsRoot: 
ipnspath, - Keys: keys, - LocalDirs: ldirs, - LocalLink: &Link{ipfs.Identity.Pretty()}, - Roots: roots, + Ipfs: ipfs, + IpfsRoot: ipfspath, + IpnsRoot: ipnspath, + Keys: keys, + LocalDirs: ldirs, + LocalLinks: links, + Roots: roots, }, nil } // Attr returns file attributes. func (*Root) Attr(ctx context.Context, a *fuse.Attr) error { log.Debug("Root Attr") - *a = fuse.Attr{Mode: os.ModeDir | 0111} // -rw+x + a.Mode = os.ModeDir | 0111 // -rw+x return nil } @@ -121,12 +169,8 @@ func (s *Root) Lookup(ctx context.Context, name string) (fs.Node, error) { return nil, fuse.ENOENT } - // Local symlink to the node ID keyspace - if name == "local" { - if s.LocalLink == nil { - return nil, fuse.ENOENT - } - return s.LocalLink, nil + if lnk, ok := s.LocalLinks[name]; ok { + return lnk, nil } nd, ok := s.LocalDirs[name] @@ -152,15 +196,15 @@ func (s *Root) Lookup(ctx context.Context, name string) (fs.Node, error) { if segments[0] == "ipfs" { p := strings.Join(resolved.Segments()[1:], "/") return &Link{s.IpfsRoot + "/" + p}, nil - } else { - log.Error("Invalid path.Path: ", resolved) - return nil, errors.New("invalid path from ipns record") } + + log.Error("Invalid path.Path: ", resolved) + return nil, errors.New("invalid path from ipns record") } func (r *Root) Close() error { - for _, kr := range r.Roots { - err := kr.Publish(r.Ipfs.Context()) + for _, mr := range r.Roots { + err := mr.root.Close() if err != nil { return err } @@ -181,13 +225,9 @@ func (r *Root) Forget() { // as well as a symlink to the peerID key func (r *Root) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) { log.Debug("Root ReadDirAll") - listing := []fuse.Dirent{ - { - Name: "local", - Type: fuse.DT_Link, - }, - } - for _, k := range r.Keys { + + var listing []fuse.Dirent + for alias, k := range r.Keys { pub := k.GetPublic() hash, err := pub.Hash() if err != nil { @@ -197,21 +237,25 @@ func (r *Root) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) { Name: key.Key(hash).Pretty(), Type: fuse.DT_Dir, } - listing = append(listing, ent) + link := fuse.Dirent{ + Name: alias, + Type: fuse.DT_Link, + } + listing = append(listing, ent, link) } return listing, nil } -// Directory is wrapper over an ipnsfs directory to satisfy the fuse fs interface +// Directory is wrapper over an mfs directory to satisfy the fuse fs interface type Directory struct { - dir *nsfs.Directory + dir *mfs.Directory fs.NodeRef } -// File is wrapper over an ipnsfs file to satisfy the fuse fs interface +// File is wrapper over an mfs file to satisfy the fuse fs interface type File struct { - fi *nsfs.File + fi *mfs.File fs.NodeRef } @@ -219,11 +263,9 @@ type File struct { // Attr returns the attributes of a given node. 
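With `CreateRoot` now keyed by alias, a fuse ipns mount can expose several keys at once: each key shows up as a directory named after its hash, plus a symlink for the human-readable alias, which is exactly what `ReadDirAll` above lists. A minimal sketch assuming only the `CreateRoot` signature from this diff (`newTwoKeyFS` and both key parameters are hypothetical):

```go
package ipns // illustrative only; not part of this patch

import (
	core "github.com/ipfs/go-ipfs/core"
	ci "github.com/ipfs/go-ipfs/p2p/crypto"
)

// newTwoKeyFS builds a fuse ipns Root exposing two keypairs. Each map
// entry becomes a directory named after the key's B58-encoded hash, and
// the map key ("local", "backup") becomes a symlink pointing at it.
func newTwoKeyFS(ipfs *core.IpfsNode, nodeKey, backupKey ci.PrivKey) (*Root, error) {
	kmap := map[string]ci.PrivKey{
		"local":  nodeKey,   // conventionally the node's own key
		"backup": backupKey, // any additional key to publish under
	}
	return CreateRoot(ipfs, kmap, "/ipfs", "/ipns")
}
```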
func (d *Directory) Attr(ctx context.Context, a *fuse.Attr) error { log.Debug("Directory Attr") - *a = fuse.Attr{ - Mode: os.ModeDir | 0555, - Uid: uint32(os.Getuid()), - Gid: uint32(os.Getgid()), - } + a.Mode = os.ModeDir | 0555 + a.Uid = uint32(os.Getuid()) + a.Gid = uint32(os.Getgid()) return nil } @@ -235,12 +277,10 @@ func (fi *File) Attr(ctx context.Context, a *fuse.Attr) error { // In this case, the dag node in question may not be unixfs return fmt.Errorf("fuse/ipns: failed to get file.Size(): %s", err) } - *a = fuse.Attr{ - Mode: os.FileMode(0666), - Size: uint64(size), - Uid: uint32(os.Getuid()), - Gid: uint32(os.Getgid()), - } + a.Mode = os.FileMode(0666) + a.Size = uint64(size) + a.Uid = uint32(os.Getuid()) + a.Gid = uint32(os.Getgid()) return nil } @@ -253,9 +293,9 @@ func (s *Directory) Lookup(ctx context.Context, name string) (fs.Node, error) { } switch child := child.(type) { - case *nsfs.Directory: + case *mfs.Directory: return &Directory{dir: child}, nil - case *nsfs.File: + case *mfs.File: return &File{fi: child}, nil default: // NB: if this happens, we do not want to continue, unpredictable behaviour @@ -267,19 +307,17 @@ func (s *Directory) Lookup(ctx context.Context, name string) (fs.Node, error) { // ReadDirAll reads the link structure as directory entries func (dir *Directory) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) { var entries []fuse.Dirent - for _, name := range dir.dir.List() { - dirent := fuse.Dirent{Name: name} - - // TODO: make dir.dir.List() return dirinfos - child, err := dir.dir.Child(name) - if err != nil { - return nil, err - } + listing, err := dir.dir.List() + if err != nil { + return nil, err + } + for _, entry := range listing { + dirent := fuse.Dirent{Name: entry.Name} - switch child.Type() { - case nsfs.TDir: + switch mfs.NodeType(entry.Type) { + case mfs.TDir: dirent.Type = fuse.DT_Dir - case nsfs.TFile: + case mfs.TFile: dirent.Type = fuse.DT_File } @@ -339,15 +377,17 @@ func (fi *File) Flush(ctx context.Context, req *fuse.FlushRequest) error { } func (fi *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error { - cursize, err := fi.fi.Size() - if err != nil { - return err - } - if cursize != int64(req.Size) { - err := fi.fi.Truncate(int64(req.Size)) + if req.Valid.Size() { + cursize, err := fi.fi.Size() if err != nil { return err } + if cursize != int64(req.Size) { + err := fi.fi.Truncate(int64(req.Size)) + if err != nil { + return err + } + } } return nil } @@ -421,7 +461,7 @@ func (dir *Directory) Create(ctx context.Context, req *fuse.CreateRequest, resp return nil, nil, err } - fi, ok := child.(*nsfs.File) + fi, ok := child.(*mfs.File) if !ok { return nil, nil, errors.New("child creation failed") } diff --git a/fuse/ipns/link_unix.go b/fuse/ipns/link_unix.go index a8414a36586..d45ce02836f 100644 --- a/fuse/ipns/link_unix.go +++ b/fuse/ipns/link_unix.go @@ -16,9 +16,7 @@ type Link struct { func (l *Link) Attr(ctx context.Context, a *fuse.Attr) error { log.Debug("Link attr.") - *a = fuse.Attr{ - Mode: os.ModeSymlink | 0555, - } + a.Mode = os.ModeSymlink | 0555 return nil } diff --git a/fuse/ipns/mount_unix.go b/fuse/ipns/mount_unix.go index 620ce9fa78c..57b234db876 100644 --- a/fuse/ipns/mount_unix.go +++ b/fuse/ipns/mount_unix.go @@ -6,7 +6,6 @@ package ipns import ( core "github.com/ipfs/go-ipfs/core" mount "github.com/ipfs/go-ipfs/fuse/mount" - ipnsfs "github.com/ipfs/go-ipfs/ipnsfs" ) // Mount mounts ipns at a given location, and returns a mount.Mount instance. 
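The `Setattr` change above is worth spelling out: the kernel batches several attribute updates into a single request and flags which fields are meaningful, so a handler must consult `req.Valid` before acting. Without that check, a plain `chmod` or `utimes` would truncate the file to whatever the unset `req.Size` holds (zero). A minimal sketch of the convention, assuming bazil.org/fuse semantics as used in this diff (`applySetattr` and its `truncate` parameter are hypothetical):

```go
package ipns // illustrative only; not part of this patch

import fuse "bazil.org/fuse"

// applySetattr resizes only when the request actually carries a size;
// requests that only change mode/owner/times leave Size unset and must
// not touch the file contents.
func applySetattr(req *fuse.SetattrRequest, truncate func(int64) error) error {
	if req.Valid.Size() {
		return truncate(int64(req.Size))
	}
	return nil
}
```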
@@ -18,14 +17,6 @@ func Mount(ipfs *core.IpfsNode, ipnsmp, ipfsmp string) (mount.Mount, error) { allow_other := cfg.Mounts.FuseAllowOther - if ipfs.IpnsFs == nil { - fs, err := ipnsfs.NewFilesystem(ipfs.Context(), ipfs.DAG, ipfs.Namesys, ipfs.Pinning, ipfs.PrivateKey) - if err != nil { - return nil, err - } - ipfs.IpnsFs = fs - } - fsys, err := NewFileSystem(ipfs, ipfs.PrivateKey, ipfsmp, ipnsmp) if err != nil { return nil, err diff --git a/fuse/readonly/ipfs_test.go b/fuse/readonly/ipfs_test.go index 7add4deb397..4aca4425eb2 100644 --- a/fuse/readonly/ipfs_test.go +++ b/fuse/readonly/ipfs_test.go @@ -36,7 +36,7 @@ func randObj(t *testing.T, nd *core.IpfsNode, size int64) (*dag.Node, []byte) { buf := make([]byte, size) u.NewTimeSeededRand().Read(buf) read := bytes.NewReader(buf) - obj, err := importer.BuildTrickleDagFromReader(nd.DAG, chunk.DefaultSplitter(read), nil) + obj, err := importer.BuildTrickleDagFromReader(nd.DAG, chunk.DefaultSplitter(read)) if err != nil { t.Fatal(err) } diff --git a/fuse/readonly/readonly_unix.go b/fuse/readonly/readonly_unix.go index ffd32b369ff..ac55359477b 100644 --- a/fuse/readonly/readonly_unix.go +++ b/fuse/readonly/readonly_unix.go @@ -46,7 +46,7 @@ type Root struct { // Attr returns file attributes. func (*Root) Attr(ctx context.Context, a *fuse.Attr) error { - *a = fuse.Attr{Mode: os.ModeDir | 0111} // -rw+x + a.Mode = os.ModeDir | 0111 // -rw+x return nil } @@ -118,7 +118,6 @@ func (s *Node) Attr(ctx context.Context, a *fuse.Attr) error { a.Size = uint64(len(s.cached.GetData())) a.Uid = uint32(os.Getuid()) a.Gid = uint32(os.Getgid()) - default: return fmt.Errorf("Invalid data type - %s", s.cached.GetType()) } diff --git a/importer/balanced/balanced_test.go b/importer/balanced/balanced_test.go index 2d589fc1ee0..5968d6f650a 100644 --- a/importer/balanced/balanced_test.go +++ b/importer/balanced/balanced_test.go @@ -128,7 +128,7 @@ func arrComp(a, b []byte) error { type dagservAndPinner struct { ds dag.DAGService - mp pin.ManualPinner + mp pin.Pinner } func TestIndirectBlocks(t *testing.T) { diff --git a/importer/helpers/dagbuilder.go b/importer/helpers/dagbuilder.go index 40617fdc271..1d9f0bd10af 100644 --- a/importer/helpers/dagbuilder.go +++ b/importer/helpers/dagbuilder.go @@ -2,30 +2,18 @@ package helpers import ( dag "github.com/ipfs/go-ipfs/merkledag" - "github.com/ipfs/go-ipfs/pin" ) -// NodeCB is callback function for dag generation -// the `last` flag signifies whether or not this is the last -// (top-most root) node being added. useful for things like -// only pinning the first node recursively. -type NodeCB func(node *dag.Node, last bool) error - -var nilFunc NodeCB = func(_ *dag.Node, _ bool) error { return nil } - // DagBuilderHelper wraps together a bunch of objects needed to // efficiently create unixfs dag trees type DagBuilderHelper struct { dserv dag.DAGService - mp pin.ManualPinner in <-chan []byte errs <-chan error recvdErr error nextData []byte // the next item to return. 
 	maxlinks int
-	ncb      NodeCB
-
-	batch *dag.Batch
+	batch *dag.Batch
 }
 
 type DagBuilderParams struct {
@@ -34,25 +22,16 @@ type DagBuilderParams struct {
 
 	// DAGService to write blocks to (required)
 	Dagserv dag.DAGService
-
-	// Callback for each block added
-	NodeCB NodeCB
 }
 
 // Generate a new DagBuilderHelper from the given params, using 'in' as a
 // data source
 func (dbp *DagBuilderParams) New(in <-chan []byte, errs <-chan error) *DagBuilderHelper {
-	ncb := dbp.NodeCB
-	if ncb == nil {
-		ncb = nilFunc
-	}
-
 	return &DagBuilderHelper{
 		dserv:    dbp.Dagserv,
 		in:       in,
 		errs:     errs,
 		maxlinks: dbp.Maxlinks,
-		ncb:      ncb,
 		batch:    dbp.Dagserv.Batch(),
 	}
 }
@@ -106,7 +85,6 @@ func (db *DagBuilderHelper) GetDagServ() dag.DAGService {
 // FillNodeLayer will add datanodes as children to the given node until
 // at most db.indirSize nodes are added
 //
-// warning: **children** pinned indirectly, but input node IS NOT pinned.
 func (db *DagBuilderHelper) FillNodeLayer(node *UnixfsNode) error {
 
 	// while we have room AND we're not done
@@ -150,12 +128,6 @@ func (db *DagBuilderHelper) Add(node *UnixfsNode) (*dag.Node, error) {
 		return nil, err
 	}
 
-	// node callback
-	err = db.ncb(dn, true)
-	if err != nil {
-		return nil, err
-	}
-
 	return dn, nil
 }
 
diff --git a/importer/helpers/helpers.go b/importer/helpers/helpers.go
index cb8422126e6..5c76cfdbe80 100644
--- a/importer/helpers/helpers.go
+++ b/importer/helpers/helpers.go
@@ -4,10 +4,8 @@ import (
 	"fmt"
 
 	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
-	key "github.com/ipfs/go-ipfs/blocks/key"
 	chunk "github.com/ipfs/go-ipfs/importer/chunk"
 	dag "github.com/ipfs/go-ipfs/merkledag"
-	"github.com/ipfs/go-ipfs/pin"
 	ft "github.com/ipfs/go-ipfs/unixfs"
 )
 
@@ -108,21 +106,11 @@ func (n *UnixfsNode) AddChild(child *UnixfsNode, db *DagBuilderHelper) error {
 		return err
 	}
 
-	// Pin the child node indirectly
-	err = db.ncb(childnode, false)
-	if err != nil {
-		return err
-	}
-
 	return nil
 }
 
 // Removes the child node at the given index
 func (n *UnixfsNode) RemoveChild(index int, dbh *DagBuilderHelper) {
-	k := key.Key(n.node.Links[index].Hash)
-	if dbh.mp != nil {
-		dbh.mp.RemovePinWithMode(k, pin.Indirect)
-	}
 	n.ufmt.RemoveBlockSize(index)
 	n.node.Links = append(n.node.Links[:index], n.node.Links[index+1:]...)
} diff --git a/importer/importer.go b/importer/importer.go index 33e0b67bc37..b16b5b05bd0 100644 --- a/importer/importer.go +++ b/importer/importer.go @@ -12,7 +12,6 @@ import ( h "github.com/ipfs/go-ipfs/importer/helpers" trickle "github.com/ipfs/go-ipfs/importer/trickle" dag "github.com/ipfs/go-ipfs/merkledag" - "github.com/ipfs/go-ipfs/pin" logging "github.com/ipfs/go-ipfs/vendor/QmQg1J6vikuXF9oDvm4wpdeAUvvkVEKW1EYDw9HhTMnP2b/go-log" ) @@ -20,7 +19,7 @@ var log = logging.Logger("importer") // Builds a DAG from the given file, writing created blocks to disk as they are // created -func BuildDagFromFile(fpath string, ds dag.DAGService, mp pin.ManualPinner) (*dag.Node, error) { +func BuildDagFromFile(fpath string, ds dag.DAGService) (*dag.Node, error) { stat, err := os.Lstat(fpath) if err != nil { return nil, err @@ -36,60 +35,29 @@ func BuildDagFromFile(fpath string, ds dag.DAGService, mp pin.ManualPinner) (*da } defer f.Close() - return BuildDagFromReader(ds, chunk.NewSizeSplitter(f, chunk.DefaultBlockSize), BasicPinnerCB(mp)) + return BuildDagFromReader(ds, chunk.NewSizeSplitter(f, chunk.DefaultBlockSize)) } -func BuildDagFromReader(ds dag.DAGService, spl chunk.Splitter, ncb h.NodeCB) (*dag.Node, error) { +func BuildDagFromReader(ds dag.DAGService, spl chunk.Splitter) (*dag.Node, error) { // Start the splitter blkch, errch := chunk.Chan(spl) dbp := h.DagBuilderParams{ Dagserv: ds, Maxlinks: h.DefaultLinksPerBlock, - NodeCB: ncb, } return bal.BalancedLayout(dbp.New(blkch, errch)) } -func BuildTrickleDagFromReader(ds dag.DAGService, spl chunk.Splitter, ncb h.NodeCB) (*dag.Node, error) { +func BuildTrickleDagFromReader(ds dag.DAGService, spl chunk.Splitter) (*dag.Node, error) { // Start the splitter blkch, errch := chunk.Chan(spl) dbp := h.DagBuilderParams{ Dagserv: ds, Maxlinks: h.DefaultLinksPerBlock, - NodeCB: ncb, } return trickle.TrickleLayout(dbp.New(blkch, errch)) } - -func BasicPinnerCB(p pin.ManualPinner) h.NodeCB { - return func(n *dag.Node, last bool) error { - k, err := n.Key() - if err != nil { - return err - } - - if last { - p.PinWithMode(k, pin.Recursive) - return p.Flush() - } else { - p.PinWithMode(k, pin.Indirect) - return nil - } - } -} - -func PinIndirectCB(p pin.ManualPinner) h.NodeCB { - return func(n *dag.Node, last bool) error { - k, err := n.Key() - if err != nil { - return err - } - - p.PinWithMode(k, pin.Indirect) - return nil - } -} diff --git a/importer/importer_test.go b/importer/importer_test.go index 96b20341e1d..c41156f22c1 100644 --- a/importer/importer_test.go +++ b/importer/importer_test.go @@ -17,7 +17,7 @@ import ( func getBalancedDag(t testing.TB, size int64, blksize int64) (*dag.Node, dag.DAGService) { ds := mdtest.Mock() r := io.LimitReader(u.NewTimeSeededRand(), size) - nd, err := BuildDagFromReader(ds, chunk.NewSizeSplitter(r, blksize), nil) + nd, err := BuildDagFromReader(ds, chunk.NewSizeSplitter(r, blksize)) if err != nil { t.Fatal(err) } @@ -27,7 +27,7 @@ func getBalancedDag(t testing.TB, size int64, blksize int64) (*dag.Node, dag.DAG func getTrickleDag(t testing.TB, size int64, blksize int64) (*dag.Node, dag.DAGService) { ds := mdtest.Mock() r := io.LimitReader(u.NewTimeSeededRand(), size) - nd, err := BuildTrickleDagFromReader(ds, chunk.NewSizeSplitter(r, blksize), nil) + nd, err := BuildTrickleDagFromReader(ds, chunk.NewSizeSplitter(r, blksize)) if err != nil { t.Fatal(err) } @@ -40,7 +40,7 @@ func TestBalancedDag(t *testing.T) { u.NewTimeSeededRand().Read(buf) r := bytes.NewReader(buf) - nd, err := BuildDagFromReader(ds, 
chunk.DefaultSplitter(r), nil) + nd, err := BuildDagFromReader(ds, chunk.DefaultSplitter(r)) if err != nil { t.Fatal(err) } diff --git a/importer/trickle/trickle_test.go b/importer/trickle/trickle_test.go index b58acac97b9..2cd98ec975c 100644 --- a/importer/trickle/trickle_test.go +++ b/importer/trickle/trickle_test.go @@ -125,7 +125,7 @@ func arrComp(a, b []byte) error { type dagservAndPinner struct { ds merkledag.DAGService - mp pin.ManualPinner + mp pin.Pinner } func TestIndirectBlocks(t *testing.T) { diff --git a/ipnsfs/system.go b/ipnsfs/system.go deleted file mode 100644 index 4fe935d0334..00000000000 --- a/ipnsfs/system.go +++ /dev/null @@ -1,304 +0,0 @@ -// package ipnsfs implements an in memory model of a mutable ipns filesystem, -// to be used by the fuse filesystem. -// -// It consists of four main structs: -// 1) The Filesystem -// The filesystem serves as a container and entry point for the ipns filesystem -// 2) KeyRoots -// KeyRoots represent the root of the keyspace controlled by a given keypair -// 3) Directories -// 4) Files -package ipnsfs - -import ( - "errors" - "os" - "sync" - "time" - - key "github.com/ipfs/go-ipfs/blocks/key" - dag "github.com/ipfs/go-ipfs/merkledag" - namesys "github.com/ipfs/go-ipfs/namesys" - ci "github.com/ipfs/go-ipfs/p2p/crypto" - path "github.com/ipfs/go-ipfs/path" - pin "github.com/ipfs/go-ipfs/pin" - ft "github.com/ipfs/go-ipfs/unixfs" - - context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" - logging "github.com/ipfs/go-ipfs/vendor/QmQg1J6vikuXF9oDvm4wpdeAUvvkVEKW1EYDw9HhTMnP2b/go-log" -) - -var log = logging.Logger("ipnsfs") - -var ErrIsDirectory = errors.New("error: is a directory") - -// Filesystem is the writeable fuse filesystem structure -type Filesystem struct { - ctx context.Context - - dserv dag.DAGService - - nsys namesys.NameSystem - - resolver *path.Resolver - - pins pin.Pinner - - roots map[string]*KeyRoot -} - -// NewFilesystem instantiates an ipns filesystem using the given parameters and locally owned keys -func NewFilesystem(ctx context.Context, ds dag.DAGService, nsys namesys.NameSystem, pins pin.Pinner, keys ...ci.PrivKey) (*Filesystem, error) { - roots := make(map[string]*KeyRoot) - fs := &Filesystem{ - ctx: ctx, - roots: roots, - nsys: nsys, - dserv: ds, - pins: pins, - resolver: &path.Resolver{DAG: ds}, - } - for _, k := range keys { - pkh, err := k.GetPublic().Hash() - if err != nil { - return nil, err - } - - root, err := fs.newKeyRoot(ctx, k) - if err != nil { - return nil, err - } - roots[key.Key(pkh).Pretty()] = root - } - - return fs, nil -} - -func (fs *Filesystem) Close() error { - wg := sync.WaitGroup{} - for _, r := range fs.roots { - wg.Add(1) - go func(r *KeyRoot) { - defer wg.Done() - err := r.Publish(fs.ctx) - if err != nil { - log.Info(err) - return - } - }(r) - } - wg.Wait() - return nil -} - -// GetRoot returns the KeyRoot of the given name -func (fs *Filesystem) GetRoot(name string) (*KeyRoot, error) { - r, ok := fs.roots[name] - if ok { - return r, nil - } - return nil, os.ErrNotExist -} - -type childCloser interface { - closeChild(string, *dag.Node) error -} - -type NodeType int - -const ( - TFile NodeType = iota - TDir -) - -// FSNode represents any node (directory, root, or file) in the ipns filesystem -type FSNode interface { - GetNode() (*dag.Node, error) - Type() NodeType - Lock() - Unlock() -} - -// KeyRoot represents the root of a filesystem tree pointed to by a given keypair -type KeyRoot struct { - key ci.PrivKey - name string - - // 
node is the merkledag node pointed to by this keypair - node *dag.Node - - // A pointer to the filesystem to access components - fs *Filesystem - - // val represents the node pointed to by this key. It can either be a File or a Directory - val FSNode - - repub *Republisher -} - -// newKeyRoot creates a new KeyRoot for the given key, and starts up a republisher routine -// for it -func (fs *Filesystem) newKeyRoot(parent context.Context, k ci.PrivKey) (*KeyRoot, error) { - hash, err := k.GetPublic().Hash() - if err != nil { - return nil, err - } - - name := "/ipns/" + key.Key(hash).String() - - root := new(KeyRoot) - root.key = k - root.fs = fs - root.name = name - - ctx, cancel := context.WithCancel(parent) - defer cancel() - - pointsTo, err := fs.nsys.Resolve(ctx, name) - if err != nil { - err = namesys.InitializeKeyspace(ctx, fs.dserv, fs.nsys, fs.pins, k) - if err != nil { - return nil, err - } - - pointsTo, err = fs.nsys.Resolve(ctx, name) - if err != nil { - return nil, err - } - } - - mnode, err := fs.resolver.ResolvePath(ctx, pointsTo) - if err != nil { - log.Errorf("Failed to retrieve value '%s' for ipns entry: %s\n", pointsTo, err) - return nil, err - } - - root.node = mnode - - root.repub = NewRepublisher(root, time.Millisecond*300, time.Second*3) - go root.repub.Run(parent) - - pbn, err := ft.FromBytes(mnode.Data) - if err != nil { - log.Error("IPNS pointer was not unixfs node") - return nil, err - } - - switch pbn.GetType() { - case ft.TDirectory: - root.val = NewDirectory(ctx, pointsTo.String(), mnode, root, fs) - case ft.TFile, ft.TMetadata, ft.TRaw: - fi, err := NewFile(pointsTo.String(), mnode, root, fs) - if err != nil { - return nil, err - } - root.val = fi - default: - panic("unrecognized! (NYI)") - } - return root, nil -} - -func (kr *KeyRoot) GetValue() FSNode { - return kr.val -} - -// closeChild implements the childCloser interface, and signals to the publisher that -// there are changes ready to be published -func (kr *KeyRoot) closeChild(name string, nd *dag.Node) error { - kr.repub.Touch() - return nil -} - -// Publish publishes the ipns entry associated with this key -func (kr *KeyRoot) Publish(ctx context.Context) error { - child, ok := kr.val.(FSNode) - if !ok { - return errors.New("child of key root not valid type") - } - - nd, err := child.GetNode() - if err != nil { - return err - } - - // Holding this lock so our child doesnt change out from under us - child.Lock() - k, err := kr.fs.dserv.Add(nd) - if err != nil { - child.Unlock() - return err - } - child.Unlock() - // Dont want to hold the lock while we publish - // otherwise we are holding the lock through a costly - // network operation - - kp := path.FromKey(k) - - ev := &logging.Metadata{"name": kr.name, "key": kp} - defer log.EventBegin(ctx, "ipnsfsPublishing", ev).Done() - log.Info("ipnsfs publishing %s -> %s", kr.name, kp) - - return kr.fs.nsys.Publish(ctx, kr.key, kp) -} - -// Republisher manages when to publish the ipns entry associated with a given key -type Republisher struct { - TimeoutLong time.Duration - TimeoutShort time.Duration - Publish chan struct{} - root *KeyRoot -} - -// NewRepublisher creates a new Republisher object to republish the given keyroot -// using the given short and long time intervals -func NewRepublisher(root *KeyRoot, tshort, tlong time.Duration) *Republisher { - return &Republisher{ - TimeoutShort: tshort, - TimeoutLong: tlong, - Publish: make(chan struct{}, 1), - root: root, - } -} - -// Touch signals that an update has occurred since the last publish. 
-// Multiple consecutive touches may extend the time period before -// the next Publish occurs in order to more efficiently batch updates -func (np *Republisher) Touch() { - select { - case np.Publish <- struct{}{}: - default: - } -} - -// Run is the main republisher loop -func (np *Republisher) Run(ctx context.Context) { - for { - select { - case <-np.Publish: - quick := time.After(np.TimeoutShort) - longer := time.After(np.TimeoutLong) - - wait: - select { - case <-ctx.Done(): - return - case <-np.Publish: - quick = time.After(np.TimeoutShort) - goto wait - case <-quick: - case <-longer: - } - - log.Info("Publishing Changes!") - err := np.root.Publish(ctx) - if err != nil { - log.Error("republishRoot error: %s", err) - } - - case <-ctx.Done(): - return - } - } -} diff --git a/merkledag/merkledag.go b/merkledag/merkledag.go index da921ed099b..b84327dfdf3 100644 --- a/merkledag/merkledag.go +++ b/merkledag/merkledag.go @@ -3,7 +3,6 @@ package merkledag import ( "fmt" - "sync" "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" blocks "github.com/ipfs/go-ipfs/blocks" @@ -21,6 +20,7 @@ type DAGService interface { AddRecursive(*Node) error Get(context.Context, key.Key) (*Node, error) Remove(*Node) error + RemoveRecursive(*Node) error // GetDAG returns, in order, all the single leve child // nodes of the passed in node. @@ -108,10 +108,10 @@ func (n *dagService) Get(ctx context.Context, k key.Key) (*Node, error) { } // Remove deletes the given node and all of its children from the BlockService -func (n *dagService) Remove(nd *Node) error { +func (n *dagService) RemoveRecursive(nd *Node) error { for _, l := range nd.Links { if l.Node != nil { - n.Remove(l.Node) + n.RemoveRecursive(l.Node) } } k, err := nd.Key() @@ -121,41 +121,17 @@ func (n *dagService) Remove(nd *Node) error { return n.Blocks.DeleteBlock(k) } -// FetchGraph asynchronously fetches all nodes that are children of the given -// node, and returns a channel that may be waited upon for the fetch to complete -func FetchGraph(ctx context.Context, root *Node, serv DAGService) chan struct{} { - log.Warning("Untested.") - var wg sync.WaitGroup - done := make(chan struct{}) - - for _, l := range root.Links { - wg.Add(1) - go func(lnk *Link) { - - // Signal child is done on way out - defer wg.Done() - select { - case <-ctx.Done(): - return - } - - nd, err := lnk.GetNode(ctx, serv) - if err != nil { - log.Debug(err) - return - } - - // Wait for children to finish - <-FetchGraph(ctx, nd, serv) - }(l) +func (n *dagService) Remove(nd *Node) error { + k, err := nd.Key() + if err != nil { + return err } + return n.Blocks.DeleteBlock(k) +} - go func() { - wg.Wait() - done <- struct{}{} - }() - - return done +// FetchGraph fetches all nodes that are children of the given node +func FetchGraph(ctx context.Context, root *Node, serv DAGService) error { + return EnumerateChildrenAsync(ctx, serv, root, key.NewKeySet()) } // FindLinks searches this nodes links for the given key, @@ -318,3 +294,104 @@ func (t *Batch) Commit() error { t.size = 0 return err } + +// EnumerateChildren will walk the dag below the given root node and add all +// unseen children to the passed in set. +// TODO: parallelize to avoid disk latency perf hits? 
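`FetchGraph` is now a thin wrapper that walks the DAG with `EnumerateChildrenAsync`, pulling every descendant block through the `DAGService` as a side effect. A small sketch of the intended use, relying only on the API in this patch (the `prefetch` helper and its timeout are hypothetical):

```go
package merkledag // illustrative only; not part of this patch

import (
	"time"

	context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
)

// prefetch pulls an entire DAG into the local blockstore, e.g. before
// going offline. FetchGraph returns once every child reachable from root
// has been fetched, or earlier if the context is cancelled.
func prefetch(ctx context.Context, ds DAGService, root *Node) error {
	ctx, cancel := context.WithTimeout(ctx, 2*time.Minute) // arbitrary budget
	defer cancel()
	return FetchGraph(ctx, root, ds)
}
```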
+func EnumerateChildren(ctx context.Context, ds DAGService, root *Node, set key.KeySet) error { + for _, lnk := range root.Links { + k := key.Key(lnk.Hash) + if !set.Has(k) { + set.Add(k) + child, err := ds.Get(ctx, k) + if err != nil { + return err + } + err = EnumerateChildren(ctx, ds, child, set) + if err != nil { + return err + } + } + } + return nil +} + +func EnumerateChildrenAsync(ctx context.Context, ds DAGService, root *Node, set key.KeySet) error { + toprocess := make(chan []key.Key, 8) + nodes := make(chan *Node, 8) + errs := make(chan error, 1) + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + defer close(toprocess) + + go fetchNodes(ctx, ds, toprocess, nodes, errs) + + nodes <- root + live := 1 + + for { + select { + case nd, ok := <-nodes: + if !ok { + return nil + } + // a node has been fetched + live-- + + var keys []key.Key + for _, lnk := range nd.Links { + k := key.Key(lnk.Hash) + if !set.Has(k) { + set.Add(k) + live++ + keys = append(keys, k) + } + } + + if live == 0 { + return nil + } + + if len(keys) > 0 { + select { + case toprocess <- keys: + case <-ctx.Done(): + return ctx.Err() + } + } + case err := <-errs: + return err + case <-ctx.Done(): + return ctx.Err() + } + } +} + +func fetchNodes(ctx context.Context, ds DAGService, in <-chan []key.Key, out chan<- *Node, errs chan<- error) { + defer close(out) + + get := func(g NodeGetter) { + nd, err := g.Get(ctx) + if err != nil { + select { + case errs <- err: + case <-ctx.Done(): + } + return + } + + select { + case out <- nd: + case <-ctx.Done(): + return + } + } + + for ks := range in { + ng := ds.GetNodes(ctx, ks) + for _, g := range ng { + go get(g) + } + } +} diff --git a/merkledag/merkledag_test.go b/merkledag/merkledag_test.go index 40bc457405a..6efd687aa7a 100644 --- a/merkledag/merkledag_test.go +++ b/merkledag/merkledag_test.go @@ -27,7 +27,7 @@ import ( type dagservAndPinner struct { ds DAGService - mp pin.ManualPinner + mp pin.Pinner } func getDagservAndPinner(t *testing.T) dagservAndPinner { @@ -35,7 +35,7 @@ func getDagservAndPinner(t *testing.T) dagservAndPinner { bs := bstore.NewBlockstore(db) blockserv := bserv.New(bs, offline.Exchange(bs)) dserv := NewDAGService(blockserv) - mpin := pin.NewPinner(db, dserv).GetManual() + mpin := pin.NewPinner(db, dserv) return dagservAndPinner{ ds: dserv, mp: mpin, @@ -129,7 +129,7 @@ func SubtestNodeStat(t *testing.T, n *Node) { } if expected != *actual { - t.Errorf("n.Stat incorrect.\nexpect: %s\nactual: %s", expected, actual) + t.Error("n.Stat incorrect.\nexpect: %s\nactual: %s", expected, actual) } else { fmt.Printf("n.Stat correct: %s\n", actual) } @@ -163,7 +163,7 @@ func runBatchFetchTest(t *testing.T, read io.Reader) { spl := chunk.NewSizeSplitter(read, 512) - root, err := imp.BuildDagFromReader(dagservs[0], spl, nil) + root, err := imp.BuildDagFromReader(dagservs[0], spl) if err != nil { t.Fatal(err) } @@ -220,7 +220,6 @@ func runBatchFetchTest(t *testing.T, read io.Reader) { wg.Wait() } - func TestRecursiveAdd(t *testing.T) { a := &Node{Data: []byte("A")} b := &Node{Data: []byte("B")} @@ -286,3 +285,68 @@ func TestCantGet(t *testing.T) { t.Fatal("expected err not found, got: ", err) } } + +func TestFetchGraph(t *testing.T) { + var dservs []DAGService + bsis := bstest.Mocks(2) + for _, bsi := range bsis { + dservs = append(dservs, NewDAGService(bsi)) + } + + read := io.LimitReader(u.NewTimeSeededRand(), 1024*32) + root, err := imp.BuildDagFromReader(dservs[0], chunk.NewSizeSplitter(read, 512)) + if err != nil { + t.Fatal(err) + } + + err = 
FetchGraph(context.TODO(), root, dservs[1]) + if err != nil { + t.Fatal(err) + } + + // create an offline dagstore and ensure all blocks were fetched + bs := bserv.New(bsis[1].Blockstore, offline.Exchange(bsis[1].Blockstore)) + + offline_ds := NewDAGService(bs) + ks := key.NewKeySet() + + err = EnumerateChildren(context.Background(), offline_ds, root, ks) + if err != nil { + t.Fatal(err) + } +} + +func TestEnumerateChildren(t *testing.T) { + bsi := bstest.Mocks(1) + ds := NewDAGService(bsi[0]) + + read := io.LimitReader(u.NewTimeSeededRand(), 1024*1024) + root, err := imp.BuildDagFromReader(ds, chunk.NewSizeSplitter(read, 512)) + if err != nil { + t.Fatal(err) + } + + ks := key.NewKeySet() + err = EnumerateChildren(context.Background(), ds, root, ks) + if err != nil { + t.Fatal(err) + } + + var traverse func(n *Node) + traverse = func(n *Node) { + // traverse dag and check + for _, lnk := range n.Links { + k := key.Key(lnk.Hash) + if !ks.Has(k) { + t.Fatal("missing key in set!") + } + child, err := ds.Get(context.Background(), k) + if err != nil { + t.Fatal(err) + } + traverse(child) + } + } + + traverse(root) +} diff --git a/merkledag/node.go b/merkledag/node.go index f84695f912d..b644cae1216 100644 --- a/merkledag/node.go +++ b/merkledag/node.go @@ -9,6 +9,8 @@ import ( key "github.com/ipfs/go-ipfs/blocks/key" ) +var ErrLinkNotFound = fmt.Errorf("no link by that name") + // Node represents a node in the IPFS Merkle DAG. // nodes have opaque data and a set of navigable links. type Node struct { @@ -160,7 +162,7 @@ func (n *Node) GetNodeLink(name string) (*Link, error) { }, nil } } - return nil, ErrNotFound + return nil, ErrLinkNotFound } func (n *Node) GetLinkedNode(ctx context.Context, ds DAGService, name string) (*Node, error) { diff --git a/merkledag/utils/diff.go b/merkledag/utils/diff.go index 47ca5124f12..8ee50819c53 100644 --- a/merkledag/utils/diff.go +++ b/merkledag/utils/diff.go @@ -37,7 +37,7 @@ func (c *Change) String() string { } func ApplyChange(ctx context.Context, ds dag.DAGService, nd *dag.Node, cs []*Change) (*dag.Node, error) { - e := NewDagEditor(ds, nd) + e := NewDagEditor(nd, ds) for _, c := range cs { switch c.Type { case Add: @@ -71,7 +71,8 @@ func ApplyChange(ctx context.Context, ds dag.DAGService, nd *dag.Node, cs []*Cha } } } - return e.GetNode(), nil + + return e.Finalize(ds) } func Diff(ctx context.Context, ds dag.DAGService, a, b *dag.Node) []*Change { diff --git a/merkledag/utils/utils.go b/merkledag/utils/utils.go index b8dde47e762..1f19e3380c3 100644 --- a/merkledag/utils/utils.go +++ b/merkledag/utils/utils.go @@ -4,20 +4,41 @@ import ( "errors" "strings" + ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" + syncds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync" context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + bstore "github.com/ipfs/go-ipfs/blocks/blockstore" + bserv "github.com/ipfs/go-ipfs/blockservice" + offline "github.com/ipfs/go-ipfs/exchange/offline" dag "github.com/ipfs/go-ipfs/merkledag" ) type Editor struct { root *dag.Node - ds dag.DAGService + + // tmp is a temporary in memory (for now) dagstore for all of the + // intermediary nodes to be stored in + tmp dag.DAGService + + // src is the dagstore with *all* of the data on it, it is used to pull + // nodes from for modification (nil is a valid value) + src dag.DAGService +} + +func NewMemoryDagService() dag.DAGService { + // build mem-datastore for editor's 
intermediary nodes + bs := bstore.NewBlockstore(syncds.MutexWrap(ds.NewMapDatastore())) + bsrv := bserv.New(bs, offline.Exchange(bs)) + return dag.NewDAGService(bsrv) } -func NewDagEditor(ds dag.DAGService, root *dag.Node) *Editor { +// root is the node to be modified, source is the dagstore to pull nodes from (optional) +func NewDagEditor(root *dag.Node, source dag.DAGService) *Editor { return &Editor{ root: root, - ds: ds, + tmp: NewMemoryDagService(), + src: source, } } @@ -26,7 +47,7 @@ func (e *Editor) GetNode() *dag.Node { } func (e *Editor) GetDagService() dag.DAGService { - return e.ds + return e.tmp } func addLink(ctx context.Context, ds dag.DAGService, root *dag.Node, childname string, childnd *dag.Node) (*dag.Node, error) { @@ -40,6 +61,8 @@ func addLink(ctx context.Context, ds dag.DAGService, root *dag.Node, childname s return nil, err } + _ = ds.Remove(root) + // ensure no link with that name already exists _ = root.RemoveNodeLink(childname) // ignore error, only option is ErrNotFound @@ -55,7 +78,7 @@ func addLink(ctx context.Context, ds dag.DAGService, root *dag.Node, childname s func (e *Editor) InsertNodeAtPath(ctx context.Context, path string, toinsert *dag.Node, create func() *dag.Node) error { splpath := strings.Split(path, "/") - nd, err := insertNodeAtPath(ctx, e.ds, e.root, splpath, toinsert, create) + nd, err := e.insertNodeAtPath(ctx, e.root, splpath, toinsert, create) if err != nil { return err } @@ -63,33 +86,43 @@ func (e *Editor) InsertNodeAtPath(ctx context.Context, path string, toinsert *da return nil } -func insertNodeAtPath(ctx context.Context, ds dag.DAGService, root *dag.Node, path []string, toinsert *dag.Node, create func() *dag.Node) (*dag.Node, error) { +func (e *Editor) insertNodeAtPath(ctx context.Context, root *dag.Node, path []string, toinsert *dag.Node, create func() *dag.Node) (*dag.Node, error) { if len(path) == 1 { - return addLink(ctx, ds, root, path[0], toinsert) + return addLink(ctx, e.tmp, root, path[0], toinsert) } - nd, err := root.GetLinkedNode(ctx, ds, path[0]) + nd, err := root.GetLinkedNode(ctx, e.tmp, path[0]) if err != nil { // if 'create' is true, we create directories on the way down as needed - if err == dag.ErrNotFound && create != nil { + if err == dag.ErrLinkNotFound && create != nil { nd = create() - } else { + err = nil // no longer an error case + } else if err == dag.ErrNotFound { + // try finding it in our source dagstore + nd, err = root.GetLinkedNode(ctx, e.src, path[0]) + } + + // if we receive an ErrNotFound, then our second 'GetLinkedNode' call + // also fails, we want to error out + if err != nil { return nil, err } } - ndprime, err := insertNodeAtPath(ctx, ds, nd, path[1:], toinsert, create) + ndprime, err := e.insertNodeAtPath(ctx, nd, path[1:], toinsert, create) if err != nil { return nil, err } + _ = e.tmp.Remove(root) + _ = root.RemoveNodeLink(path[0]) err = root.AddNodeLinkClean(path[0], ndprime) if err != nil { return nil, err } - _, err = ds.Add(root) + _, err = e.tmp.Add(root) if err != nil { return nil, err } @@ -99,7 +132,7 @@ func insertNodeAtPath(ctx context.Context, ds dag.DAGService, root *dag.Node, pa func (e *Editor) RmLink(ctx context.Context, path string) error { splpath := strings.Split(path, "/") - nd, err := rmLink(ctx, e.ds, e.root, splpath) + nd, err := e.rmLink(ctx, e.root, splpath) if err != nil { return err } @@ -107,7 +140,7 @@ func (e *Editor) RmLink(ctx context.Context, path string) error { return nil } -func rmLink(ctx context.Context, ds dag.DAGService, root *dag.Node, path []string) 
(*dag.Node, error) { +func (e *Editor) rmLink(ctx context.Context, root *dag.Node, path []string) (*dag.Node, error) { if len(path) == 1 { // base case, remove node in question err := root.RemoveNodeLink(path[0]) @@ -115,7 +148,7 @@ func rmLink(ctx context.Context, ds dag.DAGService, root *dag.Node, path []strin return nil, err } - _, err = ds.Add(root) + _, err = e.tmp.Add(root) if err != nil { return nil, err } @@ -123,23 +156,30 @@ func rmLink(ctx context.Context, ds dag.DAGService, root *dag.Node, path []strin return root, nil } - nd, err := root.GetLinkedNode(ctx, ds, path[0]) + // search for node in both tmp dagstore and source dagstore + nd, err := root.GetLinkedNode(ctx, e.tmp, path[0]) + if err == dag.ErrNotFound { + nd, err = root.GetLinkedNode(ctx, e.src, path[0]) + } + if err != nil { return nil, err } - nnode, err := rmLink(ctx, ds, nd, path[1:]) + nnode, err := e.rmLink(ctx, nd, path[1:]) if err != nil { return nil, err } + _ = e.tmp.Remove(root) + _ = root.RemoveNodeLink(path[0]) err = root.AddNodeLinkClean(path[0], nnode) if err != nil { return nil, err } - _, err = ds.Add(root) + _, err = e.tmp.Add(root) if err != nil { return nil, err } @@ -147,8 +187,10 @@ func rmLink(ctx context.Context, ds dag.DAGService, root *dag.Node, path []strin return root, nil } -func (e *Editor) WriteOutputTo(ds dag.DAGService) error { - return copyDag(e.GetNode(), e.ds, ds) +func (e *Editor) Finalize(ds dag.DAGService) (*dag.Node, error) { + nd := e.GetNode() + err := copyDag(nd, e.tmp, ds) + return nd, err } func copyDag(nd *dag.Node, from, to dag.DAGService) error { diff --git a/merkledag/utils/utils_test.go b/merkledag/utils/utils_test.go index 18839bf8fed..498f676b255 100644 --- a/merkledag/utils/utils_test.go +++ b/merkledag/utils/utils_test.go @@ -66,13 +66,12 @@ func assertNodeAtPath(t *testing.T, ds dag.DAGService, root *dag.Node, path stri } func TestInsertNode(t *testing.T) { - ds := mdtest.Mock() root := new(dag.Node) - e := NewDagEditor(ds, root) + e := NewDagEditor(root, nil) testInsert(t, e, "a", "anodefortesting", false, "") testInsert(t, e, "a/b", "data", false, "") - testInsert(t, e, "a/b/c/d/e", "blah", false, "merkledag: not found") + testInsert(t, e, "a/b/c/d/e", "blah", false, "no link by that name") testInsert(t, e, "a/b/c/d/e", "foo", true, "") testInsert(t, e, "a/b/c/d/f", "baz", true, "") testInsert(t, e, "a/b/c/d/f", "bar", true, "") @@ -92,7 +91,7 @@ func TestInsertNode(t *testing.T) { func testInsert(t *testing.T, e *Editor, path, data string, create bool, experr string) { child := &dag.Node{Data: []byte(data)} - ck, err := e.ds.Add(child) + ck, err := e.tmp.Add(child) if err != nil { t.Fatal(err) } @@ -117,8 +116,8 @@ func testInsert(t *testing.T, e *Editor, path, data string, create bool, experr } if err != nil { - t.Fatal(err) + t.Fatal(err, path, data, create, experr) } - assertNodeAtPath(t, e.ds, e.root, path, ck) + assertNodeAtPath(t, e.tmp, e.root, path, ck) } diff --git a/ipnsfs/dir.go b/mfs/dir.go similarity index 79% rename from ipnsfs/dir.go rename to mfs/dir.go index a7e264f96f5..264dea4a0d7 100644 --- a/ipnsfs/dir.go +++ b/mfs/dir.go @@ -1,4 +1,4 @@ -package ipnsfs +package mfs import ( "errors" @@ -15,9 +15,10 @@ import ( var ErrNotYetImplemented = errors.New("not yet implemented") var ErrInvalidChild = errors.New("invalid child node") +var ErrDirExists = errors.New("directory already has entry by that name") type Directory struct { - fs *Filesystem + dserv dag.DAGService parent childCloser childDirs map[string]*Directory @@ -30,10 +31,10 @@ type 
Directory struct { name string } -func NewDirectory(ctx context.Context, name string, node *dag.Node, parent childCloser, fs *Filesystem) *Directory { +func NewDirectory(ctx context.Context, name string, node *dag.Node, parent childCloser, dserv dag.DAGService) *Directory { return &Directory{ + dserv: dserv, ctx: ctx, - fs: fs, name: name, node: node, parent: parent, @@ -45,7 +46,7 @@ func NewDirectory(ctx context.Context, name string, node *dag.Node, parent child // closeChild updates the child by the given name to the dag node 'nd' // and changes its own dag node, then propogates the changes upward func (d *Directory) closeChild(name string, nd *dag.Node) error { - _, err := d.fs.dserv.Add(nd) + _, err := d.dserv.Add(nd) if err != nil { return err } @@ -89,7 +90,7 @@ func (d *Directory) childFile(name string) (*File, error) { case ufspb.Data_Directory: return nil, ErrIsDirectory case ufspb.Data_File: - nfi, err := NewFile(name, nd, d, d.fs) + nfi, err := NewFile(name, nd, d, d.dserv) if err != nil { return nil, err } @@ -122,7 +123,7 @@ func (d *Directory) childDir(name string) (*Directory, error) { switch i.GetType() { case ufspb.Data_Directory: - ndir := NewDirectory(d.ctx, name, nd, d, d.fs) + ndir := NewDirectory(d.ctx, name, nd, d, d.dserv) d.childDirs[name] = ndir return ndir, nil case ufspb.Data_File: @@ -139,7 +140,7 @@ func (d *Directory) childDir(name string) (*Directory, error) { func (d *Directory) childFromDag(name string) (*dag.Node, error) { for _, lnk := range d.node.Links { if lnk.Name == name { - return lnk.GetNode(d.ctx, d.fs.dserv) + return lnk.GetNode(d.ctx, d.dserv) } } @@ -156,6 +157,7 @@ func (d *Directory) Child(name string) (FSNode, error) { // childUnsync returns the child under this directory by the given name // without locking, useful for operations which already hold a lock func (d *Directory) childUnsync(name string) (FSNode, error) { + dir, err := d.childDir(name) if err == nil { return dir, nil @@ -168,15 +170,51 @@ func (d *Directory) childUnsync(name string) (FSNode, error) { return nil, os.ErrNotExist } -func (d *Directory) List() []string { +type NodeListing struct { + Name string + Type int + Size int64 + Hash string +} + +func (d *Directory) List() ([]NodeListing, error) { d.lock.Lock() defer d.lock.Unlock() - var out []string - for _, lnk := range d.node.Links { - out = append(out, lnk.Name) + var out []NodeListing + for _, l := range d.node.Links { + child := NodeListing{} + child.Name = l.Name + + c, err := d.childUnsync(l.Name) + if err != nil { + return nil, err + } + + child.Type = int(c.Type()) + if c, ok := c.(*File); ok { + size, err := c.Size() + if err != nil { + return nil, err + } + child.Size = size + } + nd, err := c.GetNode() + if err != nil { + return nil, err + } + + k, err := nd.Key() + if err != nil { + return nil, err + } + + child.Hash = k.B58String() + + out = append(out, child) } - return out + + return out, nil } func (d *Directory) Mkdir(name string) (*Directory, error) { @@ -193,6 +231,12 @@ func (d *Directory) Mkdir(name string) (*Directory, error) { } ndir := &dag.Node{Data: ft.FolderPBData()} + + _, err = d.dserv.Add(ndir) + if err != nil { + return nil, err + } + err = d.node.AddNodeLinkClean(name, ndir) if err != nil { return nil, err @@ -225,6 +269,7 @@ func (d *Directory) Unlink(name string) error { func (d *Directory) AddChild(name string, nd *dag.Node) error { d.Lock() defer d.Unlock() + pbn, err := ft.FromBytes(nd.Data) if err != nil { return err @@ -232,7 +277,12 @@ func (d *Directory) AddChild(name string, nd 
*dag.Node) error { _, err = d.childUnsync(name) if err == nil { - return errors.New("directory already has entry by that name") + return ErrDirExists + } + + _, err = d.dserv.Add(nd) + if err != nil { + return err } err = d.node.AddNodeLinkClean(name, nd) @@ -242,9 +292,9 @@ func (d *Directory) AddChild(name string, nd *dag.Node) error { switch pbn.GetType() { case ft.TDirectory: - d.childDirs[name] = NewDirectory(d.ctx, name, nd, d, d.fs) + d.childDirs[name] = NewDirectory(d.ctx, name, nd, d, d.dserv) case ft.TFile, ft.TMetadata, ft.TRaw: - nfi, err := NewFile(name, nd, d, d.fs) + nfi, err := NewFile(name, nd, d, d.dserv) if err != nil { return err } diff --git a/ipnsfs/file.go b/mfs/file.go similarity index 91% rename from ipnsfs/file.go rename to mfs/file.go index 306ed5a0063..fea1112dc3a 100644 --- a/ipnsfs/file.go +++ b/mfs/file.go @@ -1,4 +1,4 @@ -package ipnsfs +package mfs import ( "sync" @@ -12,7 +12,6 @@ import ( type File struct { parent childCloser - fs *Filesystem name string hasChanges bool @@ -22,14 +21,13 @@ type File struct { } // NewFile returns a NewFile object with the given parameters -func NewFile(name string, node *dag.Node, parent childCloser, fs *Filesystem) (*File, error) { - dmod, err := mod.NewDagModifier(context.Background(), node, fs.dserv, fs.pins.GetManual(), chunk.DefaultSplitter) +func NewFile(name string, node *dag.Node, parent childCloser, dserv dag.DAGService) (*File, error) { + dmod, err := mod.NewDagModifier(context.Background(), node, dserv, chunk.DefaultSplitter) if err != nil { return nil, err } return &File{ - fs: fs, parent: parent, name: name, mod: dmod, diff --git a/mfs/mfs_test.go b/mfs/mfs_test.go new file mode 100644 index 00000000000..609d81a29cf --- /dev/null +++ b/mfs/mfs_test.go @@ -0,0 +1,476 @@ +package mfs + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "sort" + "strings" + "testing" + + ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" + dssync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync" + "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + + bstore "github.com/ipfs/go-ipfs/blocks/blockstore" + key "github.com/ipfs/go-ipfs/blocks/key" + bserv "github.com/ipfs/go-ipfs/blockservice" + offline "github.com/ipfs/go-ipfs/exchange/offline" + importer "github.com/ipfs/go-ipfs/importer" + chunk "github.com/ipfs/go-ipfs/importer/chunk" + dag "github.com/ipfs/go-ipfs/merkledag" + ft "github.com/ipfs/go-ipfs/unixfs" + uio "github.com/ipfs/go-ipfs/unixfs/io" + u "github.com/ipfs/go-ipfs/util" +) + +func getDagserv(t *testing.T) dag.DAGService { + db := dssync.MutexWrap(ds.NewMapDatastore()) + bs := bstore.NewBlockstore(db) + blockserv := bserv.New(bs, offline.Exchange(bs)) + return dag.NewDAGService(blockserv) +} + +func getRandFile(t *testing.T, ds dag.DAGService, size int64) *dag.Node { + r := io.LimitReader(u.NewTimeSeededRand(), size) + nd, err := importer.BuildDagFromReader(ds, chunk.DefaultSplitter(r)) + if err != nil { + t.Fatal(err) + } + return nd +} + +func mkdirP(t *testing.T, root *Directory, path string) *Directory { + dirs := strings.Split(path, "/") + cur := root + for _, d := range dirs { + n, err := cur.Mkdir(d) + if err != nil && err != os.ErrExist { + t.Fatal(err) + } + if err == os.ErrExist { + fsn, err := cur.Child(d) + if err != nil { + t.Fatal(err) + } + switch fsn := fsn.(type) { + case *Directory: + n = fsn + case *File: + t.Fatal("tried to make a directory 
where a file already exists") + } + } + + cur = n + } + return cur +} + +func assertDirAtPath(root *Directory, path string, children []string) error { + fsn, err := DirLookup(root, path) + if err != nil { + return err + } + + dir, ok := fsn.(*Directory) + if !ok { + return fmt.Errorf("%s was not a directory", path) + } + + listing, err := dir.List() + if err != nil { + return err + } + + var names []string + for _, d := range listing { + names = append(names, d.Name) + } + + sort.Strings(children) + sort.Strings(names) + if !compStrArrs(children, names) { + return errors.New("directories children did not match!") + } + + return nil +} + +func compStrArrs(a, b []string) bool { + if len(a) != len(b) { + return false + } + + for i := 0; i < len(a); i++ { + if a[i] != b[i] { + return false + } + } + + return true +} + +func assertFileAtPath(ds dag.DAGService, root *Directory, exp *dag.Node, path string) error { + parts := strings.Split(path, "/") + cur := root + for i, d := range parts[:len(parts)-1] { + next, err := cur.Child(d) + if err != nil { + return fmt.Errorf("looking for %s failed: %s", path, err) + } + + nextDir, ok := next.(*Directory) + if !ok { + return fmt.Errorf("%s points to a non-directory", parts[:i+1]) + } + + cur = nextDir + } + + last := parts[len(parts)-1] + finaln, err := cur.Child(last) + if err != nil { + return err + } + + file, ok := finaln.(*File) + if !ok { + return fmt.Errorf("%s was not a file!", path) + } + + out, err := ioutil.ReadAll(file) + if err != nil { + return err + } + + expbytes, err := catNode(ds, exp) + if err != nil { + return err + } + + if !bytes.Equal(out, expbytes) { + return fmt.Errorf("Incorrect data at path!") + } + return nil +} + +func catNode(ds dag.DAGService, nd *dag.Node) ([]byte, error) { + r, err := uio.NewDagReader(context.TODO(), nd, ds) + if err != nil { + return nil, err + } + defer r.Close() + + return ioutil.ReadAll(r) +} + +func setupRoot(ctx context.Context, t *testing.T) (dag.DAGService, *Root) { + ds := getDagserv(t) + + root := &dag.Node{Data: ft.FolderPBData()} + rt, err := NewRoot(ctx, ds, root, func(ctx context.Context, k key.Key) error { + fmt.Println("PUBLISHED: ", k) + return nil + }) + + if err != nil { + t.Fatal(err) + } + + return ds, rt +} + +func TestBasic(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ds, rt := setupRoot(ctx, t) + + rootdir := rt.GetValue().(*Directory) + + // test making a basic dir + _, err := rootdir.Mkdir("a") + if err != nil { + t.Fatal(err) + } + + path := "a/b/c/d/e/f/g" + d := mkdirP(t, rootdir, path) + + fi := getRandFile(t, ds, 1000) + + // test inserting that file + err = d.AddChild("afile", fi) + if err != nil { + t.Fatal(err) + } + + err = assertFileAtPath(ds, rootdir, fi, "a/b/c/d/e/f/g/afile") + if err != nil { + t.Fatal(err) + } +} + +func TestMkdir(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, rt := setupRoot(ctx, t) + + rootdir := rt.GetValue().(*Directory) + + dirsToMake := []string{"a", "B", "foo", "bar", "cats", "fish"} + sort.Strings(dirsToMake) // sort for easy comparing later + + for _, d := range dirsToMake { + _, err := rootdir.Mkdir(d) + if err != nil { + t.Fatal(err) + } + } + + err := assertDirAtPath(rootdir, "/", dirsToMake) + if err != nil { + t.Fatal(err) + } + + for _, d := range dirsToMake { + mkdirP(t, rootdir, "a/"+d) + } + + err = assertDirAtPath(rootdir, "/a", dirsToMake) + if err != nil { + t.Fatal(err) + } + + // mkdir over existing dir should fail + _, err = 
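// The setupRoot helper above doubles as the minimal recipe for standing up
// an mfs root anywhere: an offline DAGService, an empty unixfs folder node,
// and a PubFunc callback that receives each new root key. A sketch with
// hypothetical names, mirroring the tests:
//
//	dserv := getDagserv(t) // datastore -> blockstore -> offline blockservice -> dag
//	rootNode := &dag.Node{Data: ft.FolderPBData()}
//	rt, err := NewRoot(ctx, dserv, rootNode, func(ctx context.Context, k key.Key) error {
//		return nil // a real PubFunc would publish k (e.g. to IPNS) here
//	})
//	rootdir := rt.GetValue().(*Directory) // the root value is a directory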
rootdir.Mkdir("a")
+ if err == nil {
+ t.Fatal("should have failed!")
+ }
+}
+
+func TestDirectoryLoadFromDag(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ ds, rt := setupRoot(ctx, t)
+
+ rootdir := rt.GetValue().(*Directory)
+
+ nd := getRandFile(t, ds, 1000)
+ _, err := ds.Add(nd)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ fihash, err := nd.Multihash()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ dir := &dag.Node{Data: ft.FolderPBData()}
+ _, err = ds.Add(dir)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ dirhash, err := dir.Multihash()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ top := &dag.Node{
+ Data: ft.FolderPBData(),
+ Links: []*dag.Link{
+ &dag.Link{
+ Name: "a",
+ Hash: fihash,
+ },
+ &dag.Link{
+ Name: "b",
+ Hash: dirhash,
+ },
+ },
+ }
+
+ err = rootdir.AddChild("foo", top)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // get this dir
+ topi, err := rootdir.Child("foo")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ topd := topi.(*Directory)
+
+ // mkdir over existing but unloaded child file should fail
+ _, err = topd.Mkdir("a")
+ if err == nil {
+ t.Fatal("expected to fail!")
+ }
+
+ // mkdir over existing but unloaded child dir should fail
+ _, err = topd.Mkdir("b")
+ if err == nil {
+ t.Fatal("expected to fail!")
+ }
+
+ // adding a child over an existing path fails
+ err = topd.AddChild("b", nd)
+ if err == nil {
+ t.Fatal("expected to fail!")
+ }
+
+ err = assertFileAtPath(ds, rootdir, nd, "foo/a")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = assertDirAtPath(rootdir, "foo/b", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = rootdir.Unlink("foo")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = assertDirAtPath(rootdir, "", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestMfsFile(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ ds, rt := setupRoot(ctx, t)
+
+ rootdir := rt.GetValue().(*Directory)
+
+ fisize := 1000
+ nd := getRandFile(t, ds, 1000)
+
+ err := rootdir.AddChild("file", nd)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ fsn, err := rootdir.Child("file")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ fi := fsn.(*File)
+
+ if fi.Type() != TFile {
+ t.Fatal("something is seriously wrong here")
+ }
+
+ // assert size is as expected
+ size, err := fi.Size()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if size != int64(fisize) {
+ t.Fatal("size isnt correct")
+ }
+
+ // write to beginning of file
+ b := []byte("THIS IS A TEST")
+ n, err := fi.Write(b)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if n != len(b) {
+ t.Fatal("didnt write correct number of bytes")
+ }
+
+ // sync file
+ err = fi.Sync()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // make sure size hasnt changed
+ size, err = fi.Size()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if size != int64(fisize) {
+ t.Fatal("size isnt correct")
+ }
+
+ // seek back to beginning
+ ns, err := fi.Seek(0, os.SEEK_SET)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if ns != 0 {
+ t.Fatal("didnt seek to beginning")
+ }
+
+ // read back bytes we wrote
+ buf := make([]byte, len(b))
+ n, err = fi.Read(buf)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if n != len(buf) {
+ t.Fatal("didnt read enough")
+ }
+
+ if !bytes.Equal(buf, b) {
+ t.Fatal("data read was different than data written")
+ }
+
+ // truncate file to ten bytes
+ err = fi.Truncate(10)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ size, err = fi.Size()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if size != 10 {
+ t.Fatal("size was incorrect: ", size)
+ }
+
+ // 'writeAt' to extend it
+ data := []byte("this is a test foo foo foo")
+ nwa, err := fi.WriteAt(data, 5)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if nwa != len(data) {
+ t.Fatal("wrote an incorrect number of bytes")
+ }
+
+ // assert size once more
+ size, err = fi.Size()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if size != int64(5+len(data)) {
+ t.Fatal("size was incorrect")
+ }
+
+ // make sure we can get node. TODO: verify it later
+ _, err = fi.GetNode()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // close it out!
+ err = fi.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
diff --git a/mfs/ops.go b/mfs/ops.go
new file mode 100644
index 00000000000..33514fc67a1
--- /dev/null
+++ b/mfs/ops.go
@@ -0,0 +1,182 @@
+package mfs
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ gopath "path"
+ "strings"
+
+ dag "github.com/ipfs/go-ipfs/merkledag"
+)
+
+// Mv moves the file or directory at 'src' to 'dst'
+func Mv(r *Root, src, dst string) error {
+ srcDir, srcFname := gopath.Split(src)
+
+ var dstDirStr string
+ var filename string
+ if dst[len(dst)-1] == '/' {
+ dstDirStr = dst
+ filename = srcFname
+ } else {
+ dstDirStr, filename = gopath.Split(dst)
+ }
+
+ // get parent directories of both src and dest first
+ dstDir, err := lookupDir(r, dstDirStr)
+ if err != nil {
+ return err
+ }
+
+ srcDirObj, err := lookupDir(r, srcDir)
+ if err != nil {
+ return err
+ }
+
+ srcObj, err := srcDirObj.Child(srcFname)
+ if err != nil {
+ return err
+ }
+
+ nd, err := srcObj.GetNode()
+ if err != nil {
+ return err
+ }
+
+ fsn, err := dstDir.Child(filename)
+ if err == nil {
+ switch n := fsn.(type) {
+ case *File:
+ _ = dstDir.Unlink(filename)
+ case *Directory:
+ dstDir = n
+ default:
+ return fmt.Errorf("unexpected type at path: %s", dst)
+ }
+ } else if err != os.ErrNotExist {
+ return err
+ }
+
+ err = dstDir.AddChild(filename, nd)
+ if err != nil {
+ return err
+ }
+
+ err = srcDirObj.Unlink(srcFname)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func lookupDir(r *Root, path string) (*Directory, error) {
+ di, err := Lookup(r, path)
+ if err != nil {
+ return nil, err
+ }
+
+ d, ok := di.(*Directory)
+ if !ok {
+ return nil, fmt.Errorf("%s is not a directory", path)
+ }
+
+ return d, nil
+}
+
+// PutNode inserts 'nd' at 'path' in the given mfs
+func PutNode(r *Root, path string, nd *dag.Node) error {
+ dirp, filename := gopath.Split(path)
+
+ pdir, err := lookupDir(r, dirp)
+ if err != nil {
+ return err
+ }
+
+ return pdir.AddChild(filename, nd)
+}
+
+// Mkdir creates a directory at 'path' under the root 'r', creating
+// intermediary directories as needed if 'parents' is set to true
+func Mkdir(r *Root, path string, parents bool) error {
+ parts := strings.Split(path, "/")
+ if parts[0] == "" {
+ parts = parts[1:]
+ }
+
+ // allow 'mkdir /a/b/c/' to create c
+ if parts[len(parts)-1] == "" {
+ parts = parts[:len(parts)-1]
+ }
+
+ if len(parts) == 0 {
+ // this will only happen on 'mkdir /'
+ return fmt.Errorf("cannot mkdir '%s'", path)
+ }
+
+ cur := r.GetValue().(*Directory)
+ for i, d := range parts[:len(parts)-1] {
+ fsn, err := cur.Child(d)
+ if err == os.ErrNotExist && parents {
+ mkd, err := cur.Mkdir(d)
+ if err != nil {
+ return err
+ }
+ fsn = mkd
+ } else if err != nil {
+ return err
+ }
+
+ next, ok := fsn.(*Directory)
+ if !ok {
+ return fmt.Errorf("%s was not a directory", strings.Join(parts[:i], "/"))
+ }
+ cur = next
+ }
+
+ _, err := cur.Mkdir(parts[len(parts)-1])
+ if err != nil {
+ if !parents || err != os.ErrExist {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func Lookup(r *Root, path string) (FSNode, error) {
+ dir, ok := r.GetValue().(*Directory)
+ if !ok {
+ return nil, errors.New("root was not a directory")
+ }
+
+ return DirLookup(dir, path)
+}
+
+// 
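// Taken together, the helpers above give mfs a shell-like, path-based
// surface. A sketch of how they compose, assuming a *Root `rt` and a
// *dag.Node `nd` (hypothetical names, illustration only):
//
//	if err := Mkdir(rt, "/docs/notes", true); err != nil { ... } // like mkdir -p
//	if err := PutNode(rt, "/docs/notes/todo", nd); err != nil { ... }
//	if err := Mv(rt, "/docs/notes/todo", "/docs/"); err != nil { ... } // trailing '/' keeps the filename
//	fsn, err := Lookup(rt, "/docs/todo") // fsn is an FSNode (*File here)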
DirLookup will look up a file or directory at the given path +// under the directory 'd' +func DirLookup(d *Directory, path string) (FSNode, error) { + path = strings.Trim(path, "/") + parts := strings.Split(path, "/") + if len(parts) == 1 && parts[0] == "" { + return d, nil + } + + var cur FSNode + cur = d + for i, p := range parts { + chdir, ok := cur.(*Directory) + if !ok { + return nil, fmt.Errorf("cannot access %s: Not a directory", strings.Join(parts[:i+1], "/")) + } + + child, err := chdir.Child(p) + if err != nil { + return nil, err + } + + cur = child + } + return cur, nil +} diff --git a/mfs/repub_test.go b/mfs/repub_test.go new file mode 100644 index 00000000000..36db90e8051 --- /dev/null +++ b/mfs/repub_test.go @@ -0,0 +1,78 @@ +package mfs + +import ( + "testing" + "time" + + key "github.com/ipfs/go-ipfs/blocks/key" + ci "github.com/ipfs/go-ipfs/util/testutil/ci" + + "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" +) + +func TestRepublisher(t *testing.T) { + if ci.IsRunning() { + t.Skip("dont run timing tests in CI") + } + + ctx := context.TODO() + + pub := make(chan struct{}) + + pf := func(ctx context.Context, k key.Key) error { + pub <- struct{}{} + return nil + } + + tshort := time.Millisecond * 50 + tlong := time.Second / 2 + + rp := NewRepublisher(ctx, pf, tshort, tlong) + go rp.Run() + + rp.Update("test") + + // should hit short timeout + select { + case <-time.After(tshort * 2): + t.Fatal("publish didnt happen in time") + case <-pub: + } + + cctx, cancel := context.WithCancel(context.Background()) + + go func() { + for { + rp.Update("a") + time.Sleep(time.Millisecond * 10) + select { + case <-cctx.Done(): + return + default: + } + } + }() + + select { + case <-pub: + t.Fatal("shouldnt have received publish yet!") + case <-time.After((tlong * 9) / 10): + } + select { + case <-pub: + case <-time.After(tlong / 2): + t.Fatal("waited too long for pub!") + } + + cancel() + + go func() { + err := rp.Close() + if err != nil { + t.Fatal(err) + } + }() + + // final pub from closing + <-pub +} diff --git a/mfs/system.go b/mfs/system.go new file mode 100644 index 00000000000..22ef63cd4a2 --- /dev/null +++ b/mfs/system.go @@ -0,0 +1,236 @@ +// package mfs implements an in memory model of a mutable ipfs filesystem. 
+// +// It consists of four main structs: +// 1) The Filesystem +// The filesystem serves as a container and entry point for various mfs filesystems +// 2) Root +// Root represents an individual filesystem mounted within the mfs system as a whole +// 3) Directories +// 4) Files +package mfs + +import ( + "errors" + "sync" + "time" + + key "github.com/ipfs/go-ipfs/blocks/key" + dag "github.com/ipfs/go-ipfs/merkledag" + ft "github.com/ipfs/go-ipfs/unixfs" + + context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + logging "github.com/ipfs/go-ipfs/vendor/QmQg1J6vikuXF9oDvm4wpdeAUvvkVEKW1EYDw9HhTMnP2b/go-log" +) + +var ErrNotExist = errors.New("no such rootfs") + +var log = logging.Logger("mfs") + +var ErrIsDirectory = errors.New("error: is a directory") + +type childCloser interface { + closeChild(string, *dag.Node) error +} + +type NodeType int + +const ( + TFile NodeType = iota + TDir +) + +// FSNode represents any node (directory, root, or file) in the mfs filesystem +type FSNode interface { + GetNode() (*dag.Node, error) + Type() NodeType + Lock() + Unlock() +} + +// Root represents the root of a filesystem tree +type Root struct { + // node is the merkledag root + node *dag.Node + + // val represents the node. It can either be a File or a Directory + val FSNode + + repub *Republisher + + dserv dag.DAGService + + Type string +} + +type PubFunc func(context.Context, key.Key) error + +// newRoot creates a new Root and starts up a republisher routine for it +func NewRoot(parent context.Context, ds dag.DAGService, node *dag.Node, pf PubFunc) (*Root, error) { + ndk, err := node.Key() + if err != nil { + return nil, err + } + + root := &Root{ + node: node, + repub: NewRepublisher(parent, pf, time.Millisecond*300, time.Second*3), + dserv: ds, + } + + root.repub.setVal(ndk) + go root.repub.Run() + + pbn, err := ft.FromBytes(node.Data) + if err != nil { + log.Error("IPNS pointer was not unixfs node") + return nil, err + } + + switch pbn.GetType() { + case ft.TDirectory: + root.val = NewDirectory(parent, ndk.String(), node, root, ds) + case ft.TFile, ft.TMetadata, ft.TRaw: + fi, err := NewFile(ndk.String(), node, root, ds) + if err != nil { + return nil, err + } + root.val = fi + default: + panic("unrecognized! 
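// FSNode deliberately stays small: anything in the tree can yield its dag
// node, report its type, and be locked. Consumers branch on the concrete
// type, as in this sketch (hypothetical `fsn` obtained from Child or a
// lookup; illustration only):
//
//	switch n := fsn.(type) {
//	case *Directory:
//		entries, err := n.List()
//		// ...
//	case *File:
//		size, err := n.Size()
//		// ...
//	}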
(NYI)") + } + return root, nil +} + +func (kr *Root) GetValue() FSNode { + return kr.val +} + +// closeChild implements the childCloser interface, and signals to the publisher that +// there are changes ready to be published +func (kr *Root) closeChild(name string, nd *dag.Node) error { + k, err := kr.dserv.Add(nd) + if err != nil { + return err + } + + kr.repub.Update(k) + return nil +} + +func (kr *Root) Close() error { + return kr.repub.Close() +} + +// Republisher manages when to publish a given entry +type Republisher struct { + TimeoutLong time.Duration + TimeoutShort time.Duration + Publish chan struct{} + pubfunc PubFunc + pubnowch chan struct{} + + ctx context.Context + cancel func() + + lk sync.Mutex + val key.Key + lastpub key.Key +} + +func (rp *Republisher) getVal() key.Key { + rp.lk.Lock() + defer rp.lk.Unlock() + return rp.val +} + +// NewRepublisher creates a new Republisher object to republish the given root +// using the given short and long time intervals +func NewRepublisher(ctx context.Context, pf PubFunc, tshort, tlong time.Duration) *Republisher { + ctx, cancel := context.WithCancel(ctx) + return &Republisher{ + TimeoutShort: tshort, + TimeoutLong: tlong, + Publish: make(chan struct{}, 1), + pubfunc: pf, + pubnowch: make(chan struct{}), + ctx: ctx, + cancel: cancel, + } +} + +func (p *Republisher) setVal(k key.Key) { + p.lk.Lock() + defer p.lk.Unlock() + p.val = k +} + +func (p *Republisher) pubNow() { + select { + case p.pubnowch <- struct{}{}: + default: + } +} + +func (p *Republisher) Close() error { + err := p.publish(p.ctx) + p.cancel() + return err +} + +// Touch signals that an update has occurred since the last publish. +// Multiple consecutive touches may extend the time period before +// the next Publish occurs in order to more efficiently batch updates +func (np *Republisher) Update(k key.Key) { + np.setVal(k) + select { + case np.Publish <- struct{}{}: + default: + } +} + +// Run is the main republisher loop +func (np *Republisher) Run() { + for { + select { + case <-np.Publish: + quick := time.After(np.TimeoutShort) + longer := time.After(np.TimeoutLong) + + wait: + select { + case <-np.ctx.Done(): + return + case <-np.Publish: + quick = time.After(np.TimeoutShort) + goto wait + case <-quick: + case <-longer: + case <-np.pubnowch: + } + + err := np.publish(np.ctx) + if err != nil { + log.Error("republishRoot error: %s", err) + } + + case <-np.ctx.Done(): + return + } + } +} + +func (np *Republisher) publish(ctx context.Context) error { + np.lk.Lock() + topub := np.val + np.lk.Unlock() + + log.Info("Publishing Changes!") + err := np.pubfunc(ctx, topub) + if err != nil { + return err + } + np.lk.Lock() + np.lastpub = topub + np.lk.Unlock() + return nil +} diff --git a/p2p/host/basic/basic_host.go b/p2p/host/basic/basic_host.go index e5a294f6539..65987e7d803 100644 --- a/p2p/host/basic/basic_host.go +++ b/p2p/host/basic/basic_host.go @@ -15,6 +15,8 @@ import ( protocol "github.com/ipfs/go-ipfs/p2p/protocol" identify "github.com/ipfs/go-ipfs/p2p/protocol/identify" relay "github.com/ipfs/go-ipfs/p2p/protocol/relay" + + msmux "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/whyrusleeping/go-multistream" ) var log = logging.Logger("p2p/host/basic") @@ -39,7 +41,7 @@ const ( // * uses a nat service to establish NAT port mappings type BasicHost struct { network inet.Network - mux *protocol.Mux + mux *msmux.MultistreamMuxer ids *identify.IDService relay *relay.RelayService natmgr *natManager @@ -53,7 +55,7 @@ type BasicHost struct { func 
New(net inet.Network, opts ...interface{}) *BasicHost { h := &BasicHost{ network: net, - mux: protocol.NewMux(), + mux: msmux.NewMultistreamMuxer(), bwc: metrics.NewBandwidthCounter(), } @@ -67,7 +69,12 @@ func New(net inet.Network, opts ...interface{}) *BasicHost { // setup host services h.ids = identify.NewIDService(h) - h.relay = relay.NewRelayService(h, h.Mux().HandleSync) + + muxh := h.Mux().Handle + handle := func(s inet.Stream) { + muxh(s) + } + h.relay = relay.NewRelayService(h, handle) for _, o := range opts { switch o := o.(type) { @@ -95,7 +102,7 @@ func (h *BasicHost) newConnHandler(c inet.Conn) { // newStreamHandler is the remote-opened stream handler for inet.Network // TODO: this feels a bit wonky func (h *BasicHost) newStreamHandler(s inet.Stream) { - protoID, handle, err := h.Mux().ReadHeader(s) + protoID, handle, err := h.Mux().Negotiate(s) if err != nil { if err == io.EOF { log.Debugf("protocol EOF: %s", s.Conn().RemotePeer()) @@ -105,7 +112,7 @@ func (h *BasicHost) newStreamHandler(s inet.Stream) { return } - logStream := mstream.WrapStream(s, protoID, h.bwc) + logStream := mstream.WrapStream(s, protocol.ID(protoID), h.bwc) go handle(logStream) } @@ -126,7 +133,7 @@ func (h *BasicHost) Network() inet.Network { } // Mux returns the Mux multiplexing incoming streams to protocol handlers -func (h *BasicHost) Mux() *protocol.Mux { +func (h *BasicHost) Mux() *msmux.MultistreamMuxer { return h.mux } @@ -140,12 +147,15 @@ func (h *BasicHost) IDService() *identify.IDService { // host.Mux().SetHandler(proto, handler) // (Threadsafe) func (h *BasicHost) SetStreamHandler(pid protocol.ID, handler inet.StreamHandler) { - h.Mux().SetHandler(pid, handler) + h.Mux().AddHandler(string(pid), func(rwc io.ReadWriteCloser) error { + handler(rwc.(inet.Stream)) + return nil + }) } // RemoveStreamHandler returns .. 
func (h *BasicHost) RemoveStreamHandler(pid protocol.ID) { - h.Mux().RemoveHandler(pid) + h.Mux().RemoveHandler(string(pid)) } // NewStream opens a new stream to given peer p, and writes a p2p/protocol @@ -160,12 +170,11 @@ func (h *BasicHost) NewStream(pid protocol.ID, p peer.ID) (inet.Stream, error) { logStream := mstream.WrapStream(s, pid, h.bwc) - if err := protocol.WriteHeader(logStream, pid); err != nil { - logStream.Close() - return nil, err - } - - return logStream, nil + lzcon := msmux.NewMSSelect(logStream, string(pid)) + return &streamWrapper{ + Stream: logStream, + rw: lzcon, + }, nil } // Connect ensures there is a connection between this host and the peer with @@ -244,3 +253,16 @@ func (h *BasicHost) Close() error { func (h *BasicHost) GetBandwidthReporter() metrics.Reporter { return h.bwc } + +type streamWrapper struct { + inet.Stream + rw io.ReadWriter +} + +func (s *streamWrapper) Read(b []byte) (int, error) { + return s.rw.Read(b) +} + +func (s *streamWrapper) Write(b []byte) (int, error) { + return s.rw.Write(b) +} diff --git a/p2p/host/host.go b/p2p/host/host.go index 066b0094182..014aa0a1a3d 100644 --- a/p2p/host/host.go +++ b/p2p/host/host.go @@ -8,6 +8,8 @@ import ( peer "github.com/ipfs/go-ipfs/p2p/peer" protocol "github.com/ipfs/go-ipfs/p2p/protocol" logging "github.com/ipfs/go-ipfs/vendor/QmQg1J6vikuXF9oDvm4wpdeAUvvkVEKW1EYDw9HhTMnP2b/go-log" + + msmux "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/whyrusleeping/go-multistream" ) var log = logging.Logger("p2p/host") @@ -31,7 +33,7 @@ type Host interface { Network() inet.Network // Mux returns the Mux multiplexing incoming streams to protocol handlers - Mux() *protocol.Mux + Mux() *msmux.MultistreamMuxer // Connect ensures there is a connection between this host and the peer with // given peer.ID. 
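// Note the laziness in NewStream above: msmux.NewMSSelect wraps the stream so
// the multistream-select handshake for the protocol is only performed on
// first use, not when the stream is opened. A sketch of the consequence
// (hypothetical host `h` and peer `p`):
//
//	s, err := h.NewStream(pid, p) // no protocol bytes on the wire yet
//	// ...
//	_, err = s.Write(data) // the first Write (or Read) runs the handshake,
//	                       // then carries the payload
//
// This is also why a test later in this diff issues an empty s.Read(nil):
// it forces the handshake before any timing-sensitive writes.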
Connect will absorb the addresses in pi into its internal diff --git a/p2p/host/routed/routed.go b/p2p/host/routed/routed.go index 28c93a205fc..5723f1b2eeb 100644 --- a/p2p/host/routed/routed.go +++ b/p2p/host/routed/routed.go @@ -15,6 +15,8 @@ import ( peer "github.com/ipfs/go-ipfs/p2p/peer" protocol "github.com/ipfs/go-ipfs/p2p/protocol" routing "github.com/ipfs/go-ipfs/routing" + + msmux "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/whyrusleeping/go-multistream" ) var log = logging.Logger("p2p/host/routed") @@ -97,7 +99,7 @@ func (rh *RoutedHost) Network() inet.Network { return rh.host.Network() } -func (rh *RoutedHost) Mux() *protocol.Mux { +func (rh *RoutedHost) Mux() *msmux.MultistreamMuxer { return rh.host.Mux() } diff --git a/p2p/net/conn/conn.go b/p2p/net/conn/conn.go index e7909caddde..c195b93a20b 100644 --- a/p2p/net/conn/conn.go +++ b/p2p/net/conn/conn.go @@ -6,7 +6,6 @@ import ( "net" "time" - msgio "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-msgio" mpool "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-msgio/mpool" ma "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" manet "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr-net" @@ -32,7 +31,6 @@ type singleConn struct { local peer.ID remote peer.ID maconn manet.Conn - msgrw msgio.ReadWriteCloser event io.Closer } @@ -44,7 +42,6 @@ func newSingleConn(ctx context.Context, local, remote peer.ID, maconn manet.Conn local: local, remote: remote, maconn: maconn, - msgrw: msgio.NewReadWriter(maconn), event: log.EventBegin(ctx, "connLifetime", ml), } @@ -62,7 +59,7 @@ func (c *singleConn) Close() error { }() // close underlying connection - return c.msgrw.Close() + return c.maconn.Close() } // ID is an identifier unique to this connection. @@ -123,31 +120,12 @@ func (c *singleConn) RemotePeer() peer.ID { // Read reads data, net.Conn style func (c *singleConn) Read(buf []byte) (int, error) { - return c.msgrw.Read(buf) + return c.maconn.Read(buf) } // Write writes data, net.Conn style func (c *singleConn) Write(buf []byte) (int, error) { - return c.msgrw.Write(buf) -} - -func (c *singleConn) NextMsgLen() (int, error) { - return c.msgrw.NextMsgLen() -} - -// ReadMsg reads data, net.Conn style -func (c *singleConn) ReadMsg() ([]byte, error) { - return c.msgrw.ReadMsg() -} - -// WriteMsg writes data, net.Conn style -func (c *singleConn) WriteMsg(buf []byte) error { - return c.msgrw.WriteMsg(buf) -} - -// ReleaseMsg releases a buffer -func (c *singleConn) ReleaseMsg(m []byte) { - c.msgrw.ReleaseMsg(m) + return c.maconn.Write(buf) } // ID returns the ID of a given Conn. 
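// With the message-framing methods gone, Conn is a plain io.Reader and
// io.Writer; callers that still want length-prefixed messages wrap the conn
// explicitly, exactly as the updated tests below do:
//
//	mc := msgio.NewReadWriter(c) // varint length-prefixed framing on top of c
//	if err := mc.WriteMsg([]byte("hello")); err != nil {
//		// ...
//	}
//	msg, err := mc.ReadMsg() // reads exactly one framed message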
diff --git a/p2p/net/conn/conn_test.go b/p2p/net/conn/conn_test.go index 03e09d86984..25b23072b1b 100644 --- a/p2p/net/conn/conn_test.go +++ b/p2p/net/conn/conn_test.go @@ -8,17 +8,25 @@ import ( "testing" "time" + msgio "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-msgio" context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" travis "github.com/ipfs/go-ipfs/util/testutil/ci/travis" ) +func msgioWrap(c Conn) msgio.ReadWriter { + return msgio.NewReadWriter(c) +} + func testOneSendRecv(t *testing.T, c1, c2 Conn) { + mc1 := msgioWrap(c1) + mc2 := msgioWrap(c2) + log.Debugf("testOneSendRecv from %s to %s", c1.LocalPeer(), c2.LocalPeer()) m1 := []byte("hello") - if err := c1.WriteMsg(m1); err != nil { + if err := mc1.WriteMsg(m1); err != nil { t.Fatal(err) } - m2, err := c2.ReadMsg() + m2, err := mc2.ReadMsg() if err != nil { t.Fatal(err) } @@ -28,11 +36,14 @@ func testOneSendRecv(t *testing.T, c1, c2 Conn) { } func testNotOneSendRecv(t *testing.T, c1, c2 Conn) { + mc1 := msgioWrap(c1) + mc2 := msgioWrap(c2) + m1 := []byte("hello") - if err := c1.WriteMsg(m1); err == nil { + if err := mc1.WriteMsg(m1); err == nil { t.Fatal("write should have failed", err) } - _, err := c2.ReadMsg() + _, err := mc2.ReadMsg() if err == nil { t.Fatal("read should have failed", err) } @@ -72,10 +83,13 @@ func TestCloseLeak(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) c1, c2, _, _ := setupSingleConn(t, ctx) + mc1 := msgioWrap(c1) + mc2 := msgioWrap(c2) + for i := 0; i < num; i++ { b1 := []byte(fmt.Sprintf("beep%d", i)) - c1.WriteMsg(b1) - b2, err := c2.ReadMsg() + mc1.WriteMsg(b1) + b2, err := mc2.ReadMsg() if err != nil { panic(err) } @@ -84,8 +98,8 @@ func TestCloseLeak(t *testing.T) { } b2 = []byte(fmt.Sprintf("boop%d", i)) - c2.WriteMsg(b2) - b1, err = c1.ReadMsg() + mc2.WriteMsg(b2) + b1, err = mc1.ReadMsg() if err != nil { panic(err) } diff --git a/p2p/net/conn/dial_test.go b/p2p/net/conn/dial_test.go index 78c9d1d12b2..164a8dbd7c6 100644 --- a/p2p/net/conn/dial_test.go +++ b/p2p/net/conn/dial_test.go @@ -187,10 +187,10 @@ func testDialer(t *testing.T, secure bool) { } // fmt.Println("sending") - c.WriteMsg([]byte("beep")) - c.WriteMsg([]byte("boop")) - - out, err := c.ReadMsg() + mc := msgioWrap(c) + mc.WriteMsg([]byte("beep")) + mc.WriteMsg([]byte("boop")) + out, err := mc.ReadMsg() if err != nil { t.Fatal(err) } @@ -201,7 +201,7 @@ func testDialer(t *testing.T, secure bool) { t.Error("unexpected conn output", data) } - out, err = c.ReadMsg() + out, err = mc.ReadMsg() if err != nil { t.Fatal(err) } diff --git a/p2p/net/conn/interface.go b/p2p/net/conn/interface.go index bbd13bdf775..b5fda20ac0e 100644 --- a/p2p/net/conn/interface.go +++ b/p2p/net/conn/interface.go @@ -11,7 +11,6 @@ import ( transport "github.com/ipfs/go-ipfs/p2p/net/transport" peer "github.com/ipfs/go-ipfs/p2p/peer" - msgio "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-msgio" ma "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" ) @@ -46,8 +45,8 @@ type Conn interface { SetReadDeadline(t time.Time) error SetWriteDeadline(t time.Time) error - msgio.Reader - msgio.Writer + io.Reader + io.Writer } // Dialer is an object that can open connections. 
We could have a "convenience" diff --git a/p2p/net/conn/secure_conn.go b/p2p/net/conn/secure_conn.go index f5ac698e62f..4e786c4b271 100644 --- a/p2p/net/conn/secure_conn.go +++ b/p2p/net/conn/secure_conn.go @@ -119,20 +119,6 @@ func (c *secureConn) Write(buf []byte) (int, error) { return c.secure.ReadWriter().Write(buf) } -func (c *secureConn) NextMsgLen() (int, error) { - return c.secure.ReadWriter().NextMsgLen() -} - -// ReadMsg reads data, net.Conn style -func (c *secureConn) ReadMsg() ([]byte, error) { - return c.secure.ReadWriter().ReadMsg() -} - -// WriteMsg writes data, net.Conn style -func (c *secureConn) WriteMsg(buf []byte) error { - return c.secure.ReadWriter().WriteMsg(buf) -} - // ReleaseMsg releases a buffer func (c *secureConn) ReleaseMsg(m []byte) { c.secure.ReadWriter().ReleaseMsg(m) diff --git a/p2p/net/conn/secure_conn_test.go b/p2p/net/conn/secure_conn_test.go index f027b6a4c6d..9f5a53794ee 100644 --- a/p2p/net/conn/secure_conn_test.go +++ b/p2p/net/conn/secure_conn_test.go @@ -145,13 +145,16 @@ func TestSecureCloseLeak(t *testing.T) { } runPair := func(c1, c2 Conn, num int) { + mc1 := msgioWrap(c1) + mc2 := msgioWrap(c2) + log.Debugf("runPair %d", num) for i := 0; i < num; i++ { log.Debugf("runPair iteration %d", i) b1 := []byte("beep") - c1.WriteMsg(b1) - b2, err := c2.ReadMsg() + mc1.WriteMsg(b1) + b2, err := mc2.ReadMsg() if err != nil { panic(err) } @@ -160,8 +163,8 @@ func TestSecureCloseLeak(t *testing.T) { } b2 = []byte("beep") - c2.WriteMsg(b2) - b1, err = c1.ReadMsg() + mc2.WriteMsg(b2) + b1, err = mc1.ReadMsg() if err != nil { panic(err) } diff --git a/p2p/net/filter/filter.go b/p2p/net/filter/filter.go index 21127d3f709..20b62ce1227 100644 --- a/p2p/net/filter/filter.go +++ b/p2p/net/filter/filter.go @@ -3,12 +3,14 @@ package filter import ( "net" "strings" + "sync" ma "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" manet "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr-net" ) type Filters struct { + mu sync.RWMutex filters map[string]*net.IPNet } @@ -19,6 +21,8 @@ func NewFilters() *Filters { } func (fs *Filters) AddDialFilter(f *net.IPNet) { + fs.mu.Lock() + defer fs.mu.Unlock() fs.filters[f.String()] = f } @@ -31,6 +35,8 @@ func (f *Filters) AddrBlocked(a ma.Multiaddr) bool { ipstr := strings.Split(addr, ":")[0] ip := net.ParseIP(ipstr) + f.mu.RLock() + defer f.mu.RUnlock() for _, ft := range f.filters { if ft.Contains(ip) { return true @@ -41,6 +47,8 @@ func (f *Filters) AddrBlocked(a ma.Multiaddr) bool { func (f *Filters) Filters() []*net.IPNet { var out []*net.IPNet + f.mu.RLock() + defer f.mu.RUnlock() for _, ff := range f.filters { out = append(out, ff) } @@ -48,5 +56,7 @@ func (f *Filters) Filters() []*net.IPNet { } func (f *Filters) Remove(ff *net.IPNet) { + f.mu.Lock() + defer f.mu.Unlock() delete(f.filters, ff.String()) } diff --git a/p2p/net/swarm/swarm.go b/p2p/net/swarm/swarm.go index dabcf5368e9..0c6271fc10e 100644 --- a/p2p/net/swarm/swarm.go +++ b/p2p/net/swarm/swarm.go @@ -20,7 +20,7 @@ import ( ma "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" ps "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-peerstream" pst "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-stream-muxer" - psy "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-stream-muxer/yamux" + psmss "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-stream-muxer/multistream" 
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess" goprocessctx "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess/context" prom "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus" @@ -40,9 +40,7 @@ var peersTotal = prom.NewGaugeVec(prom.GaugeOpts{ }, []string{"peer_id"}) func init() { - tpt := *psy.DefaultTransport - tpt.MaxStreamWindowSize = 512 * 1024 - PSTransport = &tpt + PSTransport = psmss.NewTransport() } // Swarm is a connection muxer, allowing connections to other peers to diff --git a/p2p/net/swarm/swarm_test.go b/p2p/net/swarm/swarm_test.go index 9193db0109a..cc458c4cae9 100644 --- a/p2p/net/swarm/swarm_test.go +++ b/p2p/net/swarm/swarm_test.go @@ -237,6 +237,15 @@ func TestSwarm(t *testing.T) { SubtestSwarm(t, swarms, msgs) } +func TestBasicSwarm(t *testing.T) { + // t.Skip("skipping for another test") + t.Parallel() + + msgs := 1 + swarms := 2 + SubtestSwarm(t, swarms, msgs) +} + func TestConnHandler(t *testing.T) { // t.Skip("skipping for another test") t.Parallel() diff --git a/p2p/protocol/identify/id.go b/p2p/protocol/identify/id.go index ac8b44764d2..a8408b61de5 100644 --- a/p2p/protocol/identify/id.go +++ b/p2p/protocol/identify/id.go @@ -7,13 +7,13 @@ import ( semver "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/coreos/go-semver/semver" ggio "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/gogo/protobuf/io" ma "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" + msmux "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/whyrusleeping/go-multistream" context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" mstream "github.com/ipfs/go-ipfs/metrics/stream" host "github.com/ipfs/go-ipfs/p2p/host" inet "github.com/ipfs/go-ipfs/p2p/net" peer "github.com/ipfs/go-ipfs/p2p/peer" - protocol "github.com/ipfs/go-ipfs/p2p/protocol" pb "github.com/ipfs/go-ipfs/p2p/protocol/identify/pb" config "github.com/ipfs/go-ipfs/repo/config" lgbl "github.com/ipfs/go-ipfs/util/eventlog/loggables" @@ -23,7 +23,7 @@ import ( var log = logging.Logger("net/identify") // ID is the protocol.ID of the Identify Service. -const ID protocol.ID = "/ipfs/identify" +const ID = "/ipfs/identify" // IpfsVersion holds the current protocol version for a client running this code // TODO(jbenet): fix the versioning mess. @@ -87,14 +87,14 @@ func (ids *IDService) IdentifyConn(c inet.Conn) { s = mstream.WrapStream(s, ID, bwc) // ok give the response to our handler. 
- if err := protocol.WriteHeader(s, ID); err != nil { + if err := msmux.SelectProtoOrFail(ID, s); err != nil { log.Debugf("error writing stream header for %s", ID) log.Event(context.TODO(), "IdentifyOpenFailed", c.RemotePeer()) s.Close() - c.Close() return + } else { + ids.ResponseHandler(s) } - ids.ResponseHandler(s) } ids.currmu.Lock() diff --git a/p2p/protocol/mux.go b/p2p/protocol/mux.go deleted file mode 100644 index 75286b72134..00000000000 --- a/p2p/protocol/mux.go +++ /dev/null @@ -1,142 +0,0 @@ -package protocol - -import ( - "fmt" - "io" - "sync" - - context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" - inet "github.com/ipfs/go-ipfs/p2p/net" - lgbl "github.com/ipfs/go-ipfs/util/eventlog/loggables" - logging "github.com/ipfs/go-ipfs/vendor/QmQg1J6vikuXF9oDvm4wpdeAUvvkVEKW1EYDw9HhTMnP2b/go-log" -) - -var log = logging.Logger("net/mux") - -type streamHandlerMap map[ID]inet.StreamHandler - -// Mux provides simple stream multixplexing. -// It helps you precisely when: -// * You have many streams -// * You have function handlers -// -// It contains the handlers for each protocol accepted. -// It dispatches handlers for streams opened by remote peers. -type Mux struct { - lock sync.RWMutex - handlers streamHandlerMap - defaultHandler inet.StreamHandler -} - -func NewMux() *Mux { - return &Mux{ - handlers: streamHandlerMap{}, - } -} - -// Protocols returns the list of protocols this muxer has handlers for -func (m *Mux) Protocols() []ID { - m.lock.RLock() - l := make([]ID, 0, len(m.handlers)) - for p := range m.handlers { - l = append(l, p) - } - m.lock.RUnlock() - return l -} - -// ReadHeader reads the stream and returns the next Handler function -// according to the muxer encoding. -func (m *Mux) ReadHeader(s io.Reader) (ID, inet.StreamHandler, error) { - p, err := ReadHeader(s) - if err != nil { - return "", nil, err - } - - m.lock.RLock() - defer m.lock.RUnlock() - h, found := m.handlers[p] - - switch { - case !found && m.defaultHandler != nil: - return p, m.defaultHandler, nil - case !found && m.defaultHandler == nil: - return p, nil, fmt.Errorf("%s no handler with name: %s (%d)", m, p, len(p)) - default: - return p, h, nil - } -} - -// String returns the muxer's printing representation -func (m *Mux) String() string { - m.lock.RLock() - defer m.lock.RUnlock() - return fmt.Sprintf("", m, len(m.handlers)) -} - -func (m *Mux) SetDefaultHandler(h inet.StreamHandler) { - m.lock.Lock() - m.defaultHandler = h - m.lock.Unlock() -} - -// SetHandler sets the protocol handler on the Network's Muxer. -// This operation is threadsafe. -func (m *Mux) SetHandler(p ID, h inet.StreamHandler) { - log.Debugf("%s setting handler for protocol: %s (%d)", m, p, len(p)) - m.lock.Lock() - m.handlers[p] = h - m.lock.Unlock() -} - -// RemoveHandler removes the protocol handler on the Network's Muxer. -// This operation is threadsafe. -func (m *Mux) RemoveHandler(p ID) { - log.Debugf("%s removing handler for protocol: %s (%d)", m, p, len(p)) - m.lock.Lock() - delete(m.handlers, p) - m.lock.Unlock() -} - -// Handle reads the next name off the Stream, and calls a handler function -// This is done in its own goroutine, to avoid blocking the caller. -func (m *Mux) Handle(s inet.Stream) { - go m.HandleSync(s) -} - -// HandleSync reads the next name off the Stream, and calls a handler function -// This is done synchronously. The handler function will return before -// HandleSync returns. 
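// Both halves of the multistream handoff now look like this: the opener
// declares a protocol, the listener negotiates and dispatches. A sketch
// using only the calls shown in this diff (`s` an inet.Stream, `mux` a
// *msmux.MultistreamMuxer; illustration only):
//
//	// initiator side
//	if err := msmux.SelectProtoOrFail("/ipfs/identify", s); err != nil {
//		// ...
//	}
//
//	// listener side
//	mux.AddHandler("/ipfs/identify", func(rwc io.ReadWriteCloser) error {
//		// handle the negotiated stream
//		return nil
//	})
//	protoID, handle, err := mux.Negotiate(s)
//	go handle(s)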
-func (m *Mux) HandleSync(s inet.Stream) { - ctx := context.Background() - - name, handler, err := m.ReadHeader(s) - if err != nil { - err = fmt.Errorf("protocol mux error: %s", err) - log.Event(ctx, "muxError", lgbl.Error(err)) - s.Close() - return - } - - log.Debugf("muxer handle protocol %s: %s", s.Conn().RemotePeer(), name) - handler(s) -} - -// ReadLengthPrefix reads the name from Reader with a length-byte-prefix. -func ReadLengthPrefix(r io.Reader) (string, error) { - // c-string identifier - // the first byte is our length - l := make([]byte, 1) - if _, err := io.ReadFull(r, l); err != nil { - return "", err - } - length := int(l[0]) - - // the next are our identifier - name := make([]byte, length) - if _, err := io.ReadFull(r, name); err != nil { - return "", err - } - - return string(name), nil -} diff --git a/p2p/protocol/mux_test.go b/p2p/protocol/mux_test.go deleted file mode 100644 index 9e3b2455268..00000000000 --- a/p2p/protocol/mux_test.go +++ /dev/null @@ -1,67 +0,0 @@ -package protocol - -import ( - "bytes" - "testing" - - inet "github.com/ipfs/go-ipfs/p2p/net" -) - -var testCases = map[string]string{ - "/bitswap": "\u0009/bitswap\n", - "/dht": "\u0005/dht\n", - "/ipfs": "\u0006/ipfs\n", - "/ipfs/dksnafkasnfkdajfkdajfdsjadosiaaodj": ")/ipfs/dksnafkasnfkdajfkdajfdsjadosiaaodj\n", -} - -func TestWrite(t *testing.T) { - for k, v := range testCases { - buf := new(bytes.Buffer) - if err := WriteHeader(buf, ID(k)); err != nil { - t.Fatal(err) - } - - v2 := buf.Bytes() - if !bytes.Equal(v2, []byte(v)) { - t.Errorf("failed: %s - %v != %v", k, []byte(v), v2) - } - } -} - -func TestHandler(t *testing.T) { - - outs := make(chan string, 10) - - h := func(n string) func(s inet.Stream) { - return func(s inet.Stream) { - outs <- n - } - } - - m := NewMux() - m.SetDefaultHandler(h("default")) - m.SetHandler("/dht", h("bitswap")) - // m.Handlers["/ipfs"] = h("bitswap") // default! - m.SetHandler("/bitswap", h("bitswap")) - m.SetHandler("/ipfs/dksnafkasnfkdajfkdajfdsjadosiaaodj", h("bitswap")) - - for k, v := range testCases { - buf := new(bytes.Buffer) - if _, err := buf.Write([]byte(v)); err != nil { - t.Error(err) - continue - } - - name, err := ReadHeader(buf) - if err != nil { - t.Error(err) - continue - } - - if name != ID(k) { - t.Errorf("name mismatch: %s != %s", k, name) - continue - } - } - -} diff --git a/p2p/protocol/protocol.go b/p2p/protocol/protocol.go index e67bb3e56b2..f7e4a32baf0 100644 --- a/p2p/protocol/protocol.go +++ b/p2p/protocol/protocol.go @@ -1,11 +1,5 @@ package protocol -import ( - "io" - - msgio "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-msgio" -) - // ID is an identifier used to write protocol headers in streams. type ID string @@ -13,28 +7,3 @@ type ID string const ( TestingID ID = "/p2p/_testing" ) - -// WriteHeader writes a protocol.ID header to an io.Writer. This is so -// multiple protocols can be multiplexed on top of the same transport. -// -// We use go-msgio varint encoding: -// \n -// (the varint includes the \n) -func WriteHeader(w io.Writer, id ID) error { - vw := msgio.NewVarintWriter(w) - s := string(id) + "\n" // add \n - return vw.WriteMsg([]byte(s)) -} - -// ReadHeader reads a protocol.ID header from an io.Reader. This is so -// multiple protocols can be multiplexed on top of the same transport. -// See WriteHeader. 
-func ReadHeader(r io.Reader) (ID, error) { - vr := msgio.NewVarintReader(r) - msg, err := vr.ReadMsg() - if err != nil { - return ID(""), err - } - msg = msg[:len(msg)-1] // remove \n - return ID(msg), nil -} diff --git a/p2p/protocol/relay/relay_test.go b/p2p/protocol/relay/relay_test.go index aecdfadd397..671f6dddad2 100644 --- a/p2p/protocol/relay/relay_test.go +++ b/p2p/protocol/relay/relay_test.go @@ -10,6 +10,7 @@ import ( testutil "github.com/ipfs/go-ipfs/p2p/test/util" logging "github.com/ipfs/go-ipfs/vendor/QmQg1J6vikuXF9oDvm4wpdeAUvvkVEKW1EYDw9HhTMnP2b/go-log" + msmux "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/whyrusleeping/go-multistream" context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" ) @@ -62,7 +63,7 @@ func TestRelaySimple(t *testing.T) { // ok now the header's there, we can write the next protocol header. log.Debug("write testing header") - if err := protocol.WriteHeader(s, protocol.TestingID); err != nil { + if err := msmux.SelectProtoOrFail(string(protocol.TestingID), s); err != nil { t.Fatal(err) } @@ -155,7 +156,7 @@ func TestRelayAcrossFour(t *testing.T) { } log.Debugf("write relay header n1->n4 (%s -> %s)", n1p, n4p) - if err := protocol.WriteHeader(s, relay.ID); err != nil { + if err := msmux.SelectProtoOrFail(string(relay.ID), s); err != nil { t.Fatal(err) } if err := relay.WriteHeader(s, n1p, n4p); err != nil { @@ -163,7 +164,7 @@ func TestRelayAcrossFour(t *testing.T) { } log.Debugf("write relay header n1->n5 (%s -> %s)", n1p, n5p) - if err := protocol.WriteHeader(s, relay.ID); err != nil { + if err := msmux.SelectProtoOrFail(string(relay.ID), s); err != nil { t.Fatal(err) } if err := relay.WriteHeader(s, n1p, n5p); err != nil { @@ -172,7 +173,7 @@ func TestRelayAcrossFour(t *testing.T) { // ok now the header's there, we can write the next protocol header. log.Debug("write testing header") - if err := protocol.WriteHeader(s, protocol.TestingID); err != nil { + if err := msmux.SelectProtoOrFail(string(protocol.TestingID), s); err != nil { t.Fatal(err) } @@ -257,7 +258,7 @@ func TestRelayStress(t *testing.T) { // ok now the header's there, we can write the next protocol header. 
log.Debug("write testing header") - if err := protocol.WriteHeader(s, protocol.TestingID); err != nil { + if err := msmux.SelectProtoOrFail(string(protocol.TestingID), s); err != nil { t.Fatal(err) } diff --git a/p2p/test/backpressure/backpressure_test.go b/p2p/test/backpressure/backpressure_test.go index bacdcec3d89..b13d772469b 100644 --- a/p2p/test/backpressure/backpressure_test.go +++ b/p2p/test/backpressure/backpressure_test.go @@ -299,6 +299,12 @@ func TestStBackpressureStreamWrite(t *testing.T) { } } + // trigger lazy connection handshaking + _, err = s.Read(nil) + if err != nil { + t.Fatal(err) + } + // 500ms rounds of lockstep write + drain roundsStart := time.Now() roundsTotal := 0 diff --git a/pin/gc/gc.go b/pin/gc/gc.go new file mode 100644 index 00000000000..ec61f816a44 --- /dev/null +++ b/pin/gc/gc.go @@ -0,0 +1,109 @@ +package gc + +import ( + bstore "github.com/ipfs/go-ipfs/blocks/blockstore" + key "github.com/ipfs/go-ipfs/blocks/key" + bserv "github.com/ipfs/go-ipfs/blockservice" + offline "github.com/ipfs/go-ipfs/exchange/offline" + dag "github.com/ipfs/go-ipfs/merkledag" + pin "github.com/ipfs/go-ipfs/pin" + + context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + logging "github.com/ipfs/go-ipfs/vendor/QmQg1J6vikuXF9oDvm4wpdeAUvvkVEKW1EYDw9HhTMnP2b/go-log" +) + +var log = logging.Logger("gc") + +// GC performs a mark and sweep garbage collection of the blocks in the blockstore +// first, it creates a 'marked' set and adds to it the following: +// - all recursively pinned blocks, plus all of their descendants (recursively) +// - all directly pinned blocks +// - all blocks utilized internally by the pinner +// +// The routine then iterates over every block in the blockstore and +// deletes any block that is not found in the marked set. +func GC(ctx context.Context, bs bstore.GCBlockstore, pn pin.Pinner) (<-chan key.Key, error) { + unlock := bs.GCLock() + defer unlock() + + bsrv := bserv.New(bs, offline.Exchange(bs)) + ds := dag.NewDAGService(bsrv) + + gcs, err := ColoredSet(pn, ds) + if err != nil { + return nil, err + } + + keychan, err := bs.AllKeysChan(ctx) + if err != nil { + return nil, err + } + + output := make(chan key.Key) + go func() { + defer close(output) + for { + select { + case k, ok := <-keychan: + if !ok { + return + } + if !gcs.Has(k) { + err := bs.DeleteBlock(k) + if err != nil { + log.Debugf("Error removing key from blockstore: %s", err) + return + } + select { + case output <- k: + case <-ctx.Done(): + return + } + } + case <-ctx.Done(): + return + } + } + }() + + return output, nil +} + +func Descendants(ds dag.DAGService, set key.KeySet, roots []key.Key) error { + for _, k := range roots { + set.Add(k) + nd, err := ds.Get(context.Background(), k) + if err != nil { + return err + } + + // EnumerateChildren recursively walks the dag and adds the keys to the given set + err = dag.EnumerateChildren(context.Background(), ds, nd, set) + if err != nil { + return err + } + } + + return nil +} + +func ColoredSet(pn pin.Pinner, ds dag.DAGService) (key.KeySet, error) { + // KeySet currently implemented in memory, in the future, may be bloom filter or + // disk backed to conserve memory. 
+ gcs := key.NewKeySet() + err := Descendants(ds, gcs, pn.RecursiveKeys()) + if err != nil { + return nil, err + } + + for _, k := range pn.DirectKeys() { + gcs.Add(k) + } + + err = Descendants(ds, gcs, pn.InternalPins()) + if err != nil { + return nil, err + } + + return gcs, nil +} diff --git a/pin/indirect.go b/pin/indirect.go index dca99600fc8..22e3a1fb47c 100644 --- a/pin/indirect.go +++ b/pin/indirect.go @@ -1,78 +1,39 @@ package pin import ( - ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" key "github.com/ipfs/go-ipfs/blocks/key" - "github.com/ipfs/go-ipfs/blocks/set" ) type indirectPin struct { - blockset set.BlockSet - refCounts map[key.Key]int + refCounts map[key.Key]uint64 } -func NewIndirectPin(dstore ds.Datastore) *indirectPin { +func newIndirectPin() *indirectPin { return &indirectPin{ - blockset: set.NewDBWrapperSet(dstore, set.NewSimpleBlockSet()), - refCounts: make(map[key.Key]int), + refCounts: make(map[key.Key]uint64), } } -func loadIndirPin(d ds.Datastore, k ds.Key) (*indirectPin, error) { - var rcStore map[string]int - err := loadSet(d, k, &rcStore) - if err != nil { - return nil, err - } - - refcnt := make(map[key.Key]int) - var keys []key.Key - for encK, v := range rcStore { - if v > 0 { - k := key.B58KeyDecode(encK) - keys = append(keys, k) - refcnt[k] = v - } - } - // log.Debugf("indirPin keys: %#v", keys) - - return &indirectPin{blockset: set.SimpleSetFromKeys(keys), refCounts: refcnt}, nil -} - -func storeIndirPin(d ds.Datastore, k ds.Key, p *indirectPin) error { - - rcStore := map[string]int{} - for k, v := range p.refCounts { - rcStore[key.B58KeyEncode(k)] = v - } - return storeSet(d, k, rcStore) -} - func (i *indirectPin) Increment(k key.Key) { - c := i.refCounts[k] - i.refCounts[k] = c + 1 - if c <= 0 { - i.blockset.AddBlock(k) - } + i.refCounts[k]++ } func (i *indirectPin) Decrement(k key.Key) { - c := i.refCounts[k] - 1 - i.refCounts[k] = c - if c <= 0 { - i.blockset.RemoveBlock(k) + if i.refCounts[k] == 0 { + log.Warningf("pinning: bad call: asked to unpin nonexistent indirect key: %v", k) + return + } + i.refCounts[k]-- + if i.refCounts[k] == 0 { delete(i.refCounts, k) } } func (i *indirectPin) HasKey(k key.Key) bool { - return i.blockset.HasKey(k) -} - -func (i *indirectPin) Set() set.BlockSet { - return i.blockset + _, found := i.refCounts[k] + return found } -func (i *indirectPin) GetRefs() map[key.Key]int { +func (i *indirectPin) GetRefs() map[key.Key]uint64 { return i.refCounts } diff --git a/pin/internal/pb/doc.go b/pin/internal/pb/doc.go new file mode 100644 index 00000000000..1143a4d83f7 --- /dev/null +++ b/pin/internal/pb/doc.go @@ -0,0 +1,6 @@ +package pb + +//go:generate protoc --gogo_out=. header.proto + +// kludge to get vendoring right in protobuf output +//go:generate sed -i s,github.com/,github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/,g header.pb.go diff --git a/pin/internal/pb/header.pb.go b/pin/internal/pb/header.pb.go new file mode 100644 index 00000000000..eafb246e702 --- /dev/null +++ b/pin/internal/pb/header.pb.go @@ -0,0 +1,59 @@ +// Code generated by protoc-gen-gogo. +// source: header.proto +// DO NOT EDIT! + +/* +Package pb is a generated protocol buffer package. + +It is generated from these files: + header.proto + +It has these top-level messages: + Set +*/ +package pb + +import proto "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/gogo/protobuf/proto" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
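// The simplified refcounting in indirect.go above has purely in-memory
// semantics now; a worked example (hypothetical key k):
//
//	p := newIndirectPin()
//	p.Increment(k)
//	p.Increment(k) // refCounts[k] == 2
//	p.Decrement(k) // refCounts[k] == 1, still pinned
//	p.Decrement(k) // count hits zero, entry deleted
//	p.HasKey(k)    // false
//	p.Decrement(k) // logs a warning instead of underflowing the uint64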
+var _ = proto.Marshal +var _ = math.Inf + +type Set struct { + // 1 for now, library will refuse to handle entries with an unrecognized version. + Version *uint32 `protobuf:"varint,1,opt,name=version" json:"version,omitempty"` + // how many of the links are subtrees + Fanout *uint32 `protobuf:"varint,2,opt,name=fanout" json:"fanout,omitempty"` + // hash seed for subtree selection, a random number + Seed *uint32 `protobuf:"fixed32,3,opt,name=seed" json:"seed,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Set) Reset() { *m = Set{} } +func (m *Set) String() string { return proto.CompactTextString(m) } +func (*Set) ProtoMessage() {} + +func (m *Set) GetVersion() uint32 { + if m != nil && m.Version != nil { + return *m.Version + } + return 0 +} + +func (m *Set) GetFanout() uint32 { + if m != nil && m.Fanout != nil { + return *m.Fanout + } + return 0 +} + +func (m *Set) GetSeed() uint32 { + if m != nil && m.Seed != nil { + return *m.Seed + } + return 0 +} + +func init() { +} diff --git a/pin/internal/pb/header.proto b/pin/internal/pb/header.proto new file mode 100644 index 00000000000..36b32b36dd1 --- /dev/null +++ b/pin/internal/pb/header.proto @@ -0,0 +1,14 @@ +syntax = "proto2"; + +package ipfs.pin; + +option go_package = "pb"; + +message Set { + // 1 for now, library will refuse to handle entries with an unrecognized version. + optional uint32 version = 1; + // how many of the links are subtrees + optional uint32 fanout = 2; + // hash seed for subtree selection, a random number + optional fixed32 seed = 3; +} diff --git a/pin/pin.go b/pin/pin.go index 53d965e9b6f..41d97a14201 100644 --- a/pin/pin.go +++ b/pin/pin.go @@ -1,15 +1,13 @@ -// package pin implemnts structures and methods to keep track of +// package pin implements structures and methods to keep track of // which objects a user wants to keep stored locally. package pin import ( - "encoding/json" - "errors" "fmt" "sync" + "time" ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" - nsds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/namespace" context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" key "github.com/ipfs/go-ipfs/blocks/key" "github.com/ipfs/go-ipfs/blocks/set" @@ -18,37 +16,42 @@ import ( ) var log = logging.Logger("pin") -var recursePinDatastoreKey = ds.NewKey("/local/pins/recursive/keys") -var directPinDatastoreKey = ds.NewKey("/local/pins/direct/keys") -var indirectPinDatastoreKey = ds.NewKey("/local/pins/indirect/keys") + +var pinDatastoreKey = ds.NewKey("/local/pins") + +var emptyKey = key.B58KeyDecode("QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n") + +const ( + linkDirect = "direct" + linkRecursive = "recursive" +) type PinMode int const ( Recursive PinMode = iota Direct - Indirect NotPinned ) type Pinner interface { - IsPinned(key.Key) bool + IsPinned(key.Key) (string, bool, error) Pin(context.Context, *mdag.Node, bool) error Unpin(context.Context, key.Key, bool) error - Flush() error - GetManual() ManualPinner - DirectKeys() []key.Key - IndirectKeys() map[key.Key]int - RecursiveKeys() []key.Key -} -// ManualPinner is for manually editing the pin structure -// Use with care! If used improperly, garbage collection -// may not be successful -type ManualPinner interface { + // PinWithMode is for manually editing the pin structure. Use with + // care! If used improperly, garbage collection may not be + // successful. 
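// The new on-disk layout replacing the three JSON keysets, as set up by the
// constants above and used by Flush/LoadPinner below (a sketch; the fanout
// set objects are the ones described by header.proto):
//
//	datastore["/local/pins"] = <key of the pin root object>
//	pin root (a dag.Node)
//	  |- "direct"    -> stored set of directly pinned keys
//	  |- "recursive" -> stored set of recursively pinned root keys
//
// The empty node (emptyKey above) is referenced by the stored sets but never
// pinned itself, which is why Flush re-adds it to the dagservice on every
// write.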
PinWithMode(key.Key, PinMode) + // RemovePinWithMode is for manually editing the pin structure. + // Use with care! If used improperly, garbage collection may not + // be successful. RemovePinWithMode(key.Key, PinMode) - Pinner + + Flush() error + DirectKeys() []key.Key + RecursiveKeys() []key.Key + InternalPins() []key.Key } // pinner implements the Pinner interface @@ -56,26 +59,25 @@ type pinner struct { lock sync.RWMutex recursePin set.BlockSet directPin set.BlockSet - indirPin *indirectPin - dserv mdag.DAGService - dstore ds.ThreadSafeDatastore + + // Track the keys used for storing the pinning state, so gc does + // not delete them. + internalPin map[key.Key]struct{} + dserv mdag.DAGService + dstore ds.Datastore } // NewPinner creates a new pinner using the given datastore as a backend -func NewPinner(dstore ds.ThreadSafeDatastore, serv mdag.DAGService) Pinner { +func NewPinner(dstore ds.Datastore, serv mdag.DAGService) Pinner { // Load set from given datastore... - rcds := nsds.Wrap(dstore, recursePinDatastoreKey) - rcset := set.NewDBWrapperSet(rcds, set.NewSimpleBlockSet()) + rcset := set.NewSimpleBlockSet() - dirds := nsds.Wrap(dstore, directPinDatastoreKey) - dirset := set.NewDBWrapperSet(dirds, set.NewSimpleBlockSet()) + dirset := set.NewSimpleBlockSet() - nsdstore := nsds.Wrap(dstore, indirectPinDatastoreKey) return &pinner{ recursePin: rcset, directPin: dirset, - indirPin: NewIndirectPin(nsdstore), dserv: serv, dstore: dstore, } @@ -99,7 +101,8 @@ func (p *pinner) Pin(ctx context.Context, node *mdag.Node, recurse bool) error { p.directPin.RemoveBlock(k) } - err := p.pinLinks(ctx, node) + // fetch entire graph + err := mdag.FetchGraph(ctx, node, p.dserv) if err != nil { return err } @@ -123,82 +126,70 @@ func (p *pinner) Pin(ctx context.Context, node *mdag.Node, recurse bool) error { func (p *pinner) Unpin(ctx context.Context, k key.Key, recursive bool) error { p.lock.Lock() defer p.lock.Unlock() - if p.recursePin.HasKey(k) { + reason, pinned, err := p.isPinned(k) + if err != nil { + return err + } + if !pinned { + return fmt.Errorf("%s is not pinned", k) + } + switch reason { + case "recursive": if recursive { p.recursePin.RemoveBlock(k) - node, err := p.dserv.Get(ctx, k) - if err != nil { - return err - } - - return p.unpinLinks(ctx, node) + return nil } else { return fmt.Errorf("%s is pinned recursively", k) } - } else if p.directPin.HasKey(k) { + case "direct": p.directPin.RemoveBlock(k) return nil - } else if p.indirPin.HasKey(k) { - return fmt.Errorf("%s is pinned indirectly. 
indirect pins cannot be removed directly", k) - } else { - return fmt.Errorf("%s is not pinned", k) + default: + return fmt.Errorf("%s is pinned indirectly under %s", k, reason) } } -func (p *pinner) unpinLinks(ctx context.Context, node *mdag.Node) error { - for _, l := range node.Links { - node, err := l.GetNode(ctx, p.dserv) - if err != nil { - return err - } - - k, err := node.Key() - if err != nil { - return err - } - - p.indirPin.Decrement(k) +func (p *pinner) isInternalPin(key key.Key) bool { + _, ok := p.internalPin[key] + return ok +} - err = p.unpinLinks(ctx, node) - if err != nil { - return err - } - } - return nil +// IsPinned returns whether or not the given key is pinned +// and an explanation of why its pinned +func (p *pinner) IsPinned(k key.Key) (string, bool, error) { + p.lock.RLock() + defer p.lock.RUnlock() + return p.isPinned(k) } -func (p *pinner) pinIndirectRecurse(ctx context.Context, node *mdag.Node) error { - k, err := node.Key() - if err != nil { - return err +// isPinned is the implementation of IsPinned that does not lock. +// intended for use by other pinned methods that already take locks +func (p *pinner) isPinned(k key.Key) (string, bool, error) { + if p.recursePin.HasKey(k) { + return "recursive", true, nil + } + if p.directPin.HasKey(k) { + return "direct", true, nil + } + if p.isInternalPin(k) { + return "internal", true, nil } - p.indirPin.Increment(k) - return p.pinLinks(ctx, node) -} - -func (p *pinner) pinLinks(ctx context.Context, node *mdag.Node) error { - for _, ng := range p.dserv.GetDAG(ctx, node) { - subnode, err := ng.Get(ctx) + for _, rk := range p.recursePin.GetKeys() { + rnd, err := p.dserv.Get(context.Background(), rk) if err != nil { - // TODO: Maybe just log and continue? - return err + return "", false, err } - err = p.pinIndirectRecurse(ctx, subnode) + + has, err := hasChild(p.dserv, rnd, k) if err != nil { - return err + return "", false, err + } + if has { + return rk.B58String(), true, nil } } - return nil -} - -// IsPinned returns whether or not the given key is pinned -func (p *pinner) IsPinned(key key.Key) bool { - p.lock.RLock() - defer p.lock.RUnlock() - return p.recursePin.HasKey(key) || - p.directPin.HasKey(key) || - p.indirPin.HasKey(key) + return "", false, nil } func (p *pinner) RemovePinWithMode(key key.Key, mode PinMode) { @@ -207,8 +198,6 @@ func (p *pinner) RemovePinWithMode(key key.Key, mode PinMode) { switch mode { case Direct: p.directPin.RemoveBlock(key) - case Indirect: - p.indirPin.Decrement(key) case Recursive: p.recursePin.RemoveBlock(key) default: @@ -218,32 +207,52 @@ func (p *pinner) RemovePinWithMode(key key.Key, mode PinMode) { } // LoadPinner loads a pinner and its keysets from the given datastore -func LoadPinner(d ds.ThreadSafeDatastore, dserv mdag.DAGService) (Pinner, error) { +func LoadPinner(d ds.Datastore, dserv mdag.DAGService) (Pinner, error) { p := new(pinner) + rootKeyI, err := d.Get(pinDatastoreKey) + if err != nil { + return nil, fmt.Errorf("cannot load pin state: %v", err) + } + rootKeyBytes, ok := rootKeyI.([]byte) + if !ok { + return nil, fmt.Errorf("cannot load pin state: %s was not bytes", pinDatastoreKey) + } + + rootKey := key.Key(rootKeyBytes) + + ctx, cancel := context.WithTimeout(context.TODO(), time.Second*5) + defer cancel() + + root, err := dserv.Get(ctx, rootKey) + if err != nil { + return nil, fmt.Errorf("cannot find pinning root object: %v", err) + } + + internalPin := map[key.Key]struct{}{ + rootKey: struct{}{}, + } + recordInternal := func(k key.Key) { + internalPin[k] = 
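// A usage sketch for the three-valued IsPinned above (hypothetical pinner
// `p` and key `k`): the returned string explains *why* something is pinned.
//
//	reason, pinned, err := p.IsPinned(k)
//	switch {
//	case err != nil:
//		// lookup failed
//	case !pinned:
//		// not pinned at all
//	case reason == "recursive" || reason == "direct" || reason == "internal":
//		// pinned by mode
//	default:
//		// reason is the b58 key of the recursive pin that covers k
//	}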
+		internalPin[k] = struct{}{}
+	}
+
 	{ // load recursive set
-		var recurseKeys []key.Key
-		if err := loadSet(d, recursePinDatastoreKey, &recurseKeys); err != nil {
-			return nil, err
+		recurseKeys, err := loadSet(ctx, dserv, root, linkRecursive, recordInternal)
+		if err != nil {
+			return nil, fmt.Errorf("cannot load recursive pins: %v", err)
 		}
 		p.recursePin = set.SimpleSetFromKeys(recurseKeys)
 	}
 
 	{ // load direct set
-		var directKeys []key.Key
-		if err := loadSet(d, directPinDatastoreKey, &directKeys); err != nil {
-			return nil, err
+		directKeys, err := loadSet(ctx, dserv, root, linkDirect, recordInternal)
+		if err != nil {
+			return nil, fmt.Errorf("cannot load direct pins: %v", err)
 		}
 		p.directPin = set.SimpleSetFromKeys(directKeys)
 	}
 
-	{ // load indirect set
-		var err error
-		p.indirPin, err = loadIndirPin(d, indirectPinDatastoreKey)
-		if err != nil {
-			return nil, err
-		}
-	}
+	p.internalPin = internalPin
 
 	// assign services
 	p.dserv = dserv
@@ -257,11 +266,6 @@ func (p *pinner) DirectKeys() []key.Key {
 	return p.directPin.GetKeys()
 }
 
-// IndirectKeys returns a slice containing the indirectly pinned keys
-func (p *pinner) IndirectKeys() map[key.Key]int {
-	return p.indirPin.GetRefs()
-}
-
 // RecursiveKeys returns a slice containing the recursively pinned keys
 func (p *pinner) RecursiveKeys() []key.Key {
 	return p.recursePin.GetKeys()
@@ -272,48 +276,65 @@ func (p *pinner) Flush() error {
 	p.lock.Lock()
 	defer p.lock.Unlock()
 
-	err := storeSet(p.dstore, directPinDatastoreKey, p.directPin.GetKeys())
-	if err != nil {
-		return err
+	ctx := context.TODO()
+
+	internalPin := make(map[key.Key]struct{})
+	recordInternal := func(k key.Key) {
+		internalPin[k] = struct{}{}
 	}
 
-	err = storeSet(p.dstore, recursePinDatastoreKey, p.recursePin.GetKeys())
-	if err != nil {
-		return err
+	root := &mdag.Node{}
+	{
+		n, err := storeSet(ctx, p.dserv, p.directPin.GetKeys(), recordInternal)
+		if err != nil {
+			return err
+		}
+		if err := root.AddNodeLink(linkDirect, n); err != nil {
+			return err
+		}
 	}
 
-	err = storeIndirPin(p.dstore, indirectPinDatastoreKey, p.indirPin)
-	if err != nil {
-		return err
+	{
+		n, err := storeSet(ctx, p.dserv, p.recursePin.GetKeys(), recordInternal)
+		if err != nil {
+			return err
+		}
+		if err := root.AddNodeLink(linkRecursive, n); err != nil {
+			return err
+		}
 	}
 
-	return nil
-}
 
-// helpers to marshal / unmarshal a pin set
-func storeSet(d ds.Datastore, k ds.Key, val interface{}) error {
-	buf, err := json.Marshal(val)
+	// add the empty node; it's referenced by the pin sets but never created
+	_, err := p.dserv.Add(new(mdag.Node))
 	if err != nil {
 		return err
 	}
 
-	return d.Put(k, buf)
-}
-
-func loadSet(d ds.Datastore, k ds.Key, val interface{}) error {
-	buf, err := d.Get(k)
+	k, err := p.dserv.Add(root)
 	if err != nil {
 		return err
 	}
 
-	bf, ok := buf.([]byte)
-	if !ok {
-		return errors.New("invalid pin set value in datastore")
+	internalPin[k] = struct{}{}
+	if err := p.dstore.Put(pinDatastoreKey, []byte(k)); err != nil {
+		return fmt.Errorf("cannot store pin state: %v", err)
 	}
-
-	return json.Unmarshal(bf, val)
+	p.internalPin = internalPin
+	return nil
 }
 
-// PinWithMode is a method on ManualPinners, allowing the user to have fine
-// grained control over pin counts
+func (p *pinner) InternalPins() []key.Key {
+	p.lock.Lock()
+	defer p.lock.Unlock()
+	var out []key.Key
+	for k := range p.internalPin {
+		out = append(out, k)
+	}
+	return out
+}
+
+// PinWithMode allows the user to have fine-grained control over pin
+// counts
 func (p *pinner) PinWithMode(k key.Key, mode PinMode) {
 	p.lock.Lock()
 	defer p.lock.Unlock()
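To see the new persistence model end to end: Flush serializes each pin set into a DAG node, hangs both off a single root, and records only that root's key in the datastore; LoadPinner walks the same path in reverse. A minimal sketch of the round trip (error handling and the direct set elided; recursiveKeys, recordInternal, dserv and dstore stand in for the real values, and storeSet/loadSet are defined in pin/set.go later in this diff):

	root := &mdag.Node{}
	n, _ := storeSet(ctx, dserv, recursiveKeys, recordInternal) // pack the keys into a fanout set node
	_ = root.AddNodeLink(linkRecursive, n)                      // hang the set off the root
	rootKey, _ := dserv.Add(root)                               // persist the root in the DAG
	_ = dstore.Put(pinDatastoreKey, []byte(rootKey))            // the datastore remembers only this key

	// Loading reverses the steps:
	raw, _ := dstore.Get(pinDatastoreKey)
	root, _ = dserv.Get(ctx, key.Key(raw.([]byte)))
	recursiveKeys, _ = loadSet(ctx, dserv, root, linkRecursive, recordInternal)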
@@ -322,11 +343,29 @@ func (p *pinner) PinWithMode(k key.Key, mode PinMode) { p.recursePin.AddBlock(k) case Direct: p.directPin.AddBlock(k) - case Indirect: - p.indirPin.Increment(k) } } -func (p *pinner) GetManual() ManualPinner { - return p +func hasChild(ds mdag.DAGService, root *mdag.Node, child key.Key) (bool, error) { + for _, lnk := range root.Links { + k := key.Key(lnk.Hash) + if k == child { + return true, nil + } + + nd, err := ds.Get(context.Background(), k) + if err != nil { + return false, err + } + + has, err := hasChild(ds, nd, child) + if err != nil { + return false, err + } + + if has { + return has, nil + } + } + return false, nil } diff --git a/pin/pin_test.go b/pin/pin_test.go index d3947254d55..818a414ab9e 100644 --- a/pin/pin_test.go +++ b/pin/pin_test.go @@ -24,6 +24,17 @@ func randNode() (*mdag.Node, key.Key) { return nd, k } +func assertPinned(t *testing.T, p Pinner, k key.Key, failmsg string) { + _, pinned, err := p.IsPinned(k) + if err != nil { + t.Fatal(err) + } + + if !pinned { + t.Fatal(failmsg) + } +} + func TestPinnerBasic(t *testing.T) { ctx := context.Background() @@ -48,13 +59,11 @@ func TestPinnerBasic(t *testing.T) { t.Fatal(err) } - if !p.IsPinned(ak) { - t.Fatal("Failed to find key") - } + assertPinned(t, p, ak, "Failed to find key") // create new node c, to be indirectly pinned through b - c, ck := randNode() - _, err = dserv.Add(c) + c, _ := randNode() + ck, err := dserv.Add(c) if err != nil { t.Fatal(err) } @@ -82,20 +91,16 @@ func TestPinnerBasic(t *testing.T) { t.Fatal(err) } - if !p.IsPinned(ck) { - t.Fatal("Child of recursively pinned node not found") - } + assertPinned(t, p, ck, "child of recursively pinned node not found") bk, _ := b.Key() - if !p.IsPinned(bk) { - t.Fatal("Recursively pinned node not found..") - } + assertPinned(t, p, bk, "Recursively pinned node not found..") d, _ := randNode() d.AddNodeLink("a", a) d.AddNodeLink("c", c) - e, ek := randNode() + e, _ := randNode() d.AddNodeLink("e", e) // Must be in dagserv for unpin to work @@ -110,14 +115,8 @@ func TestPinnerBasic(t *testing.T) { t.Fatal(err) } - if !p.IsPinned(ek) { - t.Fatal(err) - } - dk, _ := d.Key() - if !p.IsPinned(dk) { - t.Fatal("pinned node not found.") - } + assertPinned(t, p, dk, "pinned node not found.") // Test recursive unpin err = p.Unpin(ctx, dk, true) @@ -125,11 +124,6 @@ func TestPinnerBasic(t *testing.T) { t.Fatal(err) } - // c should still be pinned under b - if !p.IsPinned(ck) { - t.Fatal("Recursive / indirect unpin fail.") - } - err = p.Flush() if err != nil { t.Fatal(err) @@ -141,19 +135,10 @@ func TestPinnerBasic(t *testing.T) { } // Test directly pinned - if !np.IsPinned(ak) { - t.Fatal("Could not find pinned node!") - } - - // Test indirectly pinned - if !np.IsPinned(ck) { - t.Fatal("could not find indirectly pinned node") - } + assertPinned(t, np, ak, "Could not find pinned node!") // Test recursively pinned - if !np.IsPinned(bk) { - t.Fatal("could not find recursively pinned node") - } + assertPinned(t, np, bk, "could not find recursively pinned node") } func TestDuplicateSemantics(t *testing.T) { @@ -192,12 +177,27 @@ func TestDuplicateSemantics(t *testing.T) { } } +func TestFlush(t *testing.T) { + dstore := dssync.MutexWrap(ds.NewMapDatastore()) + bstore := blockstore.NewBlockstore(dstore) + bserv := bs.New(bstore, offline.Exchange(bstore)) + + dserv := mdag.NewDAGService(bserv) + p := NewPinner(dstore, dserv) + _, k := randNode() + + p.PinWithMode(k, Recursive) + if err := p.Flush(); err != nil { + t.Fatal(err) + } + assertPinned(t, p, k, 
"expected key to still be pinned") +} + func TestPinRecursiveFail(t *testing.T) { ctx := context.Background() dstore := dssync.MutexWrap(ds.NewMapDatastore()) bstore := blockstore.NewBlockstore(dstore) bserv := bs.New(bstore, offline.Exchange(bstore)) - dserv := mdag.NewDAGService(bserv) p := NewPinner(dstore, dserv) @@ -210,21 +210,21 @@ func TestPinRecursiveFail(t *testing.T) { } // Note: this isnt a time based test, we expect the pin to fail - mctx, cancel := context.WithTimeout(ctx, time.Millisecond) - defer cancel() + mctx, _ := context.WithTimeout(ctx, time.Millisecond) err = p.Pin(mctx, a, true) if err == nil { t.Fatal("should have failed to pin here") } - if _, err := dserv.Add(b); err != nil { + _, err = dserv.Add(b) + if err != nil { t.Fatal(err) } // this one is time based... but shouldnt cause any issues - mctx, cancel = context.WithTimeout(ctx, time.Second) - defer cancel() - if err := p.Pin(mctx, a, true); err != nil { + mctx, _ = context.WithTimeout(ctx, time.Second) + err = p.Pin(mctx, a, true) + if err != nil { t.Fatal(err) } } diff --git a/pin/set.go b/pin/set.go new file mode 100644 index 00000000000..71851af6eda --- /dev/null +++ b/pin/set.go @@ -0,0 +1,366 @@ +package pin + +import ( + "bytes" + "crypto/rand" + "encoding/binary" + "errors" + "fmt" + "hash/fnv" + "io" + "sort" + "unsafe" + + "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/gogo/protobuf/proto" + "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + "github.com/ipfs/go-ipfs/blocks/key" + "github.com/ipfs/go-ipfs/merkledag" + "github.com/ipfs/go-ipfs/pin/internal/pb" +) + +const ( + defaultFanout = 256 + maxItems = 8192 +) + +func randomSeed() (uint32, error) { + var buf [4]byte + if _, err := rand.Read(buf[:]); err != nil { + return 0, err + } + return binary.LittleEndian.Uint32(buf[:]), nil +} + +func hash(seed uint32, k key.Key) uint32 { + var buf [4]byte + binary.LittleEndian.PutUint32(buf[:], seed) + h := fnv.New32a() + _, _ = h.Write(buf[:]) + _, _ = io.WriteString(h, string(k)) + return h.Sum32() +} + +type itemIterator func() (k key.Key, data []byte, ok bool) + +type keyObserver func(key.Key) + +// refcount is the marshaled format of refcounts. It may change +// between versions; this is valid for version 1. Changing it may +// become desirable if there are many links with refcount > 255. +// +// There are two guarantees that need to be preserved, if this is +// changed: +// +// - the marshaled format is of fixed size, matching +// unsafe.Sizeof(refcount(0)) +// - methods of refcount handle endianness, and may +// in later versions need encoding/binary. +type refcount uint8 + +func (r refcount) Bytes() []byte { + return []byte{byte(r)} +} + +// readRefcount returns the idx'th refcount in []byte, which is +// assumed to be a sequence of refcount.Bytes results. 
+func (r *refcount) ReadFromIdx(buf []byte, idx int) { + *r = refcount(buf[idx]) +} + +type sortByHash struct { + links []*merkledag.Link + data []byte +} + +func (s sortByHash) Len() int { + return len(s.links) +} + +func (s sortByHash) Less(a, b int) bool { + return bytes.Compare(s.links[a].Hash, s.links[b].Hash) == -1 +} + +func (s sortByHash) Swap(a, b int) { + s.links[a], s.links[b] = s.links[b], s.links[a] + if len(s.data) != 0 { + const n = int(unsafe.Sizeof(refcount(0))) + tmp := make([]byte, n) + copy(tmp, s.data[a*n:a*n+n]) + copy(s.data[a*n:a*n+n], s.data[b*n:b*n+n]) + copy(s.data[b*n:b*n+n], tmp) + } +} + +func storeItems(ctx context.Context, dag merkledag.DAGService, estimatedLen uint64, iter itemIterator, internalKeys keyObserver) (*merkledag.Node, error) { + seed, err := randomSeed() + if err != nil { + return nil, err + } + n := &merkledag.Node{ + Links: make([]*merkledag.Link, 0, defaultFanout+maxItems), + } + for i := 0; i < defaultFanout; i++ { + n.Links = append(n.Links, &merkledag.Link{Hash: emptyKey.ToMultihash()}) + } + internalKeys(emptyKey) + hdr := &pb.Set{ + Version: proto.Uint32(1), + Fanout: proto.Uint32(defaultFanout), + Seed: proto.Uint32(seed), + } + if err := writeHdr(n, hdr); err != nil { + return nil, err + } + hdrLen := len(n.Data) + + if estimatedLen < maxItems { + // it'll probably fit + for i := 0; i < maxItems; i++ { + k, data, ok := iter() + if !ok { + // all done + break + } + n.Links = append(n.Links, &merkledag.Link{Hash: k.ToMultihash()}) + n.Data = append(n.Data, data...) + } + // sort by hash, also swap item Data + s := sortByHash{ + links: n.Links[defaultFanout:], + data: n.Data[hdrLen:], + } + sort.Stable(s) + } + + // wasteful but simple + type item struct { + k key.Key + data []byte + } + hashed := make(map[uint32][]item) + for { + k, data, ok := iter() + if !ok { + break + } + h := hash(seed, k) + hashed[h] = append(hashed[h], item{k, data}) + } + for h, items := range hashed { + childIter := func() (k key.Key, data []byte, ok bool) { + if len(items) == 0 { + return "", nil, false + } + first := items[0] + items = items[1:] + return first.k, first.data, true + } + child, err := storeItems(ctx, dag, uint64(len(items)), childIter, internalKeys) + if err != nil { + return nil, err + } + size, err := child.Size() + if err != nil { + return nil, err + } + childKey, err := dag.Add(child) + if err != nil { + return nil, err + } + internalKeys(childKey) + l := &merkledag.Link{ + Name: "", + Hash: childKey.ToMultihash(), + Size: size, + Node: child, + } + n.Links[int(h%defaultFanout)] = l + } + return n, nil +} + +func readHdr(n *merkledag.Node) (*pb.Set, []byte, error) { + hdrLenRaw, consumed := binary.Uvarint(n.Data) + if consumed <= 0 { + return nil, nil, errors.New("invalid Set header length") + } + buf := n.Data[consumed:] + if hdrLenRaw > uint64(len(buf)) { + return nil, nil, errors.New("impossibly large Set header length") + } + // as hdrLenRaw was <= an int, we now know it fits in an int + hdrLen := int(hdrLenRaw) + var hdr pb.Set + if err := proto.Unmarshal(buf[:hdrLen], &hdr); err != nil { + return nil, nil, err + } + buf = buf[hdrLen:] + + if v := hdr.GetVersion(); v != 1 { + return nil, nil, fmt.Errorf("unsupported Set version: %d", v) + } + if uint64(hdr.GetFanout()) > uint64(len(n.Links)) { + return nil, nil, errors.New("impossibly large Fanout") + } + return &hdr, buf, nil +} + +func writeHdr(n *merkledag.Node, hdr *pb.Set) error { + hdrData, err := proto.Marshal(hdr) + if err != nil { + return err + } + n.Data = make([]byte, 
binary.MaxVarintLen64, binary.MaxVarintLen64+len(hdrData)) + written := binary.PutUvarint(n.Data, uint64(len(hdrData))) + n.Data = n.Data[:written] + n.Data = append(n.Data, hdrData...) + return nil +} + +type walkerFunc func(buf []byte, idx int, link *merkledag.Link) error + +func walkItems(ctx context.Context, dag merkledag.DAGService, n *merkledag.Node, fn walkerFunc, children keyObserver) error { + hdr, buf, err := readHdr(n) + if err != nil { + return err + } + // readHdr guarantees fanout is a safe value + fanout := hdr.GetFanout() + for i, l := range n.Links[fanout:] { + if err := fn(buf, i, l); err != nil { + return err + } + } + for _, l := range n.Links[:fanout] { + children(key.Key(l.Hash)) + if key.Key(l.Hash) == emptyKey { + continue + } + subtree, err := l.GetNode(ctx, dag) + if err != nil { + return err + } + if err := walkItems(ctx, dag, subtree, fn, children); err != nil { + return err + } + } + return nil +} + +func loadSet(ctx context.Context, dag merkledag.DAGService, root *merkledag.Node, name string, internalKeys keyObserver) ([]key.Key, error) { + l, err := root.GetNodeLink(name) + if err != nil { + return nil, err + } + internalKeys(key.Key(l.Hash)) + n, err := l.GetNode(ctx, dag) + if err != nil { + return nil, err + } + + var res []key.Key + walk := func(buf []byte, idx int, link *merkledag.Link) error { + res = append(res, key.Key(link.Hash)) + return nil + } + if err := walkItems(ctx, dag, n, walk, internalKeys); err != nil { + return nil, err + } + return res, nil +} + +func loadMultiset(ctx context.Context, dag merkledag.DAGService, root *merkledag.Node, name string, internalKeys keyObserver) (map[key.Key]uint64, error) { + l, err := root.GetNodeLink(name) + if err != nil { + return nil, err + } + internalKeys(key.Key(l.Hash)) + n, err := l.GetNode(ctx, dag) + if err != nil { + return nil, err + } + + refcounts := make(map[key.Key]uint64) + walk := func(buf []byte, idx int, link *merkledag.Link) error { + var r refcount + r.ReadFromIdx(buf, idx) + refcounts[key.Key(link.Hash)] += uint64(r) + return nil + } + if err := walkItems(ctx, dag, n, walk, internalKeys); err != nil { + return nil, err + } + return refcounts, nil +} + +func storeSet(ctx context.Context, dag merkledag.DAGService, keys []key.Key, internalKeys keyObserver) (*merkledag.Node, error) { + iter := func() (k key.Key, data []byte, ok bool) { + if len(keys) == 0 { + return "", nil, false + } + first := keys[0] + keys = keys[1:] + return first, nil, true + } + n, err := storeItems(ctx, dag, uint64(len(keys)), iter, internalKeys) + if err != nil { + return nil, err + } + k, err := dag.Add(n) + if err != nil { + return nil, err + } + internalKeys(k) + return n, nil +} + +func copyRefcounts(orig map[key.Key]uint64) map[key.Key]uint64 { + r := make(map[key.Key]uint64, len(orig)) + for k, v := range orig { + r[k] = v + } + return r +} + +func storeMultiset(ctx context.Context, dag merkledag.DAGService, refcounts map[key.Key]uint64, internalKeys keyObserver) (*merkledag.Node, error) { + // make a working copy of the refcounts + refcounts = copyRefcounts(refcounts) + + iter := func() (k key.Key, data []byte, ok bool) { + // Every call of this function returns the next refcount item. + // + // This function splits out the uint64 reference counts as + // smaller increments, as fits in type refcount. Most of the + // time the refcount will fit inside just one, so this saves + // space. + // + // We use range here to pick an arbitrary item in the map, but + // not really iterate the map. 
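+		// For example (illustrative only): with the one-byte version-1
+		// refcounts, a key referenced 300 times is emitted as one item
+		// carrying 255 (the most a single refcount can hold) and, on a
+		// later call, a second item carrying the remaining 45.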
+ for k, refs := range refcounts { + // Max value a single multiset item can store + num := ^refcount(0) + if refs <= uint64(num) { + // Remaining count fits in a single item; remove the + // key from the map. + num = refcount(refs) + delete(refcounts, k) + } else { + // Count is too large to fit in one item, the key will + // repeat in some later call. + refcounts[k] -= uint64(num) + } + return k, num.Bytes(), true + } + return "", nil, false + } + n, err := storeItems(ctx, dag, uint64(len(refcounts)), iter, internalKeys) + if err != nil { + return nil, err + } + k, err := dag.Add(n) + if err != nil { + return nil, err + } + internalKeys(k) + return n, nil +} diff --git a/pin/set_test.go b/pin/set_test.go new file mode 100644 index 00000000000..a4874493960 --- /dev/null +++ b/pin/set_test.go @@ -0,0 +1,82 @@ +package pin + +import ( + "testing" + "testing/quick" + + "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" + dssync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync" + "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" + "github.com/ipfs/go-ipfs/blocks/blockstore" + "github.com/ipfs/go-ipfs/blocks/key" + "github.com/ipfs/go-ipfs/blockservice" + "github.com/ipfs/go-ipfs/exchange/offline" + "github.com/ipfs/go-ipfs/merkledag" +) + +func ignoreKeys(key.Key) {} + +func copyMap(m map[key.Key]uint16) map[key.Key]uint64 { + c := make(map[key.Key]uint64, len(m)) + for k, v := range m { + c[k] = uint64(v) + } + return c +} + +func TestMultisetRoundtrip(t *testing.T) { + dstore := dssync.MutexWrap(datastore.NewMapDatastore()) + bstore := blockstore.NewBlockstore(dstore) + bserv := blockservice.New(bstore, offline.Exchange(bstore)) + dag := merkledag.NewDAGService(bserv) + + fn := func(m map[key.Key]uint16) bool { + // Generate a smaller range for refcounts than full uint64, as + // otherwise this just becomes overly cpu heavy, splitting it + // out into too many items. That means we need to convert to + // the right kind of map. As storeMultiset mutates the map as + // part of its bookkeeping, this is actually good. + refcounts := copyMap(m) + + ctx := context.Background() + n, err := storeMultiset(ctx, dag, refcounts, ignoreKeys) + if err != nil { + t.Fatalf("storing multiset: %v", err) + } + root := &merkledag.Node{} + const linkName = "dummylink" + if err := root.AddNodeLink(linkName, n); err != nil { + t.Fatalf("adding link to root node: %v", err) + } + + roundtrip, err := loadMultiset(ctx, dag, root, linkName, ignoreKeys) + if err != nil { + t.Fatalf("loading multiset: %v", err) + } + + orig := copyMap(m) + success := true + for k, want := range orig { + if got, ok := roundtrip[k]; ok { + if got != want { + success = false + t.Logf("refcount changed: %v -> %v for %q", want, got, k) + } + delete(orig, k) + delete(roundtrip, k) + } + } + for k, v := range orig { + success = false + t.Logf("refcount missing: %v for %q", v, k) + } + for k, v := range roundtrip { + success = false + t.Logf("refcount extra: %v for %q", v, k) + } + return success + } + if err := quick.Check(fn, nil); err != nil { + t.Fatal(err) + } +} diff --git a/repo/config/datastore.go b/repo/config/datastore.go index 6749a4c39a0..52582bd5cb5 100644 --- a/repo/config/datastore.go +++ b/repo/config/datastore.go @@ -1,5 +1,9 @@ package config +import ( + "encoding/json" +) + // DefaultDataStoreDirectory is the directory to store all the local IPFS data. 
const DefaultDataStoreDirectory = "datastore" @@ -10,6 +14,23 @@ type Datastore struct { StorageMax string // in B, kB, kiB, MB, ... StorageGCWatermark int64 // in percentage to multiply on StorageMax GCPeriod string // in ns, us, ms, s, m, h + + Params *json.RawMessage + NoSync bool +} + +func (d *Datastore) ParamData() []byte { + if d.Params == nil { + return nil + } + + return []byte(*d.Params) +} + +type S3Datastore struct { + Region string `json:"region"` + Bucket string `json:"bucket"` + ACL string `json:"acl"` } // DataStorePath returns the default data store path given a configuration root diff --git a/repo/config/init.go b/repo/config/init.go index 4d50ac6611f..c287aee2090 100644 --- a/repo/config/init.go +++ b/repo/config/init.go @@ -11,11 +11,6 @@ import ( ) func Init(out io.Writer, nBitsForKeypair int) (*Config, error) { - ds, err := datastoreConfig() - if err != nil { - return nil, err - } - identity, err := identityConfig(out, nBitsForKeypair) if err != nil { return nil, err @@ -47,7 +42,6 @@ func Init(out io.Writer, nBitsForKeypair int) (*Config, error) { Bootstrap: BootstrapPeerStrings(bootstrapPeers), SupernodeRouting: *snr, - Datastore: *ds, Identity: identity, Discovery: Discovery{MDNS{ Enabled: true, diff --git a/repo/config/logs.go b/repo/config/logs.go deleted file mode 100644 index d912156bec0..00000000000 --- a/repo/config/logs.go +++ /dev/null @@ -1 +0,0 @@ -package config diff --git a/repo/config/version.go b/repo/config/version.go index e364d713fc9..85e269ec5d3 100644 --- a/repo/config/version.go +++ b/repo/config/version.go @@ -7,12 +7,12 @@ import ( "time" ) -// CurrentVersionNumber is the current application's version literal -const CurrentVersionNumber = "0.3.10-dev" - // CurrentCommit is the current git commit, this is set as a ldflag in the Makefile var CurrentCommit string +// CurrentVersionNumber is the current application's version literal +const CurrentVersionNumber = "0.4.0-dev" + // Version regulates checking if the most recent version is run type Version struct { // Current is the ipfs version for which config was generated diff --git a/repo/fsrepo/datastores.go b/repo/fsrepo/datastores.go new file mode 100644 index 00000000000..7ed6081372a --- /dev/null +++ b/repo/fsrepo/datastores.go @@ -0,0 +1,38 @@ +package fsrepo + +import ( + "fmt" + + "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/crowdmob/goamz/aws" + "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/crowdmob/goamz/s3" + + repo "github.com/ipfs/go-ipfs/repo" + config "github.com/ipfs/go-ipfs/repo/config" + "github.com/ipfs/go-ipfs/thirdparty/s3-datastore" +) + +func openS3Datastore(params config.S3Datastore) (repo.Datastore, error) { + // TODO support credentials files + auth, err := aws.EnvAuth() + if err != nil { + return nil, err + } + + region := aws.GetRegion(params.Region) + if region.Name == "" { + return nil, fmt.Errorf("unknown AWS region: %q", params.Region) + } + + if params.Bucket == "" { + return nil, fmt.Errorf("invalid S3 bucket: %q", params.Bucket) + } + + client := s3.New(auth, region) + // There are too many gophermucking s3datastores in my + // gophermucking source. 
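+	// The store hex-encodes datastore keys into S3 object names and,
+	// when the configured ACL is empty, falls back to s3.Private on Put
+	// (see thirdparty/s3-datastore/datastore.go later in this diff).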
+ return &s3datastore.S3Datastore{ + Client: client, + Bucket: params.Bucket, + ACL: s3.ACL(params.ACL), + }, nil +} diff --git a/repo/fsrepo/defaultds.go b/repo/fsrepo/defaultds.go new file mode 100644 index 00000000000..4bca3107188 --- /dev/null +++ b/repo/fsrepo/defaultds.go @@ -0,0 +1,87 @@ +package fsrepo + +import ( + "fmt" + "path" + + ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" + "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/flatfs" + levelds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/leveldb" + "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/measure" + mount "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/syncmount" + ldbopts "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" + repo "github.com/ipfs/go-ipfs/repo" + config "github.com/ipfs/go-ipfs/repo/config" + "github.com/ipfs/go-ipfs/thirdparty/dir" +) + +const ( + leveldbDirectory = "datastore" + flatfsDirectory = "blocks" +) + +func openDefaultDatastore(r *FSRepo) (repo.Datastore, error) { + leveldbPath := path.Join(r.path, leveldbDirectory) + + // save leveldb reference so it can be neatly closed afterward + leveldbDS, err := levelds.NewDatastore(leveldbPath, &levelds.Options{ + Compression: ldbopts.NoCompression, + }) + if err != nil { + return nil, fmt.Errorf("unable to open leveldb datastore: %v", err) + } + + // 4TB of 256kB objects ~=17M objects, splitting that 256-way + // leads to ~66k objects per dir, splitting 256*256-way leads to + // only 256. + // + // The keys seen by the block store have predictable prefixes, + // including "/" from datastore.Key and 2 bytes from multihash. To + // reach a uniform 256-way split, we need approximately 4 bytes of + // prefix. + syncfs := !r.config.Datastore.NoSync + blocksDS, err := flatfs.New(path.Join(r.path, flatfsDirectory), 4, syncfs) + if err != nil { + return nil, fmt.Errorf("unable to open flatfs datastore: %v", err) + } + + // Add our PeerID to metrics paths to keep them unique + // + // As some tests just pass a zero-value Config to fsrepo.Init, + // cope with missing PeerID. + id := r.config.Identity.PeerID + if id == "" { + // the tests pass in a zero Config; cope with it + id = fmt.Sprintf("uninitialized_%p", r) + } + prefix := "fsrepo." + id + ".datastore." + metricsBlocks := measure.New(prefix+"blocks", blocksDS) + metricsLevelDB := measure.New(prefix+"leveldb", leveldbDS) + mountDS := mount.New([]mount.Mount{ + { + Prefix: ds.NewKey("/blocks"), + Datastore: metricsBlocks, + }, + { + Prefix: ds.NewKey("/"), + Datastore: metricsLevelDB, + }, + }) + + return mountDS, nil +} + +func initDefaultDatastore(repoPath string, conf *config.Config) error { + // The actual datastore contents are initialized lazily when Opened. + // During Init, we merely check that the directory is writeable. 
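+	// Two directories back the default datastore: leveldbDirectory
+	// ("datastore") holds the leveldb metadata store, and flatfsDirectory
+	// ("blocks") holds the flatfs block store; openDefaultDatastore above
+	// mounts /blocks on the latter and everything else on the former.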
+ leveldbPath := path.Join(repoPath, leveldbDirectory) + if err := dir.Writable(leveldbPath); err != nil { + return fmt.Errorf("datastore: %s", err) + } + + flatfsPath := path.Join(repoPath, flatfsDirectory) + if err := dir.Writable(flatfsPath); err != nil { + return fmt.Errorf("datastore: %s", err) + } + return nil +} diff --git a/repo/fsrepo/fsrepo.go b/repo/fsrepo/fsrepo.go index c62e515bad3..87546bd74e7 100644 --- a/repo/fsrepo/fsrepo.go +++ b/repo/fsrepo/fsrepo.go @@ -1,6 +1,7 @@ package fsrepo import ( + "encoding/json" "errors" "fmt" "io" @@ -10,12 +11,7 @@ import ( "strings" "sync" - ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/flatfs" - levelds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/leveldb" "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/measure" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/mount" - ldbopts "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" repo "github.com/ipfs/go-ipfs/repo" "github.com/ipfs/go-ipfs/repo/common" config "github.com/ipfs/go-ipfs/repo/config" @@ -24,14 +20,13 @@ import ( serialize "github.com/ipfs/go-ipfs/repo/fsrepo/serialize" dir "github.com/ipfs/go-ipfs/thirdparty/dir" util "github.com/ipfs/go-ipfs/util" - ds2 "github.com/ipfs/go-ipfs/util/datastore2" logging "github.com/ipfs/go-ipfs/vendor/QmQg1J6vikuXF9oDvm4wpdeAUvvkVEKW1EYDw9HhTMnP2b/go-log" ) var log = logging.Logger("fsrepo") // version number that we are currently expecting to see -var RepoVersion = "2" +var RepoVersion = "3" var migrationInstructions = `See https://github.com/ipfs/fs-repo-migrations/blob/master/run.md Sorry for the inconvenience. In the future, these will run automatically.` @@ -56,11 +51,7 @@ func (err NoRepoError) Error() string { return fmt.Sprintf("no ipfs repo found in %s.\nplease run: ipfs init", err.Path) } -const ( - leveldbDirectory = "datastore" - flatfsDirectory = "blocks" - apiFile = "api" -) +const apiFile = "api" var ( @@ -94,7 +85,7 @@ type FSRepo struct { // the same fsrepo path concurrently lockfile io.Closer config *config.Config - ds ds.ThreadSafeDatastore + ds repo.Datastore } var _ repo.Repo = (*FSRepo)(nil) @@ -247,16 +238,8 @@ func Init(repoPath string, conf *config.Config) error { return err } - // The actual datastore contents are initialized lazily when Opened. - // During Init, we merely check that the directory is writeable. - leveldbPath := filepath.Join(repoPath, leveldbDirectory) - if err := dir.Writable(leveldbPath); err != nil { - return fmt.Errorf("datastore: %s", err) - } - - flatfsPath := filepath.Join(repoPath, flatfsDirectory) - if err := dir.Writable(flatfsPath); err != nil { - return fmt.Errorf("datastore: %s", err) + if err := initDefaultDatastore(repoPath, conf); err != nil { + return err } if err := dir.Writable(filepath.Join(repoPath, "logs")); err != nil { @@ -343,29 +326,31 @@ func (r *FSRepo) openConfig() error { // openDatastore returns an error if the config file is not present. 
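 // A repo config that selects the new S3 backend might look like this
 // (a hypothetical example; the Type value picks the switch case below,
 // and Params is decoded into config.S3Datastore):
 //
 //	"Datastore": {
 //		"Type": "s3",
 //		"Params": { "region": "us-east-1", "bucket": "my-ipfs-blocks", "acl": "private" }
 //	}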
func (r *FSRepo) openDatastore() error { - leveldbPath := filepath.Join(r.path, leveldbDirectory) - var err error - // save leveldb reference so it can be neatly closed afterward - leveldbDS, err := levelds.NewDatastore(leveldbPath, &levelds.Options{ - Compression: ldbopts.NoCompression, - }) - if err != nil { - return errors.New("unable to open leveldb datastore") - } + switch r.config.Datastore.Type { + case "default", "leveldb", "": + d, err := openDefaultDatastore(r) + if err != nil { + return err + } + r.ds = d + case "s3": + var dscfg config.S3Datastore + if err := json.Unmarshal(r.config.Datastore.ParamData(), &dscfg); err != nil { + return fmt.Errorf("datastore s3: %v", err) + } - // 4TB of 256kB objects ~=17M objects, splitting that 256-way - // leads to ~66k objects per dir, splitting 256*256-way leads to - // only 256. - // - // The keys seen by the block store have predictable prefixes, - // including "/" from datastore.Key and 2 bytes from multihash. To - // reach a uniform 256-way split, we need approximately 4 bytes of - // prefix. - blocksDS, err := flatfs.New(filepath.Join(r.path, flatfsDirectory), 4) - if err != nil { - return errors.New("unable to open flatfs datastore") + ds, err := openS3Datastore(dscfg) + if err != nil { + return err + } + + r.ds = ds + default: + return fmt.Errorf("unknown datastore type: %s", r.config.Datastore.Type) } + // Wrap it with metrics gathering + // // Add our PeerID to metrics paths to keep them unique // // As some tests just pass a zero-value Config to fsrepo.Init, @@ -375,27 +360,9 @@ func (r *FSRepo) openDatastore() error { // the tests pass in a zero Config; cope with it id = fmt.Sprintf("uninitialized_%p", r) } - prefix := "fsrepo." + id + ".datastore." - metricsBlocks := measure.New(prefix+"blocks", blocksDS) - metricsLevelDB := measure.New(prefix+"leveldb", leveldbDS) - mountDS := mount.New([]mount.Mount{ - { - Prefix: ds.NewKey("/blocks"), - Datastore: metricsBlocks, - }, - { - Prefix: ds.NewKey("/"), - Datastore: metricsLevelDB, - }, - }) - // Make sure it's ok to claim the virtual datastore from mount as - // threadsafe. There's no clean way to make mount itself provide - // this information without copy-pasting the code into two - // variants. This is the same dilemma as the `[].byte` attempt at - // introducing const types to Go. - var _ ds.ThreadSafeDatastore = blocksDS - var _ ds.ThreadSafeDatastore = leveldbDS - r.ds = ds2.ClaimThreadSafe{mountDS} + prefix := "fsrepo." + id + ".datastore" + r.ds = measure.New(prefix, r.ds) + return nil } @@ -408,15 +375,15 @@ func (r *FSRepo) Close() error { return errors.New("repo is closed") } - if err := r.ds.(io.Closer).Close(); err != nil { - return err - } - err := os.Remove(filepath.Join(r.path, apiFile)) if err != nil { log.Warning("error removing api file: ", err) } + if err := r.ds.Close(); err != nil { + return err + } + // This code existed in the previous versions, but // EventlogComponent.Close was never called. Preserving here // pending further discussion. @@ -579,7 +546,7 @@ func (r *FSRepo) SetConfigKey(key string, value interface{}) error { // Datastore returns a repo-owned datastore. If FSRepo is Closed, return value // is undefined. 
-func (r *FSRepo) Datastore() ds.ThreadSafeDatastore { +func (r *FSRepo) Datastore() repo.Datastore { packageLock.Lock() d := r.ds packageLock.Unlock() @@ -622,8 +589,10 @@ func isInitializedUnsynced(repoPath string) bool { if !configIsInitialized(repoPath) { return false } + if !util.FileExists(filepath.Join(repoPath, leveldbDirectory)) { return false } + return true } diff --git a/repo/fsrepo/serialize/serialize.go b/repo/fsrepo/serialize/serialize.go index 01458fe5daf..52186cc23c1 100644 --- a/repo/fsrepo/serialize/serialize.go +++ b/repo/fsrepo/serialize/serialize.go @@ -69,12 +69,5 @@ func Load(filename string) (*config.Config, error) { return nil, err } - // tilde expansion on datastore path - // TODO why is this here?? - cfg.Datastore.Path, err = util.TildeExpansion(cfg.Datastore.Path) - if err != nil { - return nil, err - } - return &cfg, err } diff --git a/repo/fsrepo/serialize/serialize_test.go b/repo/fsrepo/serialize/serialize_test.go index ce06e8d5a19..4547a4b809f 100644 --- a/repo/fsrepo/serialize/serialize_test.go +++ b/repo/fsrepo/serialize/serialize_test.go @@ -9,26 +9,25 @@ import ( func TestConfig(t *testing.T) { const filename = ".ipfsconfig" - const dsPath = "/path/to/datastore" cfgWritten := new(config.Config) - cfgWritten.Datastore.Path = dsPath + cfgWritten.Identity.PeerID = "faketest" + err := WriteConfigFile(filename, cfgWritten) if err != nil { - t.Error(err) + t.Fatal(err) } cfgRead, err := Load(filename) if err != nil { - t.Error(err) - return + t.Fatal(err) } - if cfgWritten.Datastore.Path != cfgRead.Datastore.Path { - t.Fail() + if cfgWritten.Identity.PeerID != cfgRead.Identity.PeerID { + t.Fatal() } st, err := os.Stat(filename) if err != nil { t.Fatalf("cannot stat config file: %v", err) } if g := st.Mode().Perm(); g&0117 != 0 { - t.Errorf("config file should not be executable or accessible to world: %v", g) + t.Fatalf("config file should not be executable or accessible to world: %v", g) } } diff --git a/repo/mock.go b/repo/mock.go index e79a1faef3d..bd8e72af87d 100644 --- a/repo/mock.go +++ b/repo/mock.go @@ -3,7 +3,6 @@ package repo import ( "errors" - ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" "github.com/ipfs/go-ipfs/repo/config" ) @@ -12,7 +11,7 @@ var errTODO = errors.New("TODO") // Mock is not thread-safe type Mock struct { C config.Config - D ds.ThreadSafeDatastore + D Datastore } func (m *Mock) Config() (*config.Config, error) { @@ -32,7 +31,7 @@ func (m *Mock) GetConfigKey(key string) (interface{}, error) { return nil, errTODO } -func (m *Mock) Datastore() ds.ThreadSafeDatastore { return m.D } +func (m *Mock) Datastore() Datastore { return m.D } func (m *Mock) GetStorageUsage() (uint64, error) { return 0, nil } diff --git a/repo/repo.go b/repo/repo.go index ed3b03112af..5f0512c50c0 100644 --- a/repo/repo.go +++ b/repo/repo.go @@ -4,8 +4,7 @@ import ( "errors" "io" - datastore "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" - + ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" config "github.com/ipfs/go-ipfs/repo/config" ) @@ -20,7 +19,7 @@ type Repo interface { SetConfigKey(key string, value interface{}) error GetConfigKey(key string) (interface{}, error) - Datastore() datastore.ThreadSafeDatastore + Datastore() Datastore GetStorageUsage() (uint64, error) // SetAPIAddr sets the API address in the repo. 
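The next hunk spells out the contract the rest of this change leans on: a repo datastore is now just a ds.Batching plus an io.Closer. A backend with no native batching can meet it through the generic batch wrapper, exactly as the S3 store does at the end of this diff. A minimal sketch, where myDS is a hypothetical type that already implements the plain datastore methods:

	// Close satisfies io.Closer; this sketch holds no resources.
	func (d *myDS) Close() error { return nil }

	// Batch satisfies ds.Batching via the generic fallback, as
	// S3Datastore.Batch does below.
	func (d *myDS) Batch() (ds.Batch, error) {
		return ds.NewBasicBatch(d), nil
	}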
@@ -28,3 +27,10 @@ type Repo interface { io.Closer } + +// Datastore is the interface required from a datastore to be +// acceptable to FSRepo. +type Datastore interface { + ds.Batching // should be threadsafe, just be careful + io.Closer +} diff --git a/routing/dht/dht.go b/routing/dht/dht.go index 3f50652fd9b..42a68fa5967 100644 --- a/routing/dht/dht.go +++ b/routing/dht/dht.go @@ -44,7 +44,7 @@ type IpfsDHT struct { self peer.ID // Local peer (yourself) peerstore peer.Peerstore // Peer Registry - datastore ds.ThreadSafeDatastore // Local data + datastore ds.Datastore // Local data routingTable *kb.RoutingTable // Array of routing tables for differently distanced nodes providers *ProviderManager @@ -60,7 +60,7 @@ type IpfsDHT struct { } // NewDHT creates a new DHT object with the given peer as the 'local' host -func NewDHT(ctx context.Context, h host.Host, dstore ds.ThreadSafeDatastore) *IpfsDHT { +func NewDHT(ctx context.Context, h host.Host, dstore ds.Datastore) *IpfsDHT { dht := new(IpfsDHT) dht.datastore = dstore dht.self = h.ID() diff --git a/routing/kbucket/sorting.go b/routing/kbucket/sorting.go index 31c64591a92..875b822615c 100644 --- a/routing/kbucket/sorting.go +++ b/routing/kbucket/sorting.go @@ -32,10 +32,6 @@ func copyPeersFromList(target ID, peerArr peerSorterArr, peerList *list.List) pe distance: xor(target, pID), } peerArr = append(peerArr, &pd) - if e == nil { - log.Debug("list element was nil") - return peerArr - } } return peerArr } diff --git a/routing/kbucket/table.go b/routing/kbucket/table.go index 044d3a2c289..d4cf051f330 100644 --- a/routing/kbucket/table.go +++ b/routing/kbucket/table.go @@ -155,9 +155,10 @@ func (rt *RoutingTable) NearestPeers(id ID, count int) []peer.ID { bucket = rt.Buckets[cpl] var peerArr peerSorterArr - if bucket.Len() == 0 { - // In the case of an unusual split, one bucket may be empty. - // if this happens, search both surrounding buckets for nearest peer + peerArr = copyPeersFromList(id, peerArr, bucket.list) + if len(peerArr) < count { + // In the case of an unusual split, one bucket may be short or empty. 
+ // if this happens, search both surrounding buckets for nearby peers if cpl > 0 { plist := rt.Buckets[cpl-1].list peerArr = copyPeersFromList(id, peerArr, plist) @@ -167,8 +168,6 @@ func (rt *RoutingTable) NearestPeers(id ID, count int) []peer.ID { plist := rt.Buckets[cpl+1].list peerArr = copyPeersFromList(id, peerArr, plist) } - } else { - peerArr = copyPeersFromList(id, peerArr, bucket.list) } // Sort by distance to local peer diff --git a/routing/none/none_client.go b/routing/none/none_client.go index efa0b8a996d..6d16a88bf73 100644 --- a/routing/none/none_client.go +++ b/routing/none/none_client.go @@ -3,11 +3,11 @@ package nilrouting import ( "errors" - ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" key "github.com/ipfs/go-ipfs/blocks/key" p2phost "github.com/ipfs/go-ipfs/p2p/host" peer "github.com/ipfs/go-ipfs/p2p/peer" + repo "github.com/ipfs/go-ipfs/repo" routing "github.com/ipfs/go-ipfs/routing" logging "github.com/ipfs/go-ipfs/vendor/QmQg1J6vikuXF9oDvm4wpdeAUvvkVEKW1EYDw9HhTMnP2b/go-log" ) @@ -47,7 +47,7 @@ func (c *nilclient) Bootstrap(_ context.Context) error { return nil } -func ConstructNilRouting(_ context.Context, _ p2phost.Host, _ ds.ThreadSafeDatastore) (routing.IpfsRouting, error) { +func ConstructNilRouting(_ context.Context, _ p2phost.Host, _ repo.Datastore) (routing.IpfsRouting, error) { return &nilclient{}, nil } diff --git a/routing/supernode/server.go b/routing/supernode/server.go index 97a5c832db2..ab82ab5f15c 100644 --- a/routing/supernode/server.go +++ b/routing/supernode/server.go @@ -18,13 +18,13 @@ import ( // Server handles routing queries using a database backend type Server struct { local peer.ID - routingBackend datastore.ThreadSafeDatastore + routingBackend datastore.Datastore peerstore peer.Peerstore *proxy.Loopback // so server can be injected into client } // NewServer creates a new Supernode routing Server -func NewServer(ds datastore.ThreadSafeDatastore, ps peer.Peerstore, local peer.ID) (*Server, error) { +func NewServer(ds datastore.Datastore, ps peer.Peerstore, local peer.ID) (*Server, error) { s := &Server{local, ds, ps, nil} s.Loopback = &proxy.Loopback{ Handler: s, diff --git a/tar/format.go b/tar/format.go index 8e59f02c3af..fc73e17f74b 100644 --- a/tar/format.go +++ b/tar/format.go @@ -46,7 +46,7 @@ func ImportTar(r io.Reader, ds dag.DAGService) (*dag.Node, error) { root := new(dag.Node) root.Data = []byte("ipfs/tar") - e := dagutil.NewDagEditor(ds, root) + e := dagutil.NewDagEditor(root, ds) for { h, err := tr.Next() @@ -68,7 +68,7 @@ func ImportTar(r io.Reader, ds dag.DAGService) (*dag.Node, error) { if h.Size > 0 { spl := chunk.NewRabin(tr, uint64(chunk.DefaultBlockSize)) - nd, err := importer.BuildDagFromReader(ds, spl, nil) + nd, err := importer.BuildDagFromReader(ds, spl) if err != nil { return nil, err } @@ -91,13 +91,7 @@ func ImportTar(r io.Reader, ds dag.DAGService) (*dag.Node, error) { } } - root = e.GetNode() - _, err = ds.Add(root) - if err != nil { - return nil, err - } - - return root, nil + return e.Finalize(ds) } // adds a '-' to the beginning of each path element so we can use 'data' as a @@ -178,7 +172,7 @@ func (tr *tarReader) Read(b []byte) (int, error) { tr.hdrBuf = bytes.NewReader(headerNd.Data) dataNd, err := headerNd.GetLinkedNode(tr.ctx, tr.ds, "data") - if err != nil && err != dag.ErrNotFound { + if err != nil && err != dag.ErrLinkNotFound { return 0, err } diff --git 
a/test/ipfs-test-lib.sh b/test/ipfs-test-lib.sh
index b05287c2e5b..f03bcb9097d 100644
--- a/test/ipfs-test-lib.sh
+++ b/test/ipfs-test-lib.sh
@@ -35,3 +35,38 @@ shellquote() {
 	done
 	printf '\n'
 }
+
+# Test whether all the expected lines are included in a file. The file
+# can have extra lines.
+#
+# $1 - Path to file with expected lines.
+# $2 - Path to file with actual output.
+#
+# Examples
+#
+#   test_expect_success 'foo says hello' '
+#       echo hello >expected &&
+#       foo >actual &&
+#       test_includes_lines expected actual
+#   '
+#
+# Returns success if all the expected lines are found in the actual
+# output.
+test_includes_lines() {
+	sort "$1" >"$1_sorted" &&
+	sort "$2" >"$2_sorted" &&
+	comm -2 -3 "$1_sorted" "$2_sorted" >"$2_missing" &&
+	[ ! -s "$2_missing" ] || test_fsh comm -2 -3 "$1_sorted" "$2_sorted"
+}
+
+# Relying on GNU seq being available is not nice.
+# Git also has test_seq but it uses Perl.
+test_seq() {
+	test "$1" -le "$2" || return
+	i="$1"
+	j="$2"
+	while test "$i" -le "$j"
+	do
+		echo "$i"
+		i=$(expr "$i" + 1)
+	done
+}
diff --git a/test/sharness/lib/test-lib.sh b/test/sharness/lib/test-lib.sh
index 21f566ee2bd..c9751dc8331 100644
--- a/test/sharness/lib/test-lib.sh
+++ b/test/sharness/lib/test-lib.sh
@@ -214,12 +214,20 @@ test_launch_ipfs_daemon() {
 	fi
 }
 
+do_umount() {
+	if [ "$(uname -s)" = "Linux" ]; then
+		fusermount -u "$1"
+	else
+		umount "$1"
+	fi
+}
+
 test_mount_ipfs() {
 
 	# make sure stuff is unmounted first.
 	test_expect_success FUSE "'ipfs mount' succeeds" '
-		umount "$(pwd)/ipfs" || true &&
-		umount "$(pwd)/ipns" || true &&
+		do_umount "$(pwd)/ipfs" || true &&
+		do_umount "$(pwd)/ipns" || true &&
 		ipfs mount >actual
 	'
 
diff --git a/test/sharness/t0020-init.sh b/test/sharness/t0020-init.sh
index 8416502b388..678bbbc7391 100755
--- a/test/sharness/t0020-init.sh
+++ b/test/sharness/t0020-init.sh
@@ -53,8 +53,8 @@ test_expect_success ".ipfs/ has been created" '
 '
 
 test_expect_success "ipfs config succeeds" '
-	echo leveldb >expected_config &&
-	ipfs config Datastore.Type >actual_config &&
+	echo /ipfs >expected_config &&
+	ipfs config Mounts.IPFS >actual_config &&
 	test_cmp expected_config actual_config
 '
 
diff --git a/test/sharness/t0060-daemon.sh b/test/sharness/t0060-daemon.sh
index 8084fb3743d..f793b578096 100755
--- a/test/sharness/t0060-daemon.sh
+++ b/test/sharness/t0060-daemon.sh
@@ -105,7 +105,7 @@ test_expect_success "nc is available" '
 test_expect_success "transport should be encrypted" '
 	nc -w 5 localhost 4001 >swarmnc &&
 	grep -q "AES-256,AES-128" swarmnc &&
-	test_must_fail grep -q "/ipfs/identify" swarmnc ||
+	test_must_fail grep -q "/multistream/1.0.0" swarmnc ||
 	test_fsh cat swarmnc
 '
 
diff --git a/test/sharness/t0061-daemon-opts.sh b/test/sharness/t0061-daemon-opts.sh
index f2f965fedd8..bc5df702402 100755
--- a/test/sharness/t0061-daemon-opts.sh
+++ b/test/sharness/t0061-daemon-opts.sh
@@ -29,7 +29,7 @@ test_expect_success 'api gateway should be unrestricted' '
 test_expect_success 'transport should be unencrypted' '
 	go-sleep 0.5s | nc localhost "$PORT_SWARM" >swarmnc &&
 	test_must_fail grep -q "AES-256,AES-128" swarmnc &&
-	grep -q "/ipfs/identify" swarmnc ||
+	grep -q "/multistream/1.0.0" swarmnc ||
 	test_fsh cat swarmnc
 '
 
diff --git a/test/sharness/t0080-repo.sh b/test/sharness/t0080-repo.sh
index fe0cf55b541..1ab6238096d 100755
--- a/test/sharness/t0080-repo.sh
+++ b/test/sharness/t0080-repo.sh
@@ -15,11 +15,6 @@ test_expect_success "'ipfs repo gc' succeeds" '
 	ipfs repo gc >gc_out_actual
 '
 
-test_expect_success "'ipfs repo gc' looks good (empty)" '
-	true >empty &&
-	test_cmp empty
gc_out_actual -' - test_expect_success "'ipfs add afile' succeeds" ' echo "some text" >afile && HASH=`ipfs add -q afile` @@ -36,8 +31,7 @@ test_expect_success "'ipfs repo gc' succeeds" ' test_expect_success "'ipfs repo gc' looks good (patch root)" ' PATCH_ROOT=QmQXirSbubiySKnqaFyfs5YzziXRB5JEVQVjU6xsd7innr && - echo "removed $PATCH_ROOT" >patch_root && - test_cmp patch_root gc_out_actual + grep "removed $PATCH_ROOT" gc_out_actual ' test_expect_success "'ipfs repo gc' doesnt remove file" ' @@ -66,13 +60,8 @@ test_expect_failure "ipfs repo gc fully reverse ipfs add" ' ' test_expect_success "file no longer pinned" ' - # we expect the welcome files to show up here - echo "$HASH_WELCOME_DOCS" >expected2 && - ipfs refs -r "$HASH_WELCOME_DOCS" >>expected2 && - EMPTY_DIR=QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn && - echo "$EMPTY_DIR" >>expected2 && ipfs pin ls --type=recursive --quiet >actual2 && - test_sort_cmp expected2 actual2 + test_expect_code 1 grep $HASH actual2 ' test_expect_success "recursively pin afile(default action)" ' @@ -114,10 +103,9 @@ test_expect_success "remove direct pin" ' ' test_expect_success "'ipfs repo gc' removes file" ' - echo "removed $PATCH_ROOT" >expected7 && - echo "removed $HASH" >>expected7 && ipfs repo gc >actual7 && - test_sort_cmp expected7 actual7 + grep "removed $HASH" actual7 && + grep "removed $PATCH_ROOT" actual7 ' # TODO: there seems to be a serious bug with leveldb not returning a key. @@ -138,7 +126,7 @@ test_expect_success "adding multiblock random file succeeds" ' test_expect_success "'ipfs pin ls --type=indirect' is correct" ' ipfs refs "$MBLOCKHASH" >refsout && ipfs refs -r "$HASH_WELCOME_DOCS" >>refsout && - sed -i="" "s/\(.*\)/\1 indirect/g" refsout && + sed -i"~" "s/\(.*\)/\1 indirect/g" refsout && ipfs pin ls --type=indirect >indirectpins && test_sort_cmp refsout indirectpins ' @@ -165,8 +153,7 @@ test_expect_success "'ipfs pin ls --type=recursive' is correct" ' echo "$MBLOCKHASH" >rp_expected && echo "$HASH_WELCOME_DOCS" >>rp_expected && echo "$EMPTY_DIR" >>rp_expected && - ipfs refs -r "$HASH_WELCOME_DOCS" >>rp_expected && - sed -i="" "s/\(.*\)/\1 recursive/g" rp_expected && + sed -i"~" "s/\(.*\)/\1 recursive/g" rp_expected && ipfs pin ls --type=recursive >rp_actual && test_sort_cmp rp_expected rp_actual ' diff --git a/test/sharness/t0081-repo-pinning.sh b/test/sharness/t0081-repo-pinning.sh index 1c062d79b69..f57a8630392 100755 --- a/test/sharness/t0081-repo-pinning.sh +++ b/test/sharness/t0081-repo-pinning.sh @@ -71,6 +71,9 @@ HASH_DIR4="QmW98gV71Ns4bX7QbgWAqLiGF3SDC1JpveZSgBh4ExaSAd" HASH_DIR3="QmRsCaNBMkweZ9vHT5PJRd2TT9rtNKEKyuognCEVxZxF1H" HASH_DIR2="QmTUTQAgeVfughDSFukMZLbfGvetDJY7Ef5cDXkKK4abKC" HASH_DIR1="QmNyZVFbgvmzguS2jVMRb8PQMNcCMJrn9E3doDhBbcPNTY" +HASH_NOPINDIR="QmWHjrRJYSfYKz5V9dWWSKu47GdY7NewyRhyTiroXgWcDU" +HASH_NOPIN_FILE1="QmUJT3GQi1dxQyTZbkaWeer9GkCn1d3W3HHRLSDr6PTcpx" +HASH_NOPIN_FILE2="QmarR7m9JT7qHEGhuFNZUEMAnoZ8E9QAfsthHCQ9Y2GfoT" DIR1="dir1" DIR2="dir1/dir2" @@ -150,8 +153,7 @@ test_expect_success "nothing is pinned directly" ' ' test_expect_success "'ipfs repo gc' succeeds" ' - ipfs repo gc >gc_out_actual && - test_must_be_empty gc_out_actual + ipfs repo gc >gc_out_actual ' test_expect_success "objects are still there" ' @@ -217,7 +219,7 @@ test_expect_success "'ipfs repo gc' succeeds" ' echo "removed $HASH_FILE3" > gc_out_exp2 && echo "removed $HASH_FILE5" >> gc_out_exp2 && echo "removed $HASH_DIR3" >> gc_out_exp2 && - test_sort_cmp gc_out_exp2 gc_out_actual2 + test_includes_lines gc_out_exp2 gc_out_actual2 ' # 
use object links for HASH_DIR1 here because its children @@ -249,6 +251,34 @@ test_expect_success "recursive pin fails without objects" ' test_fsh cat err_expected8 ' +test_expect_success "test add nopin file" ' + echo "test nopin data" > test_nopin_data && + NOPINHASH=$(ipfs add -q --pin=false test_nopin_data) && + test_pin_flag "$NOPINHASH" direct false && + test_pin_flag "$NOPINHASH" indirect false && + test_pin_flag "$NOPINHASH" recursive false +' + + +test_expect_success "test add nopin dir" ' + mkdir nopin_dir1 && + echo "some nopin text 1" >nopin_dir1/file1 && + echo "some nopin text 2" >nopin_dir1/file2 && + ipfs add -q -r --pin=false nopin_dir1 | tail -n1 >actual1 && + echo "$HASH_NOPINDIR" >expected1 && + test_cmp actual1 expected1 && + test_pin_flag "$HASH_NOPINDIR" direct false && + test_pin_flag "$HASH_NOPINDIR" indirect false && + test_pin_flag "$HASH_NOPINDIR" recursive false && + test_pin_flag "$HASH_NOPIN_FILE1" direct false && + test_pin_flag "$HASH_NOPIN_FILE1" indirect false && + test_pin_flag "$HASH_NOPIN_FILE1" recursive false && + test_pin_flag "$HASH_NOPIN_FILE2" direct false && + test_pin_flag "$HASH_NOPIN_FILE2" indirect false && + test_pin_flag "$HASH_NOPIN_FILE2" recursive false + +' + # test_kill_ipfs_daemon test_done diff --git a/test/sharness/t0250-files-api.sh b/test/sharness/t0250-files-api.sh new file mode 100755 index 00000000000..b011a8bd57a --- /dev/null +++ b/test/sharness/t0250-files-api.sh @@ -0,0 +1,343 @@ +#!/bin/sh +# +# Copyright (c) 2015 Jeromy Johnson +# MIT Licensed; see the LICENSE file in this repository. +# + +test_description="test the unix files api" + +. lib/test-lib.sh + +test_init_ipfs + +# setup files for testing +test_expect_success "can create some files for testing" ' + FILE1=$(echo foo | ipfs add -q) && + FILE2=$(echo bar | ipfs add -q) && + FILE3=$(echo baz | ipfs add -q) && + mkdir stuff_test && + echo cats > stuff_test/a && + echo dogs > stuff_test/b && + echo giraffes > stuff_test/c && + DIR1=$(ipfs add -q stuff_test | tail -n1) +' + +verify_path_exists() { + # simply running ls on a file should be a good 'check' + ipfs files ls $1 +} + +verify_dir_contents() { + dir=$1 + shift + rm -f expected + touch expected + for e in $@ + do + echo $e >> expected + done + + test_expect_success "can list dir" ' + ipfs files ls $dir > output + ' + + test_expect_success "dir entries look good" ' + test_sort_cmp output expected + ' +} + +test_files_api() { + test_expect_success "can mkdir in root" ' + ipfs files mkdir /cats + ' + + test_expect_success "directory was created" ' + verify_path_exists /cats + ' + + test_expect_success "directory is empty" ' + verify_dir_contents /cats + ' + + test_expect_success "check root hash" ' + ipfs files stat / | head -n1 > roothash + ' + + test_expect_success "cannot mkdir /" ' + test_expect_code 1 ipfs files mkdir / + ' + + test_expect_success "check root hash was not changed" ' + ipfs files stat / | head -n1 > roothashafter && + test_cmp roothash roothashafter + ' + + test_expect_success "can put files into directory" ' + ipfs files cp /ipfs/$FILE1 /cats/file1 + ' + + test_expect_success "file shows up in directory" ' + verify_dir_contents /cats file1 + ' + + test_expect_success "can read file" ' + ipfs files read /cats/file1 > file1out + ' + + test_expect_success "output looks good" ' + echo foo > expected && + test_cmp expected file1out + ' + + test_expect_success "can put another file into root" ' + ipfs files cp /ipfs/$FILE2 /file2 + ' + + test_expect_success "file shows up in root" ' + 
verify_dir_contents / file2 cats + ' + + test_expect_success "can read file" ' + ipfs files read /file2 > file2out + ' + + test_expect_success "output looks good" ' + echo bar > expected && + test_cmp expected file2out + ' + + test_expect_success "can make deep directory" ' + ipfs files mkdir -p /cats/this/is/a/dir + ' + + test_expect_success "directory was created correctly" ' + verify_path_exists /cats/this/is/a/dir && + verify_dir_contents /cats this file1 && + verify_dir_contents /cats/this is && + verify_dir_contents /cats/this/is a && + verify_dir_contents /cats/this/is/a dir && + verify_dir_contents /cats/this/is/a/dir + ' + + test_expect_success "can copy file into new dir" ' + ipfs files cp /ipfs/$FILE3 /cats/this/is/a/dir/file3 + ' + + test_expect_success "can read file" ' + ipfs files read /cats/this/is/a/dir/file3 > output + ' + + test_expect_success "output looks good" ' + echo baz > expected && + test_cmp expected output + ' + + test_expect_success "file shows up in dir" ' + verify_dir_contents /cats/this/is/a/dir file3 + ' + + test_expect_success "can remove file" ' + ipfs files rm /cats/this/is/a/dir/file3 + ' + + test_expect_success "file no longer appears" ' + verify_dir_contents /cats/this/is/a/dir + ' + + test_expect_success "can remove dir" ' + ipfs files rm -r /cats/this/is/a/dir + ' + + test_expect_success "dir no longer appears" ' + verify_dir_contents /cats/this/is/a + ' + + test_expect_success "can remove file from root" ' + ipfs files rm /file2 + ' + + test_expect_success "file no longer appears" ' + verify_dir_contents / cats + ' + + test_expect_success "check root hash" ' + ipfs files stat / | head -n1 > roothash + ' + + test_expect_success "cannot remove root" ' + test_expect_code 1 ipfs files rm -r / + ' + + test_expect_success "check root hash was not changed" ' + ipfs files stat / | head -n1 > roothashafter && + test_cmp roothash roothashafter + ' + + # test read options + + test_expect_success "read from offset works" ' + ipfs files read -o 1 /cats/file1 > output + ' + + test_expect_success "output looks good" ' + echo oo > expected && + test_cmp expected output + ' + + test_expect_success "read with size works" ' + ipfs files read -n 2 /cats/file1 > output + ' + + test_expect_success "output looks good" ' + printf fo > expected && + test_cmp expected output + ' + + test_expect_success "cannot read from negative offset" ' + test_expect_code 1 ipfs files read --offset -3 /cats/file1 + ' + + test_expect_success "read from offset 0 works" ' + ipfs files read --offset 0 /cats/file1 > output + ' + + test_expect_success "output looks good" ' + echo foo > expected && + test_cmp expected output + ' + + test_expect_success "read last byte works" ' + ipfs files read --offset 2 /cats/file1 > output + ' + + test_expect_success "output looks good" ' + echo o > expected && + test_cmp expected output + ' + + test_expect_success "offset past end of file fails" ' + test_expect_code 1 ipfs files read --offset 5 /cats/file1 + ' + + test_expect_success "cannot read negative count bytes" ' + test_expect_code 1 ipfs read --count -1 /cats/file1 + ' + + test_expect_success "reading zero bytes prints nothing" ' + ipfs files read --count 0 /cats/file1 > output + ' + + test_expect_success "output looks good" ' + printf "" > expected && + test_cmp expected output + ' + + test_expect_success "count > len(file) prints entire file" ' + ipfs files read --count 200 /cats/file1 > output + ' + + test_expect_success "output looks good" ' + echo foo > expected && + test_cmp expected output + 
'
+
+	# test write
+
+	test_expect_success "can write file" '
+		echo "ipfs rocks" > tmpfile &&
+		cat tmpfile | ipfs files write --create /cats/ipfs
+	'
+
+	test_expect_success "file was created" '
+		verify_dir_contents /cats ipfs file1 this
+	'
+
+	test_expect_success "can read file we just wrote" '
+		ipfs files read /cats/ipfs > output
+	'
+
+	test_expect_success "can write to offset" '
+		echo "is super cool" | ipfs files write -o 5 /cats/ipfs
+	'
+
+	test_expect_success "file looks correct" '
+		echo "ipfs is super cool" > expected &&
+		ipfs files read /cats/ipfs > output &&
+		test_cmp expected output
+	'
+
+	test_expect_success "can't write to negative offset" '
+		ipfs files stat /cats/ipfs | head -n1 > filehash &&
+		test_expect_code 1 ipfs files write --offset -1 /cats/ipfs < output
+	'
+
+	test_expect_success "verify file was not changed" '
+		ipfs files stat /cats/ipfs | head -n1 > afterhash &&
+		test_cmp filehash afterhash
+	'
+
+	test_expect_success "write new file for testing" '
+		echo foobar | ipfs files write --create /fun
+	'
+
+	test_expect_success "write to offset past end works" '
+		echo blah | ipfs files write --offset 50 /fun
+	'
+
+	test_expect_success "can read file" '
+		ipfs files read /fun > sparse_output
+	'
+
+	test_expect_success "output looks good" '
+		echo foobar > sparse_expected &&
+		echo blah | dd of=sparse_expected bs=50 seek=1 &&
+		test_cmp sparse_expected sparse_output
+	'
+
+	test_expect_success "cleanup" '
+		ipfs files rm /fun
+	'
+
+	test_expect_success "cannot write to directory" '
+		ipfs files stat /cats | head -n1 > dirhash &&
+		test_expect_code 1 ipfs files write /cats < output
+	'
+
+	test_expect_success "verify dir was not changed" '
+		ipfs files stat /cats | head -n1 > afterdirhash &&
+		test_cmp dirhash afterdirhash
+	'
+
+	test_expect_success "cannot write to nonexistent path" '
+		test_expect_code 1 ipfs files write /cats/bar/ < output
+	'
+
+	test_expect_success "no new paths were created" '
+		verify_dir_contents /cats file1 ipfs this
+	'
+
+	# test mv
+	test_expect_success "can mv dir" '
+		ipfs files mv /cats/this/is /cats/
+	'
+
+	test_expect_success "mv worked" '
+		verify_dir_contents /cats file1 ipfs this is &&
+		verify_dir_contents /cats/this
+	'
+
+	test_expect_success "cleanup, remove 'cats'" '
+		ipfs files rm -r /cats
+	'
+
+	test_expect_success "cleanup looks good" '
+		verify_dir_contents /
+	'
+}
+
+# test offline and online
+test_files_api
+test_launch_ipfs_daemon
+test_files_api
+test_kill_ipfs_daemon
+test_done
diff --git a/thirdparty/s3-datastore/datastore.go b/thirdparty/s3-datastore/datastore.go
index 87e21d72932..2c6a8946100 100644
--- a/thirdparty/s3-datastore/datastore.go
+++ b/thirdparty/s3-datastore/datastore.go
@@ -1,6 +1,7 @@
 package s3datastore
 
 import (
+	"encoding/hex"
 	"errors"
 
 	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/crowdmob/goamz/s3"
@@ -17,6 +18,19 @@ var ErrInvalidType = errors.New("s3 datastore: invalid type error")
 type S3Datastore struct {
 	Client *s3.S3
 	Bucket string
+	ACL    s3.ACL
+}
+
+func (ds *S3Datastore) encode(key datastore.Key) string {
+	return hex.EncodeToString(key.Bytes())
+}
+
+func (ds *S3Datastore) decode(raw string) (datastore.Key, bool) {
+	k, err := hex.DecodeString(raw)
+	if err != nil {
+		return datastore.Key{}, false
+	}
+	return datastore.NewKey(string(k)), true
 }
 
 func (ds *S3Datastore) Put(key datastore.Key, value interface{}) (err error) {
@@ -24,24 +38,41 @@ func (ds *S3Datastore) Put(key datastore.Key, value interface{}) (err error) {
 	if !ok {
 		return ErrInvalidType
 	}
-
-	// TODO extract perms and s3 options
-	return ds.Client.Bucket(ds.Bucket).Put(key.String(), data, "application/protobuf", s3.PublicRead, s3.Options{})
+	// TODO extract s3 options
+
+	k := ds.encode(key)
+	acl := ds.ACL
+	if acl == "" {
+		acl = s3.Private
+	}
+	return ds.Client.Bucket(ds.Bucket).Put(k, data, "application/protobuf", acl, s3.Options{})
 }
 
 func (ds *S3Datastore) Get(key datastore.Key) (value interface{}, err error) {
-	return ds.Client.Bucket(ds.Bucket).Get(key.String())
+	k := ds.encode(key)
+	return ds.Client.Bucket(ds.Bucket).Get(k)
 }
 
 func (ds *S3Datastore) Has(key datastore.Key) (exists bool, err error) {
-	return ds.Client.Bucket(ds.Bucket).Exists(key.String())
+	k := ds.encode(key)
+	return ds.Client.Bucket(ds.Bucket).Exists(k)
 }
 
 func (ds *S3Datastore) Delete(key datastore.Key) (err error) {
-	return ds.Client.Bucket(ds.Bucket).Del(key.String())
+	k := ds.encode(key)
+	return ds.Client.Bucket(ds.Bucket).Del(k)
 }
 
 func (ds *S3Datastore) Query(q query.Query) (query.Results, error) {
 	return nil, errors.New("TODO implement query for s3 datastore?")
 }
 
+func (ds *S3Datastore) Close() error {
+	return nil
+}
+
+func (ds *S3Datastore) Batch() (datastore.Batch, error) {
+	return datastore.NewBasicBatch(ds), nil
+}
+
 func (ds *S3Datastore) IsThreadSafe() {}
diff --git a/unixfs/format.go b/unixfs/format.go
index 9193ddede17..472a575e7cd 100644
--- a/unixfs/format.go
+++ b/unixfs/format.go
@@ -67,6 +67,7 @@ func WrapData(b []byte) []byte {
 	typ := pb.Data_Raw
 	pbdata.Data = b
 	pbdata.Type = &typ
+	pbdata.Filesize = proto.Uint64(uint64(len(b)))
 
 	out, err := proto.Marshal(pbdata)
 	if err != nil {
diff --git a/unixfs/mod/dagmodifier.go b/unixfs/mod/dagmodifier.go
index 5f5eddc9044..aa4de8caf84 100644
--- a/unixfs/mod/dagmodifier.go
+++ b/unixfs/mod/dagmodifier.go
@@ -11,12 +11,10 @@ import (
 	context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
 
 	key "github.com/ipfs/go-ipfs/blocks/key"
-	imp "github.com/ipfs/go-ipfs/importer"
 	chunk "github.com/ipfs/go-ipfs/importer/chunk"
 	help "github.com/ipfs/go-ipfs/importer/helpers"
 	trickle "github.com/ipfs/go-ipfs/importer/trickle"
 	mdag "github.com/ipfs/go-ipfs/merkledag"
-	pin "github.com/ipfs/go-ipfs/pin"
 	ft "github.com/ipfs/go-ipfs/unixfs"
 	uio "github.com/ipfs/go-ipfs/unixfs/io"
 	logging "github.com/ipfs/go-ipfs/vendor/QmQg1J6vikuXF9oDvm4wpdeAUvvkVEKW1EYDw9HhTMnP2b/go-log"
@@ -37,7 +35,6 @@ var log = logging.Logger("dagio")
 type DagModifier struct {
 	dagserv mdag.DAGService
 	curNode *mdag.Node
-	mp      pin.ManualPinner
 
 	splitter   chunk.SplitterGen
 	ctx        context.Context
@@ -50,13 +47,12 @@ type DagModifier struct {
 	read *uio.DagReader
 }
 
-func NewDagModifier(ctx context.Context, from *mdag.Node, serv mdag.DAGService, mp pin.ManualPinner, spl chunk.SplitterGen) (*DagModifier, error) {
+func NewDagModifier(ctx context.Context, from *mdag.Node, serv mdag.DAGService, spl chunk.SplitterGen) (*DagModifier, error) {
 	return &DagModifier{
 		curNode:  from.Copy(),
 		dagserv:  serv,
 		splitter: spl,
 		ctx:      ctx,
-		mp:       mp,
 	}, nil
 }
 
@@ -175,7 +171,7 @@ func (dm *DagModifier) Sync() error {
 	buflen := dm.wrBuf.Len()
 
-	// Grab key for unpinning after mod operation
-	curk, err := dm.curNode.Key()
+	// Pinning is handled by the caller now; just check the key is computable
+	_, err := dm.curNode.Key()
 	if err != nil {
 		return err
 	}
@@ -209,14 +205,6 @@ func (dm *DagModifier) Sync() error {
 		dm.curNode = nd
 	}
 
-	// Finalize correct pinning, and flush pinner
-	dm.mp.PinWithMode(thisk, pin.Recursive)
-	dm.mp.RemovePinWithMode(curk, pin.Recursive)
-	err = dm.mp.Flush()
-	if err != nil {
-		return err
-	}
-
 	dm.writeStart += uint64(buflen)
 
 	dm.wrBuf = nil
 
@@ -265,10 +253,6 @@ func (dm *DagModifier) modifyDag(node *mdag.Node, offset uint64, data io.Reader)
 	for i, bs := range f.GetBlocksizes() {
 		// We found the correct child to write into
 		if cur+bs > offset {
-			// Unpin block
-			ckey := key.Key(node.Links[i].Hash)
-			dm.mp.RemovePinWithMode(ckey, pin.Indirect)
-
 			child, err := node.Links[i].GetNode(dm.ctx, dm.dagserv)
 			if err != nil {
 				return "", false, err
@@ -278,9 +262,6 @@ func (dm *DagModifier) modifyDag(node *mdag.Node, offset uint64, data io.Reader)
 				return "", false, err
 			}
 
-			// pin the new node
-			dm.mp.PinWithMode(k, pin.Indirect)
-
 			offset += bs
 			node.Links[i].Hash = mh.Multihash(k)
 
@@ -309,7 +290,6 @@ func (dm *DagModifier) appendData(node *mdag.Node, blks <-chan []byte, errs <-ch
 	dbp := &help.DagBuilderParams{
 		Dagserv:  dm.dagserv,
 		Maxlinks: help.DefaultLinksPerBlock,
-		NodeCB:   imp.BasicPinnerCB(dm.mp),
 	}
 
 	return trickle.TrickleAppend(dm.ctx, node, dbp.New(blks, errs))
@@ -388,19 +368,31 @@ func (dm *DagModifier) Seek(offset int64, whence int) (int64, error) {
 		return 0, err
 	}
 
+	fisize, err := dm.Size()
+	if err != nil {
+		return 0, err
+	}
+
+	var newoffset uint64
 	switch whence {
 	case os.SEEK_CUR:
-		dm.curWrOff += uint64(offset)
-		dm.writeStart = dm.curWrOff
+		newoffset = dm.curWrOff + uint64(offset)
 	case os.SEEK_SET:
-		dm.curWrOff = uint64(offset)
-		dm.writeStart = uint64(offset)
+		newoffset = uint64(offset)
 	case os.SEEK_END:
 		return 0, ErrSeekEndNotImpl
 	default:
 		return 0, ErrUnrecognizedWhence
 	}
 
+	if int64(newoffset) > fisize {
+		if err := dm.expandSparse(int64(newoffset) - fisize); err != nil {
+			return 0, err
+		}
+	}
+	dm.curWrOff = newoffset
+	dm.writeStart = newoffset
+
 	if dm.read != nil {
 		_, err = dm.read.Seek(offset, whence)
 		if err != nil {
diff --git a/unixfs/mod/dagmodifier_test.go b/unixfs/mod/dagmodifier_test.go
index 475e7c6c412..f3341690c08 100644
--- a/unixfs/mod/dagmodifier_test.go
+++ b/unixfs/mod/dagmodifier_test.go
@@ -4,13 +4,11 @@ import (
 	"fmt"
 	"io"
 	"io/ioutil"
-	"math/rand"
 	"os"
 	"testing"
 
 	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync"
 	"github.com/ipfs/go-ipfs/blocks/blockstore"
-	key "github.com/ipfs/go-ipfs/blocks/key"
 	bs "github.com/ipfs/go-ipfs/blockservice"
 	"github.com/ipfs/go-ipfs/exchange/offline"
 	imp "github.com/ipfs/go-ipfs/importer"
@@ -18,7 +16,6 @@ import (
 	h "github.com/ipfs/go-ipfs/importer/helpers"
 	trickle "github.com/ipfs/go-ipfs/importer/trickle"
 	mdag "github.com/ipfs/go-ipfs/merkledag"
-	pin "github.com/ipfs/go-ipfs/pin"
 	ft "github.com/ipfs/go-ipfs/unixfs"
 	uio "github.com/ipfs/go-ipfs/unixfs/io"
 	u "github.com/ipfs/go-ipfs/util"
@@ -27,27 +24,26 @@ import (
 	context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
 )
 
-func getMockDagServ(t testing.TB) (mdag.DAGService, pin.ManualPinner) {
+func getMockDagServ(t testing.TB) mdag.DAGService {
 	dstore := ds.NewMapDatastore()
 	tsds := sync.MutexWrap(dstore)
 	bstore := blockstore.NewBlockstore(tsds)
 	bserv := bs.New(bstore, offline.Exchange(bstore))
-	dserv := mdag.NewDAGService(bserv)
-	return dserv, pin.NewPinner(tsds, dserv).GetManual()
+	return mdag.NewDAGService(bserv)
 }
 
-func getMockDagServAndBstore(t testing.TB) (mdag.DAGService, blockstore.Blockstore, pin.ManualPinner) {
+func getMockDagServAndBstore(t testing.TB) (mdag.DAGService, blockstore.GCBlockstore) {
 	dstore := ds.NewMapDatastore()
 	tsds := sync.MutexWrap(dstore)
 	bstore := blockstore.NewBlockstore(tsds)
 	bserv := bs.New(bstore, offline.Exchange(bstore))
 	dserv :=
mdag.NewDAGService(bserv) - return dserv, bstore, pin.NewPinner(tsds, dserv).GetManual() + return dserv, bstore } -func getNode(t testing.TB, dserv mdag.DAGService, size int64, pinner pin.ManualPinner) ([]byte, *mdag.Node) { +func getNode(t testing.TB, dserv mdag.DAGService, size int64) ([]byte, *mdag.Node) { in := io.LimitReader(u.NewTimeSeededRand(), size) - node, err := imp.BuildTrickleDagFromReader(dserv, sizeSplitterGen(500)(in), imp.BasicPinnerCB(pinner)) + node, err := imp.BuildTrickleDagFromReader(dserv, sizeSplitterGen(500)(in)) if err != nil { t.Fatal(err) } @@ -118,12 +114,12 @@ func sizeSplitterGen(size int64) chunk.SplitterGen { } func TestDagModifierBasic(t *testing.T) { - dserv, pin := getMockDagServ(t) - b, n := getNode(t, dserv, 50000, pin) + dserv := getMockDagServ(t) + b, n := getNode(t, dserv, 50000) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - dagmod, err := NewDagModifier(ctx, n, dserv, pin, sizeSplitterGen(512)) + dagmod, err := NewDagModifier(ctx, n, dserv, sizeSplitterGen(512)) if err != nil { t.Fatal(err) } @@ -172,13 +168,13 @@ func TestDagModifierBasic(t *testing.T) { } func TestMultiWrite(t *testing.T) { - dserv, pins := getMockDagServ(t) - _, n := getNode(t, dserv, 0, pins) + dserv := getMockDagServ(t) + _, n := getNode(t, dserv, 0) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - dagmod, err := NewDagModifier(ctx, n, dserv, pins, sizeSplitterGen(512)) + dagmod, err := NewDagModifier(ctx, n, dserv, sizeSplitterGen(512)) if err != nil { t.Fatal(err) } @@ -225,13 +221,13 @@ func TestMultiWrite(t *testing.T) { } func TestMultiWriteAndFlush(t *testing.T) { - dserv, pins := getMockDagServ(t) - _, n := getNode(t, dserv, 0, pins) + dserv := getMockDagServ(t) + _, n := getNode(t, dserv, 0) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - dagmod, err := NewDagModifier(ctx, n, dserv, pins, sizeSplitterGen(512)) + dagmod, err := NewDagModifier(ctx, n, dserv, sizeSplitterGen(512)) if err != nil { t.Fatal(err) } @@ -273,13 +269,13 @@ func TestMultiWriteAndFlush(t *testing.T) { } func TestWriteNewFile(t *testing.T) { - dserv, pins := getMockDagServ(t) - _, n := getNode(t, dserv, 0, pins) + dserv := getMockDagServ(t) + _, n := getNode(t, dserv, 0) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - dagmod, err := NewDagModifier(ctx, n, dserv, pins, sizeSplitterGen(512)) + dagmod, err := NewDagModifier(ctx, n, dserv, sizeSplitterGen(512)) if err != nil { t.Fatal(err) } @@ -316,13 +312,13 @@ func TestWriteNewFile(t *testing.T) { } func TestMultiWriteCoal(t *testing.T) { - dserv, pins := getMockDagServ(t) - _, n := getNode(t, dserv, 0, pins) + dserv := getMockDagServ(t) + _, n := getNode(t, dserv, 0) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - dagmod, err := NewDagModifier(ctx, n, dserv, pins, sizeSplitterGen(512)) + dagmod, err := NewDagModifier(ctx, n, dserv, sizeSplitterGen(512)) if err != nil { t.Fatal(err) } @@ -362,13 +358,13 @@ func TestMultiWriteCoal(t *testing.T) { } func TestLargeWriteChunks(t *testing.T) { - dserv, pins := getMockDagServ(t) - _, n := getNode(t, dserv, 0, pins) + dserv := getMockDagServ(t) + _, n := getNode(t, dserv, 0) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - dagmod, err := NewDagModifier(ctx, n, dserv, pins, sizeSplitterGen(512)) + dagmod, err := NewDagModifier(ctx, n, dserv, sizeSplitterGen(512)) if err != nil { t.Fatal(err) } @@ -401,12 +397,12 @@ func TestLargeWriteChunks(t *testing.T) { } 
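The shape of the new API is easiest to see outside the diff. Below is a minimal sketch of driving the pinner-free DagModifier, reusing the test helpers defined above (getMockDagServ, getNode, sizeSplitterGen); the seek target and write payload are illustrative only, not taken from the patch:

	// Sketch: NewDagModifier no longer takes a pin.ManualPinner;
	// pinning is now the caller's responsibility.
	dserv := getMockDagServ(t)
	_, n := getNode(t, dserv, 0)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	dagmod, err := NewDagModifier(ctx, n, dserv, sizeSplitterGen(512))
	if err != nil {
		t.Fatal(err)
	}

	// Seeking past EOF is now legal: Seek zero-fills the gap via
	// expandSparse, so the file grows to the seek target before the
	// write lands (this is what TestSeekPastEndWrite below exercises).
	if _, err := dagmod.Seek(100, os.SEEK_SET); err != nil {
		t.Fatal(err)
	}
	if _, err := dagmod.Write([]byte("tail")); err != nil {
		t.Fatal(err)
	}
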
func TestDagTruncate(t *testing.T) { - dserv, pins := getMockDagServ(t) - b, n := getNode(t, dserv, 50000, pins) + dserv := getMockDagServ(t) + b, n := getNode(t, dserv, 50000) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - dagmod, err := NewDagModifier(ctx, n, dserv, pins, sizeSplitterGen(512)) + dagmod, err := NewDagModifier(ctx, n, dserv, sizeSplitterGen(512)) if err != nil { t.Fatal(err) } @@ -415,6 +411,14 @@ func TestDagTruncate(t *testing.T) { if err != nil { t.Fatal(err) } + size, err := dagmod.Size() + if err != nil { + t.Fatal(err) + } + + if size != 12345 { + t.Fatal("size was incorrect!") + } _, err = dagmod.Seek(0, os.SEEK_SET) if err != nil { @@ -429,15 +433,29 @@ func TestDagTruncate(t *testing.T) { if err = arrComp(out, b[:12345]); err != nil { t.Fatal(err) } + + err = dagmod.Truncate(10) + if err != nil { + t.Fatal(err) + } + + size, err = dagmod.Size() + if err != nil { + t.Fatal(err) + } + + if size != 10 { + t.Fatal("size was incorrect!") + } } func TestSparseWrite(t *testing.T) { - dserv, pins := getMockDagServ(t) - _, n := getNode(t, dserv, 0, pins) + dserv := getMockDagServ(t) + _, n := getNode(t, dserv, 0) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - dagmod, err := NewDagModifier(ctx, n, dserv, pins, sizeSplitterGen(512)) + dagmod, err := NewDagModifier(ctx, n, dserv, sizeSplitterGen(512)) if err != nil { t.Fatal(err) } @@ -469,136 +487,63 @@ func TestSparseWrite(t *testing.T) { } } -func basicGC(t *testing.T, bs blockstore.Blockstore, pins pin.ManualPinner) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() // in case error occurs during operation - keychan, err := bs.AllKeysChan(ctx) - if err != nil { - t.Fatal(err) - } - for k := range keychan { // rely on AllKeysChan to close chan - if !pins.IsPinned(k) { - err := bs.DeleteBlock(k) - if err != nil { - t.Fatal(err) - } - } - } -} -func TestCorrectPinning(t *testing.T) { - dserv, bstore, pins := getMockDagServAndBstore(t) - b, n := getNode(t, dserv, 50000, pins) +func TestSeekPastEndWrite(t *testing.T) { + dserv := getMockDagServ(t) + _, n := getNode(t, dserv, 0) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - dagmod, err := NewDagModifier(ctx, n, dserv, pins, sizeSplitterGen(512)) + dagmod, err := NewDagModifier(ctx, n, dserv, sizeSplitterGen(512)) if err != nil { t.Fatal(err) } - buf := make([]byte, 1024) - for i := 0; i < 100; i++ { - size, err := dagmod.Size() - if err != nil { - t.Fatal(err) - } - offset := rand.Intn(int(size)) - u.NewTimeSeededRand().Read(buf) - - if offset+len(buf) > int(size) { - b = append(b[:offset], buf...) 
- } else { - copy(b[offset:], buf) - } - - n, err := dagmod.WriteAt(buf, int64(offset)) - if err != nil { - t.Fatal(err) - } - if n != len(buf) { - t.Fatal("wrote incorrect number of bytes") - } - } + buf := make([]byte, 5000) + u.NewTimeSeededRand().Read(buf[2500:]) - fisize, err := dagmod.Size() + nseek, err := dagmod.Seek(2500, os.SEEK_SET) if err != nil { t.Fatal(err) } - if int(fisize) != len(b) { - t.Fatal("reported filesize incorrect", fisize, len(b)) + if nseek != 2500 { + t.Fatal("failed to seek") } - // Run a GC, then ensure we can still read the file correctly - basicGC(t, bstore, pins) - - nd, err := dagmod.GetNode() - if err != nil { - t.Fatal(err) - } - read, err := uio.NewDagReader(context.Background(), nd, dserv) + wrote, err := dagmod.Write(buf[2500:]) if err != nil { t.Fatal(err) } - out, err := ioutil.ReadAll(read) - if err != nil { - t.Fatal(err) + if wrote != 2500 { + t.Fatal("incorrect write amount") } - if err = arrComp(out, b); err != nil { + _, err = dagmod.Seek(0, os.SEEK_SET) + if err != nil { t.Fatal(err) } - rootk, err := nd.Key() + out, err := ioutil.ReadAll(dagmod) if err != nil { t.Fatal(err) } - // Verify only one recursive pin - recpins := pins.RecursiveKeys() - if len(recpins) != 1 { - t.Fatal("Incorrect number of pinned entries") - } - - // verify the correct node is pinned - if recpins[0] != rootk { - t.Fatal("Incorrect node recursively pinned") - } - - indirpins := pins.IndirectKeys() - children := enumerateChildren(t, nd, dserv) - if len(indirpins) != len(children) { - t.Log(len(indirpins), len(children)) - t.Fatal("Incorrect number of indirectly pinned blocks") - } - -} - -func enumerateChildren(t *testing.T, nd *mdag.Node, ds mdag.DAGService) []key.Key { - var out []key.Key - for _, lnk := range nd.Links { - out = append(out, key.Key(lnk.Hash)) - child, err := lnk.GetNode(context.Background(), ds) - if err != nil { - t.Fatal(err) - } - children := enumerateChildren(t, child, ds) - out = append(out, children...) + if err = arrComp(out, buf); err != nil { + t.Fatal(err) } - return out } func BenchmarkDagmodWrite(b *testing.B) { b.StopTimer() - dserv, pins := getMockDagServ(b) - _, n := getNode(b, dserv, 0, pins) + dserv := getMockDagServ(b) + _, n := getNode(b, dserv, 0) ctx, cancel := context.WithCancel(context.Background()) defer cancel() wrsize := 4096 - dagmod, err := NewDagModifier(ctx, n, dserv, pins, sizeSplitterGen(512)) + dagmod, err := NewDagModifier(ctx, n, dserv, sizeSplitterGen(512)) if err != nil { b.Fatal(err) }
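
One design note on the s3-datastore change earlier in this patch: keys are now hex-encoded before being used as S3 object names, so arbitrary binary keys can never produce an invalid object path, at the cost of human-readable names in the bucket. A standalone sketch of the round-trip scheme (only encoding/hex; the example key "/blocks/QmFoo" is illustrative, and the datastore.Key plumbing is as in the patch):

	package main

	import (
		"encoding/hex"
		"fmt"
	)

	// encode mirrors S3Datastore.encode: any key byte sequence becomes
	// a lowercase hex string, which is always a safe S3 object name.
	func encode(key []byte) string {
		return hex.EncodeToString(key)
	}

	// decode mirrors S3Datastore.decode: object names that are not
	// valid hex are rejected rather than turned into bogus keys.
	func decode(raw string) ([]byte, bool) {
		k, err := hex.DecodeString(raw)
		if err != nil {
			return nil, false
		}
		return k, true
	}

	func main() {
		name := encode([]byte("/blocks/QmFoo"))
		fmt.Println(name) // 2f626c6f636b732f516d466f6f

		if k, ok := decode(name); ok {
			fmt.Println(string(k)) // /blocks/QmFoo
		}
	}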